Diffstat (limited to 'test/syscalls/linux')
-rw-r--r--  test/syscalls/linux/BUILD          |  20
-rw-r--r--  test/syscalls/linux/ip6tables.cc   |  26
-rw-r--r--  test/syscalls/linux/iptables.cc    |  26
-rw-r--r--  test/syscalls/linux/kcov.cc        | 137
-rw-r--r--  test/syscalls/linux/membarrier.cc  | 268
-rw-r--r--  test/syscalls/linux/proc.cc        |  23
6 files changed, 483 insertions, 17 deletions
diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD
index c775a6d75..36b7f1b97 100644
--- a/test/syscalls/linux/BUILD
+++ b/test/syscalls/linux/BUILD
@@ -1079,6 +1079,7 @@ cc_binary(
         gtest,
         "//test/util:test_main",
         "//test/util:test_util",
+        "//test/util:thread_util",
     ],
 )
 
@@ -1155,6 +1156,24 @@ cc_binary(
 )
 
 cc_binary(
+    name = "membarrier_test",
+    testonly = 1,
+    srcs = ["membarrier.cc"],
+    linkstatic = 1,
+    deps = [
+        "@com_google_absl//absl/time",
+        gtest,
+        "//test/util:cleanup",
+        "//test/util:logging",
+        "//test/util:memory_util",
+        "//test/util:posix_error",
+        "//test/util:test_main",
+        "//test/util:test_util",
+        "//test/util:thread_util",
+    ],
+)
+
+cc_binary(
     name = "mempolicy_test",
     testonly = 1,
     srcs = ["mempolicy.cc"],
@@ -1667,6 +1686,7 @@ cc_binary(
         "//test/util:cleanup",
         "//test/util:file_descriptor",
         "//test/util:fs_util",
+        "@com_google_absl//absl/container:node_hash_set",
         "@com_google_absl//absl/strings",
        "@com_google_absl//absl/synchronization",
         "@com_google_absl//absl/time",
diff --git a/test/syscalls/linux/ip6tables.cc b/test/syscalls/linux/ip6tables.cc
index 97297ee2b..de0a1c114 100644
--- a/test/syscalls/linux/ip6tables.cc
+++ b/test/syscalls/linux/ip6tables.cc
@@ -82,6 +82,32 @@ TEST(IP6TablesBasic, GetEntriesErrorPrecedence) {
               SyscallFailsWithErrno(EINVAL));
 }
 
+TEST(IP6TablesBasic, GetRevision) {
+  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
+
+  int sock;
+  ASSERT_THAT(sock = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW),
+              SyscallSucceeds());
+
+  struct xt_get_revision rev = {
+      .name = "REDIRECT",
+      .revision = 0,
+  };
+  socklen_t rev_len = sizeof(rev);
+
+  // Revision 0 exists.
+  EXPECT_THAT(
+      getsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len),
+      SyscallSucceeds());
+  EXPECT_EQ(rev.revision, 0);
+
+  // Revisions > 0 don't exist.
+  rev.revision = 1;
+  EXPECT_THAT(
+      getsockopt(sock, SOL_IPV6, IP6T_SO_GET_REVISION_TARGET, &rev, &rev_len),
+      SyscallFailsWithErrno(EPROTONOSUPPORT));
+}
+
 // This tests the initial state of a machine with empty ip6tables via
 // getsockopt(IP6T_SO_GET_INFO). We don't have a guarantee that the iptables are
 // empty when running in native, but we can test that gVisor has the same
diff --git a/test/syscalls/linux/iptables.cc b/test/syscalls/linux/iptables.cc
index 83b6a164a..7ee10bbde 100644
--- a/test/syscalls/linux/iptables.cc
+++ b/test/syscalls/linux/iptables.cc
@@ -117,6 +117,32 @@ TEST(IPTablesBasic, OriginalDstErrors) {
               SyscallFailsWithErrno(ENOTCONN));
 }
 
+TEST(IPTablesBasic, GetRevision) {
+  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));
+
+  int sock;
+  ASSERT_THAT(sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP),
+              SyscallSucceeds());
+
+  struct xt_get_revision rev = {
+      .name = "REDIRECT",
+      .revision = 0,
+  };
+  socklen_t rev_len = sizeof(rev);
+
+  // Revision 0 exists.
+  EXPECT_THAT(
+      getsockopt(sock, SOL_IP, IPT_SO_GET_REVISION_TARGET, &rev, &rev_len),
+      SyscallSucceeds());
+  EXPECT_EQ(rev.revision, 0);
+
+  // Revisions > 0 don't exist.
+  rev.revision = 1;
+  EXPECT_THAT(
+      getsockopt(sock, SOL_IP, IPT_SO_GET_REVISION_TARGET, &rev, &rev_len),
+      SyscallFailsWithErrno(EPROTONOSUPPORT));
+}
+
 // Fixture for iptables tests.
 class IPTablesTest : public ::testing::Test {
  protected:
diff --git a/test/syscalls/linux/kcov.cc b/test/syscalls/linux/kcov.cc
index 6afcb4e75..6816c1fd0 100644
--- a/test/syscalls/linux/kcov.cc
+++ b/test/syscalls/linux/kcov.cc
@@ -16,39 +16,47 @@
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 
+#include <atomic>
+
 #include "gtest/gtest.h"
 #include "test/util/capability_util.h"
 #include "test/util/file_descriptor.h"
 #include "test/util/test_util.h"
+#include "test/util/thread_util.h"
 
 namespace gvisor {
 namespace testing {
 
 namespace {
 
-// For this test to work properly, it must be run with coverage enabled. On
+// For this set of tests to run, they must be run with coverage enabled. On
 // native Linux, this involves compiling the kernel with kcov enabled. For
-// gVisor, we need to enable the Go coverage tool, e.g.
-// bazel test --collect_coverage_data --instrumentation_filter=//pkg/... <test>.
+// gVisor, we need to enable the Go coverage tool, e.g. bazel test --
+// collect_coverage_data --instrumentation_filter=//pkg/... <test>.
+
+constexpr char kcovPath[] = "/sys/kernel/debug/kcov";
+constexpr int kSize = 4096;
+constexpr int KCOV_INIT_TRACE = 0x80086301;
+constexpr int KCOV_ENABLE = 0x6364;
+constexpr int KCOV_DISABLE = 0x6365;
+
+uint64_t* KcovMmap(int fd) {
+  return (uint64_t*)mmap(nullptr, kSize * sizeof(uint64_t),
+                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+}
+
 TEST(KcovTest, Kcov) {
   SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
 
-  constexpr int kSize = 4096;
-  constexpr int KCOV_INIT_TRACE = 0x80086301;
-  constexpr int KCOV_ENABLE = 0x6364;
-  constexpr int KCOV_DISABLE = 0x6365;
-
   int fd;
-  ASSERT_THAT(fd = open("/sys/kernel/debug/kcov", O_RDWR),
+  ASSERT_THAT(fd = open(kcovPath, O_RDWR),
               AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
-
   // Kcov not available.
   SKIP_IF(errno == ENOENT);
+  auto fd_closer = Cleanup([fd]() { close(fd); });
 
   ASSERT_THAT(ioctl(fd, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
-  uint64_t* area = (uint64_t*)mmap(nullptr, kSize * sizeof(uint64_t),
-                                   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-
+  uint64_t* area = KcovMmap(fd);
   ASSERT_TRUE(area != MAP_FAILED);
   ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
@@ -67,6 +75,109 @@ TEST(KcovTest, Kcov) {
   ASSERT_THAT(ioctl(fd, KCOV_DISABLE, 0), SyscallSucceeds());
 }
 
+TEST(KcovTest, PrematureMmap) {
+  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
+
+  int fd;
+  ASSERT_THAT(fd = open(kcovPath, O_RDWR),
+              AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
+  // Kcov not available.
+  SKIP_IF(errno == ENOENT);
+  auto fd_closer = Cleanup([fd]() { close(fd); });
+
+  // Cannot mmap before KCOV_INIT_TRACE.
+  uint64_t* area = KcovMmap(fd);
+  ASSERT_TRUE(area == MAP_FAILED);
+}
+
+// Tests that multiple kcov fds can be used simultaneously.
+TEST(KcovTest, MultipleFds) {
+  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
+
+  int fd1;
+  ASSERT_THAT(fd1 = open(kcovPath, O_RDWR),
+              AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
+  // Kcov not available.
+  SKIP_IF(errno == ENOENT);
+
+  int fd2;
+  ASSERT_THAT(fd2 = open(kcovPath, O_RDWR), SyscallSucceeds());
+  auto fd_closer = Cleanup([fd1, fd2]() {
+    close(fd1);
+    close(fd2);
+  });
+
+  auto t1 = ScopedThread([&] {
+    ASSERT_THAT(ioctl(fd1, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
+    uint64_t* area = KcovMmap(fd1);
+    ASSERT_TRUE(area != MAP_FAILED);
+    ASSERT_THAT(ioctl(fd1, KCOV_ENABLE, 0), SyscallSucceeds());
+  });
+
+  ASSERT_THAT(ioctl(fd2, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
+  uint64_t* area = KcovMmap(fd2);
+  ASSERT_TRUE(area != MAP_FAILED);
+  ASSERT_THAT(ioctl(fd2, KCOV_ENABLE, 0), SyscallSucceeds());
+}
+
+// Tests behavior for two threads trying to use the same kcov fd.
+TEST(KcovTest, MultipleThreads) {
+  SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability((CAP_DAC_OVERRIDE))));
+
+  int fd;
+  ASSERT_THAT(fd = open(kcovPath, O_RDWR),
+              AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
+  // Kcov not available.
+  SKIP_IF(errno == ENOENT);
+  auto fd_closer = Cleanup([fd]() { close(fd); });
+
+  // Test the behavior of multiple threads trying to use the same kcov fd
+  // simultaneously.
+  std::atomic<bool> t1_enabled(false), t1_disabled(false), t2_failed(false),
+      t2_exited(false);
+  auto t1 = ScopedThread([&] {
+    ASSERT_THAT(ioctl(fd, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
+    uint64_t* area = KcovMmap(fd);
+    ASSERT_TRUE(area != MAP_FAILED);
+    ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
+    t1_enabled = true;
+
+    // After t2 has made sure that enabling kcov again fails, disable it.
+    while (!t2_failed) {
+      sched_yield();
+    }
+    ASSERT_THAT(ioctl(fd, KCOV_DISABLE, 0), SyscallSucceeds());
+    t1_disabled = true;
+
+    // Wait for t2 to enable kcov and then exit, after which we should be able
+    // to enable kcov again, without needing to set up a new memory mapping.
+    while (!t2_exited) {
+      sched_yield();
+    }
+    ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
+  });
+
+  auto t2 = ScopedThread([&] {
+    // Wait for t1 to enable kcov, and make sure that enabling kcov again fails.
+    while (!t1_enabled) {
+      sched_yield();
+    }
+    ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallFailsWithErrno(EINVAL));
+    t2_failed = true;
+
+    // Wait for t1 to disable kcov, after which using fd should now succeed.
+    while (!t1_disabled) {
+      sched_yield();
+    }
+    uint64_t* area = KcovMmap(fd);
+    ASSERT_TRUE(area != MAP_FAILED);
+    ASSERT_THAT(ioctl(fd, KCOV_ENABLE, 0), SyscallSucceeds());
+  });
+
+  t2.Join();
+  t2_exited = true;
+}
+
 }  // namespace
 
 }  // namespace testing
diff --git a/test/syscalls/linux/membarrier.cc b/test/syscalls/linux/membarrier.cc
new file mode 100644
index 000000000..516956a25
--- /dev/null
+++ b/test/syscalls/linux/membarrier.cc
@@ -0,0 +1,268 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <errno.h>
+#include <signal.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <atomic>
+
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "test/util/cleanup.h"
+#include "test/util/logging.h"
+#include "test/util/memory_util.h"
+#include "test/util/posix_error.h"
+#include "test/util/test_util.h"
+#include "test/util/thread_util.h"
+
+namespace gvisor {
+namespace testing {
+
+namespace {
+
+// This is the classic test case for memory fences on architectures with total
+// store ordering; see e.g. Intel SDM Vol. 3A Sec. 8.2.3.4 "Loads May Be
+// Reordered with Earlier Stores to Different Locations". In each iteration of
+// the test, given two variables X and Y initially set to 0
+// (MembarrierTestSharedState::local_var and remote_var in the code), two
+// threads execute as follows:
+//
+//   T1          T2
+//   --          --
+//
+//   X = 1       Y = 1
+//   T1fence()   T2fence()
+//   read Y      read X
+//
+// On architectures where memory writes may be locally buffered by each CPU
+// (essentially all architectures), if T1fence() and T2fence() are omitted or
+// ineffective, it is possible for both T1 and T2 to read 0 because the memory
+// write from the other CPU is not yet visible outside that CPU. T1fence() and
+// T2fence() are expected to perform the necessary synchronization to restore
+// sequential consistency: both threads agree on a order of memory accesses that
+// is consistent with program order in each thread, such that at least one
+// thread reads 1.
+//
+// In the NoMembarrier test, T1fence() and T2fence() are both ordinary memory
+// fences establishing ordering between memory accesses before and after the
+// fence (std::atomic_thread_fence). In all other test cases, T1fence() is not a
+// memory fence at all, but only prevents compiler reordering of memory accesses
+// (std::atomic_signal_fence); T2fence() is an invocation of the membarrier()
+// syscall, which establishes ordering of memory accesses before and after the
+// syscall on both threads.
+
+template <typename F>
+int DoMembarrierTestSide(std::atomic<int>* our_var,
+                         std::atomic<int> const& their_var,
+                         F const& test_fence) {
+  our_var->store(1, std::memory_order_relaxed);
+  test_fence();
+  return their_var.load(std::memory_order_relaxed);
+}
+
+struct MembarrierTestSharedState {
+  std::atomic<int64_t> remote_iter_cur;
+  std::atomic<int64_t> remote_iter_done;
+  std::atomic<int> local_var;
+  std::atomic<int> remote_var;
+  int remote_obs_of_local_var;
+
+  void Init() {
+    remote_iter_cur.store(-1, std::memory_order_relaxed);
+    remote_iter_done.store(-1, std::memory_order_relaxed);
+  }
+};
+
+// Special value for MembarrierTestSharedState::remote_iter_cur indicating that
+// the remote thread should terminate.
+constexpr int64_t kRemoteIterStop = -2;
+
+// Must be async-signal-safe.
+template <typename F>
+void RunMembarrierTestRemoteSide(MembarrierTestSharedState* state,
+                                 F const& test_fence) {
+  int64_t i = 0;
+  int64_t cur;
+  while (true) {
+    while ((cur = state->remote_iter_cur.load(std::memory_order_acquire)) < i) {
+      if (cur == kRemoteIterStop) {
+        return;
+      }
+      // spin
+    }
+    state->remote_obs_of_local_var =
+        DoMembarrierTestSide(&state->remote_var, state->local_var, test_fence);
+    state->remote_iter_done.store(i, std::memory_order_release);
+    i++;
+  }
+}
+
+template <typename F>
+void RunMembarrierTestLocalSide(MembarrierTestSharedState* state,
+                                F const& test_fence) {
+  // On test completion, instruct the remote thread to terminate.
+  Cleanup cleanup_remote([&] {
+    state->remote_iter_cur.store(kRemoteIterStop, std::memory_order_relaxed);
+  });
+
+  int64_t i = 0;
+  absl::Time end = absl::Now() + absl::Seconds(5);  // arbitrary test duration
+  while (absl::Now() < end) {
+    // Reset both vars to 0.
+    state->local_var.store(0, std::memory_order_relaxed);
+    state->remote_var.store(0, std::memory_order_relaxed);
+    // Instruct the remote thread to begin this iteration.
+    state->remote_iter_cur.store(i, std::memory_order_release);
+    // Perform our side of the test.
+    auto local_obs_of_remote_var =
+        DoMembarrierTestSide(&state->local_var, state->remote_var, test_fence);
+    // Wait for the remote thread to finish this iteration.
+    while (state->remote_iter_done.load(std::memory_order_acquire) < i) {
+      // spin
+    }
+    ASSERT_TRUE(local_obs_of_remote_var != 0 ||
+                state->remote_obs_of_local_var != 0);
+    i++;
+  }
+}
+
+TEST(MembarrierTest, NoMembarrier) {
+  MembarrierTestSharedState state;
+  state.Init();
+
+  ScopedThread remote_thread([&] {
+    RunMembarrierTestRemoteSide(
+        &state, [] { std::atomic_thread_fence(std::memory_order_seq_cst); });
+  });
+  RunMembarrierTestLocalSide(
+      &state, [] { std::atomic_thread_fence(std::memory_order_seq_cst); });
+}
+
+enum membarrier_cmd {
+  MEMBARRIER_CMD_QUERY = 0,
+  MEMBARRIER_CMD_GLOBAL = (1 << 0),
+  MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1),
+  MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
+  MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+  MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
+};
+
+int membarrier(membarrier_cmd cmd, int flags) {
+  return syscall(SYS_membarrier, cmd, flags);
+}
+
+PosixErrorOr<int> SupportedMembarrierCommands() {
+  int cmds = membarrier(MEMBARRIER_CMD_QUERY, 0);
+  if (cmds < 0) {
+    if (errno == ENOSYS) {
+      // No commands are supported.
+      return 0;
+    }
+    return PosixError(errno, "membarrier(MEMBARRIER_CMD_QUERY) failed");
+  }
+  return cmds;
+}
+
+TEST(MembarrierTest, Global) {
+  SKIP_IF((ASSERT_NO_ERRNO_AND_VALUE(SupportedMembarrierCommands()) &
+           MEMBARRIER_CMD_GLOBAL) == 0);
+
+  Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
+      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
+  auto state = static_cast<MembarrierTestSharedState*>(m.ptr());
+  state->Init();
+
+  pid_t const child_pid = fork();
+  if (child_pid == 0) {
+    // In child process.
+    RunMembarrierTestRemoteSide(
+        state, [] { TEST_PCHECK(membarrier(MEMBARRIER_CMD_GLOBAL, 0) == 0); });
+    _exit(0);
+  }
+  // In parent process.
+  ASSERT_THAT(child_pid, SyscallSucceeds());
+  Cleanup cleanup_child([&] {
+    int status;
+    ASSERT_THAT(waitpid(child_pid, &status, 0),
+                SyscallSucceedsWithValue(child_pid));
+    EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
+        << " status " << status;
+  });
+  RunMembarrierTestLocalSide(
+      state, [] { std::atomic_signal_fence(std::memory_order_seq_cst); });
+}
+
+TEST(MembarrierTest, GlobalExpedited) {
+  constexpr int kRequiredCommands = MEMBARRIER_CMD_GLOBAL_EXPEDITED |
+                                    MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED;
+  SKIP_IF((ASSERT_NO_ERRNO_AND_VALUE(SupportedMembarrierCommands()) &
+           kRequiredCommands) != kRequiredCommands);
+
+  ASSERT_THAT(membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0),
+              SyscallSucceeds());
+
+  Mapping m = ASSERT_NO_ERRNO_AND_VALUE(
+      MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED));
+  auto state = static_cast<MembarrierTestSharedState*>(m.ptr());
+  state->Init();
+
+  pid_t const child_pid = fork();
+  if (child_pid == 0) {
+    // In child process.
+    RunMembarrierTestRemoteSide(state, [] {
+      TEST_PCHECK(membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0) == 0);
+    });
+    _exit(0);
+  }
+  // In parent process.
+  ASSERT_THAT(child_pid, SyscallSucceeds());
+  Cleanup cleanup_child([&] {
+    int status;
+    ASSERT_THAT(waitpid(child_pid, &status, 0),
+                SyscallSucceedsWithValue(child_pid));
+    EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0)
+        << " status " << status;
+  });
+  RunMembarrierTestLocalSide(
+      state, [] { std::atomic_signal_fence(std::memory_order_seq_cst); });
+}
+
+TEST(MembarrierTest, PrivateExpedited) {
+  constexpr int kRequiredCommands = MEMBARRIER_CMD_PRIVATE_EXPEDITED |
+                                    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED;
+  SKIP_IF((ASSERT_NO_ERRNO_AND_VALUE(SupportedMembarrierCommands()) &
+           kRequiredCommands) != kRequiredCommands);
+
+  ASSERT_THAT(membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0),
+              SyscallSucceeds());
+
+  MembarrierTestSharedState state;
+  state.Init();
+
+  ScopedThread remote_thread([&] {
+    RunMembarrierTestRemoteSide(&state, [] {
+      TEST_PCHECK(membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) == 0);
+    });
+  });
+  RunMembarrierTestLocalSide(
+      &state, [] { std::atomic_signal_fence(std::memory_order_seq_cst); });
+}
+
+}  // namespace
+
+}  // namespace testing
+}  // namespace gvisor
diff --git a/test/syscalls/linux/proc.cc b/test/syscalls/linux/proc.cc
index c1488b06b..e8fcc4439 100644
--- a/test/syscalls/linux/proc.cc
+++ b/test/syscalls/linux/proc.cc
@@ -47,6 +47,7 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include "absl/container/node_hash_set.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/match.h"
 #include "absl/strings/numbers.h"
@@ -721,8 +722,8 @@ static void CheckFdDirGetdentsDuplicates(const std::string& path) {
   EXPECT_GE(newfd, 1024);
   auto fd_closer = Cleanup([newfd]() { close(newfd); });
   auto fd_files = ASSERT_NO_ERRNO_AND_VALUE(ListDir(path.c_str(), false));
-  std::unordered_set<std::string> fd_files_dedup(fd_files.begin(),
-                                                 fd_files.end());
+  absl::node_hash_set<std::string> fd_files_dedup(fd_files.begin(),
+                                                  fd_files.end());
   EXPECT_EQ(fd_files.size(), fd_files_dedup.size());
 }
 
@@ -779,8 +780,12 @@ TEST(ProcSelfFdInfo, Flags) {
 }
 
 TEST(ProcSelfExe, Absolute) {
-  auto exe = ASSERT_NO_ERRNO_AND_VALUE(
-      ReadLink(absl::StrCat("/proc/", getpid(), "/exe")));
+  auto exe = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/exe"));
+  EXPECT_EQ(exe[0], '/');
+}
+
+TEST(ProcSelfCwd, Absolute) {
+  auto exe = ASSERT_NO_ERRNO_AND_VALUE(ReadLink("/proc/self/cwd"));
   EXPECT_EQ(exe[0], '/');
 }
 
@@ -1472,6 +1477,16 @@ TEST(ProcPidExe, Subprocess) {
   EXPECT_EQ(actual, expected_absolute_path);
 }
 
+// /proc/PID/cwd points to the correct directory.
+TEST(ProcPidCwd, Subprocess) {
+  auto want = ASSERT_NO_ERRNO_AND_VALUE(GetCWD());
+
+  char got[PATH_MAX + 1] = {};
+  ASSERT_THAT(ReadlinkWhileRunning("cwd", got, sizeof(got)),
+              SyscallSucceedsWithValue(Gt(0)));
+  EXPECT_EQ(got, want);
+}
+
 // Test whether /proc/PID/ files can be read for a running process.
 TEST(ProcPidFile, SubprocessRunning) {
   char buf[1];
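
Note on the membarrier(2) calling pattern exercised by the new membarrier.cc test above: the test issues the syscall through a raw syscall() wrapper and must register before using an expedited command. The standalone sketch below is not part of the change; it only mirrors the query/register/issue sequence that MembarrierTest.PrivateExpedited depends on, with the same command constants as the enum in membarrier.cc and simplified error handling.

// Minimal sketch (illustrative only, not from the commit above): query the
// supported membarrier commands, register for private-expedited barriers,
// then issue one barrier covering all threads of this process.
#include <sys/syscall.h>
#include <unistd.h>

#include <cstdio>

namespace {

int Membarrier(int cmd, int flags) {
  // Raw syscall, as in the test above; there is no dedicated libc wrapper.
  return syscall(SYS_membarrier, cmd, flags);
}

constexpr int kQuery = 0;                           // MEMBARRIER_CMD_QUERY
constexpr int kPrivateExpedited = 1 << 3;           // MEMBARRIER_CMD_PRIVATE_EXPEDITED
constexpr int kRegisterPrivateExpedited = 1 << 4;   // MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED

}  // namespace

int main() {
  // MEMBARRIER_CMD_QUERY returns a bitmask of supported commands.
  int cmds = Membarrier(kQuery, 0);
  if (cmds < 0 || (cmds & kPrivateExpedited) == 0) {
    std::puts("private-expedited membarrier not supported");
    return 0;
  }
  // Registration must precede MEMBARRIER_CMD_PRIVATE_EXPEDITED.
  if (Membarrier(kRegisterPrivateExpedited, 0) != 0) {
    return 1;
  }
  // Acts as a memory barrier on every running thread of this process.
  return Membarrier(kPrivateExpedited, 0) == 0 ? 0 : 1;
}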