Diffstat (limited to 'test/syscalls')
-rw-r--r--  test/syscalls/linux/BUILD                   |   4
-rw-r--r--  test/syscalls/linux/read.cc                 |  40
-rw-r--r--  test/syscalls/linux/setgid.cc               |  95
-rw-r--r--  test/syscalls/linux/socket_inet_loopback.cc | 111
-rw-r--r--  test/syscalls/linux/write.cc                |  78
5 files changed, 298 insertions, 30 deletions
diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD
index 4509b5e55..043ada583 100644
--- a/test/syscalls/linux/BUILD
+++ b/test/syscalls/linux/BUILD
@@ -1922,7 +1922,9 @@ cc_binary(
linkstatic = 1,
deps = [
"//test/util:file_descriptor",
+ "@com_google_absl//absl/base:core_headers",
gtest,
+ "//test/util:cleanup",
"//test/util:temp_path",
"//test/util:test_main",
"//test/util:test_util",
@@ -2162,6 +2164,7 @@ cc_binary(
"//test/util:temp_path",
"//test/util:test_main",
"//test/util:test_util",
+ "@com_google_absl//absl/flags:flag",
"@com_google_absl//absl/strings",
gtest,
],
@@ -3990,6 +3993,7 @@ cc_binary(
linkstatic = 1,
deps = [
"//test/util:cleanup",
+ "@com_google_absl//absl/base:core_headers",
gtest,
"//test/util:temp_path",
"//test/util:test_main",
diff --git a/test/syscalls/linux/read.cc b/test/syscalls/linux/read.cc
index 98d5e432d..087262535 100644
--- a/test/syscalls/linux/read.cc
+++ b/test/syscalls/linux/read.cc
@@ -13,11 +13,14 @@
// limitations under the License.
#include <fcntl.h>
+#include <sys/mman.h>
#include <unistd.h>
#include <vector>
#include "gtest/gtest.h"
+#include "absl/base/macros.h"
+#include "test/util/cleanup.h"
#include "test/util/file_descriptor.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"
@@ -121,6 +124,43 @@ TEST_F(ReadTest, ReadWithOpath) {
EXPECT_THAT(ReadFd(fd.get(), buf.data(), 1), SyscallFailsWithErrno(EBADF));
}
+// Test that partial reads that hit SIGSEGV are correctly handled and return a
+// partial read.
+TEST_F(ReadTest, PartialReadSIGSEGV) {
+ // Allocate 2 pages and remove permission from the second.
+ const size_t size = 2 * kPageSize;
+ void* addr =
+ mmap(0, size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ ASSERT_NE(addr, MAP_FAILED);
+ auto cleanup = Cleanup(
+ [addr, size] { EXPECT_THAT(munmap(addr, size), SyscallSucceeds()); });
+
+ FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(name_.c_str(), O_RDWR, 0666));
+ for (size_t i = 0; i < 2; i++) {
+ EXPECT_THAT(pwrite(fd.get(), addr, size, 0),
+ SyscallSucceedsWithValue(size));
+ }
+
+ void* badAddr = reinterpret_cast<char*>(addr) + kPageSize;
+ ASSERT_THAT(mprotect(badAddr, kPageSize, PROT_NONE), SyscallSucceeds());
+
+  // Attempt to read into both pages. Create a non-contiguous iovec pair to
+  // ensure the operation is done in 2 steps.
+ struct iovec iov[] = {
+ {
+ .iov_base = addr,
+ .iov_len = kPageSize,
+ },
+ {
+ .iov_base = addr,
+ .iov_len = size,
+ },
+ };
+ EXPECT_THAT(preadv(fd.get(), iov, ABSL_ARRAYSIZE(iov), 0),
+ SyscallSucceedsWithValue(size));
+}
+
} // namespace
} // namespace testing
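
The new ReadTest.PartialReadSIGSEGV case above checks that a vectored read which faults partway through returns the bytes already copied rather than failing with EFAULT. The same behavior can be reproduced outside the gtest harness with a minimal standalone sketch; the source file name and the 4 KiB page size below are assumptions, not part of the change.

// Minimal sketch (assumes a 4 KiB page size and a readable file "data.bin"
// at least two pages long); hypothetical demo, not part of the change above.
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <unistd.h>

#include <cstdio>

int main() {
  const size_t kPage = 4096;
  // Map two writable pages, then revoke all access to the second one.
  char* buf = static_cast<char*>(mmap(nullptr, 2 * kPage, PROT_READ | PROT_WRITE,
                                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
  mprotect(buf + kPage, kPage, PROT_NONE);

  int fd = open("data.bin", O_RDONLY);
  struct iovec iov[] = {
      {.iov_base = buf, .iov_len = kPage},      // fully readable destination
      {.iov_base = buf, .iov_len = 2 * kPage},  // faults after its first page
  };
  // The kernel stops at the first inaccessible destination byte and reports
  // the bytes already read, so a short count is expected instead of EFAULT.
  ssize_t n = preadv(fd, iov, 2, 0);
  printf("preadv returned %zd\n", n);
  close(fd);
  munmap(buf, 2 * kPage);
  return 0;
}

On a 4 KiB-page system with a sufficiently large source file, the program prints 8192 (two pages), matching the SyscallSucceedsWithValue(size) expectation in the test.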
diff --git a/test/syscalls/linux/setgid.cc b/test/syscalls/linux/setgid.cc
index cd030b094..98f8f3dfe 100644
--- a/test/syscalls/linux/setgid.cc
+++ b/test/syscalls/linux/setgid.cc
@@ -17,6 +17,7 @@
#include <unistd.h>
#include "gtest/gtest.h"
+#include "absl/flags/flag.h"
#include "test/util/capability_util.h"
#include "test/util/cleanup.h"
#include "test/util/fs_util.h"
@@ -24,6 +25,11 @@
#include "test/util/temp_path.h"
#include "test/util/test_util.h"
+ABSL_FLAG(std::vector<std::string>, groups, std::vector<std::string>({}),
+ "groups the test can use");
+
+constexpr gid_t kNobody = 65534;
+
namespace gvisor {
namespace testing {
@@ -46,6 +52,18 @@ PosixErrorOr<Cleanup> Setegid(gid_t egid) {
// Returns a pair of groups that the user is a member of.
PosixErrorOr<std::pair<gid_t, gid_t>> Groups() {
+ // Were we explicitly passed GIDs?
+ std::vector<std::string> flagged_groups = absl::GetFlag(FLAGS_groups);
+ if (flagged_groups.size() >= 2) {
+ int group1;
+ int group2;
+ if (!absl::SimpleAtoi(flagged_groups[0], &group1) ||
+ !absl::SimpleAtoi(flagged_groups[1], &group2)) {
+ return PosixError(EINVAL, "failed converting group flags to ints");
+ }
+ return std::pair<gid_t, gid_t>(group1, group2);
+ }
+
// See whether the user is a member of at least 2 groups.
std::vector<gid_t> groups(64);
for (; groups.size() <= NGROUPS_MAX; groups.resize(groups.size() * 2)) {
@@ -58,26 +76,47 @@ PosixErrorOr<std::pair<gid_t, gid_t>> Groups() {
return PosixError(errno, absl::StrFormat("getgroups(%d, %p)",
groups.size(), groups.data()));
}
- if (ngroups >= 2) {
- return std::pair<gid_t, gid_t>(groups[0], groups[1]);
+
+ if (ngroups < 2) {
+ // There aren't enough groups.
+ break;
+ }
+
+ // TODO(b/181878080): Read /proc/sys/fs/overflowgid once it is supported in
+ // gVisor.
+ if (groups[0] == kNobody || groups[1] == kNobody) {
+ // These groups aren't mapped into our user namespace, so we can't use
+ // them.
+ break;
}
- // There aren't enough groups.
- break;
+ return std::pair<gid_t, gid_t>(groups[0], groups[1]);
}
- // If we're root in the root user namespace, we can set our GID to whatever we
- // want. Try that before giving up.
- constexpr gid_t kGID1 = 1111;
- constexpr gid_t kGID2 = 2222;
- auto cleanup1 = Setegid(kGID1);
+ // If we're running in gVisor and are root in the root user namespace, we can
+ // set our GID to whatever we want. Try that before giving up.
+ //
+ // This won't work in native tests, as despite having CAP_SETGID, the gofer
+ // process will be sandboxed and unable to change file GIDs.
+ if (!IsRunningOnGvisor()) {
+ return PosixError(EPERM, "no valid groups for native testing");
+ }
+ PosixErrorOr<bool> capable = HaveCapability(CAP_SETGID);
+ if (!capable.ok()) {
+ return capable.error();
+ }
+ if (!capable.ValueOrDie()) {
+ return PosixError(EPERM, "missing CAP_SETGID");
+ }
+ gid_t gid = getegid();
+ auto cleanup1 = Setegid(gid);
if (!cleanup1.ok()) {
return cleanup1.error();
}
- auto cleanup2 = Setegid(kGID2);
+ auto cleanup2 = Setegid(kNobody);
if (!cleanup2.ok()) {
return cleanup2.error();
}
- return std::pair<gid_t, gid_t>(kGID1, kGID2);
+ return std::pair<gid_t, gid_t>(gid, kNobody);
}
class SetgidDirTest : public ::testing::Test {
@@ -85,17 +124,21 @@ class SetgidDirTest : public ::testing::Test {
void SetUp() override {
original_gid_ = getegid();
- // TODO(b/175325250): Enable when setgid directories are supported.
SKIP_IF(IsRunningWithVFS1());
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SETGID)));
+    // If we can't find two usable groups, we're in an unsupported environment.
+ // Skip the test.
+ PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();
+ SKIP_IF(!groups.ok());
+ groups_ = groups.ValueOrDie();
+
+ auto cleanup = Setegid(groups_.first);
temp_dir_ = ASSERT_NO_ERRNO_AND_VALUE(
TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));
- groups_ = ASSERT_NO_ERRNO_AND_VALUE(Groups());
}
void TearDown() override {
- ASSERT_THAT(setegid(original_gid_), SyscallSucceeds());
+ EXPECT_THAT(setegid(original_gid_), SyscallSucceeds());
}
void MkdirAsGid(gid_t gid, const std::string& path, mode_t mode) {
@@ -131,7 +174,7 @@ TEST_F(SetgidDirTest, Control) {
ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.first, g1owned, 0777));
// Set group to G2, create a file in g1owned, and confirm that G2 owns it.
- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());
+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
Open(JoinPath(g1owned, "g2owned").c_str(), O_CREAT | O_RDWR, 0777));
struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
@@ -146,7 +189,7 @@ TEST_F(SetgidDirTest, CreateFile) {
ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeSgid), SyscallSucceeds());
// Set group to G2, create a file, and confirm that G1 owns it.
- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());
+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
Open(JoinPath(g1owned, "g2created").c_str(), O_CREAT | O_RDWR, 0666));
struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
@@ -194,7 +237,7 @@ TEST_F(SetgidDirTest, OldFile) {
ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoSgid), SyscallSucceeds());
// Set group to G2, create a file, confirm that G2 owns it.
- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());
+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(
Open(JoinPath(g1owned, "g2created").c_str(), O_CREAT | O_RDWR, 0666));
struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(fd));
@@ -217,7 +260,7 @@ TEST_F(SetgidDirTest, OldDir) {
ASSERT_THAT(chmod(g1owned.c_str(), kDirmodeNoSgid), SyscallSucceeds());
// Set group to G2, create a directory, confirm that G2 owns it.
- ASSERT_THAT(setegid(groups_.second), SyscallSucceeds());
+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(Setegid(groups_.second));
auto g2created = JoinPath(g1owned, "g2created");
ASSERT_NO_FATAL_FAILURE(MkdirAsGid(groups_.second, g2created, 0666));
struct stat stats = ASSERT_NO_ERRNO_AND_VALUE(Stat(g2created));
@@ -306,6 +349,10 @@ class FileModeTest : public ::testing::TestWithParam<FileModeTestcase> {};
TEST_P(FileModeTest, WriteToFile) {
SKIP_IF(IsRunningWithVFS1());
+ PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();
+ SKIP_IF(!groups.ok());
+
+ auto cleanup = Setegid(groups.ValueOrDie().first);
auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));
auto path = JoinPath(temp_dir.path(), GetParam().name);
@@ -329,26 +376,28 @@ TEST_P(FileModeTest, WriteToFile) {
TEST_P(FileModeTest, TruncateFile) {
SKIP_IF(IsRunningWithVFS1());
+ PosixErrorOr<std::pair<gid_t, gid_t>> groups = Groups();
+ SKIP_IF(!groups.ok());
+
+ auto cleanup = Setegid(groups.ValueOrDie().first);
auto temp_dir = ASSERT_NO_ERRNO_AND_VALUE(
TempPath::CreateDirWith(GetAbsoluteTestTmpdir(), 0777 /* mode */));
auto path = JoinPath(temp_dir.path(), GetParam().name);
FileDescriptor fd =
ASSERT_NO_ERRNO_AND_VALUE(Open(path.c_str(), O_CREAT | O_RDWR, 0666));
- ASSERT_THAT(fchmod(fd.get(), GetParam().mode), SyscallSucceeds());
- struct stat stats;
- ASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());
- EXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().mode);
// Write something to the file, as truncating an empty file is a no-op.
constexpr char c = 'M';
ASSERT_THAT(write(fd.get(), &c, sizeof(c)),
SyscallSucceedsWithValue(sizeof(c)));
+ ASSERT_THAT(fchmod(fd.get(), GetParam().mode), SyscallSucceeds());
// For security reasons, truncating the file clears the SUID bit, and clears
// the SGID bit when the group executable bit is unset (which is not a true
// SGID binary).
ASSERT_THAT(ftruncate(fd.get(), 0), SyscallSucceeds());
+ struct stat stats;
ASSERT_THAT(fstat(fd.get(), &stats), SyscallSucceeds());
EXPECT_EQ(stats.st_mode & kDirmodeMask, GetParam().result_mode);
}
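
The SetgidDirTest cases above all rely on the setgid-directory inheritance rule: a file created under a directory with S_ISGID set takes the directory's group instead of the creator's effective GID. A minimal sketch of that rule outside the test fixture follows; the paths and GIDs are hypothetical, and the caller is assumed to be a member of both groups (or to hold CAP_SETGID and CAP_CHOWN). Error checks are omitted for brevity.

// Minimal sketch of the setgid-directory behavior exercised above; paths and
// GIDs are hypothetical.
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

#include <cstdio>

int main() {
  const char* dir = "/tmp/g1owned";
  const gid_t g1 = 1001, g2 = 1002;  // assume membership in both groups

  mkdir(dir, 0777);
  chown(dir, getuid(), g1);
  chmod(dir, 0777 | S_ISGID);  // turn on the setgid bit on the directory

  setegid(g2);  // create the file while our effective GID is g2
  int fd = open("/tmp/g1owned/file", O_CREAT | O_RDWR, 0666);

  struct stat st;
  fstat(fd, &st);
  // With S_ISGID on the parent, st.st_gid is g1 (the directory's group),
  // not g2 (the creator's effective GID).
  printf("file gid: %u\n", static_cast<unsigned>(st.st_gid));
  close(fd);
  return 0;
}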
diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc
index 54b45b075..597b5bcb1 100644
--- a/test/syscalls/linux/socket_inet_loopback.cc
+++ b/test/syscalls/linux/socket_inet_loopback.cc
@@ -490,7 +490,11 @@ void TestListenWhileConnect(const TestParam& param,
TestAddress const& connector = param.connector;
constexpr int kBacklog = 2;
- constexpr int kClients = kBacklog + 1;
+  // Linux completes one more connection than the listen backlog argument.
+  // To ensure at least one client connection stays in the connecting state,
+  // create 2 more client connections than the listen backlog. gVisor differs
+  // in this behavior; see gvisor.dev/issue/3153.
+ constexpr int kClients = kBacklog + 2;
// Create the listening socket.
FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
@@ -527,7 +531,7 @@ void TestListenWhileConnect(const TestParam& param,
for (auto& client : clients) {
constexpr int kTimeout = 10000;
- struct pollfd pfd = {
+ pollfd pfd = {
.fd = client.get(),
.events = POLLIN,
};
@@ -543,6 +547,10 @@ void TestListenWhileConnect(const TestParam& param,
ASSERT_THAT(read(client.get(), &c, sizeof(c)),
AnyOf(SyscallFailsWithErrno(ECONNRESET),
SyscallFailsWithErrno(ECONNREFUSED)));
+ // The last client connection would be in connecting (SYN_SENT) state.
+ if (client.get() == clients[kClients - 1].get()) {
+ ASSERT_EQ(errno, ECONNREFUSED) << strerror(errno);
+ }
}
}
@@ -598,7 +606,7 @@ TEST_P(SocketInetLoopbackTest, TCPbacklog_NoRandomSave) {
connector.addr_len);
if (ret != 0) {
EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
- struct pollfd pfd = {
+ pollfd pfd = {
.fd = conn_fd.get(),
.events = POLLOUT,
};
@@ -623,6 +631,95 @@ TEST_P(SocketInetLoopbackTest, TCPbacklog_NoRandomSave) {
}
}
+// Test that the stack completes at most the listen backlog number of client
+// connections. It exercises the path of the stack that enqueues completed
+// connections to the accept queue while new SYNs are still arriving.
+TEST_P(SocketInetLoopbackTest, TCPConnectBacklog_NoRandomSave) {
+ const auto& param = GetParam();
+ const TestAddress& listener = param.listener;
+ const TestAddress& connector = param.connector;
+
+ constexpr int kBacklog = 1;
+  // Keep the number of client connections greater than the listen backlog.
+  // Linux completes one more connection than the listen backlog argument.
+  // gVisor differs in this behavior; see gvisor.dev/issue/3153.
+ int kClients = kBacklog + 2;
+ if (IsRunningOnGvisor()) {
+ kClients--;
+ }
+
+  // Run the following test for a few iterations to exercise the race between
+  // the accept queue filling up and new incoming SYNs arriving.
+ for (int num = 0; num < 10; num++) {
+ FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE(
+ Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP));
+ sockaddr_storage listen_addr = listener.addr;
+ ASSERT_THAT(bind(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),
+ listener.addr_len),
+ SyscallSucceeds());
+ ASSERT_THAT(listen(listen_fd.get(), kBacklog), SyscallSucceeds());
+
+ socklen_t addrlen = listener.addr_len;
+ ASSERT_THAT(
+ getsockname(listen_fd.get(), reinterpret_cast<sockaddr*>(&listen_addr),
+ &addrlen),
+ SyscallSucceeds());
+ uint16_t const port =
+ ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr));
+ sockaddr_storage conn_addr = connector.addr;
+ ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
+
+ std::vector<FileDescriptor> clients;
+ // Issue multiple non-blocking client connects.
+ for (int i = 0; i < kClients; i++) {
+ FileDescriptor client = ASSERT_NO_ERRNO_AND_VALUE(
+ Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
+ int ret = connect(client.get(), reinterpret_cast<sockaddr*>(&conn_addr),
+ connector.addr_len);
+ if (ret != 0) {
+ EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS));
+ }
+ clients.push_back(std::move(client));
+ }
+
+ // Now that client connects are issued, wait for the accept queue to get
+ // filled and ensure no new client connection is completed.
+ for (int i = 0; i < kClients; i++) {
+ pollfd pfd = {
+ .fd = clients[i].get(),
+ .events = POLLOUT,
+ };
+ if (i < kClients - 1) {
+        // Poll for client-side connection completions with a large timeout.
+        // We cannot poll on the listener side without calling accept, as poll
+        // stays level-triggered while the accept queue length is non-zero.
+ //
+      // A client-side poll does not guarantee that the completed connection
+      // has been enqueued into the accept queue, but the fact that the
+      // listener ACKed the SYN means that it cannot complete any new incoming
+      // SYNs once it has already ACKed more than backlog SYNs.
+ ASSERT_THAT(poll(&pfd, 1, 10000), SyscallSucceedsWithValue(1))
+ << "num=" << num << " i=" << i << " kClients=" << kClients;
+ ASSERT_EQ(pfd.revents, POLLOUT) << "num=" << num << " i=" << i;
+ } else {
+        // Now that we expect the accept queue to be full, ensure that the
+        // last client connection never completes within a smaller poll timeout.
+ ASSERT_THAT(poll(&pfd, 1, 1000), SyscallSucceedsWithValue(0))
+ << "num=" << num << " i=" << i;
+ }
+
+ ASSERT_THAT(close(clients[i].release()), SyscallSucceedsWithValue(0))
+ << "num=" << num << " i=" << i;
+ }
+ clients.clear();
+    // We close the listening side and open a new listener. We could instead
+    // drain the accept queue by calling accept() and reuse the listener, but
+    // that is racy: retransmitted SYNs could get ACKed as we make room in
+    // the accept queue.
+ ASSERT_THAT(close(listen_fd.release()), SyscallSucceedsWithValue(0));
+ }
+}
+
// TCPFinWait2Test creates a pair of connected sockets then closes one end to
// trigger FIN_WAIT2 state for the closed endpoint. Then it binds the same local
// IP/port on a new socket and tries to connect. The connect should fail w/
@@ -937,7 +1034,7 @@ void setupTimeWaitClose(const TestAddress* listener,
ASSERT_THAT(shutdown(active_closefd.get(), SHUT_WR), SyscallSucceeds());
{
constexpr int kTimeout = 10000;
- struct pollfd pfd = {
+ pollfd pfd = {
.fd = passive_closefd.get(),
.events = POLLIN,
};
@@ -948,7 +1045,7 @@ void setupTimeWaitClose(const TestAddress* listener,
{
constexpr int kTimeout = 10000;
constexpr int16_t want_events = POLLHUP;
- struct pollfd pfd = {
+ pollfd pfd = {
.fd = active_closefd.get(),
.events = want_events,
};
@@ -1181,7 +1278,7 @@ TEST_P(SocketInetLoopbackTest, TCPAcceptAfterReset) {
// Wait for accept_fd to process the RST.
constexpr int kTimeout = 10000;
- struct pollfd pfd = {
+ pollfd pfd = {
.fd = accept_fd.get(),
.events = POLLIN,
};
@@ -1705,7 +1802,7 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThreadShort_NoRandomSave) {
SyscallSucceedsWithValue(sizeof(i)));
}
- struct pollfd pollfds[kThreadCount];
+ pollfd pollfds[kThreadCount];
for (int i = 0; i < kThreadCount; i++) {
pollfds[i].fd = listener_fds[i].get();
pollfds[i].events = POLLIN;
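
Both TestListenWhileConnect and the new TCPConnectBacklog_NoRandomSave test detect connection completion from the client side: a non-blocking connect() returns EINPROGRESS, and POLLOUT later signals that the listener ACKed the SYN, while a poll timeout indicates the connection is still in SYN_SENT because the accept queue is full. A minimal sketch of that pattern follows; the loopback address and port are hypothetical.

// Minimal sketch of the non-blocking connect + poll pattern used above;
// address and port are hypothetical.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>

int main() {
  sockaddr_in addr = {};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(8080);  // hypothetical listener port
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

  int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP);
  int ret = connect(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
  if (ret != 0 && errno != EINPROGRESS) {
    perror("connect");
    return 1;
  }

  // The handshake completes asynchronously; POLLOUT on the connecting socket
  // signals that the listener ACKed our SYN. If the listener's accept queue
  // is already full (backlog exhausted), poll() times out instead.
  pollfd pfd = {.fd = fd, .events = POLLOUT};
  int n = poll(&pfd, 1, 1000 /* ms */);
  printf(n == 1 ? "connection completed\n" : "still connecting (SYN_SENT)\n");
  close(fd);
  return 0;
}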
diff --git a/test/syscalls/linux/write.cc b/test/syscalls/linux/write.cc
index 740992d0a..3373ba72b 100644
--- a/test/syscalls/linux/write.cc
+++ b/test/syscalls/linux/write.cc
@@ -15,6 +15,7 @@
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
@@ -23,6 +24,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/macros.h"
#include "test/util/cleanup.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"
@@ -256,6 +258,82 @@ TEST_F(WriteTest, PwriteWithOpath) {
SyscallFailsWithErrno(EBADF));
}
+// Test that partial writes that hit SIGSEGV are correctly handled and return a
+// partial write.
+TEST_F(WriteTest, PartialWriteSIGSEGV) {
+ // Allocate 2 pages and remove permission from the second.
+ const size_t size = 2 * kPageSize;
+ void* addr = mmap(0, size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ ASSERT_NE(addr, MAP_FAILED);
+ auto cleanup = Cleanup(
+ [addr, size] { EXPECT_THAT(munmap(addr, size), SyscallSucceeds()); });
+
+ void* badAddr = reinterpret_cast<char*>(addr) + kPageSize;
+ ASSERT_THAT(mprotect(badAddr, kPageSize, PROT_NONE), SyscallSucceeds());
+
+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
+ FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_WRONLY));
+
+ // Attempt to write both pages to the file. Create a non-contiguous iovec pair
+  // to ensure the operation is done in 2 steps.
+ struct iovec iov[] = {
+ {
+ .iov_base = addr,
+ .iov_len = kPageSize,
+ },
+ {
+ .iov_base = addr,
+ .iov_len = size,
+ },
+ };
+ // Write should succeed for the first iovec and half of the second (=2 pages).
+ EXPECT_THAT(pwritev(fd.get(), iov, ABSL_ARRAYSIZE(iov), 0),
+ SyscallSucceedsWithValue(2 * kPageSize));
+}
+
+// Test that partial writes that hit SIGBUS are correctly handled and return a
+// partial write.
+TEST_F(WriteTest, PartialWriteSIGBUS) {
+ SKIP_IF(getenv("GVISOR_GOFER_UNCACHED")); // Can't mmap from uncached files.
+
+ TempPath mapfile = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
+ FileDescriptor fd_map =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(mapfile.path().c_str(), O_RDWR));
+
+  // Back only the first page with file data so the write is forced to be partial.
+ ASSERT_THAT(ftruncate(fd_map.get(), kPageSize), SyscallSucceeds());
+
+ // Map 2 pages, one of which is not allocated in the backing file. Reading
+ // from it will trigger a SIGBUS.
+ const size_t size = 2 * kPageSize;
+ void* addr =
+ mmap(NULL, size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd_map.get(), 0);
+ ASSERT_NE(addr, MAP_FAILED);
+ auto cleanup = Cleanup(
+ [addr, size] { EXPECT_THAT(munmap(addr, size), SyscallSucceeds()); });
+
+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
+ FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_WRONLY));
+
+ // Attempt to write both pages to the file. Create a non-contiguous iovec pair
+  // to ensure the operation is done in 2 steps.
+ struct iovec iov[] = {
+ {
+ .iov_base = addr,
+ .iov_len = kPageSize,
+ },
+ {
+ .iov_base = addr,
+ .iov_len = size,
+ },
+ };
+ // Write should succeed for the first iovec and half of the second (=2 pages).
+ ASSERT_THAT(pwritev(fd.get(), iov, ABSL_ARRAYSIZE(iov), 0),
+ SyscallSucceedsWithValue(2 * kPageSize));
+}
+
} // namespace
} // namespace testing
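
PartialWriteSIGBUS builds its faulting source buffer by mapping two pages over a file that is only one page long, so touching the second mapped page raises SIGBUS. The sketch below shows that setup in isolation; the file paths and the 4 KiB page size are assumptions, and error handling is omitted.

// Minimal sketch of the SIGBUS setup used by PartialWriteSIGBUS; hypothetical
// paths, assumed 4 KiB pages.
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <unistd.h>

#include <cstdio>

int main() {
  const size_t kPage = 4096;

  int src = open("/tmp/mapfile", O_RDWR | O_CREAT, 0666);
  ftruncate(src, kPage);  // back only the first mapped page with file data

  // Map two pages over the one-page file; reading the second page is past
  // EOF and raises SIGBUS.
  char* buf = static_cast<char*>(
      mmap(nullptr, 2 * kPage, PROT_READ, MAP_PRIVATE, src, 0));

  int dst = open("/tmp/out", O_WRONLY | O_CREAT, 0666);
  struct iovec iov[] = {
      {.iov_base = buf, .iov_len = kPage},      // backed by the file
      {.iov_base = buf, .iov_len = 2 * kPage},  // second half is past EOF
  };
  // The kernel copies until the faulting source page and reports the bytes
  // already written (two pages here) rather than failing outright.
  ssize_t n = pwritev(dst, iov, 2, 0);
  printf("pwritev returned %zd\n", n);
  close(dst);
  close(src);
  return 0;
}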