author    Ian Gudger <igudger@google.com>   2019-06-13 17:24:58 -0700
committer Shentubot <shentubot@google.com>  2019-06-13 17:26:22 -0700
commit    0a5ee6f7b20ef4f5533766d753b85005f79ae613 (patch)
tree      3ffc2f9ad60ab4ba97e9a924d925f3ff00fd8f5a
parent    05ff1ffaadaa0ac370365eb14febc761506735ce (diff)
Fix deadlock in fasync.
The deadlock can occur when both ends of a connected Unix socket which has FIOASYNC enabled on at least one end are closed at the same time. One end notifies that it is closing, calling (*waiter.Queue).Notify, which takes waiter.Queue.mu (as a read lock) and then calls (*FileAsync).Callback, which takes FileAsync.mu. The other end tries to unregister for notifications by calling (*FileAsync).Unregister, which takes FileAsync.mu and then calls (*waiter.Queue).EventUnregister, which takes waiter.Queue.mu.

This is fixed by moving the calls to waiter.Waitable.EventRegister and waiter.Waitable.EventUnregister outside of the protection of any mutex used in (*FileAsync).Callback. The new test is related, but does not cover this particular situation.

Also fix a data race on FileAsync.e.Callback: (*FileAsync).Callback checked FileAsync.e.Callback under the protection of FileAsync.mu, but the waiter calling (*FileAsync).Callback could not and did not. This is fixed by making FileAsync.e.Callback immutable before it is passed to the waiter for the first time.

Fixes #346

PiperOrigin-RevId: 253138340
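To make the inversion concrete, here is a minimal, self-contained Go sketch of the two paths described above. The names (queueMu, asyncMu, notify, unregister) are illustrative stand-ins, not the real gVisor identifiers: each path acquires the same two mutexes in the opposite order, which is the classic cycle that deadlocks when both run concurrently.

package main

import (
	"fmt"
	"sync"
)

var (
	queueMu sync.RWMutex // stands in for waiter.Queue.mu
	asyncMu sync.Mutex   // stands in for FileAsync.mu
)

// notify models (*waiter.Queue).Notify -> (*FileAsync).Callback:
// queueMu is taken first (as a read lock), then asyncMu.
func notify() {
	queueMu.RLock()
	defer queueMu.RUnlock()
	asyncMu.Lock() // blocks if unregister holds asyncMu
	defer asyncMu.Unlock()
	fmt.Println("would send SIGIO")
}

// unregister models (*FileAsync).Unregister ->
// (*waiter.Queue).EventUnregister: asyncMu first, then queueMu --
// the opposite order.
func unregister() {
	asyncMu.Lock()
	defer asyncMu.Unlock()
	queueMu.Lock() // blocks if notify holds queueMu
	defer queueMu.Unlock()
	fmt.Println("unregistered")
}

func main() {
	// Run sequentially so the sketch terminates. If these two calls
	// ran concurrently (one end closing while the other unregisters),
	// each could hold its first mutex and block forever on its
	// second -- the reported deadlock.
	notify()
	unregister()
}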
-rw-r--r--  pkg/sentry/kernel/fasync/fasync.go | 45
-rw-r--r--  test/syscalls/linux/ioctl.cc       | 31
2 files changed, 66 insertions(+), 10 deletions(-)
diff --git a/pkg/sentry/kernel/fasync/fasync.go b/pkg/sentry/kernel/fasync/fasync.go
index bcaca58f7..6b0bb0324 100644
--- a/pkg/sentry/kernel/fasync/fasync.go
+++ b/pkg/sentry/kernel/fasync/fasync.go
@@ -34,9 +34,23 @@ func New() fs.FileAsync {
//
// +stateify savable
type FileAsync struct {
- mu sync.Mutex `state:"nosave"`
- e waiter.Entry
- requester *auth.Credentials
+ // e is immutable after first use (which is protected by mu below).
+ e waiter.Entry
+
+ // regMu protects registration and unregistration actions on e.
+ //
+ // regMu must be held from the time a registration decision is made
+ // through the registration action itself.
+ //
+ // Lock ordering: regMu, mu.
+ regMu sync.Mutex `state:"nosave"`
+
+ // mu protects all following fields.
+ //
+ // Lock ordering: e.mu, mu.
+ mu sync.Mutex `state:"nosave"`
+ requester *auth.Credentials
+ registered bool
// Only one of the following is allowed to be non-nil.
recipientPG *kernel.ProcessGroup
@@ -47,7 +61,7 @@ type FileAsync struct {
// Callback sends a signal.
func (a *FileAsync) Callback(e *waiter.Entry) {
a.mu.Lock()
- if a.e.Callback == nil {
+ if !a.registered {
a.mu.Unlock()
return
}
@@ -80,14 +94,21 @@ func (a *FileAsync) Callback(e *waiter.Entry) {
//
// The file must not be currently registered.
func (a *FileAsync) Register(w waiter.Waitable) {
+ a.regMu.Lock()
+ defer a.regMu.Unlock()
a.mu.Lock()
- defer a.mu.Unlock()
- if a.e.Callback != nil {
+ if a.registered {
+ a.mu.Unlock()
panic("registering already registered file")
}
- a.e.Callback = a
+ if a.e.Callback == nil {
+ a.e.Callback = a
+ }
+ a.registered = true
+
+ a.mu.Unlock()
w.EventRegister(&a.e, waiter.EventIn|waiter.EventOut|waiter.EventErr|waiter.EventHUp)
}
@@ -95,15 +116,19 @@ func (a *FileAsync) Register(w waiter.Waitable) {
//
// The file must be currently registered.
func (a *FileAsync) Unregister(w waiter.Waitable) {
+ a.regMu.Lock()
+ defer a.regMu.Unlock()
a.mu.Lock()
- defer a.mu.Unlock()
- if a.e.Callback == nil {
+ if !a.registered {
+ a.mu.Unlock()
panic("unregistering unregistered file")
}
+ a.registered = false
+
+ a.mu.Unlock()
w.EventUnregister(&a.e)
- a.e.Callback = nil
}
// Owner returns who is currently getting signals. All return values will be
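As a companion to the fasync.go hunks above, here is a condensed, self-contained Go sketch of the fixed pattern. The types (queue, entry, fileAsync) are hypothetical stand-ins for the waiter package, not gVisor's API. The properties carried over from the diff: only regMu is held across the call into the queue, mu is dropped before that call, and the callback pointer is written at most once, before the entry is ever visible to a queue.

package main

import "sync"

// entry stands in for waiter.Entry; callback is immutable once the
// entry has been handed to a queue.
type entry struct {
	callback func()
}

// queue stands in for waiter.Queue.
type queue struct {
	mu      sync.RWMutex
	entries map[*entry]struct{}
}

func (q *queue) eventRegister(e *entry) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.entries[e] = struct{}{}
}

func (q *queue) eventUnregister(e *entry) {
	q.mu.Lock()
	defer q.mu.Unlock()
	delete(q.entries, e)
}

// notify invokes callbacks with q.mu held, as the Unix socket close
// path described in the commit message does.
func (q *queue) notify() {
	q.mu.RLock()
	defer q.mu.RUnlock()
	for e := range q.entries {
		e.callback()
	}
}

type fileAsync struct {
	e          entry
	regMu      sync.Mutex // held across register/unregister; never taken by callback
	mu         sync.Mutex // protects registered; taken by callback
	registered bool
}

func (a *fileAsync) register(q *queue) {
	a.regMu.Lock()
	defer a.regMu.Unlock()
	a.mu.Lock()
	if a.e.callback == nil {
		a.e.callback = a.callback // written once, before first registration
	}
	a.registered = true
	a.mu.Unlock() // drop mu before calling into the queue
	q.eventRegister(&a.e)
}

func (a *fileAsync) unregister(q *queue) {
	a.regMu.Lock()
	defer a.regMu.Unlock()
	a.mu.Lock()
	a.registered = false
	a.mu.Unlock() // again, no mu held while taking q.mu
	q.eventUnregister(&a.e)
}

// callback runs with q.mu held (via notify). It takes only mu, never
// regMu or q.mu, so no lock cycle with register/unregister exists.
func (a *fileAsync) callback() {
	a.mu.Lock()
	defer a.mu.Unlock()
	if !a.registered {
		return
	}
	// ...deliver SIGIO to the configured recipient...
}

func main() {
	q := &queue{entries: make(map[*entry]struct{})}
	a := &fileAsync{}
	a.register(q)
	q.notify()
	a.unregister(q)
}

Note how the queue's mutex and mu are now only ever nested in one direction (q.mu -> mu, via notify): no goroutine holds mu while waiting for q.mu, so the cycle from the commit message cannot form.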
diff --git a/test/syscalls/linux/ioctl.cc b/test/syscalls/linux/ioctl.cc
index c525d41d2..4948a76f0 100644
--- a/test/syscalls/linux/ioctl.cc
+++ b/test/syscalls/linux/ioctl.cc
@@ -229,6 +229,37 @@ TEST_F(IoctlTest, FIOASYNCSelfTarget2) {
EXPECT_EQ(io_received, 1);
}
+// Check that closing an FD does not result in an event.
+TEST_F(IoctlTest, FIOASYNCSelfTargetClose) {
+ // Count SIGIOs received.
+ struct sigaction sa;
+ io_received = 0;
+ sa.sa_sigaction = inc_io_handler;
+ sigfillset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ auto sa_cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGIO, sa));
+
+ // Actually allow SIGIO delivery.
+ auto mask_cleanup =
+ ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGIO));
+
+ for (int i = 0; i < 2; i++) {
+ auto pair = ASSERT_NO_ERRNO_AND_VALUE(
+ UnixDomainSocketPair(SOCK_SEQPACKET).Create());
+
+ pid_t pid = getpid();
+ EXPECT_THAT(ioctl(pair->second_fd(), FIOSETOWN, &pid), SyscallSucceeds());
+
+ int set = 1;
+ EXPECT_THAT(ioctl(pair->second_fd(), FIOASYNC, &set), SyscallSucceeds());
+ }
+
+ // FIXME(b/120624367): gVisor erroneously sends SIGIO on close.
+ SKIP_IF(IsRunningOnGvisor());
+
+ EXPECT_EQ(io_received, 0);
+}
+
TEST_F(IoctlTest, FIOASYNCInvalidPID) {
auto pair =
ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());