path: root/pkg/sentry/kernel/eventfd
Diffstat (limited to 'pkg/sentry/kernel/eventfd')
 pkg/sentry/kernel/eventfd/eventfd.go      | 8 ++++----
 pkg/sentry/kernel/eventfd/eventfd_test.go | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/pkg/sentry/kernel/eventfd/eventfd.go b/pkg/sentry/kernel/eventfd/eventfd.go
index 64f1cc631..2aca02fd5 100644
--- a/pkg/sentry/kernel/eventfd/eventfd.go
+++ b/pkg/sentry/kernel/eventfd/eventfd.go
@@ -183,7 +183,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro
 	// Notify writers. We do this even if we were already writable because
 	// it is possible that a writer is waiting to write the maximum value
 	// to the event.
-	e.wq.Notify(waiter.EventOut)
+	e.wq.Notify(waiter.WritableEvents)
 	var buf [8]byte
 	usermem.ByteOrder.PutUint64(buf[:], val)
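The comment in this hunk carries the key point: a read drains the counter, so writers are notified even when the event was already writable, because a writer may be blocked trying to add a value that would push the counter past the eventfd maximum. A minimal sketch of that writer-side retry pattern, assuming the gvisor.dev/gvisor/pkg/waiter import path and the EventRegister/Notify/NewChannelEntry signatures visible in this diff, with a hypothetical counter type standing in for the sentry's EventOperations:

package main

import (
	"fmt"
	"math"
	"sync"

	"gvisor.dev/gvisor/pkg/waiter"
)

// counter is a hypothetical stand-in for the eventfd value, with just enough
// state to show the wait protocol; it is not the sentry's EventOperations.
type counter struct {
	mu  sync.Mutex
	val uint64
	wq  waiter.Queue
}

// tryAdd adds v unless that would push the value past the eventfd maximum
// (math.MaxUint64 - 1). It reports whether the write fit.
func (c *counter) tryAdd(v uint64) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if v > math.MaxUint64-1-c.val {
		return false // no room; caller should wait for WritableEvents
	}
	c.val += v
	c.wq.Notify(waiter.ReadableEvents) // the value is now readable
	return true
}

// read drains the counter and, as in the hunk above, notifies writers
// unconditionally so a blocked maximum-value write can retry.
func (c *counter) read() uint64 {
	c.mu.Lock()
	v := c.val
	c.val = 0
	c.mu.Unlock()
	c.wq.Notify(waiter.WritableEvents)
	return v
}

func main() {
	c := &counter{val: math.MaxUint64 - 1} // counter is full; writes must wait

	// Writer side: register for WritableEvents, then retry after each wakeup.
	w, ch := waiter.NewChannelEntry(nil)
	c.wq.EventRegister(&w, waiter.WritableEvents)
	defer c.wq.EventUnregister(&w)

	go func() {
		fmt.Println("reader drained:", c.read()) // makes room for the writer
	}()

	for !c.tryAdd(1) {
		<-ch // woken by the reader's WritableEvents notification
	}
	fmt.Println("write of 1 landed")
}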
@@ -236,7 +236,7 @@ func (e *EventOperations) Signal(val uint64) error {
 	e.mu.Unlock()
 	// Always trigger a notification.
-	e.wq.Notify(waiter.EventIn)
+	e.wq.Notify(waiter.ReadableEvents)
 	return nil
 }
@@ -251,11 +251,11 @@ func (e *EventOperations) Readiness(mask waiter.EventMask) waiter.EventMask {
 	ready := waiter.EventMask(0)
 	if e.val > 0 {
-		ready |= waiter.EventIn
+		ready |= waiter.ReadableEvents
 	}
 	if e.val < math.MaxUint64-1 {
-		ready |= waiter.EventOut
+		ready |= waiter.WritableEvents
 	}
 	e.mu.Unlock()
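The readiness rules renamed in this hunk are the usual eventfd ones: readable while the counter is non-zero, writable while at least one more unit fits below the maximum of MaxUint64 - 1. A small standalone sketch of that mask computation over a plain uint64, again assuming the gvisor.dev/gvisor/pkg/waiter import path; this is not the sentry's Readiness method itself:

package main

import (
	"fmt"
	"math"

	"gvisor.dev/gvisor/pkg/waiter"
)

// readiness mirrors the rules in the hunk above for a plain uint64 counter:
// readable while the value is non-zero, writable while at least one more
// unit can be added without reaching the eventfd maximum (MaxUint64 - 1).
func readiness(val uint64) waiter.EventMask {
	ready := waiter.EventMask(0)
	if val > 0 {
		ready |= waiter.ReadableEvents
	}
	if val < math.MaxUint64-1 {
		ready |= waiter.WritableEvents
	}
	return ready
}

func main() {
	fmt.Println(readiness(0))                // writable only
	fmt.Println(readiness(7))                // readable and writable
	fmt.Println(readiness(math.MaxUint64-1)) // readable only: counter is full
}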
diff --git a/pkg/sentry/kernel/eventfd/eventfd_test.go b/pkg/sentry/kernel/eventfd/eventfd_test.go
index 9b4892f74..1b9e60b3a 100644
--- a/pkg/sentry/kernel/eventfd/eventfd_test.go
+++ b/pkg/sentry/kernel/eventfd/eventfd_test.go
@@ -39,7 +39,7 @@ func TestEventfd(t *testing.T) {
 	// Register a callback for a write event.
 	w, ch := waiter.NewChannelEntry(nil)
-	event.EventRegister(&w, waiter.EventIn)
+	event.EventRegister(&w, waiter.ReadableEvents)
 	defer event.EventUnregister(&w)
 	data := []byte("00000124")
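The test registers its channel entry for ReadableEvents before writing so the later notification cannot be missed. Outside the sentry test harness, the same wait pattern might be sketched against a bare waiter.Queue like this (the goroutine's Notify is a stand-in for the eventfd write the test performs; import path assumed as above):

package main

import (
	"fmt"
	"time"

	"gvisor.dev/gvisor/pkg/waiter"
)

func main() {
	var q waiter.Queue

	// Mirror the test above: register a channel entry for ReadableEvents
	// before triggering the event, so the notification cannot be lost.
	w, ch := waiter.NewChannelEntry(nil)
	q.EventRegister(&w, waiter.ReadableEvents)
	defer q.EventUnregister(&w)

	// Stand-in for the write the test performs against the eventfd.
	go q.Notify(waiter.ReadableEvents)

	// Bound the wait so a missing notification fails loudly instead of
	// hanging, which is how such a test is typically structured.
	select {
	case <-ch:
		fmt.Println("got ReadableEvents notification")
	case <-time.After(5 * time.Second):
		fmt.Println("timed out waiting for notification")
	}
}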