Diffstat (limited to 'pkg/sentry/kernel')
-rw-r--r--   pkg/sentry/kernel/epoll/epoll.go           |  6
-rw-r--r--   pkg/sentry/kernel/epoll/epoll_state.go     |  2
-rw-r--r--   pkg/sentry/kernel/epoll/epoll_test.go      |  2
-rw-r--r--   pkg/sentry/kernel/eventfd/eventfd.go       |  8
-rw-r--r--   pkg/sentry/kernel/eventfd/eventfd_test.go  |  2
-rw-r--r--   pkg/sentry/kernel/fasync/fasync.go         |  2
-rw-r--r--   pkg/sentry/kernel/pipe/pipe.go             |  8
-rw-r--r--   pkg/sentry/kernel/pipe/pipe_test.go        |  4
-rw-r--r--   pkg/sentry/kernel/pipe/pipe_util.go        | 10
-rw-r--r--   pkg/sentry/kernel/pipe/vfs.go              | 24
-rw-r--r--   pkg/sentry/kernel/signalfd/signalfd.go     |  4
11 files changed, 36 insertions, 36 deletions
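This change is a mechanical rename of the core waiter readiness masks: waiter.EventIn becomes waiter.ReadableEvents and waiter.EventOut becomes waiter.WritableEvents, while waiter.EventErr and waiter.EventHUp keep their names. As a minimal sketch of the renamed constants in use, assuming the gvisor.dev/gvisor module path and the same waiter.Queue, NewChannelEntry, EventRegister and Notify calls that appear in the hunks below:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/waiter"
)

func main() {
	var q waiter.Queue

	// Register for readable events under the new name
	// (formerly waiter.EventIn).
	e, ch := waiter.NewChannelEntry(nil)
	q.EventRegister(&e, waiter.ReadableEvents)
	defer q.EventUnregister(&e)

	// A producer wakes waiting readers the same way the pipe and
	// eventfd code in this change does.
	q.Notify(waiter.ReadableEvents)

	<-ch
	fmt.Println("readable notification delivered")
}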
diff --git a/pkg/sentry/kernel/epoll/epoll.go b/pkg/sentry/kernel/epoll/epoll.go
index ba73a7812..6006c46a9 100644
--- a/pkg/sentry/kernel/epoll/epoll.go
+++ b/pkg/sentry/kernel/epoll/epoll.go
@@ -213,8 +213,8 @@ func (e *EventPoll) eventsAvailable() bool {
 func (e *EventPoll) Readiness(mask waiter.EventMask) waiter.EventMask {
 	ready := waiter.EventMask(0)
 
-	if (mask&waiter.EventIn) != 0 && e.eventsAvailable() {
-		ready |= waiter.EventIn
+	if (mask&waiter.ReadableEvents) != 0 && e.eventsAvailable() {
+		ready |= waiter.ReadableEvents
 	}
 
 	return ready
@@ -290,7 +290,7 @@ func (p *pollEntry) Callback(*waiter.Entry, waiter.EventMask) {
 		p.curList = &e.readyList
 		e.listsMu.Unlock()
 
-		e.Notify(waiter.EventIn)
+		e.Notify(waiter.ReadableEvents)
 		return
 	}
 
diff --git a/pkg/sentry/kernel/epoll/epoll_state.go b/pkg/sentry/kernel/epoll/epoll_state.go
index 7c61e0258..e08d6287f 100644
--- a/pkg/sentry/kernel/epoll/epoll_state.go
+++ b/pkg/sentry/kernel/epoll/epoll_state.go
@@ -45,7 +45,7 @@ func (e *EventPoll) afterLoad() {
 			e.waitingList.Remove(entry)
 			e.readyList.PushBack(entry)
 			entry.curList = &e.readyList
-			e.Notify(waiter.EventIn)
+			e.Notify(waiter.ReadableEvents)
 		}
 	}
 }
diff --git a/pkg/sentry/kernel/epoll/epoll_test.go b/pkg/sentry/kernel/epoll/epoll_test.go
index 55b505593..8ef6cb3e7 100644
--- a/pkg/sentry/kernel/epoll/epoll_test.go
+++ b/pkg/sentry/kernel/epoll/epoll_test.go
@@ -29,7 +29,7 @@ func TestFileDestroyed(t *testing.T) {
 	ctx := contexttest.Context(t)
 	efile := NewEventPoll(ctx)
 	e := efile.FileOperations.(*EventPoll)
-	if err := e.AddEntry(id, 0, waiter.EventIn, [2]int32{}); err != nil {
+	if err := e.AddEntry(id, 0, waiter.ReadableEvents, [2]int32{}); err != nil {
 		t.Fatalf("addEntry failed: %v", err)
 	}
 
diff --git a/pkg/sentry/kernel/eventfd/eventfd.go b/pkg/sentry/kernel/eventfd/eventfd.go
index 64f1cc631..2aca02fd5 100644
--- a/pkg/sentry/kernel/eventfd/eventfd.go
+++ b/pkg/sentry/kernel/eventfd/eventfd.go
@@ -183,7 +183,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro
 	// Notify writers. We do this even if we were already writable because
 	// it is possible that a writer is waiting to write the maximum value
 	// to the event.
-	e.wq.Notify(waiter.EventOut)
+	e.wq.Notify(waiter.WritableEvents)
 
 	var buf [8]byte
 	usermem.ByteOrder.PutUint64(buf[:], val)
@@ -236,7 +236,7 @@ func (e *EventOperations) Signal(val uint64) error {
 	e.mu.Unlock()
 
 	// Always trigger a notification.
-	e.wq.Notify(waiter.EventIn)
+	e.wq.Notify(waiter.ReadableEvents)
 
 	return nil
 }
@@ -251,11 +251,11 @@ func (e *EventOperations) Readiness(mask waiter.EventMask) waiter.EventMask {
 
 	ready := waiter.EventMask(0)
 	if e.val > 0 {
-		ready |= waiter.EventIn
+		ready |= waiter.ReadableEvents
 	}
 
 	if e.val < math.MaxUint64-1 {
-		ready |= waiter.EventOut
+		ready |= waiter.WritableEvents
 	}
 
 	e.mu.Unlock()
diff --git a/pkg/sentry/kernel/eventfd/eventfd_test.go b/pkg/sentry/kernel/eventfd/eventfd_test.go
index 9b4892f74..1b9e60b3a 100644
--- a/pkg/sentry/kernel/eventfd/eventfd_test.go
+++ b/pkg/sentry/kernel/eventfd/eventfd_test.go
@@ -39,7 +39,7 @@ func TestEventfd(t *testing.T) {
 
 	// Register a callback for a write event.
 	w, ch := waiter.NewChannelEntry(nil)
-	event.EventRegister(&w, waiter.EventIn)
+	event.EventRegister(&w, waiter.ReadableEvents)
 	defer event.EventUnregister(&w)
 
 	data := []byte("00000124")
diff --git a/pkg/sentry/kernel/fasync/fasync.go b/pkg/sentry/kernel/fasync/fasync.go
index b66d61c6f..dbbbaeeb0 100644
--- a/pkg/sentry/kernel/fasync/fasync.go
+++ b/pkg/sentry/kernel/fasync/fasync.go
@@ -162,7 +162,7 @@ func (a *FileAsync) Register(w waiter.Waitable) {
 	a.registered = true
 	a.mu.Unlock()
 
-	w.EventRegister(&a.e, waiter.EventIn|waiter.EventOut|waiter.EventErr|waiter.EventHUp)
+	w.EventRegister(&a.e, waiter.ReadableEvents|waiter.WritableEvents|waiter.EventErr|waiter.EventHUp)
 }
 
 // Unregister stops monitoring a file.
diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go
index 68a55a186..d004f2357 100644
--- a/pkg/sentry/kernel/pipe/pipe.go
+++ b/pkg/sentry/kernel/pipe/pipe.go
@@ -183,7 +183,7 @@ func (p *Pipe) Open(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) *fs.F
 //
 // peekLocked does not mutate the pipe; if the read consumes bytes from the
 // pipe, then the caller is responsible for calling p.consumeLocked() and
-// p.Notify(waiter.EventOut). (The latter must be called with p.mu unlocked.)
+// p.Notify(waiter.WritableEvents). (The latter must be called with p.mu unlocked.)
 //
 // Preconditions:
 // * p.mu must be locked.
@@ -237,7 +237,7 @@ func (p *Pipe) consumeLocked(n int64) {
 // Unlike peekLocked, writeLocked assumes that f returns the number of bytes
 // written to the pipe, and increases the number of bytes stored in the pipe
 // accordingly. Callers are still responsible for calling
-// p.Notify(waiter.EventIn) with p.mu unlocked.
+// p.Notify(waiter.ReadableEvents) with p.mu unlocked.
 //
 // Preconditions:
 // * p.mu must be locked.
@@ -357,7 +357,7 @@ func (p *Pipe) HasWriters() bool {
 func (p *Pipe) rReadinessLocked() waiter.EventMask {
 	ready := waiter.EventMask(0)
 	if p.HasReaders() && p.size != 0 {
-		ready |= waiter.EventIn
+		ready |= waiter.ReadableEvents
 	}
 	if !p.HasWriters() && p.hadWriter {
 		// POLLHUP must be suppressed until the pipe has had at least one writer
@@ -383,7 +383,7 @@ func (p *Pipe) rReadiness() waiter.EventMask {
 func (p *Pipe) wReadinessLocked() waiter.EventMask {
 	ready := waiter.EventMask(0)
 	if p.HasWriters() && p.size < p.max {
-		ready |= waiter.EventOut
+		ready |= waiter.WritableEvents
 	}
 	if !p.HasReaders() {
 		ready |= waiter.EventErr
diff --git a/pkg/sentry/kernel/pipe/pipe_test.go b/pkg/sentry/kernel/pipe/pipe_test.go
index 3dd739080..867f4a76b 100644
--- a/pkg/sentry/kernel/pipe/pipe_test.go
+++ b/pkg/sentry/kernel/pipe/pipe_test.go
@@ -97,7 +97,7 @@ func TestPipeWriteUntilEnd(t *testing.T) {
 		buf := make([]byte, len(msg)+1)
 		dst := usermem.BytesIOSequence(buf)
 		e, ch := waiter.NewChannelEntry(nil)
-		r.EventRegister(&e, waiter.EventIn)
+		r.EventRegister(&e, waiter.ReadableEvents)
 		defer r.EventUnregister(&e)
 		for {
 			n, err := r.Readv(ctx, dst)
@@ -124,7 +124,7 @@ func TestPipeWriteUntilEnd(t *testing.T) {
 
 		src := usermem.BytesIOSequence(msg)
 		e, ch := waiter.NewChannelEntry(nil)
-		w.EventRegister(&e, waiter.EventOut)
+		w.EventRegister(&e, waiter.WritableEvents)
 		defer w.EventUnregister(&e)
 		for src.NumBytes() != 0 {
 			n, err := w.Writev(ctx, src)
diff --git a/pkg/sentry/kernel/pipe/pipe_util.go b/pkg/sentry/kernel/pipe/pipe_util.go
index 76ea389ca..2d89b9ccd 100644
--- a/pkg/sentry/kernel/pipe/pipe_util.go
+++ b/pkg/sentry/kernel/pipe/pipe_util.go
@@ -39,14 +39,14 @@ func (p *Pipe) Release(context.Context) {
 	p.wClose()
 
 	// Wake up readers and writers.
-	p.Notify(waiter.EventIn | waiter.EventOut)
+	p.Notify(waiter.ReadableEvents | waiter.WritableEvents)
 }
 
 // Read reads from the Pipe into dst.
 func (p *Pipe) Read(ctx context.Context, dst usermem.IOSequence) (int64, error) {
 	n, err := dst.CopyOutFrom(ctx, p)
 	if n > 0 {
-		p.Notify(waiter.EventOut)
+		p.Notify(waiter.WritableEvents)
 	}
 	return n, err
 }
@@ -75,7 +75,7 @@ func (p *Pipe) WriteTo(ctx context.Context, w io.Writer, count int64, dup bool)
 		return safemem.FromIOWriter{w}.WriteFromBlocks(srcs)
 	}, !dup /* removeFromSrc */)
 	if n > 0 && !dup {
-		p.Notify(waiter.EventOut)
+		p.Notify(waiter.WritableEvents)
 	}
 	return n, err
 }
@@ -84,7 +84,7 @@ func (p *Pipe) WriteTo(ctx context.Context, w io.Writer, count int64, dup bool)
 func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error) {
 	n, err := src.CopyInTo(ctx, p)
 	if n > 0 {
-		p.Notify(waiter.EventIn)
+		p.Notify(waiter.ReadableEvents)
 	}
 	return n, err
 }
@@ -109,7 +109,7 @@ func (p *Pipe) ReadFrom(ctx context.Context, r io.Reader, count int64) (int64, e
 		return safemem.FromIOReader{r}.ReadToBlocks(dsts)
 	})
 	if n > 0 {
-		p.Notify(waiter.EventIn)
+		p.Notify(waiter.ReadableEvents)
 	}
 	return n, err
 }
diff --git a/pkg/sentry/kernel/pipe/vfs.go b/pkg/sentry/kernel/pipe/vfs.go
index 09c0ccaf2..e524afad5 100644
--- a/pkg/sentry/kernel/pipe/vfs.go
+++ b/pkg/sentry/kernel/pipe/vfs.go
@@ -194,11 +194,11 @@ func (fd *VFSPipeFD) Release(context.Context) {
 	var event waiter.EventMask
 	if fd.vfsfd.IsReadable() {
 		fd.pipe.rClose()
-		event |= waiter.EventOut
+		event |= waiter.WritableEvents
 	}
 	if fd.vfsfd.IsWritable() {
 		fd.pipe.wClose()
-		event |= waiter.EventIn | waiter.EventHUp
+		event |= waiter.ReadableEvents | waiter.EventHUp
 	}
 	if event == 0 {
 		panic("invalid pipe flags: must be readable, writable, or both")
@@ -293,7 +293,7 @@ func (fd *VFSPipeFD) SpliceToNonPipe(ctx context.Context, out *vfs.FileDescripti
 	fd.pipe.mu.Unlock()
 
 	if n > 0 {
-		fd.pipe.Notify(waiter.EventOut)
+		fd.pipe.Notify(waiter.WritableEvents)
 	}
 	return n, err
 }
@@ -318,14 +318,14 @@ func (fd *VFSPipeFD) SpliceFromNonPipe(ctx context.Context, in *vfs.FileDescript
 	fd.pipe.mu.Unlock()
 
 	if n > 0 {
-		fd.pipe.Notify(waiter.EventIn)
+		fd.pipe.Notify(waiter.ReadableEvents)
 	}
 	return n, err
 }
 
 // CopyIn implements usermem.IO.CopyIn. Note that it is the caller's
 // responsibility to call fd.pipe.consumeLocked() and
-// fd.pipe.Notify(waiter.EventOut) after the read is completed.
+// fd.pipe.Notify(waiter.WritableEvents) after the read is completed.
 //
 // Preconditions: fd.pipe.mu must be locked.
 func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
@@ -336,8 +336,8 @@ func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte,
 }
 
 // CopyOut implements usermem.IO.CopyOut. Note that it is the caller's
-// responsibility to call fd.pipe.Notify(waiter.EventIn) after the
-// write is completed.
+// responsibility to call fd.pipe.Notify(waiter.ReadableEvents) after the write
+// is completed.
 //
 // Preconditions: fd.pipe.mu must be locked.
 func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) {
@@ -359,7 +359,7 @@ func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int6
 
 // CopyInTo implements usermem.IO.CopyInTo. Note that it is the caller's
 // responsibility to call fd.pipe.consumeLocked() and
-// fd.pipe.Notify(waiter.EventOut) after the read is completed.
+// fd.pipe.Notify(waiter.WritableEvents) after the read is completed.
 //
 // Preconditions: fd.pipe.mu must be locked.
 func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
@@ -369,8 +369,8 @@ func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst
 }
 
 // CopyOutFrom implements usermem.IO.CopyOutFrom. Note that it is the caller's
-// responsibility to call fd.pipe.Notify(waiter.EventIn) after the write is
-// completed.
+// responsibility to call fd.pipe.Notify(waiter.ReadableEvents) after the write
+// is completed.
 //
 // Preconditions: fd.pipe.mu must be locked.
 func (fd *VFSPipeFD) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
@@ -431,9 +431,9 @@ func spliceOrTee(ctx context.Context, dst, src *VFSPipeFD, count int64, removeFr
 	src.pipe.mu.Unlock()
 
 	if n > 0 {
-		dst.pipe.Notify(waiter.EventIn)
+		dst.pipe.Notify(waiter.ReadableEvents)
 		if removeFromSrc {
-			src.pipe.Notify(waiter.EventOut)
+			src.pipe.Notify(waiter.WritableEvents)
 		}
 	}
 	return n, err
diff --git a/pkg/sentry/kernel/signalfd/signalfd.go b/pkg/sentry/kernel/signalfd/signalfd.go
index 884966120..f58ec4194 100644
--- a/pkg/sentry/kernel/signalfd/signalfd.go
+++ b/pkg/sentry/kernel/signalfd/signalfd.go
@@ -122,8 +122,8 @@ func (s *SignalOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS
 
 // Readiness implements waiter.Waitable.Readiness.
 func (s *SignalOperations) Readiness(mask waiter.EventMask) waiter.EventMask {
-	if mask&waiter.EventIn != 0 && s.target.PendingSignals()&s.Mask() != 0 {
-		return waiter.EventIn // Pending signals.
+	if mask&waiter.ReadableEvents != 0 && s.target.PendingSignals()&s.Mask() != 0 {
+		return waiter.ReadableEvents // Pending signals.
 	}
 	return 0
 }
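For reference, the sketch below mirrors the Readiness/Notify pattern from the eventfd and signalfd hunks above using the renamed masks. The exampleCounter type, its fields, and the Signal helper are hypothetical and exist only for illustration; they are not part of this change.

package example

import (
	"math"
	"sync"

	"gvisor.dev/gvisor/pkg/waiter"
)

// exampleCounter is a hypothetical eventfd-like counter used only to
// demonstrate the renamed readiness masks.
type exampleCounter struct {
	mu  sync.Mutex
	wq  waiter.Queue
	val uint64
}

// Readiness reports ReadableEvents while a value is pending and
// WritableEvents while the counter can grow without overflowing,
// following the eventfd pattern above.
func (c *exampleCounter) Readiness(mask waiter.EventMask) waiter.EventMask {
	c.mu.Lock()
	defer c.mu.Unlock()

	ready := waiter.EventMask(0)
	if c.val > 0 {
		ready |= waiter.ReadableEvents
	}
	if c.val < math.MaxUint64-1 {
		ready |= waiter.WritableEvents
	}
	return ready & mask
}

// Signal adds to the counter and, as the eventfd code does, always
// notifies waiting readers afterwards.
func (c *exampleCounter) Signal(v uint64) {
	c.mu.Lock()
	c.val += v
	c.mu.Unlock()
	c.wq.Notify(waiter.ReadableEvents)
}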