author    gVisor bot <gvisor-bot@google.com>  2020-02-28 20:31:56 +0000
committer gVisor bot <gvisor-bot@google.com>  2020-02-28 20:31:56 +0000
commit    cb43e84c172bd20a5c63f3e18fddf7fa7cebf929 (patch)
tree      66b9b3bd3fb5383d4003c7ca57e00f6b2648e895 /pkg/sentry/kernel/pipe
parent    e4a1a72f1fd527bc05f756c75556c43ee90e2b21 (diff)
parent    463f4217d109ded8af758fe51a5daf8670da9794 (diff)
Merge release-20200219.0-69-g463f421 (automated)
Diffstat (limited to 'pkg/sentry/kernel/pipe')
-rw-r--r--  pkg/sentry/kernel/pipe/buffer.go              | 115
-rwxr-xr-x  pkg/sentry/kernel/pipe/buffer_list.go         | 173
-rw-r--r--  pkg/sentry/kernel/pipe/pipe.go                | 118
-rwxr-xr-x  pkg/sentry/kernel/pipe/pipe_state_autogen.go  |  52
-rwxr-xr-x  pkg/sentry/kernel/pipe/pipe_util.go           |  25
5 files changed, 36 insertions(+), 447 deletions(-)
diff --git a/pkg/sentry/kernel/pipe/buffer.go b/pkg/sentry/kernel/pipe/buffer.go
deleted file mode 100644
index fe3be5dbd..000000000
--- a/pkg/sentry/kernel/pipe/buffer.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pipe
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/safemem"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// buffer encapsulates a queueable byte buffer.
-//
-// Note that the total size is slightly less than two pages. This
-// is done intentionally to ensure that the buffer object aligns
-// with runtime internals. We have no hard size or alignment
-// requirements. This two page size will effectively minimize
-// internal fragmentation, but still have a large enough chunk
-// to limit excessive segmentation.
-//
-// +stateify savable
-type buffer struct {
- data [8144]byte
- read int
- write int
- bufferEntry
-}
-
-// Reset resets internal data.
-//
-// This must be called before use.
-func (b *buffer) Reset() {
- b.read = 0
- b.write = 0
-}
-
-// Empty indicates the buffer is empty.
-//
-// This indicates there is no data left to read.
-func (b *buffer) Empty() bool {
- return b.read == b.write
-}
-
-// Full indicates the buffer is full.
-//
-// This indicates there is no capacity left to write.
-func (b *buffer) Full() bool {
- return b.write == len(b.data)
-}
-
-// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
-func (b *buffer) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
- dst := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(b.data[b.write:]))
- n, err := safemem.CopySeq(dst, srcs)
- b.write += int(n)
- return n, err
-}
-
-// WriteFromReader writes to the buffer from an io.Reader.
-func (b *buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
- dst := b.data[b.write:]
- if count < int64(len(dst)) {
- dst = b.data[b.write:][:count]
- }
- n, err := r.Read(dst)
- b.write += n
- return int64(n), err
-}
-
-// ReadToBlocks implements safemem.Reader.ReadToBlocks.
-func (b *buffer) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
- src := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(b.data[b.read:b.write]))
- n, err := safemem.CopySeq(dsts, src)
- b.read += int(n)
- return n, err
-}
-
-// ReadToWriter reads from the buffer into an io.Writer.
-func (b *buffer) ReadToWriter(w io.Writer, count int64, dup bool) (int64, error) {
- src := b.data[b.read:b.write]
- if count < int64(len(src)) {
- src = b.data[b.read:][:count]
- }
- n, err := w.Write(src)
- if !dup {
- b.read += n
- }
- return int64(n), err
-}
-
-// bufferPool is a pool for buffers.
-var bufferPool = sync.Pool{
- New: func() interface{} {
- return new(buffer)
- },
-}
-
-// newBuffer grabs a new buffer from the pool.
-func newBuffer() *buffer {
- b := bufferPool.Get().(*buffer)
- b.Reset()
- return b
-}
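For context on what is being deleted: the buffer type above is a fixed 8144-byte chunk with two monotonically advancing cursors. The cursors never wrap, so a drained chunk is recycled through bufferPool rather than reused in place. A minimal sketch of those cursor semantics follows; the names are illustrative, not gVisor's.

package main

import "fmt"

// chunk mirrors the semantics of the deleted pipe.buffer type: a fixed
// backing array with read and write cursors that only move forward.
type chunk struct {
    data  [8144]byte
    read  int
    write int
}

// empty reports whether all written data has been consumed.
func (b *chunk) empty() bool { return b.read == b.write }

// full reports whether the write cursor has reached the end; a full,
// drained chunk is discarded (pooled), not rewound.
func (b *chunk) full() bool { return b.write == len(b.data) }

// writeBytes copies p into the remaining capacity, advancing write.
func (b *chunk) writeBytes(p []byte) int {
    n := copy(b.data[b.write:], p)
    b.write += n
    return n
}

// readBytes copies unread data into p, advancing read.
func (b *chunk) readBytes(p []byte) int {
    n := copy(p, b.data[b.read:b.write])
    b.read += n
    return n
}

func main() {
    var b chunk
    b.writeBytes([]byte("hello"))
    out := make([]byte, 5)
    b.readBytes(out)
    fmt.Println(string(out), b.empty()) // hello true
}
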
diff --git a/pkg/sentry/kernel/pipe/buffer_list.go b/pkg/sentry/kernel/pipe/buffer_list.go
deleted file mode 100755
index 42ec78788..000000000
--- a/pkg/sentry/kernel/pipe/buffer_list.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package pipe
-
-// ElementMapper provides an identity mapping by default.
-//
-// This can be replaced to provide a struct that maps elements to linker
-// objects, if they are not the same. An ElementMapper is not typically
-// required if: Linker is left as is, Element is left as is, or Linker and
-// Element are the same type.
-type bufferElementMapper struct{}
-
-// linkerFor maps an Element to a Linker.
-//
-// This default implementation should be inlined.
-//
-//go:nosplit
-func (bufferElementMapper) linkerFor(elem *buffer) *buffer { return elem }
-
-// List is an intrusive list. Entries can be added to or removed from the list
-// in O(1) time and with no additional memory allocations.
-//
-// The zero value for List is an empty list ready to use.
-//
-// To iterate over a list (where l is a List):
-// for e := l.Front(); e != nil; e = e.Next() {
-// // do something with e.
-// }
-//
-// +stateify savable
-type bufferList struct {
- head *buffer
- tail *buffer
-}
-
-// Reset resets list l to the empty state.
-func (l *bufferList) Reset() {
- l.head = nil
- l.tail = nil
-}
-
-// Empty returns true iff the list is empty.
-func (l *bufferList) Empty() bool {
- return l.head == nil
-}
-
-// Front returns the first element of list l or nil.
-func (l *bufferList) Front() *buffer {
- return l.head
-}
-
-// Back returns the last element of list l or nil.
-func (l *bufferList) Back() *buffer {
- return l.tail
-}
-
-// PushFront inserts the element e at the front of list l.
-func (l *bufferList) PushFront(e *buffer) {
- bufferElementMapper{}.linkerFor(e).SetNext(l.head)
- bufferElementMapper{}.linkerFor(e).SetPrev(nil)
-
- if l.head != nil {
- bufferElementMapper{}.linkerFor(l.head).SetPrev(e)
- } else {
- l.tail = e
- }
-
- l.head = e
-}
-
-// PushBack inserts the element e at the back of list l.
-func (l *bufferList) PushBack(e *buffer) {
- bufferElementMapper{}.linkerFor(e).SetNext(nil)
- bufferElementMapper{}.linkerFor(e).SetPrev(l.tail)
-
- if l.tail != nil {
- bufferElementMapper{}.linkerFor(l.tail).SetNext(e)
- } else {
- l.head = e
- }
-
- l.tail = e
-}
-
-// PushBackList inserts list m at the end of list l, emptying m.
-func (l *bufferList) PushBackList(m *bufferList) {
- if l.head == nil {
- l.head = m.head
- l.tail = m.tail
- } else if m.head != nil {
- bufferElementMapper{}.linkerFor(l.tail).SetNext(m.head)
- bufferElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
-
- l.tail = m.tail
- }
-
- m.head = nil
- m.tail = nil
-}
-
-// InsertAfter inserts e after b.
-func (l *bufferList) InsertAfter(b, e *buffer) {
- a := bufferElementMapper{}.linkerFor(b).Next()
- bufferElementMapper{}.linkerFor(e).SetNext(a)
- bufferElementMapper{}.linkerFor(e).SetPrev(b)
- bufferElementMapper{}.linkerFor(b).SetNext(e)
-
- if a != nil {
- bufferElementMapper{}.linkerFor(a).SetPrev(e)
- } else {
- l.tail = e
- }
-}
-
-// InsertBefore inserts e before a.
-func (l *bufferList) InsertBefore(a, e *buffer) {
- b := bufferElementMapper{}.linkerFor(a).Prev()
- bufferElementMapper{}.linkerFor(e).SetNext(a)
- bufferElementMapper{}.linkerFor(e).SetPrev(b)
- bufferElementMapper{}.linkerFor(a).SetPrev(e)
-
- if b != nil {
- bufferElementMapper{}.linkerFor(b).SetNext(e)
- } else {
- l.head = e
- }
-}
-
-// Remove removes e from l.
-func (l *bufferList) Remove(e *buffer) {
- prev := bufferElementMapper{}.linkerFor(e).Prev()
- next := bufferElementMapper{}.linkerFor(e).Next()
-
- if prev != nil {
- bufferElementMapper{}.linkerFor(prev).SetNext(next)
- } else {
- l.head = next
- }
-
- if next != nil {
- bufferElementMapper{}.linkerFor(next).SetPrev(prev)
- } else {
- l.tail = prev
- }
-}
-
-// Entry is a default implementation of Linker. Users can add anonymous fields
-// of this type to their structs to make them automatically implement the
-// methods needed by List.
-//
-// +stateify savable
-type bufferEntry struct {
- next *buffer
- prev *buffer
-}
-
-// Next returns the entry that follows e in the list.
-func (e *bufferEntry) Next() *buffer {
- return e.next
-}
-
-// Prev returns the entry that precedes e in the list.
-func (e *bufferEntry) Prev() *buffer {
- return e.prev
-}
-
-// SetNext assigns 'entry' as the entry that follows e in the list.
-func (e *bufferEntry) SetNext(elem *buffer) {
- e.next = elem
-}
-
-// SetPrev assigns 'entry' as the entry that precedes e in the list.
-func (e *bufferEntry) SetPrev(elem *buffer) {
- e.prev = elem
-}
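The file above is generated code implementing the intrusive-list pattern: each element embeds its own links (bufferEntry), so insertion and removal are O(1) with no allocation beyond the element itself. A self-contained sketch of the same pattern, without the ElementMapper indirection and with simplified names:

package main

import "fmt"

// node embeds its own links, as bufferEntry does for buffer.
type node struct {
    value      int
    next, prev *node
}

// list is a doubly linked intrusive list; its zero value is empty.
type list struct {
    head, tail *node
}

// pushBack appends e in O(1), mirroring bufferList.PushBack.
func (l *list) pushBack(e *node) {
    e.next, e.prev = nil, l.tail
    if l.tail != nil {
        l.tail.next = e
    } else {
        l.head = e
    }
    l.tail = e
}

// remove unlinks e in O(1), mirroring bufferList.Remove.
func (l *list) remove(e *node) {
    if e.prev != nil {
        e.prev.next = e.next
    } else {
        l.head = e.next
    }
    if e.next != nil {
        e.next.prev = e.prev
    } else {
        l.tail = e.prev
    }
}

func main() {
    var l list
    for i := 1; i <= 3; i++ {
        l.pushBack(&node{value: i})
    }
    for e := l.head; e != nil; e = e.next {
        fmt.Println(e.value) // 1 2 3
    }
}
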
diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go
index 08410283f..725e9db7d 100644
--- a/pkg/sentry/kernel/pipe/pipe.go
+++ b/pkg/sentry/kernel/pipe/pipe.go
@@ -20,6 +20,7 @@ import (
"sync/atomic"
"syscall"
+ "gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sync"
@@ -70,10 +71,10 @@ type Pipe struct {
// mu protects all pipe internal state below.
mu sync.Mutex `state:"nosave"`
- // data is the buffer queue of pipe contents.
+ // view is the underlying set of buffers.
//
// This is protected by mu.
- data bufferList
+ view buffer.View
// max is the maximum size of the pipe in bytes. When this max has been
// reached, writers will get EWOULDBLOCK.
@@ -81,11 +82,6 @@ type Pipe struct {
// This is protected by mu.
max int64
- // size is the current size of the pipe in bytes.
- //
- // This is protected by mu.
- size int64
-
// hadWriter indicates if this pipe ever had a writer. Note that this
// does not necessarily indicate there is *currently* a writer, just
// that there has been a writer at some point since the pipe was
@@ -196,7 +192,7 @@ type readOps struct {
limit func(int64)
// read performs the actual read operation.
- read func(*buffer) (int64, error)
+ read func(*buffer.View) (int64, error)
}
// read reads data from the pipe into dst and returns the number of bytes
@@ -213,7 +209,7 @@ func (p *Pipe) read(ctx context.Context, ops readOps) (int64, error) {
defer p.mu.Unlock()
// Is the pipe empty?
- if p.size == 0 {
+ if p.view.Size() == 0 {
if !p.HasWriters() {
// There are no writers, return EOF.
return 0, nil
@@ -222,71 +218,13 @@ func (p *Pipe) read(ctx context.Context, ops readOps) (int64, error) {
}
// Limit how much we consume.
- if ops.left() > p.size {
- ops.limit(p.size)
+ if ops.left() > p.view.Size() {
+ ops.limit(p.view.Size())
}
- done := int64(0)
- for ops.left() > 0 {
- // Pop the first buffer.
- first := p.data.Front()
- if first == nil {
- break
- }
-
- // Copy user data.
- n, err := ops.read(first)
- done += int64(n)
- p.size -= n
-
- // Empty buffer?
- if first.Empty() {
- // Push to the free list.
- p.data.Remove(first)
- bufferPool.Put(first)
- }
-
- // Handle errors.
- if err != nil {
- return done, err
- }
- }
-
- return done, nil
-}
-
-// dup duplicates all data from this pipe into the given writer.
-//
-// There is no blocking behavior implemented here. The writer may propagate
-// some blocking error. All the writes must be complete writes.
-func (p *Pipe) dup(ctx context.Context, ops readOps) (int64, error) {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- // Is the pipe empty?
- if p.size == 0 {
- if !p.HasWriters() {
- // See above.
- return 0, nil
- }
- return 0, syserror.ErrWouldBlock
- }
-
- // Limit how much we consume.
- if ops.left() > p.size {
- ops.limit(p.size)
- }
-
- done := int64(0)
- for buf := p.data.Front(); buf != nil; buf = buf.Next() {
- n, err := ops.read(buf)
- done += n
- if err != nil {
- return done, err
- }
- }
-
- return done, nil
+ // Copy user data; the read op is responsible for trimming.
+ done, err := ops.read(&p.view)
+ return done, err
}
type writeOps struct {
@@ -297,7 +235,7 @@ type writeOps struct {
limit func(int64)
// write should write to the provided buffer.
- write func(*buffer) (int64, error)
+ write func(*buffer.View) (int64, error)
}
// write writes data from sv into the pipe and returns the number of bytes
@@ -317,33 +255,19 @@ func (p *Pipe) write(ctx context.Context, ops writeOps) (int64, error) {
// POSIX requires that a write smaller than atomicIOBytes (PIPE_BUF) be
// atomic, but requires no atomicity for writes larger than this.
wanted := ops.left()
- if avail := p.max - p.size; wanted > avail {
+ if avail := p.max - p.view.Size(); wanted > avail {
if wanted <= p.atomicIOBytes {
return 0, syserror.ErrWouldBlock
}
ops.limit(avail)
}
- done := int64(0)
- for ops.left() > 0 {
- // Need a new buffer?
- last := p.data.Back()
- if last == nil || last.Full() {
- // Add a new buffer to the data list.
- last = newBuffer()
- p.data.PushBack(last)
- }
-
- // Copy user data.
- n, err := ops.write(last)
- done += int64(n)
- p.size += n
-
- // Handle errors.
- if err != nil {
- return done, err
- }
+ // Copy user data.
+ done, err := ops.write(&p.view)
+ if err != nil {
+ return done, err
}
+
if wanted > done {
// Partial write due to full pipe.
return done, syserror.ErrWouldBlock
@@ -396,7 +320,7 @@ func (p *Pipe) HasWriters() bool {
// Precondition: mu must be held.
func (p *Pipe) rReadinessLocked() waiter.EventMask {
ready := waiter.EventMask(0)
- if p.HasReaders() && p.data.Front() != nil {
+ if p.HasReaders() && p.view.Size() != 0 {
ready |= waiter.EventIn
}
if !p.HasWriters() && p.hadWriter {
@@ -422,7 +346,7 @@ func (p *Pipe) rReadiness() waiter.EventMask {
// Precondition: mu must be held.
func (p *Pipe) wReadinessLocked() waiter.EventMask {
ready := waiter.EventMask(0)
- if p.HasWriters() && p.size < p.max {
+ if p.HasWriters() && p.view.Size() < p.max {
ready |= waiter.EventOut
}
if !p.HasReaders() {
@@ -451,7 +375,7 @@ func (p *Pipe) rwReadiness() waiter.EventMask {
func (p *Pipe) queued() int64 {
p.mu.Lock()
defer p.mu.Unlock()
- return p.size
+ return p.view.Size()
}
// FifoSize implements fs.FifoSizer.FifoSize.
@@ -474,7 +398,7 @@ func (p *Pipe) SetFifoSize(size int64) (int64, error) {
}
p.mu.Lock()
defer p.mu.Unlock()
- if size < p.size {
+ if size < p.view.Size() {
return 0, syserror.EBUSY
}
p.max = size
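The core of the pipe.go change: the bufferList plus an explicit size counter is replaced by a single buffer.View, and the chunk-iteration loops in read and write collapse into one callback invocation, which also lets the separate dup() path disappear. The sketch below models the two View behaviors the new code depends on, ReadToWriter plus TrimFront, with a hypothetical toyView stand-in; the real gvisor.dev/gvisor/pkg/buffer API is assumed only to the extent shown in this diff.

package main

import (
    "bytes"
    "fmt"
    "io"
)

// toyView is a stand-in for buffer.View, modeling only the methods
// this diff uses: Size, ReadToWriter, and TrimFront.
type toyView struct {
    data []byte
}

func (v *toyView) Size() int64 { return int64(len(v.data)) }

// ReadToWriter writes up to count bytes out without consuming them.
func (v *toyView) ReadToWriter(w io.Writer, count int64) (int64, error) {
    if count > int64(len(v.data)) {
        count = int64(len(v.data))
    }
    n, err := w.Write(v.data[:count])
    return int64(n), err
}

// TrimFront drops n consumed bytes from the front of the view.
func (v *toyView) TrimFront(n int64) { v.data = v.data[n:] }

// readFromView mirrors the new read path: one callback consumes from
// the view, and dup-style reads skip TrimFront so data stays queued.
func readFromView(v *toyView, w io.Writer, count int64, dup bool) (int64, error) {
    if count > v.Size() {
        count = v.Size()
    }
    n, err := v.ReadToWriter(w, count)
    if !dup {
        v.TrimFront(n)
    }
    return n, err
}

func main() {
    v := &toyView{data: []byte("pipe data")}
    var sink bytes.Buffer

    readFromView(v, &sink, 4, true) // dup: peeks without consuming
    fmt.Println(sink.String(), v.Size()) // pipe 9

    sink.Reset()
    readFromView(v, &sink, 4, false) // normal read consumes
    fmt.Println(sink.String(), v.Size()) // pipe 5
}
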
diff --git a/pkg/sentry/kernel/pipe/pipe_state_autogen.go b/pkg/sentry/kernel/pipe/pipe_state_autogen.go
index f7f9f32dc..b49ab46f9 100755
--- a/pkg/sentry/kernel/pipe/pipe_state_autogen.go
+++ b/pkg/sentry/kernel/pipe/pipe_state_autogen.go
@@ -6,49 +6,6 @@ import (
"gvisor.dev/gvisor/pkg/state"
)
-func (x *buffer) beforeSave() {}
-func (x *buffer) save(m state.Map) {
- x.beforeSave()
- m.Save("data", &x.data)
- m.Save("read", &x.read)
- m.Save("write", &x.write)
- m.Save("bufferEntry", &x.bufferEntry)
-}
-
-func (x *buffer) afterLoad() {}
-func (x *buffer) load(m state.Map) {
- m.Load("data", &x.data)
- m.Load("read", &x.read)
- m.Load("write", &x.write)
- m.Load("bufferEntry", &x.bufferEntry)
-}
-
-func (x *bufferList) beforeSave() {}
-func (x *bufferList) save(m state.Map) {
- x.beforeSave()
- m.Save("head", &x.head)
- m.Save("tail", &x.tail)
-}
-
-func (x *bufferList) afterLoad() {}
-func (x *bufferList) load(m state.Map) {
- m.Load("head", &x.head)
- m.Load("tail", &x.tail)
-}
-
-func (x *bufferEntry) beforeSave() {}
-func (x *bufferEntry) save(m state.Map) {
- x.beforeSave()
- m.Save("next", &x.next)
- m.Save("prev", &x.prev)
-}
-
-func (x *bufferEntry) afterLoad() {}
-func (x *bufferEntry) load(m state.Map) {
- m.Load("next", &x.next)
- m.Load("prev", &x.prev)
-}
-
func (x *inodeOperations) beforeSave() {}
func (x *inodeOperations) save(m state.Map) {
x.beforeSave()
@@ -69,9 +26,8 @@ func (x *Pipe) save(m state.Map) {
m.Save("atomicIOBytes", &x.atomicIOBytes)
m.Save("readers", &x.readers)
m.Save("writers", &x.writers)
- m.Save("data", &x.data)
+ m.Save("view", &x.view)
m.Save("max", &x.max)
- m.Save("size", &x.size)
m.Save("hadWriter", &x.hadWriter)
}
@@ -81,9 +37,8 @@ func (x *Pipe) load(m state.Map) {
m.Load("atomicIOBytes", &x.atomicIOBytes)
m.Load("readers", &x.readers)
m.Load("writers", &x.writers)
- m.Load("data", &x.data)
+ m.Load("view", &x.view)
m.Load("max", &x.max)
- m.Load("size", &x.size)
m.Load("hadWriter", &x.hadWriter)
}
@@ -121,9 +76,6 @@ func (x *Writer) load(m state.Map) {
}
func init() {
- state.Register("pkg/sentry/kernel/pipe.buffer", (*buffer)(nil), state.Fns{Save: (*buffer).save, Load: (*buffer).load})
- state.Register("pkg/sentry/kernel/pipe.bufferList", (*bufferList)(nil), state.Fns{Save: (*bufferList).save, Load: (*bufferList).load})
- state.Register("pkg/sentry/kernel/pipe.bufferEntry", (*bufferEntry)(nil), state.Fns{Save: (*bufferEntry).save, Load: (*bufferEntry).load})
state.Register("pkg/sentry/kernel/pipe.inodeOperations", (*inodeOperations)(nil), state.Fns{Save: (*inodeOperations).save, Load: (*inodeOperations).load})
state.Register("pkg/sentry/kernel/pipe.Pipe", (*Pipe)(nil), state.Fns{Save: (*Pipe).save, Load: (*Pipe).load})
state.Register("pkg/sentry/kernel/pipe.Reader", (*Reader)(nil), state.Fns{Save: (*Reader).save, Load: (*Reader).load})
diff --git a/pkg/sentry/kernel/pipe/pipe_util.go b/pkg/sentry/kernel/pipe/pipe_util.go
index 80158239e..5a1d4fd57 100755
--- a/pkg/sentry/kernel/pipe/pipe_util.go
+++ b/pkg/sentry/kernel/pipe/pipe_util.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/amutex"
+ "gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sync"
@@ -49,9 +50,10 @@ func (p *Pipe) Read(ctx context.Context, dst usermem.IOSequence) (int64, error)
limit: func(l int64) {
dst = dst.TakeFirst64(l)
},
- read: func(buf *buffer) (int64, error) {
- n, err := dst.CopyOutFrom(ctx, buf)
+ read: func(view *buffer.View) (int64, error) {
+ n, err := dst.CopyOutFrom(ctx, view)
dst = dst.DropFirst64(n)
+ view.TrimFront(n)
return n, err
},
})
@@ -70,16 +72,15 @@ func (p *Pipe) WriteTo(ctx context.Context, w io.Writer, count int64, dup bool)
limit: func(l int64) {
count = l
},
- read: func(buf *buffer) (int64, error) {
- n, err := buf.ReadToWriter(w, count, dup)
+ read: func(view *buffer.View) (int64, error) {
+ n, err := view.ReadToWriter(w, count)
+ if !dup {
+ view.TrimFront(n)
+ }
count -= n
return n, err
},
}
- if dup {
- // There is no notification for dup operations.
- return p.dup(ctx, ops)
- }
n, err := p.read(ctx, ops)
if n > 0 {
p.Notify(waiter.EventOut)
@@ -96,8 +97,8 @@ func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error)
limit: func(l int64) {
src = src.TakeFirst64(l)
},
- write: func(buf *buffer) (int64, error) {
- n, err := src.CopyInTo(ctx, buf)
+ write: func(view *buffer.View) (int64, error) {
+ n, err := src.CopyInTo(ctx, view)
src = src.DropFirst64(n)
return n, err
},
@@ -117,8 +118,8 @@ func (p *Pipe) ReadFrom(ctx context.Context, r io.Reader, count int64) (int64, e
limit: func(l int64) {
count = l
},
- write: func(buf *buffer) (int64, error) {
- n, err := buf.WriteFromReader(r, count)
+ write: func(view *buffer.View) (int64, error) {
+ n, err := view.WriteFromReader(r, count)
count -= n
return n, err
},
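The pattern running through pipe_util.go is worth isolating: every entry point (Read, Write, WriteTo, ReadFrom) builds a small bundle of closures, left, limit, and a read or write callback, and hands it to the generic p.read or p.write, which owns the locking and limit arithmetic. Below is a toy model of that inversion; the types are simplified assumptions, not gVisor's exact signatures.

package main

import "fmt"

// readOps models the callback bundle pipe_util.go builds for p.read():
// left reports how much the caller still wants, limit caps it, and
// read performs one consume step against the queued data.
type readOps struct {
    left  func() int64
    limit func(int64)
    read  func(avail []byte) (int64, error)
}

// doRead is a toy Pipe.read: cap the request to what is queued, then
// hand the data to the caller-supplied read callback.
func doRead(queued []byte, ops readOps) (int64, []byte, error) {
    if ops.left() > int64(len(queued)) {
        ops.limit(int64(len(queued)))
    }
    n, err := ops.read(queued)
    return n, queued[n:], err
}

func main() {
    queued := []byte("hello world")
    want := int64(64) // caller asks for more than is queued
    dst := make([]byte, 0, 64)

    n, rest, _ := doRead(queued, readOps{
        left:  func() int64 { return want },
        limit: func(l int64) { want = l },
        read: func(avail []byte) (int64, error) {
            take := avail[:want]
            dst = append(dst, take...)
            return int64(len(take)), nil
        },
    })
    fmt.Println(n, string(dst), len(rest)) // 11 hello world 0
}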