author    Kevin Krakauer <krakauer@google.com>  2019-03-21 18:03:49 -0700
committer Shentubot <shentubot@google.com>      2019-03-21 18:05:07 -0700
commit    0cd5f2004444b1c792ab3d4bd3b01699b11b9553
tree      110883d8ae024c9df8929fb8834926b63fd7cba0
parent    ba828233b9e934992ac024232e5018ce9971f334
Replace manual pty copies to/from userspace with safemem operations.
Also, changing queue.waitBuf from a bytes.Buffer to a [][]byte should reduce copying and reallocation of slices.

PiperOrigin-RevId: 239713547
Change-Id: I6ee5ff19c3ee2662f1af5749cae7b73db0569e96
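
As background for the diff below, the safemem-based copy pattern this change adopts looks roughly like the following sketch. The byteQueue type is a hypothetical stand-in (no locking, no line discipline); only the safemem calls (BlockSeqOf, BlockFromSafeSlice, CopySeq) mirror what queue.go actually does.

package tty

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
)

// byteQueue is a hypothetical, simplified stand-in for queue: a single
// in-sentry byte buffer that user pages are copied into and out of.
type byteQueue struct {
	buf []byte
}

// ReadToBlocks implements safemem.Reader.ReadToBlocks. It copies straight
// from buf into the destination blocks, so no intermediate []byte is
// allocated for the copy out to userspace.
func (q *byteQueue) ReadToBlocks(dst safemem.BlockSeq) (uint64, error) {
	src := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(q.buf))
	n, err := safemem.CopySeq(dst, src)
	if err != nil {
		return 0, err
	}
	// Discard whatever was copied out.
	q.buf = q.buf[n:]
	return n, nil
}

// WriteFromBlocks implements safemem.Writer.WriteFromBlocks. It copies the
// source blocks into one freshly allocated chunk, which the caller can then
// enqueue wholesale.
func (q *byteQueue) WriteFromBlocks(src safemem.BlockSeq) (uint64, error) {
	chunk := make([]byte, src.NumBytes())
	dst := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(chunk))
	n, err := safemem.CopySeq(dst, src)
	if err != nil {
		return 0, err
	}
	q.buf = append(q.buf, chunk[:n]...)
	return n, nil
}

With these two methods in place, a usermem.IOSequence can be drained or filled directly via dst.CopyOutFrom(ctx, q) and src.CopyInTo(ctx, q), which is how the new read and write in queue.go avoid the manual CopyIn/Write round trip.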
Diffstat (limited to 'pkg/sentry/fs/tty')
-rw-r--r--  pkg/sentry/fs/tty/BUILD               |   1
-rw-r--r--  pkg/sentry/fs/tty/line_discipline.go  |  46
-rw-r--r--  pkg/sentry/fs/tty/queue.go            | 154
3 files changed, 123 insertions(+), 78 deletions(-)
diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD
index bee2db3f3..908d9de09 100644
--- a/pkg/sentry/fs/tty/BUILD
+++ b/pkg/sentry/fs/tty/BUILD
@@ -24,6 +24,7 @@ go_library(
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/kernel/auth",
+ "//pkg/sentry/safemem",
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/unimpl",
"//pkg/sentry/usermem",
diff --git a/pkg/sentry/fs/tty/line_discipline.go b/pkg/sentry/fs/tty/line_discipline.go
index 484366f85..31b6344f2 100644
--- a/pkg/sentry/fs/tty/line_discipline.go
+++ b/pkg/sentry/fs/tty/line_discipline.go
@@ -140,9 +140,9 @@ func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arc
// buffer to its read buffer. Anything already in the read buffer is
// now readable.
if oldCanonEnabled && !l.termios.LEnabled(linux.ICANON) {
- if n := l.inQueue.pushWaitBuf(l); n > 0 {
- l.slaveWaiter.Notify(waiter.EventIn)
- }
+ l.inQueue.pushWaitBuf(l)
+ l.inQueue.readable = true
+ l.slaveWaiter.Notify(waiter.EventIn)
}
return 0, err
@@ -263,7 +263,7 @@ type outputQueueTransformer struct{}
// transform does output processing for one end of the pty. See
// drivers/tty/n_tty.c:do_output_char for an analogous kernel function.
//
-// Precondition:
+// Preconditions:
// * l.termiosMu must be held for reading.
// * q.mu must be held.
func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {
@@ -271,11 +271,11 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte
// master termios never has ICANON set.
if !l.termios.OEnabled(linux.OPOST) {
- n, _ := q.readBuf.Write(buf)
- if q.readBuf.Len() > 0 {
+ q.readBuf = append(q.readBuf, buf...)
+ if len(q.readBuf) > 0 {
q.readable = true
}
- return n
+ return len(buf)
}
var ret int
@@ -289,7 +289,7 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte
l.column = 0
}
if l.termios.OEnabled(linux.ONLCR) {
- q.readBuf.Write([]byte{'\r', '\n'})
+ q.readBuf = append(q.readBuf, '\r', '\n')
continue
}
case '\r':
@@ -308,7 +308,7 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte
spaces := spacesPerTab - l.column%spacesPerTab
if l.termios.OutputFlags&linux.TABDLY == linux.XTABS {
l.column += spaces
- q.readBuf.Write(bytes.Repeat([]byte{' '}, spacesPerTab))
+ q.readBuf = append(q.readBuf, bytes.Repeat([]byte{' '}, spacesPerTab)...)
continue
}
l.column += spaces
@@ -319,9 +319,12 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte
default:
l.column++
}
- q.readBuf.WriteRune(c)
+ // The compiler optimizes this by growing readBuf without
+ // creating the intermediate slice.
+ q.readBuf = append(q.readBuf, make([]byte, size)...)
+ utf8.EncodeRune(q.readBuf[len(q.readBuf)-size:], c)
}
- if q.readBuf.Len() > 0 {
+ if len(q.readBuf) > 0 {
q.readable = true
}
return ret
@@ -338,7 +341,7 @@ type inputQueueTransformer struct{}
// drivers/tty/n_tty.c:n_tty_receive_char_special for an analogous kernel
// function.
//
-// Precondition:
+// Preconditions:
// * l.termiosMu must be held for reading.
// * q.mu must be held.
func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {
@@ -354,7 +357,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
}
var ret int
- for len(buf) > 0 && q.readBuf.Len() < canonMaxBytes {
+ for len(buf) > 0 && len(q.readBuf) < canonMaxBytes {
c, size := l.peekRune(buf)
switch c {
case '\r':
@@ -381,7 +384,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
}
// Stop if the buffer would be overfilled.
- if q.readBuf.Len()+size > maxBytes {
+ if len(q.readBuf)+size > maxBytes {
break
}
cBytes := buf[:size]
@@ -394,12 +397,15 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
break
}
- q.readBuf.WriteRune(c)
+ // The compiler optimizes this by growing readBuf without
+ // creating the intermediate slice.
+ q.readBuf = append(q.readBuf, make([]byte, size)...)
+ utf8.EncodeRune(q.readBuf[len(q.readBuf)-size:], c)
+
// Anything written to the readBuf will have to be echoed.
if l.termios.LEnabled(linux.ECHO) {
- if l.outQueue.writeBytes(cBytes, l) > 0 {
- l.masterWaiter.Notify(waiter.EventIn)
- }
+ l.outQueue.writeBytes(cBytes, l)
+ l.masterWaiter.Notify(waiter.EventIn)
}
// If we finish a line, make it available for reading.
@@ -410,7 +416,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
}
// In noncanonical mode, everything is readable.
- if !l.termios.LEnabled(linux.ICANON) && q.readBuf.Len() > 0 {
+ if !l.termios.LEnabled(linux.ICANON) && len(q.readBuf) > 0 {
q.readable = true
}
@@ -425,7 +431,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
// * l.termiosMu must be held for reading.
// * q.mu must be held.
func (l *lineDiscipline) shouldDiscard(q *queue, c rune) bool {
- return l.termios.LEnabled(linux.ICANON) && q.readBuf.Len()+utf8.RuneLen(c) >= canonMaxBytes && !l.termios.IsTerminating(c)
+ return l.termios.LEnabled(linux.ICANON) && len(q.readBuf)+utf8.RuneLen(c) >= canonMaxBytes && !l.termios.IsTerminating(c)
}
// peekRune returns the first rune from the byte array depending on whether
diff --git a/pkg/sentry/fs/tty/queue.go b/pkg/sentry/fs/tty/queue.go
index a09ca0119..f39f47941 100644
--- a/pkg/sentry/fs/tty/queue.go
+++ b/pkg/sentry/fs/tty/queue.go
@@ -15,17 +15,21 @@
package tty
import (
- "bytes"
"sync"
"gvisor.googlesource.com/gvisor/pkg/abi/linux"
"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
"gvisor.googlesource.com/gvisor/pkg/syserror"
"gvisor.googlesource.com/gvisor/pkg/waiter"
)
+// waitBufMaxBytes is the maximum size of a wait buffer. It is based on
+// TTYB_DEFAULT_MEM_LIMIT.
+const waitBufMaxBytes = 131072
+
// queue represents one of the input or output queues between a pty master and
// slave. Bytes written to a queue are added to the read buffer until it is
// full, at which point they are written to the wait buffer. Bytes are
@@ -40,12 +44,13 @@ type queue struct {
// readBuf is buffer of data ready to be read when readable is true.
// This data has been processed.
- readBuf bytes.Buffer `state:".([]byte)"`
+ readBuf []byte
// waitBuf contains data that can't fit into readBuf. It is put here
// until it can be loaded into the read buffer. waitBuf contains data
// that hasn't been processed.
- waitBuf bytes.Buffer `state:".([]byte)"`
+ waitBuf [][]byte
+ waitBufLen uint64
// readable indicates whether the read buffer can be read from. In
// canonical mode, there can be an unterminated line in the read buffer,
@@ -58,31 +63,54 @@ type queue struct {
transformer
}
-// saveReadBuf is invoked by stateify.
-func (q *queue) saveReadBuf() []byte {
- return append([]byte(nil), q.readBuf.Bytes()...)
-}
+// ReadToBlocks implements safemem.Reader.ReadToBlocks.
+func (q *queue) ReadToBlocks(dst safemem.BlockSeq) (uint64, error) {
+ src := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(q.readBuf))
+ n, err := safemem.CopySeq(dst, src)
+ if err != nil {
+ return 0, err
+ }
+ q.readBuf = q.readBuf[n:]
-// loadReadBuf is invoked by stateify.
-func (q *queue) loadReadBuf(b []byte) {
- q.readBuf.Write(b)
-}
+ // If we read everything, this queue is no longer readable.
+ if len(q.readBuf) == 0 {
+ q.readable = false
+ }
-// saveWaitBuf is invoked by stateify.
-func (q *queue) saveWaitBuf() []byte {
- return append([]byte(nil), q.waitBuf.Bytes()...)
+ return n, nil
}
-// loadWaitBuf is invoked by stateify.
-func (q *queue) loadWaitBuf(b []byte) {
- q.waitBuf.Write(b)
+// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
+func (q *queue) WriteFromBlocks(src safemem.BlockSeq) (uint64, error) {
+ copyLen := src.NumBytes()
+ room := waitBufMaxBytes - q.waitBufLen
+ // If out of room, return EAGAIN.
+ if room == 0 && copyLen > 0 {
+ return 0, syserror.ErrWouldBlock
+ }
+ // Cap the size of the wait buffer.
+ if copyLen > room {
+ copyLen = room
+ src = src.TakeFirst64(room)
+ }
+ buf := make([]byte, copyLen)
+
+ // Copy the data into the wait buffer.
+ dst := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf))
+ n, err := safemem.CopySeq(dst, src)
+ if err != nil {
+ return 0, err
+ }
+ q.waitBufAppend(buf)
+
+ return n, nil
}
// readReadiness returns whether q is ready to be read from.
func (q *queue) readReadiness(t *linux.KernelTermios) waiter.EventMask {
q.mu.Lock()
defer q.mu.Unlock()
- if q.readBuf.Len() > 0 && q.readable {
+ if len(q.readBuf) > 0 && q.readable {
return waiter.EventIn
}
return waiter.EventMask(0)
@@ -90,8 +118,10 @@ func (q *queue) readReadiness(t *linux.KernelTermios) waiter.EventMask {
// writeReadiness returns whether q is ready to be written to.
func (q *queue) writeReadiness(t *linux.KernelTermios) waiter.EventMask {
- // Like Linux, we don't impose a maximum size on what can be enqueued.
- return waiter.EventOut
+ if q.waitBufLen < waitBufMaxBytes {
+ return waiter.EventOut
+ }
+ return waiter.EventMask(0)
}
// readableSize writes the number of readable bytes to userspace.
@@ -100,7 +130,7 @@ func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.Sysca
defer q.mu.Unlock()
var size int32
if q.readable {
- size = int32(q.readBuf.Len())
+ size = int32(len(q.readBuf))
}
_, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), size, usermem.IOOpts{
@@ -119,29 +149,19 @@ func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.Sysca
func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {
q.mu.Lock()
defer q.mu.Unlock()
+
if !q.readable {
return 0, false, syserror.ErrWouldBlock
}
- // Read out from the read buffer.
- n := canonMaxBytes
- if n > int(dst.NumBytes()) {
- n = int(dst.NumBytes())
+ if dst.NumBytes() > canonMaxBytes {
+ dst = dst.TakeFirst(canonMaxBytes)
}
- if n > q.readBuf.Len() {
- n = q.readBuf.Len()
- }
- n, err := dst.Writer(ctx).Write(q.readBuf.Bytes()[:n])
+
+ n, err := dst.CopyOutFrom(ctx, q)
if err != nil {
return 0, false, err
}
- // Discard bytes read out.
- q.readBuf.Next(n)
-
- // If we read everything, this queue is no longer readable.
- if q.readBuf.Len() == 0 {
- q.readable = false
- }
// Move data from the queue's wait buffer to its read buffer.
nPushed := q.pushWaitBufLocked(l)
@@ -154,37 +174,32 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl
// Preconditions:
// * l.termiosMu must be held for reading.
func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) {
- // TODO: Use CopyInTo/safemem to avoid extra copying.
- // Copy in the bytes to write from user-space.
- b := make([]byte, src.NumBytes())
- n, err := src.CopyIn(ctx, b)
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ // Copy data into the wait buffer.
+ n, err := src.CopyInTo(ctx, q)
if err != nil {
return 0, err
}
- b = b[:n]
- // If state changed, notify any waiters. If we were unable to write
- // anything, let the caller know we could block.
- if c := q.writeBytes(b, l); c > 0 {
- return c, nil
- }
- return 0, syserror.ErrWouldBlock
+ // Push data from the wait to the read buffer.
+ q.pushWaitBufLocked(l)
+
+ return n, nil
}
// writeBytes writes to q from b.
//
// Preconditions:
// * l.termiosMu must be held for reading.
-func (q *queue) writeBytes(b []byte, l *lineDiscipline) int64 {
+func (q *queue) writeBytes(b []byte, l *lineDiscipline) {
q.mu.Lock()
defer q.mu.Unlock()
- // Write as much as possible to the read buffer.
- n := q.transform(l, q, b)
-
- // Write remaining data to the wait buffer.
- nWaiting, _ := q.waitBuf.Write(b[n:])
- return int64(n + nWaiting)
+ // Write to the wait buffer.
+ q.waitBufAppend(b)
+ q.pushWaitBufLocked(l)
}
// pushWaitBuf fills the queue's read buffer with data from the wait buffer.
@@ -201,9 +216,32 @@ func (q *queue) pushWaitBuf(l *lineDiscipline) int {
// * l.termiosMu must be held for reading.
// * q.mu must be locked.
func (q *queue) pushWaitBufLocked(l *lineDiscipline) int {
- // Remove bytes from the wait buffer and move them to the read buffer.
- n := q.transform(l, q, q.waitBuf.Bytes())
- q.waitBuf.Next(n)
+ if q.waitBufLen == 0 {
+ return 0
+ }
+
+ // Move data from the wait to the read buffer.
+ var total int
+ var i int
+ for i = 0; i < len(q.waitBuf); i++ {
+ n := q.transform(l, q, q.waitBuf[i])
+ total += n
+ if n != len(q.waitBuf[i]) {
+ // The read buffer filled up without consuming the
+ // entire buffer.
+ q.waitBuf[i] = q.waitBuf[i][n:]
+ break
+ }
+ }
+
+ // Update wait buffer based on consumed data.
+ q.waitBuf = q.waitBuf[i:]
+ q.waitBufLen -= uint64(total)
+
+ return total
+}
- return n
+func (q *queue) waitBufAppend(b []byte) {
+ q.waitBuf = append(q.waitBuf, b)
+ q.waitBufLen += uint64(len(b))
}
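
For reference, the [][]byte wait-buffer bookkeeping introduced above can be summarized by the self-contained sketch below. waitBuffer, push, drain, and the consume callback are hypothetical names; in the real code, pushWaitBufLocked plays the role of drain with q.transform as the consumer.

package tty

// waitBuffer is a hypothetical, simplified model of queue's [][]byte wait
// buffer: writers append whole chunks without copying their bytes, and
// draining hands chunks to a consumer until it stops accepting bytes.
type waitBuffer struct {
	chunks [][]byte
	length uint64
}

// push enqueues b as one chunk and tracks the total buffered length.
func (w *waitBuffer) push(b []byte) {
	w.chunks = append(w.chunks, b)
	w.length += uint64(len(b))
}

// drain feeds chunks to consume, which reports how many bytes it accepted.
// A partially consumed chunk is trimmed and kept at the head, mirroring
// pushWaitBufLocked above.
func (w *waitBuffer) drain(consume func([]byte) int) int {
	var total int
	var i int
	for i = 0; i < len(w.chunks); i++ {
		n := consume(w.chunks[i])
		total += n
		if n != len(w.chunks[i]) {
			w.chunks[i] = w.chunks[i][n:]
			break
		}
	}
	w.chunks = w.chunks[i:]
	w.length -= uint64(total)
	return total
}

Compared with the old bytes.Buffer, push stores each chunk by reference instead of copying its bytes into a contiguous buffer, which is the copy and reallocation saving the commit message describes.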