From 26e8d9981fcf6d08199a9fd9c609d9715c3cf37e Mon Sep 17 00:00:00 2001
From: Jamie Liu
Date: Fri, 29 Mar 2019 16:24:29 -0700
Subject: Use kernel.Task.CopyScratchBuffer in syscalls/linux where possible.

PiperOrigin-RevId: 241072126
Change-Id: Ib4d9f58f550732ac4c5153d3cf159a5b1a9749da
---
 pkg/sentry/kernel/task.go               | 9 ++++-----
 pkg/sentry/syscalls/linux/sys_prctl.go  | 2 +-
 pkg/sentry/syscalls/linux/sys_socket.go | 2 +-
 pkg/sentry/syscalls/linux/sys_stat.go   | 2 +-
 4 files changed, 7 insertions(+), 8 deletions(-)

(limited to 'pkg')

diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go
index f958aba26..9c365e781 100644
--- a/pkg/sentry/kernel/task.go
+++ b/pkg/sentry/kernel/task.go
@@ -550,17 +550,16 @@ func (t *Task) afterLoad() {
 	t.futexWaiter = futex.NewWaiter()
 }
 
-// copyScratchBufferLen is the length of the copyScratchBuffer field of the Task
-// struct.
-const copyScratchBufferLen = 52
+// copyScratchBufferLen is the length of Task.copyScratchBuffer.
+const copyScratchBufferLen = 144 // sizeof(struct stat)
 
 // CopyScratchBuffer returns a scratch buffer to be used in CopyIn/CopyOut
 // functions. It must only be used within those functions and can only be used
 // by the task goroutine; it exists to improve performance and thus
 // intentionally lacks any synchronization.
 //
-// Callers should pass a constant value as an argument, which will allow the
-// compiler to inline and optimize out the if statement below.
+// Callers should pass a constant value as an argument if possible, which will
+// allow the compiler to inline and optimize out the if statement below.
 func (t *Task) CopyScratchBuffer(size int) []byte {
 	if size > copyScratchBufferLen {
 		return make([]byte, size)
diff --git a/pkg/sentry/syscalls/linux/sys_prctl.go b/pkg/sentry/syscalls/linux/sys_prctl.go
index 4938f27bd..7a29bd9b7 100644
--- a/pkg/sentry/syscalls/linux/sys_prctl.go
+++ b/pkg/sentry/syscalls/linux/sys_prctl.go
@@ -75,7 +75,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
 
 	case linux.PR_GET_NAME:
 		addr := args[1].Pointer()
-		buf := make([]byte, linux.TASK_COMM_LEN)
+		buf := t.CopyScratchBuffer(linux.TASK_COMM_LEN)
 		len := copy(buf, t.Name())
 		if len < linux.TASK_COMM_LEN {
 			buf[len] = 0
diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go
index 564357bac..49e6f4aeb 100644
--- a/pkg/sentry/syscalls/linux/sys_socket.go
+++ b/pkg/sentry/syscalls/linux/sys_socket.go
@@ -516,7 +516,7 @@ func SetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
 	if optLen > maxOptLen {
 		return 0, nil, syscall.EINVAL
 	}
-	buf := make([]byte, optLen)
+	buf := t.CopyScratchBuffer(int(optLen))
 	if _, err := t.CopyIn(optValAddr, &buf); err != nil {
 		return 0, nil, err
 	}
diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go
index 8d6a8f616..bdfb9b3ef 100644
--- a/pkg/sentry/syscalls/linux/sys_stat.go
+++ b/pkg/sentry/syscalls/linux/sys_stat.go
@@ -133,7 +133,7 @@ func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) err
 	// common syscall for many applications, and t.CopyObjectOut has
 	// noticeable performance impact due to its many slice allocations and
 	// use of reflection.
-	b := make([]byte, 0, linux.SizeOfStat)
+	b := t.CopyScratchBuffer(int(linux.SizeOfStat))[:0]
 
 	// Dev (uint64)
 	b = binary.AppendUint64(b, usermem.ByteOrder, uint64(d.Inode.StableAttr.DeviceID))
-- 
cgit v1.2.3
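
Note: the change above consistently replaces per-call make() allocations with the task's reusable scratch buffer. The standalone Go sketch below only illustrates that pattern; the task type, field names, and sizes are stand-ins chosen for the example, not the actual kernel.Task implementation.

package main

import "fmt"

// scratchLen mirrors the idea behind copyScratchBufferLen: a fixed capacity
// sized to cover the largest common request. The value is illustrative.
const scratchLen = 144

// task stands in for a structure owned by a single goroutine (analogous to
// kernel.Task). The scratch buffer is deliberately unsynchronized because
// only that goroutine may touch it.
type task struct {
	scratch [scratchLen]byte
}

// copyScratchBuffer returns the reusable buffer when the request fits and
// falls back to a heap allocation otherwise. Passing a constant size lets
// the compiler determine the branch at compile time.
func (t *task) copyScratchBuffer(size int) []byte {
	if size > scratchLen {
		return make([]byte, size)
	}
	return t.scratch[:size]
}

func main() {
	t := &task{}

	// Small request: reuses the scratch array, no allocation.
	small := t.copyScratchBuffer(16)
	fmt.Println("small len:", len(small), "cap:", cap(small))

	// Oversized request: falls back to make().
	big := t.copyScratchBuffer(4096)
	fmt.Println("big len:", len(big), "cap:", cap(big))

	// Re-slicing to zero length, as sys_stat.go does with
	// CopyScratchBuffer(...)[:0], allows appending a fixed-size structure
	// without reallocating.
	b := t.copyScratchBuffer(scratchLen)[:0]
	b = append(b, 1, 2, 3)
	fmt.Println("serialized bytes:", len(b))
}

Because the buffer is reused across calls on the same task goroutine, a caller must finish with the returned slice before requesting the scratch buffer again, which matches the constraint documented on CopyScratchBuffer in the diff.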