Diffstat (limited to 'pkg/sentry/kernel')
-rw-r--r--  pkg/sentry/kernel/auth/user_namespace.go    3
-rw-r--r--  pkg/sentry/kernel/futex/futex.go             6
-rw-r--r--  pkg/sentry/kernel/kcov.go                    7
-rw-r--r--  pkg/sentry/kernel/pipe/node.go               4
-rw-r--r--  pkg/sentry/kernel/pipe/pipe.go               2
-rw-r--r--  pkg/sentry/kernel/pipe/pipe_util.go          3
-rw-r--r--  pkg/sentry/kernel/pipe/vfs.go                6
-rw-r--r--  pkg/sentry/kernel/posixtimer.go              3
-rw-r--r--  pkg/sentry/kernel/rseq.go                    2
-rw-r--r--  pkg/sentry/kernel/semaphore/semaphore.go     2
-rw-r--r--  pkg/sentry/kernel/task_block.go              2
-rw-r--r--  pkg/sentry/kernel/task_cgroup.go             4
-rw-r--r--  pkg/sentry/kernel/task_exit.go               3
-rw-r--r--  pkg/sentry/kernel/task_signals.go           10
-rw-r--r--  pkg/sentry/kernel/task_start.go              3
15 files changed, 30 insertions(+), 30 deletions(-)
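
Note: this change mechanically migrates error sentinels in pkg/sentry/kernel from the legacy pkg/syserror package to pkg/errors/linuxerr. A minimal sketch of the resulting convention, assuming only the linuxerr sentinels and linuxerr.Equals visible in the hunks below (the package and function names here are illustrative, not part of the change):

package example // illustrative only

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// reserveSlot returns a linuxerr sentinel where a syserror value would
// have been returned before this change.
func reserveSlot(inUse bool) error {
	if inUse {
		// Previously: return syserror.EBUSY
		return linuxerr.EBUSY
	}
	return nil
}

// slotBusy shows the comparison side: linuxerr.Equals (used in the
// pipe_util.go hunk below) also matches errors that are not the sentinel
// pointer itself, while direct == against a sentinel (task_signals.go
// hunk) remains valid.
func slotBusy(err error) bool {
	return linuxerr.Equals(linuxerr.EBUSY, err)
}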
diff --git a/pkg/sentry/kernel/auth/user_namespace.go b/pkg/sentry/kernel/auth/user_namespace.go
index bec0c28cd..40a406f9d 100644
--- a/pkg/sentry/kernel/auth/user_namespace.go
+++ b/pkg/sentry/kernel/auth/user_namespace.go
@@ -19,7 +19,6 @@ import (
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// A UserNamespace represents a user namespace. See user_namespaces(7) for
@@ -106,7 +105,7 @@ func (c *Credentials) NewChildUserNamespace() (*UserNamespace, error) {
if c.UserNamespace.depth() >= maxUserNamespaceDepth {
// "... Calls to unshare(2) or clone(2) that would cause this limit to
// be exceeded fail with the error EUSERS." - user_namespaces(7)
- return nil, syserror.EUSERS
+ return nil, linuxerr.EUSERS
}
// "EPERM: CLONE_NEWUSER was specified in flags, but either the effective
// user ID or the effective group ID of the caller does not have a mapping
diff --git a/pkg/sentry/kernel/futex/futex.go b/pkg/sentry/kernel/futex/futex.go
index 52fc6f2b7..6377abb94 100644
--- a/pkg/sentry/kernel/futex/futex.go
+++ b/pkg/sentry/kernel/futex/futex.go
@@ -123,7 +123,7 @@ func check(t Target, addr hostarch.Addr, val uint32) error {
return err
}
if cur != val {
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
return nil
}
@@ -671,7 +671,7 @@ func (m *Manager) lockPILocked(w *Waiter, t Target, addr hostarch.Addr, tid uint
return false, err
}
if (cur & linux.FUTEX_TID_MASK) == tid {
- return false, syserror.EDEADLK
+ return false, linuxerr.EDEADLK
}
if (cur & linux.FUTEX_TID_MASK) == 0 {
@@ -774,7 +774,7 @@ func (m *Manager) unlockPILocked(t Target, addr hostarch.Addr, tid uint32, b *bu
if prev != cur {
// Let user mode handle CAS races. This is different than lock, which
// retries when CAS fails.
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
return nil
}
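
Note: the futex hunks preserve the usual futex protocol: the manager re-reads the futex word and returns EAGAIN when it no longer holds the value the caller expects, leaving the retry to user space. A condensed sketch of that check, assuming a plain in-memory word rather than the Target interface used above:

package example // illustrative only

import (
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// checkWord mirrors futex.check: fail with EAGAIN if the word changed
// under the caller, so user space retries instead of the sentry sleeping
// on stale state.
func checkWord(word *uint32, want uint32) error {
	if atomic.LoadUint32(word) != want {
		return linuxerr.EAGAIN
	}
	return nil
}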
diff --git a/pkg/sentry/kernel/kcov.go b/pkg/sentry/kernel/kcov.go
index 941cc373f..e8a71bec1 100644
--- a/pkg/sentry/kernel/kcov.go
+++ b/pkg/sentry/kernel/kcov.go
@@ -29,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov
@@ -126,7 +125,7 @@ func (kcov *Kcov) InitTrace(size uint64) error {
defer kcov.mu.Unlock()
if kcov.mode != linux.KCOV_MODE_DISABLED {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// To simplify all the logic around mapping, we require that the length of the
@@ -166,13 +165,13 @@ func (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {
kcov.mode = linux.KCOV_MODE_TRACE_PC
case linux.KCOV_TRACE_CMP:
// We do not support KCOV_MODE_TRACE_CMP.
- return syserror.ENOTSUP
+ return linuxerr.ENOTSUP
default:
return linuxerr.EINVAL
}
if kcov.owningTask != nil && kcov.owningTask != t {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
kcov.owningTask = t
diff --git a/pkg/sentry/kernel/pipe/node.go b/pkg/sentry/kernel/pipe/node.go
index 2321d26dc..08786d704 100644
--- a/pkg/sentry/kernel/pipe/node.go
+++ b/pkg/sentry/kernel/pipe/node.go
@@ -113,7 +113,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi
// read side isn't open yet.
if flags.NonBlocking {
w.DecRef(ctx)
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
if !waitFor(&i.mu, &i.rWakeup, ctx) {
@@ -136,5 +136,5 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi
}
func (*inodeOperations) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
- return syserror.EPIPE
+ return linuxerr.EPIPE
}
diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go
index 4d68a6e4a..85e3ce9f4 100644
--- a/pkg/sentry/kernel/pipe/pipe.go
+++ b/pkg/sentry/kernel/pipe/pipe.go
@@ -440,7 +440,7 @@ func (p *Pipe) SetFifoSize(size int64) (int64, error) {
p.mu.Lock()
defer p.mu.Unlock()
if size < p.size {
- return 0, syserror.EBUSY
+ return 0, linuxerr.EBUSY
}
p.max = size
return size, nil
diff --git a/pkg/sentry/kernel/pipe/pipe_util.go b/pkg/sentry/kernel/pipe/pipe_util.go
index 3fa5d1d2f..84f9f6234 100644
--- a/pkg/sentry/kernel/pipe/pipe_util.go
+++ b/pkg/sentry/kernel/pipe/pipe_util.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/amutex"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -86,7 +87,7 @@ func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error)
if n > 0 {
p.Notify(waiter.ReadableEvents)
}
- if err == unix.EPIPE {
+ if linuxerr.Equals(linuxerr.EPIPE, err) {
// If we are returning EPIPE send SIGPIPE to the task.
if sendSig := linux.SignalNoInfoFuncFromContext(ctx); sendSig != nil {
sendSig(linux.SIGPIPE)
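
Note: the one non-mechanical change in this diff is above: the EPIPE check after a pipe write moves from a direct comparison against unix.EPIPE to linuxerr.Equals, which also matches the linuxerr sentinel, and a successful match still raises SIGPIPE in the writer as on Linux. A hypothetical condensed form of that pattern (notifyOnBrokenPipe and the callback shape are stand-ins):

package example // illustrative only

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// notifyOnBrokenPipe raises SIGPIPE alongside an EPIPE write error.
// linuxerr.Equals is used because the error may be a raw errno value
// rather than the linuxerr.EPIPE sentinel, so == is not sufficient.
func notifyOnBrokenPipe(err error, sendSig func(linux.Signal)) error {
	if linuxerr.Equals(linuxerr.EPIPE, err) && sendSig != nil {
		sendSig(linux.SIGPIPE)
	}
	return err
}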
diff --git a/pkg/sentry/kernel/pipe/vfs.go b/pkg/sentry/kernel/pipe/vfs.go
index 623375417..077d5fd7f 100644
--- a/pkg/sentry/kernel/pipe/vfs.go
+++ b/pkg/sentry/kernel/pipe/vfs.go
@@ -80,7 +80,7 @@ func (vp *VFSPipe) ReaderWriterPair(ctx context.Context, mnt *vfs.Mount, vfsd *v
// Allocate implements vfs.FileDescriptionImpl.Allocate.
func (*VFSPipe) Allocate(context.Context, uint64, uint64, uint64) error {
- return syserror.ESPIPE
+ return linuxerr.ESPIPE
}
// Open opens the pipe represented by vp.
@@ -132,7 +132,7 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s
// side isn't open yet.
if statusFlags&linux.O_NONBLOCK != 0 {
fd.DecRef(ctx)
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
// Wait for a reader to open the other end.
if !waitFor(&vp.mu, &vp.rWakeup, ctx) {
@@ -225,7 +225,7 @@ func (fd *VFSPipeFD) Readiness(mask waiter.EventMask) waiter.EventMask {
// Allocate implements vfs.FileDescriptionImpl.Allocate.
func (fd *VFSPipeFD) Allocate(ctx context.Context, mode, offset, length uint64) error {
- return syserror.ESPIPE
+ return linuxerr.ESPIPE
}
// EventRegister implements waiter.Waitable.EventRegister.
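
Note: both pipe open paths (node.go and vfs.go above) enforce the same fifo(7) rule: a non-blocking open of the write end with no reader present fails with ENXIO rather than blocking for one. A hypothetical condensed check, assuming the caller already knows whether a reader exists:

package example // illustrative only

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// checkWriteOpen returns ENXIO for an O_NONBLOCK write-only open of a
// fifo with no reader, per fifo(7); a blocking open would wait instead.
func checkWriteOpen(statusFlags uint32, haveReader bool) error {
	if !haveReader && statusFlags&linux.O_NONBLOCK != 0 {
		return linuxerr.ENXIO
	}
	return nil
}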
diff --git a/pkg/sentry/kernel/posixtimer.go b/pkg/sentry/kernel/posixtimer.go
index 049cc07df..319754a42 100644
--- a/pkg/sentry/kernel/posixtimer.go
+++ b/pkg/sentry/kernel/posixtimer.go
@@ -20,7 +20,6 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
)
// IntervalTimer represents a POSIX interval timer as described by
@@ -176,7 +175,7 @@ func (t *Task) IntervalTimerCreate(c ktime.Clock, sigev *linux.Sigevent) (linux.
break
}
if t.tg.nextTimerID == end {
- return 0, syserror.EAGAIN
+ return 0, linuxerr.EAGAIN
}
}
diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go
index 47babaa7a..5e0618e44 100644
--- a/pkg/sentry/kernel/rseq.go
+++ b/pkg/sentry/kernel/rseq.go
@@ -65,7 +65,7 @@ func (t *Task) SetRSeq(addr hostarch.Addr, length, signature uint32) error {
if t.rseqSignature != signature {
return linuxerr.EINVAL
}
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// rseq must be aligned and correctly sized.
diff --git a/pkg/sentry/kernel/semaphore/semaphore.go b/pkg/sentry/kernel/semaphore/semaphore.go
index dda22cfb8..067cc75c1 100644
--- a/pkg/sentry/kernel/semaphore/semaphore.go
+++ b/pkg/sentry/kernel/semaphore/semaphore.go
@@ -582,7 +582,7 @@ func (s *Set) ExecuteOps(ctx context.Context, ops []linux.Sembuf, creds *auth.Cr
readOnly := true
for _, op := range ops {
if s.findSem(int32(op.SemNum)) == nil {
- return nil, 0, syserror.EFBIG
+ return nil, 0, linuxerr.EFBIG
}
if op.SemOp != 0 {
readOnly = false
diff --git a/pkg/sentry/kernel/task_block.go b/pkg/sentry/kernel/task_block.go
index 07533d982..b2520eecf 100644
--- a/pkg/sentry/kernel/task_block.go
+++ b/pkg/sentry/kernel/task_block.go
@@ -163,7 +163,7 @@ func (t *Task) block(C <-chan struct{}, timerChan <-chan struct{}) error {
region.End()
t.SleepFinish(true)
// We've timed out.
- return syserror.ETIMEDOUT
+ return linuxerr.ETIMEDOUT
}
}
diff --git a/pkg/sentry/kernel/task_cgroup.go b/pkg/sentry/kernel/task_cgroup.go
index 7c138e80f..7dd6d3108 100644
--- a/pkg/sentry/kernel/task_cgroup.go
+++ b/pkg/sentry/kernel/task_cgroup.go
@@ -20,8 +20,8 @@ import (
"sort"
"strings"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/syserror"
)
// EnterInitialCgroups moves t into an initial set of cgroups.
@@ -67,7 +67,7 @@ func (t *Task) EnterCgroup(c Cgroup) error {
//
// TODO(b/183137098): Implement cgroup migration.
log.Warningf("Cgroup migration is not implemented")
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
}
}
diff --git a/pkg/sentry/kernel/task_exit.go b/pkg/sentry/kernel/task_exit.go
index d115b8783..fe08c7519 100644
--- a/pkg/sentry/kernel/task_exit.go
+++ b/pkg/sentry/kernel/task_exit.go
@@ -31,6 +31,7 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
@@ -942,7 +943,7 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {
if anyWaitableTasks {
return nil, ErrNoWaitableEvent
}
- return nil, syserror.ECHILD
+ return nil, linuxerr.ECHILD
}
// Preconditions: The TaskSet mutex must be locked for writing.
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
index f54c774cb..72dce7cd9 100644
--- a/pkg/sentry/kernel/task_signals.go
+++ b/pkg/sentry/kernel/task_signals.go
@@ -339,7 +339,7 @@ func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*linux.
}
if timeout == 0 {
- return nil, syserror.EAGAIN
+ return nil, linuxerr.EAGAIN
}
// Unblock signals we're waiting for. Remember the original signal mask so
@@ -360,8 +360,8 @@ func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*linux.
if info := t.dequeueSignalLocked(mask); info != nil {
return info, nil
}
- if err == syserror.ETIMEDOUT {
- return nil, syserror.EAGAIN
+ if err == linuxerr.ETIMEDOUT {
+ return nil, linuxerr.EAGAIN
}
return nil, err
}
@@ -372,7 +372,7 @@ func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*linux.
//
// syserror.ESRCH - The task has exited.
// linuxerr.EINVAL - The signal is not valid.
-// syserror.EAGAIN - THe signal is realtime, and cannot be queued.
+// linuxerr.EAGAIN - The signal is realtime, and cannot be queued.
//
func (t *Task) SendSignal(info *linux.SignalInfo) error {
t.tg.pidns.owner.mu.RLock()
@@ -451,7 +451,7 @@ func (t *Task) sendSignalTimerLocked(info *linux.SignalInfo, group bool, timer *
}
if !q.enqueue(info, timer) {
if sig.IsRealtime() {
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
t.Debugf("Discarding duplicate signal %d", sig)
if timer != nil {
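
Note: in the Sigtimedwait hunk above, a timeout from the internal block is reported to the caller as EAGAIN, matching sigtimedwait(2); both sentinels now come from linuxerr and compare correctly with ==. A minimal sketch of that mapping (mapTimeout is a stand-in name):

package example // illustrative only

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// mapTimeout converts an internal ETIMEDOUT into the EAGAIN that
// sigtimedwait(2) specifies, passing any other error through unchanged.
func mapTimeout(err error) error {
	if err == linuxerr.ETIMEDOUT {
		return linuxerr.EAGAIN
	}
	return err
}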
diff --git a/pkg/sentry/kernel/task_start.go b/pkg/sentry/kernel/task_start.go
index 41fd2d471..0565059c1 100644
--- a/pkg/sentry/kernel/task_start.go
+++ b/pkg/sentry/kernel/task_start.go
@@ -17,6 +17,7 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -299,7 +300,7 @@ func (ns *PIDNamespace) allocateTID() (ThreadID, error) {
// Did we do a full cycle?
if tid == ns.last {
// No tid available.
- return 0, syserror.EAGAIN
+ return 0, linuxerr.EAGAIN
}
}
}
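
Note: the task_start.go hunk above (like the posixtimer.go one) uses the same allocation idiom: scan IDs starting just past the last one handed out, wrap around, and fail with EAGAIN once a full cycle finds nothing free. A simplified sketch of that idiom with the new sentinel (allocateID and its parameters are illustrative):

package example // illustrative only

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// allocateID scans at most max candidates, starting just past last and
// wrapping back to 1, and reports EAGAIN after a full cycle with no free
// ID, as PIDNamespace.allocateTID and IntervalTimerCreate do above.
func allocateID(used map[int32]bool, last, max int32) (int32, error) {
	id := last
	for n := int32(0); n < max; n++ {
		id++
		if id > max {
			id = 1 // wrap around; ID 0 is reserved
		}
		if !used[id] {
			return id, nil
		}
	}
	return 0, linuxerr.EAGAIN
}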