author     gVisor bot <gvisor-bot@google.com>  2021-07-12 22:31:07 +0000
committer  gVisor bot <gvisor-bot@google.com>  2021-07-12 22:31:07 +0000
commit     cbf3251b8d2ea4b6ab3eff2bd6a4b2804a11706c (patch)
tree       24b26c36e631a803a71bffeff8f0658247db0fc2 /pkg/sentry/mm
parent     f28c339def09aea90fe60b3abb38215c5e5c8e52 (diff)
parent     e3fdd1593217894328d5a917bbc356d0a7d145f0 (diff)
Merge release-20210705.0-11-ge3fdd1593 (automated)
Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/aio_context.go       7
-rw-r--r--  pkg/sentry/mm/io.go               26
-rw-r--r--  pkg/sentry/mm/pma.go               6
-rw-r--r--  pkg/sentry/mm/special_mappable.go  3
-rw-r--r--  pkg/sentry/mm/syscalls.go         16
-rw-r--r--  pkg/sentry/mm/vma.go               4
6 files changed, 30 insertions, 32 deletions
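The substance of this merge is mechanical: every EFAULT return in pkg/sentry/mm moves from the older syserror package to pkg/errors/linuxerr. Both packages expose Linux errno values as shared sentinel errors, so call sites change only their import and qualifier. The sketch below illustrates that sentinel pattern; the Error type, errno number, and checkRange helper are illustrative stand-ins, not gVisor's actual definitions.

package main

import (
	"errors"
	"fmt"
)

// Error pairs a Linux errno number with its conventional message.
type Error struct {
	errno int
	msg   string
}

func (e *Error) Error() string { return e.msg }

// EFAULT mirrors Linux's "bad address" errno (14 on most architectures).
var EFAULT = &Error{errno: 14, msg: "bad address"}

// checkRange fails the way the patched call sites do: one shared
// sentinel value, comparable with == or errors.Is.
func checkRange(addr, length, limit uint64) error {
	if addr+length > limit {
		return EFAULT
	}
	return nil
}

func main() {
	err := checkRange(0xfff0, 0x20, 0x10000)
	fmt.Println(errors.Is(err, EFAULT)) // true
}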
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 23747daba..b7f765cd7 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -23,7 +23,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -306,7 +305,7 @@ func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar ho
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(ar.Length()) != aioRingBufferSize {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
return nil
}
@@ -320,7 +319,7 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// Require that the mapping correspond to a live AIOContext. Compare
// Linux's fs/aio.c:aio_ring_mremap().
@@ -351,7 +350,7 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
- err = &memmap.BusError{syserror.EFAULT}
+ err = &memmap.BusError{linuxerr.EFAULT}
}
if source := optional.Intersect(memmap.MappableRange{0, m.fr.Length()}); source.Length() != 0 {
return []memmap.Translation{
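The aio_context.go hunks above all sit in validation paths that mirror Linux's VM_DONTEXPAND behavior: a mapping of the AIO ring buffer is rejected unless it covers exactly the whole buffer at offset 0, so it can never be grown via mremap. A minimal sketch of that check, with a stand-in buffer size and error value rather than gVisor's:

package main

import (
	"errors"
	"fmt"
)

var errEFAULT = errors.New("bad address")

// Stand-in for the real aioRingBufferSize constant.
const aioRingBufferSize = 32 * 4096

// validateRingMapping rejects any mapping that does not span the ring
// buffer exactly, so the mapping can never be expanded.
func validateRingMapping(offset, length uint64) error {
	if offset != 0 || length != aioRingBufferSize {
		return errEFAULT
	}
	return nil
}

func main() {
	fmt.Println(validateRingMapping(0, aioRingBufferSize))   // <nil>
	fmt.Println(validateRingMapping(0, 2*aioRingBufferSize)) // bad address
}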
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index 16f318ab3..5fcfeb473 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -16,10 +16,10 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -97,14 +97,14 @@ func translateIOError(ctx context.Context, err error) error {
if logIOErrors {
ctx.Debugf("MM I/O error: %v", err)
}
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// CopyOut implements usermem.IO.CopyOut.
func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(src)))
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if len(src) == 0 {
@@ -147,7 +147,7 @@ func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src
func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(dst)))
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if len(dst) == 0 {
@@ -190,7 +190,7 @@ func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst [
func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
ar, ok := mm.CheckIORange(addr, toZero)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if toZero == 0 {
@@ -231,7 +231,7 @@ func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZe
// CopyOutFrom implements usermem.IO.CopyOutFrom.
func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if ars.NumBytes() == 0 {
@@ -276,7 +276,7 @@ func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRange
// CopyInTo implements usermem.IO.CopyInTo.
func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if ars.NumBytes() == 0 {
@@ -314,7 +314,7 @@ func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq
func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Do AddressSpace IO if applicable.
@@ -339,7 +339,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new
_, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
im := ims.Head()
var err error
@@ -357,7 +357,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new
func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Do AddressSpace IO if applicable.
@@ -382,7 +382,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch
_, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
im := ims.Head()
var err error
@@ -400,7 +400,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch
func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Do AddressSpace IO if applicable.
@@ -425,7 +425,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opt
_, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
im := ims.Head()
var err error
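Several of the io.go hunks above land inside the atomic word operations (SwapUint32, CompareAndSwapUint32, LoadUint32), which insist that the 4-byte target fall within a single internal mapping: a compare-and-swap is one atomic instruction on one address, so a word straddling two host mappings cannot be updated atomically. A simplified sketch of that constraint, using a plain byte slice where gVisor uses a safemem.BlockSeq:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"sync/atomic"
	"unsafe"
)

var errEFAULT = errors.New("bad address")

// casUint32 swaps the word only if the 4 bytes form one contiguous,
// 4-byte-aligned block; otherwise the update cannot be atomic.
func casUint32(block []byte, old, new uint32) (uint32, error) {
	if len(block) != 4 || uintptr(unsafe.Pointer(&block[0]))%4 != 0 {
		return 0, errEFAULT // atomicity is unachievable
	}
	ptr := (*uint32)(unsafe.Pointer(&block[0]))
	if atomic.CompareAndSwapUint32(ptr, old, new) {
		return old, nil
	}
	return atomic.LoadUint32(ptr), nil
}

func main() {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, 7)
	prev, err := casUint32(buf, 7, 42)
	fmt.Println(prev, err, binary.LittleEndian.Uint32(buf)) // 7 <nil> 42
}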
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 5583f62b2..9f4cc238f 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -18,12 +18,12 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// existingPMAsLocked checks that pmas exist for all addresses in ar, and
@@ -116,7 +116,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
var alignerr error
if !ok {
end = ar.End.RoundDown()
- alignerr = syserror.EFAULT
+ alignerr = linuxerr.EFAULT
}
ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
@@ -162,7 +162,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.Addr
var alignerr error
if !ok {
end = ar.End.RoundDown()
- alignerr = syserror.EFAULT
+ alignerr = linuxerr.EFAULT
}
ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
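Both pma.go hunks live in the page-rounding prologue of the pma-lookup paths: if rounding the end of the range up to a page boundary would overflow the address space, the code rounds down instead and records an EFAULT to report only after the aligned prefix has been handled. A sketch of the pattern, with a stand-in page size and error value:

package main

import (
	"errors"
	"fmt"
)

const pageSize = 4096

var errEFAULT = errors.New("bad address")

// roundUp reports failure if addr+pageSize-1 would wrap the address space.
func roundUp(addr uint64) (uint64, bool) {
	end := addr + pageSize - 1
	if end < addr {
		return 0, false
	}
	return end &^ (pageSize - 1), true
}

func roundDown(addr uint64) uint64 { return addr &^ (pageSize - 1) }

// alignRange mirrors the hunks above: on overflow, shrink to the
// aligned prefix and remember an EFAULT for later.
func alignRange(start, end uint64) (uint64, uint64, error) {
	aend, ok := roundUp(end)
	var alignerr error
	if !ok {
		aend = roundDown(end)
		alignerr = errEFAULT
	}
	return roundDown(start), aend, alignerr
}

func main() {
	s, e, err := alignRange(0x1234, ^uint64(0)-8)
	fmt.Printf("%#x-%#x %v\n", s, e, err)
}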
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index feafe76c1..69c6e77a7 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -21,7 +21,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// SpecialMappable implements memmap.MappingIdentity and memmap.Mappable with
@@ -95,7 +94,7 @@ func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, hostar
func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
- err = &memmap.BusError{syserror.EFAULT}
+ err = &memmap.BusError{linuxerr.EFAULT}
}
if source := optional.Intersect(memmap.MappableRange{0, m.fr.Length()}); source.Length() != 0 {
return []memmap.Translation{
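The special_mappable.go hunk is in Translate, which follows a partial-success contract: it still returns translations for whatever portion of the optional range the mappable backs, while flagging a required range that runs past the end with a bus error (surfacing as SIGBUS rather than SIGSEGV). A simplified sketch, with BusError and the range type reduced from gVisor's memmap equivalents:

package main

import (
	"errors"
	"fmt"
)

var errEFAULT = errors.New("bad address")

// BusError marks an error that should surface as SIGBUS.
type BusError struct{ Err error }

func (b *BusError) Error() string { return "bus error: " + b.Err.Error() }
func (b *BusError) Unwrap() error { return b.Err }

type mappableRange struct{ Start, End uint64 }

// translate returns the backed portion of optional, plus a bus error
// if required extends past the end of the mappable.
func translate(required, optional mappableRange, length uint64) (mappableRange, error) {
	var err error
	if required.End > length {
		err = &BusError{errEFAULT}
	}
	src := optional
	if src.End > length {
		src.End = length
	}
	return src, err
}

func main() {
	got, err := translate(mappableRange{0, 8192}, mappableRange{0, 16384}, 4096)
	fmt.Println(got, err) // {0 4096} bus error: bad address
}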
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index b51ec6aa7..256eb4afb 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -37,7 +37,7 @@ import (
func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr, at hostarch.AccessType, sp hostarch.Addr) error {
ar, ok := addr.RoundDown().ToRange(hostarch.PageSize)
if !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// Don't bother trying existingPMAsLocked; in most cases, if we did have
@@ -357,7 +357,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// All cases require that a vma exists at oldAddr.
vseg := mm.vmas.FindSegment(oldAddr)
if !vseg.Ok() {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Behavior matrix:
@@ -404,7 +404,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// "Grow" the existing vma by creating a new mergeable one.
vma := vseg.ValuePtr()
@@ -494,7 +494,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Check against RLIMIT_AS.
@@ -732,7 +732,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.
if !ok {
addr = mm.brk.End
mm.mappingMu.Unlock()
- return addr, syserror.EFAULT
+ return addr, linuxerr.EFAULT
}
switch {
@@ -972,7 +972,7 @@ func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint6
defer mm.mappingMu.RUnlock()
vseg := mm.vmas.FindSegment(addr)
if !vseg.Ok() {
- return 0, 0, syserror.EFAULT
+ return 0, 0, linuxerr.EFAULT
}
vma := vseg.ValuePtr()
return vma.numaPolicy, vma.numaNodemask, nil
@@ -1005,7 +1005,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy
if !vseg.Ok() || lastEnd < vseg.Start() {
// "EFAULT: ... there was an unmapped hole in the specified memory
// range specified [sic] by addr and len." - mbind(2)
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
vseg = mm.vmas.Isolate(vseg, ar)
vma := vseg.ValuePtr()
@@ -1193,7 +1193,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr hostarch.Addr) (futex.Key, error) {
ar, ok := addr.ToRange(4) // sizeof(int32).
if !ok {
- return futex.Key{}, syserror.EFAULT
+ return futex.Key{}, linuxerr.EFAULT
}
mm.mappingMu.RLock()
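The last syscalls.go hunk guards GetSharedFutexKey with addr.ToRange(4): a futex word is a 32-bit integer, so the 4-byte range starting at addr must fit without wrapping the address space. A stand-in sketch of that overflow check; Addr and this three-valued ToRange are simplifications of hostarch.Addr's actual API:

package main

import "fmt"

// Addr is a stand-in for hostarch.Addr.
type Addr uint64

// ToRange returns [a, a+length) and reports whether the end wrapped
// around the address space.
func (a Addr) ToRange(length uint64) (start, end Addr, ok bool) {
	end = a + Addr(length)
	return a, end, end >= a
}

func main() {
	if _, _, ok := Addr(^uint64(0) - 1).ToRange(4); !ok {
		fmt.Println("EFAULT: futex word range would wrap")
	}
}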
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index a411eabbf..5f8ab7ca3 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -289,7 +289,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRang
vma := vseg.ValuePtr()
if addr < vseg.Start() {
// TODO(jamieliu): Implement vma.growsDown here.
- return vbegin, vgap, syserror.EFAULT
+ return vbegin, vgap, linuxerr.EFAULT
}
perms := vma.effectivePerms
@@ -309,7 +309,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRang
}
// Ran out of vmas before ar.End.
- return vbegin, vgap, syserror.EFAULT
+ return vbegin, vgap, linuxerr.EFAULT
}
// getVecVMAsLocked ensures that vmas exist for all addresses in ars, and
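Finally, the two vma.go hunks are the failure exits of getVMAsLocked, which walks the vma set and returns EFAULT either when a hole precedes the next vma or when the vmas run out before the end of the requested range. A sketch of that coverage scan, with the vma set simplified to a sorted slice of ranges:

package main

import (
	"errors"
	"fmt"
)

var errEFAULT = errors.New("bad address")

type vma struct{ start, end uint64 }

// checkCovered fails the moment a hole appears before [start, end)
// is fully covered, mirroring the two EFAULT exits above.
func checkCovered(vmas []vma, start, end uint64) error {
	addr := start
	for _, v := range vmas {
		if v.end <= addr {
			continue // entirely before the range
		}
		if addr < v.start {
			return errEFAULT // hole before the next vma
		}
		addr = v.end
		if addr >= end {
			return nil // range fully covered
		}
	}
	return errEFAULT // ran out of vmas before end
}

func main() {
	vmas := []vma{{0x1000, 0x3000}, {0x3000, 0x5000}, {0x6000, 0x8000}}
	fmt.Println(checkCovered(vmas, 0x2000, 0x4000)) // <nil>
	fmt.Println(checkCovered(vmas, 0x4000, 0x7000)) // bad address (hole)
}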