-rw-r--r--  pkg/sentry/fs/file_overlay.go                | 12
-rw-r--r--  pkg/sentry/fs/fsutil/file.go                 |  4
-rw-r--r--  pkg/sentry/fs/fsutil/host_file_mapper.go     | 21
-rw-r--r--  pkg/sentry/mm/syscalls.go                    |  2
-rw-r--r--  pkg/sentry/vfs/file_description_impl_util.go |  4
-rw-r--r--  test/syscalls/linux/mmap.cc                  | 41
6 files changed, 70 insertions(+), 14 deletions(-)
diff --git a/pkg/sentry/fs/file_overlay.go b/pkg/sentry/fs/file_overlay.go
index 031cd33ce..a27dd0b9a 100644
--- a/pkg/sentry/fs/file_overlay.go
+++ b/pkg/sentry/fs/file_overlay.go
@@ -16,6 +16,7 @@ package fs
import (
"io"
+ "math"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
@@ -360,10 +361,13 @@ func (*overlayFileOperations) ConfigureMMap(ctx context.Context, file *File, opt
return linuxerr.ENODEV
}
- // FIXME(jamieliu): This is a copy/paste of fsutil.GenericConfigureMMap,
- // which we can't use because the overlay implementation is in package fs,
- // so depending on fs/fsutil would create a circular dependency. Move
- // overlay to fs/overlay.
+ // TODO(gvisor.dev/issue/1624): This is a copy/paste of
+ // fsutil.GenericConfigureMMap, which we can't use because the overlay
+ // implementation is in package fs, so depending on fs/fsutil would create
+ // a circular dependency. VFS2 overlay doesn't have this issue.
+ if opts.Offset+opts.Length > math.MaxInt64 {
+ return linuxerr.EOVERFLOW
+ }
opts.Mappable = o
opts.MappingIdentity = file
file.IncRef()
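
The guard added here rejects any mapping whose end offset would not fit in a signed 64-bit file offset; the same check is repeated below in fsutil.GenericConfigureMMap and the VFS2 GenericConfigureMMap, while MemoryManager.MMap's existing uint64 wraparound check now reports EOVERFLOW as well. A rough standalone sketch (simplified signatures and error values, not gvisor's actual memmap types) of how the two conditions divide the work:

package main

import (
	"fmt"
	"math"
)

// checkEnd mirrors the guards: MMap's uint64 wraparound test followed by the
// off_t range test added to ConfigureMMap.
func checkEnd(offset, length uint64) error {
	if end := offset + length; end < offset {
		// The sum wrapped around uint64 entirely.
		return fmt.Errorf("EOVERFLOW: offset+length wraps uint64")
	}
	if offset+length > math.MaxInt64 {
		// The end fits in uint64 but falls outside the signed off_t range.
		return fmt.Errorf("EOVERFLOW: end offset exceeds MaxInt64")
	}
	return nil
}

func main() {
	const pageSize = 4096
	// Last page-aligned offset representable in off_t.
	last := uint64(math.MaxInt64) &^ (pageSize - 1)
	fmt.Println(checkEnd(last-pageSize, pageSize))              // <nil>: end still fits in off_t
	fmt.Println(checkEnd(last, pageSize))                       // end exceeds MaxInt64
	fmt.Println(checkEnd(math.MaxUint64-pageSize+1, pageSize))  // sum wraps uint64
}
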
diff --git a/pkg/sentry/fs/fsutil/file.go b/pkg/sentry/fs/fsutil/file.go
index 3ece73b81..38e3ed42d 100644
--- a/pkg/sentry/fs/fsutil/file.go
+++ b/pkg/sentry/fs/fsutil/file.go
@@ -16,6 +16,7 @@ package fsutil
import (
"io"
+ "math"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
@@ -210,6 +211,9 @@ func (FileNoMMap) ConfigureMMap(context.Context, *fs.File, *memmap.MMapOpts) err
// GenericConfigureMMap implements fs.FileOperations.ConfigureMMap for most
// filesystems that support memory mapping.
func GenericConfigureMMap(file *fs.File, m memmap.Mappable, opts *memmap.MMapOpts) error {
+ if opts.Offset+opts.Length > math.MaxInt64 {
+ return linuxerr.EOVERFLOW
+ }
opts.Mappable = m
opts.MappingIdentity = file
file.IncRef()
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go
index 23528bf25..37ddb1a3c 100644
--- a/pkg/sentry/fs/fsutil/host_file_mapper.go
+++ b/pkg/sentry/fs/fsutil/host_file_mapper.go
@@ -93,7 +93,8 @@ func NewHostFileMapper() *HostFileMapper {
func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
f.refsMu.Lock()
defer f.refsMu.Unlock()
- for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize {
+ chunkStart := mr.Start &^ chunkMask
+ for {
refs := f.refs[chunkStart]
pgs := pagesInChunk(mr, chunkStart)
if refs+pgs < refs {
@@ -101,6 +102,10 @@ func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
panic(fmt.Sprintf("HostFileMapper.IncRefOn(%v): adding %d page references to chunk %#x, which has %d page references", mr, pgs, chunkStart, refs))
}
f.refs[chunkStart] = refs + pgs
+ chunkStart += chunkSize
+ if chunkStart >= mr.End || chunkStart == 0 {
+ break
+ }
}
}
@@ -112,7 +117,8 @@ func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) {
f.refsMu.Lock()
defer f.refsMu.Unlock()
- for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize {
+ chunkStart := mr.Start &^ chunkMask
+ for {
refs := f.refs[chunkStart]
pgs := pagesInChunk(mr, chunkStart)
switch {
@@ -128,6 +134,10 @@ func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) {
case refs < pgs:
panic(fmt.Sprintf("HostFileMapper.DecRefOn(%v): removing %d page references from chunk %#x, which has %d page references", mr, pgs, chunkStart, refs))
}
+ chunkStart += chunkSize
+ if chunkStart >= mr.End || chunkStart == 0 {
+ break
+ }
}
}
@@ -161,7 +171,8 @@ func (f *HostFileMapper) forEachMappingBlockLocked(fr memmap.FileRange, fd int,
if write {
prot |= unix.PROT_WRITE
}
- for chunkStart := fr.Start &^ chunkMask; chunkStart < fr.End; chunkStart += chunkSize {
+ chunkStart := fr.Start &^ chunkMask
+ for {
m, ok := f.mappings[chunkStart]
if !ok {
addr, _, errno := unix.Syscall6(
@@ -201,6 +212,10 @@ func (f *HostFileMapper) forEachMappingBlockLocked(fr memmap.FileRange, fd int,
endOff = fr.End - chunkStart
}
fn(f.unsafeBlockFromChunkMapping(m.addr).TakeFirst64(endOff).DropFirst64(startOff))
+ chunkStart += chunkSize
+ if chunkStart >= fr.End || chunkStart == 0 {
+ break
+ }
}
return nil
}
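
The three loop rewrites in host_file_mapper.go are about termination rather than behavior: once a range touches the last chunk of the 64-bit space, chunkStart += chunkSize wraps around to 0 and the old chunkStart < mr.End condition never becomes false. A standalone sketch (illustrative chunk size and callback, not the real HostFileMapper state) of the wraparound-safe loop shape:

package main

import (
	"fmt"
	"math"
)

const (
	chunkShift = 24
	chunkSize  = 1 << chunkShift
	chunkMask  = chunkSize - 1
)

// forEachChunk visits every chunk overlapping [start, end). The increment is
// moved ahead of an explicit break that also tests for wraparound, so the
// loop terminates even when the final chunk sits at the top of uint64 space.
func forEachChunk(start, end uint64, fn func(chunkStart uint64)) {
	chunkStart := start &^ chunkMask
	for {
		fn(chunkStart)
		chunkStart += chunkSize
		if chunkStart >= end || chunkStart == 0 {
			break
		}
	}
}

func main() {
	// A range ending at the top of the address space: the old three-clause
	// for loop would spin forever here after chunkStart wrapped to 0.
	start := uint64(math.MaxUint64) - 2*chunkSize + 1
	forEachChunk(start, math.MaxUint64, func(c uint64) {
		fmt.Printf("chunk %#x\n", c)
	})
}
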
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 9e00c2cec..dc12ad357 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -89,7 +89,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostar
}
// Offset + length must not overflow.
if end := opts.Offset + opts.Length; end < opts.Offset {
- return 0, linuxerr.ENOMEM
+ return 0, linuxerr.EOVERFLOW
}
} else {
opts.Offset = 0
diff --git a/pkg/sentry/vfs/file_description_impl_util.go b/pkg/sentry/vfs/file_description_impl_util.go
index 5dab069ed..452f5f1f9 100644
--- a/pkg/sentry/vfs/file_description_impl_util.go
+++ b/pkg/sentry/vfs/file_description_impl_util.go
@@ -17,6 +17,7 @@ package vfs
import (
"bytes"
"io"
+ "math"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
@@ -399,6 +400,9 @@ func (fd *DynamicBytesFileDescriptionImpl) Write(ctx context.Context, src userme
// GenericConfigureMMap may be used by most implementations of
// FileDescriptionImpl.ConfigureMMap.
func GenericConfigureMMap(fd *FileDescription, m memmap.Mappable, opts *memmap.MMapOpts) error {
+ if opts.Offset+opts.Length > math.MaxInt64 {
+ return linuxerr.EOVERFLOW
+ }
opts.Mappable = m
opts.MappingIdentity = fd
fd.IncRef()
diff --git a/test/syscalls/linux/mmap.cc b/test/syscalls/linux/mmap.cc
index fda176261..bb2a0cb57 100644
--- a/test/syscalls/linux/mmap.cc
+++ b/test/syscalls/linux/mmap.cc
@@ -29,6 +29,7 @@
#include <sys/wait.h>
#include <unistd.h>
+#include <limits>
#include <vector>
#include "gmock/gmock.h"
@@ -913,13 +914,41 @@ TEST_F(MMapFileTest, MapOffsetBeyondEnd) {
::testing::KilledBySignal(SIGBUS), "");
}
-// Verify mmap fails when sum of length and offset overflows.
-TEST_F(MMapFileTest, MapLengthPlusOffsetOverflows) {
+TEST_F(MMapFileTest, MapSecondToLastPositivePage) {
SKIP_IF(!FSSupportsMap());
- const size_t length = static_cast<size_t>(-kPageSize);
- const off_t offset = kPageSize;
- ASSERT_THAT(Map(0, length, PROT_READ, MAP_PRIVATE, fd_.get(), offset),
- SyscallFailsWithErrno(ENOMEM));
+ EXPECT_THAT(
+ Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(),
+ (std::numeric_limits<off_t>::max() - kPageSize) & ~(kPageSize - 1)),
+ SyscallSucceeds());
+}
+
+TEST_F(MMapFileTest, MapLastPositivePage) {
+ SKIP_IF(!FSSupportsMap());
+ // For regular files, this should fail due to integer overflow of the end
+ // offset.
+ EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(),
+ std::numeric_limits<off_t>::max() & ~(kPageSize - 1)),
+ SyscallFailsWithErrno(EOVERFLOW));
+}
+
+TEST_F(MMapFileTest, MapFirstNegativePage) {
+ SKIP_IF(!FSSupportsMap());
+ EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(),
+ std::numeric_limits<off_t>::min()),
+ SyscallFailsWithErrno(EOVERFLOW));
+}
+
+TEST_F(MMapFileTest, MapSecondToLastNegativePage) {
+ SKIP_IF(!FSSupportsMap());
+ EXPECT_THAT(
+ Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), -(2 * kPageSize)),
+ SyscallFailsWithErrno(EOVERFLOW));
+}
+
+TEST_F(MMapFileTest, MapLastNegativePage) {
+ SKIP_IF(!FSSupportsMap());
+ EXPECT_THAT(Map(0, kPageSize, PROT_READ, MAP_SHARED, fd_.get(), -kPageSize),
+ SyscallFailsWithErrno(EOVERFLOW));
}
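
Taken together, the new tests probe both sides of the off_t boundary with page-aligned offsets. As a rough Go sketch (assuming an illustrative 4 KiB page in place of kPageSize), the offsets reduce to the following end-of-mapping calculations; only the second-to-last positive page yields an end that still fits in off_t:

package main

import (
	"fmt"
	"math"
)

func main() {
	const pageSize = 4096
	lastPositive := int64(math.MaxInt64) &^ (pageSize - 1)
	cases := []struct {
		name   string
		offset int64
	}{
		{"MapSecondToLastPositivePage", lastPositive - pageSize},
		{"MapLastPositivePage", lastPositive},
		{"MapFirstNegativePage", math.MinInt64},
		{"MapSecondToLastNegativePage", -2 * pageSize},
		{"MapLastNegativePage", -pageSize},
	}
	for _, c := range cases {
		// The sentry sees the offset as an unsigned byte offset.
		off := uint64(c.offset)
		end := off + pageSize
		// Rejected if the end wraps uint64 or lands beyond the off_t range.
		reject := end < off || end > math.MaxInt64
		fmt.Printf("%-30s offset=%#x end=%#x EOVERFLOW=%v\n", c.name, off, end, reject)
	}
}
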
// MAP_PRIVATE PROT_WRITE is allowed on read-only FDs.