| author | Jamie Liu <jamieliu@google.com> | 2020-10-13 12:43:03 -0700 |
|---|---|---|
| committer | gVisor bot <gvisor-bot@google.com> | 2020-10-13 12:45:51 -0700 |
| commit | 1a6948737323436ff5925f2e092fcbbce429deb3 (patch) | |
| tree | f50913d5232a19b3d7f127f04926467a168a885c | |
| parent | 7053f17859d6905408b8f724e89711910851eb1b (diff) | |
Don't read beyond EOF when inserting into sentry page cache.
The sentry page cache stores file contents at page granularity; this is
necessary for memory mappings. Thus file offset ranges passed to
fsutil.FileRangeSet.Fill() must be page-aligned. If the read callback passed to
Fill() returns (partial read, nil error) when reading up to EOF (which is the
case for p9.ClientFile.ReadAt() since 9P's Rread cannot convey both a partial
read and EOF), Fill() will re-invoke the read callback to try to read from EOF
to the end of the containing page, which is harmless but needlessly expensive.
Fix this by handling file size explicitly in fsutil.FileRangeSet.Fill().
PiperOrigin-RevId: 336934075
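To make the fix concrete, below is a minimal, self-contained sketch of the EOF clamping that this change wraps around the readAt callback inside Fill. It is illustrative only: readAtFunc and clampedRead are invented names, and a plain destination length stands in for the safemem.BlockSeq that the real code passes through.

```go
package main

import (
	"fmt"
	"io"
)

// readAtFunc mirrors the shape of the readAt callback that Fill receives:
// read up to dstLen bytes at file offset off, returning the number of bytes
// read. (The real callback fills a safemem.BlockSeq; a length is used here
// to keep the sketch self-contained.)
type readAtFunc func(dstLen, off uint64) (uint64, error)

// clampedRead never issues a read at or beyond fileSize. The caller leaves
// bytes past fileSize zeroed, and io.EOF is reported as soon as the known
// file size is reached, so the fill loop stops re-invoking readAt for the
// rest of the page.
func clampedRead(readAt readAtFunc, fileSize, dstLen, off uint64) (uint64, error) {
	if off >= fileSize {
		// Entirely past EOF: nothing to read.
		return 0, io.EOF
	}
	if off+dstLen > fileSize {
		// The destination straddles EOF: read only up to fileSize, and
		// report EOF if the shortened read completed successfully.
		rd := fileSize - off
		n, err := readAt(rd, off)
		if n == rd && err == nil {
			return n, io.EOF
		}
		return n, err
	}
	return readAt(dstLen, off)
}

func main() {
	const fileSize = 5000 // the file ends partway into its second 4 KiB page
	backing := func(dstLen, off uint64) (uint64, error) {
		fmt.Printf("readAt(len=%d, off=%d)\n", dstLen, off)
		return dstLen, nil // pretend the backing file satisfies the read fully
	}
	// Filling the page [4096, 8192) issues exactly one 904-byte read;
	// without the clamp, a second read at offset 5000 would follow.
	n, err := clampedRead(backing, fileSize, 4096, 4096)
	fmt.Println(n, err) // 904 EOF
}
```

Returning io.EOF as soon as the known file size is reached is what stops Fill from re-invoking readAt for the zero-filled remainder of the page, i.e. the redundant read described above.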
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | pkg/sentry/fs/fsutil/file_range_set.go | 21 |
| -rw-r--r-- | pkg/sentry/fs/fsutil/inode_cached.go | 4 |
| -rw-r--r-- | pkg/sentry/fs/tmpfs/inode_file.go | 2 |
| -rw-r--r-- | pkg/sentry/fsimpl/gofer/regular_file.go | 12 |
| -rw-r--r-- | pkg/sentry/fsimpl/tmpfs/regular_file.go | 2 |

5 files changed, 28 insertions, 13 deletions
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
index 9197aeb88..1dc409d38 100644
--- a/pkg/sentry/fs/fsutil/file_range_set.go
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -84,7 +84,8 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) memmap.FileRan
 // returns a successful partial read, Fill will call it repeatedly until all
 // bytes have been read.) EOF is handled consistently with the requirements of
 // mmap(2): bytes after EOF on the same page are zeroed; pages after EOF are
-// invalid.
+// invalid. fileSize is an upper bound on the file's size; bytes after fileSize
+// will be zeroed without calling readAt.
 //
 // Fill may read offsets outside of required, but will never read offsets
 // outside of optional. It returns a non-nil error if any error occurs, even
@@ -94,7 +95,7 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) memmap.FileRan
 // * required.Length() > 0.
 // * optional.IsSupersetOf(required).
 // * required and optional must be page-aligned.
-func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
+func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, fileSize uint64, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
 	gap := frs.LowerBoundGap(required.Start)
 	for gap.Ok() && gap.Start() < required.End {
 		if gap.Range().Length() == 0 {
@@ -107,7 +108,21 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
 		fr, err := mf.AllocateAndFill(gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
 			var done uint64
 			for !dsts.IsEmpty() {
-				n, err := readAt(ctx, dsts, gr.Start+done)
+				n, err := func() (uint64, error) {
+					off := gr.Start + done
+					if off >= fileSize {
+						return 0, io.EOF
+					}
+					if off+dsts.NumBytes() > fileSize {
+						rd := fileSize - off
+						n, err := readAt(ctx, dsts.TakeFirst64(rd), off)
+						if n == rd && err == nil {
+							return n, io.EOF
+						}
+						return n, err
+					}
+					return readAt(ctx, dsts, off)
+				}()
 				done += n
 				dsts = dsts.DropFirst64(n)
 				if err != nil {
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
index 85a23432b..82eda3e43 100644
--- a/pkg/sentry/fs/fsutil/inode_cached.go
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -644,7 +644,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
 				End:   fs.OffsetPageEnd(int64(gapMR.End)),
 			}
 			optMR := gap.Range()
-			err := rw.c.cache.Fill(rw.ctx, reqMR, maxFillRange(reqMR, optMR), mem, usage.PageCache, rw.c.backingFile.ReadToBlocksAt)
+			err := rw.c.cache.Fill(rw.ctx, reqMR, maxFillRange(reqMR, optMR), uint64(rw.c.attr.Size), mem, usage.PageCache, rw.c.backingFile.ReadToBlocksAt)
 			mem.MarkEvictable(rw.c, pgalloc.EvictableRange{optMR.Start, optMR.End})
 			seg, gap = rw.c.cache.Find(uint64(rw.offset))
 			if !seg.Ok() {
@@ -870,7 +870,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
 	}
 
 	mf := c.mfp.MemoryFile()
-	cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mf, usage.PageCache, c.backingFile.ReadToBlocksAt)
+	cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), uint64(c.attr.Size), mf, usage.PageCache, c.backingFile.ReadToBlocksAt)
 
 	var ts []memmap.Translation
 	var translatedEnd uint64
diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go
index 1dc75291d..fc0498f17 100644
--- a/pkg/sentry/fs/tmpfs/inode_file.go
+++ b/pkg/sentry/fs/tmpfs/inode_file.go
@@ -613,7 +613,7 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional
 	}
 
 	mf := f.kernel.MemoryFile()
-	cerr := f.data.Fill(ctx, required, optional, mf, f.memUsage, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
+	cerr := f.data.Fill(ctx, required, optional, uint64(f.attr.Size), mf, f.memUsage, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
 		// Newly-allocated pages are zeroed, so we don't need to do anything.
 		return dsts.NumBytes(), nil
 	})
diff --git a/pkg/sentry/fsimpl/gofer/regular_file.go b/pkg/sentry/fsimpl/gofer/regular_file.go
index eeaf6e444..f8b19bae7 100644
--- a/pkg/sentry/fsimpl/gofer/regular_file.go
+++ b/pkg/sentry/fsimpl/gofer/regular_file.go
@@ -395,7 +395,7 @@ func (rw *dentryReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error)
 				End:   gapEnd,
 			}
 			optMR := gap.Range()
-			err := rw.d.cache.Fill(rw.ctx, reqMR, maxFillRange(reqMR, optMR), mf, usage.PageCache, h.readToBlocksAt)
+			err := rw.d.cache.Fill(rw.ctx, reqMR, maxFillRange(reqMR, optMR), rw.d.size, mf, usage.PageCache, h.readToBlocksAt)
 			mf.MarkEvictable(rw.d, pgalloc.EvictableRange{optMR.Start, optMR.End})
 			seg, gap = rw.d.cache.Find(rw.off)
 			if !seg.Ok() {
@@ -403,10 +403,10 @@ func (rw *dentryReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error)
 				rw.d.handleMu.RUnlock()
 				return done, err
 			}
-			// err might have occurred in part of gap.Range() outside
-			// gapMR. Forget about it for now; if the error matters and
-			// persists, we'll run into it again in a later iteration of
-			// this loop.
+			// err might have occurred in part of gap.Range() outside gapMR
+			// (in particular, gap.End() might be beyond EOF). Forget about
+			// it for now; if the error matters and persists, we'll run
+			// into it again in a later iteration of this loop.
 		} else {
 			// Read directly from the file.
 			gapDsts := dsts.TakeFirst64(gapMR.Length())
@@ -780,7 +780,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab
 
 	mf := d.fs.mfp.MemoryFile()
 	h := d.readHandleLocked()
-	cerr := d.cache.Fill(ctx, required, maxFillRange(required, optional), mf, usage.PageCache, h.readToBlocksAt)
+	cerr := d.cache.Fill(ctx, required, maxFillRange(required, optional), d.size, mf, usage.PageCache, h.readToBlocksAt)
 
 	var ts []memmap.Translation
 	var translatedEnd uint64
diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file.go b/pkg/sentry/fsimpl/tmpfs/regular_file.go
index a199eb33d..ce4e3eda7 100644
--- a/pkg/sentry/fsimpl/tmpfs/regular_file.go
+++ b/pkg/sentry/fsimpl/tmpfs/regular_file.go
@@ -293,7 +293,7 @@ func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.
 		optional.End = pgend
 	}
 
-	cerr := rf.data.Fill(ctx, required, optional, rf.memFile, rf.memoryUsageKind, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
+	cerr := rf.data.Fill(ctx, required, optional, rf.size, rf.memFile, rf.memoryUsageKind, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
 		// Newly-allocated pages are zeroed, so we don't need to do anything.
 		return dsts.NumBytes(), nil
 	})
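The Fill documentation above ties EOF handling to the requirements of mmap(2): bytes after EOF on the same page read as zero, while pages entirely past EOF are invalid. The standalone Linux program below (unrelated to gVisor; the file name and sizes are arbitrary) can be used to observe the partial-page half of that contract.

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Create a file whose size (100 bytes) is not a multiple of the page size.
	f, err := os.CreateTemp("", "eof-demo-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	if _, err := f.Write(bytes.Repeat([]byte{'x'}, 100)); err != nil {
		panic(err)
	}

	pageSize := os.Getpagesize()
	// Map one full page even though the file ends after 100 bytes.
	data, err := syscall.Mmap(int(f.Fd()), 0, pageSize, syscall.PROT_READ, syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(data)

	// The last byte of file data is visible; bytes after EOF on the same
	// page read as zero. Touching a page entirely past EOF would SIGBUS.
	fmt.Println(data[99], data[100], data[pageSize-1]) // 120 0 0
}
```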