Diffstat (limited to 'pkg/sentry/fs/fsutil')
-rw-r--r--  pkg/sentry/fs/fsutil/BUILD              |  1
-rw-r--r--  pkg/sentry/fs/fsutil/README.md          | 11
-rw-r--r--  pkg/sentry/fs/fsutil/file_range_set.go  | 23
-rw-r--r--  pkg/sentry/fs/fsutil/inode_cached.go    | 42
4 files changed, 41 insertions, 36 deletions
diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD
index d41fc17cc..01098675d 100644
--- a/pkg/sentry/fs/fsutil/BUILD
+++ b/pkg/sentry/fs/fsutil/BUILD
@@ -85,6 +85,7 @@ go_library(
         "//pkg/sentry/fs",
         "//pkg/sentry/kernel/time",
         "//pkg/sentry/memmap",
+        "//pkg/sentry/pgalloc",
         "//pkg/sentry/platform",
         "//pkg/sentry/safemem",
         "//pkg/sentry/socket/unix/transport",
diff --git a/pkg/sentry/fs/fsutil/README.md b/pkg/sentry/fs/fsutil/README.md
index 6e677890c..8be367334 100644
--- a/pkg/sentry/fs/fsutil/README.md
+++ b/pkg/sentry/fs/fsutil/README.md
@@ -112,11 +112,12 @@ finds the file that was mapped and its `CachingInodeOperations`. It then calls
 It may choose to allocate more memory (i.e. do "readahead") to minimize
 subsequent faults.
 
-Memory that is allocated comes from a host tmpfs file (see `filemem.FileMem`).
-The host tmpfs file memory is brought up to date with the contents of the mapped
-file on its filesystem. The region of the host tmpfs file that reflects the
-mapped file is then mapped into the host address space of the application so
-that subsequent memory accesses do not repeatedly generate a `SIGSEGV`.
+Memory that is allocated comes from a host tmpfs file (see
+`pgalloc.MemoryFile`). The host tmpfs file memory is brought up to date with the
+contents of the mapped file on its filesystem. The region of the host tmpfs file
+that reflects the mapped file is then mapped into the host address space of the
+application so that subsequent memory accesses do not repeatedly generate a
+`SIGSEGV`.
 
 The range that was allocated, including any extra memory allocation to minimize
 faults, is marked dirty due to the write fault. This overcounts dirty memory if
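Note: the README passage above maps onto the new pgalloc API roughly as follows. This is a minimal orientation sketch, not code from this change; handleWriteFault and readFromBackingFile are hypothetical names, while AllocateAndFill, usage.PageCache, and platform.FileRange are the identifiers this diff actually uses.

package fsutil

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
)

// handleWriteFault (hypothetical) follows the README's description of a write
// fault: allocate page-cache memory from the MemoryFile, bring it up to date
// with the mapped file's contents, and return the host tmpfs file range that
// can then be mapped into the application's host address space.
func handleWriteFault(ctx context.Context, mf *pgalloc.MemoryFile, length uint64, readFromBackingFile func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) (platform.FileRange, error) {
	var done uint64
	fr, err := mf.AllocateAndFill(length, usage.PageCache, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
		// AllocateAndFill drives this reader until the allocation is
		// filled or the reader returns an error (such as io.EOF).
		n, err := readFromBackingFile(ctx, dsts, done)
		done += n
		return n, err
	}))
	if err != nil {
		return platform.FileRange{}, err
	}
	// fr now identifies pages of the host tmpfs file backing mf; mapping fr
	// into the application prevents repeated SIGSEGVs on this range.
	return fr, nil
}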
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
index dd7ab4b4a..32ebf64ff 100644
--- a/pkg/sentry/fs/fsutil/file_range_set.go
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -21,6 +21,7 @@ import (
 
 	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
@@ -77,7 +78,7 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) platform.FileR
 }
 
 // Fill attempts to ensure that all memmap.Mappable offsets in required are
-// mapped to a platform.File offset, by allocating from mem with the given
+// mapped to a platform.File offset, by allocating from mf with the given
 // memory usage kind and invoking readAt to store data into memory. (If readAt
 // returns a successful partial read, Fill will call it repeatedly until all
 // bytes have been read.) EOF is handled consistently with the requirements of
@@ -90,7 +91,7 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) platform.FileR
 //
 // Preconditions: required.Length() > 0. optional.IsSupersetOf(required).
 // required and optional must be page-aligned.
-func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mem platform.Memory, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
+func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
 	gap := frs.LowerBoundGap(required.Start)
 	for gap.Ok() && gap.Start() < required.End {
 		if gap.Range().Length() == 0 {
@@ -100,7 +101,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
 		gr := gap.Range().Intersect(optional)
 
 		// Read data into the gap.
-		fr, err := platform.AllocateAndFill(mem, gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
+		fr, err := mf.AllocateAndFill(gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
 			var done uint64
 			for !dsts.IsEmpty() {
 				n, err := readAt(ctx, dsts, gr.Start+done)
@@ -108,7 +109,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
 				dsts = dsts.DropFirst64(n)
 				if err != nil {
 					if err == io.EOF {
-						// platform.AllocateAndFill truncates down to a page
+						// MemoryFile.AllocateAndFill truncates down to a page
 						// boundary, but FileRangeSet.Fill is supposed to
 						// zero-fill to the end of the page in this case.
 						donepgaddr, ok := usermem.Addr(done).RoundUp()
@@ -143,20 +144,20 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
 // corresponding platform.FileRanges.
 //
 // Preconditions: mr must be page-aligned.
-func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mem platform.Memory) {
+func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mf *pgalloc.MemoryFile) {
 	seg := frs.LowerBoundSegment(mr.Start)
 	for seg.Ok() && seg.Start() < mr.End {
 		seg = frs.Isolate(seg, mr)
-		mem.DecRef(seg.FileRange())
+		mf.DecRef(seg.FileRange())
 		seg = frs.Remove(seg).NextSegment()
 	}
 }
 
 // DropAll removes all segments in mr, freeing the corresponding
 // platform.FileRanges.
-func (frs *FileRangeSet) DropAll(mem platform.Memory) {
+func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) {
 	for seg := frs.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
-		mem.DecRef(seg.FileRange())
+		mf.DecRef(seg.FileRange())
 	}
 	frs.RemoveAll()
 }
@@ -164,7 +165,7 @@ func (frs *FileRangeSet) DropAll(mem platform.Memory) {
 // Truncate updates frs to reflect Mappable truncation to the given length:
 // bytes after the new EOF on the same page are zeroed, and pages after the new
 // EOF are freed.
-func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) {
+func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
 	pgendaddr, ok := usermem.Addr(end).RoundUp()
 	if ok {
 		pgend := uint64(pgendaddr)
@@ -173,7 +174,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) {
 		frs.SplitAt(pgend)
 		seg := frs.LowerBoundSegment(pgend)
 		for seg.Ok() {
-			mem.DecRef(seg.FileRange())
+			mf.DecRef(seg.FileRange())
 			seg = frs.Remove(seg).NextSegment()
 		}
 
@@ -189,7 +190,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) {
 	if seg.Ok() {
 		fr := seg.FileRange()
 		fr.Start += end - seg.Start()
-		ims, err := mem.MapInternal(fr, usermem.Write)
+		ims, err := mf.MapInternal(fr, usermem.Write)
 		if err != nil {
 			// There's no good recourse from here. This means
 			// that we can't keep cached memory consistent with
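Note: a hedged sketch of how a caller drives the reworked FileRangeSet signatures. populateAndTruncate and its readAt parameter are hypothetical; Fill, Truncate, and DropAll take a *pgalloc.MemoryFile exactly as changed above.

package fsutil

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// populateAndTruncate (hypothetical) caches the first page of a file and then
// shrinks the cache, exercising the new *pgalloc.MemoryFile parameters.
func populateAndTruncate(ctx context.Context, frs *FileRangeSet, mf *pgalloc.MemoryFile, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
	// Fill requires page-aligned ranges; optional lets it read ahead.
	required := memmap.MappableRange{Start: 0, End: usermem.PageSize}
	optional := memmap.MappableRange{Start: 0, End: 2 * usermem.PageSize}
	if err := frs.Fill(ctx, required, optional, mf, usage.PageCache, readAt); err != nil {
		return err
	}
	// Simulate truncation to half a page: the tail of that page is zeroed
	// in place, and whole pages past the new EOF are DecRef'd back to mf.
	frs.Truncate(usermem.PageSize/2, mf)
	// Release every remaining cached page.
	frs.DropAll(mf)
	return nil
}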
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
index ef11676b8..9bd923678 100644
--- a/pkg/sentry/fs/fsutil/inode_cached.go
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -25,6 +25,7 @@ import (
 	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
 	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
 	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
@@ -62,8 +63,8 @@ type CachingInodeOperations struct {
 	// backingFile is a handle to a cached file object.
 	backingFile CachedFileObject
 
-	// platform is used to allocate memory that caches backingFile's contents.
-	platform platform.Platform
+	// mfp is used to allocate memory that caches backingFile's contents.
+	mfp pgalloc.MemoryFileProvider
 
 	// forcePageCache indicates the sentry page cache should be used regardless
 	// of whether the platform supports host mapped I/O or not. This must not be
@@ -96,7 +97,7 @@ type CachingInodeOperations struct {
 	dataMu sync.RWMutex `state:"nosave"`
 
 	// cache maps offsets into the cached file to offsets into
-	// platform.Memory() that store the file's data.
+	// mfp.MemoryFile() that store the file's data.
 	//
 	// cache is protected by dataMu.
 	cache FileRangeSet
@@ -148,13 +149,13 @@ type CachedFileObject interface {
 // NewCachingInodeOperations returns a new CachingInodeOperations backed by
 // a CachedFileObject and its initial unstable attributes.
 func NewCachingInodeOperations(ctx context.Context, backingFile CachedFileObject, uattr fs.UnstableAttr, forcePageCache bool) *CachingInodeOperations {
-	p := platform.FromContext(ctx)
-	if p == nil {
-		panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, platform.CtxPlatform))
+	mfp := pgalloc.MemoryFileProviderFromContext(ctx)
+	if mfp == nil {
+		panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFileProvider))
 	}
 	return &CachingInodeOperations{
 		backingFile:    backingFile,
-		platform:       p,
+		mfp:            mfp,
 		forcePageCache: forcePageCache,
 		attr:           uattr,
 		hostFileMapper: NewHostFileMapper(),
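Note: a minimal sketch of the contract the new constructor path puts on callers; newCachedInodeOps is hypothetical, while pgalloc.MemoryFileProviderFromContext and NewCachingInodeOperations behave as shown in the hunk above.

package fsutil

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
)

// newCachedInodeOps (hypothetical) shows what a filesystem now relies on: the
// context, not the platform, supplies the MemoryFileProvider that the page
// cache allocates from.
func newCachedInodeOps(ctx context.Context, backingFile CachedFileObject, uattr fs.UnstableAttr) (*CachingInodeOperations, bool) {
	// NewCachingInodeOperations panics if the context carries no
	// MemoryFileProvider, so check for it explicitly here.
	if pgalloc.MemoryFileProviderFromContext(ctx) == nil {
		return nil, false
	}
	return NewCachingInodeOperations(ctx, backingFile, uattr, false /* forcePageCache */), true
}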
@@ -311,7 +312,7 @@ func (c *CachingInodeOperations) Truncate(ctx context.Context, inode *fs.Inode,
 	// written back.
 	c.dataMu.Lock()
 	defer c.dataMu.Unlock()
-	c.cache.Truncate(uint64(size), c.platform.Memory())
+	c.cache.Truncate(uint64(size), c.mfp.MemoryFile())
 	c.dirty.KeepClean(memmap.MappableRange{uint64(size), oldpgend})
 
 	return nil
@@ -323,7 +324,7 @@ func (c *CachingInodeOperations) WriteOut(ctx context.Context, inode *fs.Inode)
 
 	// Write dirty pages back.
 	c.dataMu.Lock()
-	err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt)
+	err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.mfp.MemoryFile(), c.backingFile.WriteFromBlocksAt)
 	c.dataMu.Unlock()
 	if err != nil {
 		c.attrMu.Unlock()
@@ -527,7 +528,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
 		return 0, nil
 	}
 
-	mem := rw.c.platform.Memory()
+	mem := rw.c.mfp.MemoryFile()
 	var done uint64
 	seg, gap := rw.c.cache.Find(uint64(rw.offset))
 	for rw.offset < end {
@@ -613,7 +614,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error
 		return 0, nil
 	}
 
-	mem := rw.c.platform.Memory()
+	mf := rw.c.mfp.MemoryFile()
 	var done uint64
 	seg, gap := rw.c.cache.Find(uint64(rw.offset))
 	for rw.offset < end {
@@ -622,7 +623,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error
 		case seg.Ok() && seg.Start() < mr.End:
 			// Get internal mappings from the cache.
 			segMR := seg.Range().Intersect(mr)
-			ims, err := mem.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
+			ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
 			if err != nil {
 				rw.maybeGrowFile()
 				rw.c.dataMu.Unlock()
@@ -711,13 +712,13 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma
 	// Writeback dirty mapped memory now that there are no longer any
 	// mappings that reference it. This is our naive memory eviction
 	// strategy.
-	mem := c.platform.Memory()
+	mf := c.mfp.MemoryFile()
 	c.dataMu.Lock()
 	for _, r := range unmapped {
-		if err := SyncDirty(ctx, r, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil {
+		if err := SyncDirty(ctx, r, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil {
 			log.Warningf("Failed to writeback cached data %v: %v", r, err)
 		}
-		c.cache.Drop(r, mem)
+		c.cache.Drop(r, mf)
 		c.dirty.KeepClean(r)
 	}
 	c.dataMu.Unlock()
@@ -760,8 +761,8 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
 		optional.End = pgend
 	}
 
-	mem := c.platform.Memory()
-	cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mem, usage.PageCache, c.backingFile.ReadToBlocksAt)
+	mf := c.mfp.MemoryFile()
+	cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mf, usage.PageCache, c.backingFile.ReadToBlocksAt)
 
 	var ts []memmap.Translation
 	var translatedEnd uint64
@@ -769,7 +770,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
 		segMR := seg.Range().Intersect(optional)
 		ts = append(ts, memmap.Translation{
 			Source: segMR,
-			File:   mem,
+			File:   mf,
 			Offset: seg.FileRangeOf(segMR).Start,
 		})
 		if at.Write {
@@ -820,16 +821,17 @@ func (c *CachingInodeOperations) InvalidateUnsavable(ctx context.Context) error
 
 	// Sync the cache's contents so that if we have a host fd after restore,
 	// the remote file's contents are coherent.
+	mf := c.mfp.MemoryFile()
 	c.dataMu.Lock()
 	defer c.dataMu.Unlock()
-	if err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil {
+	if err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil {
 		return err
 	}
 
 	// Discard the cache so that it's not stored in saved state. This is safe
 	// because per InvalidateUnsavable invariants, no new translations can have
 	// been returned after we invalidated all existing translations above.
-	c.cache.DropAll(c.platform.Memory())
+	c.cache.DropAll(mf)
 	c.dirty.RemoveAll()
 
 	return nil
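Note: the writeback-then-drop pattern used by RemoveMapping above, distilled into one hedged sketch. evictRange is a hypothetical helper, but every call in it (SyncDirty, FileRangeSet.Drop, the dirty set's KeepClean) appears in the RemoveMapping hunk with the same arguments.

package fsutil

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
)

// evictRange (hypothetical) captures the eviction sequence: flush dirty bytes
// in mr to the backing file, release the cached pages back to the MemoryFile,
// and mark the range clean so stale dirtiness is not retained. It assumes
// c.attrMu is held (as in RemoveMapping) so that c.attr.Size is stable.
func evictRange(ctx context.Context, c *CachingInodeOperations, mr memmap.MappableRange) error {
	mf := c.mfp.MemoryFile()
	c.dataMu.Lock()
	defer c.dataMu.Unlock()
	if err := SyncDirty(ctx, mr, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil {
		return err
	}
	c.cache.Drop(mr, mf)
	c.dirty.KeepClean(mr)
	return nil
}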