diff options
author | Nicolas Lacasse <nlacasse@google.com> | 2018-06-20 13:05:00 -0700 |
---|---|---|
committer | Shentubot <shentubot@google.com> | 2018-06-20 13:05:54 -0700 |
commit | d93f55e863c598de9126a0316a813f872b11e29f (patch) | |
tree | d80a363f2003108ddd858297a3bd02e4883c0f1b /pkg/sentry/fs/fsutil | |
parent | af6f9f56f80027a89ee517b79502ca6183094a39 (diff) |
Remove some defers in hot paths in the filesystem code.
PiperOrigin-RevId: 201401727
Change-Id: Ia5589882ba58a00efb522ab372e206b7e8e62aee
Diffstat (limited to 'pkg/sentry/fs/fsutil')
-rw-r--r-- | pkg/sentry/fs/fsutil/inode_cached.go | 89 |
1 file changed, 59 insertions, 30 deletions
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go index 484668735..7c0f96ac2 100644 --- a/pkg/sentry/fs/fsutil/inode_cached.go +++ b/pkg/sentry/fs/fsutil/inode_cached.go @@ -179,8 +179,9 @@ func (c *CachingInodeOperations) Release() { // UnstableAttr implements fs.InodeOperations.UnstableAttr. func (c *CachingInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { c.attrMu.Lock() - defer c.attrMu.Unlock() - return c.attr, nil + attr := c.attr + c.attrMu.Unlock() + return attr, nil } // SetPermissions implements fs.InodeOperations.SetPermissions. @@ -463,15 +464,17 @@ func (c *CachingInodeOperations) Read(ctx context.Context, file *fs.File, dst us // // If Write partially fills src, a non-nil error is returned. func (c *CachingInodeOperations) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + // Hot path. Avoid defers. if src.NumBytes() == 0 { return 0, nil } c.attrMu.Lock() - defer c.attrMu.Unlock() // Compare Linux's mm/filemap.c:__generic_file_write_iter() => file_update_time(). c.touchModificationTimeLocked(ctx) - return src.CopyInTo(ctx, &inodeReadWriter{ctx, c, offset}) + n, err := src.CopyInTo(ctx, &inodeReadWriter{ctx, c, offset}) + c.attrMu.Unlock() + return n, err } type inodeReadWriter struct { @@ -482,15 +485,17 @@ type inodeReadWriter struct { // ReadToBlocks implements safemem.Reader.ReadToBlocks. func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { + // Hot path. Avoid defers. rw.c.dataMu.RLock() - defer rw.c.dataMu.RUnlock() // Compute the range to read. if rw.offset >= rw.c.attr.Size { + rw.c.dataMu.RUnlock() return 0, io.EOF } end := fs.ReadEndOffset(rw.offset, int64(dsts.NumBytes()), rw.c.attr.Size) if end == rw.offset { // dsts.NumBytes() == 0? 
+ rw.c.dataMu.RUnlock() return 0, nil } @@ -504,6 +509,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { // Get internal mappings from the cache. ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) if err != nil { + rw.c.dataMu.RUnlock() return done, err } @@ -513,6 +519,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { rw.offset += int64(n) dsts = dsts.DropFirst64(n) if err != nil { + rw.c.dataMu.RUnlock() return done, err } @@ -529,6 +536,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { dsts = dsts.DropFirst64(n) // Partial reads are fine. But we must stop reading. if n != dst.NumBytes() || err != nil { + rw.c.dataMu.RUnlock() return done, err } @@ -539,38 +547,44 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { break } } + rw.c.dataMu.RUnlock() return done, nil } +// maybeGrowFile grows the file's size if data has been written past the old +// size. +// +// Preconditions: rw.c.attrMu and rw.c.dataMu must be locked. +func (rw *inodeReadWriter) maybeGrowFile() { + // If the write ends beyond the file's previous size, it causes the + // file to grow. + if rw.offset > rw.c.attr.Size { + rw.c.attr.Size = rw.offset + rw.c.dirtyAttr.Size = true + } + if rw.offset > rw.c.attr.Usage { + // This is incorrect if CachingInodeOperations is caching a sparse + // file. (In Linux, keeping inode::i_blocks up to date is the + // filesystem's responsibility.) + rw.c.attr.Usage = rw.offset + rw.c.dirtyAttr.Usage = true + } +} + // WriteFromBlocks implements safemem.Writer.WriteFromBlocks. // // Preconditions: rw.c.attrMu must be locked. func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) { + // Hot path. Avoid defers. rw.c.dataMu.Lock() - defer rw.c.dataMu.Unlock() // Compute the range to write. 
end := fs.WriteEndOffset(rw.offset, int64(srcs.NumBytes())) if end == rw.offset { // srcs.NumBytes() == 0? + rw.c.dataMu.Unlock() return 0, nil } - defer func() { - // If the write ends beyond the file's previous size, it causes the - // file to grow. - if rw.offset > rw.c.attr.Size { - rw.c.attr.Size = rw.offset - rw.c.dirtyAttr.Size = true - } - if rw.offset > rw.c.attr.Usage { - // This is incorrect if CachingInodeOperations is caching a sparse - // file. (In Linux, keeping inode::i_blocks up to date is the - // filesystem's responsibility.) - rw.c.attr.Usage = rw.offset - rw.c.dirtyAttr.Usage = true - } - }() - mem := rw.c.platform.Memory() var done uint64 seg, gap := rw.c.cache.Find(uint64(rw.offset)) @@ -582,6 +596,8 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error segMR := seg.Range().Intersect(mr) ims, err := mem.MapInternal(seg.FileRangeOf(segMR), usermem.Write) if err != nil { + rw.maybeGrowFile() + rw.c.dataMu.Unlock() return done, err } @@ -592,6 +608,8 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error srcs = srcs.DropFirst64(n) rw.c.dirty.MarkDirty(segMR) if err != nil { + rw.maybeGrowFile() + rw.c.dataMu.Unlock() return done, err } @@ -608,6 +626,8 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error srcs = srcs.DropFirst64(n) // Partial writes are fine. But we must stop writing. if n != src.NumBytes() || err != nil { + rw.maybeGrowFile() + rw.c.dataMu.Unlock() return done, err } @@ -618,13 +638,15 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error break } } + rw.maybeGrowFile() + rw.c.dataMu.Unlock() return done, nil } // AddMapping implements memmap.Mappable.AddMapping. func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error { + // Hot path. Avoid defers. 
c.mapsMu.Lock() - defer c.mapsMu.Unlock() mapped := c.mappings.AddMapping(ms, ar, offset) // Do this unconditionally since whether we have c.backingFile.FD() >= 0 // can change across save/restore. @@ -636,13 +658,14 @@ func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.Mappi usage.MemoryAccounting.Inc(r.Length(), usage.Mapped) } } + c.mapsMu.Unlock() return nil } // RemoveMapping implements memmap.Mappable.RemoveMapping. func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) { + // Hot path. Avoid defers. c.mapsMu.Lock() - defer c.mapsMu.Unlock() unmapped := c.mappings.RemoveMapping(ms, ar, offset) for _, r := range unmapped { c.hostFileMapper.DecRefOn(r) @@ -653,6 +676,7 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma usage.MemoryAccounting.Dec(r.Length(), usage.Mapped) } } + c.mapsMu.Unlock() return } @@ -661,7 +685,6 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma // strategy. mem := c.platform.Memory() c.dataMu.Lock() - defer c.dataMu.Unlock() for _, r := range unmapped { if err := SyncDirty(ctx, r, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil { log.Warningf("Failed to writeback cached data %v: %v", r, err) @@ -669,6 +692,8 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma c.cache.Drop(r, mem) c.dirty.KeepClean(r) } + c.dataMu.Unlock() + c.mapsMu.Unlock() } // CopyMapping implements memmap.Mappable.CopyMapping. @@ -678,6 +703,7 @@ func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.Mapp // Translate implements memmap.Mappable.Translate. func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + // Hot path. Avoid defer. 
if !c.forcePageCache && c.backingFile.FD() >= 0 { return []memmap.Translation{ { @@ -689,7 +715,6 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option } c.dataMu.Lock() - defer c.dataMu.Unlock() // Constrain translations to c.attr.Size (rounded up) to prevent // translation to pages that may be concurrently truncated. @@ -697,6 +722,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option var beyondEOF bool if required.End > pgend { if required.Start >= pgend { + c.dataMu.Unlock() return nil, &memmap.BusError{io.EOF} } beyondEOF = true @@ -726,6 +752,8 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option translatedEnd = segMR.End } + c.dataMu.Unlock() + // Don't return the error returned by c.cache.Fill if it occurred outside // of required. if translatedEnd < required.End && cerr != nil { @@ -797,9 +825,8 @@ func (c *CachingInodeOperations) MapInternal(fr platform.FileRange, at usermem.A // underlying host fd and CachingInodeOperations is used as the platform.File // during translation. func (c *CachingInodeOperations) IncRef(fr platform.FileRange) { + // Hot path. Avoid defers. c.dataMu.Lock() - defer c.dataMu.Unlock() - seg, gap := c.refs.Find(fr.Start) for { switch { @@ -815,6 +842,7 @@ func (c *CachingInodeOperations) IncRef(fr platform.FileRange) { seg, gap = c.refs.InsertWithoutMerging(gap, newRange, 1).NextNonEmpty() default: c.refs.MergeAdjacent(fr) + c.dataMu.Unlock() return } } @@ -824,9 +852,8 @@ func (c *CachingInodeOperations) IncRef(fr platform.FileRange) { // underlying host fd and CachingInodeOperations is used as the platform.File // during translation. func (c *CachingInodeOperations) DecRef(fr platform.FileRange) { + // Hot path. Avoid defers. 
c.dataMu.Lock() - defer c.dataMu.Unlock() - seg := c.refs.FindSegment(fr.Start) for seg.Ok() && seg.Start() < fr.End { @@ -842,4 +869,6 @@ func (c *CachingInodeOperations) DecRef(fr platform.FileRange) { } } c.refs.MergeAdjacent(fr) + c.dataMu.Unlock() + } |