author    | gVisor bot <gvisor-bot@google.com> | 2020-08-05 01:23:10 +0000
committer | gVisor bot <gvisor-bot@google.com> | 2020-08-05 01:23:10 +0000
commit    | 4fbe1cc8b9fc8d371bbf69a71103a991f07ebe5e (patch)
tree      | f79ed4c824c107d28037c8bce465b4c661008935 /pkg
parent    | d064e8d6dfe4785f64f768bda985ce421fa234b7 (diff)
parent    | b44408b40e3e8762a77ccf5eeb7f2ef567235c43 (diff)
Merge release-20200622.1-333-gb44408b40 (automated)
Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/abi/linux/linux_abi_autogen_unsafe.go      |  14
-rw-r--r-- | pkg/refs/refcounter.go                         |   7
-rw-r--r-- | pkg/sentry/fsimpl/tmpfs/inode_refs.go          | 112
-rw-r--r-- | pkg/sentry/fsimpl/tmpfs/tmpfs.go               |  27
-rw-r--r-- | pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go |  24
5 files changed, 28 insertions, 156 deletions
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 1456b1fde..3cc56623f 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -150,7 +150,7 @@ func (s *Statx) Packed() bool {
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
 func (s *Statx) MarshalUnsafe(dst []byte) {
-    if s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
+    if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
         safecopy.CopyIn(dst, unsafe.Pointer(s))
     } else {
         // Type Statx doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -171,7 +171,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
 func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
-    if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+    if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
         // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
         s.MarshalBytes(buf) // escapes: fallback.
@@ -227,7 +227,7 @@ func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
 
 // WriteTo implements io.WriterTo.WriteTo.
 func (s *Statx) WriteTo(w io.Writer) (int64, error) {
-    if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+    if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
         // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := make([]byte, s.SizeBytes())
         s.MarshalBytes(buf)
@@ -1833,7 +1833,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
 func (i *IPTIP) UnmarshalUnsafe(src []byte) {
-    if i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() {
+    if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
         safecopy.CopyOut(unsafe.Pointer(i), src)
     } else {
         // Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -1844,7 +1844,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) {
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
 func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
-    if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+    if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
         // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
         i.MarshalBytes(buf) // escapes: fallback.
@@ -1874,7 +1874,7 @@ func (i *IPTIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
 func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
-    if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+    if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
         // Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
         buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
         length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -1900,7 +1900,7 @@ func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
 
 // WriteTo implements io.WriterTo.WriteTo.
 func (i *IPTIP) WriteTo(w io.Writer) (int64, error) {
-    if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+    if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
         // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := make([]byte, i.SizeBytes())
         i.MarshalBytes(buf)
diff --git a/pkg/refs/refcounter.go b/pkg/refs/refcounter.go
index 3f39edb66..61790221b 100644
--- a/pkg/refs/refcounter.go
+++ b/pkg/refs/refcounter.go
@@ -215,8 +215,6 @@ type AtomicRefCount struct {
 // LeakMode configures the leak checker.
 type LeakMode uint32
 
-// TODO(gvisor.dev/issue/1624): Simplify down to two modes once vfs1 ref
-// counting is gone.
 const (
     // UninitializedLeakChecking indicates that the leak checker has not yet been initialized.
     UninitializedLeakChecking LeakMode = iota
@@ -246,11 +244,6 @@ func SetLeakMode(mode LeakMode) {
     atomic.StoreUint32(&leakMode, uint32(mode))
 }
 
-// GetLeakMode returns the current leak mode.
-func GetLeakMode() LeakMode {
-    return LeakMode(atomic.LoadUint32(&leakMode))
-}
-
 const maxStackFrames = 40
 
 type fileLine struct {
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
deleted file mode 100644
index 3d3f6ff11..000000000
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package tmpfs
-
-import (
-    "sync/atomic"
-
-    "gvisor.dev/gvisor/pkg/log"
-    refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
-    "runtime"
-)
-
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var inodeownerType *inode
-
-// Refs implements refs.RefCounter. It keeps a reference count using atomic
-// operations and calls the destructor when the count reaches zero.
-//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
-// +stateify savable
-type inodeRefs struct {
-    // refCount is composed of two fields:
-    //
-    //  [32-bit speculative references]:[32-bit real references]
-    //
-    // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
-    // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
-    // used.
-    refCount int64
-}
-
-func (r *inodeRefs) finalize() {
-    var note string
-    switch refs_vfs1.GetLeakMode() {
-    case refs_vfs1.NoLeakChecking:
-        return
-    case refs_vfs1.UninitializedLeakChecking:
-        note = "(Leak checker uninitialized): "
-    }
-    if n := r.ReadRefs(); n != 0 {
-        log.Warningf("%sAtomicRefCount %p owned by %T garbage collected with ref count of %d (want 0)", note, r, inodeownerType, n)
-    }
-}
-
-// EnableLeakCheck checks for reference leaks when Refs gets garbage collected.
-func (r *inodeRefs) EnableLeakCheck() {
-    if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking {
-        runtime.SetFinalizer(r, (*inodeRefs).finalize)
-    }
-}
-
-// ReadRefs returns the current number of references. The returned count is
-// inherently racy and is unsafe to use without external synchronization.
-func (r *inodeRefs) ReadRefs() int64 {
-
-    return atomic.LoadInt64(&r.refCount) + 1
-}
-
-// IncRef implements refs.RefCounter.IncRef.
-//
-//go:nosplit
-func (r *inodeRefs) IncRef() {
-    if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-        panic("Incrementing non-positive ref count")
-    }
-}
-
-// TryIncRef implements refs.RefCounter.TryIncRef.
-//
-// To do this safely without a loop, a speculative reference is first acquired
-// on the object. This allows multiple concurrent TryIncRef calls to distinguish
-// other TryIncRef calls from genuine references held.
-//
-//go:nosplit
-func (r *inodeRefs) TryIncRef() bool {
-    const speculativeRef = 1 << 32
-    v := atomic.AddInt64(&r.refCount, speculativeRef)
-    if int32(v) < 0 {
-
-        atomic.AddInt64(&r.refCount, -speculativeRef)
-        return false
-    }
-
-    atomic.AddInt64(&r.refCount, -speculativeRef+1)
-    return true
-}
-
-// DecRef implements refs.RefCounter.DecRef.
-//
-// Note that speculative references are counted here. Since they were added
-// prior to real references reaching zero, they will successfully convert to
-// real references. In other words, we see speculative references only in the
-// following case:
-//
-// A: TryIncRef [speculative increase => sees non-negative references]
-// B: DecRef [real decrease]
-// A: TryIncRef [transform speculative to real]
-//
-//go:nosplit
-func (r *inodeRefs) DecRef(destroy func()) {
-    switch v := atomic.AddInt64(&r.refCount, -1); {
-    case v < -1:
-        panic("Decrementing non-positive ref count")
-
-    case v == -1:
-
-        if destroy != nil {
-            destroy()
-        }
-    }
-}
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
index 5640380dc..68e615e8b 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
@@ -285,10 +285,13 @@ type inode struct {
     // fs is the owning filesystem. fs is immutable.
     fs *filesystem
 
+    // refs is a reference count. refs is accessed using atomic memory
+    // operations.
+    //
     // A reference is held on all inodes as long as they are reachable in the
     // filesystem tree, i.e. nlink is nonzero. This reference is dropped when
     // nlink reaches 0.
-    refs inodeRefs
+    refs int64
 
     // xattrs implements extended attributes.
     //
@@ -324,6 +327,7 @@ func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth
         panic("file type is required in FileMode")
     }
     i.fs = fs
+    i.refs = 1
     i.mode = uint32(mode)
     i.uid = uint32(kuid)
     i.gid = uint32(kgid)
@@ -335,7 +339,6 @@ func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth
     i.mtime = now
     // i.nlink initialized by caller
     i.impl = impl
-    i.refs.EnableLeakCheck()
 }
 
 // incLinksLocked increments i's link count.
@@ -366,15 +369,25 @@ func (i *inode) decLinksLocked(ctx context.Context) {
 }
 
 func (i *inode) incRef() {
-    i.refs.IncRef()
+    if atomic.AddInt64(&i.refs, 1) <= 1 {
+        panic("tmpfs.inode.incRef() called without holding a reference")
+    }
 }
 
 func (i *inode) tryIncRef() bool {
-    return i.refs.TryIncRef()
+    for {
+        refs := atomic.LoadInt64(&i.refs)
+        if refs == 0 {
+            return false
+        }
+        if atomic.CompareAndSwapInt64(&i.refs, refs, refs+1) {
+            return true
+        }
+    }
 }
 
 func (i *inode) decRef(ctx context.Context) {
-    i.refs.DecRef(func() {
+    if refs := atomic.AddInt64(&i.refs, -1); refs == 0 {
         i.watches.HandleDeletion(ctx)
         if regFile, ok := i.impl.(*regularFile); ok {
             // Release memory used by regFile to store data. Since regFile is
@@ -382,7 +395,9 @@ func (i *inode) decRef(ctx context.Context) {
             // metadata.
             regFile.data.DropAll(regFile.memFile)
         }
-    })
+    } else if refs < 0 {
+        panic("tmpfs.inode.decRef() called without holding a reference")
+    }
 }
 
 func (i *inode) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go b/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
index d88136656..1b617bd35 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
@@ -58,31 +58,7 @@ func (x *dentryEntry) StateLoad(m state.Source) {
     m.Load(1, &x.prev)
 }
 
-func (x *inodeRefs) StateTypeName() string {
-    return "pkg/sentry/fsimpl/tmpfs.inodeRefs"
-}
-
-func (x *inodeRefs) StateFields() []string {
-    return []string{
-        "refCount",
-    }
-}
-
-func (x *inodeRefs) beforeSave() {}
-
-func (x *inodeRefs) StateSave(m state.Sink) {
-    x.beforeSave()
-    m.Save(0, &x.refCount)
-}
-
-func (x *inodeRefs) afterLoad() {}
-
-func (x *inodeRefs) StateLoad(m state.Source) {
-    m.Load(0, &x.refCount)
-}
-
 func init() {
     state.Register((*dentryList)(nil))
     state.Register((*dentryEntry)(nil))
-    state.Register((*inodeRefs)(nil))
 }
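The tmpfs change above swaps the generated inodeRefs type for a bare int64 manipulated with atomic operations. The following is a minimal standalone sketch of that pattern, not gVisor code: the refCounter type, newRefCounter, and the main function are invented here purely to illustrate the increment, CAS-based try-increment, and destroy-on-zero behavior that the new tmpfs.inode methods implement.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// refCounter is an illustrative inline reference count: a plain int64
// accessed atomically, starting at 1 for the creator's reference.
type refCounter struct {
	refs int64
}

func newRefCounter() *refCounter {
	return &refCounter{refs: 1}
}

// incRef assumes the caller already holds a reference, so the count must
// have been positive before the increment.
func (r *refCounter) incRef() {
	if atomic.AddInt64(&r.refs, 1) <= 1 {
		panic("incRef called without holding a reference")
	}
}

// tryIncRef is a CompareAndSwap loop that refuses to resurrect an object
// whose count has already reached zero.
func (r *refCounter) tryIncRef() bool {
	for {
		refs := atomic.LoadInt64(&r.refs)
		if refs == 0 {
			return false
		}
		if atomic.CompareAndSwapInt64(&r.refs, refs, refs+1) {
			return true
		}
	}
}

// decRef runs destroy exactly once, when the count reaches zero.
func (r *refCounter) decRef(destroy func()) {
	switch refs := atomic.AddInt64(&r.refs, -1); {
	case refs == 0:
		if destroy != nil {
			destroy()
		}
	case refs < 0:
		panic("decRef called without holding a reference")
	}
}

func main() {
	r := newRefCounter()
	fmt.Println(r.tryIncRef()) // true: count goes 1 -> 2
	r.decRef(nil)              // 2 -> 1
	r.decRef(func() { fmt.Println("destroyed") })
	fmt.Println(r.tryIncRef()) // false: count already reached zero
}
```

Compared with the deleted inodeRefs, which packed speculative references into the upper 32 bits precisely to avoid a CompareAndSwap loop in TryIncRef, the inline count accepts that loop in exchange for dropping the generated type along with its leak-check finalizer and state-save registration.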