Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/fs/copy_up.go                   |  11
-rw-r--r--  pkg/sentry/fs/gofer/inode_state.go         |   1
-rw-r--r--  pkg/sentry/fsimpl/overlay/filesystem.go    |   3
-rw-r--r--  pkg/sentry/pgalloc/pgalloc.go              | 120
-rw-r--r--  pkg/sentry/vfs/dentry.go                   |   8
-rw-r--r--  pkg/sentry/vfs/mount.go                    |  18
-rw-r--r--  pkg/sync/mutex_unsafe.go                   |   1
-rw-r--r--  pkg/sync/rwmutex_unsafe.go                 |   6
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint.go        |   2
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint_state.go  |   1
10 files changed, 99 insertions, 72 deletions
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 8e0aa9019..58deb25fc 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -303,17 +303,18 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
- next.Inode.overlay.dataMu.Lock()
+ overlay := next.Inode.overlay
+ overlay.dataMu.Lock()
childUpperInode.IncRef()
- next.Inode.overlay.upper = childUpperInode
- next.Inode.overlay.dataMu.Unlock()
+ overlay.upper = childUpperInode
+ overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
- next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
+ overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
- for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
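
The copy_up.go hunk above mostly hoists the repeated next.Inode.overlay expression into a local before the dataMu critical section. A minimal, self-contained sketch of that pattern; the overlayEntry/inode/dirent types here are illustrative stand-ins, not the gvisor ones:

package main

import (
	"fmt"
	"sync"
)

// overlayEntry is a toy stand-in for the structure guarded by dataMu; it
// is not the gvisor type, only an illustration of the hoisting pattern.
type overlayEntry struct {
	dataMu sync.Mutex
	upper  string
}

type inode struct{ overlay *overlayEntry }

type dirent struct{ Inode *inode }

func copyUp(next *dirent, newUpper string) {
	// Hoist the repeatedly dereferenced field into a local so every
	// access in the locked region is visibly against the same object.
	overlay := next.Inode.overlay
	overlay.dataMu.Lock()
	overlay.upper = newUpper
	overlay.dataMu.Unlock()
}

func main() {
	d := &dirent{Inode: &inode{overlay: &overlayEntry{}}}
	copyUp(d, "childUpperInode")
	fmt.Println(d.Inode.overlay.upper)
}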
diff --git a/pkg/sentry/fs/gofer/inode_state.go b/pkg/sentry/fs/gofer/inode_state.go
index 141e3c27f..e2af1d2ae 100644
--- a/pkg/sentry/fs/gofer/inode_state.go
+++ b/pkg/sentry/fs/gofer/inode_state.go
@@ -109,6 +109,7 @@ func (i *inodeFileState) loadLoading(_ struct{}) {
}
// afterLoad is invoked by stateify.
+// +checklocks:i.loading
func (i *inodeFileState) afterLoad() {
load := func() (err error) {
// See comment on i.loading().
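
The new +checklocks:i.loading line is an annotation for gVisor's checklocks static analyzer: it declares that the named mutex must already be held when the function is entered. A hedged sketch of how such an annotation is typically used, with a toy inodeState type rather than the real gofer one:

package main

import "sync"

// inodeState is illustrative only; the field name mirrors the gofer
// inodeFileState.loading mutex, but nothing else is the real type.
type inodeState struct {
	loading sync.Mutex
	attrs   string
}

// restore must be entered with i.loading held; with the checklocks
// analyzer enabled (via nogo), the annotation below turns that
// precondition into a static check at every call site.
// +checklocks:i.loading
func (i *inodeState) restore(v string) {
	i.attrs = v
}

func main() {
	i := &inodeState{}
	i.loading.Lock()
	i.restore("loaded") // ok: i.loading is held here
	i.loading.Unlock()
}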
diff --git a/pkg/sentry/fsimpl/overlay/filesystem.go b/pkg/sentry/fsimpl/overlay/filesystem.go
index f7f795b10..917709d75 100644
--- a/pkg/sentry/fsimpl/overlay/filesystem.go
+++ b/pkg/sentry/fsimpl/overlay/filesystem.go
@@ -85,6 +85,8 @@ func putDentrySlice(ds *[]*dentry) {
// but dentry slices are allocated lazily, and it's much easier to say "defer
// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
+//
+// +checklocks:fs.renameMu
func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*dentry) {
fs.renameMu.RUnlock()
if *dsp == nil {
@@ -110,6 +112,7 @@ func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*
putDentrySlice(*dsp)
}
+// +checklocks:fs.renameMu
func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
if *ds == nil {
fs.renameMu.Unlock()
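
The comment above the first hunk explains why these helpers take **[]*dentry: defer evaluates its arguments immediately, so passing a pointer to the (lazily allocated) slice pointer lets the deferred unlock still see a slice created later. A small standalone sketch of that mechanism; the names are illustrative, not the overlay types:

package main

import (
	"fmt"
	"sync"
)

// dentry is a toy stand-in for the overlay dentry type.
type dentry struct{ name string }

var renameMu sync.RWMutex

// runlockAndDrop mirrors the shape of renameMuRUnlockAndCheckDrop: it takes
// a **[]*dentry so that a slice allocated after the defer statement is still
// visible when the deferred call finally runs.
func runlockAndDrop(dsp **[]*dentry) {
	renameMu.RUnlock()
	if *dsp == nil {
		return
	}
	for _, d := range **dsp {
		fmt.Println("maybe drop:", d.name)
	}
}

func walk() {
	var ds *[]*dentry
	renameMu.RLock()
	// defer evaluates &ds now, but the slice itself can be allocated later.
	defer runlockAndDrop(&ds)

	// Allocate lazily, only when there is actually something to track.
	ds = &[]*dentry{{name: "child"}}
}

func main() { walk() }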
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index 58cc11a13..a4af3e21b 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -876,6 +876,7 @@ func (f *MemoryFile) UpdateUsage() error {
// in bs, sets committed[i] to 1 if the page is committed and 0 otherwise.
//
// Precondition: f.mu must be held; it may be unlocked and reacquired.
+// +checklocks:f.mu
func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(bs []byte, committed []byte) error) error {
// Track if anything changed to elide the merge. In the common case, we
// expect all segments to be committed and no merge to occur.
@@ -925,72 +926,73 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
r := seg.Range()
var checkErr error
- err := f.forEachMappingSlice(r, func(s []byte) {
- if checkErr != nil {
- return
- }
-
- // Ensure that we have sufficient buffer for the call
- // (one byte per page). The length of each slice must
- // be page-aligned.
- bufLen := len(s) / usermem.PageSize
- if len(buf) < bufLen {
- buf = make([]byte, bufLen)
- }
+ err := f.forEachMappingSlice(r,
+ func(s []byte) {
+ if checkErr != nil {
+ return
+ }
- // Query for new pages in core.
- // NOTE(b/165896008): mincore (which is passed as checkCommitted)
- // by f.UpdateUsage() might take a really long time. So unlock f.mu
- // while checkCommitted runs.
- f.mu.Unlock()
- err := checkCommitted(s, buf)
- f.mu.Lock()
- if err != nil {
- checkErr = err
- return
- }
+ // Ensure that we have sufficient buffer for the call
+ // (one byte per page). The length of each slice must
+ // be page-aligned.
+ bufLen := len(s) / usermem.PageSize
+ if len(buf) < bufLen {
+ buf = make([]byte, bufLen)
+ }
- // Scan each page and switch out segments.
- seg := f.usage.LowerBoundSegment(r.Start)
- for i := 0; i < bufLen; {
- if buf[i]&0x1 == 0 {
- i++
- continue
+ // Query for new pages in core.
+ // NOTE(b/165896008): mincore (which is passed as checkCommitted)
+ // by f.UpdateUsage() might take a really long time. So unlock f.mu
+ // while checkCommitted runs.
+ f.mu.Unlock()
+ err := checkCommitted(s, buf)
+ f.mu.Lock()
+ if err != nil {
+ checkErr = err
+ return
}
- // Scan to the end of this committed range.
- j := i + 1
- for ; j < bufLen; j++ {
- if buf[j]&0x1 == 0 {
- break
+
+ // Scan each page and switch out segments.
+ seg := f.usage.LowerBoundSegment(r.Start)
+ for i := 0; i < bufLen; {
+ if buf[i]&0x1 == 0 {
+ i++
+ continue
}
- }
- committedFR := memmap.FileRange{
- Start: r.Start + uint64(i*usermem.PageSize),
- End: r.Start + uint64(j*usermem.PageSize),
- }
- // Advance seg to committedFR.Start.
- for seg.Ok() && seg.End() < committedFR.Start {
- seg = seg.NextSegment()
- }
- // Mark pages overlapping committedFR as committed.
- for seg.Ok() && seg.Start() < committedFR.End {
- if seg.ValuePtr().canCommit() {
- seg = f.usage.Isolate(seg, committedFR)
- seg.ValuePtr().knownCommitted = true
- amount := seg.Range().Length()
- usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
- f.usageExpected += amount
- changedAny = true
+ // Scan to the end of this committed range.
+ j := i + 1
+ for ; j < bufLen; j++ {
+ if buf[j]&0x1 == 0 {
+ break
+ }
}
- seg = seg.NextSegment()
+ committedFR := memmap.FileRange{
+ Start: r.Start + uint64(i*usermem.PageSize),
+ End: r.Start + uint64(j*usermem.PageSize),
+ }
+ // Advance seg to committedFR.Start.
+ for seg.Ok() && seg.End() < committedFR.Start {
+ seg = seg.NextSegment()
+ }
+ // Mark pages overlapping committedFR as committed.
+ for seg.Ok() && seg.Start() < committedFR.End {
+ if seg.ValuePtr().canCommit() {
+ seg = f.usage.Isolate(seg, committedFR)
+ seg.ValuePtr().knownCommitted = true
+ amount := seg.Range().Length()
+ usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
+ f.usageExpected += amount
+ changedAny = true
+ }
+ seg = seg.NextSegment()
+ }
+ // Continue scanning for committed pages.
+ i = j + 1
}
- // Continue scanning for committed pages.
- i = j + 1
- }
- // Advance r.Start.
- r.Start += uint64(len(s))
- })
+ // Advance r.Start.
+ r.Start += uint64(len(s))
+ })
if checkErr != nil {
return checkErr
}
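
The NOTE(b/165896008) comment in this hunk is the reason for the restructured closure: checkCommitted (mincore) can be slow, so f.mu is dropped around it and re-acquired afterwards. A minimal sketch of that unlock-around-a-slow-probe shape, using a hypothetical memoryFile type rather than the real pgalloc one:

package main

import (
	"fmt"
	"sync"
)

// memoryFile is illustrative; only the locking shape matches the hunk above.
type memoryFile struct {
	mu        sync.Mutex
	committed uint64
}

// updateUsageLocked is entered with f.mu held (the annotation records that)
// but releases it around the probe because, like mincore in the real code,
// the probe may take a long time. Anything read before the unlock may be
// stale once the lock is re-acquired.
// +checklocks:f.mu
func (f *memoryFile) updateUsageLocked(probe func() (uint64, error)) error {
	f.mu.Unlock()
	pages, err := probe()
	f.mu.Lock()
	if err != nil {
		return err
	}
	f.committed += pages
	return nil
}

func main() {
	f := &memoryFile{}
	f.mu.Lock()
	err := f.updateUsageLocked(func() (uint64, error) { return 4096, nil })
	f.mu.Unlock()
	fmt.Println(f.committed, err)
}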
diff --git a/pkg/sentry/vfs/dentry.go b/pkg/sentry/vfs/dentry.go
index 320ab7ce1..e7ca24d96 100644
--- a/pkg/sentry/vfs/dentry.go
+++ b/pkg/sentry/vfs/dentry.go
@@ -211,12 +211,14 @@ func (vfs *VirtualFilesystem) PrepareDeleteDentry(mntns *MountNamespace, d *Dent
// AbortDeleteDentry must be called after PrepareDeleteDentry if the deletion
// fails.
+// +checklocks:d.mu
func (vfs *VirtualFilesystem) AbortDeleteDentry(d *Dentry) {
d.mu.Unlock()
}
// CommitDeleteDentry must be called after PrepareDeleteDentry if the deletion
// succeeds.
+// +checklocks:d.mu
func (vfs *VirtualFilesystem) CommitDeleteDentry(ctx context.Context, d *Dentry) {
d.dead = true
d.mu.Unlock()
@@ -270,6 +272,8 @@ func (vfs *VirtualFilesystem) PrepareRenameDentry(mntns *MountNamespace, from, t
// AbortRenameDentry must be called after PrepareRenameDentry if the rename
// fails.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -282,6 +286,8 @@ func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
// that was replaced by from.
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -297,6 +303,8 @@ func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, fro
// from and to are exchanged by rename(RENAME_EXCHANGE).
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) CommitRenameExchangeDentry(from, to *Dentry) {
from.mu.Unlock()
to.mu.Unlock()
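
These annotations document a lock handoff: PrepareDeleteDentry/PrepareRenameDentry acquire the dentry locks, and the Commit*/Abort* helpers are entered with them still held and release them. A toy sketch of the same shape, not the real VFS types:

package main

import (
	"fmt"
	"sync"
)

// dentry here is a toy type; only the lock-handoff shape is taken from the
// PrepareDeleteDentry / CommitDeleteDentry / AbortDeleteDentry trio above.
type dentry struct {
	mu   sync.Mutex
	dead bool
}

// prepareDelete acquires d.mu and leaves it held for the caller.
func prepareDelete(d *dentry) { d.mu.Lock() }

// commitDelete consumes the lock taken by prepareDelete; the annotation
// tells checklocks that d.mu must be held on entry.
// +checklocks:d.mu
func commitDelete(d *dentry) {
	d.dead = true
	d.mu.Unlock()
}

// abortDelete is the failure path: it only releases the lock.
// +checklocks:d.mu
func abortDelete(d *dentry) { d.mu.Unlock() }

func main() {
	d := &dentry{}
	prepareDelete(d)
	commitDelete(d)
	fmt.Println(d.dead)

	d2 := &dentry{}
	prepareDelete(d2)
	abortDelete(d2)
	fmt.Println(d2.dead)
}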
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index 7063066ff..bac9eb905 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -217,20 +217,21 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
return err
}
vfs.mountMu.Lock()
- vd.dentry.mu.Lock()
+ vdDentry := vd.dentry
+ vdDentry.mu.Lock()
for {
- if vd.dentry.dead {
- vd.dentry.mu.Unlock()
+ if vdDentry.dead {
+ vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
vd.DecRef(ctx)
return syserror.ENOENT
}
// vd might have been mounted over between vfs.GetDentryAt() and
// vfs.mountMu.Lock().
- if !vd.dentry.isMounted() {
+ if !vdDentry.isMounted() {
break
}
- nextmnt := vfs.mounts.Lookup(vd.mount, vd.dentry)
+ nextmnt := vfs.mounts.Lookup(vd.mount, vdDentry)
if nextmnt == nil {
break
}
@@ -243,13 +244,14 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
}
// This can't fail since we're holding vfs.mountMu.
nextmnt.root.IncRef()
- vd.dentry.mu.Unlock()
+ vdDentry.mu.Unlock()
vd.DecRef(ctx)
vd = VirtualDentry{
mount: nextmnt,
dentry: nextmnt.root,
}
- vd.dentry.mu.Lock()
+ vdDentry = vd.dentry
+ vdDentry.mu.Lock()
}
// TODO(gvisor.dev/issue/1035): Linux requires that either both the mount
// point and the mount root are directories, or neither are, and returns
@@ -258,7 +260,7 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
vfs.mounts.seq.BeginWrite()
vfs.connectLocked(mnt, vd, mntns)
vfs.mounts.seq.EndWrite()
- vd.dentry.mu.Unlock()
+ vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
return nil
}
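
The loop in this function re-checks, after taking vfs.mountMu, whether the looked-up dentry has been mounted over in the meantime, and walks up to the topmost mount. A simplified sketch of that check-again-under-the-lock idea, using a plain map as a stand-in for the mount table:

package main

import (
	"fmt"
	"sync"
)

var (
	mountMu sync.Mutex
	// stackedOn maps a mount point to the root of the mount stacked on top
	// of it; it is a toy stand-in for vfs.mounts.Lookup.
	stackedOn = map[string]string{"/mnt": "/mnt@1", "/mnt@1": "/mnt@2"}
)

// resolveMountPoint re-checks, under mountMu, whether the chosen point was
// mounted over after it was looked up, and walks to the topmost mount, the
// same shape as the ConnectMountAt loop above.
func resolveMountPoint(point string) string {
	mountMu.Lock()
	defer mountMu.Unlock()
	for {
		next, ok := stackedOn[point]
		if !ok {
			return point // nothing stacked on top; mount here
		}
		point = next
	}
}

func main() {
	fmt.Println(resolveMountPoint("/mnt")) // prints /mnt@2
}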
diff --git a/pkg/sync/mutex_unsafe.go b/pkg/sync/mutex_unsafe.go
index 4e49a9b89..411a80a8a 100644
--- a/pkg/sync/mutex_unsafe.go
+++ b/pkg/sync/mutex_unsafe.go
@@ -72,6 +72,7 @@ func (m *Mutex) Lock() {
// Preconditions:
// * m is locked.
// * m was locked by this goroutine.
+// +checklocksignore
func (m *Mutex) Unlock() {
noteUnlock(unsafe.Pointer(m))
m.m.Unlock()
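
+checklocksignore is the opposite escape hatch: it tells the analyzer to skip the function entirely, which is needed for these sync wrappers because their Unlock methods release locks taken elsewhere (the CrossGoroutineRWMutex variants in the next file may even be unlocked by a different goroutine). A toy example of the kind of function that needs it:

package main

import "sync"

// handoffCounter is a toy wrapper whose unlock is called by code that did
// not take the lock itself, so the analyzer cannot pair the two operations.
type handoffCounter struct {
	mu sync.Mutex
	n  int
}

// release is invoked with c.mu already held by some earlier caller; the
// +checklocksignore annotation suppresses analysis of this function, the
// same escape hatch the sync wrappers above use for their Unlock methods.
// +checklocksignore
func (c *handoffCounter) release() {
	c.n++
	c.mu.Unlock()
}

func main() {
	c := &handoffCounter{}
	c.mu.Lock()
	c.release()
}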
diff --git a/pkg/sync/rwmutex_unsafe.go b/pkg/sync/rwmutex_unsafe.go
index 4cf3fcd6e..892d3e641 100644
--- a/pkg/sync/rwmutex_unsafe.go
+++ b/pkg/sync/rwmutex_unsafe.go
@@ -105,6 +105,7 @@ func (rw *CrossGoroutineRWMutex) RUnlock() {
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
if RaceEnabled {
RaceDisable()
@@ -155,6 +156,7 @@ func (rw *CrossGoroutineRWMutex) Lock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.writerSem))
@@ -181,6 +183,7 @@ func (rw *CrossGoroutineRWMutex) Unlock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.readerSem))
@@ -250,6 +253,7 @@ func (rw *RWMutex) RLock() {
// Preconditions:
// * rw is locked for reading.
// * rw was locked by this goroutine.
+// +checklocksignore
func (rw *RWMutex) RUnlock() {
rw.m.RUnlock()
noteUnlock(unsafe.Pointer(rw))
@@ -279,6 +283,7 @@ func (rw *RWMutex) Lock() {
// Preconditions:
// * rw is locked for writing.
// * rw was locked by this goroutine.
+// +checklocksignore
func (rw *RWMutex) Unlock() {
rw.m.Unlock()
noteUnlock(unsafe.Pointer(rw))
@@ -288,6 +293,7 @@ func (rw *RWMutex) Unlock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
// No note change for DowngradeLock.
rw.m.DowngradeLock()
diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go
index f47b39ccc..9ce6868df 100644
--- a/pkg/tcpip/transport/tcp/endpoint.go
+++ b/pkg/tcpip/transport/tcp/endpoint.go
@@ -760,6 +760,7 @@ func (e *endpoint) LockUser() {
// protocol goroutine altogether.
//
// Precondition: e.LockUser() must have been called before calling e.UnlockUser()
+// +checklocks:e.mu
func (e *endpoint) UnlockUser() {
// Lock segment queue before checking so that we avoid a race where
// segments can be queued between the time we check if queue is empty
@@ -800,6 +801,7 @@ func (e *endpoint) StopWork() {
}
// ResumeWork resumes packet processing. Only to be used in tests.
+// +checklocks:e.mu
func (e *endpoint) ResumeWork() {
e.mu.Unlock()
}
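
UnlockUser gets the annotation because it consumes a lock acquired earlier by LockUser, typically in a different function. A small hedged sketch of that split, with a toy endpoint type rather than the TCP endpoint:

package main

import (
	"fmt"
	"sync"
)

// endpoint is a toy type; only the LockUser/UnlockUser split across two
// exported methods mirrors the TCP endpoint above.
type endpoint struct {
	mu    sync.Mutex
	queue []string
}

// LockUser acquires e.mu on behalf of the caller.
func (e *endpoint) LockUser() { e.mu.Lock() }

// UnlockUser drains work queued while the lock was held and then releases
// e.mu; the annotation records that e.mu must be held on entry.
// +checklocks:e.mu
func (e *endpoint) UnlockUser() {
	n := len(e.queue)
	e.queue = e.queue[:0]
	e.mu.Unlock()
	fmt.Println("processed", n, "queued segments")
}

func main() {
	e := &endpoint{}
	e.LockUser()
	e.queue = append(e.queue, "segment")
	e.UnlockUser()
}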
diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go
index e4368026f..7c15690a3 100644
--- a/pkg/tcpip/transport/tcp/endpoint_state.go
+++ b/pkg/tcpip/transport/tcp/endpoint_state.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
+// +checklocks:e.mu
func (e *endpoint) drainSegmentLocked() {
// Drain only up to once.
if e.drainDone != nil {