author     Bhasker Hariharan <bhaskerh@google.com>  2021-03-03 12:18:04 -0800
committer  gVisor bot <gvisor-bot@google.com>       2021-03-03 12:24:21 -0800
commit     3e69f5d088d121f1d3c4bf44ca637a48f13c4819 (patch)
tree       153fa7d51f509c8a5cf066a7aea90fd334a08899 /pkg/sentry/pgalloc
parent     80bc67c268dba0126cd258075c06d744399e0f02 (diff)
Add checklocks analyzer.
This validates that if a struct field is annotated with "// +checklocks:mu", where "mu" is a mutex field in the same struct, then the field is accessed only with "mu" held. All types that are guarded by a mutex must be annotated with // +checklocks:<mutex field name>. For more details, refer to README.md.

PiperOrigin-RevId: 360729328
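As a minimal sketch of the convention this commit enforces (the Counter type below is hypothetical, not part of the change):

package counter

import "sync"

// Counter shows the two annotation sites the analyzer checks.
type Counter struct {
	mu sync.Mutex

	// count may only be accessed with mu held; the analyzer flags
	// any unlocked access.
	// +checklocks:mu
	count int
}

// Inc acquires mu itself, so its access to count passes the check.
func (c *Counter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.count++
}

// incLocked declares that its caller must already hold c.mu, the same
// style of annotation this change adds to updateUsageLocked below.
// +checklocks:c.mu
func (c *Counter) incLocked() {
	c.count++
}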
Diffstat (limited to 'pkg/sentry/pgalloc')
-rw-r--r--  pkg/sentry/pgalloc/pgalloc.go | 120
1 file changed, 61 insertions(+), 59 deletions(-)
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index 58cc11a13..a4af3e21b 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -876,6 +876,7 @@ func (f *MemoryFile) UpdateUsage() error {
// in bs, sets committed[i] to 1 if the page is committed and 0 otherwise.
//
// Precondition: f.mu must be held; it may be unlocked and reacquired.
+// +checklocks:f.mu
func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(bs []byte, committed []byte) error) error {
// Track if anything changed to elide the merge. In the common case, we
// expect all segments to be committed and no merge to occur.
@@ -925,72 +926,73 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
r := seg.Range()
var checkErr error
- err := f.forEachMappingSlice(r, func(s []byte) {
- if checkErr != nil {
- return
- }
-
- // Ensure that we have sufficient buffer for the call
- // (one byte per page). The length of each slice must
- // be page-aligned.
- bufLen := len(s) / usermem.PageSize
- if len(buf) < bufLen {
- buf = make([]byte, bufLen)
- }
+ err := f.forEachMappingSlice(r,
+ func(s []byte) {
+ if checkErr != nil {
+ return
+ }
- // Query for new pages in core.
- // NOTE(b/165896008): mincore (which is passed as checkCommitted)
- // by f.UpdateUsage() might take a really long time. So unlock f.mu
- // while checkCommitted runs.
- f.mu.Unlock()
- err := checkCommitted(s, buf)
- f.mu.Lock()
- if err != nil {
- checkErr = err
- return
- }
+ // Ensure that we have sufficient buffer for the call
+ // (one byte per page). The length of each slice must
+ // be page-aligned.
+ bufLen := len(s) / usermem.PageSize
+ if len(buf) < bufLen {
+ buf = make([]byte, bufLen)
+ }
- // Scan each page and switch out segments.
- seg := f.usage.LowerBoundSegment(r.Start)
- for i := 0; i < bufLen; {
- if buf[i]&0x1 == 0 {
- i++
- continue
+ // Query for new pages in core.
+ // NOTE(b/165896008): mincore (which is passed as checkCommitted)
+ // by f.UpdateUsage() might take a really long time. So unlock f.mu
+ // while checkCommitted runs.
+ f.mu.Unlock()
+ err := checkCommitted(s, buf)
+ f.mu.Lock()
+ if err != nil {
+ checkErr = err
+ return
}
- // Scan to the end of this committed range.
- j := i + 1
- for ; j < bufLen; j++ {
- if buf[j]&0x1 == 0 {
- break
+
+ // Scan each page and switch out segments.
+ seg := f.usage.LowerBoundSegment(r.Start)
+ for i := 0; i < bufLen; {
+ if buf[i]&0x1 == 0 {
+ i++
+ continue
}
- }
- committedFR := memmap.FileRange{
- Start: r.Start + uint64(i*usermem.PageSize),
- End: r.Start + uint64(j*usermem.PageSize),
- }
- // Advance seg to committedFR.Start.
- for seg.Ok() && seg.End() < committedFR.Start {
- seg = seg.NextSegment()
- }
- // Mark pages overlapping committedFR as committed.
- for seg.Ok() && seg.Start() < committedFR.End {
- if seg.ValuePtr().canCommit() {
- seg = f.usage.Isolate(seg, committedFR)
- seg.ValuePtr().knownCommitted = true
- amount := seg.Range().Length()
- usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
- f.usageExpected += amount
- changedAny = true
+ // Scan to the end of this committed range.
+ j := i + 1
+ for ; j < bufLen; j++ {
+ if buf[j]&0x1 == 0 {
+ break
+ }
}
- seg = seg.NextSegment()
+ committedFR := memmap.FileRange{
+ Start: r.Start + uint64(i*usermem.PageSize),
+ End: r.Start + uint64(j*usermem.PageSize),
+ }
+ // Advance seg to committedFR.Start.
+ for seg.Ok() && seg.End() < committedFR.Start {
+ seg = seg.NextSegment()
+ }
+ // Mark pages overlapping committedFR as committed.
+ for seg.Ok() && seg.Start() < committedFR.End {
+ if seg.ValuePtr().canCommit() {
+ seg = f.usage.Isolate(seg, committedFR)
+ seg.ValuePtr().knownCommitted = true
+ amount := seg.Range().Length()
+ usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
+ f.usageExpected += amount
+ changedAny = true
+ }
+ seg = seg.NextSegment()
+ }
+ // Continue scanning for committed pages.
+ i = j + 1
}
- // Continue scanning for committed pages.
- i = j + 1
- }
- // Advance r.Start.
- r.Start += uint64(len(s))
- })
+ // Advance r.Start.
+ r.Start += uint64(len(s))
+ })
if checkErr != nil {
return checkErr
}
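Beyond the reindentation, the hunk above captures two reusable patterns: dropping f.mu around the slow checkCommitted call (as the NOTE explains, mincore may take a long time), and scanning the resulting byte vector for runs of committed pages. A simplified, self-contained sketch of both follows; pageScanner, slowCheck, and committedRuns are hypothetical stand-ins for MemoryFile, checkCommitted, and the inline loop, not gVisor code.

package main

import (
	"fmt"
	"sync"
)

// pageScanner stands in for MemoryFile: a mutex plus state guarded by
// it, annotated for the checklocks analyzer.
type pageScanner struct {
	mu sync.Mutex
	// +checklocks:mu
	committedPages int
}

// scanLocked mirrors updateUsageLocked: the caller holds s.mu, but the
// lock is dropped around the slow check and reacquired before any
// guarded state is touched.
// +checklocks:s.mu
func (s *pageScanner) scanLocked(buf []byte, slowCheck func([]byte) error) error {
	s.mu.Unlock()
	err := slowCheck(buf) // e.g. mincore filling one byte per page
	s.mu.Lock()
	if err != nil {
		return err
	}
	// s.mu is held again, so guarded fields may be updated; anything
	// observed before the unlock must be treated as stale.
	for _, r := range committedRuns(buf) {
		s.committedPages += r[1] - r[0]
	}
	return nil
}

// committedRuns performs the same run scan as the inner loop in the
// diff: the low bit of each byte marks a committed page, and adjacent
// set bits are coalesced into [start, end) page-index ranges.
func committedRuns(buf []byte) [][2]int {
	var runs [][2]int
	for i := 0; i < len(buf); {
		if buf[i]&0x1 == 0 {
			i++
			continue
		}
		// Scan to the end of this committed run.
		j := i + 1
		for ; j < len(buf) && buf[j]&0x1 != 0; j++ {
		}
		runs = append(runs, [2]int{i, j})
		i = j + 1 // buf[j] is known clear (or j == len(buf)), so skip it
	}
	return runs
}

func main() {
	s := &pageScanner{}
	s.mu.Lock()
	_ = s.scanLocked([]byte{1, 1, 0, 1}, func([]byte) error { return nil })
	s.mu.Unlock()
	fmt.Println(s.committedPages) // 3: pages 0-1 and 3 are committed
}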