package cgroupfs

import (
	"fmt"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/refsvfs2"
)

// enableLogging indicates whether reference-related events should be logged (with
// stack traces). This is false by default and should only be set to true for
// debugging purposes, as it can generate an extremely large amount of output
// and drastically degrade performance.
const direnableLogging = false

// obj is used to customize logging. Note that we use a pointer to T so that
// we do not copy the entire object when passed as a format parameter.
var dirobj *dir

// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
// NOTE: Do not introduce additional fields to the Refs struct. It is used by
// many filesystem objects, and we want to keep it as small as possible (i.e.,
// the same size as using an int64 directly) to avoid taking up extra cache
// space. In general, this template should not be extended at the cost of
// performance. If it does not offer enough flexibility for a particular object
// (example: b/187877947), we should implement the RefCounter/CheckedObject
// interfaces manually.
//
// +stateify savable
type dirRefs struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a CompareAndSwap
	// loop. See IncRef, DecRef and TryIncRef for details of how these fields are
	// used.
	refCount int64
}
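
// dirRefsCounts is an illustrative sketch, not part of the generated template:
// it decodes the packed refCount value into its speculative (upper 32 bits) and
// real (lower 32 bits) halves, which may help when reading IncRef, TryIncRef
// and DecRef below. Like ReadRefs, the result is inherently racy.
func dirRefsCounts(r *dirRefs) (speculativeRefs, realRefs int32) {
	v := atomic.LoadInt64(&r.refCount)
	return int32(v >> 32), int32(v)
}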

// InitRefs initializes r with one reference and, if enabled, activates leak
// checking.
func (r *dirRefs) InitRefs() {
	atomic.StoreInt64(&r.refCount, 1)
	refsvfs2.Register(r)
}
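
// exampleNewDirRefs is an illustrative sketch, not part of the generated
// template: an owner calls InitRefs exactly once when the object holding the
// dirRefs is constructed, which sets the count to 1 and, when leak checking is
// enabled, registers the object with refsvfs2.
func exampleNewDirRefs() *dirRefs {
	r := &dirRefs{}
	r.InitRefs()
	return r
}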

// RefType implements refsvfs2.CheckedObject.RefType.
func (r *dirRefs) RefType() string {
	return fmt.Sprintf("%T", dirobj)[1:]
}

// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
func (r *dirRefs) LeakMessage() string {
	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
}

// LogRefs implements refsvfs2.CheckedObject.LogRefs.
func (r *dirRefs) LogRefs() bool {
	return direnableLogging
}

// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *dirRefs) ReadRefs() int64 {
	return atomic.LoadInt64(&r.refCount)
}

// IncRef implements refs.RefCounter.IncRef.
//
//go:nosplit
func (r *dirRefs) IncRef() {
	v := atomic.AddInt64(&r.refCount, 1)
	if direnableLogging {
		refsvfs2.LogIncRef(r, v)
	}
	if v <= 1 {
		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
	}
}

// TryIncRef implements refs.RefCounter.TryIncRef.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to distinguish
// other TryIncRef calls from genuine references held.
//
//go:nosplit
func (r *dirRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// This object has already been freed; drop the speculative reference.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}

	// Turn the speculative reference into a real reference.
	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
	if direnableLogging {
		refsvfs2.LogTryIncRef(r, v)
	}
	return true
}
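
// exampleBorrowDirRef is an illustrative sketch, not part of the generated
// template: it shows the common pattern of taking a reference with TryIncRef
// only if the object has not already dropped to zero references, and releasing
// it again afterwards. Passing a nil destroy callback is a simplification; an
// owner would pass its own teardown function.
func exampleBorrowDirRef(r *dirRefs, use func()) bool {
	if !r.TryIncRef() {
		// The object is already being destroyed; do not touch it.
		return false
	}
	use()
	r.DecRef(nil)
	return true
}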

// DecRef implements refs.RefCounter.DecRef.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
//	A: TryIncRef [speculative increase => sees non-negative references]
//	B: DecRef [real decrease]
//	A: TryIncRef [transform speculative to real]
//
//go:nosplit
func (r *dirRefs) DecRef(destroy func()) {
	v := atomic.AddInt64(&r.refCount, -1)
	if direnableLogging {
		refsvfs2.LogDecRef(r, v)
	}
	switch {
	case v < 0:
		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))

	case v == 0:
		refsvfs2.Unregister(r)
		if destroy != nil {
			destroy()
		}
	}
}
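
// exampleReleaseDirRef is an illustrative sketch, not part of the generated
// template: the final DecRef runs the destroy callback supplied by the owner,
// which is where the embedding object would tear down its own state. The
// cleanup parameter here is hypothetical.
func exampleReleaseDirRef(r *dirRefs, cleanup func()) {
	r.DecRef(func() {
		// Executed only when the reference count reaches zero.
		if cleanup != nil {
			cleanup()
		}
	})
}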

func (r *dirRefs) afterLoad() {
	if r.ReadRefs() > 0 {
		refsvfs2.Register(r)
	}
}