package kernel

import (
	"fmt"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/refsvfs2"
)

// enableLogging indicates whether reference-related events should be logged (with
// stack traces). This is false by default and should only be set to true for
// debugging purposes, as it can generate an extremely large amount of output
// and drastically degrade performance.
const FDTableenableLogging = false

// obj is used to customize logging. Note that we use a pointer to T so that
// we do not copy the entire object when passed as a format parameter.
var FDTableobj *FDTable

// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
// Note that the number of references is actually refCount + 1 so that a default
// zero-value Refs object contains one reference.
//
// +stateify savable
type FDTableRefs struct {
	// refCount is composed of two fields:
	//
	//	[32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a CompareAndSwap
	// loop. See IncRef, DecRef and TryIncRef for details of how these fields are
	// used.
	refCount int64
}
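
// Illustration (added for clarity; not part of the generated template): with
// the one-biased encoding above, the stored value maps to reference state as
// follows:
//
//	refCount == 0      // one real reference (the zero value)
//	refCount == 2      // three real references
//	refCount == 1<<32  // one real reference plus one in-flight speculative reference
//	refCount == -1     // the last real reference has been dropped; the object is destroyed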

// RefType implements refsvfs2.CheckedObject.RefType.
func (r *FDTableRefs) RefType() string {
	return fmt.Sprintf("%T", FDTableobj)[1:]
}

// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
func (r *FDTableRefs) LeakMessage() string {
	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
}

// LogRefs implements refsvfs2.CheckedObject.LogRefs.
func (r *FDTableRefs) LogRefs() bool {
	return FDTableenableLogging
}

// EnableLeakCheck enables reference leak checking on r.
func (r *FDTableRefs) EnableLeakCheck() {
	refsvfs2.Register(r)
}

// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FDTableRefs) ReadRefs() int64 {
	return atomic.LoadInt64(&r.refCount) + 1
}

// IncRef implements refs.RefCounter.IncRef.
//
//go:nosplit
func (r *FDTableRefs) IncRef() {
	v := atomic.AddInt64(&r.refCount, 1)
	refsvfs2.LogIncRef(r, v+1)
	if v <= 0 {
		// The stored count is biased by one, so v <= 0 after the increment
		// means there were no real references left; incrementing here would
		// resurrect a destroyed object.
		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
	}
}

// TryIncRef implements refs.RefCounter.TryIncRef.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to distinguish
// other TryIncRef calls from genuine references held.
//
//go:nosplit
func (r *FDTableRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
		// This object has already been freed; roll back the speculative
		// reference.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}

	// Turn the speculative reference into a real reference.
	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
	refsvfs2.LogTryIncRef(r, v+1)
	return true
}
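
// Worked example (added for clarity): if the object has already been
// destroyed, refCount is -1. Adding speculativeRef yields v == (1<<32)-1, so
// int32(v) == -1 and the speculative reference is rolled back. If the real
// count is still positive, the low 32 bits of v are non-negative and a single
// further atomic add converts the speculative reference into a real one,
// avoiding a CompareAndSwap loop.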

// DecRef implements refs.RefCounter.DecRef.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
//	A: TryIncRef [speculative increase => sees non-negative references]
//	B: DecRef [real decrease]
//	A: TryIncRef [transform speculative to real]
//
//go:nosplit
func (r *FDTableRefs) DecRef(destroy func()) {
	v := atomic.AddInt64(&r.refCount, -1)
	refsvfs2.LogDecRef(r, v+1)
	switch {
	case v < -1:
		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))

	case v == -1:
		// The stored count is biased by one, so v == -1 means the last real
		// reference was just dropped. Unregister from the leak checker and
		// call the destructor.
		refsvfs2.Unregister(r)
		if destroy != nil {
			destroy()
		}
	}
}
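
// Trace of the scenario above (added for clarity), starting from one real
// reference (refCount == 0):
//
//	A: TryIncRef adds speculativeRef    => refCount == 1<<32 (int32 low bits are 0, so A proceeds)
//	B: DecRef adds -1                   => refCount == (1<<32)-1; v is neither -1 nor < -1, so destroy is not called
//	A: TryIncRef adds -speculativeRef+1 => refCount == 0, i.e. one real reference, now held by A
//
// Destruction is therefore deferred until A eventually calls DecRef.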

// afterLoad is invoked by the state framework after restore; objects that
// still have outstanding references are re-registered with the leak checker.
func (r *FDTableRefs) afterLoad() {
	if r.ReadRefs() > 0 {
		r.EnableLeakCheck()
	}
}
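
// Usage sketch (added for illustration; FDTable's fields and its destroy
// method live elsewhere in this package, and the wiring shown here is an
// assumption about the embedding type, not part of this generated file):
//
//	type FDTable struct {
//		FDTableRefs
//		// ...
//	}
//
//	func (f *FDTable) DecRef(ctx context.Context) {
//		f.FDTableRefs.DecRef(func() {
//			// Invoked exactly once, when the reference count reaches zero.
//			f.destroy(ctx)
//		})
//	}
//
// The zero value of FDTableRefs already holds one reference, so a constructor
// only needs to call EnableLeakCheck to register the new object with the leak
// checker.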