// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package atomicptrmap doesn't exist. This file must be instantiated using the
// go_template_instance rule in tools/go_generics/defs.bzl.
package atomicptrmap

import (
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
	"gvisor.dev/gvisor/pkg/sync"
)

// Key is a required type parameter.
type Key struct{}
// Value is a required type parameter.
type Value struct{}
const (
// ShardOrder is an optional parameter specifying the base-2 log of the
// number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
// unnecessary synchronization between unrelated concurrent operations,
// improving performance for write-heavy workloads, but increase memory
// usage for small maps.
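	//
	// For example, ShardOrder = 4 yields 1<<4 = 16 shards, each with its own
	// mutexes and table, at the cost of roughly 16x the footprint of an empty
	// map.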
ShardOrder = 0
)
// Hasher is an optional type parameter. If Hasher is provided, it must define
// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
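//
// A hypothetical custom Hasher for an instantiation whose Key carries an
// integer id field might look like the following sketch (the field name and
// the mixing constant are illustrative, not taken from any real
// instantiation):
//
//	type Hasher struct{ seed uintptr }
//
//	func (h *Hasher) Init()                { h.seed = sync.RandUintptr() }
//	func (h *Hasher) Hash(key Key) uintptr { return uintptr(key.id)*2654435761 ^ h.seed }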
type Hasher struct {
defaultHasher
}
// defaultHasher is the default Hasher. This indirection exists because
// defaultHasher must exist even if a custom Hasher is provided, to prevent the
// Go compiler from complaining about defaultHasher's unused imports.
type defaultHasher struct {
fn func(unsafe.Pointer, uintptr) uintptr
seed uintptr
}
// Init initializes the Hasher.
func (h *defaultHasher) Init() {
h.fn = sync.MapKeyHasher(map[Key]*Value(nil))
h.seed = sync.RandUintptr()
}
// Hash returns the hash value for the given Key.
func (h *defaultHasher) Hash(key Key) uintptr {
return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
}
var hasher Hasher
func init() {
hasher.Init()
}
// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
// safe for concurrent use from multiple goroutines without additional
// synchronization.
//
// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
// use. AtomicPtrMaps must not be copied after first use.
//
// sync.Map may be faster than AtomicPtrMap if most operations on the map are
// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
// other circumstances.
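//
// A minimal usage sketch (illustrative; real code uses a concrete
// instantiation of this template rather than the placeholder Key and Value
// types):
//
//	var m AtomicPtrMap
//	v := new(Value)
//	if old := m.Swap(Key{}, v); old != nil {
//		// key was already mapped to old
//	}
//	if m.Load(Key{}) != v {
//		// unreachable: the mapping was just stored
//	}
//	m.Store(Key{}, nil) // storing nil deletes the mapping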
type AtomicPtrMap struct {
// AtomicPtrMap is implemented as a hash table with the following
// properties:
//
// * Collisions are resolved with quadratic probing. Of the two major
// alternatives, Robin Hood linear probing makes it difficult for writers
// to execute in parallel, and bucketing is less effective in Go due to
// lack of SIMD.
//
// * The table is optionally divided into shards indexed by hash to further
// reduce unnecessary synchronization.
shards [1 << ShardOrder]apmShard
}
func (m *AtomicPtrMap) shard(hash uintptr) *apmShard {
// Go defines right shifts >= width of shifted unsigned operand as 0, so
// this is correct even if ShardOrder is 0 (although nogo complains because
// nogo is dumb).
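	//
	// For example, with ShardOrder = 2 on a 64-bit platform, indexLSB is 62,
	// so the two most-significant bits of the hash select one of the four
	// shards.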
const indexLSB = unsafe.Sizeof(uintptr(0))*8 - ShardOrder
index := hash >> indexLSB
return (*apmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(apmShard{}))))
}
type apmShard struct {
apmShardMutationData
_ [apmShardMutationDataPadding]byte
apmShardLookupData
_ [apmShardLookupDataPadding]byte
}
type apmShardMutationData struct {
dirtyMu sync.Mutex // serializes slot transitions out of empty
dirty uintptr // # slots with val != nil
count uintptr // # slots with val != nil and val != tombstone()
rehashMu sync.Mutex // serializes rehashing
}
type apmShardLookupData struct {
seq sync.SeqCount // allows atomic reads of slots+mask
slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
}
const (
cacheLineBytes = 64
// Cache line padding is enabled if sharding is.
apmEnablePadding = (ShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
// The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
// cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
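	// For example, a 32-byte struct gets 64 - ((31 % 64) + 1) = 32 bytes of
	// padding, while a 64-byte struct gets 64 - ((63 % 64) + 1) = 0.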
apmShardMutationDataRequiredPadding = cacheLineBytes - (((unsafe.Sizeof(apmShardMutationData{}) - 1) % cacheLineBytes) + 1)
apmShardMutationDataPadding = apmEnablePadding * apmShardMutationDataRequiredPadding
apmShardLookupDataRequiredPadding = cacheLineBytes - (((unsafe.Sizeof(apmShardLookupData{}) - 1) % cacheLineBytes) + 1)
apmShardLookupDataPadding = apmEnablePadding * apmShardLookupDataRequiredPadding
// These define fractional thresholds for when apmShard.rehash() is called
// (i.e. the load factor) and when it rehashes to a larger table
// respectively. They are chosen such that the rehash threshold = the
// expansion threshold + 1/2, so that when reuse of deleted slots is rare
// or non-existent, rehashing occurs after the insertion of at least 1/2
// the table's size in new entries, which is acceptably infrequent.
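	//
	// For example, in a 16-slot table, a rehash is triggered once roughly 2/3
	// of the slots (11) are dirty; the replacement table is doubled only if
	// more than roughly 1/6 of the slots hold live values, and otherwise keeps
	// the same size, reclaiming tombstoned slots.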
apmRehashThresholdNum = 2
apmRehashThresholdDen = 3
apmExpansionThresholdNum = 1
apmExpansionThresholdDen = 6
)
type apmSlot struct {
// slot states are indicated by val:
//
// * Empty: val == nil; key is meaningless. May transition to full or
// evacuated with dirtyMu locked.
//
// * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
// is the Value mapped to key. May transition to deleted or evacuated.
//
// * Deleted: val == tombstone(); key is still immutable. key is mapped to
// no Value. May transition to full or evacuated.
//
// * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
// slots that have already been moved, requiring readers to wait for
// rehashing to complete and use the new table. Terminal state.
//
// Note that once val is non-nil, it cannot become nil again. That is, the
// transition from empty to non-empty is irreversible for a given slot;
// the only way to create more empty slots is by rehashing.
val unsafe.Pointer
key Key
}
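// apmSlotAt returns a pointer to the slot at index pos in the table starting
// at slots.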
func apmSlotAt(slots unsafe.Pointer, pos uintptr) *apmSlot {
return (*apmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(apmSlot{})))
}
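// tombstoneObj and evacuatedObj exist only to provide distinct sentinel
// addresses for tombstone() and evacuated(); they are never dereferenced.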
var tombstoneObj byte
func tombstone() unsafe.Pointer {
return unsafe.Pointer(&tombstoneObj)
}
var evacuatedObj byte
func evacuated() unsafe.Pointer {
return unsafe.Pointer(&evacuatedObj)
}
// Load returns the Value stored in m for key.
func (m *AtomicPtrMap) Load(key Key) *Value {
hash := hasher.Hash(key)
shard := m.shard(hash)
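	// Read the slots pointer and mask under the shard's SeqCount so that a
	// concurrent rehash, which publishes a new table inside a writer critical
	// section, forces a consistent re-read of the pair.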
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
return nil
}
i := hash & mask
inc := uintptr(1)
for {
slot := apmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
// Empty slot; end of probe sequence.
return nil
}
if slotVal == evacuated() {
// Racing with rehashing.
goto retry
}
if slot.key == key {
if slotVal == tombstone() {
return nil
}
return (*Value)(slotVal)
}
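		// Quadratic probing: the increment grows by one each iteration, so
		// offsets from the initial index follow the triangular numbers
		// 1, 3, 6, 10, ...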
i = (i + inc) & mask
inc++
}
}
// Store stores the Value val for key.
func (m *AtomicPtrMap) Store(key Key, val *Value) {
m.maybeCompareAndSwap(key, false, nil, val)
}
// Swap stores the Value val for key and returns the previously-mapped Value.
func (m *AtomicPtrMap) Swap(key Key, val *Value) *Value {
return m.maybeCompareAndSwap(key, false, nil, val)
}
// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
// stores the Value newVal for key. CompareAndSwap returns the previous Value
// stored for key, whether or not it stores newVal.
func (m *AtomicPtrMap) CompareAndSwap(key Key, oldVal, newVal *Value) *Value {
return m.maybeCompareAndSwap(key, true, oldVal, newVal)
}
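// maybeCompareAndSwap implements Store, Swap, and CompareAndSwap: it stores
// typedNewVal for key (nil meaning no value) unless compare is true and the
// existing Value is not typedOldVal, and in all cases returns the Value
// previously stored for key (nil if none).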
func (m *AtomicPtrMap) maybeCompareAndSwap(key Key, compare bool, typedOldVal, typedNewVal *Value) *Value {
hash := hasher.Hash(key)
shard := m.shard(hash)
oldVal := tombstone()
if typedOldVal != nil {
oldVal = unsafe.Pointer(typedOldVal)
}
newVal := tombstone()
if typedNewVal != nil {
newVal = unsafe.Pointer(typedNewVal)
}
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
if (compare && oldVal != tombstone()) || newVal == tombstone() {
return nil
}
// Need to allocate a table before insertion.
shard.rehash(nil)
goto retry
}
i := hash & mask
inc := uintptr(1)
for {
slot := apmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil {
if (compare && oldVal != tombstone()) || newVal == tombstone() {
return nil
}
// Try to grab this slot for ourselves.
shard.dirtyMu.Lock()
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == nil {
// Check if we need to rehash before dirtying a slot.
if dirty, capacity := shard.dirty+1, mask+1; dirty*apmRehashThresholdDen >= capacity*apmRehashThresholdNum {
shard.dirtyMu.Unlock()
shard.rehash(slots)
goto retry
}
slot.key = key
atomic.StorePointer(&slot.val, newVal) // transitions slot to full
shard.dirty++
atomic.AddUintptr(&shard.count, 1)
shard.dirtyMu.Unlock()
return nil
}
// Raced with another store; the slot is no longer empty. Continue
// with the new value of slotVal since we may have raced with
// another store of key.
shard.dirtyMu.Unlock()
}
if slotVal == evacuated() {
// Racing with rehashing.
goto retry
}
if slot.key == key {
// We're reusing an existing slot, so rehashing isn't necessary.
for {
if (compare && oldVal != slotVal) || newVal == slotVal {
if slotVal == tombstone() {
return nil
}
return (*Value)(slotVal)
}
if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
if slotVal == tombstone() {
atomic.AddUintptr(&shard.count, 1)
return nil
}
if newVal == tombstone() {
atomic.AddUintptr(&shard.count, ^uintptr(0) /* -1 */)
}
return (*Value)(slotVal)
}
slotVal = atomic.LoadPointer(&slot.val)
if slotVal == evacuated() {
goto retry
}
}
}
// This produces a triangular number sequence of offsets from the
// initially-probed position.
i = (i + inc) & mask
inc++
}
}
// rehash installs a new table for shard, copying live entries from oldSlots
// (if any) and marking each old slot evacuated so that concurrent readers and
// writers retry against the new table.
func (shard *apmShard) rehash(oldSlots unsafe.Pointer) {
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
if shard.slots != oldSlots {
// Raced with another call to rehash().
return
}
// Determine the size of the new table. Constraints:
//
// * The size of the table must be a power of two to ensure that every slot
// is visitable by every probe sequence under quadratic probing with
// triangular numbers.
//
// * The size of the table cannot decrease because even if shard.count is
// currently smaller than shard.dirty, concurrent stores that reuse
// existing slots can drive shard.count back up to a maximum of
// shard.dirty.
newSize := uintptr(8) // arbitrary initial size
if oldSlots != nil {
oldSize := shard.mask + 1
newSize = oldSize
if count := atomic.LoadUintptr(&shard.count) + 1; count*apmExpansionThresholdDen > oldSize*apmExpansionThresholdNum {
newSize *= 2
}
}
// Allocate the new table.
newSlotsSlice := make([]apmSlot, newSize)
newSlotsHeader := (*gohacks.SliceHeader)(unsafe.Pointer(&newSlotsSlice))
newSlots := newSlotsHeader.Data
newMask := newSize - 1
// Start a writer critical section now so that racing users of the old
// table that observe evacuated() wait for the new table. (But lock dirtyMu
// first since doing so may block, which we don't want to do during the
// writer critical section.)
shard.dirtyMu.Lock()
shard.seq.BeginWrite()
if oldSlots != nil {
realCount := uintptr(0)
// Copy old entries to the new table.
oldMask := shard.mask
for i := uintptr(0); i <= oldMask; i++ {
oldSlot := apmSlotAt(oldSlots, i)
val := atomic.SwapPointer(&oldSlot.val, evacuated())
if val == nil || val == tombstone() {
continue
}
hash := hasher.Hash(oldSlot.key)
j := hash & newMask
inc := uintptr(1)
for {
newSlot := apmSlotAt(newSlots, j)
if newSlot.val == nil {
newSlot.val = val
newSlot.key = oldSlot.key
break
}
j = (j + inc) & newMask
inc++
}
realCount++
}
// Update dirty to reflect that tombstones were not copied to the new
// table. Use realCount since a concurrent mutator may not have updated
// shard.count yet.
shard.dirty = realCount
}
// Switch to the new table.
atomic.StorePointer(&shard.slots, newSlots)
atomic.StoreUintptr(&shard.mask, newMask)
shard.seq.EndWrite()
shard.dirtyMu.Unlock()
}
// Range invokes f on each Key-Value pair stored in m. If any call to f returns
// false, Range stops iteration and returns.
//
// Range does not necessarily correspond to any consistent snapshot of the
// Map's contents: no Key will be visited more than once, but if the Value for
// any Key is stored or deleted concurrently, Range may reflect any mapping for
// that Key from any point during the Range call.
//
// f must not call other methods on m.
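//
// For example, counting all current entries (a sketch):
//
//	n := 0
//	m.Range(func(_ Key, _ *Value) bool {
//		n++
//		return true
//	})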
func (m *AtomicPtrMap) Range(f func(key Key, val *Value) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
if !shard.doRange(f) {
return
}
}
}
func (shard *apmShard) doRange(f func(key Key, val *Value) bool) bool {
// We have to lock rehashMu because if we handled races with rehashing by
// retrying, f could see the same key twice.
shard.rehashMu.Lock()
defer shard.rehashMu.Unlock()
slots := shard.slots
if slots == nil {
return true
}
mask := shard.mask
for i := uintptr(0); i <= mask; i++ {
slot := apmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == nil || slotVal == tombstone() {
continue
}
if !f(slot.key, (*Value)(slotVal)) {
return false
}
}
return true
}
// RangeRepeatable is like Range, but:
//
// * RangeRepeatable may visit the same Key multiple times in the presence of
// concurrent mutators, possibly passing different Values to f in different
// calls.
//
// * It is safe for f to call other methods on m.
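//
// Because f may call other methods on m, RangeRepeatable can, for example, be
// used to clear the map (a sketch; storing nil removes a mapping):
//
//	m.RangeRepeatable(func(k Key, _ *Value) bool {
//		m.Store(k, nil)
//		return true
//	})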
func (m *AtomicPtrMap) RangeRepeatable(f func(key Key, val *Value) bool) {
for si := 0; si < len(m.shards); si++ {
shard := &m.shards[si]
retry:
epoch := shard.seq.BeginRead()
slots := atomic.LoadPointer(&shard.slots)
mask := atomic.LoadUintptr(&shard.mask)
if !shard.seq.ReadOk(epoch) {
goto retry
}
if slots == nil {
continue
}
for i := uintptr(0); i <= mask; i++ {
slot := apmSlotAt(slots, i)
slotVal := atomic.LoadPointer(&slot.val)
if slotVal == evacuated() {
goto retry
}
if slotVal == nil || slotVal == tombstone() {
continue
}
if !f(slot.key, (*Value)(slotVal)) {
return
}
}
}
}