// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sync

import (
	"fmt"
	"reflect"
	"runtime"
	"sync/atomic"
)

// SeqCount is a synchronization primitive for optimistic reader/writer
// synchronization in cases where readers can work with stale data and
// therefore do not need to block writers.
//
// Compared to sync/atomic.Value:
//
// - Mutation of SeqCount-protected data does not require memory allocation,
// whereas atomic.Value generally does. This is a significant advantage when
// writes are common.
//
// - Atomic reads of SeqCount-protected data require copying. This is a
// disadvantage when atomic reads are common.
//
// - SeqCount may be more flexible: correct use of SeqCount.ReadOk allows other
// operations to be made atomic with reads of SeqCount-protected data.
//
// - SeqCount may be less flexible: as of this writing, SeqCount-protected data
// cannot include pointers.
//
// - SeqCount is more cumbersome to use; atomic reads of SeqCount-protected
// data require instantiating function templates using go_generics (see
// seqatomic.go).
type SeqCount struct {
	// epoch is incremented by BeginWrite and EndWrite, such that epoch is odd
	// if a writer critical section is active, and a read from data protected
	// by this SeqCount is atomic iff epoch is the same even value before and
	// after the read.
	epoch uint32
}

// SeqCountEpoch tracks writer critical sections in a SeqCount.
type SeqCountEpoch struct {
	val uint32
}

// We assume that:
//
// - All functions in sync/atomic that perform a memory read are at least a
// read fence: memory reads before calls to such functions cannot be reordered
// after the call, and memory reads after calls to such functions cannot be
// reordered before the call, even if those reads do not use sync/atomic.
//
// - All functions in sync/atomic that perform a memory write are at least a
// write fence: memory writes before calls to such functions cannot be
// reordered after the call, and memory writes after calls to such functions
// cannot be reordered before the call, even if those writes do not use
// sync/atomic.
//
// As of this writing, the Go memory model completely fails to describe
// sync/atomic, but these properties are implied by
// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8.

// BeginRead indicates the beginning of a reader critical section. Reader
// critical sections DO NOT BLOCK writer critical sections, so operations in a
// reader critical section MAY RACE with writer critical sections. Races are
// detected by ReadOk at the end of the reader critical section. Thus, the
// low-level structure of readers is generally:
//
//     for {
//         epoch := seq.BeginRead()
//         // do something idempotent with seq-protected data
//         if seq.ReadOk(epoch) {
//             break
//         }
//     }
//
// However, since reader critical sections may race with writer critical
// sections, the Go race detector will (accurately) flag data races in readers
// using this pattern. Most users of SeqCount will need to use the
// SeqAtomicLoad function template in seqatomic.go.
func (s *SeqCount) BeginRead() SeqCountEpoch {
	epoch := atomic.LoadUint32(&s.epoch)
	for epoch&1 != 0 {
		runtime.Gosched()
		epoch = atomic.LoadUint32(&s.epoch)
	}
	return SeqCountEpoch{epoch}
}

// ReadOk returns true if the reader critical section initiated by a previous
// call to BeginRead() that returned epoch did not race with any writer critical
// sections.
//
// ReadOk may be called any number of times during a reader critical section.
// Reader critical sections do not need to be explicitly terminated; the last
// call to ReadOk is implicitly the end of the reader critical section.
func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool {
	return atomic.LoadUint32(&s.epoch) == epoch.val
}

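// An illustrative sketch (hypothetical names: protected.id, lookupByID) of
// the "other operations made atomic with reads" pattern enabled by ReadOk:
// because ReadOk succeeds only if no writer critical section overlapped the
// read, an operation performed between BeginRead and ReadOk is known, in
// hindsight, to have used a consistent snapshot. The operation must be
// idempotent and tolerate torn inputs until validated.
//
//     for {
//         epoch := seq.BeginRead()
//         id := protected.id      // possibly-torn copy of seq-protected data
//         entry := lookupByID(id) // must be safe even if id is inconsistent
//         if seq.ReadOk(epoch) {
//             // The lookup is now known to have used a consistent id.
//             return entry
//         }
//     }
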
// BeginWrite indicates the beginning of a writer critical section.
//
// SeqCount does not support concurrent writer critical sections; clients with
// concurrent writers must synchronize them using e.g. sync.Mutex.
func (s *SeqCount) BeginWrite() {
	if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 == 0 {
		panic("SeqCount.BeginWrite during writer critical section")
	}
}

// EndWrite ends the effect of a preceding BeginWrite.
func (s *SeqCount) EndWrite() {
	if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 != 0 {
		panic("SeqCount.EndWrite outside writer critical section")
	}
}

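// An illustrative sketch of pairing writers and readers; seq, mu, and pt are
// hypothetical. Writers are serialized by a separate mutex (e.g. sync.Mutex),
// since SeqCount itself does not support concurrent writers, and readers
// retry until ReadOk reports a race-free copy. (Readers written this way are
// flagged by the race detector; see BeginRead and SeqAtomicLoad.)
//
//     var (
//         mu  sync.Mutex           // serializes writers
//         seq SeqCount
//         pt  struct{ x, y int64 } // seq-protected, pointer-free data
//     )
//
//     func setPoint(x, y int64) {
//         mu.Lock()
//         seq.BeginWrite()
//         pt.x, pt.y = x, y
//         seq.EndWrite()
//         mu.Unlock()
//     }
//
//     func getPoint() (x, y int64) {
//         for {
//             epoch := seq.BeginRead()
//             x, y = pt.x, pt.y // racy copy, validated below
//             if seq.ReadOk(epoch) {
//                 return x, y
//             }
//         }
//     }
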
// PointersInType returns a list of pointers reachable from values named
// valName of the given type.
//
// PointersInType is not exhaustive, but it is guaranteed that if typ contains
// at least one pointer, then PointersInType returns a non-empty list.
func PointersInType(typ reflect.Type, valName string) []string {
	switch kind := typ.Kind(); kind {
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		return nil

	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.String, reflect.UnsafePointer:
		return []string{valName}

	case reflect.Array:
		return PointersInType(typ.Elem(), valName+"[]")

	case reflect.Struct:
		var ptrs []string
		for i, n := 0, typ.NumField(); i < n; i++ {
			field := typ.Field(i)
			ptrs = append(ptrs, PointersInType(field.Type, fmt.Sprintf("%s.%s", valName, field.Name))...)
		}
		return ptrs

	default:
		return []string{fmt.Sprintf("%s (of type %s with unknown kind %s)", valName, typ, kind)}
	}
}

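// An illustrative sketch of how PointersInType can be used, e.g. by code
// instantiating the seqatomic templates, to reject types whose values contain
// pointers; Value and val are hypothetical names.
//
//     var val Value
//     if ptrs := PointersInType(reflect.TypeOf(val), "val"); len(ptrs) != 0 {
//         panic(fmt.Sprintf("SeqCount-protected value contains pointers: %v", ptrs))
//     }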