// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fsutil

import (
	"math"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/safemem"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
	"gvisor.dev/gvisor/pkg/usermem"
)

// DirtySet maps offsets into a memmap.Mappable to DirtyInfo. It is used to
// implement Mappables that cache data from another source.
//
// type DirtySet <generated by go_generics>

// DirtyInfo is the value type of DirtySet, and represents information about a
// Mappable offset that is dirty (the cached data for that offset is newer than
// its source).
//
// +stateify savable
type DirtyInfo struct {
// Keep is true if the represented offset is concurrently writable, such
// that writing the data for that offset back to the source does not
// guarantee that the offset is clean (since it may be concurrently
// rewritten after the writeback).
Keep bool
}
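
// Illustrative lifecycle for an offset that is concurrently writable (for
// example, one covered by a writable memory mapping):
//
//	dirty.KeepDirty(mr)  // mr must stay dirty even after writeback
//	dirty.MarkClean(mr)  // no effect; KeepDirty is still in force
//	dirty.AllowClean(mr) // mr is no longer concurrently writable
//	dirty.MarkClean(mr)  // mr is now removed from the dirty set
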
// dirtySetFunctions implements segment.Functions for DirtySet.
type dirtySetFunctions struct{}

// MinKey implements segment.Functions.MinKey.
func (dirtySetFunctions) MinKey() uint64 {
	return 0
}

// MaxKey implements segment.Functions.MaxKey.
func (dirtySetFunctions) MaxKey() uint64 {
	return math.MaxUint64
}

// ClearValue implements segment.Functions.ClearValue.
func (dirtySetFunctions) ClearValue(val *DirtyInfo) {
}

// Merge implements segment.Functions.Merge.
func (dirtySetFunctions) Merge(_ memmap.MappableRange, val1 DirtyInfo, _ memmap.MappableRange, val2 DirtyInfo) (DirtyInfo, bool) {
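	// Segments may only merge if they agree on whether their offsets must be
	// kept dirty; merging otherwise would lose that distinction.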
if val1 != val2 {
return DirtyInfo{}, false
}
return val1, true
}

// Split implements segment.Functions.Split.
func (dirtySetFunctions) Split(_ memmap.MappableRange, val DirtyInfo, _ uint64) (DirtyInfo, DirtyInfo) {
return val, val
}

// MarkClean marks all offsets in mr as not dirty, except for those to which
// KeepDirty has been applied.
func (ds *DirtySet) MarkClean(mr memmap.MappableRange) {
seg := ds.LowerBoundSegment(mr.Start)
for seg.Ok() && seg.Start() < mr.End {
if seg.Value().Keep {
seg = seg.NextSegment()
continue
}
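		// Isolate the part of the segment that falls within mr so that only
		// that part is removed from the dirty set.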
seg = ds.Isolate(seg, mr)
seg = ds.Remove(seg).NextSegment()
}
}

// KeepClean marks all offsets in mr as not dirty, even those that were
// previously kept dirty by KeepDirty.
func (ds *DirtySet) KeepClean(mr memmap.MappableRange) {
ds.RemoveRange(mr)
}

// MarkDirty marks all offsets in mr as dirty.
func (ds *DirtySet) MarkDirty(mr memmap.MappableRange) {
ds.setDirty(mr, false)
}

// KeepDirty marks all offsets in mr as dirty and prevents them from being
// marked as clean by MarkClean.
func (ds *DirtySet) KeepDirty(mr memmap.MappableRange) {
ds.setDirty(mr, true)
}
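
// setDirty marks all offsets in mr as dirty and, if keep is true, prevents
// those offsets from being marked clean by a later call to MarkClean.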
func (ds *DirtySet) setDirty(mr memmap.MappableRange, keep bool) {
var changedAny bool
defer func() {
if changedAny {
// Merge segments split by Isolate to reduce cost of iteration.
ds.MergeRange(mr)
}
}()
seg, gap := ds.Find(mr.Start)
for {
switch {
case seg.Ok() && seg.Start() < mr.End:
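			// An existing segment overlaps mr; it is already dirty, but may
			// need to be upgraded to keep-dirty.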
if keep && !seg.Value().Keep {
changedAny = true
seg = ds.Isolate(seg, mr)
seg.ValuePtr().Keep = true
}
seg, gap = seg.NextNonEmpty()
case gap.Ok() && gap.Start() < mr.End:
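			// A gap overlaps mr; insert a dirty segment for the overlap.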
changedAny = true
seg = ds.Insert(gap, gap.Range().Intersect(mr), DirtyInfo{keep})
seg, gap = seg.NextNonEmpty()
default:
return
}
}
}

// AllowClean allows MarkClean to mark offsets in mr as not dirty, ending the
// effect of a previous call to KeepDirty. (It does not itself mark those
// offsets as not dirty.)
func (ds *DirtySet) AllowClean(mr memmap.MappableRange) {
var changedAny bool
defer func() {
if changedAny {
// Merge segments split by Isolate to reduce cost of iteration.
ds.MergeRange(mr)
}
}()
for seg := ds.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() {
if seg.Value().Keep {
changedAny = true
seg = ds.Isolate(seg, mr)
seg.ValuePtr().Keep = false
}
}
}

// SyncDirty passes pages in the range mr that are stored in cache and
// identified as dirty to writeAt, updating dirty to reflect successful writes.
// If writeAt returns a successful partial write, SyncDirty will call it
// repeatedly until all bytes have been written. max is the true size of the
// cached object; offsets beyond max will not be passed to writeAt, even if
// they are marked dirty.
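//
// For illustration only, a caller might flush the first size bytes of a cached
// object as follows; f, mf, pageEnd, and backingFile are hypothetical
// stand-ins for the caller's cache state, memmap.File, a page-aligned bound
// covering the object, and its write-back destination:
//
//	err := SyncDirty(ctx, memmap.MappableRange{Start: 0, End: pageEnd},
//		&f.cache, &f.dirty, size, mf,
//		func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) {
//			return backingFile.WriteFromBlocksAt(ctx, srcs, offset)
//		})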
func SyncDirty(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, dirty *DirtySet, max uint64, mem memmap.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
var changedDirty bool
defer func() {
if changedDirty {
// Merge segments split by Isolate to reduce cost of iteration.
dirty.MergeRange(mr)
}
}()
dseg := dirty.LowerBoundSegment(mr.Start)
for dseg.Ok() && dseg.Start() < mr.End {
var dr memmap.MappableRange
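		// Offsets that must be kept dirty are still written back, but are
		// neither isolated nor removed from the dirty set.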
if dseg.Value().Keep {
dr = dseg.Range().Intersect(mr)
} else {
changedDirty = true
dseg = dirty.Isolate(dseg, mr)
dr = dseg.Range()
}
if err := syncDirtyRange(ctx, dr, cache, max, mem, writeAt); err != nil {
return err
}
if dseg.Value().Keep {
dseg = dseg.NextSegment()
} else {
dseg = dirty.Remove(dseg).NextSegment()
}
}
return nil
}

// SyncDirtyAll passes all pages stored in cache identified as dirty to
// writeAt, updating dirty to reflect successful writes. If writeAt returns a
// successful partial write, SyncDirtyAll will call it repeatedly until all
// bytes have been written. max is the true size of the cached object; offsets
// beyond max will not be passed to writeAt, even if they are marked dirty.
func SyncDirtyAll(ctx context.Context, cache *FileRangeSet, dirty *DirtySet, max uint64, mem memmap.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
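	// Unlike SyncDirty, whole segments are written back and removed, so no
	// Isolate/MergeRange bookkeeping is needed here.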
dseg := dirty.FirstSegment()
for dseg.Ok() {
if err := syncDirtyRange(ctx, dseg.Range(), cache, max, mem, writeAt); err != nil {
return err
}
if dseg.Value().Keep {
dseg = dseg.NextSegment()
} else {
dseg = dirty.Remove(dseg).NextSegment()
}
}
return nil
}

// Preconditions: mr must be page-aligned.
func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, max uint64, mem memmap.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
for cseg := cache.LowerBoundSegment(mr.Start); cseg.Ok() && cseg.Start() < mr.End; cseg = cseg.NextSegment() {
wbr := cseg.Range().Intersect(mr)
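		// Stop once the writeback range lies entirely beyond the true end of
		// the cached object.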
if max < wbr.Start {
break
}
ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), usermem.Read)
if err != nil {
return err
}
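		// Truncate the writeback so that offsets at or beyond max are never
		// passed to writeAt.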
if max < wbr.End {
ims = ims.TakeFirst64(max - wbr.Start)
}
offset := wbr.Start
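		// writeAt may return short writes; keep calling it until the entire
		// mapping has been written or it returns an error.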
for !ims.IsEmpty() {
n, err := writeAt(ctx, ims, offset)
if err != nil {
return err
}
offset += n
ims = ims.DropFirst64(n)
}
}
return nil
}