1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kvm
import (
"reflect"
"sync"
"sync/atomic"
"gvisor.googlesource.com/gvisor/pkg/atomicbitops"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform/filemem"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)
// vCPUBitArray is a bit array with one bit per possible vCPU
// (_KVM_NR_VCPUS), packed into 64-bit words.
type vCPUBitArray [(_KVM_NR_VCPUS + 63) / 64]uint64
// dirtySet tracks vCPUs for invalidation.
//
// Bits are set atomically by mark and harvested (swapped to zero) by
// forEach, so no additional locking is required around vCPUs.
type dirtySet struct {
// vCPUs holds the dirty bit for each vCPU: word index id/64, bit id%64.
vCPUs vCPUBitArray
}
// forEach iterates over all CPUs in the dirty set.
//
// Each dirty bit is atomically swapped into a local snapshot and cleared,
// then fn is invoked for every vCPU whose bit was set.
func (ds *dirtySet) forEach(m *machine, fn func(c *vCPU)) {
	// Harvest the dirty bits into a local snapshot, clearing the
	// shared set as we go.
	var snapshot vCPUBitArray
	for i := range ds.vCPUs {
		snapshot[i] = atomic.SwapUint64(&ds.vCPUs[i], 0)
	}
	m.mu.RLock()
	defer m.mu.RUnlock()
	for _, c := range m.vCPUs {
		word, mask := uint64(c.id)/64, uint64(1)<<uint(c.id%64)
		// Invoke fn only for vCPUs that were marked dirty.
		if snapshot[word]&mask != 0 {
			fn(c)
		}
	}
}
// mark marks the given vCPU as dirty and returns whether it was previously
// clean. Being previously clean implies that a flush is needed on entry.
func (ds *dirtySet) mark(c *vCPU) bool {
	word := uint64(c.id) / 64
	mask := uint64(1) << uint(c.id%64)
	// Already dirty? Then no flush is required.
	if atomic.LoadUint64(&ds.vCPUs[word])&mask != 0 {
		return false // Not clean.
	}
	// Set the bit unilaterally, and ensure that a flush takes place. Note
	// that it's possible for races to occur here, but since the flush is
	// taking place long after these lines there's no race in practice.
	atomicbitops.OrUint64(&ds.vCPUs[word], mask)
	return true // Previously clean.
}
// addressSpace is a wrapper for PageTables.
//
// It implements platform.AddressSpace for the KVM platform: guest page
// tables plus the bookkeeping (dirtySet, host mappings) needed to
// invalidate stale vCPU state when mappings change.
type addressSpace struct {
platform.NoAddressSpaceIO
// mu is the lock for modifications to the address space.
//
// Note that the page tables themselves are not locked.
mu sync.Mutex
// filemem is the memory instance.
filemem *filemem.FileMem
// machine is the underlying machine.
machine *machine
// pageTables are for this particular address space.
pageTables *pagetables.PageTables
// dirtySet is the set of dirty vCPUs; these must be bounced to the
// kernel when the address space is invalidated.
dirtySet dirtySet
// files contains files mapped in the host address space.
//
// See host_map.go for more information.
files hostMap
}
// invalidate is the implementation for Invalidate.
//
// It bounces every dirty vCPU that is currently running in this address
// space into the kernel, forcing the necessary flush on re-entry.
func (as *addressSpace) invalidate() {
	as.dirtySet.forEach(as.machine, func(c *vCPU) {
		// Skip vCPUs that are not active in this address space.
		if c.active.get() != as {
			return
		}
		c.BounceToKernel() // Force a kernel transition.
	})
}
// Invalidate interrupts all dirty contexts.
//
// It serializes against other address-space modifications via as.mu.
func (as *addressSpace) Invalidate() {
	as.mu.Lock()
	defer as.mu.Unlock()
	as.invalidate() // Bounce any active, dirty vCPUs.
}
// Touch adds the given vCPU to the dirty list.
//
// The return value indicates whether a flush is required.
func (as *addressSpace) Touch(c *vCPU) bool {
	// mark returns true iff c was previously clean, which is exactly
	// when a flush is needed on entry.
	needFlush := as.dirtySet.mark(c)
	return needFlush
}
// mapHost installs guest page table mappings for the host mapping entry m
// at guest address addr, walking it in physically-contiguous chunks. The
// return value indicates whether any existing mappings were changed (and
// hence whether an invalidation is required).
//
// All callers (mapHostFile, mapFilemem) hold as.mu via MapFile.
func (as *addressSpace) mapHost(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) {
for m.length > 0 {
// Translate the host virtual address to a physical address; length
// is the size of the physically-contiguous run starting there.
physical, length, ok := translateToPhysical(m.addr)
if !ok {
panic("unable to translate segment")
}
// Clamp to the remainder of the host entry.
if length > m.length {
length = m.length
}
// Ensure that this map has physical mappings. If the page does
// not have physical mappings, the KVM module may inject
// spurious exceptions when emulation fails (i.e. it tries to
// emulate because the RIP is pointed at those pages).
as.machine.mapPhysical(physical, length)
// Install the page table mappings. Note that the ordering is
// important; if the pagetable mappings were installed before
// ensuring the physical pages were available, then some other
// thread could theoretically access them.
//
// Due to the way KVM's shadow paging implementation works,
// modifications to the page tables while in host mode may not
// be trapped, leading to the shadow pages being out of sync.
// Therefore, we need to ensure that we are in guest mode for
// page table modifications. See the call to bluepill, below.
as.machine.retryInGuest(func() {
inv = as.pageTables.Map(addr, length, pagetables.MapOpts{
AccessType: at,
User: true,
}, physical) || inv
})
// Advance past the chunk just mapped.
m.addr += length
m.length -= length
addr += usermem.Addr(length)
}
return inv
}
// mapHostFile maps the range fr of the host file fd at guest address addr
// with access type at, creating per-address-space host mappings and then
// installing them in the guest page tables. Note that precommit is not
// honored for host files (see MapFile).
func (as *addressSpace) mapHostFile(addr usermem.Addr, fd int, fr platform.FileRange, at usermem.AccessType) error {
	// Create custom host mappings covering the requested range.
	ms, err := as.files.CreateMappings(usermem.AddrRange{
		Start: addr,
		End:   addr + usermem.Addr(fr.End-fr.Start),
	}, at, fd, fr.Start)
	if err != nil {
		return err
	}
	inv := false
	for _, entry := range ms {
		// The host mapped slices are guaranteed to be aligned.
		// Accumulate whether any existing mapping changed; mapHost is
		// always called (the || short-circuit cannot skip it).
		inv = as.mapHost(addr, entry, at) || inv
		addr += usermem.Addr(entry.length)
	}
	if inv {
		as.invalidate()
	}
	return nil
}
// mapFilemem maps the filemem range fr at guest address addr with access
// type at. If precommit is set, every page of the backing memory is
// touched to force it to be committed before mapping.
//
// Called from MapFile with as.mu held.
func (as *addressSpace) mapFilemem(addr usermem.Addr, fr platform.FileRange, at usermem.AccessType, precommit bool) error {
// TODO: Lock order at the platform level is not sufficiently
// well-defined to guarantee that the caller (FileMem.MapInto) is not
// holding any locks that FileMem.MapInternal may take.
// Retrieve mappings for the underlying filemem. Note that the
// permissions here are largely irrelevant, since it corresponds to
// physical memory for the guest. We enforce the given access type
// below, in the guest page tables.
bs, err := as.filemem.MapInternal(fr, usermem.AccessType{
Read: true,
Write: true,
})
if err != nil {
return err
}
// Save the original range for invalidation.
orig := usermem.AddrRange{
Start: addr,
End: addr + usermem.Addr(fr.End-fr.Start),
}
inv := false
for !bs.IsEmpty() {
b := bs.Head()
bs = bs.Tail()
// Since fr was page-aligned, b should also be page-aligned. We do the
// lookup in our host page tables for this translation.
s := b.ToSlice()
if precommit {
// Touch one byte per page to force the pages to be committed.
for i := 0; i < len(s); i += usermem.PageSize {
_ = s[i] // Touch to commit.
}
}
// Install guest mappings for this internal mapping.
prev := as.mapHost(addr, hostMapEntry{
addr: reflect.ValueOf(&s[0]).Pointer(),
length: uintptr(len(s)),
}, at)
inv = inv || prev
addr += usermem.Addr(len(s))
}
if inv {
// Existing mappings changed: bounce dirty vCPUs and drop any
// custom host-file mappings that covered the original range.
as.invalidate()
as.files.DeleteMapping(orig)
}
return nil
}
// MapFile implements platform.AddressSpace.MapFile.
func (as *addressSpace) MapFile(addr usermem.Addr, fd int, fr platform.FileRange, at usermem.AccessType, precommit bool) error {
	as.mu.Lock()
	defer as.mu.Unlock()
	// Create an appropriate mapping. If this is filemem, we don't create
	// custom mappings for each in-application mapping. For files however,
	// we create distinct mappings for each address space. Unfortunately,
	// there's not a better way to manage this here. The file underlying
	// this fd can change at any time, so we can't actually index the file
	// and share between address space. Oh well. It's all referring to the
	// same physical pages, hopefully we don't run out of address space.
	if fd == int(as.filemem.File().Fd()) {
		return as.mapFilemem(addr, fr, at, precommit)
	}
	// N.B. precommit is ignored for host files.
	return as.mapHostFile(addr, fd, fr, at)
}
// Unmap unmaps the given range by calling pagetables.PageTables.Unmap.
func (as *addressSpace) Unmap(addr usermem.Addr, length uint64) {
	as.mu.Lock()
	defer as.mu.Unlock()
	// See above re: retryInGuest.
	changed := false
	as.machine.retryInGuest(func() {
		// The flag is sticky across retries.
		if as.pageTables.Unmap(addr, uintptr(length)) {
			changed = true
		}
	})
	if !changed {
		return
	}
	// Mappings were removed: bounce dirty vCPUs and drop any host-file
	// mappings covering the range.
	as.invalidate()
	as.files.DeleteMapping(usermem.AddrRange{
		Start: addr,
		End:   addr + usermem.Addr(length),
	})
}
// Release releases the page tables.
func (as *addressSpace) Release() {
	// Unmap the entire address range; this also handles invalidation.
	as.Unmap(0, ^uint64(0))
	pt := as.pageTables
	// Free all pages from the allocator.
	pt.Allocator.(allocator).base.Drain()
	// Drop all cached machine references.
	as.machine.dropPageTables(pt)
}