author     Howard Zhang <howard.zhang@arm.com>  2021-07-23 11:31:38 +0800
committer  Howard Zhang <howard.zhang@arm.com>  2021-07-28 11:37:54 +0800
commit     d393d696b25b94d712404a7a2f0f683518ba2ef0 (patch)
tree       19e8fbe642c113a807530ea6af58eed999c5c926 /pkg
parent     aa2698170041e9bc00542378269231b5eaeffd5d (diff)
tune hasSlot function and fix wrong value stored in usedSlots
Make hasSlot scan only the allocated slots rather than the whole slice, and store physicalStart in usedSlots as intended. Signed-off-by: Howard Zhang <howard.zhang@arm.com>
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_fault.go |  2
-rw-r--r--  pkg/sentry/platform/kvm/machine.go        | 13
2 files changed, 11 insertions(+), 4 deletions(-)
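The intent of the two changes is easier to follow with a stripped-down model of the slot bookkeeping. The sketch below is only an illustration of the invariant the commit restores, not the gVisor code: `slotTable`, `addRegion`, and the addresses are invented stand-ins, and the real machine type carries far more state.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// slotTable is a hypothetical, simplified stand-in for the machine's slot
// bookkeeping: usedSlots[i] holds the *start* address of the i-th mapped
// physical region, and nextSlot is the index of the next free entry.
type slotTable struct {
	usedSlots []uintptr
	nextSlot  uint32
}

// addRegion records a newly mapped region. Storing physicalStart (the region
// start) rather than the faulting address is the fix in the first hunk:
// lookups are keyed by region start.
func (t *slotTable) addRegion(slot uint32, physicalStart uintptr) {
	atomic.StoreUintptr(&t.usedSlots[slot], physicalStart)
	atomic.StoreUint32(&t.nextSlot, slot+1)
}

// hasSlot reports whether a region start was already recorded. Only the
// allocated prefix [0, nextSlot) needs to be scanned, which is the second
// change in this commit.
func (t *slotTable) hasSlot(physicalStart uintptr) bool {
	n := int(atomic.LoadUint32(&t.nextSlot))
	for i := 0; i < n; i++ {
		if atomic.LoadUintptr(&t.usedSlots[i]) == physicalStart {
			return true
		}
	}
	return false
}

func main() {
	t := &slotTable{usedSlots: make([]uintptr, 16)}
	t.addRegion(0, 0x40000000)         // region starting at 0x40000000 is now mapped
	fmt.Println(t.hasSlot(0x40000000)) // true: found in the allocated prefix
	fmt.Println(t.hasSlot(0x40001000)) // false: addresses inside the region are not keys
}
```

The companion check in machine.mapPhysical (not shown in this diff) looks regions up by their start address, so storing the raw faulting address could make an already-mapped region appear unmapped and trigger another slot allocation.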
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index 28a613a54..8fd8287b3 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -101,7 +101,7 @@ func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegi
// Store the physical address in the slot. This is used to
// avoid calls to handleBluepillFault in the future (see
// machine.mapPhysical).
- atomic.StoreUintptr(&m.usedSlots[slot], physical)
+ atomic.StoreUintptr(&m.usedSlots[slot], physicalStart)
// Successfully added region; we can increment nextSlot and
// allow another set to proceed here.
atomic.StoreUint32(&m.nextSlot, slot+1)
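The comments in the hunk above ("allow another set to proceed here") and in the machine.go hunk below ("nextSlot is ^uint32(0)") describe nextSlot doubling as a lock. The following is a hedged, self-contained sketch of that acquire/release handshake; the function names are invented, the backoff here is runtime.Gosched rather than whatever primitive the real fault-handling path uses, and the surrounding constraints of that path are not modeled.

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// acquireSlot claims the next slot index exclusively by swapping the
// ^uint32(0) sentinel into nextSlot. Any other writer that observes the
// sentinel backs off until the current holder publishes slot+1 again.
func acquireSlot(nextSlot *uint32) uint32 {
	slot := atomic.SwapUint32(nextSlot, ^uint32(0))
	for slot == ^uint32(0) {
		runtime.Gosched() // another caller holds the slot; let it finish
		slot = atomic.SwapUint32(nextSlot, ^uint32(0))
	}
	return slot
}

// releaseSlot publishes the next free index, letting other writers proceed.
func releaseSlot(nextSlot *uint32, slot uint32) {
	atomic.StoreUint32(nextSlot, slot+1)
}

func main() {
	var nextSlot uint32
	slot := acquireSlot(&nextSlot) // claims slot 0; nextSlot now holds the sentinel
	// ... install the memory region and record physicalStart in usedSlots here ...
	releaseSlot(&nextSlot, slot) // other callers may proceed
	fmt.Println("next free slot:", atomic.LoadUint32(&nextSlot))
}
```

The point of the sketch is simply that a concurrent reader of nextSlot can observe the ^uint32(0) sentinel, which is exactly the case the new hasSlot code below has to handle.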
diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go
index 1b5d5f66e..e7092a756 100644
--- a/pkg/sentry/platform/kvm/machine.go
+++ b/pkg/sentry/platform/kvm/machine.go
@@ -70,7 +70,7 @@ type machine struct {
// tscControl checks whether cpu supports TSC scaling
tscControl bool
- // usedSlots is the set of used physical addresses (sorted).
+ // usedSlots is the set of used physical addresses (not sorted).
usedSlots []uintptr
// nextID is the next vCPU ID.
@@ -296,13 +296,20 @@ func newMachine(vm int) (*machine, error) {
return m, nil
}
-// hasSlot returns true iff the given address is mapped.
+// hasSlot returns true if the given address is mapped.
//
// This must be done via a linear scan.
//
//go:nosplit
func (m *machine) hasSlot(physical uintptr) bool {
- for i := 0; i < len(m.usedSlots); i++ {
+ slotLen := int(atomic.LoadUint32(&m.nextSlot))
+ // When slots are being updated, nextSlot is ^uint32(0). As this situation
+ // is less likely to happen, we just set slotLen to m.maxSlots and scan the
+ // whole usedSlots array.
+ if slotLen == int(^uint32(0)) {
+     slotLen = m.maxSlots
+ }
+ for i := 0; i < slotLen; i++ {
if p := atomic.LoadUintptr(&m.usedSlots[i]); p == physical {
return true
}
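The fallback branch added above is what keeps the bounded scan safe while a writer holds the sentinel: the allocated length is unknown at that moment, so scanning the whole fixed-size array (whose unused tail is zero and never matches a real address) preserves the old behaviour. The toy below demonstrates this; the `table` type and the addresses are invented for illustration and only mirror the shape of the real code.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// table is a hypothetical, minimal model of the lookup side: usedSlots holds
// region start addresses, and nextSlot is either the count of valid entries
// or the ^uint32(0) sentinel while a writer is mid-update.
type table struct {
	usedSlots []uintptr
	nextSlot  uint32
	maxSlots  int
}

func (t *table) hasSlot(physical uintptr) bool {
	slotLen := int(atomic.LoadUint32(&t.nextSlot))
	if slotLen == int(^uint32(0)) {
		// A writer holds the sentinel: fall back to scanning everything.
		slotLen = t.maxSlots
	}
	for i := 0; i < slotLen; i++ {
		if atomic.LoadUintptr(&t.usedSlots[i]) == physical {
			return true
		}
	}
	return false
}

func main() {
	t := &table{usedSlots: make([]uintptr, 8), maxSlots: 8}
	atomic.StoreUintptr(&t.usedSlots[0], 0x40000000)

	// Writer mid-update: nextSlot holds the sentinel instead of the count.
	atomic.StoreUint32(&t.nextSlot, ^uint32(0))
	fmt.Println(t.hasSlot(0x40000000)) // true: full scan still finds the entry

	// Writer finished: nextSlot publishes the allocated length.
	atomic.StoreUint32(&t.nextSlot, 1)
	fmt.Println(t.hasSlot(0x40000000)) // true: only the allocated prefix is scanned now
}
```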