author     Adin Scannell <ascannell@google.com>   2019-04-26 13:51:48 -0700
committer  Shentubot <shentubot@google.com>       2019-04-26 13:53:12 -0700
commit     5749f64314d38516badec156ab048d3523294a81 (patch)
tree       0f2120a4d4e8d040407c24b70b6a9e5810fd91c2 /pkg
parent     228dc15fd13eb91f03a907f75a3fbcec692a61a3 (diff)
kvm: remove non-sane sanity check
Apparently some platforms don't have pSize < vSize.

Fixes #208

PiperOrigin-RevId: 245480998
Change-Id: I2a98229912f4ccbfcd8e79dfa355104f14275a9c
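For context, the removed check panicked whenever vSize < pSize. Whether that relation holds depends entirely on the host's virtual and physical address-bit widths, and on some platforms the virtual address space is the narrower of the two. A minimal standalone sketch of the comparison, using purely illustrative bit widths rather than values queried from any real machine or from the ring0 package:

package main

import "fmt"

// Illustrative only: these bit widths are hypothetical examples, not
// values read from any particular platform.
func main() {
	cases := []struct{ vBits, pBits uint }{
		{48, 46}, // wider virtual space: vSize > pSize, old check passed.
		{39, 46}, // narrower virtual space: vSize < pSize, old check panicked.
	}
	for _, c := range cases {
		vSize := uint64(1) << c.vBits
		pSize := uint64(1) << c.pBits
		fmt.Printf("vBits=%d pBits=%d vSize<pSize=%v\n", c.vBits, c.pBits, vSize < pSize)
	}
}

With the check gone, fillAddressSpace instead returns early once the host span (after exclusions) already fits within the physical space, as the hunks below show.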
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/platform/kvm/physical_map.go  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/pkg/sentry/platform/kvm/physical_map.go b/pkg/sentry/platform/kvm/physical_map.go
index b908cae6a..9d7dca5b3 100644
--- a/pkg/sentry/platform/kvm/physical_map.go
+++ b/pkg/sentry/platform/kvm/physical_map.go
@@ -50,8 +50,9 @@ type physicalRegion struct {
var physicalRegions []physicalRegion
// fillAddressSpace fills the host address space with PROT_NONE mappings until
-// the number of available bits until we have a host address space size that is
-// equal to the physical address space.
+// we have a host address space size that is less than or equal to the physical
+// address space. This allows us to have an injective host virtual to guest
+// physical mapping.
//
// The excluded regions are returned.
func fillAddressSpace() (excludedRegions []region) {
@@ -67,11 +68,6 @@ func fillAddressSpace() (excludedRegions []region) {
pSize := uintptr(1) << ring0.PhysicalAddressBits()
pSize -= reservedMemory
- // Sanity check.
- if vSize < pSize {
- panic(fmt.Sprintf("vSize (%x) < pSize (%x)", vSize, pSize))
- }
-
// Add specifically excluded regions; see excludeVirtualRegion.
applyVirtualRegions(func(vr virtualRegion) {
if excludeVirtualRegion(vr) {
@@ -81,6 +77,11 @@ func fillAddressSpace() (excludedRegions []region) {
}
})
+ // Do we need any more work?
+ if vSize < pSize {
+ return excludedRegions
+ }
+
// Calculate the required space and fill it.
//
// Note carefully that we add faultBlockSize to required up front, and
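The comment added in the first hunk describes the mechanism: the host address space is packed with PROT_NONE reservations until the usable host span fits within the guest physical span, so every remaining host virtual address can be mapped injectively to a guest physical address. A minimal sketch of that reservation idea, independent of the kvm package (the span size below is an arbitrary example, not the faultBlockSize-derived amount the real code computes):

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

func main() {
	// Reserve a span of host virtual address space with no access rights
	// and no backing memory; the kernel only hands out the addresses.
	const span = 1 << 30 // 1 GiB, illustrative only.
	b, err := syscall.Mmap(-1, 0, span,
		syscall.PROT_NONE,
		syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE|syscall.MAP_NORESERVE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(b)
	fmt.Printf("reserved %#x bytes at %#x\n", len(b), uintptr(unsafe.Pointer(&b[0])))
}

Because the mapping is PROT_NONE and MAP_NORESERVE, the reservation consumes address space but no physical memory, which is all the fill step needs.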