xen/22504-iommu-dom0-holes.patch
Charles Arnold 0c76f22ef1 - Update to Xen 4.0.2 rc2-pre, changeset 21443
- bnc#633573 - System fails to boot after running several warm
  reboot tests
  22749-vtd-workarounds.patch
- Upstream patches from Jan
  22744-ept-pod-locking.patch
  22777-vtd-ats-fixes.patch
  22781-pod-hap-logdirty.patch
  22782-x86-emul-smsw.patch
  22789-i386-no-x2apic.patch
  22790-svm-resume-migrate-pirqs.patch
  22816-x86-pirq-drop-priv-check.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=94
2011-02-04 21:19:54 +00:00

# HG changeset patch
# User Keir Fraser <keir@xen.org>
# Date 1292320377 0
# Node ID fd4cbfbbd83e6091a343844eae1da1468f54b72b
# Parent ab785e37499c8cdadd1fd5e4ab1bfbbacebf358b
x86/iommu: don't map RAM holes above 4G

References: bnc#658163

Matching the comment in iommu_set_dom0_mapping(), map only actual RAM
from the address range starting at 4G. It's not clear though whether
that comment is actually correct (which is why I'm sending this as
RFC), but it is certain that on systems with sparse physical memory
map we're currently wasting a potentially significant amount of memory
for setting up IOMMU page tables that will never be used.

The main question is what happens for MMIO ranges living above 4G. Of
course, the same issue would currently exist for any such ranges
sitting beyond the end of RAM.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
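
[Editor's note: the pfn_to_pdx()/pdx_to_pfn() helpers and the max_pdx
bound used throughout this patch come from Xen's "pdx" page-index
compression, which squeezes large holes out of the machine address
space so that loops over 0..max_pdx never visit them. As a rough
illustration only, here is a single-hole model with made-up constants,
not Xen's mask-based implementation:]

#include <assert.h>
#include <stdio.h>

#define HOLE_START 0x100000UL  /* hypothetical: hole begins at 4GiB (4k pages) */
#define HOLE_PAGES 0xf00000UL  /* hypothetical: 60GiB worth of hole */

static unsigned long pfn_to_pdx(unsigned long pfn)
{
    /* pfns beyond the hole shift down, so the index space stays dense */
    return pfn < HOLE_START ? pfn : pfn - HOLE_PAGES;
}

static unsigned long pdx_to_pfn(unsigned long pdx)
{
    return pdx < HOLE_START ? pdx : pdx + HOLE_PAGES;
}

int main(void)
{
    unsigned long pfn = HOLE_START + HOLE_PAGES;  /* first pfn after the hole */

    assert(pdx_to_pfn(pfn_to_pdx(pfn)) == pfn);   /* round trip is exact */
    printf("pfn %#lx compresses to pdx %#lx\n", pfn, pfn_to_pdx(pfn));
    return 0;
}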
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -194,7 +194,7 @@ static unsigned long __init compute_dom0
         unsigned int s;
 
         for ( s = 9; s < BITS_PER_LONG; s += 9 )
-            avail -= max_page >> s;
+            avail -= max_pdx >> s;
     }
 
     /*
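
[Editor's note: the hunk above fixes dom0's memory sizing heuristic.
With 512 (2^9) entries per page-table page on x86-64, mapping N pages
costs roughly N/2^9 L1 table pages, N/2^18 L2 pages, and so on; that
is what the "s += 9" loop sums. Bounding the sum by max_pdx instead of
max_page stops the estimate from charging dom0 for IOMMU tables that
would only cover holes. A standalone sketch of the same arithmetic,
with made-up page counts:]

#include <stdio.h>

#define BITS_PER_LONG 64

static unsigned long pt_overhead(unsigned long npages)
{
    unsigned long need = 0;
    unsigned int s;

    for ( s = 9; s < BITS_PER_LONG; s += 9 )
        need += npages >> s;  /* L1 tables at s=9, L2 at s=18, ... */
    return need;
}

int main(void)
{
    unsigned long dense  = 0x400000UL;    /* max_pdx-style bound: 16GiB of RAM */
    unsigned long sparse = 0x10000000UL;  /* max_page-style bound: hole to 1TiB */

    printf("dense bound:  %lu table pages\n", pt_overhead(dense));
    printf("sparse bound: %lu table pages\n", pt_overhead(sparse));
    return 0;
}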
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -239,8 +239,16 @@ static void amd_iommu_dom0_init(struct d
     if ( !iommu_passthrough && !need_iommu(d) )
     {
         /* Set up 1:1 page table for dom0 */
-        for ( i = 0; i < max_page; i++ )
-            amd_iommu_map_page(d, i, i, IOMMUF_readable|IOMMUF_writable);
+        for ( i = 0; i < max_pdx; i++ )
+        {
+            unsigned long pfn = pdx_to_pfn(i);
+
+            /*
+             * XXX Should we really map all non-RAM (above 4G)? Minimally
+             * a pfn_valid() check would seem desirable here.
+             */
+            amd_iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
+        }
     }
 
     amd_iommu_setup_dom0_devices(d);
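
[Editor's note: the reworked AMD loop above walks the dense pdx space
and translates each index back to a pfn before installing the 1:1
mapping. The XXX comment questions whether non-RAM pfns should be
mapped at all; a compilable sketch of the loop shape with the
pfn_valid()-style filter it suggests follows. pdx_to_pfn(),
pfn_is_ram() and map_identity() are stand-ins here, not Xen's
functions:]

#include <stdio.h>

static unsigned long pdx_to_pfn(unsigned long pdx) { return pdx; }  /* stub */
static int pfn_is_ram(unsigned long pfn) { return pfn < 4; }        /* stub */

static void map_identity(unsigned long pfn)
{
    /* Stands in for amd_iommu_map_page(d, pfn, pfn, readable|writable). */
    printf("map %#lx -> %#lx\n", pfn, pfn);
}

static void dom0_identity_map(unsigned long max_pdx)
{
    unsigned long i;

    for ( i = 0; i < max_pdx; i++ )
    {
        unsigned long pfn = pdx_to_pfn(i);

        if ( !pfn_is_ram(pfn) )  /* the extra check the XXX asks about */
            continue;
        map_identity(pfn);
    }
}

int main(void)
{
    dom0_identity_map(8);  /* maps only the four stub-RAM pfns */
    return 0;
}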
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -128,14 +128,14 @@ void hvm_dpci_isairq_eoi(struct domain *
 
 void iommu_set_dom0_mapping(struct domain *d)
 {
-    u64 i, j, tmp, max_pfn;
+    unsigned long i, j, tmp, top;
     extern int xen_in_range(unsigned long mfn);
 
     BUG_ON(d->domain_id != 0);
 
-    max_pfn = max_t(u64, max_page, 0x100000000ull >> PAGE_SHIFT);
+    top = max(max_pdx, pfn_to_pdx(0xffffffffUL >> PAGE_SHIFT) + 1);
 
-    for ( i = 0; i < max_pfn; i++ )
+    for ( i = 0; i < top; i++ )
     {
         /*
          * Set up 1:1 mapping for dom0. Default to use only conventional RAM
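
[Editor's note: the new bound above preserves the old guarantee that
everything below 4GiB gets walked even when RAM ends lower, while
switching the loop to compressed indices: top is the larger of max_pdx
and the pdx just past the highest pfn below 4GiB. In standalone form,
with pfn_to_pdx() stubbed as the identity, which holds when no hole
sits below 4GiB:]

#include <stdio.h>

#define PAGE_SHIFT 12
#define max(a, b) ((a) > (b) ? (a) : (b))

static unsigned long pfn_to_pdx(unsigned long pfn) { return pfn; }  /* stub */

int main(void)
{
    unsigned long max_pdx = 0x80000UL;  /* hypothetical box: only 2GiB of RAM */
    unsigned long top = max(max_pdx,
                            pfn_to_pdx(0xffffffffUL >> PAGE_SHIFT) + 1);

    printf("top = %#lx (the 4GiB boundary wins here)\n", top);
    return 0;
}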
@@ -143,18 +143,23 @@ void iommu_set_dom0_mapping(struct domai
          * inclusive mapping maps in everything below 4GB except unusable
          * ranges.
          */
-        if ( !page_is_ram_type(i, RAM_TYPE_CONVENTIONAL) &&
-             (!iommu_inclusive_mapping ||
-              page_is_ram_type(i, RAM_TYPE_UNUSABLE)) )
+        unsigned long pfn = pdx_to_pfn(i);
+
+        if ( pfn > (0xffffffffUL >> PAGE_SHIFT) ?
+             (!mfn_valid(pfn) ||
+              !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL)) :
+             iommu_inclusive_mapping ?
+             page_is_ram_type(pfn, RAM_TYPE_UNUSABLE) :
+             !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
             continue;
 
         /* Exclude Xen bits */
-        if ( xen_in_range(i) )
+        if ( xen_in_range(pfn) )
             continue;
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, (i*tmp+j), (i*tmp+j),
+            iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
                            IOMMUF_readable|IOMMUF_writable);
 
         if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
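
[Editor's note: the nested conditional added above is compact but
dense. Unfolded, the skip policy reads as below; this is a restatement
for clarity, with the Xen predicates stubbed over made-up pfn ranges
so the example compiles standalone:]

#include <stdio.h>

#define PAGE_SHIFT 12
enum { RAM_TYPE_CONVENTIONAL, RAM_TYPE_UNUSABLE };

static int iommu_inclusive_mapping = 1;                             /* stub */
static int mfn_valid(unsigned long pfn) { return pfn < 0x200000; }  /* stub */
static int page_is_ram_type(unsigned long pfn, int type)            /* stub */
{
    return type == RAM_TYPE_CONVENTIONAL && pfn < 0x100000;
}

/* Nonzero when the dom0 1:1 setup should skip this pfn. */
static int skip_pfn(unsigned long pfn)
{
    if ( pfn > (0xffffffffUL >> PAGE_SHIFT) )
        /* Above 4GiB: map conventional RAM only. */
        return !mfn_valid(pfn) ||
               !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);
    if ( iommu_inclusive_mapping )
        /* Below 4GiB, inclusive mode: map everything except unusable ranges. */
        return page_is_ram_type(pfn, RAM_TYPE_UNUSABLE);
    /* Below 4GiB, default mode: map conventional RAM only. */
    return !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);
}

int main(void)
{
    printf("pfn 0x1000 skipped? %d\n", skip_pfn(0x1000UL));      /* 0: mapped */
    printf("pfn 0x180000 skipped? %d\n", skip_pfn(0x180000UL));  /* 1: non-RAM above 4GiB */
    return 0;
}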