xen/560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
Charles Arnold 19d8f590f0
- bsc#949046 - Increase %suse_version in SP1 to 1316
  xen.spec

- bsc#945167 - Running command 'xl pci-assignable-add 03:10.1'
  a second time shows errors
  55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
- Upstream patches from Jan
  55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
  55f9345b-x86-MSI-fail-if-no-hardware-support.patch
  5604f239-x86-PV-properly-populate-descriptor-tables.patch
  5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
  560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
  560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
  560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
  560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
  560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch

- bsc#941074 - VmError: Device 51728 (vbd) could not be connected.
  Hotplug scripts not working.
  hotplug-Linux-block-performance-fix.patch

- bsc#947165 - VUL-0: CVE-2015-7311: xen: libxl fails to honour
  readonly flag on disks with qemu-xen (xsa-142)
  CVE-2015-7311-xsa142.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=378
2015-10-06 15:11:48 +00:00


# Commit 660fd65d5578a95ec5eac522128bba23325179eb
# Date 2015-10-02 13:40:36 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/p2m-pt: tighten conditions of IOMMU mapping updates

Whether the MFN changes does not depend on the new entry being valid
(but solely on the old one), and the need to update or TLB-flush also
depends on permission changes.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
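In essence, the patch below makes the IOMMU update/flush decision depend
only on whether the old and new mappings actually differ. As a minimal
standalone C sketch (struct pte_info, iommu_update_needed and
IOMMU_FLAGS_UNKNOWN are invented here for illustration, not Xen
identifiers):

#include <stdbool.h>

/* Mirrors the patch's ~0 sentinel for present intermediate entries: it
 * can never equal a real flags value, so the comparison below always
 * reports "update needed" for them. */
#define IOMMU_FLAGS_UNKNOWN (~0u)

struct pte_info {
    unsigned long mfn;   /* machine frame the entry maps */
    unsigned int flags;  /* IOMMU permission flags; 0 = not present */
};

/* An IOMMU mapping update (and flush) is needed exactly when the MFN
 * or the permissions change -- independent of whether the *new* entry
 * is valid. */
static bool iommu_update_needed(struct pte_info old_e, struct pte_info new_e)
{
    return old_e.flags != new_e.flags || old_e.mfn != new_e.mfn;
}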
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -493,7 +493,18 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
l3_pgentry_t l3e_content;
int rc;
unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
- unsigned long old_mfn = 0;
+ /*
+ * old_mfn and iommu_old_flags control possible flush/update needs on the
+ * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
+ * iommu_old_flags being initialized to zero covers the case of the entry
+ * getting replaced being a non-present (leaf or intermediate) one. For
+ * present leaf entries the real value will get calculated below, while
+ * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
+ * will be used (to cover all cases of what the leaf entries underneath
+ * the intermediate one might be).
+ */
+ unsigned int flags, iommu_old_flags = 0;
+ unsigned long old_mfn = INVALID_MFN;
if ( tb_init_done )
{
@@ -540,12 +551,20 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
L3_PAGETABLE_ENTRIES);
ASSERT(p2m_entry);
- if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
- !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+ flags = l1e_get_flags(*p2m_entry);
+ if ( flags & _PAGE_PRESENT )
{
- /* We're replacing a non-SP page with a superpage. Make sure to
- * handle freeing the table properly. */
- intermediate_entry = *p2m_entry;
+ if ( flags & _PAGE_PSE )
+ {
+ iommu_old_flags =
+ p2m_get_iommu_flags(p2m_flags_to_type(flags));
+ old_mfn = l1e_get_pfn(*p2m_entry);
+ }
+ else
+ {
+ iommu_old_flags = ~0;
+ intermediate_entry = *p2m_entry;
+ }
}
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
@@ -556,10 +575,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
entry_content.l1 = l3e_content.l3;
if ( entry_content.l1 != 0 )
- {
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
- old_mfn = l1e_get_pfn(*p2m_entry);
- }
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -584,7 +600,10 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
0, L1_PAGETABLE_ENTRIES);
ASSERT(p2m_entry);
-
+ iommu_old_flags =
+ p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)));
+ old_mfn = l1e_get_pfn(*p2m_entry);
+
if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct)
|| p2m_is_paging(p2mt) )
entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
@@ -593,10 +612,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
entry_content = l1e_empty();
if ( entry_content.l1 != 0 )
- {
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
- old_mfn = l1e_get_pfn(*p2m_entry);
- }
+
/* level 1 entry */
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -607,14 +624,20 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
L2_PAGETABLE_SHIFT - PAGE_SHIFT,
L2_PAGETABLE_ENTRIES);
ASSERT(p2m_entry);
-
- /* FIXME: Deal with 4k replaced by 2meg pages */
- if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
- !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
- {
- /* We're replacing a non-SP page with a superpage. Make sure to
- * handle freeing the table properly. */
- intermediate_entry = *p2m_entry;
+ flags = l1e_get_flags(*p2m_entry);
+ if ( flags & _PAGE_PRESENT )
+ {
+ if ( flags & _PAGE_PSE )
+ {
+ iommu_old_flags =
+ p2m_get_iommu_flags(p2m_flags_to_type(flags));
+ old_mfn = l1e_get_pfn(*p2m_entry);
+ }
+ else
+ {
+ iommu_old_flags = ~0;
+ intermediate_entry = *p2m_entry;
+ }
}
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
@@ -628,10 +651,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
entry_content.l1 = l2e_content.l2;
if ( entry_content.l1 != 0 )
- {
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
- old_mfn = l1e_get_pfn(*p2m_entry);
- }
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -642,17 +662,17 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
&& (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
- if ( iommu_enabled && need_iommu(p2m->domain) )
+ if ( iommu_enabled && need_iommu(p2m->domain) &&
+ (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
{
if ( iommu_use_hap_pt(p2m->domain) )
{
- if ( old_mfn && (old_mfn != mfn_x(mfn)) )
+ if ( iommu_old_flags )
amd_iommu_flush_pages(p2m->domain, gfn, page_order);
}
else
{
- unsigned int flags = p2m_get_iommu_flags(p2mt);
-
+ flags = p2m_get_iommu_flags(p2mt);
if ( flags != 0 )
for ( i = 0; i < (1UL << page_order); i++ )
iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags);
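
As the comment introduced at the top of p2m_pt_set_entry() explains, the
pre-change state is summarized in iommu_old_flags/old_mfn: 0 for a
non-present old entry, the real flags for a present leaf, and ~0 for a
present intermediate table. A hedged sketch of that classification for
the superpage (L3/L2) paths -- the helper name and flag values below
mirror x86 but are illustrative stand-ins, not the actual Xen code:

#define _PAGE_PRESENT 0x001u  /* x86 PTE present bit */
#define _PAGE_PSE     0x080u  /* x86 superpage (leaf) bit */

/* leaf_iommu_flags stands in for Xen's
 * p2m_get_iommu_flags(p2m_flags_to_type(flags)) chain. */
static unsigned int old_iommu_flags(unsigned int l1_flags,
                                    unsigned int leaf_iommu_flags)
{
    if ( !(l1_flags & _PAGE_PRESENT) )
        return 0;                /* old entry absent: nothing was mapped */
    if ( l1_flags & _PAGE_PSE )
        return leaf_iommu_flags; /* present leaf: its real permissions */
    return ~0u;                  /* intermediate table: forces an update,
                                  * whatever its leaf entries held */
}

With iommu_old_flags seeded this way and old_mfn starting at
INVALID_MFN, the single test
    iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)
covers every replacement case without special-casing.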