- Upstream patches from Jan

  5321b20b-common-make-hypercall-preemption-checks-consistent.patch
  5321b257-x86-make-hypercall-preemption-checks-consistent.patch
  53271880-VT-d-fix-RMRR-handling.patch
  5327190a-x86-Intel-work-around-Xeon-7400-series-erratum-AAI65.patch
- Dropped the following as now part of 5321b257
  5310bac3-mm-ensure-useful-progress-in-decrease_reservation.patch
- bnc#867910 - VUL-0: EMBARGOED: xen: XSA-89: HVMOP_set_mem_access
  is not preemptible
  xsa89.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=306
Commit 314583a822 (parent e46082b3ea), Charles Arnold, 2014-03-27 22:23:58 +00:00, committed by Git OBS Bridge.
8 changed files with 685 additions and 45 deletions.

5310bac3-mm-ensure-useful-progress-in-decrease_reservation.patch (file removed)

@@ -1,32 +0,0 @@
# Commit 79de2d31f1ff8910231b7ec15519405953e6571a
# Date 2014-02-28 17:35:15 +0100
# Author Wei Liu <wei.liu2@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
mm: ensure useful progress in decrease_reservation
During my fun time playing with the balloon driver I found that the
hypervisor's preemption check kept decrease_reservation from doing any
useful work for 32-bit guests, resulting in the guests hanging.
As Andrew suggested, we can force the check to fail for the first
iteration to ensure progress. We did this in d3a55d7d9 "x86/mm: Ensure
useful progress in alloc_l2_table()" already.
After this change I cannot see the hang caused by continuation logic
anymore.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -268,7 +268,7 @@ static void decrease_reservation(struct
for ( i = a->nr_done; i < a->nr_extents; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( hypercall_preempt_check() && i != a->nr_done )
{
a->preempted = 1;
goto out;
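
The commit message above describes a livelock: with softirq work permanently pending, the continuation restarts at nr_done, the preemption check fires again before any extent is released, and the guest never makes progress. A minimal user-space sketch of the effect and of the guard that fixes it (not Xen code; hypercall_preempt_check() is stubbed to model the worst case where it always fires):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for Xen's hypercall_preempt_check(): model the worst case,
     * where pending work makes the check fire on every invocation. */
    static bool hypercall_preempt_check(void) { return true; }

    /* Process extents [nr_done, nr_extents); return the updated nr_done. */
    static unsigned int decrease_reservation(unsigned int nr_done,
                                             unsigned int nr_extents,
                                             bool guard_first_iteration)
    {
        unsigned int i;

        for ( i = nr_done; i < nr_extents; i++ )
        {
            /* With the guard, the check is skipped on the first iteration,
             * so every (re)invocation frees at least one extent. */
            if ( (!guard_first_iteration || i != nr_done) &&
                 hypercall_preempt_check() )
                break;              /* Xen would set a->preempted = 1 here */
            /* ... release one extent ... */
        }
        return i;
    }

    int main(void)
    {
        /* Unguarded: nr_done never advances, the continuation livelocks. */
        printf("unguarded: nr_done stays at %u\n",
               decrease_reservation(0, 8, false));
        /* Guarded: one extent per pass, so the hypercall eventually completes. */
        printf("guarded:   nr_done advances to %u\n",
               decrease_reservation(0, 8, true));
        return 0;
    }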

5321b20b-common-make-hypercall-preemption-checks-consistent.patch (new file)

@@ -0,0 +1,81 @@
# Commit 8c0eed2cc8d8a2ccccdffe4c386b625b672dc12a
# Date 2014-03-13 14:26:35 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
common: make hypercall preemption checks consistent
- never preempt on the first iteration (ensure forward progress)
- do cheap checks first
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Acked-by: Keir Fraser <keir@xen.org>
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -63,7 +63,7 @@ static void increase_reservation(struct
for ( i = a->nr_done; i < a->nr_extents; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( i != a->nr_done && hypercall_preempt_check() )
{
a->preempted = 1;
goto out;
@@ -109,7 +109,7 @@ static void populate_physmap(struct memo
for ( i = a->nr_done; i < a->nr_extents; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( i != a->nr_done && hypercall_preempt_check() )
{
a->preempted = 1;
goto out;
@@ -268,7 +268,7 @@ static void decrease_reservation(struct
for ( i = a->nr_done; i < a->nr_extents; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( i != a->nr_done && hypercall_preempt_check() )
{
a->preempted = 1;
goto out;
@@ -398,7 +398,8 @@ static long memory_exchange(XEN_GUEST_HA
i < (exch.in.nr_extents >> in_chunk_order);
i++ )
{
- if ( hypercall_preempt_check() )
+ if ( i != (exch.nr_exchanged >> in_chunk_order) &&
+ hypercall_preempt_check() )
{
exch.nr_exchanged = i << in_chunk_order;
rcu_unlock_domain(d);
--- a/xen/common/multicall.c
+++ b/xen/common/multicall.c
@@ -52,7 +52,7 @@ do_multicall(
for ( i = 0; !rc && i < nr_calls; i++ )
{
- if ( hypercall_preempt_check() )
+ if ( i && hypercall_preempt_check() )
goto preempted;
if ( unlikely(__copy_from_guest(&mcs->call, call_list, 1)) )
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -375,12 +375,12 @@ static DECLARE_SOFTIRQ_TASKLET(notify_do
static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer, int count)
{
char kbuf[128];
- int kcount;
+ int kcount = 0;
struct domain *cd = current->domain;
while ( count > 0 )
{
- if ( hypercall_preempt_check() )
+ if ( kcount && hypercall_preempt_check() )
return hypercall_create_continuation(
__HYPERVISOR_console_io, "iih",
CONSOLEIO_write, count, buffer);

5321b257-x86-make-hypercall-preemption-checks-consistent.patch (new file)

@@ -0,0 +1,156 @@
# Commit fd7bfce0395ace266159760e35dc49f7af3b90ce
# Date 2014-03-13 14:27:51 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: make hypercall preemption checks consistent
- never preempt on the first iteration (ensure forward progress)
- never preempt on the last iteration (pointless/wasteful)
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Acked-by: Keir Fraser <keir@xen.org>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2934,7 +2934,7 @@ long do_mmuext_op(
for ( i = 0; i < count; i++ )
{
- if ( curr->arch.old_guest_table || hypercall_preempt_check() )
+ if ( curr->arch.old_guest_table || (i && hypercall_preempt_check()) )
{
rc = -EAGAIN;
break;
@@ -3481,7 +3481,7 @@ long do_mmu_update(
for ( i = 0; i < count; i++ )
{
- if ( curr->arch.old_guest_table || hypercall_preempt_check() )
+ if ( curr->arch.old_guest_table || (i && hypercall_preempt_check()) )
{
rc = -EAGAIN;
break;
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -326,7 +326,7 @@ hap_set_allocation(struct domain *d, uns
else
pages -= d->arch.paging.hap.p2m_pages;
- while ( d->arch.paging.hap.total_pages != pages )
+ for ( ; ; )
{
if ( d->arch.paging.hap.total_pages < pages )
{
@@ -355,6 +355,8 @@ hap_set_allocation(struct domain *d, uns
d->arch.paging.hap.total_pages--;
free_domheap_page(pg);
}
+ else
+ break;
/* Check to see if we need to yield and try again */
if ( preempted && hypercall_preempt_check() )
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -242,7 +242,8 @@ p2m_pod_set_cache_target(struct p2m_doma
p2m_pod_cache_add(p2m, page, order);
- if ( hypercall_preempt_check() && preemptible )
+ if ( preemptible && pod_target != p2m->pod.count &&
+ hypercall_preempt_check() )
{
ret = -EAGAIN;
goto out;
@@ -286,7 +287,8 @@ p2m_pod_set_cache_target(struct p2m_doma
put_page(page+i);
- if ( hypercall_preempt_check() && preemptible )
+ if ( preemptible && pod_target != p2m->pod.count &&
+ hypercall_preempt_check() )
{
ret = -EAGAIN;
goto out;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1674,7 +1674,7 @@ static unsigned int sh_set_allocation(st
SHADOW_PRINTK("current %i target %i\n",
d->arch.paging.shadow.total_pages, pages);
- while ( d->arch.paging.shadow.total_pages != pages )
+ for ( ; ; )
{
if ( d->arch.paging.shadow.total_pages < pages )
{
@@ -1709,6 +1709,8 @@ static unsigned int sh_set_allocation(st
d->arch.paging.shadow.total_pages--;
free_domheap_page(sp);
}
+ else
+ break;
/* Check to see if we need to yield and try again */
if ( preempted && hypercall_preempt_check() )
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3595,13 +3595,6 @@ long do_set_trap_table(XEN_GUEST_HANDLE_
for ( ; ; )
{
- if ( hypercall_preempt_check() )
- {
- rc = hypercall_create_continuation(
- __HYPERVISOR_set_trap_table, "h", traps);
- break;
- }
-
if ( copy_from_guest(&cur, traps, 1) )
{
rc = -EFAULT;
@@ -3622,6 +3615,13 @@ long do_set_trap_table(XEN_GUEST_HANDLE_
init_int80_direct_trap(curr);
guest_handle_add_offset(traps, 1);
+
+ if ( hypercall_preempt_check() )
+ {
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_set_trap_table, "h", traps);
+ break;
+ }
}
return rc;
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -329,13 +329,6 @@ int compat_set_trap_table(XEN_GUEST_HAND
for ( ; ; )
{
- if ( hypercall_preempt_check() )
- {
- rc = hypercall_create_continuation(
- __HYPERVISOR_set_trap_table, "h", traps);
- break;
- }
-
if ( copy_from_guest(&cur, traps, 1) )
{
rc = -EFAULT;
@@ -353,6 +346,13 @@ int compat_set_trap_table(XEN_GUEST_HAND
init_int80_direct_trap(current);
guest_handle_add_offset(traps, 1);
+
+ if ( hypercall_preempt_check() )
+ {
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_set_trap_table, "h", traps);
+ break;
+ }
}
return rc;
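
The most involved change here is do_set_trap_table() (and its compat variant): the preemption check moves from the top of the loop to after an entry has been installed and the guest handle advanced, so a continuation can never be created before at least one trap gate has been processed. A stubbed sketch of the resulting loop shape, with plain C stand-ins for the Xen guest-handle and continuation primitives (the names below are illustrative, not the real API):

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative stand-ins for the Xen primitives (not the real API). */
    struct trap_info { unsigned char vector; unsigned long address; };

    static bool hypercall_preempt_check(void) { return false; }
    static long create_continuation(const struct trap_info *resume_at)
    { (void)resume_at; return 0; /* the hypercall restarts from resume_at */ }

    static long set_trap_table(const struct trap_info *traps)
    {
        long rc = 0;

        for ( ; ; )
        {
            struct trap_info cur = *traps;   /* copy_from_guest() in Xen */

            if ( cur.address == 0 )          /* terminating entry */
                break;

            /* ... validate and install the trap gate ... */

            traps++;                         /* guest_handle_add_offset() */

            /*
             * Only now check for preemption: the continuation resumes at
             * the advanced handle, so each invocation is guaranteed to
             * have installed at least one entry.
             */
            if ( hypercall_preempt_check() )
            {
                rc = create_continuation(traps);
                break;
            }
        }

        return rc;
    }

    int main(void)
    {
        const struct trap_info table[] =
            { { 3, 0x1000 }, { 14, 0x2000 }, { 0, 0 } };

        return (int)set_trap_table(table);
    }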

53271880-VT-d-fix-RMRR-handling.patch (new file)

@@ -0,0 +1,255 @@
# Commit dd527061770789d8152b1dea68056987b202d87a
# Date 2014-03-17 16:45:04 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
VT-d: fix RMRR handling
Removing mapped RMRR tracking structures in dma_pte_clear_one() is
wrong for two reasons: First, these regions may cover more than a
single page. And second, multiple devices (and hence multiple devices
assigned to any particular guest) may share a single RMRR (whether
assigning such devices to distinct guests is a safe thing to do is
another question).
Therefore move the removal of the tracking structures into the
counterpart function to the one doing the insertion -
intel_iommu_remove_device(), and add a reference count to the tracking
structure.
Further, for the handling of the mappings of the respective memory
regions to be correct, RMRRs must not overlap. Add a respective check
to acpi_parse_one_rmrr().
And finally, with all of this being VT-d specific, move the cleanup
of the list as well as the structure type definition where it belongs -
in VT-d specific rather than IOMMU generic code.
Note that this doesn't address yet another issue associated with RMRR
handling: The purpose of the RMRRs as well as the way the respective
IOMMU page table mappings get inserted both suggest that these regions
would need to be marked E820_RESERVED in all (HVM?) guests' memory
maps, yet nothing like this is being done in hvmloader. (For PV guests
this would also seem to be necessary, but may conflict with PV guests
possibly assuming there to be just a single E820 entry representing all
of its RAM.)
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -412,9 +412,8 @@ static int iommu_populate_page_table(str
void iommu_domain_destroy(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct list_head *ioport_list, *rmrr_list, *tmp;
+ struct list_head *ioport_list, *tmp;
struct g2m_ioport *ioport;
- struct mapped_rmrr *mrmrr;
if ( !iommu_enabled || !hd->platform_ops )
return;
@@ -428,13 +427,6 @@ void iommu_domain_destroy(struct domain
list_del(&ioport->list);
xfree(ioport);
}
-
- list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
- {
- mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
- list_del(&mrmrr->list);
- xfree(mrmrr);
- }
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -580,6 +580,16 @@ acpi_parse_one_rmrr(struct acpi_dmar_hea
if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 )
return ret;
+ list_for_each_entry(rmrru, &acpi_rmrr_units, list)
+ if ( base_addr <= rmrru->end_address && rmrru->base_address <= end_addr )
+ {
+ printk(XENLOG_ERR VTDPREFIX
+ "Overlapping RMRRs [%"PRIx64",%"PRIx64"] and [%"PRIx64",%"PRIx64"]\n",
+ rmrru->base_address, rmrru->end_address,
+ base_addr, end_addr);
+ return -EEXIST;
+ }
+
/* This check is here simply to detect when RMRR values are
* not properly represented in the system memory map and
* inform the user
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -42,6 +42,12 @@
#include "vtd.h"
#include "../ats.h"
+struct mapped_rmrr {
+ struct list_head list;
+ u64 base, end;
+ unsigned int count;
+};
+
/* Possible unfiltered LAPIC/MSI messages from untrusted sources? */
bool_t __read_mostly untrusted_msi;
@@ -619,7 +625,6 @@ static void dma_pte_clear_one(struct dom
struct hvm_iommu *hd = domain_hvm_iommu(domain);
struct dma_pte *page = NULL, *pte = NULL;
u64 pg_maddr;
- struct mapped_rmrr *mrmrr;
spin_lock(&hd->mapping_lock);
/* get last level pte */
@@ -648,21 +653,6 @@ static void dma_pte_clear_one(struct dom
__intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
unmap_vtd_domain_page(page);
-
- /* if the cleared address is between mapped RMRR region,
- * remove the mapped RMRR
- */
- spin_lock(&hd->mapping_lock);
- list_for_each_entry ( mrmrr, &hd->mapped_rmrrs, list )
- {
- if ( addr >= mrmrr->base && addr <= mrmrr->end )
- {
- list_del(&mrmrr->list);
- xfree(mrmrr);
- break;
- }
- }
- spin_unlock(&hd->mapping_lock);
}
static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1700,10 +1690,17 @@ static int reassign_device_ownership(
void iommu_domain_teardown(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
+ struct mapped_rmrr *mrmrr, *tmp;
if ( list_empty(&acpi_drhd_units) )
return;
+ list_for_each_entry_safe ( mrmrr, tmp, &hd->mapped_rmrrs, list )
+ {
+ list_del(&mrmrr->list);
+ xfree(mrmrr);
+ }
+
if ( iommu_use_hap_pt(d) )
return;
@@ -1848,14 +1845,17 @@ static int rmrr_identity_mapping(struct
ASSERT(rmrr->base_address < rmrr->end_address);
/*
- * No need to acquire hd->mapping_lock, as the only theoretical race is
- * with the insertion below (impossible due to holding pcidevs_lock).
+ * No need to acquire hd->mapping_lock: Both insertion and removal
+ * get done while holding pcidevs_lock.
*/
list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
{
if ( mrmrr->base == rmrr->base_address &&
mrmrr->end == rmrr->end_address )
+ {
+ ++mrmrr->count;
return 0;
+ }
}
base = rmrr->base_address & PAGE_MASK_4K;
@@ -1876,9 +1876,8 @@ static int rmrr_identity_mapping(struct
return -ENOMEM;
mrmrr->base = rmrr->base_address;
mrmrr->end = rmrr->end_address;
- spin_lock(&hd->mapping_lock);
+ mrmrr->count = 1;
list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
- spin_unlock(&hd->mapping_lock);
return 0;
}
@@ -1940,17 +1939,52 @@ static int intel_iommu_remove_device(u8
if ( !pdev->domain )
return -EINVAL;
- /* If the device belongs to dom0, and it has RMRR, don't remove it
- * from dom0, because BIOS may use RMRR at booting time.
- */
- if ( pdev->domain->domain_id == 0 )
+ for_each_rmrr_device ( rmrr, bdf, i )
{
- for_each_rmrr_device ( rmrr, bdf, i )
+ struct hvm_iommu *hd;
+ struct mapped_rmrr *mrmrr, *tmp;
+
+ if ( rmrr->segment != pdev->seg ||
+ PCI_BUS(bdf) != pdev->bus ||
+ PCI_DEVFN2(bdf) != devfn )
+ continue;
+
+ /*
+ * If the device belongs to dom0, and it has RMRR, don't remove
+ * it from dom0, because BIOS may use RMRR at booting time.
+ */
+ if ( is_hardware_domain(pdev->domain) )
+ return 0;
+
+ hd = domain_hvm_iommu(pdev->domain);
+
+ /*
+ * No need to acquire hd->mapping_lock: Both insertion and removal
+ * get done while holding pcidevs_lock.
+ */
+ ASSERT(spin_is_locked(&pcidevs_lock));
+ list_for_each_entry_safe ( mrmrr, tmp, &hd->mapped_rmrrs, list )
{
- if ( rmrr->segment == pdev->seg &&
- PCI_BUS(bdf) == pdev->bus &&
- PCI_DEVFN2(bdf) == devfn )
- return 0;
+ unsigned long base_pfn, end_pfn;
+
+ if ( rmrr->base_address != mrmrr->base ||
+ rmrr->end_address != mrmrr->end )
+ continue;
+
+ if ( --mrmrr->count )
+ break;
+
+ base_pfn = (mrmrr->base & PAGE_MASK_4K) >> PAGE_SHIFT_4K;
+ end_pfn = PAGE_ALIGN_4K(mrmrr->end) >> PAGE_SHIFT_4K;
+ while ( base_pfn < end_pfn )
+ {
+ if ( intel_iommu_unmap_page(pdev->domain, base_pfn) )
+ return -ENXIO;
+ base_pfn++;
+ }
+
+ list_del(&mrmrr->list);
+ xfree(mrmrr);
}
}
--- a/xen/include/xen/hvm/iommu.h
+++ b/xen/include/xen/hvm/iommu.h
@@ -29,12 +29,6 @@ struct g2m_ioport {
unsigned int np;
};
-struct mapped_rmrr {
- struct list_head list;
- u64 base;
- u64 end;
-};
-
struct hvm_iommu {
u64 pgd_maddr; /* io page directory machine address */
spinlock_t mapping_lock; /* io page table lock */
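
The fix replaces per-page removal of the tracking structures with a per-RMRR reference count: insertion bumps the count when the region is already tracked, removal in intel_iommu_remove_device() only unmaps and frees once the last device referencing the RMRR is gone, and acpi_parse_one_rmrr() now rejects overlapping RMRRs. A self-contained sketch of that bookkeeping (plain C with malloc/free standing in for Xen's allocator; not the actual Xen code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Refcounted tracking entry, mirroring the idea behind struct mapped_rmrr:
     * one entry per distinct RMRR, shared by every device that maps it. */
    struct tracked_rmrr {
        struct tracked_rmrr *next;
        uint64_t base, end;          /* inclusive address range */
        unsigned int count;          /* devices currently mapping this RMRR */
    };

    /* Two inclusive ranges overlap iff each starts no later than the other
     * ends -- the predicate the patch adds to acpi_parse_one_rmrr(). */
    static bool overlaps(uint64_t b1, uint64_t e1, uint64_t b2, uint64_t e2)
    {
        return b1 <= e2 && b2 <= e1;
    }

    /* Device assignment: bump the refcount if the RMRR is already tracked. */
    static int track(struct tracked_rmrr **head, uint64_t base, uint64_t end)
    {
        struct tracked_rmrr *r;

        for ( r = *head; r; r = r->next )
            if ( r->base == base && r->end == end )
            {
                ++r->count;
                return 0;
            }

        if ( (r = malloc(sizeof(*r))) == NULL )
            return -1;
        r->base = base;
        r->end = end;
        r->count = 1;
        r->next = *head;
        *head = r;
        return 0;
    }

    /* Device removal: unmap and free only when the last reference drops. */
    static void untrack(struct tracked_rmrr **head, uint64_t base, uint64_t end)
    {
        struct tracked_rmrr **pp, *r;

        for ( pp = head; (r = *pp) != NULL; pp = &r->next )
            if ( r->base == base && r->end == end )
            {
                if ( --r->count )
                    return;          /* other devices still use this RMRR */
                *pp = r->next;       /* the real code unmaps the pages here */
                free(r);
                return;
            }
    }

    int main(void)
    {
        struct tracked_rmrr *rmrrs = NULL;

        printf("overlap check: %d\n", overlaps(0x1000, 0x1fff, 0x1800, 0x2fff));
        track(&rmrrs, 0x1000, 0x1fff);   /* first device using this RMRR */
        track(&rmrrs, 0x1000, 0x1fff);   /* second device, same RMRR */
        untrack(&rmrrs, 0x1000, 0x1fff); /* entry survives, count drops to 1 */
        untrack(&rmrrs, 0x1000, 0x1fff); /* last reference gone, entry freed */
        return 0;
    }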

5327190a-x86-Intel-work-around-Xeon-7400-series-erratum-AAI65.patch (new file)

@@ -0,0 +1,62 @@
# Commit 96d1b237ae9b2f2718bb1c59820701f17d3d86e0
# Date 2014-03-17 16:47:22 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/Intel: work around Xeon 7400 series erratum AAI65
Linux commit 40e2d7f9b5dae048789c64672bf3027fbb663ffa ("x86 idle:
Repair large-server 50-watt idle-power regression") tells us that this
applies not just to the named Xeon 7400 series, but also NHM-EX and
WSM-EX; sadly Intel's documentation is so badly searchable that I
wasn't able to locate the respective errata (and hence can't quote
their numbers here).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -296,6 +296,9 @@ void mwait_idle_with_hints(unsigned int
unsigned int cpu = smp_processor_id();
s_time_t expires = per_cpu(timer_deadline, cpu);
+ if ( boot_cpu_has(X86_FEATURE_CLFLUSH_MONITOR) )
+ clflush((void *)&mwait_wakeup(cpu));
+
__monitor((void *)&mwait_wakeup(cpu), 0, 0);
smp_mb();
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -147,6 +147,9 @@ void __devinit early_intel_workaround(st
/*
* P4 Xeon errata 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
+ *
+ * Xeon 7400 erratum AAI65 (and further newer Xeons)
+ * MONITOR/MWAIT may have excessive false wakeups
*/
static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
@@ -161,6 +164,10 @@ static void __devinit Intel_errata_worka
wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
}
}
+
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
}
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -71,6 +71,7 @@
#define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
#define X86_FEATURE_XTOPOLOGY (3*32+13) /* cpu topology enum extensions */
#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
+#define X86_FEATURE_CLFLUSH_MONITOR (3*32+15) /* clflush reqd with monitor */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
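
The workaround is keyed to family 6, models 29 (0x1D, Xeon 7400), 46 (0x2E, NHM-EX) and 47 (0x2F, WSM-EX) with CLFLUSH available, and then issues a clflush on the monitored line before MONITOR/MWAIT. A stand-alone user-space check of whether the running CPU would get X86_FEATURE_CLFLUSH_MONITOR set (model numbers taken from the patch above; this is not hypervisor code):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, family, model;

        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return 1;

        family = (eax >> 8) & 0xf;
        model  = (eax >> 4) & 0xf;
        if ( family == 6 || family == 15 )
            model |= ((eax >> 16) & 0xf) << 4;   /* fold in the extended model */

        /* CPUID.1:EDX bit 19 is CLFSH (clflush support). */
        int has_clflush = !!(edx & (1u << 19));

        /* Models 29 (0x1D, Xeon 7400), 46 (0x2E, NHM-EX), 47 (0x2F, WSM-EX). */
        int affected = family == 6 && has_clflush &&
                       (model == 29 || model == 46 || model == 47);

        printf("family 0x%x model 0x%x: clflush-before-monitor workaround %s\n",
               family, model, affected ? "would apply" : "not needed");
        return 0;
    }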

xen.changes

@@ -1,6 +1,20 @@
-------------------------------------------------------------------
Wed Mar 19 14:18:06 MDT 2014 - carnold@suse.com
- Upstream patches from Jan
5321b20b-common-make-hypercall-preemption-checks-consistent.patch
5321b257-x86-make-hypercall-preemption-checks-consistent.patch
53271880-VT-d-fix-RMRR-handling.patch
5327190a-x86-Intel-work-around-Xeon-7400-series-erratum-AAI65.patch
- Dropped the following as now part of 5321b257
5310bac3-mm-ensure-useful-progress-in-decrease_reservation.patch
-------------------------------------------------------------------
Wed Mar 12 08:20:42 MDT 2014 - carnold@suse.com
- bnc#867910 - VUL-0: EMBARGOED: xen: XSA-89: HVMOP_set_mem_access
is not preemptible
xsa89.patch
- Upstream patches from Jan
530b27fd-x86-MCE-Fix-race-condition-in-mctelem_reserve.patch
530b2880-Nested-VMX-update-nested-paging-mode-on-vmexit.patch

xen.spec

@@ -84,19 +84,19 @@ BuildRequires: libfdt1-devel
%ifarch %ix86 x86_64
BuildRequires: dev86
%endif
BuildRequires: flex
BuildRequires: bison
BuildRequires: fdupes
BuildRequires: flex
BuildRequires: glib2-devel
BuildRequires: libaio-devel
BuildRequires: libbz2-devel
BuildRequires: libpixman-1-0-devel
BuildRequires: libuuid-devel
BuildRequires: libxml2-devel
BuildRequires: libyajl-devel
BuildRequires: libpixman-1-0-devel
%if %{?with_qemu_traditional}0
BuildRequires: pciutils-devel
BuildRequires: SDL-devel
BuildRequires: pciutils-devel
%endif
%if %{?with_stubdom}0
%if 0%{?suse_version} < 1230
@@ -139,7 +139,7 @@ BuildRequires: xorg-x11-util-devel
%endif
%endif
Version: 4.4.0_08
Version: 4.4.0_10
Release: 0
PreReq: %insserv_prereq %fillup_prereq
Summary: Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
@@ -202,15 +202,19 @@ Patch1: 530b27fd-x86-MCE-Fix-race-condition-in-mctelem_reserve.patch
Patch2: 530b2880-Nested-VMX-update-nested-paging-mode-on-vmexit.patch
Patch3: 530b28c5-x86-MSI-don-t-risk-division-by-zero.patch
Patch4: 530c54c3-x86-mce-Reduce-boot-time-logspam.patch
Patch5: 5310bac3-mm-ensure-useful-progress-in-decrease_reservation.patch
Patch6: 5315a254-IOMMU-generalize-and-correct-softirq-processing.patch
Patch7: 5315a3bb-x86-don-t-propagate-acpi_skip_timer_override-do-Dom0.patch
Patch8: 5315a43a-x86-ACPI-also-print-address-space-for-PM1x-fields.patch
Patch9: 531d8db1-x86-hvm-refine-the-judgment-on-IDENT_PT-for-EMT.patch
Patch10: 531d8e09-x86-HVM-fix-memory-type-merging-in-epte_get_entry_emt.patch
Patch11: 531d8e34-x86-HVM-consolidate-passthrough-handling-in-epte_get_entry_emt.patch
Patch12: 531d8fd0-kexec-identify-which-cpu-the-kexec-image-is-being-executed-on.patch
Patch13: 531dc0e2-xmalloc-handle-correctly-page-allocation-when-align-size.patch
Patch5: 5315a254-IOMMU-generalize-and-correct-softirq-processing.patch
Patch6: 5315a3bb-x86-don-t-propagate-acpi_skip_timer_override-do-Dom0.patch
Patch7: 5315a43a-x86-ACPI-also-print-address-space-for-PM1x-fields.patch
Patch8: 531d8db1-x86-hvm-refine-the-judgment-on-IDENT_PT-for-EMT.patch
Patch9: 531d8e09-x86-HVM-fix-memory-type-merging-in-epte_get_entry_emt.patch
Patch10: 531d8e34-x86-HVM-consolidate-passthrough-handling-in-epte_get_entry_emt.patch
Patch11: 531d8fd0-kexec-identify-which-cpu-the-kexec-image-is-being-executed-on.patch
Patch12: 531dc0e2-xmalloc-handle-correctly-page-allocation-when-align-size.patch
Patch13: 5321b20b-common-make-hypercall-preemption-checks-consistent.patch
Patch14: 5321b257-x86-make-hypercall-preemption-checks-consistent.patch
Patch15: 53271880-VT-d-fix-RMRR-handling.patch
Patch16: 5327190a-x86-Intel-work-around-Xeon-7400-series-erratum-AAI65.patch
Patch17: xsa89.patch
# Upstream qemu
Patch250: VNC-Support-for-ExtendedKeyEvent-client-message.patch
Patch251: 0001-net-move-the-tap-buffer-into-TAPState.patch
@@ -577,6 +581,10 @@ Authors:
%patch11 -p1
%patch12 -p1
%patch13 -p1
%patch14 -p1
%patch15 -p1
%patch16 -p1
%patch17 -p1
# Upstream qemu patches
%patch250 -p1
%patch251 -p1

xsa89.patch (new file, 96 lines)

@@ -0,0 +1,96 @@
x86: enforce preemption in HVM_set_mem_access / p2m_set_mem_access()
Processing up to 4G PFNs may take almost arbitrarily long, so
preemption is needed here.
This is XSA-89.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4465,6 +4465,15 @@ long do_hvm_op(unsigned long op, XEN_GUE
goto param_fail5;
rc = p2m_set_mem_access(d, a.first_pfn, a.nr, a.hvmmem_access);
+ if ( rc > 0 )
+ {
+ a.first_pfn += a.nr - rc;
+ a.nr = rc;
+ if ( __copy_to_guest(arg, &a, 1) )
+ rc = -EFAULT;
+ else
+ rc = -EAGAIN;
+ }
param_fail5:
rcu_unlock_domain(d);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1366,15 +1366,14 @@ void p2m_mem_access_resume(struct domain
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
-int p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
- uint32_t nr, hvmmem_access_t access)
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+ hvmmem_access_t access)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- unsigned long pfn;
p2m_access_t a, _a;
p2m_type_t t;
mfn_t mfn;
- int rc = 0;
+ long rc;
/* N.B. _not_ static: initializer depends on p2m->default_access */
p2m_access_t memaccess[] = {
@@ -1397,14 +1396,17 @@ int p2m_set_mem_access(struct domain *d,
a = memaccess[access];
/* If request to set default access */
- if ( start_pfn == ~0ull )
+ if ( pfn == ~0ul )
{
p2m->default_access = a;
return 0;
}
+ if ( !nr )
+ return 0;
+
p2m_lock(p2m);
- for ( pfn = start_pfn; pfn < start_pfn + nr; pfn++ )
+ for ( ; ; ++pfn )
{
mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
@@ -1412,6 +1414,13 @@ int p2m_set_mem_access(struct domain *d,
rc = -ENOMEM;
break;
}
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( !--nr || hypercall_preempt_check() )
+ {
+ rc = nr;
+ break;
+ }
}
p2m_unlock(p2m);
return rc;
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -576,8 +576,8 @@ void p2m_mem_access_resume(struct domain
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
-int p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
- uint32_t nr, hvmmem_access_t access);
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
+ uint32_t nr, hvmmem_access_t access);
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
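
The resume protocol the patch introduces: p2m_set_mem_access() now returns the number of pfns still to be processed when it is preempted, and the HVMOP handler rewinds first_pfn/nr and reports -EAGAIN so the hypercall is continued from where it stopped. A stubbed user-space sketch of that accounting (worst-case preemption after every pfn; the helpers are stand-ins, not the Xen API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Worst case: preemption is requested after every single pfn. */
    static bool hypercall_preempt_check(void) { return true; }

    static void set_one_pfn_access(uint64_t pfn) { (void)pfn; /* ... */ }

    /* Returns 0 when finished, or the number of pfns left when preempted. */
    static long set_mem_access(uint64_t pfn, uint32_t nr)
    {
        long rc = 0;

        if ( !nr )
            return 0;

        for ( ; ; ++pfn )
        {
            set_one_pfn_access(pfn);

            /* Check for continuation if it's not the last iteration. */
            if ( !--nr || hypercall_preempt_check() )
            {
                rc = nr;
                break;
            }
        }
        return rc;
    }

    int main(void)
    {
        uint64_t first_pfn = 0;
        uint32_t nr = 5;
        long rc;

        /* The HVMOP handler's side of the protocol: rewind the arguments and
         * retry (in Xen, __copy_to_guest() plus -EAGAIN yields a continuation). */
        while ( (rc = set_mem_access(first_pfn, nr)) > 0 )
        {
            first_pfn += nr - rc;
            nr = rc;
            printf("preempted: resume at pfn %llu, %u pfns left\n",
                   (unsigned long long)first_pfn, nr);
        }
        return rc < 0;
    }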