0c76f22ef1
- bnc#633573 - System fail to boot after running several warm reboot tests
  22749-vtd-workarounds.patch
- Upstream patches from Jan
  22744-ept-pod-locking.patch
  22777-vtd-ats-fixes.patch
  22781-pod-hap-logdirty.patch
  22782-x86-emul-smsw.patch
  22789-i386-no-x2apic.patch
  22790-svm-resume-migrate-pirqs.patch
  22816-x86-pirq-drop-priv-check.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=94
# HG changeset patch
# User George Dunlap <george.dunlap@eu.citrix.com>
# Date 1295274541 0
# Node ID 3decd02e0b18ae21fb926c6bad96a4cd02c48272
# Parent 97ab84aca65cdcbce2ddccc51629fb24adb056cf
PoD,hap: Fix logdirty mode when using hardware assisted paging

When writing a writable p2m entry for a pfn, we need to mark the pfn
dirty to avoid corruption when doing live migration.

Marking the page dirty exposes another issue, where there are
excessive sweeps for zero pages if there's a mismatch between PoD
entries and cache entries. Only sweep for zero pages if we actually
need more memory.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
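As background for the paging_mark_dirty() hunk below, here is a minimal
stand-alone model of the first problem. Only the idea of marking the pfn
dirty corresponds to the real Xen call; the bitmap and all names here are
invented for illustration:

/* Illustrative model, not Xen code: a page populated on demand must be
 * flagged in the log-dirty bitmap, or a live-migration pass that already
 * ran will never resend it and the destination keeps a stale zero page. */
#include <stdio.h>

#define NR_PFNS 8
static unsigned char dirty_bitmap[NR_PFNS]; /* stand-in for the log-dirty bitmap */

/* Demand-populating a pfn hands the guest a writable page, so migration
 * must be told to (re)send it; this mirrors the paging_mark_dirty() call
 * added by the second hunk below. */
static void demand_populate(unsigned long pfn)
{
    /* ... allocate the page and install a writable p2m entry ... */
    dirty_bitmap[pfn] = 1;
}

int main(void)
{
    demand_populate(3);
    for (unsigned long pfn = 0; pfn < NR_PFNS; pfn++)
        if (dirty_bitmap[pfn])
            printf("migration must resend pfn %lu\n", pfn);
    return 0;
}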
Index: xen-4.0.2-testing/xen/arch/x86/mm/p2m.c
===================================================================
--- xen-4.0.2-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-4.0.2-testing/xen/arch/x86/mm/p2m.c
@@ -1064,14 +1064,22 @@ p2m_pod_demand_populate(struct domain *d
     if ( unlikely(d->is_dying) )
         goto out_fail;
 
-    /* If we're low, start a sweep */
-    if ( order == 9 && page_list_empty(&p2md->pod.super) )
-        p2m_pod_emergency_sweep_super(d);
-
-    if ( page_list_empty(&p2md->pod.single) &&
-         ( ( order == 0 )
-           || (order == 9 && page_list_empty(&p2md->pod.super) ) ) )
-        p2m_pod_emergency_sweep(d);
+    /* Once we've ballooned down enough that we can fill the remaining
+     * PoD entries from the cache, don't sweep even if the particular
+     * list we want to use is empty: that can lead to thrashing zero pages
+     * through the cache for no good reason. */
+    if ( p2md->pod.entry_count > p2md->pod.count )
+    {
+
+        /* If we're low, start a sweep */
+        if ( order == 9 && page_list_empty(&p2md->pod.super) )
+            p2m_pod_emergency_sweep_super(d);
+
+        if ( page_list_empty(&p2md->pod.single) &&
+             ( ( order == 0 )
+               || (order == 9 && page_list_empty(&p2md->pod.super) ) ) )
+            p2m_pod_emergency_sweep(d);
+    }
 
     /* Keep track of the highest gfn demand-populated by a guest fault */
     if ( q == p2m_guest && gfn > p2md->pod.max_guest )
@@ -1098,7 +1106,10 @@ p2m_pod_demand_populate(struct domain *d
     set_p2m_entry(d, gfn_aligned, mfn, order, p2m_ram_rw);
 
     for( i = 0 ; i < (1UL << order) ; i++ )
+    {
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
+        paging_mark_dirty(d, mfn_x(mfn) + i);
+    }
 
     p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
     BUG_ON(p2md->pod.entry_count < 0);
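The sweep guard added by the first hunk can also be read in isolation.
Below is a minimal sketch of that decision, assuming a simplified state
struct: pod_state and should_sweep() are invented for illustration (Xen
keeps this state in its p2m data under the p2m lock), and the two sweep
variants are collapsed into a single yes/no for brevity:

#include <stdbool.h>
#include <stdio.h>

struct pod_state {
    long entry_count;  /* outstanding PoD entries not yet populated */
    long count;        /* pages currently held in the PoD cache */
    bool super_empty;  /* cache has no 2MB superpages left */
    bool single_empty; /* cache has no 4kB single pages left */
};

/* Mirrors the patched condition: never sweep while the cache already
 * covers every remaining entry; otherwise fall back to the old checks
 * on the free list needed for this allocation order. */
static bool should_sweep(const struct pod_state *p, unsigned int order)
{
    if (p->entry_count <= p->count)
        return false;             /* sweeping would only thrash zero pages */
    if (order == 9 && p->super_empty)
        return true;              /* 2MB request, superpage list exhausted */
    return p->single_empty &&
           (order == 0 || (order == 9 && p->super_empty));
}

int main(void)
{
    struct pod_state p = { .entry_count = 64, .count = 128,
                           .super_empty = true, .single_empty = true };
    printf("%d\n", should_sweep(&p, 9)); /* 0: cache covers all entries */
    p.count = 16;                        /* now under-provisioned */
    printf("%d\n", should_sweep(&p, 9)); /* 1: sweep is worthwhile */
    return 0;
}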