08a77ed8c4
xenpaging.tools_xenpaging_cleanup.patch
- fate#310510 - fix xenpaging
  xenpaging.mem_event_check_ring-free_requests.patch
- install /etc/xen/examples/xentrace_formats.txt to get human readable
  tracedata if xenalyze is not used
- fate#310510 - fix xenpaging
  xenpaging.autostart_delay.patch
  xenpaging.blacklist.patch
  xenpaging.MRU_SIZE.patch
  remove xenpaging.hacks.patch, realmode works
- Upstream patches from Jan including fixes for the following bugs
  bnc#583568 - Xen kernel is not booting
  bnc#615206 - Xen kernel fails to boot with IO-APIC problem
  bnc#640773 - Xen kernel crashing right after grub
  bnc#643477 - issues with PCI hotplug/hotunplug to Xen driver domain
  22223-vtd-igd-workaround.patch
  22222-x86-timer-extint.patch
  22214-x86-msr-misc-enable.patch
  22213-x86-xsave-cpuid-check.patch
  22194-tmem-check-pv-mfn.patch
  22177-i386-irq-safe-map_domain_page.patch
  22175-x86-irq-enter-exit.patch
  22174-x86-pmtimer-accuracy.patch
  22160-Intel-C6-EOI.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=76
Subject: xenpaging: populate only paged-out pages

Populate a paged-out page only once to reduce pressure on the ring buffer.
Several CPUs may still request the same page at once; xenpaging can handle this.

Signed-off-by: Olaf Hering <olaf@aepfle.de>

---
 xen/arch/x86/hvm/emulate.c       |    3 ++-
 xen/arch/x86/hvm/hvm.c           |   17 ++++++++++-------
 xen/arch/x86/mm/guest_walk.c     |    3 ++-
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++++--
 4 files changed, 18 insertions(+), 11 deletions(-)

--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/emulate.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/emulate.c
@@ -65,7 +65,8 @@ static int hvmemul_do_io(
     ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(curr->domain, ram_gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(curr->domain, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/hvm.c
@@ -291,7 +291,8 @@ static int hvm_set_ioreq_page(
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(d, gmfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(d, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1324,7 +1325,8 @@ static void *hvm_map_entry(unsigned long
     mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(current->domain, gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(current->domain, gfn);
         return NULL;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1723,7 +1725,8 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(curr->domain, gfn);
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3032,8 +3035,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn_t mfn = gfn_to_mfn(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, pfn);
-
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(d, pfn);
                 rc = -EINVAL;
                 goto param_fail3;
             }
@@ -3096,8 +3099,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn = gfn_to_mfn_unshare(d, pfn, &t, 0);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, pfn);
-
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(d, pfn);
                 rc = -EINVAL;
                 goto param_fail4;
             }
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/guest_walk.c
@@ -96,7 +96,8 @@ static inline void *map_domain_gfn(struc
     *mfn = gfn_to_mfn_unshare(d, gfn_x(gfn), p2mt, 0);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(d, gfn_x(gfn));
+        if ( p2m_is_paged(*p2mt) )
+            p2m_mem_paging_populate(d, gfn_x(gfn));
 
         *rc = _PAGE_PAGED;
         return NULL;
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/hap/guest_walk.c
@@ -49,7 +49,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
         return INVALID_GFN;
@@ -81,7 +82,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(v->domain, gfn_x(gfn));
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(v->domain, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;