99885eadf2
xen-4.4.1-testing-src.tar.bz2
- Dropped patches now contained in tarball
  53d7b781-x86-cpu-undo-BIOS-CPUID-max_leaf-limit-earlier.patch
  53df71c7-lz4-check-for-underruns.patch
  53e47d6b-x86_emulate-properly-do-IP-updates-and-other-side-effects.patch
- bnc#882089 - Windows 2012 R2 fails to boot up with greater than 60 vcpus
  53df727b-x86-HVM-extend-LAPIC-shortcuts-around-P2M-lookups.patch
  53e8be5f-x86-vHPET-use-rwlock-instead-of-simple-one.patch
  53ff3659-x86-consolidate-boolean-inputs-in-hvm-and-p2m.patch
  53ff36ae-x86-hvm-treat-non-insn-fetch-NPF-also-as-read-violations.patch
  53ff36d5-x86-mem_event-deliver-gla-fault-EPT-violation-information.patch
  54005472-EPT-utilize-GLA-GPA-translation-known-for-certain-faults.patch
- Upstream patches from Jan
  53f737b1-VMX-fix-DebugCtl-MSR-clearing.patch
  53f7386d-x86-irq-process-softirqs-in-irq-keyhandlers.patch
  53ff3716-x86-ats-Disable-Address-Translation-Services-by-default.patch
  53ff3899-x86-NMI-allow-processing-unknown-NMIs-with-watchdog.patch
- bnc#864801 - VUL-0: CVE-2013-4540: qemu: zaurus: buffer overrun on invalid state load
  CVE-2013-4540-qemu.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=328
References: bnc#882089

# Commit fd1863847af15c3676348447755e1a1801f9d394
# Date 2014-08-04 13:46:03 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: extend LAPIC shortcuts around P2M lookups

... to all internally handled MMIO regions. It is in particular the
HPET page that, e.g. on Windows Server 2012 R2, can get heavily
accessed, and hence avoiding the unnecessary lookups is rather
beneficial (in the reported case a 40+-vCPU guest would previously not
have booted at all, while with the hvm_hap_nested_page_fault() shortcut
alone it was able to boot up in 18 minutes [i.e. still room for
improvement]).

Note the apparently unrelated addition of an is_hvm_vcpu() check to the
__hvm_copy() code: Afaict for PVH this shortcut should never have taken
effect (since there's no LAPIC in that case).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
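
For readers who only skim the description above, here is a minimal, standalone
sketch of the fast-path idea (illustration only, not part of the patch; the
names region_check_fn, internal_regions, mmio_internal, slow_lookup and
handle_access are hypothetical stand-ins for Xen's hvm_mmio_handlers[] table,
hvm_mmio_internal() and the real P2M lookup):

    /* Check the cheap per-region handlers first; only fall back to the
     * expensive translation when no internal handler claims the address. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef bool (*region_check_fn)(uint64_t addr);

    /* Hypothetical internally handled pages (default LAPIC and HPET bases). */
    static bool is_lapic_page(uint64_t addr) { return (addr >> 12) == 0xfee00; }
    static bool is_hpet_page(uint64_t addr)  { return (addr >> 12) == 0xfed00; }

    static region_check_fn internal_regions[] = { is_lapic_page, is_hpet_page };

    /* Analogue of hvm_mmio_internal(): does any internal handler claim gpa? */
    static bool mmio_internal(uint64_t gpa)
    {
        for ( unsigned int i = 0;
              i < sizeof(internal_regions) / sizeof(internal_regions[0]); ++i )
            if ( internal_regions[i](gpa) )
                return true;
        return false;
    }

    /* Stand-in for the costly P2M lookup that the fast path now skips. */
    static void slow_lookup(uint64_t gpa)
    {
        printf("expensive lookup for %#llx\n", (unsigned long long)gpa);
    }

    static void handle_access(uint64_t gpa)
    {
        if ( mmio_internal(gpa) )
        {
            printf("fast path: %#llx handled internally\n",
                   (unsigned long long)gpa);
            return;
        }
        slow_lookup(gpa);
    }

    int main(void)
    {
        handle_access(0xfed00000);  /* HPET page: takes the fast path */
        handle_access(0x12345000);  /* ordinary RAM: expensive lookup */
        return 0;
    }
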
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1521,11 +1521,14 @@ int hvm_hap_nested_page_fault(paddr_t gp
         }
     }
 
-    /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
-     * a fast path for LAPIC accesses, skipping the p2m lookup. */
+    /*
+     * No need to do the P2M lookup for internally handled MMIO, benefiting
+     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+     * - newer Windows (like Server 2012) for HPET accesses.
+     */
     if ( !nestedhvm_vcpu_in_guestmode(v)
          && is_hvm_vcpu(v)
-         && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
+         && hvm_mmio_internal(gpa) )
     {
         if ( !handle_mmio() )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -2644,7 +2647,9 @@ static enum hvm_copy_result __hvm_copy(
 
     while ( todo > 0 )
     {
-        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
+        paddr_t gpa = addr & ~PAGE_MASK;
+
+        count = min_t(int, PAGE_SIZE - gpa, todo);
 
         if ( flags & HVMCOPY_virt )
         {
@@ -2659,16 +2664,22 @@ static enum hvm_copy_result __hvm_copy(
                     hvm_inject_page_fault(pfec, addr);
                 return HVMCOPY_bad_gva_to_gfn;
             }
+            gpa |= (paddr_t)gfn << PAGE_SHIFT;
         }
         else
         {
             gfn = addr >> PAGE_SHIFT;
+            gpa = addr;
         }
 
-        /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
-         * a fast path for LAPIC accesses, skipping the p2m lookup. */
+        /*
+         * No need to do the P2M lookup for internally handled MMIO, benefiting
+         * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+         * - newer Windows (like Server 2012) for HPET accesses.
+         */
         if ( !nestedhvm_vcpu_in_guestmode(curr)
-             && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(curr))) )
+             && is_hvm_vcpu(curr)
+             && hvm_mmio_internal(gpa) )
             return HVMCOPY_bad_gfn_to_mfn;
 
         page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -163,6 +163,18 @@ static int hvm_mmio_access(struct vcpu *
     return rc;
 }
 
+bool_t hvm_mmio_internal(paddr_t gpa)
+{
+    struct vcpu *curr = current;
+    unsigned int i;
+
+    for ( i = 0; i < HVM_MMIO_HANDLER_NR; ++i )
+        if ( hvm_mmio_handlers[i]->check_handler(curr, gpa) )
+            return 1;
+
+    return 0;
+}
+
 int hvm_mmio_intercept(ioreq_t *p)
 {
     struct vcpu *v = current;
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -91,6 +91,7 @@ static inline int hvm_buffered_io_interc
     return hvm_io_intercept(p, HVM_BUFFERED_IO);
 }
 
+bool_t hvm_mmio_internal(paddr_t gpa);
 int hvm_mmio_intercept(ioreq_t *p);
 int hvm_buffered_io_send(ioreq_t *p);
 