xen/53df727b-x86-HVM-extend-LAPIC-shortcuts-around-P2M-lookups.patch
Charles Arnold 3e51b51ba9 - bnc#883112 - Xen Panic during boot "System without CMOS RTC must
be booted from EFI"
  53dba447-x86-ACPI-allow-CMOS-RTC-use-even-when-ACPI-says-there-is-none.patch
- Upstream patches from Jan
  53d7b781-x86-cpu-undo-BIOS-CPUID-max_leaf-limit-earlier.patch
  53df71c7-lz4-check-for-underruns.patch
  53df727b-x86-HVM-extend-LAPIC-shortcuts-around-P2M-lookups.patch
  53e47d6b-x86_emulate-properly-do-IP-updates-and-other-side-effects.patch

- Update to Xen Version 4.4.1-rc2
  xen-4.4.1-testing-src.tar.bz2
- Dropped 60 upstream patches and xen-4.4.0-testing-src.tar.bz2

- bnc#820873 - The "long" option doesn't work with "xl list"
  53d124e7-fix-list_domain_details-check-config-data-length-0.patch

- bnc#888996 - Package 'xen-tool' contains 'SuSE' spelling in a
  filename and/or SPEC file
  Renamed README.SuSE -> README.SUSE
  Modified files: xen.spec, boot.local.xenU, init.pciback
  xend-config.patch, xend-vif-route-ifup.patch

- bnc#882673 - Dom0 memory should enforce a minimum memory size
  (e.g. dom0_mem=min:512M)
  xen.spec (Mike Latimer)

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=324
2014-08-15 14:33:16 +00:00

111 lines
3.8 KiB
Diff

# Commit fd1863847af15c3676348447755e1a1801f9d394
# Date 2014-08-04 13:46:03 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: extend LAPIC shortcuts around P2M lookups
... to all internally handled MMIO regions. It is in particular the
HPET page that, e.g. on Windows Server 2012 R2, can get heavily
accessed, and hence avoiding the unnecessary lookups is rather
beneficial (in the reported case a 40+-vCPU guest would previously not
have booted at all while with hvm_hap_nested_page_fault() shortcut
alone it was able to boot up in 18 minutes [i.e. still room for
improvement]).
Note the apparently unrelated addition of an is_hvm_vcpu() check to the
__hvm_copy() code: Afaict for PVH this shortcut should never have taken
effect (since there's no LAPIC in that case).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1521,11 +1521,14 @@ int hvm_hap_nested_page_fault(paddr_t gp
}
}
- /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
- * a fast path for LAPIC accesses, skipping the p2m lookup. */
+ /*
+ * No need to do the P2M lookup for internally handled MMIO, benefiting
+ * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+ * - newer Windows (like Server 2012) for HPET accesses.
+ */
if ( !nestedhvm_vcpu_in_guestmode(v)
&& is_hvm_vcpu(v)
- && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
+ && hvm_mmio_internal(gpa) )
{
if ( !handle_mmio() )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -2644,7 +2647,9 @@ static enum hvm_copy_result __hvm_copy(
while ( todo > 0 )
{
- count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
+ paddr_t gpa = addr & ~PAGE_MASK;
+
+ count = min_t(int, PAGE_SIZE - gpa, todo);
if ( flags & HVMCOPY_virt )
{
@@ -2659,16 +2664,22 @@ static enum hvm_copy_result __hvm_copy(
hvm_inject_page_fault(pfec, addr);
return HVMCOPY_bad_gva_to_gfn;
}
+ gpa |= (paddr_t)gfn << PAGE_SHIFT;
}
else
{
gfn = addr >> PAGE_SHIFT;
+ gpa = addr;
}
- /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
- * a fast path for LAPIC accesses, skipping the p2m lookup. */
+ /*
+ * No need to do the P2M lookup for internally handled MMIO, benefiting
+ * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+ * - newer Windows (like Server 2012) for HPET accesses.
+ */
if ( !nestedhvm_vcpu_in_guestmode(curr)
- && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(curr))) )
+ && is_hvm_vcpu(curr)
+ && hvm_mmio_internal(gpa) )
return HVMCOPY_bad_gfn_to_mfn;
page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -163,6 +163,18 @@ static int hvm_mmio_access(struct vcpu *
return rc;
}
+bool_t hvm_mmio_internal(paddr_t gpa)
+{
+ struct vcpu *curr = current;
+ unsigned int i;
+
+ for ( i = 0; i < HVM_MMIO_HANDLER_NR; ++i )
+ if ( hvm_mmio_handlers[i]->check_handler(curr, gpa) )
+ return 1;
+
+ return 0;
+}
+
int hvm_mmio_intercept(ioreq_t *p)
{
struct vcpu *v = current;
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -91,6 +91,7 @@ static inline int hvm_buffered_io_interc
return hvm_io_intercept(p, HVM_BUFFERED_IO);
}
+bool_t hvm_mmio_internal(paddr_t gpa);
int hvm_mmio_intercept(ioreq_t *p);
int hvm_buffered_io_send(ioreq_t *p);