c608e23838
Turn off building the KMPs now that we are using the pvops kernel
  xen.spec
- Upstream patches from Jan
  561bbc8b-VT-d-don-t-suppress-invalidation-address-write-when-it-is-zero.patch
  561d20a0-x86-hide-MWAITX-from-PV-domains.patch
  561e3283-x86-NUMA-fix-SRAT-table-processor-entry-parsing-and-consumption.patch
  5632118e-arm-Support-hypercall_create_continuation-for-multicall.patch
  56321222-arm-rate-limit-logging-from-unimplemented-PHYSDEVOP-and-HVMOP.patch
  56321249-arm-handle-races-between-relinquish_memory-and-free_domheap_pages.patch
  5632127b-x86-guard-against-undue-super-page-PTE-creation.patch
  5632129c-free-domain-s-vcpu-array.patch
  (Replaces CVE-2015-7969-xsa149.patch)
  563212c9-x86-PoD-Eager-sweep-for-zeroed-pages.patch
  563212e4-xenoprof-free-domain-s-vcpu-array.patch
  563212ff-x86-rate-limit-logging-in-do_xen-oprof-pmu-_op.patch
  56323737-libxl-adjust-PoD-target-by-memory-fudge-too.patch
  56377442-x86-PoD-Make-p2m_pod_empty_cache-restartable.patch
  5641ceec-x86-HVM-always-intercept-AC-and-DB.patch
  (Replaces CVE-2015-5307-xsa156.patch)
  5644b756-x86-HVM-don-t-inject-DB-with-error-code.patch
- Dropped 55b0a2db-x86-MSI-track-guest-masking.patch
- Use upstream variants of block-iscsi and block-nbd
- Remove xenalyze.hg, it's part of xen-4.6

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=389
# Commit 710942e57fb42ff8f344ca82f6b678f67e38ae63
# Date 2015-10-12 15:58:35 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
VT-d: don't suppress invalidation address write when it is zero

GFN zero is a valid address, and hence may need invalidation done for
it just like for any other GFN.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Yang Zhang <yang.z.zhang@intel.com>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -414,7 +414,7 @@ static int flush_iotlb_reg(void *_iommu,
 {
     struct iommu *iommu = (struct iommu *) _iommu;
     int tlb_offset = ecap_iotlb_offset(iommu->ecap);
-    u64 val = 0, val_iva = 0;
+    u64 val = 0;
     unsigned long flags;
 
     /*
@@ -435,7 +435,6 @@ static int flush_iotlb_reg(void *_iommu,
     switch ( type )
     {
     case DMA_TLB_GLOBAL_FLUSH:
-        /* global flush doesn't need set IVA_REG */
        val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
        break;
    case DMA_TLB_DSI_FLUSH:
@@ -443,8 +442,6 @@ static int flush_iotlb_reg(void *_iommu,
        break;
    case DMA_TLB_PSI_FLUSH:
        val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
-        /* Note: always flush non-leaf currently */
-        val_iva = size_order | addr;
        break;
    default:
        BUG();
@@ -457,8 +454,11 @@ static int flush_iotlb_reg(void *_iommu,
 
    spin_lock_irqsave(&iommu->register_lock, flags);
    /* Note: Only uses first TLB reg currently */
-    if ( val_iva )
-        dmar_writeq(iommu->reg, tlb_offset, val_iva);
+    if ( type == DMA_TLB_PSI_FLUSH )
+    {
+        /* Note: always flush non-leaf currently. */
+        dmar_writeq(iommu->reg, tlb_offset, size_order | addr);
+    }
    dmar_writeq(iommu->reg, tlb_offset + 8, val);
 
    /* Make sure hardware complete it */
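
The sketch below is not Xen code; it is a minimal, standalone C illustration of the bug pattern the patch fixes: using the value 0 as a "nothing to write" sentinel silently drops a request whose legitimate value is 0 (here, GFN 0), whereas keying the write on the request type handles zero like any other address. The names flush_old(), flush_new() and write_reg() are hypothetical and chosen only for this example.

#include <stdint.h>
#include <stdio.h>

enum flush_type { GLOBAL_FLUSH, PAGE_FLUSH };

static void write_reg(const char *name, uint64_t v)
{
    printf("write %s = 0x%llx\n", name, (unsigned long long)v);
}

/* Old pattern: the address write is skipped whenever the value is 0. */
static void flush_old(enum flush_type type, uint64_t addr)
{
    uint64_t val_iva = 0;

    if (type == PAGE_FLUSH)
        val_iva = addr;          /* GFN 0 leaves val_iva at 0 ...        */
    if (val_iva)                 /* ... so this test wrongly skips it    */
        write_reg("IVA", val_iva);
    write_reg("IOTLB", 1);
}

/* Fixed pattern: key the write on the request type, not on the value. */
static void flush_new(enum flush_type type, uint64_t addr)
{
    if (type == PAGE_FLUSH)
        write_reg("IVA", addr);  /* written even when addr == 0          */
    write_reg("IOTLB", 1);
}

int main(void)
{
    flush_old(PAGE_FLUSH, 0);    /* IVA write is missing                 */
    flush_new(PAGE_FLUSH, 0);    /* IVA write is present                 */
    return 0;
}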