9fd34708c1
guest
  21971-pod-accounting.patch
- bnc#584204 - xm usb-list broken
  usb-list.patch
- bnc#625520 - TP-L3: NMI cannot be triggered for xen kernel
  21926-x86-pv-NMI-inject.patch
- bnc#613529 - TP-L3: kdump kernel hangs when crash was initiated from xen kernel
  21886-kexec-shutdown.patch
- Upstream Intel patches to improve X2APIC handling.
  21716-iommu-alloc.patch
  21717-ir-qi.patch
  21718-x2apic-logic.patch
  21933-vtd-ioapic-write.patch
  21953-msi-enable.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=71
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1279186999 -3600
# Node ID a35e5f33a72eee3d00cec6972bb93585609559e2
# Parent 4ba86edf38f816a0d94cfb85b90074a72113e41c
x2APIC/VT-d: improve interrupt remapping and queued invalidation enabling and disabling

x2APIC depends on interrupt remapping, so interrupt remapping needs to
be enabled before x2APIC. Usually x2APIC is not yet enabled
(x2apic_enabled=0) at the point where interrupt remapping is enabled,
even though x2APIC will be enabled later. The interrupt mode therefore
has to be passed into enable_intremap() as a parameter rather than
derived from x2apic_enabled; this patch adds an "eim" parameter to
enable_intremap() for that purpose. Interrupt remapping and queued
invalidation are already enabled when x2APIC is enabled, so they need
not be enabled again during IOMMU setup. This patch checks whether
interrupt remapping and queued invalidation are already enabled and
skips enabling them if so. Disabling is handled symmetrically: they
are not disabled again if already disabled.

Signed-off-by: Weidong Han <weidong.han@intel.com>
xen-unstable changeset: 21717:176956d1d2fd
xen-unstable date: Mon Jul 05 08:30:25 2010 +0100
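The core of the change is that enable_qinval()/enable_intremap() first consult
the VT-d global status register (DMA_GSTS_QIES / DMA_GSTS_IRES in DMAR_GSTS_REG)
and return early when the feature is already on, while the disable paths bail
out when the bit is already clear. The stand-alone C sketch below illustrates
that check-before-toggle pattern only; the mock register, the MOCK_* bit values
and the gsts_read()/gcmd_write() helpers are illustrative stand-ins and are not
part of the Xen sources.

/*
 * Minimal sketch of the "check status before enable/disable" pattern
 * introduced by this patch.  Everything prefixed MOCK_/mock_ is a
 * hypothetical stand-in; only the idempotence logic mirrors
 * enable_qinval()/disable_qinval().
 */
#include <stdio.h>
#include <stdint.h>

#define MOCK_GSTS_QIES  (1u << 26)   /* mock "queued invalidation enabled" status bit */
#define MOCK_GCMD_QIE   (1u << 26)   /* mock "enable queued invalidation" command bit */

static uint32_t mock_gsts;           /* stands in for an MMIO read of DMAR_GSTS_REG */

static uint32_t gsts_read(void)      { return mock_gsts; }
static void gcmd_write(uint32_t cmd) { mock_gsts = cmd; } /* mock hw echoes command into status */

static int enable_qinval_sketch(void)
{
    uint32_t sts = gsts_read();

    if ( sts & MOCK_GSTS_QIES )        /* already enabled by an earlier caller: no-op */
        return 0;

    gcmd_write(sts | MOCK_GCMD_QIE);   /* turn it on exactly once */
    return 0;
}

static void disable_qinval_sketch(void)
{
    uint32_t sts = gsts_read();

    if ( !(sts & MOCK_GSTS_QIES) )     /* already disabled: nothing to do */
        return;

    gcmd_write(sts & ~MOCK_GCMD_QIE);
}

int main(void)
{
    enable_qinval_sketch();
    enable_qinval_sketch();            /* second call is a no-op, as in the patch */
    printf("qinval enabled: %d\n", !!(gsts_read() & MOCK_GSTS_QIES));

    disable_qinval_sketch();
    disable_qinval_sketch();           /* likewise safe to repeat */
    printf("qinval enabled: %d\n", !!(gsts_read() & MOCK_GSTS_QIES));
    return 0;
}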
Index: xen-4.0.0-testing/xen/drivers/passthrough/vtd/extern.h
===================================================================
--- xen-4.0.0-testing.orig/xen/drivers/passthrough/vtd/extern.h
+++ xen-4.0.0-testing/xen/drivers/passthrough/vtd/extern.h
@@ -33,7 +33,7 @@ extern struct keyhandler dump_iommu_info
 
 int enable_qinval(struct iommu *iommu);
 void disable_qinval(struct iommu *iommu);
-int enable_intremap(struct iommu *iommu);
+int enable_intremap(struct iommu *iommu, int eim);
 void disable_intremap(struct iommu *iommu);
 int queue_invalidate_context(struct iommu *iommu,
     u16 did, u16 source_id, u8 function_mask, u8 granu);
Index: xen-4.0.0-testing/xen/drivers/passthrough/vtd/intremap.c
===================================================================
--- xen-4.0.0-testing.orig/xen/drivers/passthrough/vtd/intremap.c
+++ xen-4.0.0-testing/xen/drivers/passthrough/vtd/intremap.c
@@ -709,7 +709,7 @@ void msi_msg_write_remap_rte(
 }
 #endif
 
-int enable_intremap(struct iommu *iommu)
+int enable_intremap(struct iommu *iommu, int eim)
 {
     struct acpi_drhd_unit *drhd;
     struct ir_ctrl *ir_ctrl;
@@ -719,10 +719,25 @@ int enable_intremap(struct iommu *iommu)
     ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
 
     ir_ctrl = iommu_ir_ctrl(iommu);
+    sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+
+    /* Return if already enabled by Xen */
+    if ( (sts & DMA_GSTS_IRES) && ir_ctrl->iremap_maddr )
+        return 0;
+
+    sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+    if ( !(sts & DMA_GSTS_QIES) )
+    {
+        dprintk(XENLOG_ERR VTDPREFIX,
+                "Queued invalidation is not enabled, should not enable "
+                "interrupt remapping\n");
+        return -EINVAL;
+    }
+
     if ( ir_ctrl->iremap_maddr == 0 )
     {
         drhd = iommu_to_drhd(iommu);
-        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR );
+        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR);
         if ( ir_ctrl->iremap_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
@@ -735,7 +750,7 @@ int enable_intremap(struct iommu *iommu)
 #ifdef CONFIG_X86
     /* set extended interrupt mode bit */
     ir_ctrl->iremap_maddr |=
-            x2apic_enabled ? (1 << IRTA_REG_EIME_SHIFT) : 0;
+            eim ? (1 << IRTA_REG_EIME_SHIFT) : 0;
 #endif
     spin_lock_irqsave(&iommu->register_lock, flags);
 
@@ -772,13 +787,18 @@ void disable_intremap(struct iommu *iomm
     u32 sts;
     unsigned long flags;
 
-    ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
+    if ( !ecap_intr_remap(iommu->ecap) )
+        return;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+    if ( !(sts & DMA_GSTS_IRES) )
+        goto out;
+
     dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
 
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   !(sts & DMA_GSTS_IRES), sts);
+out:
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
Index: xen-4.0.0-testing/xen/drivers/passthrough/vtd/iommu.c
===================================================================
--- xen-4.0.0-testing.orig/xen/drivers/passthrough/vtd/iommu.c
+++ xen-4.0.0-testing/xen/drivers/passthrough/vtd/iommu.c
@@ -1829,24 +1829,20 @@ static int init_vtd_hw(void)
         spin_lock_irqsave(&iommu->register_lock, flags);
         dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
         spin_unlock_irqrestore(&iommu->register_lock, flags);
-
-        /* initialize flush functions */
-        flush = iommu_get_flush(iommu);
-        flush->context = flush_context_reg;
-        flush->iotlb = flush_iotlb_reg;
     }
 
-    if ( iommu_qinval )
+    for_each_drhd_unit ( drhd )
     {
-        for_each_drhd_unit ( drhd )
+        iommu = drhd->iommu;
+        /*
+         * If queued invalidation not enabled, use register based
+         * invalidation
+         */
+        if ( enable_qinval(iommu) != 0 )
         {
-            iommu = drhd->iommu;
-            if ( enable_qinval(iommu) != 0 )
-            {
-                dprintk(XENLOG_INFO VTDPREFIX,
-                        "Failed to enable Queued Invalidation!\n");
-                break;
-            }
+            flush = iommu_get_flush(iommu);
+            flush->context = flush_context_reg;
+            flush->iotlb = flush_iotlb_reg;
         }
     }
 
@@ -1872,9 +1868,9 @@ static int init_vtd_hw(void)
        for_each_drhd_unit ( drhd )
        {
            iommu = drhd->iommu;
-           if ( enable_intremap(iommu) != 0 )
+           if ( enable_intremap(iommu, 0) != 0 )
            {
-               dprintk(XENLOG_INFO VTDPREFIX,
+               dprintk(XENLOG_WARNING VTDPREFIX,
                    "Failed to enable Interrupt Remapping!\n");
                break;
            }
Index: xen-4.0.0-testing/xen/drivers/passthrough/vtd/qinval.c
===================================================================
--- xen-4.0.0-testing.orig/xen/drivers/passthrough/vtd/qinval.c
+++ xen-4.0.0-testing/xen/drivers/passthrough/vtd/qinval.c
@@ -437,10 +437,16 @@ int enable_qinval(struct iommu *iommu)
     u32 sts;
     unsigned long flags;
 
+    if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
+        return -ENOENT;
+
     qi_ctrl = iommu_qi_ctrl(iommu);
     flush = iommu_get_flush(iommu);
 
-    ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+    /* Return if already enabled by Xen */
+    sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+    if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr )
+        return 0;
 
     if ( qi_ctrl->qinval_maddr == 0 )
     {
@@ -488,14 +494,19 @@ void disable_qinval(struct iommu *iommu)
     u32 sts;
     unsigned long flags;
 
-    ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+    if ( !ecap_queued_inval(iommu->ecap) )
+        return;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+    if ( !(sts & DMA_GSTS_QIES) )
+        goto out;
+
     dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
 
     /* Make sure hardware complete it */
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   !(sts & DMA_GSTS_QIES), sts);
+out:
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }