xen/5583d9c5-x86-MSI-X-cleanup.patch
Charles Arnold f158f55e6a - Update to Xen 4.5.2
xen-4.5.2-testing-src.tar.bz2
- Drop the following
  xen-4.5.1-testing-src.tar.bz2
  552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch
  5576f178-kexec-add-more-pages-to-v1-environment.patch
  55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
  558bfaa0-x86-traps-avoid-using-current-too-early.patch
  5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
  559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
  559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
  559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
  559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
  559bdde5-pull-in-latest-linux-earlycpio.patch
  55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
  55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
  55a77e4f-dmar-device-scope-mem-leak-fix.patch
  55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
  55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
  55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
  55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
  55e43fd8-x86-NUMA-fix-setup_node.patch
  55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
  55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
  55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
  55f9345b-x86-MSI-fail-if-no-hardware-support.patch
  5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
  560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
  560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
  560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=384
2015-11-04 20:30:31 +00:00

References: bsc#907514 bsc#910258 bsc#918984 bsc#923967
# Commit 236e13ce60e1c0eb0535ad258e74a3789bc0d074
# Date 2015-06-19 10:58:45 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/MSI-X: cleanup
- __pci_enable_msix() now checks that an MSI-X capability was actually
  found
- pass "pos" to msix_capability_init() as both callers already know it
  (and hence there's no need to re-obtain it)
- call __pci_disable_msi{,x}() directly instead of via
  pci_disable_msi() from __pci_enable_msi{x,}() state validation paths
- use msix_control_reg() instead of open coding it (see the sketch below)
- log message adjustments
- coding style corrections
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
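A minimal standalone sketch of the "use msix_control_reg() instead of open
coding it" item (illustrative only, not part of the upstream change; the
PCI_MSIX_FLAGS value and the helper definition here are stand-ins inferred
from the hunks below, where msix_control_reg(pos) replaces the open-coded
pos + PCI_MSIX_FLAGS):

    #include <stdio.h>

    /* Hypothetical stand-ins for the Xen/PCI definitions the patch relies
     * on: the MSI-X Message Control word sits 2 bytes into the capability,
     * and msix_control_reg() just names that offset computation. */
    #define PCI_MSIX_FLAGS        2
    #define msix_control_reg(pos) ((pos) + PCI_MSIX_FLAGS)

    int main(void)
    {
        unsigned int pos = 0xb0;  /* example MSI-X capability offset */

        /* Both expressions address the same config-space register, which
         * is why the cleanup can swap one spelling for the other with no
         * functional change. */
        printf("open coded: %#x  helper: %#x\n",
               pos + PCI_MSIX_FLAGS, msix_control_reg(pos));
        return 0;
    }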
Index: xen-4.5.2-testing/xen/arch/x86/msi.c
===================================================================
--- xen-4.5.2-testing.orig/xen/arch/x86/msi.c
+++ xen-4.5.2-testing/xen/arch/x86/msi.c
@@ -35,6 +35,8 @@
static s8 __read_mostly use_msi = -1;
boolean_param("msi", use_msi);
+static void __pci_disable_msix(struct msi_desc *);
+
/* bitmap indicate which fixed map is free */
static DEFINE_SPINLOCK(msix_fixmap_lock);
static DECLARE_BITMAP(msix_fixmap_pages, FIX_MSIX_MAX_PAGES);
@@ -129,12 +131,14 @@ void msi_compose_msg(unsigned vector, co
unsigned dest;
memset(msg, 0, sizeof(*msg));
- if ( !cpumask_intersects(cpu_mask, &cpu_online_map) ) {
+ if ( !cpumask_intersects(cpu_mask, &cpu_online_map) )
+ {
dprintk(XENLOG_ERR,"%s, compose msi message error!!\n", __func__);
return;
}
- if ( vector ) {
+ if ( vector )
+ {
cpumask_t *mask = this_cpu(scratch_mask);
cpumask_and(mask, cpu_mask, &cpu_online_map);
@@ -195,8 +199,7 @@ static void read_msi_msg(struct msi_desc
}
case PCI_CAP_ID_MSIX:
{
- void __iomem *base;
- base = entry->mask_base;
+ void __iomem *base = entry->mask_base;
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
@@ -257,8 +260,7 @@ static int write_msi_msg(struct msi_desc
}
case PCI_CAP_ID_MSIX:
{
- void __iomem *base;
- base = entry->mask_base;
+ void __iomem *base = entry->mask_base;
writel(msg->address_lo,
base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
@@ -281,7 +283,7 @@ void set_msi_affinity(struct irq_desc *d
struct msi_desc *msi_desc = desc->msi_desc;
dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID || !msi_desc)
+ if ( dest == BAD_APICID || !msi_desc )
return;
ASSERT(spin_is_locked(&desc->lock));
@@ -332,11 +334,11 @@ static void msix_set_enable(struct pci_d
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX);
if ( pos )
{
- control = pci_conf_read16(seg, bus, slot, func, pos + PCI_MSIX_FLAGS);
+ control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
control &= ~PCI_MSIX_FLAGS_ENABLE;
if ( enable )
control |= PCI_MSIX_FLAGS_ENABLE;
- pci_conf_write16(seg, bus, slot, func, pos + PCI_MSIX_FLAGS, control);
+ pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
}
}
@@ -353,9 +355,11 @@ static void msi_set_mask_bit(struct irq_
ASSERT(spin_is_locked(&desc->lock));
BUG_ON(!entry || !entry->dev);
- switch (entry->msi_attrib.type) {
+ switch ( entry->msi_attrib.type )
+ {
case PCI_CAP_ID_MSI:
- if (entry->msi_attrib.maskbit) {
+ if ( entry->msi_attrib.maskbit )
+ {
u32 mask_bits;
u16 seg = entry->dev->seg;
u8 bus = entry->dev->bus;
@@ -703,13 +707,14 @@ static u64 read_pci_mem_bar(u16 seg, u8
* requested MSI-X entries with allocated irqs or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev,
+ unsigned int pos,
struct msi_info *msi,
struct msi_desc **desc,
unsigned int nr_entries)
{
struct arch_msix *msix = dev->msix;
struct msi_desc *entry = NULL;
- int pos, vf;
+ int vf;
u16 control;
u64 table_paddr;
u32 table_offset;
@@ -721,7 +726,6 @@ static int msix_capability_init(struct p
ASSERT(spin_is_locked(&pcidevs_lock));
- pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX);
control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
@@ -886,10 +890,9 @@ static int __pci_enable_msi(struct msi_i
old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI);
if ( old_desc )
{
- dprintk(XENLOG_WARNING, "irq %d has already mapped to MSI on "
- "device %04x:%02x:%02x.%01x\n",
- msi->irq, msi->seg, msi->bus,
- PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
+ printk(XENLOG_WARNING "irq %d already mapped to MSI on %04x:%02x:%02x.%u\n",
+ msi->irq, msi->seg, msi->bus,
+ PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
*desc = old_desc;
return 0;
}
@@ -897,10 +900,10 @@ static int __pci_enable_msi(struct msi_i
old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX);
if ( old_desc )
{
- dprintk(XENLOG_WARNING, "MSI-X is already in use on "
- "device %04x:%02x:%02x.%01x\n", msi->seg, msi->bus,
- PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
- pci_disable_msi(old_desc);
+ printk(XENLOG_WARNING "MSI-X already in use on %04x:%02x:%02x.%u\n",
+ msi->seg, msi->bus,
+ PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
+ __pci_disable_msix(old_desc);
}
return msi_capability_init(pdev, msi->irq, desc, msi->entry_nr);
@@ -914,7 +917,6 @@ static void __pci_disable_msi(struct msi
msi_set_enable(dev, 0);
BUG_ON(list_empty(&dev->msi_list));
-
}
/**
@@ -934,7 +936,7 @@ static void __pci_disable_msi(struct msi
**/
static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc)
{
- int status, pos, nr_entries;
+ int pos, nr_entries;
struct pci_dev *pdev;
u16 control;
u8 slot = PCI_SLOT(msi->devfn);
@@ -943,23 +945,22 @@ static int __pci_enable_msix(struct msi_
ASSERT(spin_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
- if ( !pdev )
+ pos = pci_find_cap_offset(msi->seg, msi->bus, slot, func, PCI_CAP_ID_MSIX);
+ if ( !pdev || !pos )
return -ENODEV;
- pos = pci_find_cap_offset(msi->seg, msi->bus, slot, func, PCI_CAP_ID_MSIX);
control = pci_conf_read16(msi->seg, msi->bus, slot, func,
msix_control_reg(pos));
nr_entries = multi_msix_capable(control);
- if (msi->entry_nr >= nr_entries)
+ if ( msi->entry_nr >= nr_entries )
return -EINVAL;
old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSIX);
if ( old_desc )
{
- dprintk(XENLOG_WARNING, "irq %d has already mapped to MSIX on "
- "device %04x:%02x:%02x.%01x\n",
- msi->irq, msi->seg, msi->bus,
- PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
+ printk(XENLOG_WARNING "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n",
+ msi->irq, msi->seg, msi->bus,
+ PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
*desc = old_desc;
return 0;
}
@@ -967,15 +968,13 @@ static int __pci_enable_msix(struct msi_
old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSI);
if ( old_desc )
{
- dprintk(XENLOG_WARNING, "MSI is already in use on "
- "device %04x:%02x:%02x.%01x\n", msi->seg, msi->bus,
- PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
- pci_disable_msi(old_desc);
-
+ printk(XENLOG_WARNING "MSI already in use on %04x:%02x:%02x.%u\n",
+ msi->seg, msi->bus,
+ PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
+ __pci_disable_msi(old_desc);
}
- status = msix_capability_init(pdev, msi, desc, nr_entries);
- return status;
+ return msix_capability_init(pdev, pos, msi, desc, nr_entries);
}
static void _pci_cleanup_msix(struct arch_msix *msix)
@@ -993,19 +992,16 @@ static void _pci_cleanup_msix(struct arc
static void __pci_disable_msix(struct msi_desc *entry)
{
- struct pci_dev *dev;
- int pos;
- u16 control, seg;
- u8 bus, slot, func;
-
- dev = entry->dev;
- seg = dev->seg;
- bus = dev->bus;
- slot = PCI_SLOT(dev->devfn);
- func = PCI_FUNC(dev->devfn);
+ struct pci_dev *dev = entry->dev;
+ u16 seg = dev->seg;
+ u8 bus = dev->bus;
+ u8 slot = PCI_SLOT(dev->devfn);
+ u8 func = PCI_FUNC(dev->devfn);
+ unsigned int pos = pci_find_cap_offset(seg, bus, slot, func,
+ PCI_CAP_ID_MSIX);
+ u16 control = pci_conf_read16(seg, bus, slot, func,
+ msix_control_reg(entry->msi_attrib.pos));
- pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX);
- control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
msix_set_enable(dev, 0);
BUG_ON(list_empty(&dev->msi_list));
@@ -1047,7 +1043,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
u16 control = pci_conf_read16(seg, bus, slot, func,
msix_control_reg(pos));
- rc = msix_capability_init(pdev, NULL, NULL,
+ rc = msix_capability_init(pdev, pos, NULL, NULL,
multi_msix_capable(control));
}
spin_unlock(&pcidevs_lock);
@@ -1066,8 +1062,8 @@ int pci_enable_msi(struct msi_info *msi,
if ( !use_msi )
return -EPERM;
- return msi->table_base ? __pci_enable_msix(msi, desc) :
- __pci_enable_msi(msi, desc);
+ return msi->table_base ? __pci_enable_msix(msi, desc) :
+ __pci_enable_msi(msi, desc);
}
/*
@@ -1117,7 +1113,9 @@ int pci_restore_msi_state(struct pci_dev
if ( !pdev )
return -EINVAL;
- ret = xsm_resource_setup_pci(XSM_PRIV, (pdev->seg << 16) | (pdev->bus << 8) | pdev->devfn);
+ ret = xsm_resource_setup_pci(XSM_PRIV,
+ (pdev->seg << 16) | (pdev->bus << 8) |
+ pdev->devfn);
if ( ret )
return ret;