23725-pci-add-device.patch
23762-iommu-fault-bm-off.patch
23763-pci-multi-seg-x2apic-vtd-no-crash.patch
23765-x86-irq-vector-leak.patch
23766-x86-msi-vf-bars.patch
23771-x86-ioapic-clear-pin.patch
23772-x86-trampoline.patch
23774-x86_64-EFI-EDD.patch
23776-x86-kexec-hpet-legacy-bcast-disable.patch
23781-pm-wide-ACPI-ids.patch
23782-x86-ioapic-clear-irr.patch
23783-ACPI-set-_PDC-bits.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=140
# HG changeset patch
# User Tim Deegan <Tim.Deegan@citrix.com>
# Date 1311608493 -3600
# Node ID aa54b8175954bd6ffeb3bcf72e782e133896b388
# Parent 9dbbf1631193bb6df679f5eaaee192ef4ef91fd9
VT-d: always clean up dpci timers.

If a VM has all its PCI devices deassigned, need_iommu(d) becomes
false but it might still have DPCI EOI timers that were init_timer()d
but not yet kill_timer()d. That causes xen to crash later because the
linked list of inactive timers gets corrupted, e.g.:

(XEN) Xen call trace:
(XEN) [<ffff82c480126256>] set_timer+0x1c2/0x24f
(XEN) [<ffff82c48011fbf8>] schedule+0x129/0x5dd
(XEN) [<ffff82c480122c1e>] __do_softirq+0x7e/0x89
(XEN) [<ffff82c480122c9d>] do_softirq+0x26/0x28
(XEN) [<ffff82c480153c85>] idle_loop+0x5a/0x5c
(XEN)
(XEN)
(XEN) ****************************************
(XEN) Panic on CPU 0:
(XEN) Assertion 'entry->next->prev == entry' failed at
/local/scratch/tdeegan/xen-unstable.hg/xen/include:172
(XEN) ****************************************

The following patch makes sure that the domain destruction path always
cleans up the DPCI state even if !need_iommu(d).

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>

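To make the failure mode described above concrete, here is a minimal,
self-contained C sketch. It is illustrative only, not Xen code: the
list_head, timer and dpci_state definitions below are simplified
stand-ins for the hypervisor's structures. A timer that was
init_timer()d onto an intrusive doubly-linked list, but never
kill_timer()d before the state containing it is torn down, leaves a
dangling node on the list, so the next list operation trips a
consistency check analogous to the 'entry->next->prev == entry'
assertion in the trace:

/* Illustrative sketch only -- simplified stand-ins, not Xen's actual
 * list_head/timer/DPCI definitions. */
#include <assert.h>
#include <string.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *entry, struct list_head *head)
{
    /* Consistency check analogous to the assertion in the crash above. */
    assert(head->next->prev == head);
    entry->next = head->next;
    entry->prev = head;
    head->next->prev = entry;
    head->next = entry;
}

/* A "timer" embedded in per-domain DPCI state, as the EOI timers are. */
struct timer { struct list_head inactive; };
struct dpci_state { struct timer eoi_timer; };

static struct list_head inactive_timers;

int main(void)
{
    struct dpci_state dpci;
    struct timer later;

    list_init(&inactive_timers);

    /* init_timer(): the EOI timer is linked onto the inactive-timer list. */
    list_add(&dpci.eoi_timer.inactive, &inactive_timers);

    /* Last PCI device deassigned and the DPCI state torn down -- but
     * kill_timer() was never called, so the list still points into
     * recycled memory. */
    memset(&dpci, 0xAA, sizeof(dpci));

    /* The next timer operation walks the corrupted list, and the check
     * equivalent to 'entry->next->prev == entry' fails here. */
    list_add(&later.inactive, &inactive_timers);

    return 0;
}
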
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -220,9 +220,6 @@ static void pci_clean_dpci_irqs(struct d
     if ( !iommu_enabled )
         return;
 
-    if ( !need_iommu(d) )
-        return;
-
     spin_lock(&d->event_lock);
     hvm_irq_dpci = domain_get_irq_dpci(d);
     if ( hvm_irq_dpci != NULL )
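The hunk simply removes the early return, so the cleanup under
d->event_lock (the path on which the DPCI EOI timers are meant to be
kill_timer()d and the DPCI state freed) is now reached even when
need_iommu(d) has already become false. In terms of the toy sketch
before the diff (reusing its struct definitions), the fix amounts to
making teardown unconditional: always unlink the timer before its
memory is recycled, instead of gating the cleanup on a property that
can change before domain destruction. clean_dpci_state() below is a
hypothetical illustrative name, not a Xen function:

/* Continuation of the illustrative sketch above (same toy structures). */
static void list_del(struct list_head *entry)
{
    assert(entry->next->prev == entry && entry->prev->next == entry);
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    entry->next = entry->prev = entry;
}

/* Hypothetical teardown helper: with the patch applied, the cleanup is
 * no longer skipped when the domain stops "needing" the IOMMU. */
static void clean_dpci_state(struct dpci_state *dpci)
{
    list_del(&dpci->eoi_timer.inactive);   /* kill_timer() equivalent */
    memset(dpci, 0xAA, sizeof(*dpci));     /* now safe to recycle the memory */
}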