- bsc#949046 - Increase %suse_version in SP1 to 1316
  xen.spec
- bsc#945167 - Running the command 'xl pci-assignable-add 03:10.1' a
  second time shows errors
  55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
- Upstream patches from Jan
  55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
  55f9345b-x86-MSI-fail-if-no-hardware-support.patch
  5604f239-x86-PV-properly-populate-descriptor-tables.patch
  5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
  560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
  560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
  560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
  560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
  560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
- bsc#941074 - VmError: Device 51728 (vbd) could not be connected.
  Hotplug scripts not working.
  hotplug-Linux-block-performance-fix.patch
- bsc#947165 - VUL-0: CVE-2015-7311: xen: libxl fails to honour readonly
  flag on disks with qemu-xen (XSA-142)
  CVE-2015-7311-xsa142.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=378
This commit is contained in:
  parent e932e0a863
  commit 19d8f590f0
@ -113,7 +113,7 @@ Reviewed-by: Jan Beulich <jbeulich@suse.com>
|
||||
if ( idx != 0 )
|
||||
--- a/xen/arch/x86/mm.c
|
||||
+++ b/xen/arch/x86/mm.c
|
||||
@@ -4587,7 +4587,7 @@ int xenmem_add_to_physmap_one(
|
||||
@@ -4592,7 +4592,7 @@ int xenmem_add_to_physmap_one(
|
||||
mfn = virt_to_mfn(d->shared_info);
|
||||
break;
|
||||
case XENMAPSPACE_grant_table:
|
||||
@ -122,7 +122,7 @@ Reviewed-by: Jan Beulich <jbeulich@suse.com>
|
||||
|
||||
if ( d->grant_table->gt_version == 0 )
|
||||
d->grant_table->gt_version = 1;
|
||||
@@ -4609,7 +4609,7 @@ int xenmem_add_to_physmap_one(
|
||||
@@ -4614,7 +4614,7 @@ int xenmem_add_to_physmap_one(
|
||||
mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
|
||||
}
|
||||
|
||||
|
@ -104,7 +104,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
u32 mask_bits;
|
||||
u16 seg = entry->dev->seg;
|
||||
u8 bus = entry->dev->bus;
|
||||
@@ -701,13 +705,14 @@ static u64 read_pci_mem_bar(u16 seg, u8
|
||||
@@ -703,13 +707,14 @@ static u64 read_pci_mem_bar(u16 seg, u8
|
||||
* requested MSI-X entries with allocated irqs or non-zero for otherwise.
|
||||
**/
|
||||
static int msix_capability_init(struct pci_dev *dev,
|
||||
@ -120,7 +120,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
u16 control;
|
||||
u64 table_paddr;
|
||||
u32 table_offset;
|
||||
@@ -719,7 +724,6 @@ static int msix_capability_init(struct p
|
||||
@@ -721,7 +726,6 @@ static int msix_capability_init(struct p
|
||||
|
||||
ASSERT(spin_is_locked(&pcidevs_lock));
|
||||
|
||||
@ -128,7 +128,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
|
||||
msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
|
||||
|
||||
@@ -884,10 +888,9 @@ static int __pci_enable_msi(struct msi_i
|
||||
@@ -886,10 +890,9 @@ static int __pci_enable_msi(struct msi_i
|
||||
old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI);
|
||||
if ( old_desc )
|
||||
{
|
||||
@ -142,7 +142,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
*desc = old_desc;
|
||||
return 0;
|
||||
}
|
||||
@@ -895,10 +898,10 @@ static int __pci_enable_msi(struct msi_i
|
||||
@@ -897,10 +900,10 @@ static int __pci_enable_msi(struct msi_i
|
||||
old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX);
|
||||
if ( old_desc )
|
||||
{
|
||||
@ -157,7 +157,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
}
|
||||
|
||||
return msi_capability_init(pdev, msi->irq, desc, msi->entry_nr);
|
||||
@@ -912,7 +915,6 @@ static void __pci_disable_msi(struct msi
|
||||
@@ -914,7 +917,6 @@ static void __pci_disable_msi(struct msi
|
||||
msi_set_enable(dev, 0);
|
||||
|
||||
BUG_ON(list_empty(&dev->msi_list));
|
||||
@ -165,7 +165,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -932,7 +934,7 @@ static void __pci_disable_msi(struct msi
|
||||
@@ -934,7 +936,7 @@ static void __pci_disable_msi(struct msi
|
||||
**/
|
||||
static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc)
|
||||
{
|
||||
@ -174,7 +174,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
struct pci_dev *pdev;
|
||||
u16 control;
|
||||
u8 slot = PCI_SLOT(msi->devfn);
|
||||
@@ -941,23 +943,22 @@ static int __pci_enable_msix(struct msi_
|
||||
@@ -943,23 +945,22 @@ static int __pci_enable_msix(struct msi_
|
||||
|
||||
ASSERT(spin_is_locked(&pcidevs_lock));
|
||||
pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
|
||||
@ -204,7 +204,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
*desc = old_desc;
|
||||
return 0;
|
||||
}
|
||||
@@ -965,15 +966,13 @@ static int __pci_enable_msix(struct msi_
|
||||
@@ -967,15 +968,13 @@ static int __pci_enable_msix(struct msi_
|
||||
old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSI);
|
||||
if ( old_desc )
|
||||
{
|
||||
@ -225,7 +225,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
}
|
||||
|
||||
static void _pci_cleanup_msix(struct arch_msix *msix)
|
||||
@@ -991,19 +990,16 @@ static void _pci_cleanup_msix(struct arc
|
||||
@@ -993,19 +992,16 @@ static void _pci_cleanup_msix(struct arc
|
||||
|
||||
static void __pci_disable_msix(struct msi_desc *entry)
|
||||
{
|
||||
@ -254,7 +254,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
msix_set_enable(dev, 0);
|
||||
|
||||
BUG_ON(list_empty(&dev->msi_list));
|
||||
@@ -1045,7 +1041,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
|
||||
@@ -1047,7 +1043,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
|
||||
u16 control = pci_conf_read16(seg, bus, slot, func,
|
||||
msix_control_reg(pos));
|
||||
|
||||
@ -263,7 +263,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
multi_msix_capable(control));
|
||||
}
|
||||
spin_unlock(&pcidevs_lock);
|
||||
@@ -1064,8 +1060,8 @@ int pci_enable_msi(struct msi_info *msi,
|
||||
@@ -1066,8 +1062,8 @@ int pci_enable_msi(struct msi_info *msi,
|
||||
if ( !use_msi )
|
||||
return -EPERM;
|
||||
|
||||
@ -274,7 +274,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1115,7 +1111,9 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1117,7 +1113,9 @@ int pci_restore_msi_state(struct pci_dev
|
||||
if ( !pdev )
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -230,7 +230,7 @@ Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
.enable = unmask_msi_irq,
|
||||
.disable = mask_msi_irq,
|
||||
.ack = ack_maskable_msi_irq,
|
||||
@@ -591,7 +603,8 @@ static int msi_capability_init(struct pc
|
||||
@@ -593,7 +605,8 @@ static int msi_capability_init(struct pc
|
||||
entry[i].msi_attrib.is_64 = is_64bit_address(control);
|
||||
entry[i].msi_attrib.entry_nr = i;
|
||||
entry[i].msi_attrib.maskbit = is_mask_bit_support(control);
|
||||
@ -240,7 +240,7 @@ Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
entry[i].msi_attrib.pos = pos;
|
||||
if ( entry[i].msi_attrib.maskbit )
|
||||
entry[i].msi.mpos = mpos;
|
||||
@@ -817,7 +830,8 @@ static int msix_capability_init(struct p
|
||||
@@ -819,7 +832,8 @@ static int msix_capability_init(struct p
|
||||
entry->msi_attrib.is_64 = 1;
|
||||
entry->msi_attrib.entry_nr = msi->entry_nr;
|
||||
entry->msi_attrib.maskbit = 1;
|
||||
@ -250,7 +250,7 @@ Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
entry->msi_attrib.pos = pos;
|
||||
entry->irq = msi->irq;
|
||||
entry->dev = dev;
|
||||
@@ -1152,7 +1166,8 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1154,7 +1168,8 @@ int pci_restore_msi_state(struct pci_dev
|
||||
|
||||
for ( i = 0; ; )
|
||||
{
|
||||
@ -260,7 +260,7 @@ Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
|
||||
if ( !--nr )
|
||||
break;
|
||||
@@ -1304,7 +1319,7 @@ static void dump_msi(unsigned char key)
|
||||
@@ -1306,7 +1321,7 @@ static void dump_msi(unsigned char key)
|
||||
else
|
||||
mask = '?';
|
||||
printk(" %-6s%4u vec=%02x%7s%6s%3sassert%5s%7s"
|
||||
@ -269,7 +269,7 @@ Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
type, irq,
|
||||
(data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT,
|
||||
data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
|
||||
@@ -1312,7 +1327,10 @@ static void dump_msi(unsigned char key)
|
||||
@@ -1314,7 +1329,10 @@ static void dump_msi(unsigned char key)
|
||||
data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
|
||||
addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
|
||||
addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",
|
||||
@ -317,18 +317,18 @@ Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
static unsigned int iommu_msi_startup(struct irq_desc *desc)
|
||||
--- a/xen/drivers/passthrough/vtd/iommu.c
|
||||
+++ b/xen/drivers/passthrough/vtd/iommu.c
|
||||
@@ -996,7 +996,7 @@ static void dma_msi_unmask(struct irq_de
|
||||
spin_lock_irqsave(&iommu->register_lock, flags);
|
||||
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
|
||||
@@ -999,7 +999,7 @@ static void dma_msi_unmask(struct irq_de
|
||||
sts &= ~DMA_FECTL_IM;
|
||||
dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
|
||||
spin_unlock_irqrestore(&iommu->register_lock, flags);
|
||||
- iommu->msi.msi_attrib.masked = 0;
|
||||
+ iommu->msi.msi_attrib.host_masked = 0;
|
||||
}
|
||||
|
||||
static void dma_msi_mask(struct irq_desc *desc)
|
||||
@@ -1008,7 +1008,7 @@ static void dma_msi_mask(struct irq_desc
|
||||
spin_lock_irqsave(&iommu->register_lock, flags);
|
||||
dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
|
||||
@@ -1014,7 +1014,7 @@ static void dma_msi_mask(struct irq_desc
|
||||
sts |= DMA_FECTL_IM;
|
||||
dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
|
||||
spin_unlock_irqrestore(&iommu->register_lock, flags);
|
||||
- iommu->msi.msi_attrib.masked = 1;
|
||||
+ iommu->msi.msi_attrib.host_masked = 1;
|
||||
|
@ -14,7 +14,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
--- a/xen/arch/x86/msi.c
|
||||
+++ b/xen/arch/x86/msi.c
|
||||
@@ -1108,6 +1108,12 @@ void pci_cleanup_msi(struct pci_dev *pde
|
||||
@@ -1110,6 +1110,12 @@ void pci_cleanup_msi(struct pci_dev *pde
|
||||
msi_free_irqs(pdev);
|
||||
}
|
||||
|
||||
|
@ -15,7 +15,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
--- a/xen/arch/x86/msi.c
|
||||
+++ b/xen/arch/x86/msi.c
|
||||
@@ -843,6 +843,12 @@ static int msix_capability_init(struct p
|
||||
@@ -845,6 +845,12 @@ static int msix_capability_init(struct p
|
||||
|
||||
if ( !msix->used_entries )
|
||||
{
|
||||
@ -28,7 +28,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
if ( rangeset_add_range(mmio_ro_ranges, msix->table.first,
|
||||
msix->table.last) )
|
||||
WARN();
|
||||
@@ -1111,6 +1117,34 @@ void pci_cleanup_msi(struct pci_dev *pde
|
||||
@@ -1113,6 +1119,34 @@ void pci_cleanup_msi(struct pci_dev *pde
|
||||
int pci_msi_conf_write_intercept(struct pci_dev *pdev, unsigned int reg,
|
||||
unsigned int size, uint32_t *data)
|
||||
{
|
||||
|
@ -283,7 +283,7 @@ Backporting note (largely to myself):
|
||||
}
|
||||
|
||||
void ack_nonmaskable_msi_irq(struct irq_desc *desc)
|
||||
@@ -740,6 +809,9 @@ static int msix_capability_init(struct p
|
||||
@@ -742,6 +811,9 @@ static int msix_capability_init(struct p
|
||||
control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
|
||||
msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
|
||||
|
||||
@ -293,7 +293,7 @@ Backporting note (largely to myself):
|
||||
if ( desc )
|
||||
{
|
||||
entry = alloc_msi_entry(1);
|
||||
@@ -879,7 +951,8 @@ static int msix_capability_init(struct p
|
||||
@@ -881,7 +953,8 @@ static int msix_capability_init(struct p
|
||||
++msix->used_entries;
|
||||
|
||||
/* Restore MSI-X enabled bits */
|
||||
@ -303,7 +303,7 @@ Backporting note (largely to myself):
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1024,8 +1097,16 @@ static void __pci_disable_msix(struct ms
|
||||
@@ -1026,8 +1099,16 @@ static void __pci_disable_msix(struct ms
|
||||
|
||||
BUG_ON(list_empty(&dev->msi_list));
|
||||
|
||||
@ -322,7 +322,7 @@ Backporting note (largely to myself):
|
||||
pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
|
||||
|
||||
_pci_cleanup_msix(dev->msix);
|
||||
@@ -1199,15 +1280,24 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1201,15 +1282,24 @@ int pci_restore_msi_state(struct pci_dev
|
||||
nr = entry->msi.nvec;
|
||||
}
|
||||
else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
|
||||
|
@ -171,7 +171,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
}
|
||||
|
||||
int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc,
|
||||
@@ -803,20 +848,38 @@ static int msix_capability_init(struct p
|
||||
@@ -805,20 +850,38 @@ static int msix_capability_init(struct p
|
||||
u8 bus = dev->bus;
|
||||
u8 slot = PCI_SLOT(dev->devfn);
|
||||
u8 func = PCI_FUNC(dev->devfn);
|
||||
@ -211,7 +211,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
ASSERT(msi);
|
||||
}
|
||||
|
||||
@@ -847,6 +910,8 @@ static int msix_capability_init(struct p
|
||||
@@ -849,6 +912,8 @@ static int msix_capability_init(struct p
|
||||
{
|
||||
if ( !msi || !msi->table_base )
|
||||
{
|
||||
@ -220,7 +220,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
xfree(entry);
|
||||
return -ENXIO;
|
||||
}
|
||||
@@ -889,6 +954,8 @@ static int msix_capability_init(struct p
|
||||
@@ -891,6 +956,8 @@ static int msix_capability_init(struct p
|
||||
|
||||
if ( idx < 0 )
|
||||
{
|
||||
@ -229,7 +229,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
xfree(entry);
|
||||
return idx;
|
||||
}
|
||||
@@ -915,7 +982,7 @@ static int msix_capability_init(struct p
|
||||
@@ -917,7 +984,7 @@ static int msix_capability_init(struct p
|
||||
|
||||
if ( !msix->used_entries )
|
||||
{
|
||||
@ -238,7 +238,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
if ( !msix->guest_maskall )
|
||||
control &= ~PCI_MSIX_FLAGS_MASKALL;
|
||||
else
|
||||
@@ -951,8 +1018,8 @@ static int msix_capability_init(struct p
|
||||
@@ -953,8 +1020,8 @@ static int msix_capability_init(struct p
|
||||
++msix->used_entries;
|
||||
|
||||
/* Restore MSI-X enabled bits */
|
||||
@ -249,7 +249,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1092,8 +1159,15 @@ static void __pci_disable_msix(struct ms
|
||||
@@ -1094,8 +1161,15 @@ static void __pci_disable_msix(struct ms
|
||||
PCI_CAP_ID_MSIX);
|
||||
u16 control = pci_conf_read16(seg, bus, slot, func,
|
||||
msix_control_reg(entry->msi_attrib.pos));
|
||||
@ -266,7 +266,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
BUG_ON(list_empty(&dev->msi_list));
|
||||
|
||||
@@ -1105,8 +1179,11 @@ static void __pci_disable_msix(struct ms
|
||||
@@ -1107,8 +1181,11 @@ static void __pci_disable_msix(struct ms
|
||||
"cannot disable IRQ %d: masking MSI-X on %04x:%02x:%02x.%u\n",
|
||||
entry->irq, dev->seg, dev->bus,
|
||||
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
|
||||
@ -279,7 +279,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
|
||||
|
||||
_pci_cleanup_msix(dev->msix);
|
||||
@@ -1255,6 +1332,8 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1257,6 +1334,8 @@ int pci_restore_msi_state(struct pci_dev
|
||||
list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
|
||||
{
|
||||
unsigned int i = 0, nr = 1;
|
||||
@ -288,7 +288,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
irq = entry->irq;
|
||||
desc = &irq_desc[irq];
|
||||
@@ -1281,10 +1360,18 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1283,10 +1362,18 @@ int pci_restore_msi_state(struct pci_dev
|
||||
}
|
||||
else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
|
||||
{
|
||||
@ -308,7 +308,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
return -ENXIO;
|
||||
}
|
||||
}
|
||||
@@ -1314,11 +1401,9 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1316,11 +1403,9 @@ int pci_restore_msi_state(struct pci_dev
|
||||
if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
|
||||
{
|
||||
unsigned int cpos = msi_control_reg(entry->msi_attrib.pos);
|
||||
@ -322,7 +322,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
multi_msi_enable(control, entry->msi.nvec);
|
||||
pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
|
||||
PCI_FUNC(pdev->devfn), cpos, control);
|
||||
@@ -1326,7 +1411,9 @@ int pci_restore_msi_state(struct pci_dev
|
||||
@@ -1328,7 +1413,9 @@ int pci_restore_msi_state(struct pci_dev
|
||||
msi_set_enable(pdev, 1);
|
||||
}
|
||||
else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
|
||||
|
@ -15,7 +15,7 @@ Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
|
||||
--- a/xen/arch/x86/msi.c
|
||||
+++ b/xen/arch/x86/msi.c
|
||||
@@ -1303,6 +1303,37 @@ int pci_msi_conf_write_intercept(struct
|
||||
@@ -1305,6 +1305,37 @@ int pci_msi_conf_write_intercept(struct
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -23,7 +23,6 @@ were found (and are being corrected here at once):
|
||||
Signed-off-by: Jan Beulich <jbeulich@suse.com>
|
||||
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
Release-acked-by: Wei Liu <wei.liu2@citrix.com>
|
||||
|
||||
# Commit 0a7167d9b20cdc48e6ea320fbbb920b3267c9757
|
||||
# Date 2015-09-04 14:58:07 +0100
|
||||
|
55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch (new file, 68 lines)
@@ -0,0 +1,68 @@
# Commit 244582a01dcb49fa30083725964a066937cc94f2
# Date 2015-09-11 16:24:56 +0200
# Author Kouya Shimura <kouya@jp.fujitsu.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/hvm: fix saved pmtimer and hpet values

The ACPI PM timer is sometimes broken on live migration.
Since vcpu->arch.hvm_vcpu.guest_time is always zero in other than
"delay for missed ticks mode". Even in "delay for missed ticks mode",
vcpu's guest_time field is not valid (i.e. zero) when
the state of vcpu is "blocked". (see pt_save_timer function)

The original author (Tim Deegan) of pmtimer_save() must have intended
that it saves the last scheduled time of the vcpu. Unfortunately it was
already implied this bug. FYI, there is no other timer mode than
"delay for missed ticks mode" then.

For consistency with HPET, pmtimer_save() should refer hvm_get_guest_time()
to update the counter as well as hpet_save() does.

Without this patch, the clock of windows server 2012R2 without HPET
might leap forward several minutes on live migration.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>

Retain use of ->arch.hvm_vcpu.guest_time when non-zero. Do the inverse
adjustment for vHPET.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: Kouya Shimura <kouya@jp.fujitsu.com>

--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -506,11 +506,13 @@ const struct hvm_mmio_handler hpet_mmio_
 static int hpet_save(struct domain *d, hvm_domain_context_t *h)
 {
     HPETState *hp = domain_vhpet(d);
+    struct vcpu *v = pt_global_vcpu_target(d);
     int rc;
     uint64_t guest_time;

     write_lock(&hp->lock);
-    guest_time = guest_time_hpet(hp);
+    guest_time = (v->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(v)) /
+                 STIME_PER_HPET_TICK;

     /* Write the proper value into the main counter */
     if ( hpet_enabled(hp) )
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -250,10 +250,12 @@ static int pmtimer_save(struct domain *d

     spin_lock(&s->lock);

-    /* Update the counter to the guest's current time. We always save
-     * with the domain paused, so the saved time should be after the
-     * last_gtime, but just in case, make sure we only go forwards */
-    x = ((s->vcpu->arch.hvm_vcpu.guest_time - s->last_gtime) * s->scale) >> 32;
+    /*
+     * Update the counter to the guest's current time. Make sure it only
+     * goes forwards.
+     */
+    x = (((s->vcpu->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(s->vcpu)) -
+          s->last_gtime) * s->scale) >> 32;
     if ( x < 1UL<<31 )
         s->pm.tmr_val += x;
     if ( (s->pm.tmr_val & TMR_VAL_MSB) != msb )
@ -0,0 +1,106 @@
|
||||
References: bsc#945167
|
||||
|
||||
# Commit 6e1e3480c3878bac5d244925974a6852c47c809b
|
||||
# Date 2015-09-15 11:58:26 +0100
|
||||
# Author Jan Beulich <JBeulich@suse.com>
|
||||
# Committer Ian Campbell <ian.campbell@citrix.com>
|
||||
libxl: slightly refine pci-assignable-{add, remove} handling
|
||||
|
||||
While it appears to be intentional for "xl pci-assignable-remove" to
|
||||
not re-bind the original driver by default (requires the -r option),
|
||||
permanently losing the information which driver was originally used
|
||||
seems bad. Make "add; remove; add; remove -r" re-bind the original
|
||||
driver by allowing "remove" to delete the information only upon
|
||||
successful re-bind.
|
||||
|
||||
In the course of this I also noticed that binding information is lost
|
||||
when upon first "add" pciback isn't loaded yet, due to its presence not
|
||||
being checked for early enough. Adjust pciback_dev_is_assigned()
|
||||
accordingly, and properly distinguish "yes" and "error" returns in the
|
||||
"add" case (removing a redundant error message from the "remove" path
|
||||
for consistency).
|
||||
|
||||
Signed-off-by: Jan Beulich <jbeulich@suse.com>
|
||||
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
|
||||
Acked-by: Ian Campbell <ian.campbell@citrix.com>
|
||||
|
||||
--- a/tools/libxl/libxl_pci.c
|
||||
+++ b/tools/libxl/libxl_pci.c
|
||||
@@ -543,6 +543,17 @@ static int pciback_dev_is_assigned(libxl
|
||||
int rc;
|
||||
struct stat st;
|
||||
|
||||
+ if ( access(SYSFS_PCIBACK_DRIVER, F_OK) < 0 ) {
|
||||
+ if ( errno == ENOENT ) {
|
||||
+ LIBXL__LOG(ctx, LIBXL__LOG_ERROR,
|
||||
+ "Looks like pciback driver is not loaded");
|
||||
+ } else {
|
||||
+ LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
|
||||
+ "Can't access "SYSFS_PCIBACK_DRIVER);
|
||||
+ }
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
spath = libxl__sprintf(gc, SYSFS_PCIBACK_DRIVER"/"PCI_BDF,
|
||||
pcidev->domain, pcidev->bus,
|
||||
pcidev->dev, pcidev->func);
|
||||
@@ -658,6 +669,7 @@ static int libxl__device_pci_assignable_
|
||||
libxl_ctx *ctx = libxl__gc_owner(gc);
|
||||
unsigned dom, bus, dev, func;
|
||||
char *spath, *driver_path = NULL;
|
||||
+ int rc;
|
||||
struct stat st;
|
||||
|
||||
/* Local copy for convenience */
|
||||
@@ -674,7 +686,11 @@ static int libxl__device_pci_assignable_
|
||||
}
|
||||
|
||||
/* Check to see if it's already assigned to pciback */
|
||||
- if ( pciback_dev_is_assigned(gc, pcidev) ) {
|
||||
+ rc = pciback_dev_is_assigned(gc, pcidev);
|
||||
+ if ( rc < 0 ) {
|
||||
+ return ERROR_FAIL;
|
||||
+ }
|
||||
+ if ( rc ) {
|
||||
LIBXL__LOG(ctx, LIBXL__LOG_WARNING, PCI_BDF" already assigned to pciback",
|
||||
dom, bus, dev, func);
|
||||
return 0;
|
||||
@@ -692,11 +708,18 @@ static int libxl__device_pci_assignable_
|
||||
if ( rebind ) {
|
||||
if ( driver_path ) {
|
||||
pci_assignable_driver_path_write(gc, pcidev, driver_path);
|
||||
+ } else if ( (driver_path =
|
||||
+ pci_assignable_driver_path_read(gc, pcidev)) != NULL ) {
|
||||
+ LIBXL__LOG(ctx, LIBXL__LOG_INFO,
|
||||
+ PCI_BDF" not bound to a driver, will be rebound to %s",
|
||||
+ dom, bus, dev, func, driver_path);
|
||||
} else {
|
||||
LIBXL__LOG(ctx, LIBXL__LOG_WARNING,
|
||||
PCI_BDF" not bound to a driver, will not be rebound.",
|
||||
dom, bus, dev, func);
|
||||
}
|
||||
+ } else {
|
||||
+ pci_assignable_driver_path_remove(gc, pcidev);
|
||||
}
|
||||
|
||||
if ( pciback_dev_assign(gc, pcidev) ) {
|
||||
@@ -717,7 +740,6 @@ static int libxl__device_pci_assignable_
|
||||
|
||||
/* Unbind from pciback */
|
||||
if ( (rc=pciback_dev_is_assigned(gc, pcidev)) < 0 ) {
|
||||
- LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "Checking if pciback was assigned");
|
||||
return ERROR_FAIL;
|
||||
} else if ( rc ) {
|
||||
pciback_dev_unassign(gc, pcidev);
|
||||
@@ -741,9 +763,9 @@ static int libxl__device_pci_assignable_
|
||||
"Couldn't bind device to %s", driver_path);
|
||||
return -1;
|
||||
}
|
||||
- }
|
||||
|
||||
- pci_assignable_driver_path_remove(gc, pcidev);
|
||||
+ pci_assignable_driver_path_remove(gc, pcidev);
|
||||
+ }
|
||||
} else {
|
||||
if ( rebind ) {
|
||||
LIBXL__LOG(ctx, LIBXL__LOG_WARNING,
|
55f9345b-x86-MSI-fail-if-no-hardware-support.patch (new file, 23 lines)
@@ -0,0 +1,23 @@
# Commit c7d5d5d8ea1ecbd6ef8b47dace4dec825f0f6e48
# Date 2015-09-16 11:20:27 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/MSI: fail if no hardware support

This is to guard against buggy callers (luckily Dom0 only) invoking
the respective hypercall for a device not being MSI-capable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -566,6 +566,8 @@ static int msi_capability_init(struct pc

     ASSERT(spin_is_locked(&pcidevs_lock));
     pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
+    if ( !pos )
+        return -ENODEV;
     control = pci_conf_read16(seg, bus, slot, func, msi_control_reg(pos));
     maxvec = multi_msi_capable(control);
     if ( nvec > maxvec )
5604f239-x86-PV-properly-populate-descriptor-tables.patch (new file, 83 lines)
@@ -0,0 +1,83 @@
# Commit cf6d39f81992c29a637c603dbabf1e21a0ea563f
# Date 2015-09-25 09:05:29 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/PV: properly populate descriptor tables

Us extending the GDT limit past the Xen descriptors so far meant that
guests (including user mode programs) accessing any descriptor table
slot above the original OS'es limit but below the first Xen descriptor
caused a #PF, converted to a #GP in our #PF handler. Which is quite
different from the native behavior, where some of such accesses (LAR
and LSL) don't fault. Mimic that behavior by mapping a blank page into
unused slots.

While not strictly required, treat the LDT the same for consistency.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -505,12 +505,13 @@ void update_cr3(struct vcpu *v)
     make_cr3(v, cr3_mfn);
 }

+static const char __section(".bss.page_aligned") zero_page[PAGE_SIZE];

 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
     l1_pgentry_t *pl1e;
-    int i;
-    unsigned long pfn;
+    unsigned int i;
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
     struct page_info *page;

     BUG_ON(unlikely(in_irq()));
@@ -526,8 +527,10 @@ static void invalidate_shadow_ldt(struct
     for ( i = 16; i < 32; i++ )
     {
         pfn = l1e_get_pfn(pl1e[i]);
-        if ( pfn == 0 ) continue;
-        l1e_write(&pl1e[i], l1e_empty());
+        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) || pfn == zero_pfn )
+            continue;
+        l1e_write(&pl1e[i],
+                  l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR & ~_PAGE_RW));
         page = mfn_to_page(pfn);
         ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
@@ -4360,16 +4363,18 @@ long do_update_va_mapping_otherdomain(un
 void destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
-    int i;
-    unsigned long pfn;
+    unsigned int i;
+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));

     v->arch.pv_vcpu.gdt_ents = 0;
     pl1e = gdt_ldt_ptes(v->domain, v);
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
-        if ( (pfn = l1e_get_pfn(pl1e[i])) != 0 )
+        pfn = l1e_get_pfn(pl1e[i]);
+        if ( (l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) && pfn != zero_pfn )
             put_page_and_type(mfn_to_page(pfn));
-        l1e_write(&pl1e[i], l1e_empty());
+        l1e_write(&pl1e[i],
+                  l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR & ~_PAGE_RW));
         v->arch.pv_vcpu.gdt_frames[i] = 0;
     }
 }
@@ -4382,7 +4387,7 @@ long set_gdt(struct vcpu *v,
     struct domain *d = v->domain;
     l1_pgentry_t *pl1e;
     /* NB. There are 512 8-byte entries per GDT page. */
-    int i, nr_pages = (entries + 511) / 512;
+    unsigned int i, nr_pages = (entries + 511) / 512;

     if ( entries > FIRST_RESERVED_GDT_ENTRY )
         return -EINVAL;
5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch (new file, 77 lines)
@@ -0,0 +1,77 @@
# Commit 86f3ff9fc4cc3cb69b96c1de74bcc51f738fe2b9
# Date 2015-09-25 09:08:22 +0200
# Author Quan Xu <quan.xu@intel.com>
# Committer Jan Beulich <jbeulich@suse.com>
vt-d: fix IM bit mask and unmask of Fault Event Control Register

Bit 0:29 in Fault Event Control Register are 'Reserved and Preserved',
software cannot write 0 to it unconditionally. Software must preserve
the value read for writes.

Signed-off-by: Quan Xu <quan.xu@intel.com>
Acked-by: Yang Zhang <yang.z.zhang@intel.com>

# Commit 26b300bd727ef00a8f60329212a83c3b027a48f7
# Date 2015-09-25 18:03:04 +0200
# Author Quan Xu <quan.xu@intel.com>
# Committer Jan Beulich <jbeulich@suse.com>
vt-d: fix IM bit unmask of Fault Event Control Register in init_vtd_hw()

Bit 0:29 in Fault Event Control Register are 'Reserved and Preserved',
software cannot write 0 to it unconditionally. Software must preserve
the value read for writes.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Quan Xu <quan.xu@intel.com>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -991,10 +991,13 @@ static void dma_msi_unmask(struct irq_de
 {
     struct iommu *iommu = desc->action->dev_id;
     unsigned long flags;
+    u32 sts;

     /* unmask it */
     spin_lock_irqsave(&iommu->register_lock, flags);
-    dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+    sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+    sts &= ~DMA_FECTL_IM;
+    dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
     iommu->msi.msi_attrib.masked = 0;
 }
@@ -1003,10 +1006,13 @@ static void dma_msi_mask(struct irq_desc
 {
     unsigned long flags;
     struct iommu *iommu = desc->action->dev_id;
+    u32 sts;

     /* mask it */
     spin_lock_irqsave(&iommu->register_lock, flags);
-    dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
+    sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+    sts |= DMA_FECTL_IM;
+    dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
     iommu->msi.msi_attrib.masked = 1;
 }
@@ -2002,6 +2008,7 @@ static int init_vtd_hw(void)
     struct iommu_flush *flush = NULL;
     int ret;
     unsigned long flags;
+    u32 sts;

     /*
      * Basic VT-d HW init: set VT-d interrupt, clear VT-d faults.
@@ -2015,7 +2022,9 @@ static int init_vtd_hw(void)
         clear_fault_bits(iommu);

         spin_lock_irqsave(&iommu->register_lock, flags);
-        dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+        sts = dmar_readl(iommu->reg, DMAR_FECTL_REG);
+        sts &= ~DMA_FECTL_IM;
+        dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
         spin_unlock_irqrestore(&iommu->register_lock, flags);
     }
560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch (new file, 48 lines)
@@ -0,0 +1,48 @@
# Commit 6c0e4ad60850032c9bbd5d18b8446421c97e08e4
# Date 2015-09-29 10:25:29 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/EPT: tighten conditions of IOMMU mapping updates

Permission changes should also result in updates or TLB flushes.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -619,6 +619,7 @@ ept_set_entry(struct p2m_domain *p2m, un
     uint8_t ipat = 0;
     int need_modify_vtd_table = 1;
     int vtd_pte_present = 0;
+    unsigned int iommu_flags = p2m_get_iommu_flags(p2mt);
     enum { sync_off, sync_on, sync_check } needs_sync = sync_check;
     ept_entry_t old_entry = { .epte = 0 };
     ept_entry_t new_entry = { .epte = 0 };
@@ -749,8 +750,9 @@ ept_set_entry(struct p2m_domain *p2m, un
         new_entry.mfn = mfn_x(mfn);

         /* Safe to read-then-write because we hold the p2m lock */
-        if ( ept_entry->mfn == new_entry.mfn )
-            need_modify_vtd_table = 0;
+        if ( ept_entry->mfn == new_entry.mfn &&
+             p2m_get_iommu_flags(ept_entry->sa_p2mt) == iommu_flags )
+            need_modify_vtd_table = 0;

         ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
     }
@@ -775,11 +777,9 @@ out:
         iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
     else
     {
-        unsigned int flags = p2m_get_iommu_flags(p2mt);
-
-        if ( flags != 0 )
+        if ( iommu_flags )
             for ( i = 0; i < (1 << order); i++ )
-                iommu_map_page(d, gfn + i, mfn_x(mfn) + i, flags);
+                iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
         else
             for ( i = 0; i < (1 << order); i++ )
                 iommu_unmap_page(d, gfn + i);
@ -0,0 +1,97 @@
|
||||
# Commit 960265fbd878cdc9841473b755e4ccc9eb1942d2
|
||||
# Date 2015-09-29 13:55:34 +0200
|
||||
# Author Jan Beulich <jbeulich@suse.com>
|
||||
# Committer Jan Beulich <jbeulich@suse.com>
|
||||
x86/p2m-pt: delay freeing of intermediate page tables
|
||||
|
||||
Old intermediate page tables must be freed only after IOMMU side
|
||||
updates/flushes have got carried out.
|
||||
|
||||
Signed-off-by: Jan Beulich <jbeulich@suse.com>
|
||||
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
|
||||
|
||||
--- a/xen/arch/x86/mm/p2m-pt.c
|
||||
+++ b/xen/arch/x86/mm/p2m-pt.c
|
||||
@@ -486,8 +486,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
/* XXX -- this might be able to be faster iff current->domain == d */
|
||||
void *table;
|
||||
unsigned long i, gfn_remainder = gfn;
|
||||
- l1_pgentry_t *p2m_entry;
|
||||
- l1_pgentry_t entry_content;
|
||||
+ l1_pgentry_t *p2m_entry, entry_content;
|
||||
+ /* Intermediate table to free if we're replacing it with a superpage. */
|
||||
+ l1_pgentry_t intermediate_entry = l1e_empty();
|
||||
l2_pgentry_t l2e_content;
|
||||
l3_pgentry_t l3e_content;
|
||||
int rc;
|
||||
@@ -535,7 +536,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
*/
|
||||
if ( page_order == PAGE_ORDER_1G )
|
||||
{
|
||||
- l1_pgentry_t old_entry = l1e_empty();
|
||||
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
|
||||
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
|
||||
L3_PAGETABLE_ENTRIES);
|
||||
@@ -545,7 +545,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
{
|
||||
/* We're replacing a non-SP page with a superpage. Make sure to
|
||||
* handle freeing the table properly. */
|
||||
- old_entry = *p2m_entry;
|
||||
+ intermediate_entry = *p2m_entry;
|
||||
}
|
||||
|
||||
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
|
||||
@@ -563,10 +563,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
|
||||
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
|
||||
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
|
||||
-
|
||||
- /* Free old intermediate tables if necessary */
|
||||
- if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
|
||||
- p2m_free_entry(p2m, &old_entry, page_order);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -607,7 +603,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
}
|
||||
else if ( page_order == PAGE_ORDER_2M )
|
||||
{
|
||||
- l1_pgentry_t old_entry = l1e_empty();
|
||||
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
|
||||
L2_PAGETABLE_SHIFT - PAGE_SHIFT,
|
||||
L2_PAGETABLE_ENTRIES);
|
||||
@@ -619,7 +614,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
{
|
||||
/* We're replacing a non-SP page with a superpage. Make sure to
|
||||
* handle freeing the table properly. */
|
||||
- old_entry = *p2m_entry;
|
||||
+ intermediate_entry = *p2m_entry;
|
||||
}
|
||||
|
||||
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
|
||||
@@ -640,10 +635,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
|
||||
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
|
||||
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
|
||||
-
|
||||
- /* Free old intermediate tables if necessary */
|
||||
- if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
|
||||
- p2m_free_entry(p2m, &old_entry, page_order);
|
||||
}
|
||||
|
||||
/* Track the highest gfn for which we have ever had a valid mapping */
|
||||
@@ -671,6 +662,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
}
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * Free old intermediate tables if necessary. This has to be the
|
||||
+ * last thing we do, after removal from the IOMMU tables, so as to
|
||||
+ * avoid a potential use-after-free.
|
||||
+ */
|
||||
+ if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT )
|
||||
+ p2m_free_entry(p2m, &intermediate_entry, page_order);
|
||||
+
|
||||
out:
|
||||
unmap_domain_page(table);
|
||||
return rc;
|
560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch (new file, 22 lines)
@@ -0,0 +1,22 @@
# Commit c0a85795d864dd64c116af661bf676d66ddfd5fc
# Date 2015-09-29 13:56:03 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/p2m-pt: ignore pt-share flag for shadow mode guests

There is no page table sharing in shadow mode.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -644,7 +644,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,

     if ( iommu_enabled && need_iommu(p2m->domain) )
     {
-        if ( iommu_hap_pt_share )
+        if ( iommu_use_hap_pt(p2m->domain) )
         {
             if ( old_mfn && (old_mfn != mfn_x(mfn)) )
                 amd_iommu_flush_pages(p2m->domain, gfn, page_order);
@ -0,0 +1,104 @@
|
||||
# Commit ea5637968a09a81a64fa5fd73ce49b4ea9789e12
|
||||
# Date 2015-09-30 14:44:22 +0200
|
||||
# Author Dario Faggioli <dario.faggioli@citrix.com>
|
||||
# Committer Jan Beulich <jbeulich@suse.com>
|
||||
credit1: fix tickling when it happens from a remote pCPU
|
||||
|
||||
especially if that is also from a different cpupool than the
|
||||
processor of the vCPU that triggered the tickling.
|
||||
|
||||
In fact, it is possible that we get as far as calling vcpu_unblock()-->
|
||||
vcpu_wake()-->csched_vcpu_wake()-->__runq_tickle() for the vCPU 'vc',
|
||||
but all while running on a pCPU that is different from 'vc->processor'.
|
||||
|
||||
For instance, this can happen when an HVM domain runs in a cpupool,
|
||||
with a different scheduler than the default one, and issues IOREQs
|
||||
to Dom0, running in Pool-0 with the default scheduler.
|
||||
In fact, right in this case, the following crash can be observed:
|
||||
|
||||
(XEN) ----[ Xen-4.7-unstable x86_64 debug=y Tainted: C ]----
|
||||
(XEN) CPU: 7
|
||||
(XEN) RIP: e008:[<ffff82d0801230de>] __runq_tickle+0x18f/0x430
|
||||
(XEN) RFLAGS: 0000000000010086 CONTEXT: hypervisor (d1v0)
|
||||
(XEN) rax: 0000000000000001 rbx: ffff8303184fee00 rcx: 0000000000000000
|
||||
(XEN) ... ... ...
|
||||
(XEN) Xen stack trace from rsp=ffff83031fa57a08:
|
||||
(XEN) ffff82d0801fe664 ffff82d08033c820 0000000100000002 0000000a00000001
|
||||
(XEN) 0000000000006831 0000000000000000 0000000000000000 0000000000000000
|
||||
(XEN) ... ... ...
|
||||
(XEN) Xen call trace:
|
||||
(XEN) [<ffff82d0801230de>] __runq_tickle+0x18f/0x430
|
||||
(XEN) [<ffff82d08012348a>] csched_vcpu_wake+0x10b/0x110
|
||||
(XEN) [<ffff82d08012b421>] vcpu_wake+0x20a/0x3ce
|
||||
(XEN) [<ffff82d08012b91c>] vcpu_unblock+0x4b/0x4e
|
||||
(XEN) [<ffff82d080167bd0>] vcpu_kick+0x17/0x61
|
||||
(XEN) [<ffff82d080167c46>] vcpu_mark_events_pending+0x2c/0x2f
|
||||
(XEN) [<ffff82d08010ac35>] evtchn_fifo_set_pending+0x381/0x3f6
|
||||
(XEN) [<ffff82d08010a0f6>] notify_via_xen_event_channel+0xc9/0xd6
|
||||
(XEN) [<ffff82d0801c29ed>] hvm_send_ioreq+0x3e9/0x441
|
||||
(XEN) [<ffff82d0801bba7d>] hvmemul_do_io+0x23f/0x2d2
|
||||
(XEN) [<ffff82d0801bbb43>] hvmemul_do_io_buffer+0x33/0x64
|
||||
(XEN) [<ffff82d0801bc92b>] hvmemul_do_pio_buffer+0x35/0x37
|
||||
(XEN) [<ffff82d0801cc49f>] handle_pio+0x58/0x14c
|
||||
(XEN) [<ffff82d0801eabcb>] vmx_vmexit_handler+0x16b3/0x1bea
|
||||
(XEN) [<ffff82d0801efd21>] vmx_asm_vmexit_handler+0x41/0xc0
|
||||
|
||||
In this case, pCPU 7 is not in Pool-0, while the (Dom0's) vCPU being
|
||||
woken is. pCPU's 7 pool has a different scheduler than credit, but it
|
||||
is, however, right from pCPU 7 that we are waking the Dom0's vCPUs.
|
||||
Therefore, the current code tries to access csched_balance_mask for
|
||||
pCPU 7, but that is not defined, and hence the Oops.
|
||||
|
||||
(Note that, in case the two pools run the same scheduler we see no
|
||||
Oops, but things are still conceptually wrong.)
|
||||
|
||||
Cure things by making the csched_balance_mask macro accept a
|
||||
parameter for fetching a specific pCPU's mask (instead than always
|
||||
using smp_processor_id()).
|
||||
|
||||
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
|
||||
Reviewed-by: Juergen Gross <jgross@suse.com>
|
||||
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
|
||||
|
||||
--- a/xen/common/sched_credit.c
|
||||
+++ b/xen/common/sched_credit.c
|
||||
@@ -154,10 +154,10 @@ struct csched_pcpu {
|
||||
* Convenience macro for accessing the per-PCPU cpumask we need for
|
||||
* implementing the two steps (soft and hard affinity) balancing logic.
|
||||
* It is stored in csched_pcpu so that serialization is not an issue,
|
||||
- * as there is a csched_pcpu for each PCPU and we always hold the
|
||||
- * runqueue spin-lock when using this.
|
||||
+ * as there is a csched_pcpu for each PCPU, and we always hold the
|
||||
+ * runqueue lock for the proper PCPU when using this.
|
||||
*/
|
||||
-#define csched_balance_mask (CSCHED_PCPU(smp_processor_id())->balance_mask)
|
||||
+#define csched_balance_mask(c) (CSCHED_PCPU(c)->balance_mask)
|
||||
|
||||
/*
|
||||
* Virtual CPU
|
||||
@@ -396,9 +396,10 @@ __runq_tickle(unsigned int cpu, struct c
|
||||
|
||||
/* Are there idlers suitable for new (for this balance step)? */
|
||||
csched_balance_cpumask(new->vcpu, balance_step,
|
||||
- csched_balance_mask);
|
||||
- cpumask_and(csched_balance_mask, csched_balance_mask, &idle_mask);
|
||||
- new_idlers_empty = cpumask_empty(csched_balance_mask);
|
||||
+ csched_balance_mask(cpu));
|
||||
+ cpumask_and(csched_balance_mask(cpu),
|
||||
+ csched_balance_mask(cpu), &idle_mask);
|
||||
+ new_idlers_empty = cpumask_empty(csched_balance_mask(cpu));
|
||||
|
||||
/*
|
||||
* Let's not be too harsh! If there aren't idlers suitable
|
||||
@@ -1475,8 +1476,9 @@ csched_runq_steal(int peer_cpu, int cpu,
|
||||
&& !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
|
||||
continue;
|
||||
|
||||
- csched_balance_cpumask(vc, balance_step, csched_balance_mask);
|
||||
- if ( __csched_vcpu_is_migrateable(vc, cpu, csched_balance_mask) )
|
||||
+ csched_balance_cpumask(vc, balance_step, csched_balance_mask(cpu));
|
||||
+ if ( __csched_vcpu_is_migrateable(vc, cpu,
|
||||
+ csched_balance_mask(cpu)) )
|
||||
{
|
||||
/* We got a candidate. Grab it! */
|
||||
TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
|
@ -0,0 +1,159 @@
|
||||
# Commit 660fd65d5578a95ec5eac522128bba23325179eb
|
||||
# Date 2015-10-02 13:40:36 +0200
|
||||
# Author Jan Beulich <jbeulich@suse.com>
|
||||
# Committer Jan Beulich <jbeulich@suse.com>
|
||||
x86/p2m-pt: tighten conditions of IOMMU mapping updates
|
||||
|
||||
Whether the MFN changes does not depend on the new entry being valid
|
||||
(but solely on the old one), and the need to update or TLB-flush also
|
||||
depends on permission changes.
|
||||
|
||||
Signed-off-by: Jan Beulich <jbeulich@suse.com>
|
||||
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
||||
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
|
||||
|
||||
--- a/xen/arch/x86/mm/p2m-pt.c
|
||||
+++ b/xen/arch/x86/mm/p2m-pt.c
|
||||
@@ -493,7 +493,18 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
l3_pgentry_t l3e_content;
|
||||
int rc;
|
||||
unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
|
||||
- unsigned long old_mfn = 0;
|
||||
+ /*
|
||||
+ * old_mfn and iommu_old_flags control possible flush/update needs on the
|
||||
+ * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
|
||||
+ * iommu_old_flags being initialized to zero covers the case of the entry
|
||||
+ * getting replaced being a non-present (leaf or intermediate) one. For
|
||||
+ * present leaf entries the real value will get calculated below, while
|
||||
+ * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
|
||||
+ * will be used (to cover all cases of what the leaf entries underneath
|
||||
+ * the intermediate one might be).
|
||||
+ */
|
||||
+ unsigned int flags, iommu_old_flags = 0;
|
||||
+ unsigned long old_mfn = INVALID_MFN;
|
||||
|
||||
if ( tb_init_done )
|
||||
{
|
||||
@@ -540,12 +551,20 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
|
||||
L3_PAGETABLE_ENTRIES);
|
||||
ASSERT(p2m_entry);
|
||||
- if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
|
||||
- !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
|
||||
+ flags = l1e_get_flags(*p2m_entry);
|
||||
+ if ( flags & _PAGE_PRESENT )
|
||||
{
|
||||
- /* We're replacing a non-SP page with a superpage. Make sure to
|
||||
- * handle freeing the table properly. */
|
||||
- intermediate_entry = *p2m_entry;
|
||||
+ if ( flags & _PAGE_PSE )
|
||||
+ {
|
||||
+ iommu_old_flags =
|
||||
+ p2m_get_iommu_flags(p2m_flags_to_type(flags));
|
||||
+ old_mfn = l1e_get_pfn(*p2m_entry);
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ iommu_old_flags = ~0;
|
||||
+ intermediate_entry = *p2m_entry;
|
||||
+ }
|
||||
}
|
||||
|
||||
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
|
||||
@@ -556,10 +575,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
entry_content.l1 = l3e_content.l3;
|
||||
|
||||
if ( entry_content.l1 != 0 )
|
||||
- {
|
||||
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
|
||||
- old_mfn = l1e_get_pfn(*p2m_entry);
|
||||
- }
|
||||
|
||||
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
|
||||
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
|
||||
@@ -584,7 +600,10 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
|
||||
0, L1_PAGETABLE_ENTRIES);
|
||||
ASSERT(p2m_entry);
|
||||
-
|
||||
+ iommu_old_flags =
|
||||
+ p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)));
|
||||
+ old_mfn = l1e_get_pfn(*p2m_entry);
|
||||
+
|
||||
if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct)
|
||||
|| p2m_is_paging(p2mt) )
|
||||
entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
|
||||
@@ -593,10 +612,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
entry_content = l1e_empty();
|
||||
|
||||
if ( entry_content.l1 != 0 )
|
||||
- {
|
||||
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
|
||||
- old_mfn = l1e_get_pfn(*p2m_entry);
|
||||
- }
|
||||
+
|
||||
/* level 1 entry */
|
||||
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
|
||||
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
|
||||
@@ -607,14 +624,20 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
L2_PAGETABLE_SHIFT - PAGE_SHIFT,
|
||||
L2_PAGETABLE_ENTRIES);
|
||||
ASSERT(p2m_entry);
|
||||
-
|
||||
- /* FIXME: Deal with 4k replaced by 2meg pages */
|
||||
- if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
|
||||
- !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
|
||||
- {
|
||||
- /* We're replacing a non-SP page with a superpage. Make sure to
|
||||
- * handle freeing the table properly. */
|
||||
- intermediate_entry = *p2m_entry;
|
||||
+ flags = l1e_get_flags(*p2m_entry);
|
||||
+ if ( flags & _PAGE_PRESENT )
|
||||
+ {
|
||||
+ if ( flags & _PAGE_PSE )
|
||||
+ {
|
||||
+ iommu_old_flags =
|
||||
+ p2m_get_iommu_flags(p2m_flags_to_type(flags));
|
||||
+ old_mfn = l1e_get_pfn(*p2m_entry);
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ iommu_old_flags = ~0;
|
||||
+ intermediate_entry = *p2m_entry;
|
||||
+ }
|
||||
}
|
||||
|
||||
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
|
||||
@@ -628,10 +651,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
entry_content.l1 = l2e_content.l2;
|
||||
|
||||
if ( entry_content.l1 != 0 )
|
||||
- {
|
||||
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
|
||||
- old_mfn = l1e_get_pfn(*p2m_entry);
|
||||
- }
|
||||
|
||||
p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
|
||||
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
|
||||
@@ -642,17 +662,17 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
|
||||
&& (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
|
||||
p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
|
||||
|
||||
- if ( iommu_enabled && need_iommu(p2m->domain) )
|
||||
+ if ( iommu_enabled && need_iommu(p2m->domain) &&
|
||||
+ (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
|
||||
{
|
||||
if ( iommu_use_hap_pt(p2m->domain) )
|
||||
{
|
||||
- if ( old_mfn && (old_mfn != mfn_x(mfn)) )
|
||||
+ if ( iommu_old_flags )
|
||||
amd_iommu_flush_pages(p2m->domain, gfn, page_order);
|
||||
}
|
||||
else
|
||||
{
|
||||
- unsigned int flags = p2m_get_iommu_flags(p2mt);
|
||||
-
|
||||
+ flags = p2m_get_iommu_flags(p2mt);
|
||||
if ( flags != 0 )
|
||||
for ( i = 0; i < (1UL << page_order); i++ )
|
||||
iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags);
|
CVE-2015-7311-xsa142.patch (new file, 50 lines)
@@ -0,0 +1,50 @@
From 07ca00703f76ad392eda5ee52cce1197cf49c30a Mon Sep 17 00:00:00 2001
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH v2.1 for-4.5] libxl: handle read-only drives with qemu-xen

The current libxl code doesn't deal with read-only drives at all.

Upstream QEMU and qemu-xen only support read-only cdrom drives: make
sure to specify "readonly=on" for cdrom drives and return error in case
the user requested a non-cdrom read-only drive.

This is XSA-142, discovered by Lin Liu
(https://bugzilla.redhat.com/show_bug.cgi?id=1257893).

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Backport to Xen 4.5 and earlier, apropos of report and review from
Michael Young.

Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
---
 tools/libxl/libxl_dm.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

Index: xen-4.5.1-testing/tools/libxl/libxl_dm.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl_dm.c
+++ xen-4.5.1-testing/tools/libxl/libxl_dm.c
@@ -812,13 +812,18 @@ static char ** libxl__build_device_model
             if (disks[i].is_cdrom) {
                 if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY)
                     drive = libxl__sprintf
-                        (gc, "if=ide,index=%d,media=cdrom,cache=writeback,id=ide-%i",
-                         disk, dev_number);
+                        (gc, "if=ide,index=%d,readonly=%s,media=cdrom,cache=writeback,id=ide-%i",
+                         disk, disks[i].readwrite ? "off" : "on", dev_number);
                 else
                     drive = libxl__sprintf
-                        (gc, "file=%s,if=ide,index=%d,media=cdrom,format=%s,cache=writeback,id=ide-%i",
-                         disks[i].pdev_path, disk, format, dev_number);
+                        (gc, "file=%s,if=ide,index=%d,readonly=%s,media=cdrom,format=%s,cache=writeback,id=ide-%i",
+                         disks[i].pdev_path, disk, disks[i].readwrite ? "off" : "on", format, dev_number);
             } else {
+                if (!disks[i].readwrite) {
+                    LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "qemu-xen doesn't support read-only disk drivers");
+                    return NULL;
+                }
+
                 if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY) {
                     LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "cannot support"
                                " empty disk format for %s", disks[i].vdev);
204
hotplug-Linux-block-performance-fix.patch
Normal file
204
hotplug-Linux-block-performance-fix.patch
Normal file
@ -0,0 +1,204 @@
|
||||
Reference: bsc#941074
|
||||
|
||||
During the attachment of a loopback mounted image file, the mode of all
|
||||
curent instances of this device already attached to other domains must be
|
||||
checked. This requires finding all loopback devices pointing to the inode
|
||||
of the shared image file, and then comparing the major and minor number of
|
||||
these devices to the major and minor number of every vbd device found in the
|
||||
xenstore database.
|
||||
|
||||
Prior to this patch, the entire xenstore database is walked for every instance
|
||||
of every loopback device pointing to the same shared image file. This process
|
||||
causes the block attachment process to becomes exponentially slower with every
|
||||
additional attachment of a shared image.
|
||||
|
||||
Rather than scanning all of xenstore for every instance of a shared loopback
|
||||
device, this patch creates a list of the major and minor numbers from all
|
||||
matching loopback devices. After generating this list, Xenstore is walked
|
||||
once, and major and minor numbers from every vbd are checked against the list.
|
||||
If a match is found, the mode of that vbd is checked for compatibility with
|
||||
the mode of the device being attached.
|
||||
|
||||
Signed-off-by: Mike Latimer <mlatimer@xxxxxxxx>
|
||||
---
|
||||
tools/hotplug/Linux/block | 89 ++++++++++++++++++++++++++++++-----------------
|
||||
1 file changed, 57 insertions(+), 32 deletions(-)
|
||||
|
||||
Index: xen-4.4.3-testing/tools/hotplug/Linux/block
|
||||
===================================================================
|
||||
--- xen-4.4.3-testing.orig/tools/hotplug/Linux/block
|
||||
+++ xen-4.4.3-testing/tools/hotplug/Linux/block
|
||||
@@ -38,7 +38,7 @@ find_free_loopback_dev() {
 }

 ##
-# check_sharing device mode
+# check_sharing devtype device mode [inode]
 #
 # Check whether the device requested is already in use. To use the device in
 # read-only mode, it may be in use in read-only mode, but may not be in use in
@@ -47,19 +47,44 @@ find_free_loopback_dev() {
 #
 # Prints one of
 #
-#    'local': the device may not be used because it is mounted in the current
-#             (i.e. the privileged domain) in a way incompatible with the
-#             requested mode;
-#    'guest': the device may not be used because it already mounted by a guest
-#             in a way incompatible with the requested mode; or
-#    'ok':    the device may be used.
+#    'local $d': the device ($d) may not be used because it is mounted in the
+#                current (i.e. the privileged domain) in a way incompatible
+#                with the requested mode;
+#    'guest $d': the device may not be used because it is already mounted
+#                through device $d by a guest in a way incompatible with the
+#                requested mode; or
+#    'ok':       the device may be used.
 #
 check_sharing()
 {
-  local dev="$1"
-  local mode="$2"
+  local devtype=$1
+  local dev="$2"
+  local mode="$3"
+  local devmm=","
+
+  if [ "$devtype" = "file" ];
+  then
+    local inode="$4"
+
+    shared_list=$(losetup -a |
+      sed -n -e "s@^\([^:]\+\)\(:[[:blank:]]\[0*${dev}\]:${inode}[[:blank:]](.*)\)@\1@p" )
+    for dev in $shared_list
+    do
+      if [ -n "$dev" ]
+      then
+        devmm="${devmm}$(device_major_minor $dev),"
+      fi
+    done
+    # if $devmm is unchanged, file being checked is not a shared loopback device
+    if [ "$devmm" = "," ];
+    then
+      echo 'ok'
+      return
+    fi
+  else
+    devmm=${devmm}$(device_major_minor "$dev")","
+  fi

-  local devmm=$(device_major_minor "$dev")
   local file

   if [ "$mode" = 'w' ]
@@ -75,9 +100,10 @@ check_sharing()
     then
       local d=$(device_major_minor "$file")

-      if [ "$d" = "$devmm" ]
+      # checking for $d in $devmm is best through the [[...]] bashism
+      if [[ "$devmm" == *",$d,"* ]]
       then
-        echo 'local'
+        echo "local $d"
         return
       fi
     fi
@@ -90,13 +116,14 @@ check_sharing()
     do
       d=$(xenstore_read_default "$base_path/$dom/$dev/physical-device" "")

-      if [ "$d" = "$devmm" ]
+      # checking for $d in $devmm is best through the [[...]] bashism
+      if [ -n "$d" ] && [[ "$devmm" == *",$d,"* ]]
       then
         if [ "$mode" = 'w' ]
         then
           if ! same_vm $dom
           then
-            echo 'guest'
+            echo "guest $d"
             return
           fi
         else
@@ -107,7 +134,7 @@ check_sharing()
           then
             if ! same_vm $dom
             then
-              echo 'guest'
+              echo "guest $d"
               return
             fi
           fi
@@ -129,6 +156,7 @@ check_device_sharing()
 {
   local dev="$1"
   local mode=$(canonicalise_mode "$2")
+  local type="device"
   local result

   if [ "x$mode" = 'x!' ]
@@ -136,33 +164,38 @@ check_device_sharing()
     return 0
   fi

-  result=$(check_sharing "$dev" "$mode")
+  result=$(check_sharing "$type" "$dev" "$mode")

   if [ "$result" != 'ok' ]
   then
-    do_ebusy "Device $dev is mounted " "$mode" "$result"
+    do_ebusy "Device $dev is mounted " "$mode" "${result%% *}"
   fi
 }


 ##
-# check_device_sharing file dev mode
+# check_device_sharing file dev mode inode
 #
-# Perform the sharing check for the given file mounted through the given
-# loopback interface, in the given mode.
+# Perform the sharing check for the given file, with its corresponding
+# device, inode and mode. As the file can be mounted multiple times,
+# the inode is passed through to check_sharing for all instances to be
+# checked.
 #
 check_file_sharing()
 {
   local file="$1"
   local dev="$2"
   local mode="$3"
+  local inode="$4"
+  local type="file"
+  local result

-  result=$(check_sharing "$dev" "$mode")
+  result=$(check_sharing "$type" "$dev" "$mode" "$inode")

   if [ "$result" != 'ok' ]
   then
-    do_ebusy "File $file is loopback-mounted through $dev,
-which is mounted " "$mode" "$result"
+    do_ebusy "File $file is loopback-mounted through ${result#* },
+which is mounted " "$mode" "${result%% *}"
   fi
 }

@@ -279,15 +312,7 @@ mount it read-write in a guest domain."
         fatal "Unable to lookup $file: dev: $dev inode: $inode"
       fi

-      shared_list=$(losetup -a |
-        sed -n -e "s@^\([^:]\+\)\(:[[:blank:]]\[0*${dev}\]:${inode}[[:blank:]](.*)\)@\1@p" )
-      for dev in $shared_list
-      do
-        if [ -n "$dev" ]
-        then
-          check_file_sharing "$file" "$dev" "$mode"
-        fi
-      done
+      check_file_sharing "$file" "$dev" "$mode" "$inode"
     fi

     loopdev=$(losetup -f 2>/dev/null || find_free_loopback_dev)
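
A side note on the new callers: check_sharing now prints a verdict plus the
conflicting device (for example "guest 7:0"), and the callers split that
string with plain parameter expansion before handing the pieces to do_ebusy.
A tiny sketch of the idiom, with an illustrative value:

  result="guest 7:0"       # the kind of string check_sharing now prints
  verdict=${result%% *}    # strip from the first space onward  -> "guest"
  device=${result#* }      # strip up to the first space        -> "7:0"
  # A plain "ok" contains no space, so both expansions leave it unchanged.
  echo "$verdict via $device"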

xen.changes (37 lines changed)
@@ -1,3 +1,40 @@
-------------------------------------------------------------------
Tue Oct 6 09:04:17 MDT 2015 - carnold@suse.com

- bsc#949046 - Increase %suse_version in SP1 to 1316
  xen.spec

-------------------------------------------------------------------
Mon Oct 5 09:12:45 MDT 2015 - carnold@suse.com

- bsc#945167 - Running command 'xl pci-assignable-add 03:10.1'
  secondly show errors
  55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
- Upstream patches from Jan
  55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
  55f9345b-x86-MSI-fail-if-no-hardware-support.patch
  5604f239-x86-PV-properly-populate-descriptor-tables.patch
  5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
  560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
  560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
  560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
  560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
  560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch

-------------------------------------------------------------------
Fri Oct 2 11:31:34 MDT 2015 - mlatimer@suse.com

- bsc#941074 - VmError: Device 51728 (vbd) could not be connected.
  Hotplug scripts not working.
  hotplug-Linux-block-performance-fix.patch

-------------------------------------------------------------------
Wed Sep 23 14:56:47 MDT 2015 - carnold@suse.com

- bsc#947165 - VUL-0: CVE-2015-7311: xen: libxl fails to honour
  readonly flag on disks with qemu-xen (xsa-142)
  CVE-2015-7311-xsa142.patch

-------------------------------------------------------------------
Mon Sep 16 07:17:00 UTC 2015 - cyliu@suse.com

xen.spec (81 lines changed)
@@ -1,7 +1,7 @@
#
# spec file for package xen
#
# Copyright (c) 2015 SUSE LINUX GmbH, Nuernberg, Germany.
# Copyright (c) 2015 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -15,7 +15,6 @@
# Please submit bugfixes or comments via http://bugs.opensuse.org/
#


# needssslcertforbuild

Name: xen
@@ -46,7 +45,7 @@ ExclusiveArch: %ix86 x86_64 %arm aarch64
#
%define max_cpus 4
%ifarch x86_64
%if %suse_version == 1315
%if %suse_version >= 1315
%define max_cpus 1024
%else
%define max_cpus 512
@@ -159,7 +158,7 @@ BuildRequires: xorg-x11-util-devel
%endif
%endif

Version: 4.5.1_08
Version: 4.5.1_10
Release: 0
Summary: Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
License: GPL-2.0
@@ -205,31 +204,41 @@ Source20000: xenalyze.hg.tar.bz2
Patch1: 55103616-vm-assist-prepare-for-discontiguous-used-bit-numbers.patch
Patch2: 551ac326-xentop-add-support-for-qdisk.patch
Patch3: 552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch
Patch4: 5548e903-domctl-don-t-truncate-XEN_DOMCTL_max_mem-requests.patch
Patch5: 5548e95d-x86-allow-to-suppress-M2P-user-mode-exposure.patch
Patch6: 554cc211-libxl-add-qxl.patch
Patch7: 556d973f-unmodified-drivers-tolerate-IRQF_DISABLED-being-undefined.patch
Patch8: 5576f178-kexec-add-more-pages-to-v1-environment.patch
Patch9: 55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
Patch10: 558bfaa0-x86-traps-avoid-using-current-too-early.patch
Patch11: 5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
Patch12: 559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
Patch13: 559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
Patch14: 559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
Patch15: 559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
Patch16: 559bdde5-pull-in-latest-linux-earlycpio.patch
Patch17: 55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
Patch18: 55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
Patch19: 55a77e4f-dmar-device-scope-mem-leak-fix.patch
Patch20: 55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
Patch21: 55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
Patch22: 55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
Patch23: 55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
Patch24: 55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
Patch25: 55e43fd8-x86-NUMA-fix-setup_node.patch
Patch26: 55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
Patch27: 55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
Patch28: 5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch
Patch4: 5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch
Patch5: 5548e903-domctl-don-t-truncate-XEN_DOMCTL_max_mem-requests.patch
Patch6: 5548e95d-x86-allow-to-suppress-M2P-user-mode-exposure.patch
Patch7: 554cc211-libxl-add-qxl.patch
Patch8: 556d973f-unmodified-drivers-tolerate-IRQF_DISABLED-being-undefined.patch
Patch9: 5576f178-kexec-add-more-pages-to-v1-environment.patch
Patch10: 55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
Patch11: 558bfaa0-x86-traps-avoid-using-current-too-early.patch
Patch12: 5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
Patch13: 559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
Patch14: 559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
Patch15: 559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
Patch16: 559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
Patch17: 559bdde5-pull-in-latest-linux-earlycpio.patch
Patch18: 55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
Patch19: 55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
Patch20: 55a77e4f-dmar-device-scope-mem-leak-fix.patch
Patch21: 55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
Patch22: 55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
Patch23: 55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
Patch24: 55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
Patch25: 55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
Patch26: 55e43fd8-x86-NUMA-fix-setup_node.patch
Patch27: 55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
Patch28: 55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
Patch29: 55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
Patch30: 55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
Patch31: 55f9345b-x86-MSI-fail-if-no-hardware-support.patch
Patch32: 5604f239-x86-PV-properly-populate-descriptor-tables.patch
Patch33: 5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
Patch34: 560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
Patch35: 560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
Patch36: 560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
Patch37: 560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
Patch38: 560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
Patch131: CVE-2015-4106-xsa131-9.patch
Patch137: CVE-2015-3259-xsa137.patch
Patch139: xsa139-qemuu.patch
@@ -247,6 +256,7 @@ Patch14014: xsa140-qemut-4.patch
Patch14015: xsa140-qemut-5.patch
Patch14016: xsa140-qemut-6.patch
Patch14017: xsa140-qemut-7.patch
Patch142: CVE-2015-7311-xsa142.patch
# Upstream qemu
Patch250: VNC-Support-for-ExtendedKeyEvent-client-message.patch
Patch251: 0001-net-move-the-tap-buffer-into-TAPState.patch
@@ -279,6 +289,7 @@ Patch330: suspend_evtchn_lock.patch
Patch331: xenpaging.doc.patch
Patch332: local_attach_support_for_phy.patch
Patch333: xen-c99-fix.patch
Patch334: hotplug-Linux-block-performance-fix.patch
# Qemu traditional
Patch350: blktap.patch
Patch351: cdrom-removable.patch
@@ -625,6 +636,16 @@ Authors:
%patch26 -p1
%patch27 -p1
%patch28 -p1
%patch29 -p1
%patch30 -p1
%patch31 -p1
%patch32 -p1
%patch33 -p1
%patch34 -p1
%patch35 -p1
%patch36 -p1
%patch37 -p1
%patch38 -p1
%patch131 -p1
%patch137 -p1
%patch139 -p1
@@ -642,6 +663,7 @@ Authors:
%patch14015 -p1
%patch14016 -p1
%patch14017 -p1
%patch142 -p1
# Upstream qemu patches
%patch250 -p1
%patch251 -p1
@@ -673,6 +695,7 @@ Authors:
%patch331 -p1
%patch332 -p1
%patch333 -p1
%patch334 -p1
# Qemu traditional
%patch350 -p1
%patch351 -p1