xen/5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
Charles Arnold 763b78040d - bnc#935634 - VUL-0: CVE-2015-3259: xen: XSA-137: xl command line
config handling stack overflow
  CVE-2015-3259-xsa137.patch
- Upstream patches from Jan
  558bfaa0-x86-traps-avoid-using-current-too-early.patch
  5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
  559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
  559bdde5-pull-in-latest-linux-earlycpio.patch
- Upstream patches from Jan pending review
  552d0fd2-x86-hvm-don-t-include-asm-spinlock-h.patch
  552d0fe8-x86-mtrr-include-asm-atomic.h.patch
  552d293b-x86-vMSI-X-honor-all-mask-requests.patch
  552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
  554c7aee-x86-provide-arch_fetch_and_add.patch
  554c7b00-arm-provide-arch_fetch_and_add.patch
  55534b0a-x86-provide-add_sized.patch
  55534b25-arm-provide-add_sized.patch
  5555a4f8-use-ticket-locks-for-spin-locks.patch
  5555a5b9-x86-arm-remove-asm-spinlock-h.patch
  5555a8ec-introduce-non-contiguous-allocation.patch
  55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
  557eb55f-gnttab-per-active-entry-locking.patch
  557eb5b6-gnttab-introduce-maptrack-lock.patch
  557eb620-gnttab-make-the-grant-table-lock-a-read-write-lock.patch
  557ffab8-evtchn-factor-out-freeing-an-event-channel.patch
  5582bf43-evtchn-simplify-port_is_valid.patch
  5582bf81-evtchn-remove-the-locking-when-unmasking-an-event-channel.patch
  5583d9c5-x86-MSI-X-cleanup.patch
  5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
  5583da64-gnttab-use-per-VCPU-maptrack-free-lists.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=369
2015-07-10 15:21:29 +00:00

# Commit ad28e42bd1d28d746988ed71654e8aa670629753
# Date 2015-06-19 10:59:53 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/MSI: track host and guest masking separately
In particular we want to avoid losing track of our own intention to
have an entry masked. Physical unmasking now happens only when both
host and guest have requested it.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
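
Before the diff itself, a minimal self-contained C sketch of the model the
patch introduces: the physical mask bit becomes the OR of two separately
tracked intentions, so a physical unmask requires both host and guest to
agree. The struct and helper names below are hypothetical stand-ins for
illustration only; in the real code the bookkeeping lives in the msi_attrib
bit-fields and the hardware write happens inside msi_set_mask_bit() in
xen/arch/x86/msi.c.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the two new msi_attrib bit-fields. */
struct msi_state {
    bool host_masked;   /* Xen's own intention to mask the entry   */
    bool guest_masked;  /* the guest's view of the mask bit        */
};

/* The entry is physically masked if either side wants it masked. */
static void set_mask_bit(struct msi_state *s, bool host, bool guest)
{
    bool physically_masked = host || guest;

    s->host_masked = host;
    s->guest_masked = guest;
    /* printf() stands in for the writel()/config-space write Xen does. */
    printf("physical mask bit <- %d\n", physically_masked);
}

/* A guest write to the vector-control word updates only its own intention. */
static void guest_mask(struct msi_state *s, bool mask)
{
    set_mask_bit(s, s->host_masked, mask);
}

int main(void)
{
    struct msi_state s = { .host_masked = true, .guest_masked = false };

    guest_mask(&s, true);                    /* -> 1: guest masks too        */
    set_mask_bit(&s, false, s.guest_masked); /* -> 1: host unmasks, guest
                                                      still wants it masked  */
    guest_mask(&s, false);                   /* -> 0: both agree, unmasked   */
    return 0;
}

This is the invariant the hunks below implement: mask_msi_irq() and
unmask_msi_irq() change only the host bit, the new guest_mask_msi_irq()
changes only the guest bit, and msi_set_mask_bit() derives the physical
state from both.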
--- sle12sp1.orig/xen/arch/x86/hpet.c 2015-01-14 18:44:18.000000000 +0100
+++ sle12sp1/xen/arch/x86/hpet.c 2015-07-08 00:00:00.000000000 +0200
@@ -240,7 +240,7 @@ static void hpet_msi_unmask(struct irq_d
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg |= HPET_TN_ENABLE;
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
- ch->msi.msi_attrib.masked = 0;
+ ch->msi.msi_attrib.host_masked = 0;
}
static void hpet_msi_mask(struct irq_desc *desc)
@@ -251,7 +251,7 @@ static void hpet_msi_mask(struct irq_des
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg &= ~HPET_TN_ENABLE;
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
- ch->msi.msi_attrib.masked = 1;
+ ch->msi.msi_attrib.host_masked = 1;
}
static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
--- sle12sp1.orig/xen/arch/x86/hvm/vmsi.c 2015-07-08 00:00:00.000000000 +0200
+++ sle12sp1/xen/arch/x86/hvm/vmsi.c 2015-07-08 00:00:00.000000000 +0200
@@ -219,7 +219,6 @@ static int msixtbl_read(
{
unsigned long offset;
struct msixtbl_entry *entry;
- void *virt;
unsigned int nr_entry, index;
int r = X86EMUL_UNHANDLEABLE;
@@ -253,13 +252,20 @@ static int msixtbl_read(
}
if ( offset == PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
{
- virt = msixtbl_addr_to_virt(entry, address);
+ const struct msi_desc *msi_desc;
+ void *virt = msixtbl_addr_to_virt(entry, address);
+
if ( !virt )
goto out;
+ msi_desc = virt_to_msi_desc(entry->pdev, virt);
+ if ( !msi_desc )
+ goto out;
if ( len == 4 )
- *pval = readl(virt);
+ *pval = MASK_INSR(msi_desc->msi_attrib.guest_masked,
+ PCI_MSIX_VECTOR_BITMASK);
else
- *pval |= (u64)readl(virt) << 32;
+ *pval |= (u64)MASK_INSR(msi_desc->msi_attrib.guest_masked,
+ PCI_MSIX_VECTOR_BITMASK) << 32;
}
r = X86EMUL_OKAY;
@@ -277,7 +283,7 @@ static int msixtbl_write(struct vcpu *v,
void *virt;
unsigned int nr_entry, index;
int r = X86EMUL_UNHANDLEABLE;
- unsigned long flags, orig;
+ unsigned long flags;
struct irq_desc *desc;
if ( (len != 4 && len != 8) || (address & (len - 1)) )
@@ -337,37 +343,7 @@ static int msixtbl_write(struct vcpu *v,
ASSERT(msi_desc == desc->msi_desc);
- orig = readl(virt);
-
- /*
- * Do not allow guest to modify MSI-X control bit if it is masked
- * by Xen. We'll only handle the case where Xen thinks that
- * bit is unmasked, but hardware has silently masked the bit
- * (in case of SR-IOV VF reset, etc). On the other hand, if Xen
- * thinks that the bit is masked, but it's really not,
- * we log a warning.
- */
- if ( msi_desc->msi_attrib.masked )
- {
- if ( !(orig & PCI_MSIX_VECTOR_BITMASK) )
- printk(XENLOG_WARNING "MSI-X control bit is unmasked when"
- " it is expected to be masked [%04x:%02x:%02x.%u]\n",
- entry->pdev->seg, entry->pdev->bus,
- PCI_SLOT(entry->pdev->devfn),
- PCI_FUNC(entry->pdev->devfn));
-
- goto unlock;
- }
-
- /*
- * The mask bit is the only defined bit in the word. But we
- * ought to preserve the reserved bits. Clearing the reserved
- * bits can result in undefined behaviour (see PCI Local Bus
- * Specification revision 2.3).
- */
- val &= PCI_MSIX_VECTOR_BITMASK;
- val |= (orig & ~PCI_MSIX_VECTOR_BITMASK);
- writel(val, virt);
+ guest_mask_msi_irq(desc, !!(val & PCI_MSIX_VECTOR_BITMASK));
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
--- sle12sp1.orig/xen/arch/x86/msi.c 2015-07-08 00:00:00.000000000 +0200
+++ sle12sp1/xen/arch/x86/msi.c 2015-07-08 00:00:00.000000000 +0200
@@ -349,9 +349,10 @@ int msi_maskable_irq(const struct msi_de
|| entry->msi_attrib.maskbit;
}
-static void msi_set_mask_bit(struct irq_desc *desc, int flag)
+static void msi_set_mask_bit(struct irq_desc *desc, bool_t host, bool_t guest)
{
struct msi_desc *entry = desc->msi_desc;
+ bool_t flag = host || guest;
ASSERT(spin_is_locked(&desc->lock));
BUG_ON(!entry || !entry->dev);
@@ -383,7 +384,8 @@ static void msi_set_mask_bit(struct irq_
BUG();
break;
}
- entry->msi_attrib.masked = !!flag;
+ entry->msi_attrib.host_masked = host;
+ entry->msi_attrib.guest_masked = guest;
}
static int msi_get_mask_bit(const struct msi_desc *entry)
@@ -405,20 +407,33 @@ static int msi_get_mask_bit(const struct
void mask_msi_irq(struct irq_desc *desc)
{
- msi_set_mask_bit(desc, 1);
+ msi_set_mask_bit(desc, 1, desc->msi_desc->msi_attrib.guest_masked);
}
void unmask_msi_irq(struct irq_desc *desc)
{
- msi_set_mask_bit(desc, 0);
+ msi_set_mask_bit(desc, 0, desc->msi_desc->msi_attrib.guest_masked);
+}
+
+void guest_mask_msi_irq(struct irq_desc *desc, bool_t mask)
+{
+ msi_set_mask_bit(desc, desc->msi_desc->msi_attrib.host_masked, mask);
}
static unsigned int startup_msi_irq(struct irq_desc *desc)
{
- unmask_msi_irq(desc);
+ bool_t guest_masked = (desc->status & IRQ_GUEST) &&
+ is_hvm_domain(desc->msi_desc->dev->domain);
+
+ msi_set_mask_bit(desc, 0, guest_masked);
return 0;
}
+static void shutdown_msi_irq(struct irq_desc *desc)
+{
+ msi_set_mask_bit(desc, 1, 1);
+}
+
void ack_nonmaskable_msi_irq(struct irq_desc *desc)
{
irq_complete_move(desc);
@@ -443,7 +458,7 @@ void end_nonmaskable_msi_irq(struct irq_
static hw_irq_controller pci_msi_maskable = {
.typename = "PCI-MSI/-X",
.startup = startup_msi_irq,
- .shutdown = mask_msi_irq,
+ .shutdown = shutdown_msi_irq,
.enable = unmask_msi_irq,
.disable = mask_msi_irq,
.ack = ack_maskable_msi_irq,
@@ -591,7 +606,8 @@ static int msi_capability_init(struct pc
entry[i].msi_attrib.is_64 = is_64bit_address(control);
entry[i].msi_attrib.entry_nr = i;
entry[i].msi_attrib.maskbit = is_mask_bit_support(control);
- entry[i].msi_attrib.masked = 1;
+ entry[i].msi_attrib.host_masked = 1;
+ entry[i].msi_attrib.guest_masked = 0;
entry[i].msi_attrib.pos = pos;
if ( entry[i].msi_attrib.maskbit )
entry[i].msi.mpos = mpos;
@@ -817,7 +833,8 @@ static int msix_capability_init(struct p
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = msi->entry_nr;
entry->msi_attrib.maskbit = 1;
- entry->msi_attrib.masked = 1;
+ entry->msi_attrib.host_masked = 1;
+ entry->msi_attrib.guest_masked = 1;
entry->msi_attrib.pos = pos;
entry->irq = msi->irq;
entry->dev = dev;
@@ -1152,7 +1169,8 @@ int pci_restore_msi_state(struct pci_dev
for ( i = 0; ; )
{
- msi_set_mask_bit(desc, entry[i].msi_attrib.masked);
+ msi_set_mask_bit(desc, entry[i].msi_attrib.host_masked,
+ entry[i].msi_attrib.guest_masked);
if ( !--nr )
break;
@@ -1304,7 +1322,7 @@ static void dump_msi(unsigned char key)
else
mask = '?';
printk(" %-6s%4u vec=%02x%7s%6s%3sassert%5s%7s"
- " dest=%08x mask=%d/%d/%c\n",
+ " dest=%08x mask=%d/%c%c/%c\n",
type, irq,
(data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT,
data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
@@ -1312,7 +1330,10 @@ static void dump_msi(unsigned char key)
data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",
- dest32, attr.maskbit, attr.masked, mask);
+ dest32, attr.maskbit,
+ attr.host_masked ? 'H' : ' ',
+ attr.guest_masked ? 'G' : ' ',
+ mask);
}
}
--- sle12sp1.orig/xen/drivers/passthrough/amd/iommu_init.c 2015-01-14 18:44:18.000000000 +0100
+++ sle12sp1/xen/drivers/passthrough/amd/iommu_init.c 2015-07-08 00:00:00.000000000 +0200
@@ -451,7 +451,7 @@ static void iommu_msi_unmask(struct irq_
spin_lock_irqsave(&iommu->lock, flags);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
spin_unlock_irqrestore(&iommu->lock, flags);
- iommu->msi.msi_attrib.masked = 0;
+ iommu->msi.msi_attrib.host_masked = 0;
}
static void iommu_msi_mask(struct irq_desc *desc)
@@ -464,7 +464,7 @@ static void iommu_msi_mask(struct irq_de
spin_lock_irqsave(&iommu->lock, flags);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
spin_unlock_irqrestore(&iommu->lock, flags);
- iommu->msi.msi_attrib.masked = 1;
+ iommu->msi.msi_attrib.host_masked = 1;
}
static unsigned int iommu_msi_startup(struct irq_desc *desc)
--- sle12sp1.orig/xen/drivers/passthrough/vtd/iommu.c 2015-05-19 23:16:48.000000000 +0200
+++ sle12sp1/xen/drivers/passthrough/vtd/iommu.c 2015-07-08 00:00:00.000000000 +0200
@@ -996,7 +996,7 @@ static void dma_msi_unmask(struct irq_de
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- iommu->msi.msi_attrib.masked = 0;
+ iommu->msi.msi_attrib.host_masked = 0;
}
static void dma_msi_mask(struct irq_desc *desc)
@@ -1008,7 +1008,7 @@ static void dma_msi_mask(struct irq_desc
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- iommu->msi.msi_attrib.masked = 1;
+ iommu->msi.msi_attrib.host_masked = 1;
}
static unsigned int dma_msi_startup(struct irq_desc *desc)
--- sle12sp1.orig/xen/include/asm-x86/msi.h 2015-01-14 18:44:18.000000000 +0100
+++ sle12sp1/xen/include/asm-x86/msi.h 2015-07-08 00:00:00.000000000 +0200
@@ -90,12 +90,13 @@ extern unsigned int pci_msix_get_table_l
struct msi_desc {
struct msi_attrib {
- __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 masked : 1;
+ __u8 type; /* {0: unused, 5h:MSI, 11h:MSI-X} */
+ __u8 pos; /* Location of the MSI capability */
+ __u8 maskbit : 1; /* mask/pending bit supported ? */
__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
- __u16 entry_nr; /* specific enabled entry */
+ __u8 host_masked : 1;
+ __u8 guest_masked : 1;
+ __u16 entry_nr; /* specific enabled entry */
} msi_attrib;
struct list_head list;
@@ -236,6 +237,7 @@ void msi_compose_msg(unsigned vector, co
void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable);
void mask_msi_irq(struct irq_desc *);
void unmask_msi_irq(struct irq_desc *);
+void guest_mask_msi_irq(struct irq_desc *, bool_t mask);
void ack_nonmaskable_msi_irq(struct irq_desc *);
void end_nonmaskable_msi_irq(struct irq_desc *, u8 vector);
void set_msi_affinity(struct irq_desc *, const cpumask_t *);