xen/552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
Charles Arnold 763b78040d
- bnc#935634 - VUL-0: CVE-2015-3259: xen: XSA-137: xl command line
  config handling stack overflow
  CVE-2015-3259-xsa137.patch
- Upstream patches from Jan
  558bfaa0-x86-traps-avoid-using-current-too-early.patch
  5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
  559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
  559bdde5-pull-in-latest-linux-earlycpio.patch
- Upstream patches from Jan pending review
  552d0fd2-x86-hvm-don-t-include-asm-spinlock-h.patch
  552d0fe8-x86-mtrr-include-asm-atomic.h.patch
  552d293b-x86-vMSI-X-honor-all-mask-requests.patch
  552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
  554c7aee-x86-provide-arch_fetch_and_add.patch
  554c7b00-arm-provide-arch_fetch_and_add.patch
  55534b0a-x86-provide-add_sized.patch
  55534b25-arm-provide-add_sized.patch
  5555a4f8-use-ticket-locks-for-spin-locks.patch
  5555a5b9-x86-arm-remove-asm-spinlock-h.patch
  5555a8ec-introduce-non-contiguous-allocation.patch
  55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
  557eb55f-gnttab-per-active-entry-locking.patch
  557eb5b6-gnttab-introduce-maptrack-lock.patch
  557eb620-gnttab-make-the-grant-table-lock-a-read-write-lock.patch
  557ffab8-evtchn-factor-out-freeing-an-event-channel.patch
  5582bf43-evtchn-simplify-port_is_valid.patch
  5582bf81-evtchn-remove-the-locking-when-unmasking-an-event-channel.patch
  5583d9c5-x86-MSI-X-cleanup.patch
  5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
  5583da64-gnttab-use-per-VCPU-maptrack-free-lists.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=369
2015-07-10 15:21:29 +00:00


# Commit df9f5676b3711c95127d44e871ad7ca38d6ed28a
# Date 2015-04-14 16:51:18 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/vMSI-X: add valid bits for read acceleration

Again because Xen doesn't get to see all guest writes, it shouldn't
serve reads from its cache before having seen a write to the respective
address.

Also use DECLARE_BITMAP() in a related field declaration instead of
open coding it.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- sle12sp1.orig/xen/arch/x86/hvm/vmsi.c 2015-04-20 09:30:29.000000000 +0200
+++ sle12sp1/xen/arch/x86/hvm/vmsi.c 2015-04-20 09:32:57.000000000 +0200
@@ -154,11 +154,14 @@ struct msixtbl_entry
     struct pci_dev *pdev;
     unsigned long gtable; /* gpa of msix table */
     unsigned long table_len;
-    unsigned long table_flags[BITS_TO_LONGS(MAX_MSIX_TABLE_ENTRIES)];
+    DECLARE_BITMAP(table_flags, MAX_MSIX_TABLE_ENTRIES);
 #define MAX_MSIX_ACC_ENTRIES 3
     struct {
         uint32_t msi_ad[3]; /* Shadow of address low, high and data */
     } gentries[MAX_MSIX_ACC_ENTRIES];
+    DECLARE_BITMAP(acc_valid, 3 * MAX_MSIX_ACC_ENTRIES);
+#define acc_bit(what, ent, slot, idx) \
+    what##_bit((slot) * 3 + (idx), (ent)->acc_valid)
     struct rcu_head rcu;
 };
 
@@ -233,9 +236,10 @@ static int msixtbl_read(
     if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
     {
         nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
-        if ( nr_entry >= MAX_MSIX_ACC_ENTRIES )
-            goto out;
         index = offset / sizeof(uint32_t);
+        if ( nr_entry >= MAX_MSIX_ACC_ENTRIES ||
+             !acc_bit(test, entry, nr_entry, index) )
+            goto out;
         *pval = entry->gentries[nr_entry].msi_ad[index];
     }
     else
@@ -281,6 +285,7 @@ static int msixtbl_write(struct vcpu *v,
         {
             index = offset / sizeof(uint32_t);
             entry->gentries[nr_entry].msi_ad[index] = val;
+            acc_bit(set, entry, nr_entry, index);
         }
         set_bit(nr_entry, &entry->table_flags);
         goto out;
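
To make the idea behind acc_valid/acc_bit easier to follow in isolation, below is a
minimal standalone C sketch of the same guarded-shadow-cache pattern. It is not Xen
code: the names (ACC_ENTRIES, acc_test, acc_set, cache_read, cache_write) are
illustrative assumptions, and open-coded bit helpers stand in for Xen's
DECLARE_BITMAP()/test_bit()/set_bit(). It demonstrates the point of the commit
message: a read is only satisfied from the shadow copy after the corresponding word
has been seen being written.

/*
 * Standalone illustration (not Xen code) of the pattern the patch adds:
 * a shadow cache of MSI-X address/data words plus a per-word valid
 * bitmap, so reads are only served from the cache after the word has
 * actually been written.  All names here are made up for the example.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACC_ENTRIES     3   /* mirrors MAX_MSIX_ACC_ENTRIES */
#define WORDS_PER_ENTRY 3   /* address low, address high, data */
#define LONG_BITS       (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + LONG_BITS - 1) / LONG_BITS)

static struct {
    uint32_t msi_ad[WORDS_PER_ENTRY];   /* shadow of the guest's writes */
} gentries[ACC_ENTRIES];

static unsigned long acc_valid[BITS_TO_LONGS(ACC_ENTRIES * WORDS_PER_ENTRY)];

/* Open-coded stand-ins for test_bit()/set_bit() on the valid bitmap,
 * playing the role of acc_bit(test, ...) and acc_bit(set, ...). */
static bool acc_test(unsigned int slot, unsigned int idx)
{
    unsigned int bit = slot * WORDS_PER_ENTRY + idx;

    return (acc_valid[bit / LONG_BITS] >> (bit % LONG_BITS)) & 1;
}

static void acc_set(unsigned int slot, unsigned int idx)
{
    unsigned int bit = slot * WORDS_PER_ENTRY + idx;

    acc_valid[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

/* A write populates the shadow copy and marks the word valid,
 * like the added acc_bit(set, ...) call in msixtbl_write(). */
static void cache_write(unsigned int slot, unsigned int idx, uint32_t val)
{
    gentries[slot].msi_ad[idx] = val;
    acc_set(slot, idx);
}

/* A read is served from the shadow copy only if that word was seen
 * being written; otherwise the caller must go to the real table,
 * like the reworked bounds/valid check in msixtbl_read(). */
static bool cache_read(unsigned int slot, unsigned int idx, uint32_t *val)
{
    if ( slot >= ACC_ENTRIES || !acc_test(slot, idx) )
        return false;
    *val = gentries[slot].msi_ad[idx];
    return true;
}

int main(void)
{
    uint32_t v = 0;
    bool hit;

    hit = cache_read(0, 2, &v);
    printf("read before write: %s\n", hit ? "hit" : "miss");

    cache_write(0, 2, 0xdeadbeef);

    hit = cache_read(0, 2, &v);
    printf("read after write:  %s (0x%x)\n", hit ? "hit" : "miss", (unsigned)v);
    return 0;
}

Built with any C99 compiler, the program reports a cache miss before the write and a
hit with the cached value afterwards, which is the behaviour change msixtbl_read()
gains from the patch.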