xen/554c7aee-x86-provide-arch_fetch_and_add.patch

# Commit 2bfc9fc52ce8485fa43e79bbdc32360c74e12fe8
# Date 2015-05-08 10:59:26 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: provide arch_fetch_and_add()

arch_fetch_and_add() atomically adds a value and returns the previous
value.

This is needed to implement ticket locks.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
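
For context, here is a minimal sketch of the ticket-lock scheme this
primitive enables. It is illustrative only and not part of the patch
(Xen's actual conversion lands later, in
5555a4f8-use-ticket-locks-for-spin-locks.patch); the struct and function
names are made up for the sketch, and it uses GCC's __sync_fetch_and_add()
builtin so it builds standalone, where inside Xen arch_fetch_and_add()
would play the same role. Tickets are handed out with fetch-and-add, so
waiters acquire the lock in FIFO order.

  /* Illustrative sketch only; not part of this patch. */
  #include <stdint.h>

  struct ticket_lock {
      volatile uint16_t head;   /* ticket currently being served */
      volatile uint16_t tail;   /* next ticket to hand out */
  };

  static void ticket_lock_acquire(struct ticket_lock *lock)
  {
      /* Take the next ticket; the returned (previous) value is ours. */
      uint16_t ticket = __sync_fetch_and_add(&lock->tail, 1);

      /* Spin until the ticket being served matches ours. */
      while ( lock->head != ticket )
          ; /* a pause/cpu_relax() hint would normally go here */
  }

  static void ticket_lock_release(struct ticket_lock *lock)
  {
      /* Serve the next waiter; the builtin's full barrier also orders
         the critical section before the release. */
      __sync_fetch_and_add(&lock->head, 1);
  }
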
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -118,6 +118,52 @@ static always_inline unsigned long __cmp
 })
 
 /*
+ * Undefined symbol to cause link failure if a wrong size is used with
+ * arch_fetch_and_add().
+ */
+extern unsigned long __bad_fetch_and_add_size(void);
+
+static always_inline unsigned long __xadd(
+    volatile void *ptr, unsigned long v, int size)
+{
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( "lock; xaddb %b0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 2:
+        asm volatile ( "lock; xaddw %w0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 4:
+        asm volatile ( "lock; xaddl %k0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 8:
+        asm volatile ( "lock; xaddq %q0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+
+        return v;
+    default:
+        return __bad_fetch_and_add_size();
+    }
+}
+
+/*
+ * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr. Returns
+ * the previous value.
+ *
+ * This is a full memory barrier.
+ */
+#define arch_fetch_and_add(ptr, v) \
+    ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
+
+/*
  * Both Intel and AMD agree that, from a programmer's viewpoint:
  *  Loads cannot be reordered relative to other loads.
  *  Stores cannot be reordered relative to other stores.
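
Note the typeof(*(ptr)) casts in the macro: the caller gets back a value
of the pointed-to type, and any object size other than 1, 2, 4, or 8
bytes falls through to __bad_fetch_and_add_size(), which is declared but
never defined, so a misuse fails at link time rather than compiling
silently. As a standalone illustration of the return-the-previous-value
(XADD) semantics, using the equivalent GCC builtin since
arch_fetch_and_add() is hypervisor-internal, with arbitrary example
values:

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
      uint32_t counter = 5;

      /* Fetch-and-add returns the value *before* the addition. */
      uint32_t old = __sync_fetch_and_add(&counter, 3);

      assert(old == 5);       /* previous value came back */
      assert(counter == 8);   /* memory was updated atomically */
      return 0;
  }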