xen/55534b0a-x86-provide-add_sized.patch


- bnc#935634 - VUL-0: CVE-2015-3259: xen: XSA-137: xl command line config
  handling stack overflow
  CVE-2015-3259-xsa137.patch
- Upstream patches from Jan
  558bfaa0-x86-traps-avoid-using-current-too-early.patch
  5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
  559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
  559bdde5-pull-in-latest-linux-earlycpio.patch
- Upstream patches from Jan pending review
  552d0fd2-x86-hvm-don-t-include-asm-spinlock-h.patch
  552d0fe8-x86-mtrr-include-asm-atomic.h.patch
  552d293b-x86-vMSI-X-honor-all-mask-requests.patch
  552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
  554c7aee-x86-provide-arch_fetch_and_add.patch
  554c7b00-arm-provide-arch_fetch_and_add.patch
  55534b0a-x86-provide-add_sized.patch
  55534b25-arm-provide-add_sized.patch
  5555a4f8-use-ticket-locks-for-spin-locks.patch
  5555a5b9-x86-arm-remove-asm-spinlock-h.patch
  5555a8ec-introduce-non-contiguous-allocation.patch
  55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
  557eb55f-gnttab-per-active-entry-locking.patch
  557eb5b6-gnttab-introduce-maptrack-lock.patch
  557eb620-gnttab-make-the-grant-table-lock-a-read-write-lock.patch
  557ffab8-evtchn-factor-out-freeing-an-event-channel.patch
  5582bf43-evtchn-simplify-port_is_valid.patch
  5582bf81-evtchn-remove-the-locking-when-unmasking-an-event-channel.patch
  5583d9c5-x86-MSI-X-cleanup.patch
  5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
  5583da64-gnttab-use-per-VCPU-maptrack-free-lists.patch
OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=369
2015-07-10 17:21:29 +02:00
# Commit 3c694aec08dda782d9c866e599b848dff86f474f
# Date 2015-05-13 15:00:58 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: provide add_sized()
add_sized(ptr, inc) adds inc to the value at ptr using only the correct
size of loads and stores for the type of *ptr. The add is /not/ atomic.
This is needed for ticket locks to ensure the increment of the head ticket
does not affect the tail ticket.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
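
For context: this helper was introduced for the ticket-lock conversion listed
in the changelog above (5555a4f8-use-ticket-locks-for-spin-locks.patch). Below
is a minimal sketch of why unlock needs a width-exact but non-atomic add. The
type and field names are illustrative only, not Xen's actual definitions.

/* Illustrative sketch, not the real Xen spinlock code. */
#include <stdint.h>

typedef union {
    uint32_t head_tail;        /* the whole lock word */
    struct {
        uint16_t head;         /* ticket currently being served */
        uint16_t tail;         /* next ticket to be handed out */
    };
} ticket_lock_t;               /* hypothetical name */

static void ticket_unlock(ticket_lock_t *lock)
{
    /*
     * add_sized() emits a 16-bit add on exactly &lock->head, so the
     * store cannot spill into the adjacent tail field, which other
     * CPUs update concurrently when taking tickets.  The add need
     * not be atomic: only the lock holder ever advances head.
     */
    add_sized(&lock->head, 1);
}
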
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -14,6 +14,14 @@ static inline void name(volatile type *a
{ asm volatile("mov" size " %1,%0": "=m" (*(volatile type *)addr) \
:reg (val) barrier); }
+#define build_add_sized(name, size, type, reg) \
+ static inline void name(volatile type *addr, type val) \
+ { \
+ asm volatile("add" size " %1,%0" \
+ : "=m" (*addr) \
+ : reg (val)); \
+ }
+
build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
@@ -25,8 +33,14 @@ build_write_atomic(write_u32_atomic, "l"
build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
+build_add_sized(add_u8_sized, "b", uint8_t, "qi")
+build_add_sized(add_u16_sized, "w", uint16_t, "ri")
+build_add_sized(add_u32_sized, "l", uint32_t, "ri")
+build_add_sized(add_u64_sized, "q", uint64_t, "ri")
+
#undef build_read_atomic
#undef build_write_atomic
+#undef build_add_sized
void __bad_atomic_size(void);
@@ -54,6 +68,18 @@ void __bad_atomic_size(void);
__x; \
})
+#define add_sized(p, x) ({ \
+ typeof(*(p)) x_ = (x); \
+ switch ( sizeof(*(p)) ) \
+ { \
+ case 1: add_u8_sized((uint8_t *)(p), x_); break; \
+ case 2: add_u16_sized((uint16_t *)(p), x_); break; \
+ case 4: add_u32_sized((uint32_t *)(p), x_); break; \
+ case 8: add_u64_sized((uint64_t *)(p), x_); break; \
+ default: __bad_atomic_size(); break; \
+ } \
+})
+
/*
* NB. I've pushed the volatile qualifier into the operations. This allows
* fast accessors such as _atomic_read() and _atomic_set() which don't give
* the compiler a fit.
*/
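
A hypothetical usage example (not part of the patch) showing the sizeof()
dispatch at work: each call selects the helper matching the pointee's size
and compiles to a single add instruction of that width.

#include <stdint.h>

static uint8_t  small_ctr;   /* hypothetical counters for illustration */
static uint64_t large_ctr;

static void bump(void)
{
    add_sized(&small_ctr, 1);  /* sizeof == 1 -> add_u8_sized -> addb */
    add_sized(&large_ctr, 2);  /* sizeof == 8 -> add_u64_sized -> addq */
}

Because the expansion is a plain add with no lock prefix, concurrent writers
can lose updates; callers must guarantee a single writer per field, as the
ticket-lock head has.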