8724a18868
config handling stack overflow 55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch - bsc#907514 - Bus fatal error & sles12 sudden reboot has been observed - bsc#910258 - SLES12 Xen host crashes with FATAL NMI after shutdown of guest with VT-d NIC - bsc#918984 - Bus fatal error & sles11-SP4 sudden reboot has been observed - bsc#923967 - Partner-L3: Bus fatal error & sles11-SP3 sudden reboot has been observed 552d293b-x86-vMSI-X-honor-all-mask-requests.patch 552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch 5576f143-x86-adjust-PV-I-O-emulation-functions-types.patch 55795a52-x86-vMSI-X-support-qword-MMIO-access.patch 5583d9c5-x86-MSI-X-cleanup.patch 5583da09-x86-MSI-track-host-and-guest-masking-separately.patch 55b0a218-x86-PCI-CFG-write-intercept.patch 55b0a255-x86-MSI-X-maskall.patch 55b0a283-x86-MSI-X-teardown.patch 55b0a2ab-x86-MSI-X-enable.patch 55b0a2db-x86-MSI-track-guest-masking.patch - Upstream patches from Jan 552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch 559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch 559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch 559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch 55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch 55a77e4f-dmar-device-scope-mem-leak-fix.patch OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=373
69 lines
2.0 KiB
Diff
# Commit 2bfc9fc52ce8485fa43e79bbdc32360c74e12fe8
# Date 2015-05-08 10:59:26 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: provide arch_fetch_and_add()

arch_fetch_and_add() atomically adds a value and returns the previous
value.

This is needed to implement ticket locks.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>

--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -118,6 +118,52 @@ static always_inline unsigned long __cmp
 })
 
 /*
+ * Undefined symbol to cause link failure if a wrong size is used with
+ * arch_fetch_and_add().
+ */
+extern unsigned long __bad_fetch_and_add_size(void);
+
+static always_inline unsigned long __xadd(
+    volatile void *ptr, unsigned long v, int size)
+{
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( "lock; xaddb %b0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 2:
+        asm volatile ( "lock; xaddw %w0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 4:
+        asm volatile ( "lock; xaddl %k0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 8:
+        asm volatile ( "lock; xaddq %q0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+
+        return v;
+    default:
+        return __bad_fetch_and_add_size();
+    }
+}
+
+/*
+ * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr.  Returns
+ * the previous value.
+ *
+ * This is a full memory barrier.
+ */
+#define arch_fetch_and_add(ptr, v) \
+    ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
+
+/*
  * Both Intel and AMD agree that, from a programmer's viewpoint:
  *  Loads cannot be reordered relative to other loads.
  *  Stores cannot be reordered relative to other stores.