libatomic_ops/libatomic_ops-aarch64.patch

From: Andreas Schwab <schwab@suse.de>
Date: Sat May 18 08:10:09 UTC 2013
"libatomic_ops-aarch64.patch: aarch64 support for libatomic_ops from
upstream add-aarch64-support branch"
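
The patch hooks aarch64 into atomic_ops.h, adds an LDXP/STXP-based
gcc/aarch64.h for the double-wide (128-bit) primitives, and introduces the
gcc/generic*.h headers that map the remaining operations onto the GCC 4.7+
__atomic builtins.  As a rough client-side sketch of the resulting API (not
part of the patch; the variable and function names below are invented for
illustration only):

    #include <atomic_ops.h>

    static volatile AO_t counter;        /* hypothetical counter           */
    static volatile AO_double_t head;    /* hypothetical pointer + ABA tag */

    void example(void)
    {
      /* Sequentially consistent fetch-and-add; on aarch64 this comes from */
      /* the __atomic_fetch_add wrapper in gcc/generic-arithm.h.           */
      AO_t old = AO_fetch_and_add_full(&counter, 1);
      (void)old;

      /* Double-wide CAS; with GCC 4 this is the LDXP/STXP loop in         */
      /* gcc/aarch64.h.  It returns nonzero on success.                    */
      AO_double_t expected = AO_double_load(&head);
      AO_double_t desired;
      desired.AO_val1 = expected.AO_val1 + 1;
      desired.AO_val2 = expected.AO_val2 + 1;
      (void)AO_double_compare_and_swap(&head, expected, desired);
    }
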
Index: libatomic_ops/src/atomic_ops.h
===================================================================
--- libatomic_ops/src/atomic_ops.h.orig
+++ libatomic_ops/src/atomic_ops.h
@@ -244,6 +244,10 @@
|| defined(__powerpc64__) || defined(__ppc64__)
# include "atomic_ops/sysdeps/gcc/powerpc.h"
# endif /* __powerpc__ */
+# if defined(__aarch64__)
+# include "atomic_ops/sysdeps/gcc/aarch64.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __aarch64__ */
# if defined(__arm__) && !defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/gcc/arm.h"
# define AO_CAN_EMUL_CAS
Index: libatomic_ops/src/atomic_ops/sysdeps/Makefile.am
===================================================================
--- libatomic_ops/src/atomic_ops/sysdeps/Makefile.am.orig
+++ libatomic_ops/src/atomic_ops/sysdeps/Makefile.am
@@ -30,6 +30,8 @@ nobase_sysdep_HEADERS= generic_pthread.h
gcc/hexagon.h gcc/hppa.h gcc/ia64.h gcc/m68k.h \
gcc/mips.h gcc/powerpc.h gcc/s390.h \
gcc/sh.h gcc/sparc.h gcc/x86.h gcc/x86_64.h \
+ gcc/aarch64.h gcc/generic.h \
+ gcc/generic-small.h gcc/generic-arithm.h \
\
hpc/hppa.h hpc/ia64.h \
\
Index: libatomic_ops/src/atomic_ops/sysdeps/gcc/aarch64.h
===================================================================
--- /dev/null
+++ libatomic_ops/src/atomic_ops/sysdeps/gcc/aarch64.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../test_and_set_t_is_ao_t.h"
+
+#include "../standard_ao_double_t.h"
+
+#ifndef AO_UNIPROCESSOR
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ __asm__ __volatile__("dmb st" : : : "memory");
+ }
+# define AO_HAVE_nop_write
+#endif
+
+/* TODO: Adjust version check on fixing double-wide AO support in GCC. */
+#if __GNUC__ == 4
+
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+ int status;
+
+ /* Note that STXP cannot be discarded because LD[A]XP is not */
+ /* single-copy atomic (unlike LDREXD for 32-bit ARM). */
+ do {
+ __asm__ __volatile__("//AO_double_load\n"
+ " ldxp %0, %1, %3\n"
+ " stxp %w2, %0, %1, %3"
+ : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
+ : "Q" (*addr));
+ } while (status);
+ return result;
+ }
+# define AO_HAVE_double_load
+
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_load_acquire\n"
+ " ldaxp %0, %1, %3\n"
+ " stxp %w2, %0, %1, %3"
+ : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
+ : "Q" (*addr));
+ } while (status);
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t value)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_store\n"
+ " ldxp %0, %1, %3\n"
+ " stxp %w2, %4, %5, %3"
+ : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
+ "=Q" (*addr)
+ : "r" (value.AO_val1), "r" (value.AO_val2));
+ /* Compared to the arm.h implementation, the 'cc' (flags) are not */
+ /* clobbered because A64 has no concept of conditional execution. */
+ } while (status);
+ }
+# define AO_HAVE_double_store
+
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_store_release\n"
+ " ldxp %0, %1, %3\n"
+ " stlxp %w2, %4, %5, %3"
+ : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
+ "=Q" (*addr)
+ : "r" (value.AO_val1), "r" (value.AO_val2));
+ } while (status);
+ }
+# define AO_HAVE_double_store_release
+
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap\n"
+ " ldxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (result);
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap
+
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_acquire\n"
+ " ldaxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (result);
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_acquire
+
+ AO_INLINE int
+ AO_double_compare_and_swap_release(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_release\n"
+ " ldxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stlxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (result);
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_release
+#endif
+
+#include "generic.h"
Index: libatomic_ops/src/atomic_ops/sysdeps/gcc/generic-arithm.h
===================================================================
--- /dev/null
+++ libatomic_ops/src/atomic_ops/sysdeps/gcc/generic-arithm.h
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_fetch_and_add
+
+AO_INLINE void
+AO_char_and(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_and
+
+AO_INLINE void
+AO_char_or(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_or
+
+AO_INLINE void
+AO_char_xor(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_fetch_and_add
+
+AO_INLINE void
+AO_short_and(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_and
+
+AO_INLINE void
+AO_short_or(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_or
+
+AO_INLINE void
+AO_short_xor(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_fetch_and_add
+
+AO_INLINE void
+AO_int_and(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_and
+
+AO_INLINE void
+AO_int_or(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_or
+
+AO_INLINE void
+AO_int_xor(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_fetch_and_add
+
+AO_INLINE void
+AO_and(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_and
+
+AO_INLINE void
+AO_or(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_or
+
+AO_INLINE void
+AO_xor(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_fetch_and_add_acquire
+
+AO_INLINE void
+AO_char_and_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_and_acquire
+
+AO_INLINE void
+AO_char_or_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_or_acquire
+
+AO_INLINE void
+AO_char_xor_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_fetch_and_add_acquire
+
+AO_INLINE void
+AO_short_and_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_and_acquire
+
+AO_INLINE void
+AO_short_or_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_or_acquire
+
+AO_INLINE void
+AO_short_xor_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_fetch_and_add_acquire
+
+AO_INLINE void
+AO_int_and_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_and_acquire
+
+AO_INLINE void
+AO_int_or_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_or_acquire
+
+AO_INLINE void
+AO_int_xor_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_fetch_and_add_acquire
+
+AO_INLINE void
+AO_and_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_and_acquire
+
+AO_INLINE void
+AO_or_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_or_acquire
+
+AO_INLINE void
+AO_xor_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_fetch_and_add_release
+
+AO_INLINE void
+AO_char_and_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_and_release
+
+AO_INLINE void
+AO_char_or_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_or_release
+
+AO_INLINE void
+AO_char_xor_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_fetch_and_add_release
+
+AO_INLINE void
+AO_short_and_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_and_release
+
+AO_INLINE void
+AO_short_or_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_or_release
+
+AO_INLINE void
+AO_short_xor_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_fetch_and_add_release
+
+AO_INLINE void
+AO_int_and_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_and_release
+
+AO_INLINE void
+AO_int_or_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_or_release
+
+AO_INLINE void
+AO_int_xor_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_fetch_and_add_release
+
+AO_INLINE void
+AO_and_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_and_release
+
+AO_INLINE void
+AO_or_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_or_release
+
+AO_INLINE void
+AO_xor_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_fetch_and_add_full
+
+AO_INLINE void
+AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_and_full
+
+AO_INLINE void
+AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_or_full
+
+AO_INLINE void
+AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_fetch_and_add_full
+
+AO_INLINE void
+AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_and_full
+
+AO_INLINE void
+AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_or_full
+
+AO_INLINE void
+AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_fetch_and_add_full
+
+AO_INLINE void
+AO_int_and_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_and_full
+
+AO_INLINE void
+AO_int_or_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_or_full
+
+AO_INLINE void
+AO_int_xor_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_fetch_and_add_full
+
+AO_INLINE void
+AO_and_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_and_full
+
+AO_INLINE void
+AO_or_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_or_full
+
+AO_INLINE void
+AO_xor_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_xor_full
Index: libatomic_ops/src/atomic_ops/sysdeps/gcc/generic-small.h
===================================================================
--- /dev/null
+++ libatomic_ops/src/atomic_ops/sysdeps/gcc/generic-small.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_load(const volatile unsigned/**/char *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_load
+
+AO_INLINE unsigned/**/char
+AO_char_load_acquire(const volatile unsigned/**/char *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_load_acquire
+
+/* char_load_full is generalized using load and nop_full, so that */
+/* char_load_read is defined using load and nop_read. */
+/* char_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_store
+
+AO_INLINE void
+AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_store_release
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_compare_and_swap(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_char_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_char_compare_and_swap(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_char_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_load(const volatile unsigned/**/short *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_load
+
+AO_INLINE unsigned/**/short
+AO_short_load_acquire(const volatile unsigned/**/short *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_load_acquire
+
+/* short_load_full is generalized using load and nop_full, so that */
+/* short_load_read is defined using load and nop_read. */
+/* short_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_store
+
+AO_INLINE void
+AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_store_release
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_compare_and_swap(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_short_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_short_compare_and_swap(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_short_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_load(const volatile unsigned *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_load
+
+AO_INLINE unsigned
+AO_int_load_acquire(const volatile unsigned *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_load_acquire
+
+/* int_load_full is generalized using load and nop_full, so that */
+/* int_load_read is defined using load and nop_read. */
+/* int_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_int_store(volatile unsigned *addr, unsigned value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_store
+
+AO_INLINE void
+AO_int_store_release(volatile unsigned *addr, unsigned value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_store_release
+
+AO_INLINE unsigned
+AO_int_fetch_compare_and_swap(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_int_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_int_compare_and_swap(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_int_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_load(const volatile AO_t *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_load
+
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_load_acquire
+
+/* load_full is generalized using load and nop_full, so that */
+/* load_read is defined using load and nop_read. */
+/* store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_store(volatile AO_t *addr, AO_t value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_store
+
+AO_INLINE void
+AO_store_release(volatile AO_t *addr, AO_t value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_store_release
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
Index: libatomic_ops/src/atomic_ops/sysdeps/gcc/generic.h
===================================================================
--- /dev/null
+++ libatomic_ops/src/atomic_ops/sysdeps/gcc/generic.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* The following implementation assumes GCC 4.7 or later. */
+/* For the details, see GNU Manual, chapter 6.52 (Built-in functions */
+/* for memory model aware atomic operations). */
+
+/* TODO: Include this file for other targets if gcc 4.7+ */
+
+#ifdef AO_UNIPROCESSOR
+ /* If only a single processor (core) is used, AO_UNIPROCESSOR could */
+ /* be defined by the client to avoid unnecessary memory barrier. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ AO_compiler_barrier();
+ }
+# define AO_HAVE_nop_full
+
+#else
+ AO_INLINE void
+ AO_nop_read(void)
+ {
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ }
+# define AO_HAVE_nop_read
+
+# ifndef AO_HAVE_nop_write
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ __atomic_thread_fence(__ATOMIC_RELEASE);
+ }
+# define AO_HAVE_nop_write
+# endif
+
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ /* __sync_synchronize() could be used instead. */
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ }
+# define AO_HAVE_nop_full
+#endif /* !AO_UNIPROCESSOR */
+
+#include "generic-small.h"
+
+#ifndef AO_PREFER_GENERALIZED
+# include "generic-arithm.h"
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_test_and_set
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_acquire(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_ACQUIRE);
+ }
+# define AO_HAVE_test_and_set_acquire
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_release(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_test_and_set_release
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_SEQ_CST);
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
+
+# ifndef AO_HAVE_double_load
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_RELAXED);
+ return result;
+ }
+# define AO_HAVE_double_load
+# endif
+
+# ifndef AO_HAVE_double_load_acquire
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_ACQUIRE);
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+# endif
+
+# ifndef AO_HAVE_double_store
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t value)
+ {
+ __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_double_store
+# endif
+
+# ifndef AO_HAVE_double_store_release
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
+ {
+ __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_double_store_release
+# endif
+
+# ifndef AO_HAVE_double_compare_and_swap
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return (int)__atomic_compare_exchange_n(&addr->AO_whole,
+ &old_val.AO_whole /* p_expected */,
+ new_val.AO_whole /* desired */,
+ 0 /* is_weak: false */,
+ __ATOMIC_RELAXED /* success */,
+ __ATOMIC_RELAXED /* failure */);
+ }
+# define AO_HAVE_double_compare_and_swap
+# endif
+
+ /* TODO: Add double CAS _acquire/release/full primitives. */
+#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
Index: libatomic_ops/src/atomic_ops/sysdeps/standard_ao_double_t.h
===================================================================
--- libatomic_ops/src/atomic_ops/sysdeps/standard_ao_double_t.h.orig
+++ libatomic_ops/src/atomic_ops/sysdeps/standard_ao_double_t.h
@@ -11,6 +11,8 @@
typedef __m128 double_ptr_storage;
#elif defined(_WIN32) && !defined(__GNUC__)
typedef unsigned __int64 double_ptr_storage;
+#elif defined(__aarch64__)
+ typedef unsigned __int128 double_ptr_storage;
#else
typedef unsigned long long double_ptr_storage;
#endif
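
With unsigned __int128 selected as double_ptr_storage on aarch64, the
AO_HAVE_DOUBLE_PTR_STORAGE fallbacks in gcc/generic.h operate on AO_whole as
a single 16-byte value.  A minimal standalone sketch (not part of the patch)
of the compare-and-swap this reduces to, assuming GCC targeting aarch64:

    /* Strong 16-byte CAS on the raw storage type; depending on the       */
    /* compiler version this expands to an inline LDXP/STXP loop or to a  */
    /* call into libatomic.                                               */
    static int
    cas128(volatile unsigned __int128 *addr,
           unsigned __int128 expected, unsigned __int128 desired)
    {
      return __atomic_compare_exchange_n(addr, &expected, desired,
                                         0 /* strong, not weak */,
                                         __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    }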