
2004-02-29  Sebastian Wilhelmi  <seppi@seppi.de>

	* configure.in, glib/gatomic.c, glib/gatomic.h: Moved the
	assembler functions from gatomic.h to gatomic.c, which makes for
	better maintainability. Also use gint instead of gint32 to be able
	to use reference counting for ABI-fixed structures with
	gint/guint.

	* glib/gthread.h: Adapted accordingly.

	* tests/atomic-test.c: Updated to test for G_MAXINT and G_MININT.
Author:  Sebastian Wilhelmi  2004-02-29 16:48:37 +00:00
Commit:  769f36db47  (parent fe2efd06a0)
4 changed files with 484 additions and 586 deletions
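
The gint32-to-gint change is what makes atomic reference counting usable on structures whose ABI is already frozen: their count fields are declared as plain gint/guint, which is not guaranteed to be the same type as gint32. A minimal sketch of the intended usage with the g_atomic_int_inc/g_atomic_int_dec_and_test macros from the new header (MyObject and the my_object_* helpers are hypothetical names, not part of this commit):

/* Hypothetical refcounted structure; ref_count is a plain gint,
 * as in GLib's ABI-frozen types. */
typedef struct {
  gint ref_count;
  /* ... payload ... */
} MyObject;

static void
my_object_ref (MyObject *obj)
{
  g_atomic_int_inc (&obj->ref_count);   /* expands to g_atomic_int_add (..., 1) */
}

static void
my_object_unref (MyObject *obj)
{
  /* g_atomic_int_dec_and_test returns TRUE exactly when the count drops to 0 */
  if (g_atomic_int_dec_and_test (&obj->ref_count))
    g_free (obj);
}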

glib/gatomic.c

@@ -1,7 +1,7 @@
/* GLIB - Library of useful routines for C programming
* Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
*
- * GAtomic: atomic integer operation.
+ * g_atomic_*: atomic operations.
* Copyright (C) 2003 Sebastian Wilhelmi
*
* This library is free software; you can redistribute it and/or
@@ -20,67 +20,414 @@
* Boston, MA 02111-1307, USA.
*/
-#include <glib.h>
+#include "config.h"
-#ifdef G_THREADS_ENABLED
-# if !defined (G_ATOMIC_USE_FALLBACK_IMPLEMENTATION)
-/* We have an inline implementation, which we can now use for the
- * fallback implementation. This fallback implementation is only used by
- * modules, which are not compliled with gcc
+#include "glib.h"
+#if defined (__GNUC__)
+# if defined (G_ATOMIC_I486)
+/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
-gint32
-g_atomic_int_exchange_and_add_fallback (gint32 *atomic,
-                                        gint32  val)
+gint
+g_atomic_int_exchange_and_add (gint *atomic,
+                               gint  val)
{
-  return g_atomic_int_exchange_and_add (atomic, val);
+  gint result;
+  __asm__ __volatile__ ("lock; xaddl %0,%1"
+                        : "=r" (result), "=m" (*atomic)
+                        : "0" (val), "m" (*atomic));
+  return result;
}
void
-g_atomic_int_add_fallback (gint32 *atomic,
-                           gint32  val)
+g_atomic_int_add (gint *atomic,
+                  gint  val)
{
-  g_atomic_int_add (atomic, val);
+  __asm__ __volatile__ ("lock; addl %1,%0"
+                        : "=m" (*atomic)
+                        : "ir" (val), "m" (*atomic));
}
gboolean
-g_atomic_int_compare_and_exchange_fallback (gint32 *atomic,
-                                            gint32  oldval,
-                                            gint32  newval)
+g_atomic_int_compare_and_exchange (gint *atomic,
+                                   gint  oldval,
+                                   gint  newval)
{
-  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
+  gint result;
+  __asm __volatile ("lock; cmpxchgl %2, %1"
+                    : "=a" (result), "=m" (*atomic)
+                    : "r" (newval), "m" (*atomic), "0" (oldval));
+  return result == oldval;
}
/* The same code as above, as on i386 gpointer is 32 bit as well.
* Duplicating the code here seems more natural than casting the
* arguments and calling the former function */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("lock; cmpxchgl %2, %1"
: "=a" (result), "=m" (*atomic)
: "r" (newval), "m" (*atomic), "0" (oldval));
return result == oldval;
}
# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
*/
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
({ \
gint __result; \
__asm __volatile ("cas [%4], %2, %0" \
: "=r" (__result), "=m" (*(atomic)) \
: "r" (oldval), "m" (*(atomic)), "r" (atomic), \
"0" (newval)); \
__result == oldval; \
})
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("cas [%4], %2, %0"
: "=r" (result), "=m" (*atomic)
: "r" (oldval), "m" (*atomic), "r" (atomic),
"0" (newval));
return result == oldval;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
gpointer *a = atomic;
__asm __volatile ("casx [%4], %2, %0"
: "=r" (result), "=m" (*a)
: "r" (oldval), "m" (*a), "r" (a),
"0" (newval));
return result != 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
# define G_ATOMIC_MEMORY_BARRIER \
__asm __volatile ("membar #LoadLoad | #LoadStore" \
" | #StoreLoad | #StoreStore" : : : "memory")
# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
*/
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
({ \
gint __result; \
gint __prev; \
__asm__ __volatile__ ( \
" mb\n" \
"1: ldl_l %0,%2\n" \
" cmpeq %0,%3,%1\n" \
" beq %1,2f\n" \
" mov %4,%1\n" \
" stl_c %1,%2\n" \
" beq %1,1b\n" \
" mb\n" \
"2:" \
: "=&r" (__prev), \
"=&r" (__result) \
: "m" (*(atomic)), \
"Ir" (oldval), \
"Ir" (newval) \
: "memory"); \
__result != 0; \
})
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gint result;
gpointer prev;
__asm__ __volatile__ (
" mb\n"
"1: ldl_l %0,%2\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,1b\n"
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (result)
: "m" (*atomic),
"Ir" (oldval),
"Ir" (newval)
: "memory");
return result != 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gint result;
gpointer prev;
__asm__ __volatile__ (
" mb\n"
"1: ldq_l %0,%2\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,1b\n"
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (result)
: "m" (*atomic),
"Ir" (oldval),
"Ir" (newval)
: "memory");
return result != 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
# define G_ATOMIC_MEMORY_BARRIER __asm ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
*/
gint
g_atomic_int_exchange_and_add (gint *atomic,
gint val)
{
gint result;
__asm__ __volatile__ ("lock; xaddl %0,%1"
: "=r" (result), "=m" (*atomic)
: "0" (val), "m" (*atomic));
return result;
}
void
g_atomic_int_add (gint *atomic,
gint val)
{
__asm__ __volatile__ ("lock; addl %1,%0"
: "=m" (*atomic)
: "ir" (val), "m" (*atomic));
}
gboolean
-g_atomic_pointer_compare_and_exchange_fallback (gpointer *atomic,
-                                                gpointer  oldval,
-                                                gpointer  newval)
+g_atomic_int_compare_and_exchange (gint *atomic,
+                                   gint  oldval,
+                                   gint  newval)
{
-  return g_atomic_pointer_compare_and_exchange (atomic, oldval, newval);
+  gint result;
+  __asm __volatile ("lock; cmpxchgl %2, %1"
+                    : "=a" (result), "=m" (*atomic)
+                    : "r" (newval), "m" (*atomic), "0" (oldval));
+  return result == oldval;
}
-gint32
-g_atomic_int_get_fallback (gint32 *atomic)
+gboolean
+g_atomic_pointer_compare_and_exchange (gpointer *atomic,
+                                       gpointer  oldval,
+                                       gpointer  newval)
{
-  return g_atomic_int_get (atomic);
+  gpointer result;
+  __asm __volatile ("lock; cmpxchgq %q2, %1"
+                    : "=a" (result), "=m" (*atomic)
+                    : "r" (newval), "m" (*atomic), "0" (oldval));
+  return result == oldval;
}
-gint32
-g_atomic_pointer_get_fallback (gpointer *atomic)
+# elif defined (G_ATOMIC_POWERPC)
+/* Adapted from CVS version 1.12 of glibc's sysdeps/powerpc/bits/atomic.h
+ * and CVS version 1.3 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
+ * and CVS version 1.2 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
+ */
+gint
+g_atomic_int_exchange_and_add (gint *atomic,
+                               gint  val)
{
-  return g_atomic_int_get (atomic);
-}
+  gint result, temp;
+  __asm __volatile ("1: lwarx %0,0,%3\n"
+                    " add %1,%0,%4\n"
+                    " stwcx. %1,0,%3\n"
+                    " bne- 1b"
+                    : "=&b" (result), "=&r" (temp), "=m" (*atomic)
+                    : "b" (atomic), "r" (val), "2" (*atomic)
+                    : "cr0", "memory");
+  return result;
+}
+/* The same as above, to save a function call repeated here */
+void
+g_atomic_int_add (gint *atomic,
+                  gint  val)
+{
+  gint result, temp;
+  __asm __volatile ("1: lwarx %0,0,%3\n"
+                    " add %1,%0,%4\n"
+                    " stwcx. %1,0,%3\n"
+                    " bne- 1b"
+                    : "=&b" (result), "=&r" (temp), "=m" (*atomic)
+                    : "b" (atomic), "r" (val), "2" (*atomic)
+                    : "cr0", "memory");
+}
-# else /* !G_ATOMIC_USE_FALLBACK_IMPLEMENTATION */
+# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
gint oldval,
gint newval)
{
gint result;
__asm __volatile ("sync\n"
"1: lwarx %0,0,%1\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stwcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("sync\n"
"1: lwarx %0,0,%1\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stwcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
gint oldval,
gint newval)
{
__asm __volatile ("sync\n"
"1: lwarx %0,0,%1\n"
" extsw %0,%0\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stwcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("sync\n"
"1: ldarx %0,0,%1\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stdcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
# define G_ATOMIC_MEMORY_BARRIER __asm ("sync" : : : "memory")
# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
*/
gint
g_atomic_int_exchange_and_add (gint *atomic,
gint val)
{
return __sync_fetch_and_add_si (atomic, val);
}
void
g_atomic_int_add (gint *atomic,
gint val)
{
__sync_fetch_and_add_si (atomic, val);
}
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
gint oldval,
gint newval)
{
return __sync_bool_compare_and_swap_si (atomic, oldval, newval);
}
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
return __sync_bool_compare_and_swap_di ((long *)atomic,
(long)oldval, (long)newval);
}
# define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# else /* !G_ATOMIC */
# define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC */
#else /* !__GNUC__ */
# define DEFINE_WITH_MUTEXES
#endif /* __GNUC__ */
#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
G_LOCK_DEFINE_STATIC (g_atomic_lock);
-gint32
-g_atomic_int_exchange_and_add_fallback (gint32 *atomic,
-                                        gint32  val)
+gint
+g_atomic_int_exchange_and_add (gint *atomic,
+                               gint  val)
{
-  gint32 result;
+  gint result;
  G_LOCK (g_atomic_lock);
  result = *atomic;
@@ -92,8 +439,8 @@ g_atomic_int_exchange_and_add_fallback (gint32 *atomic,
void
-g_atomic_int_add_fallback (gint32 *atomic,
-                           gint32  val)
+g_atomic_int_add (gint *atomic,
+                  gint  val)
{
G_LOCK (g_atomic_lock);
*atomic += val;
@@ -101,9 +448,9 @@ g_atomic_int_add_fallback (gint32 *atomic,
}
gboolean
-g_atomic_int_compare_and_exchange_fallback (gint32 *atomic,
-                                            gint32  oldval,
-                                            gint32  newval)
+g_atomic_int_compare_and_exchange (gint *atomic,
+                                   gint  oldval,
+                                   gint  newval)
{
gboolean result;
@@ -121,9 +468,9 @@ g_atomic_int_compare_and_exchange_fallback (gint32 *atomic,
}
gboolean
-g_atomic_pointer_compare_and_exchange_fallback (gpointer *atomic,
-                                                gpointer  oldval,
-                                                gpointer  newval)
+g_atomic_pointer_compare_and_exchange (gpointer *atomic,
+                                       gpointer  oldval,
+                                       gpointer  newval)
{
gboolean result;
@@ -140,10 +487,10 @@ g_atomic_pointer_compare_and_exchange_fallback (gpointer *atomic,
  return result;
}
-static inline gint32
-g_atomic_int_get_fallback (gint32 *atomic)
+gint
+g_atomic_int_get (gint *atomic)
{
-  gint32 result;
+  gint result;
G_LOCK (g_atomic_lock);
result = *atomic;
@@ -152,8 +499,8 @@ g_atomic_int_get_fallback (gint32 *atomic)
  return result;
}
-static inline gpointer
-g_atomic_pointer_get_fallback (gpointer *atomic)
+gpointer
+g_atomic_pointer_get (gpointer *atomic)
{
gpointer result;
@@ -163,16 +510,56 @@ g_atomic_pointer_get_fallback (gpointer *atomic)
  return result;
}
-# endif /* G_ATOMIC_USE_FALLBACK_IMPLEMENTATION */
-#else /* !G_THREADS_ENABLED */
-gint32 g_atomic_int_exchange_and_add (gint32 *atomic,
-                                      gint32  val)
+#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
+gint
+g_atomic_int_get (gint *atomic)
{
-  gint32 result = *atomic;
-  *atomic += val;
+  gint result = *atomic;
+  G_ATOMIC_MEMORY_BARRIER;
  return result;
}
-#endif /* G_THREADS_ENABLED */
gpointer
g_atomic_pointer_get (gpointer *atomic)
{
gpointer result = *atomic;
G_ATOMIC_MEMORY_BARRIER;
return result;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
gint oldval,
gint newval)
{
return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}
gint
g_atomic_int_exchange_and_add (gint *atomic,
gint val)
{
gint result;
do
result = *atomic;
while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
return result;
}
void
g_atomic_int_add (gint *atomic,
gint val)
{
gint result;
do
result = *atomic;
while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */
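
On the weakly ordered targets above (sparcv9, alpha, powerpc), configure sets G_ATOMIC_OP_MEMORY_BARRIER_NEEDED, so g_atomic_int_get and g_atomic_pointer_get issue G_ATOMIC_MEMORY_BARRIER after the plain load. A hedged sketch of the kind of handoff that read barrier protects; shared and make_data are illustrative assumptions, not part of the commit:

static gpointer shared = NULL;

/* Writer: fully build the object first, then publish it with a CAS
 * (the CAS implementations above order the store appropriately). */
static void
publish (void)
{
  gpointer data = make_data ();
  g_atomic_pointer_compare_and_exchange (&shared, NULL, data);
}

/* Reader: without the barrier inside g_atomic_pointer_get, a weakly
 * ordered CPU could return a non-NULL pointer yet still let later
 * loads through that pointer observe stale memory. */
static gpointer
consume (void)
{
  return g_atomic_pointer_get (&shared);
}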

glib/gatomic.h

@@ -1,7 +1,7 @@
/* GLIB - Library of useful routines for C programming
* Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
*
- * GAtomic: atomic integer operation.
+ * g_atomic_*: atomic operations.
* Copyright (C) 2003 Sebastian Wilhelmi
*
* This library is free software; you can redistribute it and/or
@@ -34,507 +34,24 @@
G_BEGIN_DECLS
#ifdef G_THREADS_ENABLED
+gint     g_atomic_int_exchange_and_add         (gint     *atomic,
+                                                gint      val);
+void     g_atomic_int_add                      (gint     *atomic,
+                                                gint      val);
+gboolean g_atomic_int_compare_and_exchange     (gint     *atomic,
+                                                gint      oldval,
+                                                gint      newval);
+gboolean g_atomic_pointer_compare_and_exchange (gpointer *atomic,
+                                                gpointer  oldval,
+                                                gpointer  newval);
-gint32   g_atomic_int_exchange_and_add_fallback         (gint32   *atomic,
-                                                         gint32    val);
-void     g_atomic_int_add_fallback                      (gint32   *atomic,
-                                                         gint32    val);
-gboolean g_atomic_int_compare_and_exchange_fallback     (gint32   *atomic,
-                                                         gint32    oldval,
-                                                         gint32    newval);
-gboolean g_atomic_pointer_compare_and_exchange_fallback (gpointer *atomic,
-                                                         gpointer  oldval,
-                                                         gpointer  newval);
# if defined (__GNUC__)
# if defined (G_ATOMIC_INLINED_IMPLEMENTATION_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
*/
static inline gint32
g_atomic_int_exchange_and_add (gint32 *atomic,
gint32 val)
{
gint32 result;
__asm__ __volatile__ ("lock; xaddl %0,%1"
: "=r" (result), "=m" (*atomic)
: "0" (val), "m" (*atomic));
return result;
}
static inline void
g_atomic_int_add (gint32 *atomic,
gint32 val)
{
__asm__ __volatile__ ("lock; addl %1,%0"
: "=m" (*atomic)
: "ir" (val), "m" (*atomic));
}
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
gint32 result;
__asm __volatile ("lock; cmpxchgl %2, %1"
: "=a" (result), "=m" (*atomic)
: "r" (newval), "m" (*atomic), "0" (oldval));
return result == oldval;
}
/* The same code as above, as on i386 gpointer is 32 bit as well.
* Duplicating the code here seems more natural than casting the
* arguments and calling the former function */
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("lock; cmpxchgl %2, %1"
: "=a" (result), "=m" (*atomic)
: "r" (newval), "m" (*atomic), "0" (oldval));
return result == oldval;
}
# define G_ATOMIC_MEMORY_BARRIER() /* Not needed */
# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_SPARCV9) \
&& (defined(__sparcv8) || defined(__sparcv9) || defined(__sparc_v9__))
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
*/
/* Why the test for __sparcv8, wheras really the sparcv9 architecture
* is required for the folowing assembler instructions? On
* sparc-solaris the only difference detectable at compile time
* between no -m and -mcpu=v9 is __sparcv8.
*
* However, in case -mcpu=v8 is set, the assembler will fail. This
* should be rare however, as there are only very few v8-not-v9
* machines still out there (and we can't do better).
*/
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
gint32 result;
__asm __volatile ("cas [%4], %2, %0"
: "=r" (result), "=m" (*atomic)
: "r" (oldval), "m" (*atomic), "r" (atomic),
"0" (newval));
return result != 0;
}
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("cas [%4], %2, %0"
: "=r" (result), "=m" (*atomic)
: "r" (oldval), "m" (*atomic), "r" (atomic),
"0" (newval));
return result != 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
gpointer *a = atomic;
__asm __volatile ("casx [%4], %2, %0"
: "=r" (result), "=m" (*a)
: "r" (oldval), "m" (*a), "r" (a),
"0" (newval));
return result != 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
static inline gint32
g_atomic_int_exchange_and_add (gint32 *atomic,
gint32 val)
{
gint32 result;
do
result = *atomic;
while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
return result;
}
static inline void
g_atomic_int_add (gint32 *atomic,
gint32 val)
{
g_atomic_int_exchange_and_add (atomic, val);
}
# define G_ATOMIC_MEMORY_BARRIER() \
__asm __volatile ("membar #LoadLoad | #LoadStore" \
" | #StoreLoad | #StoreStore" : : : "memory")
# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
*/
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
gint32 result;
gint32 prev;
__asm__ __volatile__ (
" mb\n"
"1: ldl_l %0,%2\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,1b\n"
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (result)
: "m" (*atomic),
"Ir" ((gint64)oldval),
"Ir" (newval)
: "memory");
return result != 0;
}
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gint32 result;
gpointer prev;
__asm__ __volatile__ (
" mb\n"
"1: ldl_l %0,%2\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,1b\n"
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (result)
: "m" (*atomic),
"Ir" ((gint64)oldval),
"Ir" (newval)
: "memory");
return result != 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gint32 result;
gpointer prev;
__asm__ __volatile__ (
" mb\n"
"1: ldq_l %0,%2\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,1b\n"
" mb\n"
"2:"
: "=&r" (prev),
"=&r" (result)
: "m" (*atomic),
"Ir" ((gint64)oldval),
"Ir" (newval)
: "memory");
return result != 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
static inline gint32
g_atomic_int_exchange_and_add (gint32 *atomic,
gint32 val)
{
gint32 result;
do
result = *atomic;
while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
return result;
}
static inline void
g_atomic_int_add (gint32 *atomic,
gint32 val)
{
g_atomic_int_exchange_and_add (atomic, val);
}
# define G_ATOMIC_MEMORY_BARRIER() __asm ("mb" : : : "memory")
# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
*/
static inline gint32
g_atomic_int_exchange_and_add (gint32 *atomic,
gint32 val)
{
gint32 result;
__asm__ __volatile__ ("lock; xaddl %0,%1"
: "=r" (result), "=m" (*atomic)
: "0" (val), "m" (*atomic));
return result;
}
static inline void
g_atomic_int_add (gint32 *atomic,
gint32 val)
{
__asm__ __volatile__ ("lock; addl %1,%0"
: "=m" (*atomic)
: "ir" (val), "m" (*atomic));
}
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
gint32 result;
__asm __volatile ("lock; cmpxchgl %2, %1"
: "=a" (result), "=m" (*atomic)
: "r" (newval), "m" (*atomic), "0" (oldval));
return result == oldval;
}
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("lock; cmpxchgq %q2, %1"
: "=a" (result), "=m" (*atomic)
: "r" (newval), "m" (*atomic), "0" (oldval));
return result == oldval;
}
# define G_ATOMIC_MEMORY_BARRIER() /* Not needed */
# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_POWERPC)
/* Adapted from CVS version 1.12 of glibc's sysdeps/powerpc/bits/atomic.h
* and CVS version 1.3 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
* and CVS version 1.2 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
*/
static inline gint32
g_atomic_int_exchange_and_add (gint32 *atomic,
gint32 val)
{
gint32 result, temp;
__asm __volatile ("1: lwarx %0,0,%3\n"
" add %1,%0,%4\n"
" stwcx. %1,0,%3\n"
" bne- 1b"
: "=&b" (result), "=&r" (temp), "=m" (*atomic)
: "b" (atomic), "r" (val), "2" (*atomic)
: "cr0", "memory");
return result;
}
static inline void
g_atomic_int_add (gint32 *atomic,
gint32 val)
{
g_atomic_int_exchange_and_add (atomic, val);
}
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
gint32 result;
__asm __volatile ("sync\n"
"1: lwarx %0,0,%1\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stwcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("sync\n"
"1: lwarx %0,0,%1\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stwcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
__asm __volatile ("sync\n"
"1: lwarx %0,0,%1\n"
" extsw %0,%0\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stwcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
gpointer result;
__asm __volatile ("sync\n"
"1: ldarx %0,0,%1\n"
" subf. %0,%2,%0\n"
" bne 2f\n"
" stdcx. %3,0,%1\n"
" bne- 1b\n"
"2: isync"
: "=&r" (result)
: "b" (atomic), "r" (oldval), "r" (newval)
: "cr0", "memory");
return result == 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
# define G_ATOMIC_MEMORY_BARRIER() __asm ("sync" : : : "memory")
# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
*/
static inline gint32
g_atomic_int_exchange_and_add (gint32 *atomic,
gint32 val)
{
return __sync_fetch_and_add_si (atomic, val);
}
static inline void
g_atomic_int_add (gint32 *atomic,
gint32 val)
{
__sync_fetch_and_add_si (atomic, val);
}
static inline gboolean
g_atomic_int_compare_and_exchange (gint32 *atomic,
gint32 oldval,
gint32 newval)
{
return __sync_bool_compare_and_exchange_si (atomic, oldval, newval);
}
static inline gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
gpointer oldval,
gpointer newval)
{
return __sync_bool_compare_and_exchange_di ((long *)atomic,
(long)oldval, (long)newval);
}
# define G_ATOMIC_MEMORY_BARRIER() __sync_synchronize ()
# else /* !G_ATOMIC_INLINED_IMPLEMENTATION_... */
# define G_ATOMIC_USE_FALLBACK_IMPLEMENTATION
# endif /* G_ATOMIC_INLINED_IMPLEMENTATION_... */
# else /* !__GNU__ */
# define G_ATOMIC_USE_FALLBACK_IMPLEMENTATION
# endif /* __GNUC__ */
# ifdef G_ATOMIC_USE_FALLBACK_IMPLEMENTATION
# define g_atomic_int_exchange_and_add \
g_atomic_int_exchange_and_add_fallback
# define g_atomic_int_add \
g_atomic_int_add_fallback
# define g_atomic_int_compare_and_exchange \
g_atomic_int_compare_and_exchange_fallback
# define g_atomic_pointer_compare_and_exchange \
g_atomic_pointer_compare_and_exchange_fallback
# define g_atomic_int_get \
g_atomic_int_get_fallback
# define g_atomic_pointer_get \
g_atomic_pointer_get_fallback
# else /* !G_ATOMIC_USE_FALLBACK_IMPLEMENTATION */
static inline gint32
g_atomic_int_get (gint32 *atomic)
{
gint32 result = *atomic;
G_ATOMIC_MEMORY_BARRIER ();
return result;
}
static inline gpointer
g_atomic_pointer_get (gpointer *atomic)
{
gpointer result = *atomic;
G_ATOMIC_MEMORY_BARRIER ();
return result;
}
# endif /* G_ATOMIC_USE_FALLBACK_IMPLEMENTATION */
#else /* !G_THREADS_ENABLED */
gint32 g_atomic_int_exchange_and_add (gint32 *atomic, gint32 val);
# define g_atomic_int_add(atomic, val) (void)(*(atomic) += (val))
# define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
(*(atomic) == (oldval) ? (*(atomic) = (newval), TRUE) : FALSE)
# define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
(*(atomic) == (oldval) ? (*(atomic) = (newval), TRUE) : FALSE)
+#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
+gint     g_atomic_int_get     (gint     *atomic);
+gpointer g_atomic_pointer_get (gpointer *atomic);
+#else /* !G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
# define g_atomic_int_get(atomic) (*(atomic))
# define g_atomic_pointer_get(atomic) (*(atomic))
-# define G_ATOMIC_MEMORY_BARRIER() /* Not needed */
-#endif /* G_THREADS_ENABLED */
+#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#define g_atomic_int_inc(atomic) (g_atomic_int_add ((atomic), 1))
#define g_atomic_int_dec_and_test(atomic) \

glib/gthread.h

@@ -29,8 +29,7 @@
#include <glib/gerror.h>
#include <glib/gtypes.h>
#include <glib/gutils.h> /* for G_CAN_INLINE */
-#include <glib/gatomic.h>  /* for G_ATOMIC_MEMORY_BARRIER */
+#include <glib/gatomic.h>  /* for g_atomic_pointer_get */
G_BEGIN_DECLS
@@ -138,6 +137,10 @@ void g_thread_init_with_errorcheck_mutexes (GThreadFunctions* vtable);
/* internal function for fallback static mutex implementation */
GMutex* g_static_mutex_get_mutex_impl (GMutex **mutex);
+#define g_static_mutex_get_mutex_impl_shortcut(mutex) \
+  (g_atomic_pointer_get ((gpointer*)mutex) ? *(mutex) : \
+   g_static_mutex_get_mutex_impl (mutex))
/* shorthands for conditional and unconditional function calls */
#define G_THREAD_UF(op, arglist) \
@@ -304,30 +307,15 @@ struct _GOnce
gpointer g_once_impl (GOnce *once, GThreadFunc func, gpointer arg);
-#if defined (G_CAN_INLINE) && defined (G_ATOMIC_MEMORY_BARRIER)
-static inline gpointer
-g_once (GOnce *once, GThreadFunc func, gpointer arg)
-{
-  if (once->status != G_ONCE_STATUS_READY)
-    return g_once_impl (once, func, arg);
-  G_ATOMIC_MEMORY_BARRIER ();
-  return once->retval;
-}
-static inline GMutex*
-g_static_mutex_get_mutex_impl_shortcut (GMutex **mutex)
-{
-  if (! *mutex)
-    return g_static_mutex_get_mutex_impl (mutex);
-  G_ATOMIC_MEMORY_BARRIER ();
-  return *mutex;
-}
-#else /* !G_CAN_INLINE || !G_ATOMIC_MEMORY_BARRIER */
-# define g_once g_once_impl
-# define g_static_mutex_get_mutex_impl_shortcut g_static_mutex_get_mutex_impl
-#endif /* G_CAN_INLINE && G_ATOMIC_MEMORY_BARRIER*/
+#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
+# define g_once(once, func, arg) g_once_impl ((once), (func), (arg))
+#else /* !G_ATOMIC_OP_MEMORY_BARRIER_NEEDED*/
+# define g_once(once, func, arg) \
+  (((once)->status == G_ONCE_STATUS_READY) ? \
+   (once)->retval : \
+   g_once_impl ((once), (func), (arg)))
+#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
/* these are some convenience macros that expand to nothing if GLib
* was configured with --disable-threads. for using StaticMutexes,
* you define them with G_LOCK_DEFINE_STATIC (name) or G_LOCK_DEFINE (name)

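The g_static_mutex_get_mutex_impl_shortcut macro above is double-checked locking: an atomic read serves as the fast path, and only a NULL result falls through to g_static_mutex_get_mutex_impl, which takes a lock. A sketch of the same pattern for a user-level lazy singleton; get_singleton, create_instance and destroy_instance are hypothetical names, not part of the commit:

static gpointer singleton = NULL;

static gpointer
get_singleton (void)
{
  gpointer inst = g_atomic_pointer_get (&singleton);

  if (inst == NULL)
    {
      gpointer fresh = create_instance ();

      /* Only one thread installs its instance; losers discard theirs. */
      if (g_atomic_pointer_compare_and_exchange (&singleton, NULL, fresh))
        inst = fresh;
      else
        {
          destroy_instance (fresh);
          inst = g_atomic_pointer_get (&singleton);
        }
    }
  return inst;
}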
tests/atomic-test.c

@@ -11,7 +11,7 @@ main (int argc,
char *argv[])
{
gint i;
-  gint32 atomic = -5;
+  gint atomic = -5;
gpointer atomic_pointer = NULL;
gpointer biggest_pointer = atomic_pointer - 1;
@@ -43,6 +43,12 @@ main (int argc,
g_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12));
g_assert (atomic == 20);
+  g_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
+  g_assert (atomic == G_MAXINT);
+  g_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
+  g_assert (atomic == G_MININT);
g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer,
NULL, biggest_pointer));
g_assert (atomic_pointer == biggest_pointer);
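
The new assertions cross the integer sign boundary deliberately: exchanging G_MAXINT (0x7fffffff) for G_MININT (0x80000000) flips every bit, and on 64-bit targets the comparison happens in a full-width register (note the extsw sign extension in the powerpc64 variant above), so an implementation that truncates or zero-extends fails exactly here. The same checks as a stand-alone snippet, assuming only glib.h:

#include <glib.h>

static void
check_sign_boundary (void)
{
  gint atomic = 20;

  g_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
  g_assert (atomic == G_MAXINT);

  /* Fails if the implementation compares only the low bits or
   * forgets to sign-extend the 32-bit value. */
  g_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
  g_assert (atomic == G_MININT);
}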