Merge branch '2418-more-stupid-atomics' into 'main'

gbitlock: Drop unnecessary volatile qualifiers

Closes #2418

See merge request GNOME/glib!2131
This commit is contained in:
Emmanuele Bassi 2021-06-07 17:21:50 +00:00
commit ff8b43a154
2 changed files with 63 additions and 38 deletions

View File

@ -76,7 +76,7 @@ static GSList *g_futex_address_list = NULL;
* separate process. * separate process.
*/ */
static void static void
g_futex_wait (const volatile gint *address, g_futex_wait (const gint *address,
gint value) gint value)
{ {
syscall (__NR_futex, address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL); syscall (__NR_futex, address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL);
@ -94,7 +94,7 @@ g_futex_wait (const volatile gint *address,
* thread being woken up. * thread being woken up.
*/ */
static void static void
g_futex_wake (const volatile gint *address) g_futex_wake (const gint *address)
{ {
syscall (__NR_futex, address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); syscall (__NR_futex, address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
} }
@ -104,13 +104,13 @@ g_futex_wake (const volatile gint *address)
/* emulate futex(2) */ /* emulate futex(2) */
typedef struct typedef struct
{ {
const volatile gint *address; const gint *address;
gint ref_count; gint ref_count;
GCond wait_queue; GCond wait_queue;
} WaitAddress; } WaitAddress;
static WaitAddress * static WaitAddress *
g_futex_find_address (const volatile gint *address) g_futex_find_address (const gint *address)
{ {
GSList *node; GSList *node;
@ -126,7 +126,7 @@ g_futex_find_address (const volatile gint *address)
} }
static void static void
g_futex_wait (const volatile gint *address, g_futex_wait (const gint *address,
gint value) gint value)
{ {
g_mutex_lock (&g_futex_mutex); g_mutex_lock (&g_futex_mutex);
@ -159,7 +159,7 @@ g_futex_wait (const volatile gint *address,
} }
static void static void
g_futex_wake (const volatile gint *address) g_futex_wake (const gint *address)
{ {
WaitAddress *waiter; WaitAddress *waiter;
@ -177,7 +177,7 @@ g_futex_wake (const volatile gint *address)
#endif #endif
#define CONTENTION_CLASSES 11 #define CONTENTION_CLASSES 11
static volatile gint g_bit_lock_contended[CONTENTION_CLASSES]; static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */
#if (defined (i386) || defined (__amd64__)) #if (defined (i386) || defined (__amd64__))
#if G_GNUC_CHECK_VERSION(4, 5) #if G_GNUC_CHECK_VERSION(4, 5)
@ -202,7 +202,8 @@ static volatile gint g_bit_lock_contended[CONTENTION_CLASSES];
* *
* This function accesses @address atomically. All other accesses to * This function accesses @address atomically. All other accesses to
* @address must be atomic in order for this function to work * @address must be atomic in order for this function to work
* reliably. * reliably. While @address has a `volatile` qualifier, this is a historical
* artifact and the argument passed to it should not be `volatile`.
* *
* Since: 2.24 * Since: 2.24
**/ **/
@ -210,6 +211,8 @@ void
g_bit_lock (volatile gint *address, g_bit_lock (volatile gint *address,
gint lock_bit) gint lock_bit)
{ {
gint *address_nonvolatile = (gint *) address;
#ifdef USE_ASM_GOTO #ifdef USE_ASM_GOTO
retry: retry:
__asm__ volatile goto ("lock bts %1, (%0)\n" __asm__ volatile goto ("lock bts %1, (%0)\n"
@ -225,13 +228,13 @@ g_bit_lock (volatile gint *address,
guint mask = 1u << lock_bit; guint mask = 1u << lock_bit;
guint v; guint v;
v = (guint) g_atomic_int_get (address); v = (guint) g_atomic_int_get (address_nonvolatile);
if (v & mask) if (v & mask)
{ {
guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
g_atomic_int_add (&g_bit_lock_contended[class], +1); g_atomic_int_add (&g_bit_lock_contended[class], +1);
g_futex_wait (address, v); g_futex_wait (address_nonvolatile, v);
g_atomic_int_add (&g_bit_lock_contended[class], -1); g_atomic_int_add (&g_bit_lock_contended[class], -1);
} }
} }
@ -241,14 +244,14 @@ g_bit_lock (volatile gint *address,
guint v; guint v;
retry: retry:
v = g_atomic_int_or (address, mask); v = g_atomic_int_or (address_nonvolatile, mask);
if (v & mask) if (v & mask)
/* already locked */ /* already locked */
{ {
guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
g_atomic_int_add (&g_bit_lock_contended[class], +1); g_atomic_int_add (&g_bit_lock_contended[class], +1);
g_futex_wait (address, v); g_futex_wait (address_nonvolatile, v);
g_atomic_int_add (&g_bit_lock_contended[class], -1); g_atomic_int_add (&g_bit_lock_contended[class], -1);
goto retry; goto retry;
@ -272,7 +275,8 @@ g_bit_lock (volatile gint *address,
* *
* This function accesses @address atomically. All other accesses to * This function accesses @address atomically. All other accesses to
* @address must be atomic in order for this function to work * @address must be atomic in order for this function to work
* reliably. * reliably. While @address has a `volatile` qualifier, this is a historical
* artifact and the argument passed to it should not be `volatile`.
* *
* Returns: %TRUE if the lock was acquired * Returns: %TRUE if the lock was acquired
* *
@ -294,10 +298,11 @@ g_bit_trylock (volatile gint *address,
return result; return result;
#else #else
gint *address_nonvolatile = (gint *) address;
guint mask = 1u << lock_bit; guint mask = 1u << lock_bit;
guint v; guint v;
v = g_atomic_int_or (address, mask); v = g_atomic_int_or (address_nonvolatile, mask);
return ~v & mask; return ~v & mask;
#endif #endif
@ -314,7 +319,8 @@ g_bit_trylock (volatile gint *address,
* *
* This function accesses @address atomically. All other accesses to * This function accesses @address atomically. All other accesses to
* @address must be atomic in order for this function to work * @address must be atomic in order for this function to work
* reliably. * reliably. While @address has a `volatile` qualifier, this is a historical
* artifact and the argument passed to it should not be `volatile`.
* *
* Since: 2.24 * Since: 2.24
**/ **/
@ -322,6 +328,8 @@ void
g_bit_unlock (volatile gint *address, g_bit_unlock (volatile gint *address,
gint lock_bit) gint lock_bit)
{ {
gint *address_nonvolatile = (gint *) address;
#ifdef USE_ASM_GOTO #ifdef USE_ASM_GOTO
__asm__ volatile ("lock btr %1, (%0)" __asm__ volatile ("lock btr %1, (%0)"
: /* no output */ : /* no output */
@ -330,14 +338,14 @@ g_bit_unlock (volatile gint *address,
#else #else
guint mask = 1u << lock_bit; guint mask = 1u << lock_bit;
g_atomic_int_and (address, ~mask); g_atomic_int_and (address_nonvolatile, ~mask);
#endif #endif
{ {
guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
if (g_atomic_int_get (&g_bit_lock_contended[class])) if (g_atomic_int_get (&g_bit_lock_contended[class]))
g_futex_wake (address); g_futex_wake (address_nonvolatile);
} }
} }
@ -366,10 +374,10 @@ g_bit_unlock (volatile gint *address,
* *
* g_futex_wake (g_futex_int_address (int_address)); * g_futex_wake (g_futex_int_address (int_address));
*/ */
static const volatile gint * static const gint *
g_futex_int_address (const volatile void *address) g_futex_int_address (const void *address)
{ {
const volatile gint *int_address = address; const gint *int_address = address;
/* this implementation makes these (reasonable) assumptions: */ /* this implementation makes these (reasonable) assumptions: */
G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN ||
@ -395,12 +403,17 @@ g_futex_int_address (const volatile void *address)
* For portability reasons, you may only lock on the bottom 32 bits of * For portability reasons, you may only lock on the bottom 32 bits of
* the pointer. * the pointer.
* *
* While @address has a `volatile` qualifier, this is a historical
* artifact and the argument passed to it should not be `volatile`.
*
* Since: 2.30 * Since: 2.30
**/ **/
void void
(g_pointer_bit_lock) (volatile void *address, (g_pointer_bit_lock) (volatile void *address,
gint lock_bit) gint lock_bit)
{ {
void *address_nonvolatile = (void *) address;
g_return_if_fail (lock_bit < 32); g_return_if_fail (lock_bit < 32);
{ {
@ -416,23 +429,23 @@ void
contended: contended:
{ {
volatile gsize *pointer_address = address; gsize *pointer_address = address_nonvolatile;
gsize mask = 1u << lock_bit; gsize mask = 1u << lock_bit;
gsize v; gsize v;
v = (gsize) g_atomic_pointer_get (pointer_address); v = (gsize) g_atomic_pointer_get (pointer_address);
if (v & mask) if (v & mask)
{ {
guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
g_atomic_int_add (&g_bit_lock_contended[class], +1); g_atomic_int_add (&g_bit_lock_contended[class], +1);
g_futex_wait (g_futex_int_address (address), v); g_futex_wait (g_futex_int_address (address_nonvolatile), v);
g_atomic_int_add (&g_bit_lock_contended[class], -1); g_atomic_int_add (&g_bit_lock_contended[class], -1);
} }
} }
goto retry; goto retry;
#else #else
volatile gsize *pointer_address = address; gsize *pointer_address = address_nonvolatile;
gsize mask = 1u << lock_bit; gsize mask = 1u << lock_bit;
gsize v; gsize v;
@ -441,10 +454,10 @@ void
if (v & mask) if (v & mask)
/* already locked */ /* already locked */
{ {
guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
g_atomic_int_add (&g_bit_lock_contended[class], +1); g_atomic_int_add (&g_bit_lock_contended[class], +1);
g_futex_wait (g_futex_int_address (address), (guint) v); g_futex_wait (g_futex_int_address (address_nonvolatile), (guint) v);
g_atomic_int_add (&g_bit_lock_contended[class], -1); g_atomic_int_add (&g_bit_lock_contended[class], -1);
goto retry; goto retry;
@ -458,12 +471,15 @@ void
* @address: (not nullable): a pointer to a #gpointer-sized value * @address: (not nullable): a pointer to a #gpointer-sized value
* @lock_bit: a bit value between 0 and 31 * @lock_bit: a bit value between 0 and 31
* *
* This is equivalent to g_bit_trylock, but working on pointers (or * This is equivalent to g_bit_trylock(), but working on pointers (or
* other pointer-sized values). * other pointer-sized values).
* *
* For portability reasons, you may only lock on the bottom 32 bits of * For portability reasons, you may only lock on the bottom 32 bits of
* the pointer. * the pointer.
* *
* While @address has a `volatile` qualifier, this is a historical
* artifact and the argument passed to it should not be `volatile`.
*
* Returns: %TRUE if the lock was acquired * Returns: %TRUE if the lock was acquired
* *
* Since: 2.30 * Since: 2.30
@ -487,7 +503,8 @@ gboolean
return result; return result;
#else #else
volatile gsize *pointer_address = address; void *address_nonvolatile = (void *) address;
gsize *pointer_address = address_nonvolatile;
gsize mask = 1u << lock_bit; gsize mask = 1u << lock_bit;
gsize v; gsize v;
@ -511,12 +528,17 @@ gboolean
* For portability reasons, you may only lock on the bottom 32 bits of * For portability reasons, you may only lock on the bottom 32 bits of
* the pointer. * the pointer.
* *
* While @address has a `volatile` qualifier, this is a historical
* artifact and the argument passed to it should not be `volatile`.
*
* Since: 2.30 * Since: 2.30
**/ **/
void void
(g_pointer_bit_unlock) (volatile void *address, (g_pointer_bit_unlock) (volatile void *address,
gint lock_bit) gint lock_bit)
{ {
void *address_nonvolatile = (void *) address;
g_return_if_fail (lock_bit < 32); g_return_if_fail (lock_bit < 32);
{ {
@ -526,16 +548,16 @@ void
: "r" (address), "r" ((gsize) lock_bit) : "r" (address), "r" ((gsize) lock_bit)
: "cc", "memory"); : "cc", "memory");
#else #else
volatile gsize *pointer_address = address; gsize *pointer_address = address_nonvolatile;
gsize mask = 1u << lock_bit; gsize mask = 1u << lock_bit;
g_atomic_pointer_and (pointer_address, ~mask); g_atomic_pointer_and (pointer_address, ~mask);
#endif #endif
{ {
guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
if (g_atomic_int_get (&g_bit_lock_contended[class])) if (g_atomic_int_get (&g_bit_lock_contended[class]))
g_futex_wake (g_futex_int_address (address)); g_futex_wake (g_futex_int_address (address_nonvolatile));
} }
} }
} }

View File

@ -97,9 +97,12 @@ test_types (void)
/* Note that atomic variables should almost certainly not be marked as /* Note that atomic variables should almost certainly not be marked as
* `volatile` — see http://isvolatileusefulwiththreads.in/c/. This test exists * `volatile` — see http://isvolatileusefulwiththreads.in/c/. This test exists
* to make sure that we don't warn when built against older third party code. */ * to make sure that we don't warn when built against older third party code. */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
g_atomic_pointer_set (&vp_str_vol, NULL); g_atomic_pointer_set (&vp_str_vol, NULL);
res = g_atomic_pointer_compare_and_exchange (&vp_str_vol, NULL, str); res = g_atomic_pointer_compare_and_exchange (&vp_str_vol, NULL, str);
g_assert_true (res); g_assert_true (res);
#pragma GCC diagnostic pop
g_atomic_pointer_set (&ip, 0); g_atomic_pointer_set (&ip, 0);
ip2 = g_atomic_pointer_get (&ip); ip2 = g_atomic_pointer_get (&ip);