diff --git a/glib/gbitlock.c b/glib/gbitlock.c
index 900897517..604028747 100644
--- a/glib/gbitlock.c
+++ b/glib/gbitlock.c
@@ -31,6 +31,7 @@
 #include <…>
 #include <…>
 
+#include "gtestutils.h"
 #include "gthreadprivate.h"
 
 #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION
@@ -186,6 +187,80 @@ bit_lock_contended_class (gpointer address)
 #endif
 #endif
 
+/**
+ * g_bit_lock_and_get:
+ * @address: a pointer to an integer
+ * @lock_bit: a bit value between 0 and 31
+ * @out_val: (out) (optional): returns the new value at @address atomically.
+ *   This is the value right after setting the lock; it thus always has the
+ *   lock bit set, while the previous value at @address had the lock bit unset.
+ *
+ * This is like g_bit_lock(), except it can atomically return the new value at
+ * @address (right after obtaining the lock).
+ *
+ * Since: 2.86
+ **/
+void
+g_bit_lock_and_get (gint *address,
+                    guint lock_bit,
+                    gint *out_val)
+{
+  const guint MASK = 1u << lock_bit;
+  guint v;
+
+#ifdef G_ENABLE_DEBUG
+  g_assert (lock_bit < 32u);
+#endif
+
+#ifdef USE_ASM_GOTO
+  if (G_LIKELY (!out_val))
+    {
+      while (TRUE)
+        {
+          __asm__ volatile goto ("lock bts %1, (%0)\n"
+                                 "jc %l[contended]"
+                                 : /* no output */
+                                 : "r" (address), "r" (lock_bit)
+                                 : "cc", "memory"
+                                 : contended);
+          return;
+
+        contended:
+          {
+            guint v;
+
+            v = (guint) g_atomic_int_get (address);
+            if (v & MASK)
+              {
+                guint class = bit_lock_contended_class (address);
+
+                g_atomic_int_add (&g_bit_lock_contended[class], +1);
+                g_futex_wait (address, v);
+                g_atomic_int_add (&g_bit_lock_contended[class], -1);
+              }
+          }
+        }
+    }
+#endif
+
+retry:
+  v = g_atomic_int_or ((guint *) address, MASK);
+  if (v & MASK)
+    /* already locked */
+    {
+      guint class = bit_lock_contended_class (address);
+
+      g_atomic_int_add (&g_bit_lock_contended[class], +1);
+      g_futex_wait (address, v);
+      g_atomic_int_add (&g_bit_lock_contended[class], -1);
+
+      goto retry;
+    }
+
+  if (out_val)
+    *out_val = (gint) (v | MASK);
+}
+
 /**
  * g_bit_lock:
  * @address: a pointer to an integer
@@ -212,52 +287,7 @@ void
 g_bit_lock (volatile gint *address,
             gint lock_bit)
 {
-  gint *address_nonvolatile = (gint *) address;
-
-#ifdef USE_ASM_GOTO
- retry:
-  __asm__ volatile goto ("lock bts %1, (%0)\n"
-                         "jc %l[contended]"
-                         : /* no output */
-                         : "r" (address), "r" (lock_bit)
-                         : "cc", "memory"
-                         : contended);
-  return;
-
- contended:
-  {
-    guint mask = 1u << lock_bit;
-    guint v;
-
-    v = (guint) g_atomic_int_get (address_nonvolatile);
-    if (v & mask)
-      {
-        guint class = bit_lock_contended_class (address_nonvolatile);
-
-        g_atomic_int_add (&g_bit_lock_contended[class], +1);
-        g_futex_wait (address_nonvolatile, v);
-        g_atomic_int_add (&g_bit_lock_contended[class], -1);
-      }
-  }
-  goto retry;
-#else
-  guint mask = 1u << lock_bit;
-  guint v;
-
- retry:
-  v = g_atomic_int_or (address_nonvolatile, mask);
-  if (v & mask)
-    /* already locked */
-    {
-      guint class = bit_lock_contended_class (address_nonvolatile);
-
-      g_atomic_int_add (&g_bit_lock_contended[class], +1);
-      g_futex_wait (address_nonvolatile, v);
-      g_atomic_int_add (&g_bit_lock_contended[class], -1);
-
-      goto retry;
-    }
-#endif
+  g_bit_lock_and_get ((gint *) address, (guint) lock_bit, NULL);
 }
 
 /**
@@ -354,6 +384,63 @@ g_bit_unlock (volatile gint *address,
     }
 }
 
+/**
+ * g_bit_unlock_and_set:
+ * @address: a pointer to an integer
+ * @lock_bit: a bit value between 0 and 31
+ * @val: the new value to set
+ * @preserve_mask: if non-zero, those bits of the current value in @address
+ *   are preserved; the corresponding bits of @val are ignored.
+ *   Note that the @lock_bit bit will always be unset regardless of
+ *   @val, @preserve_mask and the currently set value in @address.
+ *
+ * This is like g_bit_unlock() but also atomically sets @address to @val
+ * (while preserving bits of @address according to @preserve_mask and
+ * clearing @lock_bit).
+ *
+ * Since: 2.86
+ **/
+void
+g_bit_unlock_and_set (gint *address,
+                      guint lock_bit,
+                      gint val,
+                      gint preserve_mask)
+{
+  const guint MASK = 1u << lock_bit;
+
+#ifdef G_ENABLE_DEBUG
+  g_assert (lock_bit < 32u);
+#endif
+
+  if (G_UNLIKELY (preserve_mask != 0))
+    {
+      guint old_val;
+      guint new_val;
+
+      old_val = (guint) g_atomic_int_get (address);
+
+    again:
+      new_val = ((old_val & ((guint) preserve_mask)) |
+                 (((guint) val) & ~((guint) preserve_mask))) & ~MASK;
+      if (!g_atomic_int_compare_and_exchange_full (address, (gint) old_val, (gint) new_val, (gint *) &old_val))
+        goto again;
+    }
+  else
+    {
+      g_atomic_int_set (address, (gint) (((guint) val) & ~MASK));
+    }
+
+  /* Warning: unlocking may allow another thread to proceed and destroy the
+   * memory that @address points to. We thus must not dereference it anymore.
+   */
+
+  {
+    guint class = bit_lock_contended_class (address);
+
+    if (g_atomic_int_get (&g_bit_lock_contended[class]))
+      g_futex_wake (address);
+  }
+}
 
 /* We emulate pointer-sized futex(2) because the kernel API only
  * supports integers.
@@ -653,8 +740,8 @@ g_pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintpt
  * @ptr: the new pointer value to set
  * @preserve_mask: if non-zero, those bits of the current pointer in @address
  *   are preserved.
- *   Note that the @lock_bit bit will be always set according to @set,
- *   regardless of @preserve_mask and the currently set value in @address.
+ *   Note that the @lock_bit bit will always be unset regardless of
+ *   @ptr, @preserve_mask and the currently set value in @address.
  *
  * This is equivalent to g_pointer_bit_unlock() and atomically setting
  * the pointer value.
diff --git a/glib/gbitlock.h b/glib/gbitlock.h
index f44a52c37..1411157d4 100644
--- a/glib/gbitlock.h
+++ b/glib/gbitlock.h
@@ -34,6 +34,11 @@ G_BEGIN_DECLS
 GLIB_AVAILABLE_IN_ALL
 void      g_bit_lock                  (volatile gint *address,
                                        gint           lock_bit);
+GLIB_AVAILABLE_IN_2_86
+void      g_bit_lock_and_get          (gint          *address,
+                                       guint          lock_bit,
+                                       gint          *out_val);
+
 GLIB_AVAILABLE_IN_ALL
 gboolean  g_bit_trylock               (volatile gint *address,
                                        gint           lock_bit);
@@ -41,6 +46,12 @@ GLIB_AVAILABLE_IN_ALL
 void      g_bit_unlock                (volatile gint *address,
                                        gint           lock_bit);
 
+GLIB_AVAILABLE_IN_2_86
+void      g_bit_unlock_and_set        (gint          *address,
+                                       guint          lock_bit,
+                                       gint           new_val,
+                                       gint           preserve_mask);
+
 GLIB_AVAILABLE_IN_ALL
 void      g_pointer_bit_lock          (volatile void *address,
                                        gint           lock_bit);
diff --git a/glib/tests/1bit-mutex.c b/glib/tests/1bit-mutex.c
index f6a90f0bd..2904753c7 100644
--- a/glib/tests/1bit-mutex.c
+++ b/glib/tests/1bit-mutex.c
@@ -47,8 +47,10 @@
   #undef g_pointer_bit_unlock_and_set
 
   #define g_bit_lock                  _emufutex_g_bit_lock
+  #define g_bit_lock_and_get          _emufutex_g_bit_lock_and_get
   #define g_bit_trylock               _emufutex_g_bit_trylock
   #define g_bit_unlock                _emufutex_g_bit_unlock
+  #define g_bit_unlock_and_set        _emufutex_g_bit_unlock_and_set
   #define g_pointer_bit_lock          _emufutex_g_pointer_bit_lock
   #define g_pointer_bit_lock_and_get  _emufutex_g_pointer_bit_lock_and_get
   #define g_pointer_bit_trylock       _emufutex_g_pointer_bit_trylock
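Usage note (not part of the patch): below is a minimal sketch of how the new
g_bit_lock_and_get() / g_bit_unlock_and_set() pair might be used together.
The Counter struct, the bit layout and the counter_increment() helper are
hypothetical and exist only for illustration; only the two GLib calls come
from this change, and compiling the sketch assumes GLib >= 2.86 headers.

/* Hypothetical example: bit 0 of Counter.atomic is the lock bit,
 * the remaining bits store a small counter. */
#include <glib.h>

#define LOCK_BIT      0u  /* bit used as the bit lock */
#define COUNTER_SHIFT 1   /* counter lives in bits 1..31 */

typedef struct
{
  gint atomic;
} Counter;

static void
counter_increment (Counter *c)
{
  gint locked_val;
  guint counter;

  /* Take the lock and atomically learn the value stored alongside it. */
  g_bit_lock_and_get (&c->atomic, LOCK_BIT, &locked_val);

  /* The returned value always has the lock bit set; strip it before
   * decoding the counter from the remaining bits. */
  counter = ((guint) locked_val & ~(1u << LOCK_BIT)) >> COUNTER_SHIFT;
  counter++;

  /* Release the lock and publish the new counter in one atomic store.
   * A zero preserve_mask means no bits of the old value are carried over;
   * the lock bit is cleared unconditionally either way. */
  g_bit_unlock_and_set (&c->atomic, LOCK_BIT,
                        (gint) (counter << COUNTER_SHIFT), 0);
}

The point of the combined calls is that the value read under the lock and the
value published on unlock each happen in a single atomic operation, so no
separate g_atomic_int_get()/g_atomic_int_set() is needed around the lock.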