From d3e0b3620af2607058a25c4f59bdd87490caefb5 Mon Sep 17 00:00:00 2001
From: Thomas Haller
Date: Wed, 6 Mar 2024 17:03:13 +0100
Subject: [PATCH 1/6] gdataset: expose g_datalist_lock()/g_datalist_unlock()
 as private API

g_datalist_id_update_atomic() allows the caller to use GData's internal
lock and perform complex operations on the data while holding the lock.

Now, also expose the g_datalist_{lock,unlock}() functions as private
API. That will be useful because g_datalist_id_update_atomic() and the
GData lock are the main synchronization points for GObject. By being
able to take those locks directly, we can perform operations without
also having to look up the GData key.
---
 glib/gdataset.c        | 12 ++----------
 glib/gdatasetprivate.h | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/glib/gdataset.c b/glib/gdataset.c
index 57987b8e7..aeb90ac7e 100644
--- a/glib/gdataset.c
+++ b/glib/gdataset.c
@@ -151,27 +151,19 @@ static GDataset *g_dataset_cached = NULL; /* should this be
 
 /* --- functions --- */
 
-#define DATALIST_LOCK_BIT 2
-
 G_ALWAYS_INLINE static inline GData *
 g_datalist_lock_and_get (GData **datalist)
 {
   guintptr ptr;
 
-  g_pointer_bit_lock_and_get ((void **) datalist, DATALIST_LOCK_BIT, &ptr);
+  g_pointer_bit_lock_and_get ((void **) datalist, _G_DATALIST_LOCK_BIT, &ptr);
   return G_DATALIST_CLEAN_POINTER (ptr);
 }
 
-static void
-g_datalist_unlock (GData **datalist)
-{
-  g_pointer_bit_unlock ((void **)datalist, DATALIST_LOCK_BIT);
-}
-
 static void
 g_datalist_unlock_and_set (GData **datalist, gpointer ptr)
 {
-  g_pointer_bit_unlock_and_set ((void **) datalist, DATALIST_LOCK_BIT, ptr, G_DATALIST_FLAGS_MASK_INTERNAL);
+  g_pointer_bit_unlock_and_set ((void **) datalist, _G_DATALIST_LOCK_BIT, ptr, G_DATALIST_FLAGS_MASK_INTERNAL);
 }
 
 static gsize
diff --git a/glib/gdatasetprivate.h b/glib/gdatasetprivate.h
index 399310a4f..4a6d6f7fa 100644
--- a/glib/gdatasetprivate.h
+++ b/glib/gdatasetprivate.h
@@ -38,6 +38,26 @@ G_BEGIN_DECLS
 #define G_DATALIST_GET_FLAGS(datalist) \
   ((gsize) g_atomic_pointer_get (datalist) & G_DATALIST_FLAGS_MASK)
 
+#define _G_DATALIST_LOCK_BIT 2
+
+#define g_datalist_lock(datalist) \
+  G_STMT_START \
+  { \
+    GData **const _datalist = (datalist); \
+    \
+    g_pointer_bit_lock ((void **) _datalist, _G_DATALIST_LOCK_BIT); \
+  } \
+  G_STMT_END
+
+#define g_datalist_unlock(datalist) \
+  G_STMT_START \
+  { \
+    GData **const _datalist = (datalist); \
+    \
+    g_pointer_bit_unlock ((void **) _datalist, _G_DATALIST_LOCK_BIT); \
+  } \
+  G_STMT_END
+
 /*< private >
  * GDataListUpdateAtomicFunc:
  * @data: (inout) (nullable) (not optional): the existing data corresponding to

From 61aa0c3ace2fd268e84daa1113903edf2b49a0c7 Mon Sep 17 00:00:00 2001
From: Thomas Haller
Date: Wed, 6 Mar 2024 09:18:31 +0100
Subject: [PATCH 2/6] gdataset: add "already_locked" argument to
 g_datalist_id_update_atomic()

This allows the caller to take the lock on the GData first and perform
some operations. This is useful under the assumption that the caller
can find cases where calling g_datalist_id_update_atomic() is
unnecessary, but where they still need to hold the lock to make that
decision atomically. If the call to g_datalist_id_update_atomic() can
be avoided altogether, we avoid its overhead. That matters for checking
the toggle-notify in g_object_ref()/g_object_unref().

Note that with "already_locked", g_datalist_id_update_atomic() will
still unlock the GData at the end. That is because
g_datalist_id_update_atomic() may need to re-allocate the buffer, and
in that case it can do a more efficient unlock itself, instead of
leaving it to the caller. It saves an additional atomic operation,
compared to first setting the buffer and then doing a separate
g_datalist_unlock(). The usage and purpose of this parameter is special
anyway, so the few callers will be fine with this asymmetry.
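
An illustrative sketch of the intended calling pattern (the
update_is_needed() check is a hypothetical placeholder for whatever
cheap test the caller can perform while holding the lock):

    g_datalist_lock (&object->qdata);
    if (!update_is_needed (object))
      {
        g_datalist_unlock (&object->qdata);
        return;
      }
    /* already_locked=TRUE hands the lock over; the call will
     * unlock the GData before returning. */
    _g_datalist_id_update_atomic_full (&object->qdata, key_id,
                                       TRUE, callback, user_data);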
---
 glib/gdataset.c        | 20 +++++++++++++++++++-
 glib/gdatasetprivate.h |  1 +
 glib/glib-private.h    |  6 +++++-
 gobject/gobject.c      |  5 +++--
 4 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/glib/gdataset.c b/glib/gdataset.c
index aeb90ac7e..a4d7af487 100644
--- a/glib/gdataset.c
+++ b/glib/gdataset.c
@@ -1062,6 +1062,7 @@ g_datalist_id_remove_no_notify (GData **datalist,
  * g_datalist_id_update_atomic:
  * @datalist: the data list
  * @key_id: the key to add.
+ * @already_locked: whether the GData lock is already held.
  * @callback: (scope call): callback to update (set, remove, steal, update) the
  *   data.
  * @user_data: the user data for @callback.
@@ -1084,11 +1085,21 @@
  * value of the function. This is an alternative to returning a result via
  * @user_data.
  *
+ * If @already_locked is TRUE, the caller already called g_datalist_lock().
+ * In that case, g_datalist_id_update_atomic() assumes it already holds the
+ * lock and does not take the lock again. Note that in any case,
+ * g_datalist_id_update_atomic() will always unlock the GData at the end.
+ * This asymmetry exists because the update may reallocate the buffer, and
+ * it is more efficient to do that while releasing the lock. The few callers
+ * that set @already_locked to TRUE are fine with this asymmetry and want to
+ * unlock afterwards anyway.
+ *
 * Returns: the value returned by @callback.
  */
 gpointer
 g_datalist_id_update_atomic (GData **datalist,
                              GQuark key_id,
+                             gboolean already_locked,
                              GDataListUpdateAtomicFunc callback,
                              gpointer user_data)
@@ -1102,7 +1113,14 @@ g_datalist_id_update_atomic (GData **datalist,
   g_return_val_if_fail (datalist, NULL);
   g_return_val_if_fail (key_id != 0, NULL);
 
-  d = g_datalist_lock_and_get (datalist);
+  if (G_UNLIKELY (already_locked))
+    {
+      d = G_DATALIST_GET_POINTER (datalist);
+    }
+  else
+    {
+      d = g_datalist_lock_and_get (datalist);
+    }
 
   data = datalist_find (d, key_id, &idx);
diff --git a/glib/gdatasetprivate.h b/glib/gdatasetprivate.h
index 4a6d6f7fa..872a7cd42 100644
--- a/glib/gdatasetprivate.h
+++ b/glib/gdatasetprivate.h
@@ -78,6 +78,7 @@ typedef gpointer (*GDataListUpdateAtomicFunc) (gpointer *data,
 
 gpointer g_datalist_id_update_atomic (GData **datalist,
                                       GQuark key_id,
+                                      gboolean already_locked,
                                       GDataListUpdateAtomicFunc callback,
                                       gpointer user_data);
 
diff --git a/glib/glib-private.h b/glib/glib-private.h
index 4dd0c2020..94ea7e857 100644
--- a/glib/glib-private.h
+++ b/glib/glib-private.h
@@ -305,6 +305,7 @@ typedef struct {
 
   gpointer (*g_datalist_id_update_atomic) (GData **datalist,
                                            GQuark key_id,
+                                           gboolean already_locked,
                                            GDataListUpdateAtomicFunc callback,
                                            gpointer user_data);
 
@@ -345,7 +346,10 @@ guint g_uint_hash (gconstpointer v);
 #endif
 
 /* Convenience wrapper to call private g_datalist_id_update_atomic() function.
  */
+#define _g_datalist_id_update_atomic_full(datalist, key_id, already_locked, callback, user_data) \
+  (GLIB_PRIVATE_CALL (g_datalist_id_update_atomic) ((datalist), (key_id), (already_locked), (callback), (user_data)))
+
 #define _g_datalist_id_update_atomic(datalist, key_id, callback, user_data) \
-  (GLIB_PRIVATE_CALL (g_datalist_id_update_atomic) ((datalist), (key_id), (callback), (user_data)))
+  _g_datalist_id_update_atomic_full ((datalist), (key_id), FALSE, (callback), (user_data))
 
 #endif /* __GLIB_PRIVATE_H__ */
diff --git a/gobject/gobject.c b/gobject/gobject.c
index 11091cf18..0bd584fcd 100644
--- a/gobject/gobject.c
+++ b/gobject/gobject.c
@@ -234,10 +234,11 @@ static GQuark quark_weak_locations = 0;
 
 static gpointer (*_local_g_datalist_id_update_atomic) (GData **datalist,
                                                        GQuark key_id,
+                                                       gboolean already_locked,
                                                        GDataListUpdateAtomicFunc callback,
                                                        gpointer user_data) = NULL;
-#undef _g_datalist_id_update_atomic
-#define _g_datalist_id_update_atomic(...) ((_local_g_datalist_id_update_atomic) (__VA_ARGS__))
+#undef _g_datalist_id_update_atomic_full
+#define _g_datalist_id_update_atomic_full(...) ((_local_g_datalist_id_update_atomic) (__VA_ARGS__))
 
 #if HAVE_PRIVATE
 G_ALWAYS_INLINE static inline GObjectPrivate *

From 2fe2f2f9b766e993a7661ea4f8ad978467c898b6 Mon Sep 17 00:00:00 2001
From: Thomas Haller
Date: Tue, 30 Jan 2024 12:48:19 +0100
Subject: [PATCH 3/6] gobject: rework toggle_refs_check_and_ref() to use
 g_datalist_id_update_atomic()

This is the first step to dropping the OPTIONAL_BIT_LOCK_TOGGLE_REFS
lock. That will happen soon after.

Note that toggle_refs_check_and_ref_or_deref() is called from
g_object_ref()/g_object_unref() when the ref count toggles between 1
and 2, and it is called frequently. Also consider that most objects
have no toggle references and would rather avoid the overhead.

We expect that the object has no toggle references, so the fast path
only takes a g_datalist_lock() first, updates the ref count and checks
OBJECT_HAS_TOGGLE_REF(). If we have no toggle reference, we avoid the
call to g_datalist_id_update_atomic() -- which would first need to
search the datalist for the quark_toggle_refs key.

Only if OBJECT_HAS_TOGGLE_REF() do we call
g_datalist_id_update_atomic(). At that point, we pass "already_locked"
to indicate that we already hold the lock, and we avoid the overhead of
taking it a second time.

In this commit, the fast path actually gets worse, because previously
we already had the OBJECT_HAS_TOGGLE_REF() optimization and only needed
the object_bit_lock(). Now we additionally take the g_datalist_lock().
The object_bit_lock() will go away next, which brings the fast path
back to taking only one bit lock.

You might think that the fast path then is still worse, because
previously we took a distinct lock (object_bit_lock()), while now even
more places go through the GData lock. Taking different locks could
theoretically allow for higher parallelism. However, in both cases
these are per-object locks, so it would be very hard to find a usage
where the distinct locks previously achieved higher parallelism (e.g. a
concurrent g_weak_ref_get() vs. toggling the last reference). And that
is only the fast path in toggle_refs_check_and_ref_or_deref(); at all
other places, we will soon take one lock less.

This also fixes a regression of commit abdb58007a5e ('gobject: drop
OPTIONAL_BIT_LOCK_NOTIFY lock'). Note the code comment in
toggle_refs_check_and_ref_or_deref() about how it relies on holding the
same lock that is also taken while destroying the object. This was no
longer the case since the OPTIONAL_BIT_LOCK_NOTIFY lock was replaced by
the GData lock. This is fixed by this commit, because again the same
lock is taken.
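
For reference, the callback invoked under the lock follows the
GDataListUpdateAtomicFunc contract from gdatasetprivate.h:

    gpointer (*GDataListUpdateAtomicFunc) (gpointer       *data,
                                           GDestroyNotify *destroy_notify,
                                           gpointer        user_data);

where *data is the entry's current value (NULL if the key is absent)
and may be updated in place while the GData lock is held. That is why
toggle_refs_check_and_ref_cb() below can inspect the ToggleRefStack
without taking any further lock.
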
Fixes: abdb58007a5e ('gobject: drop OPTIONAL_BIT_LOCK_NOTIFY lock')
---
 gobject/gobject.c | 86 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 62 insertions(+), 24 deletions(-)

diff --git a/gobject/gobject.c b/gobject/gobject.c
index 0bd584fcd..5a1ccd69e 100644
--- a/gobject/gobject.c
+++ b/gobject/gobject.c
@@ -4299,6 +4299,30 @@ typedef struct {
   } toggle_refs[1]; /* flexible array */
 } ToggleRefStack;
 
+static gpointer
+toggle_refs_check_and_ref_cb (gpointer *data,
+                              GDestroyNotify *destroy_notify,
+                              gpointer user_data)
+{
+  GToggleNotify *toggle_notify = ((gpointer *) user_data)[0];
+  gpointer *toggle_data = ((gpointer *) user_data)[1];
+  ToggleRefStack *tstack = *data;
+
+  if (G_UNLIKELY (tstack->n_toggle_refs != 1))
+    {
+      /* We only reach this line after we checked that the ref-count was 1
+       * and that OBJECT_HAS_TOGGLE_REF(). We expect that there is exactly
+       * one toggle reference registered. */
+      g_critical ("Unexpected number of toggle-refs. g_object_add_toggle_ref() must be paired with g_object_remove_toggle_ref()");
+      *toggle_notify = NULL;
+      return NULL;
+    }
+
+  *toggle_notify = tstack->toggle_refs[0].notify;
+  *toggle_data = tstack->toggle_refs[0].data;
+  return NULL;
+}
+
 G_ALWAYS_INLINE static inline gboolean
 toggle_refs_check_and_ref_or_deref (GObject *object,
                                     gboolean is_ref,
@@ -4319,6 +4343,15 @@ toggle_refs_check_and_ref_or_deref (GObject *object,
 
   object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
 
+  /* This is called from g_object_ref()/g_object_unref() and is a hot path.
+   *
+   * We hack the GData open and take the g_datalist_lock() outside. Then we
+   * perform checks, which most likely will tell us that there are no toggle
+   * notifications. Only if we have a toggle notification, we call
+   * _g_datalist_id_update_atomic_full(). */
+
+  g_datalist_lock (&object->qdata);
+
   /* @old_ref is mainly an (out) parameter. On failure to compare-and-exchange,
    * we MUST return the new value which the caller will use for retry.*/
@@ -4335,32 +4368,36 @@ toggle_refs_check_and_ref_or_deref (GObject *object,
    * After this point with is_ref=FALSE and success=TRUE, @object must no
    * longer be accessed.
    *
-   * The exception is here. While we still hold the object lock, we know that
-   * @object could not be destroyed, because g_object_unref() also needs to
-   * acquire the same lock during g_object_notify_queue_freeze(). Thus, we know
-   * object cannot yet be destroyed and we can access it until the unlock
-   * below. */
+   * The exception is here. While we still hold the lock, we know that @object
+   * could not be destroyed, because g_object_unref() also needs to acquire the
+   * same lock before finalizing @object. Thus, we know object cannot yet be
+   * destroyed and we can access it until the unlock below. */
 
-  if (success && OBJECT_HAS_TOGGLE_REF (object))
+  if (G_UNLIKELY (!success))
     {
-      ToggleRefStack *tstackptr;
-
-      tstackptr = g_datalist_id_get_data (&object->qdata, quark_toggle_refs);
-
-      if (tstackptr->n_toggle_refs != 1)
-        {
-          g_critical ("Unexpected number of toggle-refs. g_object_add_toggle_ref() must be paired with g_object_remove_toggle_ref()");
-        }
-      else
-        {
-          *toggle_notify = tstackptr->toggle_refs[0].notify;
-          *toggle_data = tstackptr->toggle_refs[0].data;
-        }
+      g_datalist_unlock (&object->qdata);
+      object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
+      return FALSE;
     }
 
-  object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
+  if (G_LIKELY (!OBJECT_HAS_TOGGLE_REF (object)))
+    {
+      g_datalist_unlock (&object->qdata);
+      object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
+      return TRUE;
+    }
 
-  return success;
+  /* Slow path. We have a toggle reference. Call into g_datalist_id_update_atomic().
+   *
+   * Note that _g_datalist_id_update_atomic_full() will release the lock! */
+  _g_datalist_id_update_atomic_full (&object->qdata,
+                                     quark_toggle_refs,
+                                     TRUE,
+                                     toggle_refs_check_and_ref_cb,
+                                     (gpointer[2]){ toggle_notify, toggle_data });
+
+  object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
+  return TRUE;
 }
 
 /**
@@ -4750,9 +4787,10 @@ retry_beginning:
    * notification queue gets automatically drained when g_object_finalize() is
    * reached and the qdata is cleared.
    *
-   * Important: Note that g_object_notify_queue_freeze() takes a object_bit_lock(),
-   * which happens to be the same lock that is also taken by toggle_refs_check_and_ref(),
-   * that is very important. See also the code comment in toggle_refs_check_and_ref().
+   * Important: Note that g_object_notify_queue_freeze() takes an object lock.
+   * That happens to be the same lock that is also taken by
+   * toggle_refs_check_and_ref_or_deref(), which is very important. See also
+   * the code comment in toggle_refs_check_and_ref_or_deref().
    */
   g_object_notify_queue_freeze (object, TRUE);
   nqueue_is_frozen = TRUE;

From f438ef68025ab66f025f140811890bff9b275b26 Mon Sep 17 00:00:00 2001
From: Thomas Haller
Date: Tue, 30 Jan 2024 12:48:19 +0100
Subject: [PATCH 4/6] gobject: rework g_object_add_toggle_ref() to use
 g_datalist_id_update_atomic()
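
The growth arithmetic for the flexible array is unchanged:
ToggleRefStack declares toggle_refs[1], so storing n entries takes

    sizeof (ToggleRefStack) + sizeof (ToggleRefTuple) * (n - 1)

bytes. In toggle_refs_ref_cb(), i is the previous n_toggle_refs (that
is, n - 1 after the post-increment), which is why the g_realloc() call
multiplies by i.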
---
 gobject/gobject.c | 88 +++++++++++++++++++++++++++++++----------------
 1 file changed, 58 insertions(+), 30 deletions(-)

diff --git a/gobject/gobject.c b/gobject/gobject.c
index 5a1ccd69e..ff80b4826 100644
--- a/gobject/gobject.c
+++ b/gobject/gobject.c
@@ -4291,12 +4291,22 @@ g_object_force_floating (GObject *object)
   floating_flag_handler (object, +1);
 }
 
-typedef struct {
+typedef struct
+{
+  GToggleNotify notify;
+  gpointer data;
+} ToggleRefTuple;
+
+typedef struct
+{
+  GObject *object;
+  ToggleRefTuple tuple;
+} ToggleRefCallbackData;
+
+typedef struct
+{
   guint n_toggle_refs;
-  struct {
-    GToggleNotify notify;
-    gpointer data;
-  } toggle_refs[1]; /* flexible array */
+  ToggleRefTuple toggle_refs[1]; /* flexible array */
 } ToggleRefStack;
 
 static gpointer
@@ -4400,6 +4410,38 @@ toggle_refs_check_and_ref_or_deref (GObject *object,
   return TRUE;
 }
 
+static gpointer
+toggle_refs_ref_cb (gpointer *data,
+                    GDestroyNotify *destroy_notify,
+                    gpointer user_data)
+{
+  ToggleRefCallbackData *trdata = user_data;
+  ToggleRefStack *tstack = *data;
+  guint i;
+
+  if (!tstack)
+    {
+      tstack = g_new (ToggleRefStack, 1);
+      tstack->n_toggle_refs = 1;
+      i = 0;
+
+      g_datalist_set_flags (&trdata->object->qdata, OBJECT_HAS_TOGGLE_REF_FLAG);
+
+      *destroy_notify = g_free;
+    }
+  else
+    {
+      i = tstack->n_toggle_refs++;
+      tstack = g_realloc (tstack, sizeof (*tstack) + sizeof (tstack->toggle_refs[0]) * i);
+    }
+
+  *data = tstack;
+
+  tstack->toggle_refs[i] = trdata->tuple;
+
+  return NULL;
+}
+
 /**
  * g_object_add_toggle_ref: (skip)
  * @object: a #GObject
@@ -4451,9 +4493,6 @@ g_object_add_toggle_ref (GObject *object,
                          GToggleNotify notify,
                          gpointer data)
 {
-  ToggleRefStack *tstack;
-  guint i;
-
   g_return_if_fail (G_IS_OBJECT (object));
   g_return_if_fail (notify != NULL);
   g_return_if_fail (g_atomic_int_get (&object->ref_count) >= 1);
@@ -4461,29 +4500,18 @@ g_object_add_toggle_ref (GObject *object,
   g_object_ref (object);
 
   object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
-  tstack = g_datalist_id_remove_no_notify (&object->qdata, quark_toggle_refs);
-  if (tstack)
-    {
-      i = tstack->n_toggle_refs++;
-      /* allocate i = tstate->n_toggle_refs - 1 positions beyond the 1 declared
-       * in tstate->toggle_refs */
-      tstack = g_realloc (tstack, sizeof (*tstack) + sizeof (tstack->toggle_refs[0]) * i);
-    }
-  else
-    {
-      tstack = g_renew (ToggleRefStack, NULL, 1);
-      tstack->n_toggle_refs = 1;
-      i = 0;
-    }
-
-  /* Set a flag for fast lookup after adding the first toggle reference */
-  if (tstack->n_toggle_refs == 1)
-    g_datalist_set_flags (&object->qdata, OBJECT_HAS_TOGGLE_REF_FLAG);
-
-  tstack->toggle_refs[i].notify = notify;
-  tstack->toggle_refs[i].data = data;
-  g_datalist_id_set_data_full (&object->qdata, quark_toggle_refs, tstack,
-                               (GDestroyNotify)g_free);
+
+  _g_datalist_id_update_atomic (&object->qdata,
+                                quark_toggle_refs,
+                                toggle_refs_ref_cb,
+                                &((ToggleRefCallbackData){
+                                    .object = object,
+                                    .tuple = {
+                                        .notify = notify,
+                                        .data = data,
+                                    },
+                                }));
+
   object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
 }

From 588dbc569d035e8759229ea867c3ba9c3980364f Mon Sep 17 00:00:00 2001
From: Thomas Haller
Date: Tue, 30 Jan 2024 12:48:19 +0100
Subject: [PATCH 5/6] gobject: rework g_object_remove_toggle_ref() to use
 g_datalist_id_update_atomic()
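
As before, passing data == NULL removes the first toggle reference
whose notify matches. Removal keeps the array dense by moving the last
entry into the freed slot:

    tstack->toggle_refs[i] = tstack->toggle_refs[tstack->n_toggle_refs];

where n_toggle_refs was already decremented, so it indexes the former
last entry.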
---
 gobject/gobject.c | 88 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 61 insertions(+), 27 deletions(-)

diff --git a/gobject/gobject.c b/gobject/gobject.c
index ff80b4826..8a1ef95f1 100644
--- a/gobject/gobject.c
+++ b/gobject/gobject.c
@@ -4515,6 +4515,46 @@ g_object_add_toggle_ref (GObject *object,
   object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
 }
 
+static gpointer
+toggle_refs_unref_cb (gpointer *data,
+                      GDestroyNotify *destroy_notify,
+                      gpointer user_data)
+{
+  ToggleRefCallbackData *trdata = user_data;
+  ToggleRefStack *tstack = *data;
+  gboolean found_one = FALSE;
+  guint i;
+
+  if (tstack)
+    {
+      for (i = 0; i < tstack->n_toggle_refs; i++)
+        {
+          if (tstack->toggle_refs[i].notify == trdata->tuple.notify &&
+              (tstack->toggle_refs[i].data == trdata->tuple.data || trdata->tuple.data == NULL))
+            {
+              found_one = TRUE;
+              break;
+            }
+        }
+    }
+
+  if (G_LIKELY (found_one))
+    {
+
+      tstack->n_toggle_refs -= 1;
+      if (tstack->n_toggle_refs == 0)
+        {
+          g_datalist_unset_flags (&trdata->object->qdata, OBJECT_HAS_TOGGLE_REF_FLAG);
+          g_free (tstack);
+          *data = NULL;
+        }
+      else if (i != tstack->n_toggle_refs)
+        tstack->toggle_refs[i] = tstack->toggle_refs[tstack->n_toggle_refs];
+    }
+
+  return GINT_TO_POINTER (found_one);
+}
+
 /**
  * g_object_remove_toggle_ref: (skip)
  * @object: a #GObject
@@ -4539,42 +4579,36 @@ g_object_remove_toggle_ref (GObject *object,
                             GToggleNotify notify,
                             gpointer data)
 {
-  ToggleRefStack *tstack;
-  gboolean found_one = FALSE;
+  gboolean found_one;
+  gpointer result;
 
   g_return_if_fail (G_IS_OBJECT (object));
   g_return_if_fail (notify != NULL);
 
   object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
-  tstack = g_datalist_id_get_data (&object->qdata, quark_toggle_refs);
-  if (tstack)
-    {
-      guint i;
-
-      for (i = 0; i < tstack->n_toggle_refs; i++)
-        if (tstack->toggle_refs[i].notify == notify &&
-            (tstack->toggle_refs[i].data == data || data == NULL))
-          {
-            found_one = TRUE;
-            tstack->n_toggle_refs -= 1;
-            if (i != tstack->n_toggle_refs)
-              tstack->toggle_refs[i] = tstack->toggle_refs[tstack->n_toggle_refs];
-
-            if (tstack->n_toggle_refs == 0)
-              {
-                g_datalist_unset_flags (&object->qdata, OBJECT_HAS_TOGGLE_REF_FLAG);
-                g_datalist_id_set_data_full (&object->qdata, quark_toggle_refs, NULL, NULL);
-              }
-
-            break;
-          }
-    }
+
+  result = _g_datalist_id_update_atomic (&object->qdata,
+                                         quark_toggle_refs,
+                                         toggle_refs_unref_cb,
+                                         &((ToggleRefCallbackData){
+                                             .object = object,
+                                             .tuple = {
+                                                 .notify = notify,
+                                                 .data = data,
+                                             },
+                                         }));
+
   object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
 
-  if (found_one)
-    g_object_unref (object);
-  else
-    g_critical ("%s: couldn't find toggle ref %p(%p)", G_STRFUNC, notify, data);
+  found_one = GPOINTER_TO_INT (result);
+
+  if (!found_one)
+    {
+      g_critical ("%s: couldn't find toggle ref %p(%p)", G_STRFUNC, notify, data);
+      return;
+    }
+
+  g_object_unref (object);
 }

From e6a1e78029d6fe199830fc5d7a0049b432625d9d Mon Sep 17 00:00:00 2001
From: Thomas Haller
Date: Tue, 30 Jan 2024 13:09:19 +0100
Subject: [PATCH 6/6] gobject: drop OPTIONAL_BIT_LOCK_TOGGLE_REFS lock

It was replaced by the GData lock and g_datalist_id_update_atomic().

Note that we introduced object_bit_lock() to replace global mutexes.
Now, object_bit_lock() is itself replaced by using the GData lock via
g_datalist_id_update_atomic(). This means that all mutex-like locks on
the GObject now go through the GData lock on the GObject's qdata.

For the moment, the object_bit_lock() API is still here but unused. It
will be dropped in a separate commit.
---
 gobject/gobject.c | 19 +++----------------
 1 file changed, 3 insertions(+), 16 deletions(-)

diff --git a/gobject/gobject.c b/gobject/gobject.c
index 8a1ef95f1..f8aa66a4c 100644
--- a/gobject/gobject.c
+++ b/gobject/gobject.c
@@ -124,7 +124,7 @@ enum {
  * parallel as possible. The alternative would be to add individual locking
  * integers to GObjectPrivate. But increasing memory usage for more parallelism
  * (per-object!) is not worth it. */
-#define OPTIONAL_BIT_LOCK_TOGGLE_REFS 1
+#define OPTIONAL_BIT_LOCK_UNUSED 1
 
 #if SIZEOF_INT == 4 && GLIB_SIZEOF_VOID_P >= 8
 #define HAVE_OPTIONAL_FLAGS_IN_GOBJECT 1
@@ -624,7 +624,7 @@ weak_ref_data_has (GObject *object, WeakRefData *wrdata, WeakRefData **out_new_w
 static G_THREAD_LOCAL guint _object_bit_is_locked;
 #endif
 
-static void
+G_GNUC_UNUSED static void
 object_bit_lock (GObject *object, guint lock_bit)
 {
 #if defined(G_ENABLE_DEBUG) && defined(G_THREAD_LOCAL)
@@ -639,7 +639,7 @@ object_bit_lock (GObject *object, guint lock_bit)
   g_bit_lock ((gint *) object_get_optional_flags_p (object), _OPTIONAL_BIT_LOCK);
 }
 
-static void
+G_GNUC_UNUSED static void
 object_bit_unlock (GObject *object, guint lock_bit)
 {
 #if defined(G_ENABLE_DEBUG) && defined(G_THREAD_LOCAL)
@@ -4351,8 +4351,6 @@ toggle_refs_check_and_ref_or_deref (GObject *object,
   *toggle_notify = NULL;
   *toggle_data = NULL;
 
-  object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
-
   /* This is called from g_object_ref()/g_object_unref() and is a hot path.
    *
    * We hack the GData open and take the g_datalist_lock() outside. Then we
@@ -4386,14 +4384,12 @@ toggle_refs_check_and_ref_or_deref (GObject *object,
   if (G_UNLIKELY (!success))
     {
       g_datalist_unlock (&object->qdata);
-      object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
       return FALSE;
     }
 
   if (G_LIKELY (!OBJECT_HAS_TOGGLE_REF (object)))
     {
       g_datalist_unlock (&object->qdata);
-      object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
       return TRUE;
     }
@@ -4406,7 +4402,6 @@ toggle_refs_check_and_ref_or_deref (GObject *object,
                                      toggle_refs_check_and_ref_cb,
                                      (gpointer[2]){ toggle_notify, toggle_data });
 
-  object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
   return TRUE;
 }
 
@@ -4499,8 +4494,6 @@ g_object_add_toggle_ref (GObject *object,
 
   g_object_ref (object);
 
-  object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
-
   _g_datalist_id_update_atomic (&object->qdata,
                                 quark_toggle_refs,
                                 toggle_refs_ref_cb,
@@ -4511,8 +4504,6 @@ g_object_add_toggle_ref (GObject *object,
                                     .data = data,
                                   },
                                 }));
-
-  object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
 }
 
 static gpointer
@@ -4585,8 +4576,6 @@ g_object_remove_toggle_ref (GObject *object,
   g_return_if_fail (G_IS_OBJECT (object));
   g_return_if_fail (notify != NULL);
 
-  object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
-
   result = _g_datalist_id_update_atomic (&object->qdata,
                                          quark_toggle_refs,
                                          toggle_refs_unref_cb,
@@ -4598,8 +4587,6 @@ g_object_remove_toggle_ref (GObject *object,
                                            },
                                          }));
 
-  object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
-
   found_one = GPOINTER_TO_INT (result);
 
   if (!found_one)