Merge branch 'th/gobject-toggle-refs-check' into 'main'

[th/gobject-toggle-refs-check] Fix critical warning for toggle notifications in g_object_ref()/g_object_unref()

See merge request GNOME/glib!3959
This commit is contained in:
Philip Withnall 2024-03-18 14:21:29 +00:00
commit 9d9029c50a
2 changed files with 74 additions and 41 deletions

View File

@@ -342,6 +342,10 @@ g_bit_unlock (volatile gint *address,
g_atomic_int_and (address_nonvolatile, ~mask); g_atomic_int_and (address_nonvolatile, ~mask);
#endif #endif
/* Warning: unlocking may allow another thread to proceed and destroy the
* memory that @address points to. We thus must not dereference it anymore.
*/
{ {
guint class = bit_lock_contended_class (address_nonvolatile); guint class = bit_lock_contended_class (address_nonvolatile);
@@ -599,6 +603,10 @@ void
g_atomic_pointer_and (pointer_address, ~mask); g_atomic_pointer_and (pointer_address, ~mask);
#endif #endif
/* Warning: unlocking may allow another thread to proceed and destroy the
* memory that @address points to. We thus must not dereference it anymore.
*/
{ {
guint class = bit_lock_contended_class (address_nonvolatile); guint class = bit_lock_contended_class (address_nonvolatile);

View File

@@ -644,6 +644,9 @@ object_bit_unlock (GObject *object, guint lock_bit)
_object_bit_is_locked = 0; _object_bit_is_locked = 0;
#endif #endif
/* Warning: after unlock, @object may be a dangling pointer (destroyed on
* another thread) and must not be touched anymore. */
g_bit_unlock ((gint *) object_get_optional_flags_p (object), _OPTIONAL_BIT_LOCK); g_bit_unlock ((gint *) object_get_optional_flags_p (object), _OPTIONAL_BIT_LOCK);
} }
@@ -3949,25 +3952,68 @@ typedef struct {
} toggle_refs[1]; /* flexible array */ } toggle_refs[1]; /* flexible array */
} ToggleRefStack; } ToggleRefStack;
static GToggleNotify G_ALWAYS_INLINE static inline gboolean
toggle_refs_get_notify_unlocked (GObject *object, toggle_refs_check_and_ref_or_deref (GObject *object,
gpointer *out_data) gboolean is_ref,
gint *old_ref,
GToggleNotify *toggle_notify,
gpointer *toggle_data)
{ {
ToggleRefStack *tstackptr; const gint ref_curr = is_ref ? 1 : 2;
const gint ref_next = is_ref ? 2 : 1;
gboolean success;
if (!OBJECT_HAS_TOGGLE_REF (object)) #if G_ENABLE_DEBUG
return NULL; g_assert (ref_curr == *old_ref);
#endif
*toggle_notify = NULL;
*toggle_data = NULL;
object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
/* @old_ref is mainly an (out) parameter. On failure to compare-and-exchange,
* we MUST return the new value which the caller will use for retry.*/
success = g_atomic_int_compare_and_exchange_full ((int *) &object->ref_count,
ref_curr,
ref_next,
old_ref);
/* Note that if we are called during g_object_unref (@is_ref set to FALSE),
* then we drop the ref count from 2 to 1 and give up our reference. We thus
* no longer hold a strong reference and another thread may race against
* destroying the object.
*
* After this point with is_ref=FALSE and success=TRUE, @object must no
* longer be accessed.
*
* The exception is here. While we still hold the object lock, we know that
* @object could not be destroyed, because g_object_unref() also needs to
* acquire the same lock during g_object_notify_queue_freeze(). Thus, we know
* object cannot yet be destroyed and we can access it until the unlock
* below. */
if (success && OBJECT_HAS_TOGGLE_REF (object))
{
ToggleRefStack *tstackptr;
tstackptr = g_datalist_id_get_data (&object->qdata, quark_toggle_refs); tstackptr = g_datalist_id_get_data (&object->qdata, quark_toggle_refs);
if (tstackptr->n_toggle_refs != 1) if (tstackptr->n_toggle_refs != 1)
{ {
g_critical ("Unexpected number of toggle-refs. g_object_add_toggle_ref() must be paired with g_object_remove_toggle_ref()"); g_critical ("Unexpected number of toggle-refs. g_object_add_toggle_ref() must be paired with g_object_remove_toggle_ref()");
return NULL; }
else
{
*toggle_notify = tstackptr->toggle_refs[0].notify;
*toggle_data = tstackptr->toggle_refs[0].data;
}
} }
*out_data = tstackptr->toggle_refs[0].data; object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
return tstackptr->toggle_refs[0].notify;
return success;
} }
/** /**
@@ -4148,15 +4194,8 @@ retry:
} }
else if (old_ref == 1) else if (old_ref == 1)
{ {
gboolean do_retry;
/* With ref count 1, check whether we need to emit a toggle notification. */ /* With ref count 1, check whether we need to emit a toggle notification. */
object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS); if (!toggle_refs_check_and_ref_or_deref (object, TRUE, &old_ref, &toggle_notify, &toggle_data))
toggle_notify = toggle_refs_get_notify_unlocked (object, &toggle_data);
do_retry = !g_atomic_int_compare_and_exchange_full ((int *) &object->ref_count,
old_ref, old_ref + 1, &old_ref);
object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
if (do_retry)
goto retry; goto retry;
} }
else else
@@ -4281,7 +4320,6 @@ g_object_unref (gpointer _object)
GToggleNotify toggle_notify; GToggleNotify toggle_notify;
gpointer toggle_data; gpointer toggle_data;
GObjectNotifyQueue *nqueue; GObjectNotifyQueue *nqueue;
gboolean do_retry;
GType obj_gtype; GType obj_gtype;
g_return_if_fail (G_IS_OBJECT (object)); g_return_if_fail (G_IS_OBJECT (object));
@@ -4330,18 +4368,8 @@ retry_beginning:
* *
* We need to take a lock, to avoid races. */ * We need to take a lock, to avoid races. */
object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS); if (!toggle_refs_check_and_ref_or_deref (object, FALSE, &old_ref, &toggle_notify, &toggle_data))
toggle_notify = toggle_refs_get_notify_unlocked (object, &toggle_data);
if (!g_atomic_int_compare_and_exchange_full ((int *) &object->ref_count,
old_ref, old_ref - 1, &old_ref))
{
object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
goto retry_beginning; goto retry_beginning;
}
object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
/* Beware: object might be a dangling pointer. */ /* Beware: object might be a dangling pointer. */
TRACE (GOBJECT_OBJECT_UNREF (object, obj_gtype, old_ref)); TRACE (GOBJECT_OBJECT_UNREF (object, obj_gtype, old_ref));
@@ -4374,6 +4402,10 @@ retry_beginning:
* notifications. If the instance gets through to finalize(), the * notifications. If the instance gets through to finalize(), the
* notification queue gets automatically drained when g_object_finalize() is * notification queue gets automatically drained when g_object_finalize() is
* reached and the qdata is cleared. * reached and the qdata is cleared.
*
* Important: Note that g_object_notify_queue_freeze() takes a object_bit_lock(),
* which happens to be the same lock that is also taken by toggle_refs_check_and_ref(),
* that is very important. See also the code comment in toggle_refs_check_and_ref().
*/ */
nqueue = g_object_notify_queue_freeze (object); nqueue = g_object_notify_queue_freeze (object);
@@ -4425,14 +4457,7 @@ retry_decrement:
* notification. Take a lock and check for that. * notification. Take a lock and check for that.
* *
* In that case, we need a lock to get the toggle notification. */ * In that case, we need a lock to get the toggle notification. */
object_bit_lock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS); if (!toggle_refs_check_and_ref_or_deref (object, FALSE, &old_ref, &toggle_notify, &toggle_data))
toggle_notify = toggle_refs_get_notify_unlocked (object, &toggle_data);
do_retry = !g_atomic_int_compare_and_exchange_full ((int *) &object->ref_count,
old_ref, old_ref - 1,
&old_ref);
object_bit_unlock (object, OPTIONAL_BIT_LOCK_TOGGLE_REFS);
if (do_retry)
goto retry_decrement; goto retry_decrement;
/* Beware: object might be a dangling pointer. */ /* Beware: object might be a dangling pointer. */