Mirror of https://gitlab.gnome.org/GNOME/glib.git, synced 2025-01-13 15:56:23 +01:00
gobject: avoid global GRWLock for weak locations in g_object_unref() in some cases
_object_unref_clear_weak_locations() is called twice during g_object_unref(): in both cases when we expect the reference count to be 1 and we are about to call dispose() or finalize(). At this point, we must check for GWeakRef to avoid a race where the ref count gets increased just then.

However, we can do better than always taking the global lock. Whenever an object is set to a GWeakRef, set a flag OPTIONAL_FLAG_EVER_HAD_WEAK_REF on the object. Most objects are never involved with weak references and won't have this flag set. When we reach _object_unref_clear_weak_locations(), we just (atomically) checked that the ref count is one. If at this point the object never had a GWeakRef registered, we know that nobody else could have raced against obtaining another reference, so we can skip taking the lock and checking for weak locations. As most objects never have a GWeakRef registered, this avoids a significant amount of unnecessary work in _object_unref_clear_weak_locations().

This even fixes a hard-to-hit race in the do_unref=FALSE case. Previously, with do_unref=FALSE there were code paths where we avoided taking the global lock: we did so when quark_weak_locations was unset. However, that is not race-free. If we enter _object_unref_clear_weak_locations() with a ref count of 1 and one GWeakRef registered, another thread can take a strong reference and unset the GWeakRef. Then quark_weak_locations is unset, and _object_unref_clear_weak_locations() misses the fact that the ref count has been bumped to two. That is now fixed, because once OPTIONAL_FLAG_EVER_HAD_WEAK_REF is set, it sticks.

Previously, there was an optimization to first take a read lock to check whether there are weak locations to clear. It's not clear that this is worth it, because we now already have a hint that there might be a weak location. Unfortunately, GRWLock does not support lock upgrades, so we cannot take an (upgradable) read lock and upgrade it to a write lock when necessary.
parent 0c06a4b7a0
commit 092be080c5
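Before the diff, a minimal sketch of the idea using only public GLib APIs. The names ToyObject, TOY_FLAG_EVER_HAD_WEAK_REF, toy_weak_ref_set() and toy_clear_weak_locations() are made up for illustration and are not GObject's real implementation; the actual change below operates on GObject's private optional flags and the quark_weak_locations data.

#include <glib.h>

typedef struct {
  gint ref_count;          /* strong reference count */
  gint flags;              /* bit 0: sticky "ever had a weak ref" flag */
  GSList *weak_locations;  /* protected by the global weak_locations_lock */
} ToyObject;

#define TOY_FLAG_EVER_HAD_WEAK_REF (1 << 0)

/* Stand-in for GObject's global GRWLock. Statically allocated GRWLocks
 * need no explicit g_rw_lock_init(). */
static GRWLock weak_locations_lock;

static void
toy_weak_ref_set (ToyObject *obj, gpointer *location)
{
  /* Mark the object before registering the weak location. The flag is
   * never cleared again, even if all weak refs are dropped later. */
  g_atomic_int_or ((guint *) &obj->flags, TOY_FLAG_EVER_HAD_WEAK_REF);

  g_rw_lock_writer_lock (&weak_locations_lock);
  obj->weak_locations = g_slist_prepend (obj->weak_locations, location);
  g_rw_lock_writer_unlock (&weak_locations_lock);
}

/* Called when the caller just observed a ref count of 1. Returns TRUE if the
 * ref count was dropped to zero (and any weak locations were cleared). */
static gboolean
toy_clear_weak_locations (ToyObject *obj)
{
  GSList *locations;

  if (!(g_atomic_int_get (&obj->flags) & TOY_FLAG_EVER_HAD_WEAK_REF))
    {
      /* Fast path: no weak ref was ever registered, so no other thread can
       * have raced to obtain another reference. No global lock needed. */
      return g_atomic_int_compare_and_exchange (&obj->ref_count, 1, 0);
    }

  /* Slow path: drop the ref count to zero and clear the weak locations
   * atomically with respect to toy_weak_ref_set(). */
  g_rw_lock_writer_lock (&weak_locations_lock);
  if (!g_atomic_int_compare_and_exchange (&obj->ref_count, 1, 0))
    {
      /* The object was revived through a weak reference in the meantime. */
      g_rw_lock_writer_unlock (&weak_locations_lock);
      return FALSE;
    }
  locations = g_steal_pointer (&obj->weak_locations);
  g_rw_lock_writer_unlock (&weak_locations_lock);

  /* The real code would also NULL-out every registered GWeakRef here. */
  g_slist_free (locations);
  return TRUE;
}

int
main (void)
{
  ToyObject *obj = g_new0 (ToyObject, 1);
  gpointer weak_ptr = obj;

  obj->ref_count = 1;
  g_assert (toy_clear_weak_locations (obj)); /* fast path, no lock taken */

  obj->ref_count = 1;
  toy_weak_ref_set (obj, &weak_ptr);
  g_assert (toy_clear_weak_locations (obj)); /* slow path, lock taken */

  g_free (obj);
  return 0;
}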
@@ -108,6 +108,7 @@ enum {
 #define OPTIONAL_FLAG_HAS_SIGNAL_HANDLER (1 << 1) /* Set if object ever had a signal handler */
 #define OPTIONAL_FLAG_HAS_NOTIFY_HANDLER (1 << 2) /* Same, specifically for "notify" */
 #define OPTIONAL_FLAG_LOCK (1 << 3) /* _OPTIONAL_BIT_LOCK */
+#define OPTIONAL_FLAG_EVER_HAD_WEAK_REF (1 << 4) /* whether on the object ever g_weak_ref_set() was called. */
 
 /* We use g_bit_lock(), which only supports one lock per integer.
  *
@@ -3841,77 +3842,58 @@ gpointer
 static gboolean
 _object_unref_clear_weak_locations (GObject *object, gint *p_old_ref, gboolean do_unref)
 {
-  GSList **weak_locations;
+  gboolean success;
+
+  /* Fast path, for objects that never had a GWeakRef registered. */
+  if (!(object_get_optional_flags (object) & OPTIONAL_FLAG_EVER_HAD_WEAK_REF))
+    {
+      /* The caller previously just checked atomically that the ref-count was
+       * one.
+       *
+       * At this point still, @object never ever had a GWeakRef registered.
+       * That means, nobody else holds a strong reference and also nobody else
+       * can hold a weak reference, to race against obtaining another
+       * reference. We are good to proceed. */
+      if (do_unref)
+        {
+          if (!g_atomic_int_compare_and_exchange ((gint *) &object->ref_count, 1, 0))
+            {
+#if G_ENABLE_DEBUG
+              g_assert_not_reached ();
+#endif
+            }
+        }
+      return TRUE;
+    }
+
+  /* Slow path. We must obtain a lock to atomically release weak references and
+   * check that the ref count is as expected. */
+
+  g_rw_lock_writer_lock (&weak_locations_lock);
 
   if (do_unref)
     {
-      gboolean unreffed = FALSE;
-
-      /* Fast path for the final unref using a read-lck only. We check whether
-       * we have weak_locations and drop ref count to zero under a reader lock. */
-
-      g_rw_lock_reader_lock (&weak_locations_lock);
-
-      weak_locations = g_datalist_id_get_data (&object->qdata, quark_weak_locations);
-      if (!weak_locations)
-        {
-          unreffed = g_atomic_int_compare_and_exchange_full ((int *) &object->ref_count,
-                                                             1, 0,
-                                                             p_old_ref);
-          g_rw_lock_reader_unlock (&weak_locations_lock);
-          return unreffed;
-        }
-
-      g_rw_lock_reader_unlock (&weak_locations_lock);
-
-      /* We have weak-locations. Note that we are here already after dispose(). That
-       * means, during dispose a GWeakRef was registered (very unusual). */
-
-      g_rw_lock_writer_lock (&weak_locations_lock);
-
-      if (!g_atomic_int_compare_and_exchange_full ((int *) &object->ref_count,
-                                                   1, 0,
-                                                   p_old_ref))
-        {
-          g_rw_lock_writer_unlock (&weak_locations_lock);
-          return FALSE;
-        }
-
-      weak_locations = g_datalist_id_remove_no_notify (&object->qdata, quark_weak_locations);
-      g_clear_pointer (&weak_locations, weak_locations_free_unlocked);
-
-      g_rw_lock_writer_unlock (&weak_locations_lock);
-      return TRUE;
+      success = g_atomic_int_compare_and_exchange_full ((gint *) &object->ref_count,
+                                                        1, 0,
+                                                        p_old_ref);
     }
-
-  weak_locations = g_datalist_id_get_data (&object->qdata, quark_weak_locations);
-  if (weak_locations != NULL)
+  else
     {
-      g_rw_lock_writer_lock (&weak_locations_lock);
-
-      *p_old_ref = g_atomic_int_get (&object->ref_count);
-      if (*p_old_ref != 1)
-        {
-          g_rw_lock_writer_unlock (&weak_locations_lock);
-          return FALSE;
-        }
-
-      weak_locations = g_datalist_id_remove_no_notify (&object->qdata, quark_weak_locations);
-      g_clear_pointer (&weak_locations, weak_locations_free_unlocked);
-
-      g_rw_lock_writer_unlock (&weak_locations_lock);
-      return TRUE;
+      *p_old_ref = g_atomic_int_get ((gint *) &object->ref_count);
+      success = (*p_old_ref == 1);
     }
 
-  /* We don't need to re-fetch p_old_ref or check that it's still 1. The caller
-   * did that already. We are good.
-   *
-   * Note that in this case we fetched old_ref and weak_locations separately,
-   * without a lock. But this is fine. We are still before calling dispose().
-   * If there is a race at this point, the same race can happen between
-   * _object_unref_clear_weak_locations() and dispose() call. That is handled
-   * just fine. */
-  return TRUE;
+  if (success)
+    {
+      GSList **weak_locations;
+
+      weak_locations = g_datalist_id_remove_no_notify (&object->qdata, quark_weak_locations);
+      g_clear_pointer (&weak_locations, weak_locations_free_unlocked);
+    }
+
+  g_rw_lock_writer_unlock (&weak_locations_lock);
+
+  return success;
 }
 
 /**
@@ -4034,6 +4016,10 @@ retry_beginning:
       G_OBJECT_GET_CLASS (object)->dispose (object);
       TRACE (GOBJECT_OBJECT_DISPOSE_END (object, G_TYPE_FROM_INSTANCE (object), 1));
 
+      /* Must re-fetch old-ref. _object_unref_clear_weak_locations() relies on
+       * that. */
+      old_ref = g_atomic_int_get (&object->ref_count);
+
 retry_decrement:
   /* Here, old_ref is 1 if we just come from dispose(). If the object was resurrected,
    * we can hit `goto retry_decrement` and be here with a larger old_ref. */
@@ -4044,6 +4030,15 @@ retry_decrement:
       * queue. */
      g_object_notify_queue_thaw (object, nqueue, FALSE);
      nqueue = NULL;
+
+      /* Note at this point, @old_ref might be wrong.
+       *
+       * Also note that _object_unref_clear_weak_locations() requires that we
+       * atomically checked that @old_ref is 1. However, as @old_ref is larger
+       * than 1, that will not be called. Instead, all other code paths below,
+       * handle the possibility of a bogus @old_ref.
+       *
+       * No need to re-fetch. */
     }
 
   if (old_ref > 2)
@@ -4082,9 +4077,8 @@ retry_decrement:
       return;
     }
 
-  /* old_ref is 1, we are about to drop the reference count to zero. That is
-   * done by _object_unref_clear_weak_locations() under a weak_locations_lock
-   * so that there is no race with g_weak_ref_set(). */
+  /* old_ref is (atomically!) checked to be 1, we are about to drop the
+   * reference count to zero in _object_unref_clear_weak_locations(). */
   if (!_object_unref_clear_weak_locations (object, &old_ref, TRUE))
     goto retry_decrement;
 
@@ -5269,6 +5263,8 @@ g_weak_ref_set (GWeakRef *weak_ref,
 
       if (weak_locations == NULL)
         {
+          object_set_optional_flags (new_object, OPTIONAL_FLAG_EVER_HAD_WEAK_REF);
+
           weak_locations = g_new0 (GSList *, 1);
           g_datalist_id_set_data_full (&new_object->qdata, quark_weak_locations,
                                        weak_locations, weak_locations_free);