gobject: new thread-safe API for g_object_weak_ref()

The weak notification APIs g_object_weak_ref() and g_object_weak_unref()
are not thread-safe. This patch adds thread-safe alternatives:
g_object_weak_ref_full() and g_object_weak_unref_full().

The problem arises when other threads call g_object_run_dispose() or
g_object_unref(), making g_object_weak_unref() unsafe. The caller cannot
know whether the weak notification was successfully removed or might
still be invoked.

For example, g_object_weak_unref() will assert if no matching
notification is found. This is inherently racy. Beyond that problem,
weak notifications often involve user data that must be freed -- either
by the callback or after g_object_weak_unref(). Since you can't know
which path executed, this can lead to races and double-free errors.

The new g_object_weak_unref_full() returns a boolean to indicate whether
the notification was removed or will still be invoked, allowing safe
cleanup. This return value and acting upon it is the main solution for
thread-safety.

Note that g_object_unref() only starts disposing after ensuring there
are no more GWeakRefs and only the single caller's strong reference
remains. So you might think that no other thread could acquire a strong
reference and race by calling g_object_weak_unref(). While this makes
such a race less likely, it is not eliminated. If there are multiple
weak notifications or closures, one can pass a reference to another
thread that calls g_object_weak_unref() and enables the race. Also, with
g_object_run_dispose(), there is nothing preventing another thread from
racing against g_object_weak_unref().

g_object_weak_ref_full() and g_object_weak_unref_full() also support a
`synchronize=TRUE` flag. This ensures the callback runs while holding a
per-callback mutex, allowing g_object_weak_unref_full() to wait until
the callback has either already run or will never run.

Calling user callbacks while holding a lock can risk deadlocks, but the
risk is limited because the lock is specific to that notification.

Finally, GDestroyNotify callbacks are supported. While mostly a
convenience, they are also invoked outside the lock, which enables more
complex cleanup without the risk of deadlock.

Contrary to common wisdom, combining weak notifications with GWeakRef
does not solve this problem. Also, it forces the caller to acquire strong
references, which emits toggle notifications. When carefully using
g_object_weak_ref_full(), the caller of g_object_weak_unref_full()
can safely use a pointer to the object, without ever increasing
the reference count. A unit test shows how that is done.

This improves correctness and safety for weak references in
multithreaded contexts.

The main overhead of this change is that WeakRefTuple grew from 2
pointer sizes to 4. Every weak notification will have such an entry, so
it now takes more memory to track the registration. Otherwise, there is
no relevant overhead compared to before. Obviously, a "synchronized"
notification is more expensive, which is why it requires an opt-in
during g_object_weak_ref_full().
This commit is contained in:
Thomas Haller
2025-04-14 09:40:30 +02:00
parent 811b68695d
commit a7538492d1
3 changed files with 991 additions and 93 deletions

View File

@@ -3694,10 +3694,59 @@ g_object_disconnect (gpointer _object,
va_end (var_args);
}
/* Bit index used with the g_bit_lock() API on WeakRefSyncData.w_sync_flag. */
#define WEAK_REF_SYNC_DATA_LOCK_BIT 0

/* Per-registration synchronization state for a "synchronized" weak
 * notification. Allocated when the notification starts being emitted and
 * shared between the emitting thread and g_object_weak_unref_full(). */
typedef struct
{
  /* Reference count for this struct. NOTE(review): not atomic — appears to
   * be modified only while inside a _g_datalist_id_update_atomic() callback;
   * confirm against all callers. */
  guint w_refcount;

  /* Bitlock word. Bit WEAK_REF_SYNC_DATA_LOCK_BIT is held while the weak
   * notification callback runs, so unref can wait for it to complete. */
  gint w_sync_flag;

  /* We use bitlock, which is not re-entrant. This would mean, a
   * g_object_weak_unref_sync() from within a callback would dead-lock. We
   * avoid that by remembering here the thread which is currently notifying and
   * avoid taking the lock. */
  GThread *w_notifying_thread;

  /* The user's GDestroyNotify, moved here from WeakRefTuple.u.destroy once
   * notification starts. Invoked outside the lock. */
  GDestroyNotify destroy;
} WeakRefSyncData;
/* Free a WeakRefSyncData allocation. Callers ensure the refcount dropped to
 * zero (see _weak_ref_sync_data_unref()) or that the struct was never
 * shared. */
G_ALWAYS_INLINE static inline void
_weak_ref_sync_data_free (WeakRefSyncData *sync_data)
{
  g_free_sized (sync_data, sizeof (WeakRefSyncData));
}
/* Drop one reference from @sync_data, freeing it once the count hits zero.
 * The refcount is not atomic; callers serialize access (see the field's
 * documentation). */
G_ALWAYS_INLINE static inline void
_weak_ref_sync_data_unref (WeakRefSyncData *sync_data)
{
  guint remaining;

  remaining = sync_data->w_refcount - 1u;
  sync_data->w_refcount = remaining;

  if (remaining == 0)
    _weak_ref_sync_data_free (sync_data);
}
/* One registered weak notification, stored in the long-lived WeakRefStack. */
typedef struct
{
  GWeakNotify notify;
  gpointer data;
  union
  {
    /* A tagged union. Depending on "is_notifying", we either have "destroy" or
     * "sync_data".
     *
     * This is to save a separate pointer for "destroy" in the long-lived
     * WeakRefStack. Once we start notifying, we move it to
     * "WeakRefSyncData.destroy". */
    GDestroyNotify destroy;
    WeakRefSyncData *sync_data;
  } u;

  /* Too bad, we need two bits here, but none of the above pointers is
   * guaranteed to be malloced (and to have a certain minimal alignment), so we
   * cannot encode the bits there. Maybe you could argue that on every
   * supported architecture, function pointers must be aligned to >= 2, so we
   * would have two bits available. But that is unclear to always hold.
   *
   * Thus, add separate fields, which increases the struct size. */
  guint8 synchronize;
  guint8 is_notifying;
} WeakRefTuple;
struct _WeakRefReleaseAllState;
@@ -3718,6 +3767,23 @@ typedef struct
#define WEAK_REF_STACK_ALLOC_SIZE(alloc_size) (G_STRUCT_OFFSET (WeakRefStack, weak_refs) + sizeof (WeakRefTuple) * (alloc_size))
/* Populate @tuple as a fresh registration for (@notify, @data). The tuple
 * starts out in the not-notifying state, carrying @destroy in the tagged
 * union. Returns @tuple for call-chaining convenience. */
G_ALWAYS_INLINE static inline WeakRefTuple *
_weak_ref_tuple_init (WeakRefTuple *tuple,
                      GWeakNotify notify,
                      gpointer data,
                      GDestroyNotify destroy,
                      gboolean synchronize)
{
  tuple->notify = notify;
  tuple->data = data;
  tuple->synchronize = (synchronize != FALSE);
  tuple->is_notifying = FALSE;
  tuple->u.destroy = destroy;
  return tuple;
}
G_GNUC_UNUSED G_ALWAYS_INLINE static inline gboolean
_weak_ref_release_all_state_contains (WeakRefReleaseAllState *release_all_state, WeakRefReleaseAllState *needle)
{
@@ -3811,12 +3877,41 @@ _weak_ref_stack_update_release_all_state (WeakRefStack *wstack, guint idx)
}
}
/* Remove the tuple at @idx from the stack, compacting the array.
 *
 * If that was the last entry, the stack is freed and both *p_wstack and
 * *data are set to NULL. Otherwise, when @allow_shrink is TRUE, the backing
 * allocation may be reduced via _weak_ref_stack_maybe_shrink(). */
static void
_weak_ref_stack_remove (WeakRefStack **p_wstack, guint idx, gpointer *data, gboolean allow_shrink)
{
  WeakRefStack *wstack = *p_wstack;

  /* Keep any in-progress release-all iterations consistent with the removal
   * at @idx. */
  _weak_ref_stack_update_release_all_state (wstack, idx);

  wstack->n_weak_refs--;
  if (wstack->n_weak_refs == 0)
    {
      _weak_ref_stack_free (wstack);
      *data = NULL;
      *p_wstack = NULL;
    }
  else
    {
      if (idx != wstack->n_weak_refs)
        {
          /* Close the gap by shifting the tail entries down one slot. */
          memmove (&wstack->weak_refs[idx],
                   &wstack->weak_refs[idx + 1],
                   sizeof (wstack->weak_refs[0]) * (wstack->n_weak_refs - idx));
        }
      if (allow_shrink)
        _weak_ref_stack_maybe_shrink (p_wstack, data);
    }
}
static gpointer
g_object_weak_ref_cb (gpointer *data,
GDestroyNotify *destroy_notify,
gpointer user_data)
{
WeakRefTuple *tuple = user_data;
const WeakRefTuple *tuple = user_data;
WeakRefStack *wstack = *data;
guint i;
@@ -3858,6 +3953,80 @@ g_object_weak_ref_cb (gpointer *data,
return NULL;
}
/* Common implementation of g_object_weak_ref() and g_object_weak_ref_full():
 * register (@notify, @data) on @object under quark_weak_notifies.
 * _weak_ref_tuple_init() returns its first argument, so the freshly filled
 * stack tuple can be handed to the atomic update directly. */
static inline void
_g_object_weak_ref (GObject *object,
                    GWeakNotify notify,
                    gpointer data,
                    GDestroyNotify destroy,
                    gboolean synchronize)
{
  WeakRefTuple stack_tuple;

  _g_datalist_id_update_atomic (&object->qdata,
                                quark_weak_notifies,
                                g_object_weak_ref_cb,
                                _weak_ref_tuple_init (&stack_tuple,
                                                      notify,
                                                      data,
                                                      destroy,
                                                      synchronize));
}
/**
 * g_object_weak_ref_full: (skip)
 * @object: #GObject to reference weakly
 * @notify: callback to invoke before the object is freed
 * @data: extra data to pass to notify
 * @destroy: an optional #GDestroyNotify for @data.
 * @synchronize: whether to hold a mutex during the callback
 *
 * Adds a weak reference callback to an object. Weak references are used for
 * notification when an object is disposed. They are called "weak references"
 * because they allow you to safely hold a pointer to an object without calling
 * g_object_ref() (g_object_ref() adds a strong reference, that is, forces the
 * object to stay alive).
 *
 * Weak notifications are emitted around the time of GObject's dispose() hook.
 * That is, either during g_object_run_dispose() or during g_object_unref().
 * At that time, the object is still mostly alive and its reference count is
 * positive. While you could acquire new references or register weak
 * references, this should be avoided to let the object get fully
 * destroyed. If the object was not resurrected during dispose(), any
 * remaining notifications will be emitted right before finalize(). At that
 * time, the object is mostly dead already and new references cannot be
 * acquired anymore.
 *
 * This reference can be later removed via g_object_weak_unref() or
 * g_object_weak_unref_full().
 *
 * If @synchronize is TRUE, the @notify callback will be invoked with a
 * per-registration lock. Use this if and only if you also plan to call
 * g_object_weak_unref_full() with synchronization. If a @destroy callback is
 * provided, then it is called for @data. For more details see
 * g_object_weak_unref_full().
 *
 * Care must be taken to properly handle thread-safety. See
 * g_object_weak_unref_full() for details.
 *
 * You may combine this with GWeakRef.
 *
 * Since: 2.86
 */
void
g_object_weak_ref_full (GObject *object,
                        GWeakNotify notify,
                        gpointer data,
                        GDestroyNotify destroy,
                        gboolean synchronize)
{
  g_return_if_fail (G_IS_OBJECT (object));
  g_return_if_fail (notify != NULL);
  g_return_if_fail (g_atomic_int_get (&object->ref_count) >= 1);

  _g_object_weak_ref (object,
                      notify,
                      data,
                      destroy,
                      synchronize);
}
/**
* g_object_weak_ref: (skip)
* @object: #GObject to reference weakly
@@ -3870,10 +4039,15 @@ g_object_weak_ref_cb (gpointer *data,
* to an object without calling g_object_ref() (g_object_ref() adds a
* strong reference, that is, forces the object to stay alive).
*
* Note that the weak references created by this method are not
* thread-safe: they cannot safely be used in one thread if the
* object's last g_object_unref() might happen in another thread.
* Use #GWeakRef if thread-safety is required.
* This reference can be later removed via g_object_weak_unref()
* or g_object_weak_unref_full().
*
* Care must be taken to properly handle thread-safety. See
* g_object_weak_unref_full() for details.
*
* You may combine this with GWeakRef.
*
* For more details see g_object_weak_ref_full().
*/
void
g_object_weak_ref (GObject *object,
@@ -3884,60 +4058,288 @@ g_object_weak_ref (GObject *object,
g_return_if_fail (notify != NULL);
g_return_if_fail (g_atomic_int_get (&object->ref_count) >= 1);
_g_datalist_id_update_atomic (&object->qdata,
quark_weak_notifies,
g_object_weak_ref_cb,
&((WeakRefTuple){
.notify = notify,
.data = data,
}));
_g_object_weak_ref (object,
notify,
data,
NULL,
FALSE);
}
/* State shared between _g_object_weak_unref() and its GData callback
 * g_object_weak_unref_cb() across loop iterations. */
typedef struct
{
  /* The (notify, data, synchronize) triple the caller wants removed. */
  WeakRefTuple search_tuple;
  /* Snapshot of the matched tuple, handed back to the outer loop. */
  WeakRefTuple found_tuple;

  /* Unref specifies the weak notification by the "notify" callback and the
   * "data". This may not be unique. We want to actively remove at most one
   * matching entry, but synchronize with all entries that are currently being
   * notified.
   *
   * This flag tracks whether we did remove an entry.
   */
  gboolean did_unref;

  /* We call g_object_weak_unref_cb() in a loop. In common cases,
   * g_object_weak_unref_cb() is able to indicate to the caller whether
   * it needs to be called again.
   */
  gboolean check_more;

  /* A sync-data we finished waiting on; the next callback invocation
   * releases the reference taken for the wait. */
  WeakRefSyncData *sync_data_to_release;

  /* Lazily-initialized g_thread_self() of the calling thread, used to detect
   * re-entrant unref from inside a notify callback. */
  GThread *calling_thread;
} WeakUnrefCbData;
static gpointer
g_object_weak_unref_cb (gpointer *data,
GDestroyNotify *destroy_notify,
gpointer user_data)
{
WeakRefTuple *tuple = user_data;
WeakUnrefCbData *wu_data = user_data;
WeakRefTuple *tuple;
WeakRefStack *wstack = *data;
guint idx;
g_clear_pointer (&wu_data->sync_data_to_release,
_weak_ref_sync_data_unref);
/* First, find the tuple we want to unref... */
if (wstack)
{
for (idx = 0; idx < wstack->n_weak_refs; idx++)
{
if (wstack->weak_refs[idx].notify == tuple->notify &&
wstack->weak_refs[idx].data == tuple->data)
goto handle_weak_ref_found;
tuple = &wstack->weak_refs[idx];
if (tuple->notify != wu_data->search_tuple.notify ||
tuple->data != wu_data->search_tuple.data)
continue;
if (G_UNLIKELY (tuple->is_notifying))
{
/* This entry is already being notified. We maybe want to sync
* with it. Note that there could be multiple such matching
* entries. We sync with them all (the caller will call us in a
* loop until "check_more" is unset). */
if (!wu_data->search_tuple.synchronize)
{
/* The caller does not want to sync. Skip. */
continue;
}
if (!_g_bit_lock_is_locked (&tuple->u.sync_data->w_sync_flag, WEAK_REF_SYNC_DATA_LOCK_BIT))
{
/* This entry is already fully notified and the callback
* returned. This check is not only an optimization, it is
* important so we don't pick the same entry more than once.
* Skip. */
continue;
}
if (!wu_data->calling_thread)
wu_data->calling_thread = g_thread_self ();
if (tuple->u.sync_data->w_notifying_thread == wu_data->calling_thread)
{
/* We got an unref call from inside a notify callback (which already
* holds the lock). We cannot synchronize again. Skip. */
continue;
}
}
else
{
if (wu_data->did_unref)
{
/* We already unrefed one entry. We don't want another. Skip. */
continue;
}
if (G_UNLIKELY (wu_data->search_tuple.synchronize && !tuple->synchronize))
{
/* The caller must indicate during g_object_weak_ref_full()
* whether to synchronize, so that we setup the lock during
* release. The caller cannot request synchronize only during
* g_object_weak_unref_full(). */
g_critical ("%s: must register weak ref %p(%p) with g_object_weak_ref_full(synchronize=TRUE) to synchronize during unref",
G_STRFUNC, wu_data->search_tuple.notify, wu_data->search_tuple.data);
}
}
goto handle_weak_ref_found;
}
}
g_critical ("%s: couldn't find weak ref %p(%p)", G_STRFUNC, tuple->notify, tuple->data);
return NULL;
handle_weak_ref_found:
_weak_ref_stack_update_release_all_state (wstack, idx);
wu_data->found_tuple = *tuple;
wstack->n_weak_refs -= 1;
if (wstack->n_weak_refs == 0)
if (tuple->is_notifying)
{
_weak_ref_stack_free (wstack);
*data = NULL;
tuple->u.sync_data->w_refcount++;
}
else
{
if (idx != wstack->n_weak_refs)
{
memmove (&wstack->weak_refs[idx],
&wstack->weak_refs[idx + 1],
sizeof (wstack->weak_refs[idx]) * (wstack->n_weak_refs - idx));
}
_weak_ref_stack_remove (&wstack, idx, data, TRUE);
_weak_ref_stack_maybe_shrink (&wstack, data);
/* We found and removed our entry. We only need the outer loop to call us
* back, if there is more to synchronize. Check for that. */
wu_data->check_more = FALSE;
if (!wu_data->search_tuple.synchronize)
{
/* The caller does not want to synchronize. We don't check more. */
}
else
{
if (wstack)
{
for (idx++; idx < wstack->n_weak_refs; idx++)
{
tuple = &wstack->weak_refs[idx];
if (tuple->notify != wu_data->search_tuple.notify ||
tuple->data != wu_data->search_tuple.data)
continue;
/* There are other entries that match. We will need the caller
* to call back, maybe we want to synchronize with those too. */
wu_data->check_more = TRUE;
break;
}
}
}
}
return NULL;
return &wu_data->found_tuple;
}
/* Shared implementation of g_object_weak_unref() and
 * g_object_weak_unref_full().
 *
 * Returns TRUE if a matching registration was found and removed before its
 * notification fired. With @synchronize, additionally waits (via the
 * per-registration bitlock) until any concurrently running notification
 * callback has completed. With @steal_data, the GDestroyNotify of a removed
 * registration is suppressed. */
G_ALWAYS_INLINE static inline gboolean
_g_object_weak_unref (GObject *object,
                      GWeakNotify notify,
                      gpointer data,
                      gboolean synchronize,
                      gboolean steal_data)
{
  WeakUnrefCbData wu_data;

  wu_data.did_unref = FALSE;
  wu_data.check_more = TRUE;
  wu_data.sync_data_to_release = NULL;
  wu_data.calling_thread = NULL;
  _weak_ref_tuple_init (&wu_data.search_tuple, notify, data, NULL, synchronize);

  /* Loop until g_object_weak_unref_cb() reports there is nothing left to
   * remove or to synchronize with. */
  while (TRUE)
    {
      const WeakRefTuple *found_tuple;

      found_tuple = _g_datalist_id_update_atomic (&object->qdata,
                                                  quark_weak_notifies,
                                                  g_object_weak_unref_cb,
                                                  &wu_data);
      if (!found_tuple)
        break;

      if (found_tuple->is_notifying)
        {
          WeakRefSyncData *sync_data = found_tuple->u.sync_data;

          /* This entry's weak notification is invoked on another thread.
           * Synchronize. */
          g_bit_lock (&sync_data->w_sync_flag, WEAK_REF_SYNC_DATA_LOCK_BIT);
          g_bit_unlock (&sync_data->w_sync_flag, WEAK_REF_SYNC_DATA_LOCK_BIT);

          /* To unref/release the sync_data, pass it back into
           * g_object_weak_unref_cb(). */
          wu_data.sync_data_to_release = sync_data;
          continue;
        }

      wu_data.did_unref = TRUE;

      /* The entry was removed before it ever fired; run the destroy notify
       * here unless the caller asked to steal the data. */
      if (!steal_data && found_tuple->u.destroy)
        found_tuple->u.destroy (found_tuple->data);

      if (!wu_data.check_more)
        break;
    }

  return wu_data.did_unref;
}
/**
 * g_object_weak_unref_full: (skip)
 * @object: #GObject to remove a weak reference from
 * @notify: callback to search for
 * @data: data to search for
 * @synchronize: whether to synchronize with a concurrent weak notification.
 * @steal_data: whether to not invoke the #GDestroyNotify and steal the data
 *
 * Removes a weak reference callback to an object.
 *
 * Unlike g_object_weak_unref(), it is permissible that the weak reference
 * is not found. The return value atomically indicates whether the notification
 * could be unregistered. If the function returns %FALSE, it may be that
 * the registration never existed, or that the weak notification was already
 * called, or that the weak notification is still about to be called. Knowing
 * this is important to properly release resources.
 *
 * Setting @synchronize to %TRUE requires that synchronization was already
 * enabled during g_object_weak_ref_full(). In that case, weak notifications
 * are emitted holding a per-registration mutex. If @synchronize is then
 * set %TRUE, g_object_weak_unref_full() will acquire that mutex too, and
 * wait for a concurrent @notify to complete. This means that when the function
 * returns %TRUE, you know that the weak notification was successfully removed
 * and never fires. More importantly, when the function returns %FALSE, you
 * know that the weak notification was already completed (or never existed
 * in the first place).
 *
 * Note that synchronization has the potential for deadlock. Don't synchronize
 * g_object_weak_ref_full() while holding a lock that is also acquired by the
 * @notify callback. On the other hand, the @destroy notification is called
 * outside any lock. If you want to do non-trivial operations during a weak
 * notification while also synchronizing, you could do that during @destroy.
 *
 * A @destroy callback could be provided during g_object_weak_ref_full().
 * If @steal_data is set to %TRUE, a successful unregistration suppresses
 * the invocation of the @destroy callback. In other cases, the @destroy
 * callback is either invoked by the thread that does the @notify or by
 * g_object_weak_unref_full().
 *
 * There are multiple ways to properly handle thread-safety, where
 * other threads might dispose an object and trigger a notification.
 * The most important way is the return value of the function, that tells
 * you whether the notification was unregistered before ever being invoked.
 * You can also set @synchronize to %TRUE, which means you know that any
 * potential notification will have been completed before the function returns.
 *
 * Often weak notifications are combined with #GWeakRef. That makes sense,
 * because you will need a strong reference before calling
 * g_object_weak_unref_full(). But it does not cover all races, because
 * g_object_run_dispose() also invokes weak notifications, but is unaffected by
 * #GWeakRef. In general, to properly handle thread-safety you must look at the
 * return value of this function (to cleanup resources once) or use a @destroy
 * notify (which also ensures that the resources are released exactly once).
 *
 * Since: 2.86
 *
 * Returns: %TRUE if a weak notification was found and unregistered without
 *   ever being invoked.
 */
gboolean
g_object_weak_unref_full (GObject *object,
                          GWeakNotify notify,
                          gpointer data,
                          gboolean synchronize,
                          gboolean steal_data)
{
  g_return_val_if_fail (G_IS_OBJECT (object), FALSE);
  g_return_val_if_fail (notify != NULL, FALSE);

  return _g_object_weak_unref (object,
                               notify,
                               data,
                               synchronize,
                               steal_data);
}
/**
@@ -3947,29 +4349,50 @@ handle_weak_ref_found:
* @data: data to search for
*
* Removes a weak reference callback to an object.
*
* Note that it is a bug to call g_object_weak_unref() for a non-registered
* weak notification. If other threads can dispose the objects, this condition
* can be violated due to a race. Avoid that by using
* g_object_weak_unref_full() which gracefully accepts that the notification
* may not be registered. For thread-safety see g_object_weak_ref_full().
*
* g_object_weak_unref() will destroy the data, if a #GDestroyNotify
* was provided during g_object_weak_ref_full().
*
* This has the same effect as calling g_object_weak_unref_full()
* with @synchronize and @steal_data set to %FALSE.
*/
void
g_object_weak_unref (GObject *object,
GWeakNotify notify,
gpointer data)
g_object_weak_unref (GObject *object,
GWeakNotify notify,
gpointer data)
{
gboolean weak_ref_found;
g_return_if_fail (G_IS_OBJECT (object));
g_return_if_fail (notify != NULL);
_g_datalist_id_update_atomic (&object->qdata,
quark_weak_notifies,
g_object_weak_unref_cb,
&((WeakRefTuple){
.notify = notify,
.data = data,
}));
weak_ref_found = _g_object_weak_unref (object,
notify,
data,
FALSE,
FALSE);
if (G_UNLIKELY (!weak_ref_found))
g_critical ("%s: couldn't find weak ref %p(%p)", G_STRFUNC, notify, data);
}
/* State shared between g_object_weak_release_all() and its GData callback
 * g_object_weak_release_all_cb() across loop iterations. */
typedef struct
{
  WeakRefReleaseAllState *const release_all_state;
  /* The tuple popped by the last callback invocation; its notify is emitted
   * by the outer loop, outside the GData lock. */
  WeakRefTuple tuple;
  /* A sync-data whose notification just completed; the next callback
   * invocation removes its leftover entry and releases it. */
  WeakRefSyncData *sync_data_to_release;
  gboolean release_all_done;

  /* Cache the memory allocated sync data here. Maybe we can reuse it. */
  WeakRefSyncData *sync_data_cache;

  /* Lazily-initialized g_thread_self() of the emitting thread. */
  GThread *calling_thread;
} WeakRefReleaseAllData;
static gpointer
@@ -3980,32 +4403,61 @@ g_object_weak_release_all_cb (gpointer *data,
WeakRefStack *wstack = *data;
WeakRefReleaseAllData *wdata = user_data;
WeakRefReleaseAllState *release_all_state = wdata->release_all_state;
WeakRefTuple *tuple;
guint idx;
if (!wstack)
return NULL;
{
#ifdef G_ENABLE_DEBUG
g_assert (!wdata->sync_data_to_release);
#endif
return NULL;
}
#ifdef G_ENABLE_DEBUG
g_assert (wstack->n_weak_refs > 0);
#endif
if (wdata->sync_data_to_release)
{
WeakRefSyncData *sync_data = g_steal_pointer (&wdata->sync_data_to_release);
/* We have a previous sync-data. Thus, there is a corresponding
* gravestone entry in "wstack" that we need to remove. Do that now. */
for (idx = 0; idx < wstack->n_weak_refs; idx++)
{
tuple = &wstack->weak_refs[idx];
if (tuple->is_notifying &&
tuple->u.sync_data == sync_data)
break;
}
#ifdef G_ENABLE_DEBUG
g_assert (idx < wstack->n_weak_refs);
#endif
if (sync_data->w_refcount == 1 && !wdata->sync_data_cache)
{
/* We cache this allocation for later reuse. */
wdata->sync_data_cache = sync_data;
}
else
_weak_ref_sync_data_unref (sync_data);
_weak_ref_stack_remove (&wstack, idx, data, FALSE);
if (!wstack)
return NULL;
}
if (release_all_state)
{
if (release_all_state->remaining_to_notify == G_MAXUINT)
{
if (wstack->n_weak_refs == 1u)
{
/* We only pop the single entry. */
wdata->release_all_done = TRUE;
release_all_state = NULL;
}
else
{
release_all_state->remaining_to_notify = wstack->n_weak_refs;
release_all_state->remaining_to_notify = wstack->n_weak_refs;
/* Prepend to linked list. */
release_all_state->release_all_next = wstack->release_all_states;
wstack->release_all_states = release_all_state;
}
/* Prepend to linked list. */
release_all_state->release_all_next = wstack->release_all_states;
wstack->release_all_states = release_all_state;
}
else
{
@@ -4023,51 +4475,89 @@ g_object_weak_release_all_cb (gpointer *data,
}
}
_weak_ref_stack_update_release_all_state (wstack, 0);
if (release_all_state && release_all_state->remaining_to_notify == 0)
wdata->release_all_done = TRUE;
wstack->n_weak_refs--;
/* Emit the notifications in FIFO order. */
wdata->tuple = wstack->weak_refs[0];
if (wstack->n_weak_refs == 0)
/* Find a not yet handled tuple. */
for (idx = 0; TRUE; idx++)
{
_weak_ref_stack_free (wstack);
*data = NULL;
if (idx >= wstack->n_weak_refs)
{
/* We have some tuples in "wstack", but they are all being
* notified (on another thread). There is nothing left. */
return NULL;
}
/* Also set release_all_done.
*
* If g_object_weak_release_all() was called during dispose (with
* release_all FALSE), we anyway have an upper limit of how many
* notifications we want to pop. We only pop the notifications that were
* registered when the loop initially starts. In that case, we surely
* don't want the caller to call back.
*
* g_object_weak_release_all() is also being called before finalize. At
* that point, the ref count is already at zero, and g_object_weak_ref()
* asserts against being called. So nobody can register a new weak ref
* anymore.
*
* In both cases, we don't require the calling loop to call back. This
* saves an additional GData lookup. */
wdata->release_all_done = TRUE;
tuple = &wstack->weak_refs[idx];
if (tuple->is_notifying)
continue;
break;
}
if (tuple->synchronize)
{
WeakRefSyncData *sync_data;
if (!wdata->calling_thread)
wdata->calling_thread = g_thread_self ();
if (wdata->sync_data_cache)
sync_data = g_steal_pointer (&wdata->sync_data_cache);
else
sync_data = g_new (WeakRefSyncData, 1);
*sync_data = (WeakRefSyncData){
.w_sync_flag = 0,
.w_refcount = 1,
.w_notifying_thread = wdata->calling_thread,
.destroy = tuple->u.destroy,
};
_g_bit_lock_init (&sync_data->w_sync_flag, WEAK_REF_SYNC_DATA_LOCK_BIT);
tuple->is_notifying = TRUE;
tuple->u.sync_data = sync_data;
wdata->tuple = *tuple;
}
else
{
memmove (&wstack->weak_refs[0],
&wstack->weak_refs[1],
sizeof (wstack->weak_refs[0]) * wstack->n_weak_refs);
wdata->tuple = *tuple;
if (wdata->release_all_done)
_weak_ref_stack_remove (&wstack, idx, data, FALSE);
if (!wstack)
{
/* We maybe-shrink on each g_object_weak_unref(). During release-all,
* we usually don't shrink, because we expect that we pop all entries.
* In this case, we are now done and additional entries were subscribed
* in the meantime. In this case also maybe-shrink. */
_weak_ref_stack_maybe_shrink (&wstack, data);
/* Also set release_all_done.
*
* If g_object_weak_release_all() was called during dispose (with
* release_all FALSE), we anyway have an upper limit of how many
* notifications we want to pop. We only pop the notifications that were
* registered when the loop initially starts. In that case, we surely
* don't want the caller to call back.
*
* g_object_weak_release_all() is also being called before finalize. At
* that point, the ref count is already at zero, and g_object_weak_ref()
* asserts against being called. So nobody can register a new weak ref
* anymore.
*
* In both cases, we don't require the calling loop to call back. This
* saves an additional GData lookup. */
wdata->release_all_done = TRUE;
}
else
{
if (release_all_state && release_all_state->remaining_to_notify == 0)
{
wdata->release_all_done = TRUE;
/* We maybe-shrink during each g_object_weak_unref(). But during
* release-all, we usually don't shrink, because we expect that
* we will pop all entries.
*
* Now, we are done popping entries and the "wstack" is still not
* empty (because new notifications were registered).
* Maybe-shrink now. */
_weak_ref_stack_maybe_shrink (&wstack, data);
}
}
}
@@ -4083,21 +4573,76 @@ g_object_weak_release_all (GObject *object, gboolean release_all)
WeakRefReleaseAllData wdata = {
.release_all_state = release_all ? NULL : &release_all_state,
.release_all_done = FALSE,
.sync_data_to_release = NULL,
.sync_data_cache = NULL,
.calling_thread = NULL,
};
while (TRUE)
{
GDestroyNotify destroy;
if (!_g_datalist_id_update_atomic (&object->qdata,
quark_weak_notifies,
g_object_weak_release_all_cb,
&wdata))
break;
wdata.tuple.notify (wdata.tuple.data, object);
#ifdef G_ENABLE_DEBUG
g_assert (!wdata.sync_data_to_release);
#endif
if (wdata.tuple.is_notifying)
{
WeakRefSyncData *sync_data = wdata.tuple.u.sync_data;
wdata.tuple.notify (wdata.tuple.data, object);
g_bit_unlock (&sync_data->w_sync_flag, WEAK_REF_SYNC_DATA_LOCK_BIT);
/* We must remove the tuple from the "warray",
* g_object_weak_release_all_cb() did not yet do that. Also, we must
* _weak_ref_sync_data_unref() the "sync_data".
*
* This must be done again while holding a GData lock, so call back
* into g_object_weak_release_all_cb(). */
wdata.sync_data_to_release = sync_data;
#ifdef G_ENABLE_DEBUG
g_assert (!wdata.release_all_done);
#endif
destroy = sync_data->destroy;
}
else
{
wdata.tuple.notify (wdata.tuple.data, object);
destroy = wdata.tuple.u.destroy;
}
if (destroy)
{
/* We call destroy() after releasing the lock. That is intentional.
*
* That is because emitting notify() while holding a lock has a
* (small) possibility for deadlock. That means, the callee must be
* careful to not call into untrusted code.
*
* However, the callee also knows that after the "notify" callback,
* we will emit the "destroy" notify. Thus, the callee can use that
* callback to do anything (thereby avoiding any potential dead-lock).
*
* This means, "destroy" is not synchronized, and the caller of
* g_object_weak_unref_full() cannot know whether it completed. That
* may be undesirable in some cases, but the benefit is to provide a
* callback for cleanup that has no danger of dead-lock. */
destroy (wdata.tuple.data);
}
if (wdata.release_all_done)
break;
}
g_clear_pointer (&wdata.sync_data_cache, _weak_ref_sync_data_free);
}
/**

View File

@@ -246,6 +246,11 @@ typedef void (*GObjectFinalizeFunc) (GObject *object);
* right before finalize. At that time, the reference count of the object already
* dropped to zero and taking another reference or registering new weak references
* is no longer allowed.
*
* If the weak notification was registered via g_object_weak_ref_full() with
* synchronizing enabled, then a per-registration lock is held while the callback
* is invoked. This allows synchronizing with g_object_weak_unref_full() but also
* has the potential for deadlock. See also g_object_weak_unref_full().
*/
typedef void (*GWeakNotify) (gpointer data,
GObject *where_the_object_was);
@@ -522,6 +527,21 @@ GOBJECT_AVAILABLE_IN_ALL
void g_object_weak_unref (GObject *object,
GWeakNotify notify,
gpointer data);
GOBJECT_AVAILABLE_IN_2_86
void g_object_weak_ref_full (GObject *object,
GWeakNotify notify,
gpointer data,
GDestroyNotify destroy,
gboolean synchronize);
GOBJECT_AVAILABLE_IN_2_86
gboolean g_object_weak_unref_full (GObject *object,
GWeakNotify notify,
gpointer data,
gboolean synchronize,
gboolean steal_data);
GOBJECT_AVAILABLE_IN_ALL
void g_object_add_weak_pointer (GObject *object,
gpointer *weak_pointer_location);

View File

@@ -549,6 +549,336 @@ test_references_run_dispose (void)
/*****************************************************************************/
static gint _ref_thread_safe_weak_was_invoked = 0;
/* Reference-counted per-test state shared between the main thread and the
 * weak notification callback. */
typedef struct
{
  /* Atomic refcount, managed by _ref_thread_safe_ref()/_ref_thread_safe_unref(). */
  gint ref_count;
  /* Guards "synchronized_action". */
  GMutex mutex;

  /* This represents the action that is taken by either the weak notification
   * xor the thread that calls g_object_weak_unref_full().
   *
   * The point is that there is a guarantee that exactly one of the two parties
   * performs this action. This is what g_object_weak_ref_full() allows, to
   * have the action thread safe.
   *
   * Obviously, the action happens on two different threads, so you will need
   * some form of synchronization around it (e.g. a mutex). */
  const char *synchronized_action;
} RefThreadSafeData;
/* Thread body: drop the strong reference handed in via @thread_data, which
 * triggers dispose/finalize (and thereby the weak notification) on this
 * thread. */
static gpointer
_ref_thread_safe_thread_fcn (gpointer thread_data)
{
  GObject *obj = thread_data;

  g_object_unref (obj);
  return NULL;
}
/* GWeakNotify callback. Uses the global counter to assert it runs exactly
 * once, and claims "synchronized_action" under the mutex unless the main
 * thread claimed it first. */
static void
_ref_thread_safe_weak_cb (gpointer data,
                          GObject *where_the_object_was)
{
  RefThreadSafeData *wdata = data;

  /* Transition 0 -> 1: this must be the first and only invocation. */
  if (!g_atomic_int_compare_and_exchange (&_ref_thread_safe_weak_was_invoked, 0, 1))
    g_assert_not_reached ();

  g_mutex_lock (&wdata->mutex);
  if (!wdata->synchronized_action)
    wdata->synchronized_action = "_ref_thread_safe_weak_cb";
  g_mutex_unlock (&wdata->mutex);

  /* Widen the race window a bit before the second check. */
  g_usleep (100);

  /* Transition 1 -> 2: confirm nobody else touched the counter meanwhile. */
  if (!g_atomic_int_compare_and_exchange (&_ref_thread_safe_weak_was_invoked, 1, 2))
    g_assert_not_reached ();
}
/* Take one atomic reference on the RefThreadSafeData behind @data and return
 * it, typed. */
static RefThreadSafeData *
_ref_thread_safe_ref (gpointer data)
{
  RefThreadSafeData *self = data;

  g_atomic_int_inc (&self->ref_count);
  return self;
}
/* Drop one atomic reference; on the final drop, tear down the mutex and free
 * the struct. */
static void
_ref_thread_safe_unref (gpointer data)
{
  RefThreadSafeData *self = data;

  if (!g_atomic_int_dec_and_test (&self->ref_count))
    return;

  g_mutex_clear (&self->mutex);
  g_free (self);
}
/* Races a weak notification (triggered by g_object_unref() on a worker
 * thread) against g_object_weak_unref_full() on the main thread. Exactly one
 * of the two parties must perform the "synchronized action"; the boolean
 * result of g_object_weak_unref_full() tells us which one. */
static void
test_references_thread_safe (void)
{
  GThread *thread;
  GObject *obj;
  const char *called_synchronized_action;
  RefThreadSafeData *wdata;
  gint usec_sleep;
  gboolean should_steal_data = g_random_boolean ();
  gboolean did_steal_data = FALSE;
  gboolean did_unref = FALSE;
  int was_invoked;

  obj = g_object_new (G_TYPE_OBJECT, NULL);

  wdata = g_new (RefThreadSafeData, 1);
  *wdata = (RefThreadSafeData){
    .ref_count = 1,
    .synchronized_action = NULL,
  };
  g_mutex_init (&wdata->mutex);

  /* To test and use g_object_weak_ref_full() in a thread safe way we pass on
   * an (atomic) reference to our RefThreadSafeData. In this case, the whole
   * setup is elaborate and cumbersome.
   *
   * Note that a real user who subscribes to a weak notification does so for a
   * reason. They must already track state about what they are doing (only so
   * that they can call g_object_weak_unref_full() later). At that point, they
   * will already have part of this in place. It will be easier for such a
   * real world usage to extend their code to use g_object_weak_ref_full() in
   * a thread safe way, than it is for our test here.
   */
  g_object_weak_ref_full (obj,
                          _ref_thread_safe_weak_cb,
                          _ref_thread_safe_ref (wdata),
                          _ref_thread_safe_unref,
                          g_random_boolean ());

  thread = g_thread_new ("run-dispose", _ref_thread_safe_thread_fcn, obj);

  /* Sleep a short, random amount of time to vary which party wins the race. */
  usec_sleep = g_random_int_range (-1, 20);
  if (usec_sleep != -1)
    g_usleep ((gulong) usec_sleep);

  g_mutex_lock (&wdata->mutex);
  if (!wdata->synchronized_action)
    {
      /* We want to call g_object_weak_unref_full(); for that we must be
       * sure that the object is still alive.
       *
       * We could use a GWeakRef (in addition to that) to get a strong
       * reference first.
       *
       * Instead, we can synchronize with the weak notification. If the weak
       * notification at this point (while holding the mutex), did not yet run,
       * we know that the object is still alive. We know this even without
       * acquiring a strong reference(!) and thus without emitting a toggle
       * notification.
       */
      did_unref = g_object_weak_unref_full (obj, _ref_thread_safe_weak_cb, wdata, FALSE, should_steal_data);
      if (did_unref)
        did_steal_data = should_steal_data;
      wdata->synchronized_action = "main_thread";
    }
  called_synchronized_action = wdata->synchronized_action;
  g_mutex_unlock (&wdata->mutex);

  /* At this point, exactly one thread ran the action ("main_thread" or
   * "_ref_thread_safe_weak_cb") while under a mutex. We also safely
   * called g_object_weak_unref_full().
   *
   * Note that if g_object_weak_unref_full() returned FALSE, then in parallel
   * the _ref_thread_safe_weak_cb() callback might still be running and access
   * wdata. But the weak notification callback will also obtain the mutex, and
   * see that it should do nothing further. */
  g_assert_cmpstr (called_synchronized_action, !=, NULL);

  was_invoked = g_atomic_int_get (&_ref_thread_safe_weak_was_invoked);

  if (did_unref)
    {
      /* If g_object_weak_unref_full() did unregister the notification, the
       * callback is not running. */
      g_assert_cmpint (was_invoked, ==, 0);
    }
  if (g_str_equal (called_synchronized_action, "_ref_thread_safe_weak_cb"))
    {
      /* was_invoked must be at least 1 (maybe not yet 2). */
      g_assert_cmpint (was_invoked, >=, 1);
    }

  if (did_steal_data)
    {
      /* We successfully stole the data. Must do the additional unref here. */
      _ref_thread_safe_unref (wdata);
    }
  _ref_thread_safe_unref (wdata);

  g_thread_join (thread);

  /* _ref_thread_safe_weak_cb() ran to completion iff we did not successfully
   * g_object_weak_unref_full(). */
  g_assert_cmpint (g_atomic_int_get (&_ref_thread_safe_weak_was_invoked), ==, (did_unref ? 0 : 2));
}
/*****************************************************************************/
/* Set to TRUE once _weak_ref_sync_weak_cb() ran. Only reliably observable
 * from the main thread after a synchronizing g_object_weak_unref_full()
 * or after joining the worker thread. */
static gboolean _weak_ref_sync_was_invoked = FALSE;

/* Per-test state shared between the main thread, the worker thread and the
 * weak notification in test_weak_ref_sync(). */
typedef struct
{
  GObject *obj;            /* the object under test */
  gboolean is_run_dispose; /* worker calls g_object_run_dispose() instead of g_object_unref() */
  gpointer was_notified;   /* set by the weak callback (moved out of @some_data) */
  gpointer some_data;      /* dummy allocation used to track which party must free it */
} WeakRefSyncData;
/* Worker thread: depending on @is_run_dispose, either disposes the test
 * object or drops its strong reference. Both paths trigger the weak
 * notification. */
static gpointer
_weak_ref_sync_thread_fcn (gpointer thread_data)
{
  WeakRefSyncData *tdata = thread_data;

  if (tdata->is_run_dispose)
    g_object_run_dispose (tdata->obj);
  else
    g_object_unref (tdata->obj);

  return NULL;
}
/* Weak notification registered with synchronize=TRUE. Must run at most once;
 * moves wdata->some_data over to wdata->was_notified. Sometimes also
 * re-enters g_object_weak_unref_full() from inside the callback, which must
 * report that the notification could not be removed. */
static void
_weak_ref_sync_weak_cb (gpointer data,
                        GObject *where_the_object_was)
{
  WeakRefSyncData *tdata = data;
  const gint msec_sleep = g_random_int_range (-1, 5);

  /* Sleep a short, random amount of time to widen the race window. */
  if (msec_sleep >= 0)
    g_usleep ((gulong) (msec_sleep * 1000));

  g_assert_false (_weak_ref_sync_was_invoked);
  _weak_ref_sync_was_invoked = TRUE;

  g_assert_null (tdata->was_notified);
  g_assert_nonnull (tdata->some_data);
  tdata->was_notified = g_steal_pointer (&tdata->some_data);

  if (g_random_boolean ())
    {
      /* Unregistering while the callback runs must fail. */
      gboolean did_unref;

      did_unref = g_object_weak_unref_full (tdata->obj, _weak_ref_sync_weak_cb, tdata, FALSE, FALSE);
      g_assert_false (did_unref);
    }
}
/* Tests g_object_weak_unref_full() with synchronize=TRUE, racing against a
 * worker thread that either calls g_object_run_dispose() (IS_RUN_DISPOSE) or
 * g_object_unref(). The boolean result of g_object_weak_unref_full() tells
 * us which party won the race and therefore which pointer in @wdata owns the
 * dummy allocation and must be freed. */
static void
test_weak_ref_sync (gconstpointer test_user_data)
{
  const gboolean IS_RUN_DISPOSE = GPOINTER_TO_INT (test_user_data);
  GThread *thread;
  GObject *obj = g_object_new (G_TYPE_OBJECT, NULL);
  WeakRefSyncData wdata = {
    .obj = obj,
    .is_run_dispose = IS_RUN_DISPOSE,
    .was_notified = NULL,
    .some_data = g_malloc0 (1),
  };
  gint usec_sleep;
  gboolean did_unref = FALSE;
  gboolean called_unref = TRUE;
  GWeakRef weakref;
  gboolean has_weakref;

  /* In the plain-unref case we always need a GWeakRef to safely re-acquire a
   * strong reference below. With run-dispose the object stays alive, so only
   * sometimes take one. */
  has_weakref = (!IS_RUN_DISPOSE || g_random_boolean ());
  if (has_weakref)
    g_weak_ref_init (&weakref, obj);

  /* Register with synchronize=TRUE, so that g_object_weak_unref_full() below
   * blocks until the callback either completed or can no longer run. */
  g_object_weak_ref_full (obj, _weak_ref_sync_weak_cb, &wdata, NULL, TRUE);

  thread = g_thread_new ("run-dispose", _weak_ref_sync_thread_fcn, &wdata);

  /* Sleep a short, random amount of time to vary which side wins the race. */
  usec_sleep = g_random_int_range (-1, 20);
  if (usec_sleep != -1)
    g_usleep ((gulong) usec_sleep);

  if (IS_RUN_DISPOSE)
    {
      /* The thread only runs g_object_run_dispose(). We know that our object is valid. */
      did_unref = g_object_weak_unref_full (obj, _weak_ref_sync_weak_cb, &wdata, TRUE, FALSE);
    }
  else
    {
      GObject *obj2 = g_weak_ref_get (&weakref);

      /* The thread runs g_object_unref(). We want to test a concurrent
       * g_object_weak_unref_full(), so we need to first acquire a strong
       * reference.
       *
       * So how does g_object_weak_unref_full() do anything useful? For one, it
       * exists primarily to fix races when other threads run
       * g_object_run_dispose(). In this case, we can only use GWeakRef to
       * acquire a strong reference first. */
      if (obj2)
        {
          did_unref = g_object_weak_unref_full (obj2, _weak_ref_sync_weak_cb, &wdata, TRUE, FALSE);
          g_object_unref (obj2);
        }
      else
        {
          called_unref = FALSE;
          did_unref = FALSE;
        }
    }

  if (!called_unref)
    {
      /* We never called g_object_weak_unref_full(). We know that the
       * other thread will call unref, but we didn't synchronize to know
       * whether the thread already did that.
       *
       * We cannot assert. */
    }
  else
    {
      if (did_unref)
        {
          /* We removed the notification: the callback never ran and the
           * dummy data was never moved. */
          g_assert_null (wdata.was_notified);
          g_assert_nonnull (wdata.some_data);
        }
      else
        {
          /* synchronize=TRUE guarantees the callback already completed. */
          g_assert_nonnull (wdata.was_notified);
          g_assert_null (wdata.some_data);
        }
      /* Both forms are asserted on purpose: together they also pin that the
       * two flags hold canonical boolean values (0/1). */
      g_assert_cmpint (!did_unref, ==, _weak_ref_sync_was_invoked);
      g_assert_cmpint (did_unref, ==, !_weak_ref_sync_was_invoked);
    }

  g_thread_join (thread);

  if (IS_RUN_DISPOSE)
    g_object_unref (obj);

  /* Pair the g_weak_ref_init() from above. */
  if (has_weakref)
    g_weak_ref_clear (&weakref);

  _weak_ref_sync_was_invoked = FALSE;

  /* Free whichever pointer ended up owning the dummy allocation. Note that
   * when we successfully unregistered the notification, the data still sits
   * in @some_data; when the callback ran, it was moved to @was_notified. */
  if (did_unref)
    g_free (wdata.some_data);
  else if (called_unref)
    g_free (wdata.was_notified);
  else
    {
      /* We don't know which side ran; exactly one of the two is non-NULL and
       * g_free(NULL) is a no-op. */
      g_free (wdata.was_notified);
      g_free (wdata.some_data);
    }
}
/*****************************************************************************/
int
main (int argc,
char *argv[])
@@ -563,6 +893,9 @@ main (int argc,
g_test_add_func ("/gobject/references-many", test_references_many);
g_test_add_func ("/gobject/references_two", test_references_two);
g_test_add_func ("/gobject/references_run_dispose", test_references_run_dispose);
g_test_add_func ("/gobject/references_thread_safe", test_references_thread_safe);
g_test_add_data_func ("/gobject/weak_ref_sync/unref", GINT_TO_POINTER (FALSE), test_weak_ref_sync);
g_test_add_data_func ("/gobject/weak_ref_sync/run-dispose", GINT_TO_POINTER (TRUE), test_weak_ref_sync);
return g_test_run ();
}