Merge branch 'wip/pwithnall/3399-glib-gir-platform-differences-gthread' into 'main'

gthread: Make introspection comments platform-independent

Closes #3399

See merge request GNOME/glib!4168
Emmanuele Bassi 2024-07-24 19:20:56 +00:00
commit 055eef3994
4 changed files with 853 additions and 633 deletions

glib/gthread-posix.c

@@ -146,78 +146,20 @@ g_mutex_get_impl (GMutex *mutex)
}
/**
* g_mutex_init:
* @mutex: an uninitialized #GMutex
*
* Initializes a #GMutex so that it can be used.
*
* This function is useful to initialize a mutex that has been
* allocated on the stack, or as part of a larger structure.
* It is not necessary to initialize a mutex that has been
* statically allocated.
*
* |[<!-- language="C" -->
* typedef struct {
* GMutex m;
* ...
* } Blob;
*
* Blob *b;
*
* b = g_new (Blob, 1);
* g_mutex_init (&b->m);
* ]|
*
* To undo the effect of g_mutex_init() when a mutex is no longer
* needed, use g_mutex_clear().
*
* Calling g_mutex_init() on an already initialized #GMutex leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
-g_mutex_init (GMutex *mutex)
+g_mutex_init_impl (GMutex *mutex)
{
  mutex->p = g_mutex_impl_new ();
}
/**
* g_mutex_clear:
* @mutex: an initialized #GMutex
*
* Frees the resources allocated to a mutex with g_mutex_init().
*
* This function should not be used with a #GMutex that has been
* statically allocated.
*
* Calling g_mutex_clear() on a locked mutex leads to undefined
* behaviour.
*
* Since: 2.32
*/
void
-g_mutex_clear (GMutex *mutex)
+g_mutex_clear_impl (GMutex *mutex)
{
  g_mutex_impl_free (mutex->p);
}
/**
* g_mutex_lock:
* @mutex: a #GMutex
*
* Locks @mutex. If @mutex is already locked by another thread, the
* current thread will block until @mutex is unlocked by the other
* thread.
*
* #GMutex is neither guaranteed to be recursive nor to be
* non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
* already been locked by the same thread results in undefined behaviour
* (including but not limited to deadlocks).
*/
void
-g_mutex_lock (GMutex *mutex)
+g_mutex_lock_impl (GMutex *mutex)
{
  gint status;
@@ -225,18 +167,8 @@ g_mutex_lock (GMutex *mutex)
    g_thread_abort (status, "pthread_mutex_lock");
}
/**
* g_mutex_unlock:
* @mutex: a #GMutex
*
* Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
* call for @mutex, it will become unblocked and can lock @mutex itself.
*
* Calling g_mutex_unlock() on a mutex that is not locked by the
* current thread leads to undefined behaviour.
*/
void
-g_mutex_unlock (GMutex *mutex)
+g_mutex_unlock_impl (GMutex *mutex)
{
  gint status;
@@ -244,23 +176,8 @@ g_mutex_unlock (GMutex *mutex)
    g_thread_abort (status, "pthread_mutex_unlock");
}
/**
* g_mutex_trylock:
* @mutex: a #GMutex
*
* Tries to lock @mutex. If @mutex is already locked by another thread,
* it immediately returns %FALSE. Otherwise it locks @mutex and returns
* %TRUE.
*
* #GMutex is neither guaranteed to be recursive nor to be
* non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
* already been locked by the same thread results in undefined behaviour
* (including but not limited to deadlocks or arbitrary return values).
*
* Returns: %TRUE if @mutex could be locked
*/
gboolean
-g_mutex_trylock (GMutex *mutex)
+g_mutex_trylock_impl (GMutex *mutex)
{
  gint status;
@@ -318,118 +235,32 @@ g_rec_mutex_get_impl (GRecMutex *rec_mutex)
  return impl;
}
/**
* g_rec_mutex_init:
* @rec_mutex: an uninitialized #GRecMutex
*
* Initializes a #GRecMutex so that it can be used.
*
* This function is useful to initialize a recursive mutex
* that has been allocated on the stack, or as part of a larger
* structure.
*
* It is not necessary to initialise a recursive mutex that has been
* statically allocated.
*
* |[<!-- language="C" -->
* typedef struct {
* GRecMutex m;
* ...
* } Blob;
*
* Blob *b;
*
* b = g_new (Blob, 1);
* g_rec_mutex_init (&b->m);
* ]|
*
* Calling g_rec_mutex_init() on an already initialized #GRecMutex
* leads to undefined behaviour.
*
* To undo the effect of g_rec_mutex_init() when a recursive mutex
* is no longer needed, use g_rec_mutex_clear().
*
* Since: 2.32
*/
void
-g_rec_mutex_init (GRecMutex *rec_mutex)
+g_rec_mutex_init_impl (GRecMutex *rec_mutex)
{
  rec_mutex->p = g_rec_mutex_impl_new ();
}
/**
* g_rec_mutex_clear:
* @rec_mutex: an initialized #GRecMutex
*
* Frees the resources allocated to a recursive mutex with
* g_rec_mutex_init().
*
* This function should not be used with a #GRecMutex that has been
* statically allocated.
*
* Calling g_rec_mutex_clear() on a locked recursive mutex leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rec_mutex_clear (GRecMutex *rec_mutex)
+g_rec_mutex_clear_impl (GRecMutex *rec_mutex)
{
  g_rec_mutex_impl_free (rec_mutex->p);
}
/**
* g_rec_mutex_lock:
* @rec_mutex: a #GRecMutex
*
* Locks @rec_mutex. If @rec_mutex is already locked by another
* thread, the current thread will block until @rec_mutex is
* unlocked by the other thread. If @rec_mutex is already locked
* by the current thread, the 'lock count' of @rec_mutex is increased.
* The mutex will only become available again when it is unlocked
* as many times as it has been locked.
*
* Since: 2.32
*/
void
-g_rec_mutex_lock (GRecMutex *mutex)
+g_rec_mutex_lock_impl (GRecMutex *mutex)
{
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
}
/**
* g_rec_mutex_unlock:
* @rec_mutex: a #GRecMutex
*
* Unlocks @rec_mutex. If another thread is blocked in a
* g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
* and can lock @rec_mutex itself.
*
* Calling g_rec_mutex_unlock() on a recursive mutex that is not
* locked by the current thread leads to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rec_mutex_unlock (GRecMutex *rec_mutex)
+g_rec_mutex_unlock_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_unlock (rec_mutex->p);
}
/**
* g_rec_mutex_trylock:
* @rec_mutex: a #GRecMutex
*
* Tries to lock @rec_mutex. If @rec_mutex is already locked
* by another thread, it immediately returns %FALSE. Otherwise
* it locks @rec_mutex and returns %TRUE.
*
* Returns: %TRUE if @rec_mutex could be locked
*
* Since: 2.32
*/
gboolean
-g_rec_mutex_trylock (GRecMutex *rec_mutex)
+g_rec_mutex_trylock_impl (GRecMutex *rec_mutex)
{
  if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
    return FALSE;
@@ -478,78 +309,20 @@ g_rw_lock_get_impl (GRWLock *lock)
  return impl;
}
/**
* g_rw_lock_init:
* @rw_lock: an uninitialized #GRWLock
*
* Initializes a #GRWLock so that it can be used.
*
* This function is useful to initialize a lock that has been
* allocated on the stack, or as part of a larger structure. It is not
* necessary to initialise a reader-writer lock that has been statically
* allocated.
*
* |[<!-- language="C" -->
* typedef struct {
* GRWLock l;
* ...
* } Blob;
*
* Blob *b;
*
* b = g_new (Blob, 1);
* g_rw_lock_init (&b->l);
* ]|
*
* To undo the effect of g_rw_lock_init() when a lock is no longer
* needed, use g_rw_lock_clear().
*
* Calling g_rw_lock_init() on an already initialized #GRWLock leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rw_lock_init (GRWLock *rw_lock)
+g_rw_lock_init_impl (GRWLock *rw_lock)
{
  rw_lock->p = g_rw_lock_impl_new ();
}
/**
* g_rw_lock_clear:
* @rw_lock: an initialized #GRWLock
*
* Frees the resources allocated to a lock with g_rw_lock_init().
*
* This function should not be used with a #GRWLock that has been
* statically allocated.
*
* Calling g_rw_lock_clear() when any thread holds the lock
* leads to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rw_lock_clear (GRWLock *rw_lock)
+g_rw_lock_clear_impl (GRWLock *rw_lock)
{
  g_rw_lock_impl_free (rw_lock->p);
}
/**
* g_rw_lock_writer_lock:
* @rw_lock: a #GRWLock
*
* Obtain a write lock on @rw_lock. If another thread currently holds
* a read or write lock on @rw_lock, the current thread will block
* until all other threads have dropped their locks on @rw_lock.
*
* Calling g_rw_lock_writer_lock() while the current thread already
* owns a read or write lock on @rw_lock leads to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rw_lock_writer_lock (GRWLock *rw_lock)
+g_rw_lock_writer_lock_impl (GRWLock *rw_lock)
{
  int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
@@ -557,21 +330,8 @@ g_rw_lock_writer_lock (GRWLock *rw_lock)
    g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
}
/**
* g_rw_lock_writer_trylock:
* @rw_lock: a #GRWLock
*
* Tries to obtain a write lock on @rw_lock. If another thread
* currently holds a read or write lock on @rw_lock, it immediately
* returns %FALSE.
* Otherwise it locks @rw_lock and returns %TRUE.
*
* Returns: %TRUE if @rw_lock could be locked
*
* Since: 2.32
*/
gboolean
-g_rw_lock_writer_trylock (GRWLock *rw_lock)
+g_rw_lock_writer_trylock_impl (GRWLock *rw_lock)
{
  if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
    return FALSE;
@@ -579,47 +339,14 @@ g_rw_lock_writer_trylock (GRWLock *rw_lock)
  return TRUE;
}
/**
* g_rw_lock_writer_unlock:
* @rw_lock: a #GRWLock
*
* Release a write lock on @rw_lock.
*
* Calling g_rw_lock_writer_unlock() on a lock that is not held
* by the current thread leads to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rw_lock_writer_unlock (GRWLock *rw_lock)
+g_rw_lock_writer_unlock_impl (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
/**
* g_rw_lock_reader_lock:
* @rw_lock: a #GRWLock
*
* Obtain a read lock on @rw_lock. If another thread currently holds
* the write lock on @rw_lock, the current thread will block until the
* write lock was (held and) released. If another thread does not hold
* the write lock, but is waiting for it, it is implementation defined
* whether the reader or writer will block. Read locks can be taken
* recursively.
*
* Calling g_rw_lock_reader_lock() while the current thread already
* owns a write lock leads to undefined behaviour. Read locks however
* can be taken recursively, in which case you need to make sure to
* call g_rw_lock_reader_unlock() the same amount of times.
*
* It is implementation-defined how many read locks are allowed to be
* held on the same lock simultaneously. If the limit is hit,
* or if a deadlock is detected, a critical warning will be emitted.
*
* Since: 2.32
*/
void
-g_rw_lock_reader_lock (GRWLock *rw_lock)
+g_rw_lock_reader_lock_impl (GRWLock *rw_lock)
{
  int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
@@ -627,20 +354,8 @@ g_rw_lock_reader_lock (GRWLock *rw_lock)
    g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
}
/**
* g_rw_lock_reader_trylock:
* @rw_lock: a #GRWLock
*
* Tries to obtain a read lock on @rw_lock and returns %TRUE if
* the read lock was successfully obtained. Otherwise it
* returns %FALSE.
*
* Returns: %TRUE if @rw_lock could be locked
*
* Since: 2.32
*/
gboolean
-g_rw_lock_reader_trylock (GRWLock *rw_lock)
+g_rw_lock_reader_trylock_impl (GRWLock *rw_lock)
{
  if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
    return FALSE;
@@ -648,19 +363,8 @@ g_rw_lock_reader_trylock (GRWLock *rw_lock)
  return TRUE;
}
/**
* g_rw_lock_reader_unlock:
* @rw_lock: a #GRWLock
*
* Release a read lock on @rw_lock.
*
* Calling g_rw_lock_reader_unlock() on a lock that is not held
* by the current thread leads to undefined behaviour.
*
* Since: 2.32
*/
void
-g_rw_lock_reader_unlock (GRWLock *rw_lock)
+g_rw_lock_reader_unlock_impl (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
@@ -721,72 +425,20 @@ g_cond_get_impl (GCond *cond)
  return impl;
}
/**
* g_cond_init:
* @cond: an uninitialized #GCond
*
* Initialises a #GCond so that it can be used.
*
* This function is useful to initialise a #GCond that has been
* allocated as part of a larger structure. It is not necessary to
* initialise a #GCond that has been statically allocated.
*
* To undo the effect of g_cond_init() when a #GCond is no longer
* needed, use g_cond_clear().
*
* Calling g_cond_init() on an already-initialised #GCond leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
-g_cond_init (GCond *cond)
+g_cond_init_impl (GCond *cond)
{
  cond->p = g_cond_impl_new ();
}
/**
* g_cond_clear:
* @cond: an initialised #GCond
*
* Frees the resources allocated to a #GCond with g_cond_init().
*
* This function should not be used with a #GCond that has been
* statically allocated.
*
* Calling g_cond_clear() for a #GCond on which threads are
* blocking leads to undefined behaviour.
*
* Since: 2.32
*/
void
-g_cond_clear (GCond *cond)
+g_cond_clear_impl (GCond *cond)
{
  g_cond_impl_free (cond->p);
}
/**
* g_cond_wait:
* @cond: a #GCond
* @mutex: a #GMutex that is currently locked
*
* Atomically releases @mutex and waits until @cond is signalled.
* When this function returns, @mutex is locked again and owned by the
* calling thread.
*
* When using condition variables, it is possible that a spurious wakeup
* may occur (ie: g_cond_wait() returns even though g_cond_signal() was
* not called). It's also possible that a stolen wakeup may occur.
* This is when g_cond_signal() is called, but another thread acquires
* @mutex before this thread and modifies the state of the program in
* such a way that when g_cond_wait() is able to return, the expected
* condition is no longer met.
*
* For this reason, g_cond_wait() must always be used in a loop. See
* the documentation for #GCond for a complete example.
**/
void
-g_cond_wait (GCond *cond,
-             GMutex *mutex)
+g_cond_wait_impl (GCond *cond,
+                  GMutex *mutex)
{
  gint status;
@@ -795,17 +447,8 @@ g_cond_wait (GCond *cond,
    g_thread_abort (status, "pthread_cond_wait");
}
/**
* g_cond_signal:
* @cond: a #GCond
*
* If threads are waiting for @cond, at least one of them is unblocked.
* If no threads are waiting for @cond, this function has no effect.
* It is good practice to hold the same lock as the waiting thread
* while calling this function, though not required.
*/
void
-g_cond_signal (GCond *cond)
+g_cond_signal_impl (GCond *cond)
{
  gint status;
@@ -813,17 +456,8 @@ g_cond_signal (GCond *cond)
    g_thread_abort (status, "pthread_cond_signal");
}
/**
* g_cond_broadcast:
* @cond: a #GCond
*
* If threads are waiting for @cond, all of them are unblocked.
* If no threads are waiting for @cond, this function has no effect.
* It is good practice to lock the same mutex as the waiting threads
* while calling this function, though not required.
*/
void
-g_cond_broadcast (GCond *cond)
+g_cond_broadcast_impl (GCond *cond)
{
  gint status;
@@ -831,66 +465,8 @@ g_cond_broadcast (GCond *cond)
    g_thread_abort (status, "pthread_cond_broadcast");
}
/**
* g_cond_wait_until:
* @cond: a #GCond
* @mutex: a #GMutex that is currently locked
* @end_time: the monotonic time to wait until
*
* Waits until either @cond is signalled or @end_time has passed.
*
* As with g_cond_wait() it is possible that a spurious or stolen wakeup
* could occur. For that reason, waiting on a condition variable should
* always be in a loop, based on an explicitly-checked predicate.
*
* %TRUE is returned if the condition variable was signalled (or in the
* case of a spurious wakeup). %FALSE is returned if @end_time has
* passed.
*
* The following code shows how to correctly perform a timed wait on a
* condition variable (extending the example presented in the
* documentation for #GCond):
*
* |[<!-- language="C" -->
* gpointer
* pop_data_timed (void)
* {
* gint64 end_time;
* gpointer data;
*
* g_mutex_lock (&data_mutex);
*
* end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
* while (!current_data)
* if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
* {
* // timeout has passed.
* g_mutex_unlock (&data_mutex);
* return NULL;
* }
*
* // there is data for us
* data = current_data;
* current_data = NULL;
*
* g_mutex_unlock (&data_mutex);
*
* return data;
* }
* ]|
*
* Notice that the end time is calculated once, before entering the
* loop and reused. This is the motivation behind the use of absolute
* time on this API -- if a relative time of 5 seconds were passed
* directly to the call and a spurious wakeup occurred, the program would
* have to start over waiting again (which would lead to a total wait
* time of more than 5 seconds).
*
* Returns: %TRUE on a signal, %FALSE on a timeout
* Since: 2.32
**/
gboolean
-g_cond_wait_until (GCond *cond,
-                   GMutex *mutex,
-                   gint64 end_time)
+g_cond_wait_until_impl (GCond *cond,
+                        GMutex *mutex,
+                        gint64 end_time)
{
@@ -944,82 +520,6 @@ g_cond_wait_until (GCond *cond,
/* {{{1 GPrivate */
/**
* GPrivate:
*
* The #GPrivate struct is an opaque data structure to represent a
* thread-local data key. It is approximately equivalent to the
* pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
* TlsSetValue()/TlsGetValue() on Windows.
*
* If you don't already know why you might want this functionality,
* then you probably don't need it.
*
* #GPrivate is a very limited resource (as far as 128 per program,
* shared between all libraries). It is also not possible to destroy a
* #GPrivate after it has been used. As such, it is only ever acceptable
* to use #GPrivate in static scope, and even then sparingly so.
*
* See G_PRIVATE_INIT() for a couple of examples.
*
* The #GPrivate structure should be considered opaque. It should only
* be accessed via the g_private_ functions.
*/
/**
* G_PRIVATE_INIT:
* @notify: a #GDestroyNotify
*
* A macro to assist with the static initialisation of a #GPrivate.
*
* This macro is useful for the case that a #GDestroyNotify function
* should be associated with the key. This is needed when the key will be
* used to point at memory that should be deallocated when the thread
* exits.
*
* Additionally, the #GDestroyNotify will also be called on the previous
* value stored in the key when g_private_replace() is used.
*
* If no #GDestroyNotify is needed, then use of this macro is not
* required -- if the #GPrivate is declared in static scope then it will
* be properly initialised by default (ie: to all zeros). See the
* examples below.
*
* |[<!-- language="C" -->
* static GPrivate name_key = G_PRIVATE_INIT (g_free);
*
* // return value should not be freed
* const gchar *
* get_local_name (void)
* {
* return g_private_get (&name_key);
* }
*
* void
* set_local_name (const gchar *name)
* {
* g_private_replace (&name_key, g_strdup (name));
* }
*
*
* static GPrivate count_key; // no free function
*
* gint
* get_local_count (void)
* {
* return GPOINTER_TO_INT (g_private_get (&count_key));
* }
*
* void
* set_local_count (gint count)
* {
* g_private_set (&count_key, GINT_TO_POINTER (count));
* }
* ]|
*
* Since: 2.32
**/
static pthread_key_t *
g_private_impl_new (GDestroyNotify notify)
{
@@ -1103,7 +603,7 @@ g_private_impl_free_direct (gpointer impl)
}
static inline pthread_key_t
-g_private_get_impl (GPrivate *key)
+_g_private_get_impl (GPrivate *key)
{
  if (sizeof (pthread_key_t) > sizeof (gpointer))
    {
@@ -1142,65 +642,28 @@ g_private_get_impl (GPrivate *key)
    }
}
/**
* g_private_get:
* @key: a #GPrivate
*
* Returns the current value of the thread local variable @key.
*
* If the value has not yet been set in this thread, %NULL is returned.
* Values are never copied between threads (when a new thread is
* created, for example).
*
* Returns: the thread-local value
*/
gpointer
-g_private_get (GPrivate *key)
+g_private_get_impl (GPrivate *key)
{
  /* quote POSIX: No errors are returned from pthread_getspecific(). */
-  return pthread_getspecific (g_private_get_impl (key));
+  return pthread_getspecific (_g_private_get_impl (key));
}
/**
* g_private_set:
* @key: a #GPrivate
* @value: the new value
*
* Sets the thread local variable @key to have the value @value in the
* current thread.
*
* This function differs from g_private_replace() in the following way:
* the #GDestroyNotify for @key is not called on the old value.
*/
void
-g_private_set (GPrivate *key,
-               gpointer value)
+g_private_set_impl (GPrivate *key,
+                    gpointer value)
{
  gint status;
-  if G_UNLIKELY ((status = pthread_setspecific (g_private_get_impl (key), value)) != 0)
+  if G_UNLIKELY ((status = pthread_setspecific (_g_private_get_impl (key), value)) != 0)
    g_thread_abort (status, "pthread_setspecific");
}
/**
* g_private_replace:
* @key: a #GPrivate
* @value: the new value
*
* Sets the thread local variable @key to have the value @value in the
* current thread.
*
* This function differs from g_private_set() in the following way: if
* the previous value was non-%NULL then the #GDestroyNotify handler for
* @key is run on it.
*
* Since: 2.32
**/
void
-g_private_replace (GPrivate *key,
-                   gpointer value)
+g_private_replace_impl (GPrivate *key,
+                        gpointer value)
{
-  pthread_key_t impl = g_private_get_impl (key);
+  pthread_key_t impl = _g_private_get_impl (key);
  gpointer old;
  gint status;
@@ -1315,16 +778,8 @@ g_system_thread_new (GThreadFunc proxy,
  return (GRealThread *) thread;
}
/**
* g_thread_yield:
*
* Causes the calling thread to voluntarily relinquish the CPU, so
* that other threads can run.
*
* This function is often used as a method to make busy wait less evil.
*/
void
-g_thread_yield (void)
+g_thread_yield_impl (void)
{
  sched_yield ();
}
@@ -1436,13 +891,13 @@ typedef enum {
 */
void
-g_mutex_init (GMutex *mutex)
+g_mutex_init_impl (GMutex *mutex)
{
  mutex->i[0] = G_MUTEX_STATE_EMPTY;
}
void
-g_mutex_clear (GMutex *mutex)
+g_mutex_clear_impl (GMutex *mutex)
{
  if G_UNLIKELY (mutex->i[0] != G_MUTEX_STATE_EMPTY)
    {
@@ -1485,7 +940,7 @@ g_mutex_unlock_slowpath (GMutex *mutex,
}
void
-g_mutex_lock (GMutex *mutex)
+g_mutex_lock_impl (GMutex *mutex)
{
  /* empty -> owned and we're done. Anything else, and we need to wait... */
  if G_UNLIKELY (!g_atomic_int_compare_and_exchange (&mutex->i[0],
@@ -1495,7 +950,7 @@ g_mutex_lock (GMutex *mutex)
}
void
-g_mutex_unlock (GMutex *mutex)
+g_mutex_unlock_impl (GMutex *mutex)
{
  guint prev;
@@ -1507,7 +962,7 @@ g_mutex_unlock (GMutex *mutex)
}
gboolean
-g_mutex_trylock (GMutex *mutex)
+g_mutex_trylock_impl (GMutex *mutex)
{
  GMutexState empty = G_MUTEX_STATE_EMPTY;
@@ -1532,18 +987,18 @@ g_mutex_trylock (GMutex *mutex)
 */
void
-g_cond_init (GCond *cond)
+g_cond_init_impl (GCond *cond)
{
  cond->i[0] = 0;
}
void
-g_cond_clear (GCond *cond)
+g_cond_clear_impl (GCond *cond)
{
}
void
-g_cond_wait (GCond *cond,
-             GMutex *mutex)
+g_cond_wait_impl (GCond *cond,
+                  GMutex *mutex)
{
  guint sampled = (guint) g_atomic_int_get (&cond->i[0]);
@@ -1554,7 +1009,7 @@ g_cond_wait (GCond *cond,
}
void
-g_cond_signal (GCond *cond)
+g_cond_signal_impl (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);
@@ -1562,7 +1017,7 @@ g_cond_signal (GCond *cond)
}
void
-g_cond_broadcast (GCond *cond)
+g_cond_broadcast_impl (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);
@@ -1570,7 +1025,7 @@ g_cond_broadcast (GCond *cond)
}
gboolean
-g_cond_wait_until (GCond *cond,
-                   GMutex *mutex,
-                   gint64 end_time)
+g_cond_wait_until_impl (GCond *cond,
+                        GMutex *mutex,
+                        gint64 end_time)
{

glib/gthread-win32.c

@@ -79,30 +79,30 @@ g_thread_abort (gint status,
/* {{{1 GMutex */
void
-g_mutex_init (GMutex *mutex)
+g_mutex_init_impl (GMutex *mutex)
{
  InitializeSRWLock ((gpointer) mutex);
}
void
-g_mutex_clear (GMutex *mutex)
+g_mutex_clear_impl (GMutex *mutex)
{
}
void
-g_mutex_lock (GMutex *mutex)
+g_mutex_lock_impl (GMutex *mutex)
{
  AcquireSRWLockExclusive ((gpointer) mutex);
}
gboolean
-g_mutex_trylock (GMutex *mutex)
+g_mutex_trylock_impl (GMutex *mutex)
{
  return TryAcquireSRWLockExclusive ((gpointer) mutex);
}
void
-g_mutex_unlock (GMutex *mutex)
+g_mutex_unlock_impl (GMutex *mutex)
{
  ReleaseSRWLockExclusive ((gpointer) mutex);
}
@@ -144,31 +144,31 @@ g_rec_mutex_get_impl (GRecMutex *mutex)
}
void
-g_rec_mutex_init (GRecMutex *mutex)
+g_rec_mutex_init_impl (GRecMutex *mutex)
{
  mutex->p = g_rec_mutex_impl_new ();
}
void
-g_rec_mutex_clear (GRecMutex *mutex)
+g_rec_mutex_clear_impl (GRecMutex *mutex)
{
  g_rec_mutex_impl_free (mutex->p);
}
void
-g_rec_mutex_lock (GRecMutex *mutex)
+g_rec_mutex_lock_impl (GRecMutex *mutex)
{
  EnterCriticalSection (g_rec_mutex_get_impl (mutex));
}
void
-g_rec_mutex_unlock (GRecMutex *mutex)
+g_rec_mutex_unlock_impl (GRecMutex *mutex)
{
  LeaveCriticalSection (mutex->p);
}
gboolean
-g_rec_mutex_trylock (GRecMutex *mutex)
+g_rec_mutex_trylock_impl (GRecMutex *mutex)
{
  return TryEnterCriticalSection (g_rec_mutex_get_impl (mutex));
}
@@ -176,85 +176,85 @@ g_rec_mutex_trylock (GRecMutex *mutex)
/* {{{1 GRWLock */
void
-g_rw_lock_init (GRWLock *lock)
+g_rw_lock_init_impl (GRWLock *lock)
{
  InitializeSRWLock ((gpointer) lock);
}
void
-g_rw_lock_clear (GRWLock *lock)
+g_rw_lock_clear_impl (GRWLock *lock)
{
}
void
-g_rw_lock_writer_lock (GRWLock *lock)
+g_rw_lock_writer_lock_impl (GRWLock *lock)
{
  AcquireSRWLockExclusive ((gpointer) lock);
}
gboolean
-g_rw_lock_writer_trylock (GRWLock *lock)
+g_rw_lock_writer_trylock_impl (GRWLock *lock)
{
  return TryAcquireSRWLockExclusive ((gpointer) lock);
}
void
-g_rw_lock_writer_unlock (GRWLock *lock)
+g_rw_lock_writer_unlock_impl (GRWLock *lock)
{
  ReleaseSRWLockExclusive ((gpointer) lock);
}
void
-g_rw_lock_reader_lock (GRWLock *lock)
+g_rw_lock_reader_lock_impl (GRWLock *lock)
{
  AcquireSRWLockShared ((gpointer) lock);
}
gboolean
-g_rw_lock_reader_trylock (GRWLock *lock)
+g_rw_lock_reader_trylock_impl (GRWLock *lock)
{
  return TryAcquireSRWLockShared ((gpointer) lock);
}
void
-g_rw_lock_reader_unlock (GRWLock *lock)
+g_rw_lock_reader_unlock_impl (GRWLock *lock)
{
  ReleaseSRWLockShared ((gpointer) lock);
}
/* {{{1 GCond */
void
-g_cond_init (GCond *cond)
+g_cond_init_impl (GCond *cond)
{
  InitializeConditionVariable ((gpointer) cond);
}
void
-g_cond_clear (GCond *cond)
+g_cond_clear_impl (GCond *cond)
{
}
void
-g_cond_signal (GCond *cond)
+g_cond_signal_impl (GCond *cond)
{
  WakeConditionVariable ((gpointer) cond);
}
void
-g_cond_broadcast (GCond *cond)
+g_cond_broadcast_impl (GCond *cond)
{
  WakeAllConditionVariable ((gpointer) cond);
}
void
-g_cond_wait (GCond *cond,
-             GMutex *entered_mutex)
+g_cond_wait_impl (GCond *cond,
+                  GMutex *entered_mutex)
{
  SleepConditionVariableSRW ((gpointer) cond, (gpointer) entered_mutex, INFINITE, 0);
}
gboolean
-g_cond_wait_until (GCond *cond,
-                   GMutex *entered_mutex,
-                   gint64 end_time)
+g_cond_wait_until_impl (GCond *cond,
+                        GMutex *entered_mutex,
+                        gint64 end_time)
{
@@ -307,7 +307,7 @@ static GPrivateDestructor *g_private_destructors;  /* (atomic) prepend-only */
static CRITICAL_SECTION g_private_lock;
static DWORD
-g_private_get_impl (GPrivate *key)
+_g_private_get_impl (GPrivate *key)
{
  DWORD impl = (DWORD) GPOINTER_TO_UINT(key->p);
@@ -366,23 +366,23 @@ g_private_get_impl (GPrivate *key)
}
gpointer
-g_private_get (GPrivate *key)
+g_private_get_impl (GPrivate *key)
{
-  return TlsGetValue (g_private_get_impl (key));
+  return TlsGetValue (_g_private_get_impl (key));
}
void
-g_private_set (GPrivate *key,
-               gpointer value)
+g_private_set_impl (GPrivate *key,
+                    gpointer value)
{
-  TlsSetValue (g_private_get_impl (key), value);
+  TlsSetValue (_g_private_get_impl (key), value);
}
void
-g_private_replace (GPrivate *key,
-                   gpointer value)
+g_private_replace_impl (GPrivate *key,
+                        gpointer value)
{
-  DWORD impl = g_private_get_impl (key);
+  DWORD impl = _g_private_get_impl (key);
  gpointer old;
  old = TlsGetValue (impl);
@@ -522,7 +522,7 @@ error:
}
void
-g_thread_yield (void)
+g_thread_yield_impl (void)
{
  Sleep(0);
}

glib/gthread.c

@@ -1129,5 +1129,731 @@ g_get_num_processors (void)
  return 1; /* Fallback */
}
/**
* g_mutex_init:
* @mutex: an uninitialized #GMutex
*
* Initializes a #GMutex so that it can be used.
*
* This function is useful to initialize a mutex that has been
* allocated on the stack, or as part of a larger structure.
* It is not necessary to initialize a mutex that has been
* statically allocated.
*
* |[<!-- language="C" -->
* typedef struct {
* GMutex m;
* ...
* } Blob;
*
* Blob *b;
*
* b = g_new (Blob, 1);
* g_mutex_init (&b->m);
* ]|
*
* To undo the effect of g_mutex_init() when a mutex is no longer
* needed, use g_mutex_clear().
*
* Calling g_mutex_init() on an already initialized #GMutex leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
g_mutex_init (GMutex *mutex)
{
g_mutex_init_impl (mutex);
}
/**
* g_mutex_clear:
* @mutex: an initialized #GMutex
*
* Frees the resources allocated to a mutex with g_mutex_init().
*
* This function should not be used with a #GMutex that has been
* statically allocated.
*
* Calling g_mutex_clear() on a locked mutex leads to undefined
* behaviour.
*
* Since: 2.32
*/
void
g_mutex_clear (GMutex *mutex)
{
g_mutex_clear_impl (mutex);
}
/**
* g_mutex_lock:
* @mutex: a #GMutex
*
* Locks @mutex. If @mutex is already locked by another thread, the
* current thread will block until @mutex is unlocked by the other
* thread.
*
* #GMutex is neither guaranteed to be recursive nor to be
* non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
* already been locked by the same thread results in undefined behaviour
* (including but not limited to deadlocks).
*/
void
g_mutex_lock (GMutex *mutex)
{
g_mutex_lock_impl (mutex);
}
/**
* g_mutex_unlock:
* @mutex: a #GMutex
*
* Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
* call for @mutex, it will become unblocked and can lock @mutex itself.
*
* Calling g_mutex_unlock() on a mutex that is not locked by the
* current thread leads to undefined behaviour.
*/
void
g_mutex_unlock (GMutex *mutex)
{
g_mutex_unlock_impl (mutex);
}
/**
* g_mutex_trylock:
* @mutex: a #GMutex
*
* Tries to lock @mutex. If @mutex is already locked by another thread,
* it immediately returns %FALSE. Otherwise it locks @mutex and returns
* %TRUE.
*
* #GMutex is neither guaranteed to be recursive nor to be
* non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
* already been locked by the same thread results in undefined behaviour
* (including but not limited to deadlocks or arbitrary return values).
*
* Returns: %TRUE if @mutex could be locked
*/
gboolean
g_mutex_trylock (GMutex *mutex)
{
return g_mutex_trylock_impl (mutex);
}
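A minimal usage sketch of the mutex API documented above, with a statically allocated #GMutex guarding a shared counter (the counter_lock, total, add_value and try_add_value names are illustrative, not part of GLib):

#include <glib.h>

static GMutex counter_lock;  /* statically allocated, so no g_mutex_init() needed */
static int total;

static void
add_value (int v)
{
  g_mutex_lock (&counter_lock);
  total += v;                /* only one thread at a time executes this */
  g_mutex_unlock (&counter_lock);
}

static gboolean
try_add_value (int v)
{
  /* Non-blocking variant: give up immediately if another thread holds the lock. */
  if (!g_mutex_trylock (&counter_lock))
    return FALSE;

  total += v;
  g_mutex_unlock (&counter_lock);
  return TRUE;
}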
/**
* g_rec_mutex_init:
* @rec_mutex: an uninitialized #GRecMutex
*
* Initializes a #GRecMutex so that it can be used.
*
* This function is useful to initialize a recursive mutex
* that has been allocated on the stack, or as part of a larger
* structure.
*
* It is not necessary to initialise a recursive mutex that has been
* statically allocated.
*
* |[<!-- language="C" -->
* typedef struct {
* GRecMutex m;
* ...
* } Blob;
*
* Blob *b;
*
* b = g_new (Blob, 1);
* g_rec_mutex_init (&b->m);
* ]|
*
* Calling g_rec_mutex_init() on an already initialized #GRecMutex
* leads to undefined behaviour.
*
* To undo the effect of g_rec_mutex_init() when a recursive mutex
* is no longer needed, use g_rec_mutex_clear().
*
* Since: 2.32
*/
void
g_rec_mutex_init (GRecMutex *rec_mutex)
{
g_rec_mutex_init_impl (rec_mutex);
}
/**
* g_rec_mutex_clear:
* @rec_mutex: an initialized #GRecMutex
*
* Frees the resources allocated to a recursive mutex with
* g_rec_mutex_init().
*
* This function should not be used with a #GRecMutex that has been
* statically allocated.
*
* Calling g_rec_mutex_clear() on a locked recursive mutex leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
g_rec_mutex_clear (GRecMutex *rec_mutex)
{
g_rec_mutex_clear_impl (rec_mutex);
}
/**
* g_rec_mutex_lock:
* @rec_mutex: a #GRecMutex
*
* Locks @rec_mutex. If @rec_mutex is already locked by another
* thread, the current thread will block until @rec_mutex is
* unlocked by the other thread. If @rec_mutex is already locked
* by the current thread, the 'lock count' of @rec_mutex is increased.
* The mutex will only become available again when it is unlocked
* as many times as it has been locked.
*
* Since: 2.32
*/
void
g_rec_mutex_lock (GRecMutex *mutex)
{
g_rec_mutex_lock_impl (mutex);
}
/**
* g_rec_mutex_unlock:
* @rec_mutex: a #GRecMutex
*
* Unlocks @rec_mutex. If another thread is blocked in a
* g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
* and can lock @rec_mutex itself.
*
* Calling g_rec_mutex_unlock() on a recursive mutex that is not
* locked by the current thread leads to undefined behaviour.
*
* Since: 2.32
*/
void
g_rec_mutex_unlock (GRecMutex *rec_mutex)
{
g_rec_mutex_unlock_impl (rec_mutex);
}
/**
* g_rec_mutex_trylock:
* @rec_mutex: a #GRecMutex
*
* Tries to lock @rec_mutex. If @rec_mutex is already locked
* by another thread, it immediately returns %FALSE. Otherwise
* it locks @rec_mutex and returns %TRUE.
*
* Returns: %TRUE if @rec_mutex could be locked
*
* Since: 2.32
*/
gboolean
g_rec_mutex_trylock (GRecMutex *rec_mutex)
{
return g_rec_mutex_trylock_impl (rec_mutex);
}
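A short sketch of the 'lock count' behaviour described above: the owning thread may take the recursive mutex again, and it only becomes available to other threads after a matching number of unlocks (rec_lock, outer and inner are illustrative names):

#include <glib.h>

static GRecMutex rec_lock;   /* statically allocated, so no g_rec_mutex_init() needed */

static void
inner (void)
{
  g_rec_mutex_lock (&rec_lock);    /* same thread: lock count 1 -> 2 */
  /* ... */
  g_rec_mutex_unlock (&rec_lock);  /* lock count 2 -> 1, still held */
}

static void
outer (void)
{
  g_rec_mutex_lock (&rec_lock);    /* lock count 0 -> 1 */
  inner ();
  g_rec_mutex_unlock (&rec_lock);  /* lock count 1 -> 0, available to other threads again */
}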
/* {{{1 GRWLock */
/**
* g_rw_lock_init:
* @rw_lock: an uninitialized #GRWLock
*
* Initializes a #GRWLock so that it can be used.
*
* This function is useful to initialize a lock that has been
* allocated on the stack, or as part of a larger structure. It is not
* necessary to initialise a reader-writer lock that has been statically
* allocated.
*
* |[<!-- language="C" -->
* typedef struct {
* GRWLock l;
* ...
* } Blob;
*
* Blob *b;
*
* b = g_new (Blob, 1);
* g_rw_lock_init (&b->l);
* ]|
*
* To undo the effect of g_rw_lock_init() when a lock is no longer
* needed, use g_rw_lock_clear().
*
* Calling g_rw_lock_init() on an already initialized #GRWLock leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
g_rw_lock_init (GRWLock *rw_lock)
{
g_rw_lock_init_impl (rw_lock);
}
/**
* g_rw_lock_clear:
* @rw_lock: an initialized #GRWLock
*
* Frees the resources allocated to a lock with g_rw_lock_init().
*
* This function should not be used with a #GRWLock that has been
* statically allocated.
*
* Calling g_rw_lock_clear() when any thread holds the lock
* leads to undefined behaviour.
*
* Since: 2.32
*/
void
g_rw_lock_clear (GRWLock *rw_lock)
{
g_rw_lock_clear_impl (rw_lock);
}
/**
* g_rw_lock_writer_lock:
* @rw_lock: a #GRWLock
*
* Obtain a write lock on @rw_lock. If another thread currently holds
* a read or write lock on @rw_lock, the current thread will block
* until all other threads have dropped their locks on @rw_lock.
*
* Calling g_rw_lock_writer_lock() while the current thread already
* owns a read or write lock on @rw_lock leads to undefined behaviour.
*
* Since: 2.32
*/
void
g_rw_lock_writer_lock (GRWLock *rw_lock)
{
g_rw_lock_writer_lock_impl (rw_lock);
}
/**
* g_rw_lock_writer_trylock:
* @rw_lock: a #GRWLock
*
* Tries to obtain a write lock on @rw_lock. If another thread
* currently holds a read or write lock on @rw_lock, it immediately
* returns %FALSE.
* Otherwise it locks @rw_lock and returns %TRUE.
*
* Returns: %TRUE if @rw_lock could be locked
*
* Since: 2.32
*/
gboolean
g_rw_lock_writer_trylock (GRWLock *rw_lock)
{
return g_rw_lock_writer_trylock_impl (rw_lock);
}
/**
* g_rw_lock_writer_unlock:
* @rw_lock: a #GRWLock
*
* Release a write lock on @rw_lock.
*
* Calling g_rw_lock_writer_unlock() on a lock that is not held
* by the current thread leads to undefined behaviour.
*
* Since: 2.32
*/
void
g_rw_lock_writer_unlock (GRWLock *rw_lock)
{
g_rw_lock_writer_unlock_impl (rw_lock);
}
/**
* g_rw_lock_reader_lock:
* @rw_lock: a #GRWLock
*
* Obtain a read lock on @rw_lock. If another thread currently holds
* the write lock on @rw_lock, the current thread will block until the
* write lock was (held and) released. If another thread does not hold
* the write lock, but is waiting for it, it is implementation defined
* whether the reader or writer will block. Read locks can be taken
* recursively.
*
* Calling g_rw_lock_reader_lock() while the current thread already
* owns a write lock leads to undefined behaviour. Read locks however
* can be taken recursively, in which case you need to make sure to
* call g_rw_lock_reader_unlock() the same amount of times.
*
* It is implementation-defined how many read locks are allowed to be
* held on the same lock simultaneously. If the limit is hit,
* or if a deadlock is detected, a critical warning will be emitted.
*
* Since: 2.32
*/
void
g_rw_lock_reader_lock (GRWLock *rw_lock)
{
g_rw_lock_reader_lock_impl (rw_lock);
}
/**
* g_rw_lock_reader_trylock:
* @rw_lock: a #GRWLock
*
* Tries to obtain a read lock on @rw_lock and returns %TRUE if
* the read lock was successfully obtained. Otherwise it
* returns %FALSE.
*
* Returns: %TRUE if @rw_lock could be locked
*
* Since: 2.32
*/
gboolean
g_rw_lock_reader_trylock (GRWLock *rw_lock)
{
return g_rw_lock_reader_trylock_impl (rw_lock);
}
/**
* g_rw_lock_reader_unlock:
* @rw_lock: a #GRWLock
*
* Release a read lock on @rw_lock.
*
* Calling g_rw_lock_reader_unlock() on a lock that is not held
* by the current thread leads to undefined behaviour.
*
* Since: 2.32
*/
void
g_rw_lock_reader_unlock (GRWLock *rw_lock)
{
g_rw_lock_reader_unlock_impl (rw_lock);
}
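A sketch of the reader/writer pattern the functions above are meant for: any number of threads may look entries up concurrently, while insertions take the exclusive writer lock (table_lock, table and the two helpers are illustrative; the hash table is assumed to be created elsewhere during startup):

#include <glib.h>

static GRWLock table_lock;   /* statically allocated, so no g_rw_lock_init() needed */
static GHashTable *table;    /* assumed to be created during startup */

static gpointer
lookup_entry (const char *key)
{
  gpointer value;

  g_rw_lock_reader_lock (&table_lock);   /* shared: many readers at once */
  value = g_hash_table_lookup (table, key);
  g_rw_lock_reader_unlock (&table_lock);

  return value;
}

static void
insert_entry (const char *key, gpointer value)
{
  g_rw_lock_writer_lock (&table_lock);   /* exclusive: waits for all readers and writers */
  g_hash_table_insert (table, g_strdup (key), value);
  g_rw_lock_writer_unlock (&table_lock);
}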
/* {{{1 GCond */
/**
* g_cond_init:
* @cond: an uninitialized #GCond
*
* Initialises a #GCond so that it can be used.
*
* This function is useful to initialise a #GCond that has been
* allocated as part of a larger structure. It is not necessary to
* initialise a #GCond that has been statically allocated.
*
* To undo the effect of g_cond_init() when a #GCond is no longer
* needed, use g_cond_clear().
*
* Calling g_cond_init() on an already-initialised #GCond leads
* to undefined behaviour.
*
* Since: 2.32
*/
void
g_cond_init (GCond *cond)
{
g_cond_init_impl (cond);
}
/**
* g_cond_clear:
* @cond: an initialised #GCond
*
* Frees the resources allocated to a #GCond with g_cond_init().
*
* This function should not be used with a #GCond that has been
* statically allocated.
*
* Calling g_cond_clear() for a #GCond on which threads are
* blocking leads to undefined behaviour.
*
* Since: 2.32
*/
void
g_cond_clear (GCond *cond)
{
g_cond_clear_impl (cond);
}
/**
* g_cond_wait:
* @cond: a #GCond
* @mutex: a #GMutex that is currently locked
*
* Atomically releases @mutex and waits until @cond is signalled.
* When this function returns, @mutex is locked again and owned by the
* calling thread.
*
* When using condition variables, it is possible that a spurious wakeup
* may occur (ie: g_cond_wait() returns even though g_cond_signal() was
* not called). It's also possible that a stolen wakeup may occur.
* This is when g_cond_signal() is called, but another thread acquires
* @mutex before this thread and modifies the state of the program in
* such a way that when g_cond_wait() is able to return, the expected
* condition is no longer met.
*
* For this reason, g_cond_wait() must always be used in a loop. See
* the documentation for #GCond for a complete example.
**/
void
g_cond_wait (GCond *cond,
GMutex *mutex)
{
g_cond_wait_impl (cond, mutex);
}
/**
* g_cond_signal:
* @cond: a #GCond
*
* If threads are waiting for @cond, at least one of them is unblocked.
* If no threads are waiting for @cond, this function has no effect.
* It is good practice to hold the same lock as the waiting thread
* while calling this function, though not required.
*/
void
g_cond_signal (GCond *cond)
{
g_cond_signal_impl (cond);
}
/**
* g_cond_broadcast:
* @cond: a #GCond
*
* If threads are waiting for @cond, all of them are unblocked.
* If no threads are waiting for @cond, this function has no effect.
* It is good practice to lock the same mutex as the waiting threads
* while calling this function, though not required.
*/
void
g_cond_broadcast (GCond *cond)
{
g_cond_broadcast_impl (cond);
}
/**
* g_cond_wait_until:
* @cond: a #GCond
* @mutex: a #GMutex that is currently locked
* @end_time: the monotonic time to wait until
*
* Waits until either @cond is signalled or @end_time has passed.
*
* As with g_cond_wait() it is possible that a spurious or stolen wakeup
* could occur. For that reason, waiting on a condition variable should
* always be in a loop, based on an explicitly-checked predicate.
*
* %TRUE is returned if the condition variable was signalled (or in the
* case of a spurious wakeup). %FALSE is returned if @end_time has
* passed.
*
* The following code shows how to correctly perform a timed wait on a
* condition variable (extending the example presented in the
* documentation for #GCond):
*
* |[<!-- language="C" -->
* gpointer
* pop_data_timed (void)
* {
* gint64 end_time;
* gpointer data;
*
* g_mutex_lock (&data_mutex);
*
* end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
* while (!current_data)
* if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
* {
* // timeout has passed.
* g_mutex_unlock (&data_mutex);
* return NULL;
* }
*
* // there is data for us
* data = current_data;
* current_data = NULL;
*
* g_mutex_unlock (&data_mutex);
*
* return data;
* }
* ]|
*
* Notice that the end time is calculated once, before entering the
* loop and reused. This is the motivation behind the use of absolute
* time on this API -- if a relative time of 5 seconds were passed
* directly to the call and a spurious wakeup occurred, the program would
* have to start over waiting again (which would lead to a total wait
* time of more than 5 seconds).
*
* Returns: %TRUE on a signal, %FALSE on a timeout
* Since: 2.32
**/
gboolean
g_cond_wait_until (GCond *cond,
GMutex *mutex,
gint64 end_time)
{
return g_cond_wait_until_impl (cond, mutex, end_time);
}
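For completeness, a sketch of the producer side that would pair with the pop_data_timed() example above, using the same illustrative current_data, data_mutex and data_cond variables (declared here so the fragment is self-contained):

#include <glib.h>

static GMutex data_mutex;       /* protects current_data */
static GCond data_cond;         /* signalled when current_data becomes non-NULL */
static gpointer current_data;

static void
push_data (gpointer data)
{
  g_mutex_lock (&data_mutex);
  current_data = data;
  g_cond_signal (&data_cond);   /* wakes (at least) one waiter in pop_data_timed() */
  g_mutex_unlock (&data_mutex);
}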
/* {{{1 GPrivate */
/**
* GPrivate:
*
* The #GPrivate struct is an opaque data structure to represent a
* thread-local data key. It is approximately equivalent to the
* pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
* TlsSetValue()/TlsGetValue() on Windows.
*
* If you don't already know why you might want this functionality,
* then you probably don't need it.
*
* #GPrivate is a very limited resource (as far as 128 per program,
* shared between all libraries). It is also not possible to destroy a
* #GPrivate after it has been used. As such, it is only ever acceptable
* to use #GPrivate in static scope, and even then sparingly so.
*
* See G_PRIVATE_INIT() for a couple of examples.
*
* The #GPrivate structure should be considered opaque. It should only
* be accessed via the g_private_ functions.
*/
/**
* G_PRIVATE_INIT:
* @notify: a #GDestroyNotify
*
* A macro to assist with the static initialisation of a #GPrivate.
*
* This macro is useful for the case that a #GDestroyNotify function
* should be associated with the key. This is needed when the key will be
* used to point at memory that should be deallocated when the thread
* exits.
*
* Additionally, the #GDestroyNotify will also be called on the previous
* value stored in the key when g_private_replace() is used.
*
* If no #GDestroyNotify is needed, then use of this macro is not
* required -- if the #GPrivate is declared in static scope then it will
* be properly initialised by default (ie: to all zeros). See the
* examples below.
*
* |[<!-- language="C" -->
* static GPrivate name_key = G_PRIVATE_INIT (g_free);
*
* // return value should not be freed
* const gchar *
* get_local_name (void)
* {
* return g_private_get (&name_key);
* }
*
* void
* set_local_name (const gchar *name)
* {
* g_private_replace (&name_key, g_strdup (name));
* }
*
*
* static GPrivate count_key; // no free function
*
* gint
* get_local_count (void)
* {
* return GPOINTER_TO_INT (g_private_get (&count_key));
* }
*
* void
* set_local_count (gint count)
* {
* g_private_set (&count_key, GINT_TO_POINTER (count));
* }
* ]|
*
* Since: 2.32
**/
/**
* g_private_get:
* @key: a #GPrivate
*
* Returns the current value of the thread local variable @key.
*
* If the value has not yet been set in this thread, %NULL is returned.
* Values are never copied between threads (when a new thread is
* created, for example).
*
* Returns: the thread-local value
*/
gpointer
g_private_get (GPrivate *key)
{
return g_private_get_impl (key);
}
/**
* g_private_set:
* @key: a #GPrivate
* @value: the new value
*
* Sets the thread local variable @key to have the value @value in the
* current thread.
*
* This function differs from g_private_replace() in the following way:
* the #GDestroyNotify for @key is not called on the old value.
*/
void
g_private_set (GPrivate *key,
gpointer value)
{
g_private_set_impl (key, value);
}
/**
* g_private_replace:
* @key: a #GPrivate
* @value: the new value
*
* Sets the thread local variable @key to have the value @value in the
* current thread.
*
* This function differs from g_private_set() in the following way: if
* the previous value was non-%NULL then the #GDestroyNotify handler for
* @key is run on it.
*
* Since: 2.32
**/
void
g_private_replace (GPrivate *key,
gpointer value)
{
g_private_replace_impl (key, value);
}
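A small sketch of the difference spelled out above, reusing the illustrative name_key from the G_PRIVATE_INIT() example: with g_private_replace() the old per-thread string is passed to g_free(), whereas g_private_set() would simply leak it:

#include <glib.h>

static GPrivate name_key = G_PRIVATE_INIT (g_free);

static void
update_local_name (const gchar *name)
{
  /* The previous value (if any) is handed to g_free() for this thread. */
  g_private_replace (&name_key, g_strdup (name));

  /* By contrast, g_private_set (&name_key, g_strdup (name)) would
   * overwrite the pointer without freeing the old string. */
}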
/* {{{1 GThread */
/**
* g_thread_yield:
*
* Causes the calling thread to voluntarily relinquish the CPU, so
* that other threads can run.
*
* This function is often used as a method to make busy wait less evil.
*/
void
g_thread_yield (void)
{
g_thread_yield_impl ();
}
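A sketch of the kind of busy wait the comment above refers to, yielding between polls of an atomically set flag (ready and spin_until_ready are illustrative names):

#include <glib.h>

static gint ready;   /* set to 1 by another thread via g_atomic_int_set() */

static void
spin_until_ready (void)
{
  while (g_atomic_int_get (&ready) == 0)
    g_thread_yield ();   /* let other threads run instead of spinning hot */
}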
/* Epilogue {{{1 */
/* vim: set foldmethod=marker: */

glib/gthreadprivate.h

@@ -175,4 +175,43 @@ guint g_thread_n_created (void);
gpointer        g_private_set_alloc0   (GPrivate *key,
                                         gsize size);
void g_mutex_init_impl (GMutex *mutex);
void g_mutex_clear_impl (GMutex *mutex);
void g_mutex_lock_impl (GMutex *mutex);
void g_mutex_unlock_impl (GMutex *mutex);
gboolean g_mutex_trylock_impl (GMutex *mutex);
void g_rec_mutex_init_impl (GRecMutex *rec_mutex);
void g_rec_mutex_clear_impl (GRecMutex *rec_mutex);
void g_rec_mutex_lock_impl (GRecMutex *mutex);
void g_rec_mutex_unlock_impl (GRecMutex *rec_mutex);
gboolean g_rec_mutex_trylock_impl (GRecMutex *rec_mutex);
void g_rw_lock_init_impl (GRWLock *rw_lock);
void g_rw_lock_clear_impl (GRWLock *rw_lock);
void g_rw_lock_writer_lock_impl (GRWLock *rw_lock);
gboolean g_rw_lock_writer_trylock_impl (GRWLock *rw_lock);
void g_rw_lock_writer_unlock_impl (GRWLock *rw_lock);
void g_rw_lock_reader_lock_impl (GRWLock *rw_lock);
gboolean g_rw_lock_reader_trylock_impl (GRWLock *rw_lock);
void g_rw_lock_reader_unlock_impl (GRWLock *rw_lock);
void g_cond_init_impl (GCond *cond);
void g_cond_clear_impl (GCond *cond);
void g_cond_wait_impl (GCond *cond,
GMutex *mutex);
void g_cond_signal_impl (GCond *cond);
void g_cond_broadcast_impl (GCond *cond);
gboolean g_cond_wait_until_impl (GCond *cond,
GMutex *mutex,
gint64 end_time);
gpointer g_private_get_impl (GPrivate *key);
void g_private_set_impl (GPrivate *key,
gpointer value);
void g_private_replace_impl (GPrivate *key,
gpointer value);
void g_thread_yield_impl (void);
#endif /* __G_THREADPRIVATE_H__ */