Merge branch 'tdr' into 'master'

Fix data races in task test and gmenumodel test

See merge request GNOME/glib!706
This commit is contained in:
Philip Withnall 2019-03-06 13:53:24 +00:00
commit 62bd79b6a2
2 changed files with 29 additions and 17 deletions

View File

@ -752,11 +752,11 @@ typedef struct
GDBusServer *server;
GThread *service_thread;
/* Protects server_connection and service_loop. */
GMutex service_loop_lock;
GCond service_loop_cond;
GMainLoop *service_loop;
GMainLoop *loop;
} PeerConnection;
static gboolean
@ -766,9 +766,10 @@ on_new_connection (GDBusServer *server,
{
PeerConnection *data = user_data;
g_mutex_lock (&data->service_loop_lock);
data->server_connection = g_object_ref (connection);
g_main_loop_quit (data->loop);
g_cond_broadcast (&data->service_loop_cond);
g_mutex_unlock (&data->service_loop_lock);
return TRUE;
}
@ -801,6 +802,15 @@ await_service_loop (PeerConnection *data)
g_mutex_unlock (&data->service_loop_lock);
}
/* Block the calling thread until the service thread's on_new_connection()
 * callback has stored the server-side connection in data->server_connection
 * (it signals service_loop_cond after setting the field under the same lock).
 *
 * The predicate is re-tested after every wakeup, as g_cond_wait() may wake
 * spuriously. */
static void
await_server_connection (PeerConnection *data)
{
  g_mutex_lock (&data->service_loop_lock);

  for (;;)
    {
      if (data->server_connection != NULL)
        break;

      g_cond_wait (&data->service_loop_cond, &data->service_loop_lock);
    }

  g_mutex_unlock (&data->service_loop_lock);
}
static gpointer
service_thread_func (gpointer user_data)
{
@ -874,7 +884,6 @@ peer_connection_up (PeerConnection *data)
GError *error;
memset (data, '\0', sizeof (PeerConnection));
data->loop = g_main_loop_new (NULL, FALSE);
g_mutex_init (&data->service_loop_lock);
g_cond_init (&data->service_loop_cond);
@ -897,8 +906,7 @@ peer_connection_up (PeerConnection *data)
&error);
g_assert_no_error (error);
g_assert (data->client_connection != NULL);
while (data->server_connection == NULL)
g_main_loop_run (data->loop);
await_server_connection (data);
}
static void
@ -915,8 +923,6 @@ peer_connection_down (PeerConnection *data)
g_mutex_clear (&data->service_loop_lock);
g_cond_clear (&data->service_loop_cond);
g_main_loop_unref (data->loop);
}
struct roundtrip_state

View File

@ -1317,6 +1317,7 @@ test_run_in_thread_nested (void)
* tasks, they won't all run at once.
*/
static GMutex overflow_mutex;
static guint overflow_completed;
static void
run_overflow_task_thread (GTask *task,
@ -1329,16 +1330,19 @@ run_overflow_task_thread (GTask *task,
if (g_task_return_error_if_cancelled (task))
{
*result = 'X';
return;
}
else
{
/* Block until the main thread is ready. */
g_mutex_lock (&overflow_mutex);
g_mutex_unlock (&overflow_mutex);
*result = '.';
g_task_return_boolean (task, TRUE);
}
/* Block until the main thread is ready. */
g_mutex_lock (&overflow_mutex);
g_mutex_unlock (&overflow_mutex);
*result = '.';
g_task_return_boolean (task, TRUE);
g_atomic_int_inc (&overflow_completed);
}
#define NUM_OVERFLOW_TASKS 1024
@ -1382,9 +1386,11 @@ test_run_in_thread_overflow (void)
g_mutex_unlock (&overflow_mutex);
/* Wait for all tasks to complete. */
while (strlen (buf) != NUM_OVERFLOW_TASKS)
while (g_atomic_int_get (&overflow_completed) != NUM_OVERFLOW_TASKS)
g_usleep (1000);
g_assert_cmpint (strlen (buf), ==, NUM_OVERFLOW_TASKS);
i = strspn (buf, ".");
/* Given the sleep times above, i should be 14 for normal, 40 for
* slow. But if the machine is too slow/busy then the scheduling