Mirror of https://gitlab.gnome.org/GNOME/glib.git
Synced 2025-03-31 21:03:10 +02:00

Merge branch 'tdr' into 'master'

Fix data races in task test and gmenumodel test

See merge request GNOME/glib!706

Commit 62bd79b6a2, changed path: gio/tests
gio/tests/gmenumodel.c

@@ -752,11 +752,11 @@ typedef struct
   GDBusServer *server;
 
   GThread *service_thread;
+  /* Protects server_connection and service_loop. */
   GMutex service_loop_lock;
   GCond service_loop_cond;
 
   GMainLoop *service_loop;
-  GMainLoop *loop;
 } PeerConnection;
 
 static gboolean
@@ -766,9 +766,10 @@ on_new_connection (GDBusServer *server,
 {
   PeerConnection *data = user_data;
 
+  g_mutex_lock (&data->service_loop_lock);
   data->server_connection = g_object_ref (connection);
-  g_main_loop_quit (data->loop);
+  g_cond_broadcast (&data->service_loop_cond);
+  g_mutex_unlock (&data->service_loop_lock);
 
   return TRUE;
 }
@@ -801,6 +802,15 @@ await_service_loop (PeerConnection *data)
   g_mutex_unlock (&data->service_loop_lock);
 }
 
+static void
+await_server_connection (PeerConnection *data)
+{
+  g_mutex_lock (&data->service_loop_lock);
+  while (data->server_connection == NULL)
+    g_cond_wait (&data->service_loop_cond, &data->service_loop_lock);
+  g_mutex_unlock (&data->service_loop_lock);
+}
+
 static gpointer
 service_thread_func (gpointer user_data)
 {
@@ -874,7 +884,6 @@ peer_connection_up (PeerConnection *data)
   GError *error;
 
   memset (data, '\0', sizeof (PeerConnection));
-  data->loop = g_main_loop_new (NULL, FALSE);
 
   g_mutex_init (&data->service_loop_lock);
   g_cond_init (&data->service_loop_cond);
@@ -897,8 +906,7 @@ peer_connection_up (PeerConnection *data)
                                              &error);
   g_assert_no_error (error);
   g_assert (data->client_connection != NULL);
-  while (data->server_connection == NULL)
-    g_main_loop_run (data->loop);
+  await_server_connection (data);
 }
 
 static void
@@ -915,8 +923,6 @@ peer_connection_down (PeerConnection *data)
 
   g_mutex_clear (&data->service_loop_lock);
   g_cond_clear (&data->service_loop_cond);
-
-  g_main_loop_unref (data->loop);
 }
 
 struct roundtrip_state
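
Note: the old code wrote server_connection on the service thread and then
spun the test thread in "while (data->server_connection == NULL)
g_main_loop_run (data->loop);" — an unsynchronized read/write of the same
field, hence the data race. The patch replaces it with the standard GLib
GMutex/GCond handshake: publish under the lock, broadcast, and have the
waiter loop on the predicate under the same lock. A minimal standalone
sketch of that idiom, with hypothetical names (producer_thread,
shared_value) that are not part of the patch:

#include <glib.h>

static GMutex lock;
static GCond cond;
static gpointer shared_value;   /* protected by @lock */

static gpointer
producer_thread (gpointer user_data)
{
  g_mutex_lock (&lock);
  shared_value = user_data;     /* publish the shared state */
  g_cond_broadcast (&cond);     /* wake any waiters */
  g_mutex_unlock (&lock);
  return NULL;
}

int
main (void)
{
  GThread *thread = g_thread_new ("producer", producer_thread,
                                  (gpointer) "ready");

  /* Re-check the predicate on every wakeup: this is safe against spurious
   * wakeups and against the broadcast firing before we start waiting. */
  g_mutex_lock (&lock);
  while (shared_value == NULL)
    g_cond_wait (&cond, &lock);
  g_mutex_unlock (&lock);

  g_print ("got: %s\n", (const gchar *) shared_value);
  g_thread_join (thread);
  return 0;
}

This mirrors the new await_server_connection(): because the predicate
(server_connection != NULL) is re-tested after each wakeup, a stray wakeup
cannot let the test proceed before the connection actually exists.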
gio/tests/task.c

@@ -1317,6 +1317,7 @@ test_run_in_thread_nested (void)
  * tasks, they won't all run at once.
  */
 static GMutex overflow_mutex;
+static guint overflow_completed;
 
 static void
 run_overflow_task_thread (GTask *task,
@@ -1329,16 +1330,19 @@ run_overflow_task_thread (GTask *task,
   if (g_task_return_error_if_cancelled (task))
     {
       *result = 'X';
-      return;
     }
+  else
+    {
+      /* Block until the main thread is ready. */
+      g_mutex_lock (&overflow_mutex);
+      g_mutex_unlock (&overflow_mutex);
+
+      *result = '.';
+
+      g_task_return_boolean (task, TRUE);
+    }
 
-  /* Block until the main thread is ready. */
-  g_mutex_lock (&overflow_mutex);
-  g_mutex_unlock (&overflow_mutex);
-
-  *result = '.';
-
-  g_task_return_boolean (task, TRUE);
+  g_atomic_int_inc (&overflow_completed);
 }
@@ -1382,9 +1386,11 @@ test_run_in_thread_overflow (void)
   g_mutex_unlock (&overflow_mutex);
 
   /* Wait for all tasks to complete. */
-  while (strlen (buf) != NUM_OVERFLOW_TASKS)
+  while (g_atomic_int_get (&overflow_completed) != NUM_OVERFLOW_TASKS)
     g_usleep (1000);
 
+  g_assert_cmpint (strlen (buf), ==, NUM_OVERFLOW_TASKS);
+
   i = strspn (buf, ".");
   /* Given the sleep times above, i should be 14 for normal, 40 for
    * slow. But if the machine is too slow/busy then the scheduling
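
Note: the old test polled strlen (buf) while worker threads were still
writing individual bytes into buf — a data race on the buffer contents. The
fix makes each worker atomically increment overflow_completed as its final
step, and the main thread polls only that counter; buf is then inspected
once, after every worker is done. A standalone sketch of that counting
pattern, with hypothetical names (worker, completed) that are not from the
test:

#include <glib.h>

#define N_WORKERS 8

static gint completed;  /* accessed only via g_atomic_int_*() */

static gpointer
worker (gpointer user_data)
{
  /* ... real work would go here ... */
  g_atomic_int_inc (&completed);  /* the last thing each worker does */
  return NULL;
}

int
main (void)
{
  GThread *threads[N_WORKERS];
  guint i;

  for (i = 0; i < N_WORKERS; i++)
    threads[i] = g_thread_new ("worker", worker, NULL);

  /* Poll the atomic counter rather than shared data the workers may
   * still be mutating. */
  while (g_atomic_int_get (&completed) != N_WORKERS)
    g_usleep (1000);

  for (i = 0; i < N_WORKERS; i++)
    g_thread_join (threads[i]);

  return 0;
}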