taskpool: expose new "shared" task pool implementation

While the default implementation will spawn a thread per new
pushed task, this new implementation instead spawns a maximum
number of threads, then queues new tasks on existing threads.

The thread that a new task will be queued on is picked in
a fairly naive fashion: the first thread is simply popped
from a queue and pushed back to the tail. This is an
implementation detail, however, and the strategy can always
be made more sophisticated in the future if the need arises.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/692>
This commit is contained in:
Mathieu Duponchelle 2020-11-03 02:43:26 +01:00 committed by GStreamer Merge Bot
parent b508287add
commit b6df564251
3 changed files with 465 additions and 6 deletions

View file

@ -276,7 +276,10 @@ not_supported:
*
* Join a task and/or return it to the pool. @id is the id obtained from
* gst_task_pool_push(). The default implementation does nothing, as the
* default push() implementation always returns %NULL.
* default #GstTaskPoolClass::push implementation always returns %NULL.
*
* This method should only be called with the same @pool instance that provided
* @id.
*/
void
gst_task_pool_join (GstTaskPool * pool, gpointer id)
@ -298,8 +301,13 @@ gst_task_pool_join (GstTaskPool * pool, gpointer id)
*
* Dispose of the handle returned by gst_task_pool_push(). This does
* not need to be called with the default implementation as the default
* push() implementation always returns %NULL. This does not need to be
* called either when calling gst_task_pool_join().
* #GstTaskPoolClass::push implementation always returns %NULL. It also does
* not need to be called when gst_task_pool_join() is used; it should be
* called when joining is not necessary and gst_task_pool_push() returned a
* non-%NULL value.
*
* This method should only be called with the same @pool instance that provided
* @id.
*
* Since: 1.20
*/
@ -315,3 +323,228 @@ gst_task_pool_dispose_handle (GstTaskPool * pool, gpointer id)
if (klass->dispose_handle)
klass->dispose_handle (pool, id);
}
/* Refcounted bookkeeping for one task pushed to the shared pool.
 * One reference is owned by the worker thread (dropped in shared_func())
 * and one by the caller, who receives it as the opaque handle returned
 * from push() and releases it via join() or dispose_handle(). */
typedef struct
{
  gboolean done;                /* TRUE once func has returned; protected by done_lock */
  guint64 id;                   /* NOTE(review): never written or read in this file — confirm whether it is needed */
  GstTaskPoolFunction func;     /* user function to run on the worker thread */
  gpointer user_data;           /* passed verbatim to func */
  GMutex done_lock;             /* protects done, pairs with done_cond */
  GCond done_cond;              /* signalled by the worker once func has returned */
  gint refcount;                /* accessed atomically only */
} SharedTaskData;
/* Atomically take an extra reference on @tdata and hand it back, so the
 * call can be used inline in expressions. */
static SharedTaskData *
shared_task_data_ref (SharedTaskData * tdata)
{
  g_atomic_int_inc (&tdata->refcount);

  return tdata;
}
/* Drop one reference on @tdata; the last unref tears down the
 * synchronization primitives and frees the slice. */
static void
shared_task_data_unref (SharedTaskData * tdata)
{
  if (!g_atomic_int_dec_and_test (&tdata->refcount))
    return;

  g_mutex_clear (&tdata->done_lock);
  g_cond_clear (&tdata->done_cond);
  g_slice_free (SharedTaskData, tdata);
}
struct _GstSharedTaskPoolPrivate
{
  /* Maximum number of threads the underlying GThreadPool may spawn.
   * Defaults to 1 (set in init); protected by the object lock. */
  guint max_threads;
};
/* Unchecked cast helper for internal use where the type is already known */
#define GST_SHARED_TASK_POOL_CAST(pool) ((GstSharedTaskPool*)(pool))

G_DEFINE_TYPE_WITH_PRIVATE (GstSharedTaskPool, gst_shared_task_pool,
    GST_TYPE_TASK_POOL);
/* Worker-side wrapper run on a GThreadPool thread: execute the user
 * function, then flag completion so shared_join() can wake up, and
 * finally drop the reference owned by the thread pool.
 * @pool is unused here; it is the user_data the GThreadPool was created
 * with in shared_prepare(). */
static void
shared_func (SharedTaskData * tdata, GstTaskPool * pool)
{
  tdata->func (tdata->user_data);

  /* Mark done and signal under the lock so a concurrent shared_join()
   * cannot miss the wakeup */
  g_mutex_lock (&tdata->done_lock);
  tdata->done = TRUE;
  g_cond_signal (&tdata->done_cond);
  g_mutex_unlock (&tdata->done_lock);

  /* Release the thread pool's reference */
  shared_task_data_unref (tdata);
}
/* GstTaskPoolClass::push implementation: queue @func on the underlying
 * GThreadPool and return a refcounted SharedTaskData handle the caller
 * must later join or dispose of. Returns NULL when the pool has not
 * been prepared. */
static gpointer
shared_push (GstTaskPool * pool, GstTaskPoolFunction func,
    gpointer user_data, GError ** error)
{
  SharedTaskData *tdata = NULL;

  GST_OBJECT_LOCK (pool);

  if (pool->pool) {
    tdata = g_slice_new (SharedTaskData);

    tdata->done = FALSE;
    tdata->func = func;
    tdata->user_data = user_data;
    g_atomic_int_set (&tdata->refcount, 1);
    g_cond_init (&tdata->done_cond);
    g_mutex_init (&tdata->done_lock);

    /* One reference for the thread pool, the initial one for the caller */
    g_thread_pool_push (pool->pool, shared_task_data_ref (tdata), error);
  }

  GST_OBJECT_UNLOCK (pool);

  return tdata;
}
/* GstTaskPoolClass::join implementation: block until the task behind
 * @id has finished running, then release the caller's reference. */
static void
shared_join (GstTaskPool * pool, gpointer id)
{
  SharedTaskData *tdata = (SharedTaskData *) id;

  if (tdata == NULL)
    return;

  /* Wait for shared_func() to flag completion */
  g_mutex_lock (&tdata->done_lock);
  while (!tdata->done)
    g_cond_wait (&tdata->done_cond, &tdata->done_lock);
  g_mutex_unlock (&tdata->done_lock);

  shared_task_data_unref (tdata);
}
/* GstTaskPoolClass::dispose_handle implementation: simply drop the
 * caller's reference on the handle without waiting for the task. */
static void
shared_dispose_handle (GstTaskPool * pool, gpointer id)
{
  if (id != NULL)
    shared_task_data_unref ((SharedTaskData *) id);
}
/* GstTaskPoolClass::prepare implementation: create the underlying
 * non-exclusive GThreadPool, sized by the configured max_threads. */
static void
shared_prepare (GstTaskPool * pool, GError ** error)
{
  GstSharedTaskPool *self = GST_SHARED_TASK_POOL_CAST (pool);

  GST_OBJECT_LOCK (pool);
  pool->pool = g_thread_pool_new ((GFunc) shared_func, pool,
      self->priv->max_threads, FALSE, error);
  GST_OBJECT_UNLOCK (pool);
}
/* Class init: override the base GstTaskPool vmethods with the shared
 * (bounded, queueing) implementation. */
static void
gst_shared_task_pool_class_init (GstSharedTaskPoolClass * klass)
{
  GstTaskPoolClass *taskpoolclass = GST_TASK_POOL_CLASS (klass);

  taskpoolclass->prepare = shared_prepare;
  taskpoolclass->push = shared_push;
  taskpoolclass->join = shared_join;
  taskpoolclass->dispose_handle = shared_dispose_handle;
}
/* Instance init: wire up the private struct and default to a single
 * shared thread. */
static void
gst_shared_task_pool_init (GstSharedTaskPool * pool)
{
  GstSharedTaskPoolPrivate *priv;

  priv = pool->priv = gst_shared_task_pool_get_instance_private (pool);
  priv->max_threads = 1;
}
/**
 * gst_shared_task_pool_set_max_threads:
 * @pool: a #GstSharedTaskPool
 * @max_threads: Maximum number of threads to spawn.
 *
 * Update the maximal number of threads the @pool may spawn. When
 * the maximal number of threads is reduced, existing threads are not
 * immediately shut down, see g_thread_pool_set_max_threads().
 *
 * Setting @max_threads to 0 effectively freezes the pool.
 *
 * Since: 1.20
 */
void
gst_shared_task_pool_set_max_threads (GstSharedTaskPool * pool,
    guint max_threads)
{
  GstTaskPool *taskpool;

  g_return_if_fail (GST_IS_SHARED_TASK_POOL (pool));

  taskpool = GST_TASK_POOL (pool);

  GST_OBJECT_LOCK (pool);
  /* The underlying GThreadPool only exists once prepare() has run;
   * before that, only record the value for shared_prepare() to use */
  if (taskpool->pool != NULL)
    g_thread_pool_set_max_threads (taskpool->pool, max_threads, NULL);
  pool->priv->max_threads = max_threads;
  GST_OBJECT_UNLOCK (pool);
}
/**
 * gst_shared_task_pool_get_max_threads:
 * @pool: a #GstSharedTaskPool
 *
 * Returns: the maximum number of threads @pool is configured to spawn
 *
 * Since: 1.20
 */
guint
gst_shared_task_pool_get_max_threads (GstSharedTaskPool * pool)
{
  guint ret;

  g_return_val_if_fail (GST_IS_SHARED_TASK_POOL (pool), 0);

  GST_OBJECT_LOCK (pool);
  ret = pool->priv->max_threads;
  GST_OBJECT_UNLOCK (pool);

  return ret;
}
/**
 * gst_shared_task_pool_new:
 *
 * Create a new shared task pool. The shared task pool will queue tasks on
 * a maximum number of threads, 1 by default.
 *
 * Do not use a #GstSharedTaskPool to manage potentially inter-dependent tasks such
 * as pad tasks, as having one task waiting on another to return before returning
 * would cause obvious deadlocks if they happen to share the same thread.
 *
 * Returns: (transfer full): a new #GstSharedTaskPool. gst_object_unref() after usage.
 * Since: 1.20
 */
GstTaskPool *
gst_shared_task_pool_new (void)
{
  GstTaskPool *pool = g_object_new (GST_TYPE_SHARED_TASK_POOL, NULL);

  /* Sink the initial floating reference so the caller owns a full ref */
  gst_object_ref_sink (pool);

  return pool;
}

View file

@ -67,7 +67,6 @@ struct _GstTaskPool {
* @cleanup: make sure all threads are stopped
* @push: start a new thread
* @join: join a thread
* @dispose_handle: free / unref the handle returned in push
*
* The #GstTaskPoolClass object.
*/
@ -84,15 +83,17 @@ struct _GstTaskPoolClass {
/**
* GstTaskPoolClass::dispose_handle:
* @pool: a #GstTaskPool
* @id: (transfer full): the handle to dispose of
*
* free / unref the handle returned in push.
* free / unref the handle returned in GstTaskPoolClass::push.
*
* Since: 1.20
*/
void (*dispose_handle) (GstTaskPool *pool, gpointer id);
/*< private >*/
gpointer _gst_reserved[GST_PADDING];
gpointer _gst_reserved[GST_PADDING - 1];
};
GST_API
@ -118,6 +119,59 @@ void gst_task_pool_cleanup (GstTaskPool *pool);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstTaskPool, gst_object_unref)
typedef struct _GstSharedTaskPool GstSharedTaskPool;
typedef struct _GstSharedTaskPoolClass GstSharedTaskPoolClass;
typedef struct _GstSharedTaskPoolPrivate GstSharedTaskPoolPrivate;

#define GST_TYPE_SHARED_TASK_POOL             (gst_shared_task_pool_get_type ())
/* Fixed: the checked cast must verify against GST_TYPE_SHARED_TASK_POOL,
 * not the base GST_TYPE_TASK_POOL, otherwise any plain GstTaskPool would
 * wrongly pass the runtime type check when cast to GstSharedTaskPool. */
#define GST_SHARED_TASK_POOL(pool)            (G_TYPE_CHECK_INSTANCE_CAST ((pool), GST_TYPE_SHARED_TASK_POOL, GstSharedTaskPool))
#define GST_IS_SHARED_TASK_POOL(pool)         (G_TYPE_CHECK_INSTANCE_TYPE ((pool), GST_TYPE_SHARED_TASK_POOL))
#define GST_SHARED_TASK_POOL_CLASS(pclass)    (G_TYPE_CHECK_CLASS_CAST ((pclass), GST_TYPE_SHARED_TASK_POOL, GstSharedTaskPoolClass))
#define GST_IS_SHARED_TASK_POOL_CLASS(pclass) (G_TYPE_CHECK_CLASS_TYPE ((pclass), GST_TYPE_SHARED_TASK_POOL))
#define GST_SHARED_TASK_POOL_GET_CLASS(pool)  (G_TYPE_INSTANCE_GET_CLASS ((pool), GST_TYPE_SHARED_TASK_POOL, GstSharedTaskPoolClass))
/**
 * GstSharedTaskPool:
 *
 * The #GstSharedTaskPool object.
 *
 * Since: 1.20
 */
struct _GstSharedTaskPool {
  GstTaskPool parent;

  /*< private >*/
  GstSharedTaskPoolPrivate *priv;

  gpointer _gst_reserved[GST_PADDING];
};
/**
 * GstSharedTaskPoolClass:
 *
 * The #GstSharedTaskPoolClass object.
 *
 * Since: 1.20
 */
struct _GstSharedTaskPoolClass {
  GstTaskPoolClass parent_class;

  /*< private >*/
  /* padding for ABI-compatible extension in future releases */
  gpointer _gst_reserved[GST_PADDING];
};
/* Public GstSharedTaskPool API */

GST_API
GType gst_shared_task_pool_get_type (void);

GST_API
void gst_shared_task_pool_set_max_threads (GstSharedTaskPool *pool, guint max_threads);

GST_API
guint gst_shared_task_pool_get_max_threads (GstSharedTaskPool *pool);

GST_API
GstTaskPool * gst_shared_task_pool_new (void);
G_END_DECLS
#endif /* __GST_TASK_POOL_H__ */

View file

@ -293,6 +293,176 @@ GST_START_TEST (test_create)
GST_END_TEST;
/* Per-task fixture for the shared task pool tests: records which thread
 * ran the task and provides two condition-variable handshakes, one to
 * observe that the task started (blocked) and one to let it finish
 * (unblock). */
typedef struct
{
  gboolean called;              /* set by task_cb when it runs */
  gpointer caller_thread;       /* the GThread the task executed on */
  GCond blocked_cond;           /* signalled once the task is running */
  GMutex blocked_lock;
  gboolean blocked;
  GCond unblock_cond;           /* signalled by the test to release the task */
  GMutex unblock_lock;
  gboolean unblock;
} TaskData;
/* Task body: record the executing thread, announce that it has started
 * (blocked handshake), then park until the test releases it (unblock
 * handshake). The ordering matters: blocked must be signalled before
 * waiting, or the test could deadlock waiting for it. */
static void
task_cb (TaskData * tdata)
{
  tdata->called = TRUE;
  tdata->caller_thread = g_thread_self ();

  g_mutex_lock (&tdata->blocked_lock);
  tdata->blocked = TRUE;
  g_cond_signal (&tdata->blocked_cond);
  g_mutex_unlock (&tdata->blocked_lock);

  g_mutex_lock (&tdata->unblock_lock);
  while (!tdata->unblock)
    g_cond_wait (&tdata->unblock_cond, &tdata->unblock_lock);
  g_mutex_unlock (&tdata->unblock_lock);
}
/* Reset all fixture fields and initialize both handshake pairs. */
static void
init_task_data (TaskData * tdata)
{
  tdata->called = FALSE;
  tdata->caller_thread = NULL;

  tdata->blocked = FALSE;
  g_mutex_init (&tdata->blocked_lock);
  g_cond_init (&tdata->blocked_cond);

  tdata->unblock = FALSE;
  g_mutex_init (&tdata->unblock_lock);
  g_cond_init (&tdata->unblock_cond);
}
/* Tear down the synchronization primitives created by init_task_data(). */
static void
cleanup_task_data (TaskData * tdata)
{
  g_cond_clear (&tdata->blocked_cond);
  g_mutex_clear (&tdata->blocked_lock);
  g_cond_clear (&tdata->unblock_cond);
  g_mutex_clear (&tdata->unblock_lock);
}
/* In this test, we use a shared task pool with max-threads=1 and verify
 * that the caller thread for two tasks is the same */
GST_START_TEST (test_shared_task_pool_shared_thread)
{
  GstTaskPool *pool;
  gpointer handle, handle2;
  GError *err = NULL;
  TaskData tdata, tdata2;

  init_task_data (&tdata);
  init_task_data (&tdata2);

  pool = gst_shared_task_pool_new ();

  gst_task_pool_prepare (pool, &err);
  fail_unless (err == NULL);

  /* We request that two tasks be executed, and our task function is blocking.
   * This means no new thread is available to spawn, and the second task should
   * be queued up on the first thread */
  handle =
      gst_task_pool_push (pool, (GstTaskPoolFunction) task_cb, &tdata, &err);
  fail_unless (err == NULL);
  handle2 =
      gst_task_pool_push (pool, (GstTaskPoolFunction) task_cb, &tdata2, &err);
  fail_unless (err == NULL);

  /* Release the first task so the single thread can pick up the second */
  g_mutex_lock (&tdata.unblock_lock);
  tdata.unblock = TRUE;
  g_cond_signal (&tdata.unblock_cond);
  g_mutex_unlock (&tdata.unblock_lock);

  g_mutex_lock (&tdata2.unblock_lock);
  tdata2.unblock = TRUE;
  g_cond_signal (&tdata2.unblock_cond);
  g_mutex_unlock (&tdata2.unblock_lock);

  gst_task_pool_join (pool, handle);
  gst_task_pool_join (pool, handle2);

  fail_unless (tdata.called == TRUE);
  fail_unless (tdata2.called == TRUE);
  /* max-threads=1: both tasks must have run on the same thread */
  fail_unless (tdata.caller_thread == tdata2.caller_thread);

  cleanup_task_data (&tdata);
  cleanup_task_data (&tdata2);

  gst_task_pool_cleanup (pool);

  g_object_unref (pool);
}

GST_END_TEST;
GST_END_TEST;
/* In this test, we use a shared task pool with max-threads=2 and verify
 * that the caller thread for two tasks is different */
GST_START_TEST (test_shared_task_pool_two_threads)
{
  GstTaskPool *pool;
  gpointer handle, handle2;
  GError *err = NULL;
  TaskData tdata, tdata2;

  init_task_data (&tdata);
  init_task_data (&tdata2);

  pool = gst_shared_task_pool_new ();
  gst_shared_task_pool_set_max_threads (GST_SHARED_TASK_POOL (pool), 2);

  gst_task_pool_prepare (pool, &err);
  fail_unless (err == NULL);

  /* We request that two tasks be executed, and our task function is blocking.
   * This means the pool will have to spawn a new thread to handle the task */
  handle =
      gst_task_pool_push (pool, (GstTaskPoolFunction) task_cb, &tdata, &err);
  fail_unless (err == NULL);
  handle2 =
      gst_task_pool_push (pool, (GstTaskPoolFunction) task_cb, &tdata2, &err);
  fail_unless (err == NULL);

  /* Make sure that the second task has started executing before unblocking */
  g_mutex_lock (&tdata2.blocked_lock);
  while (!tdata2.blocked) {
    g_cond_wait (&tdata2.blocked_cond, &tdata2.blocked_lock);
  }
  g_mutex_unlock (&tdata2.blocked_lock);

  g_mutex_lock (&tdata.unblock_lock);
  tdata.unblock = TRUE;
  g_cond_signal (&tdata.unblock_cond);
  g_mutex_unlock (&tdata.unblock_lock);

  g_mutex_lock (&tdata2.unblock_lock);
  tdata2.unblock = TRUE;
  g_cond_signal (&tdata2.unblock_cond);
  g_mutex_unlock (&tdata2.unblock_lock);

  gst_task_pool_join (pool, handle);
  gst_task_pool_join (pool, handle2);

  fail_unless (tdata.called == TRUE);
  fail_unless (tdata2.called == TRUE);
  /* Both tasks ran concurrently, so they must have had separate threads */
  fail_unless (tdata.caller_thread != tdata2.caller_thread);

  cleanup_task_data (&tdata);
  cleanup_task_data (&tdata2);

  gst_task_pool_cleanup (pool);

  g_object_unref (pool);
}

GST_END_TEST;
static Suite *
gst_task_suite (void)
@ -308,6 +478,8 @@ gst_task_suite (void)
tcase_add_test (tc_chain, test_join);
tcase_add_test (tc_chain, test_pause_stop_race);
tcase_add_test (tc_chain, test_resume);
tcase_add_test (tc_chain, test_shared_task_pool_shared_thread);
tcase_add_test (tc_chain, test_shared_task_pool_two_threads);
return s;
}