thread-pool: Rename AIO pool functions to *_aio() and data types to *Aio

These names conflict with ones used by future generic thread pool
equivalents.
Generic names should belong to the generic pool type, not specific (AIO)
type.

Acked-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Link: https://lore.kernel.org/qemu-devel/70f9e0fb4b01042258a1a57996c64d19779dc7f0.1741124640.git.maciej.szmigiero@oracle.com
Signed-off-by: Cédric Le Goater <clg@redhat.com>
This commit is contained in:
Maciej S. Szmigiero 2025-03-04 23:03:30 +01:00 committed by Cédric Le Goater
parent 03c6468a13
commit dc67daeed5
5 changed files with 42 additions and 42 deletions

View file

@@ -54,7 +54,7 @@ typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque); typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque); typedef void IOHandler(void *opaque);
struct ThreadPool; struct ThreadPoolAio;
struct LinuxAioState; struct LinuxAioState;
typedef struct LuringState LuringState; typedef struct LuringState LuringState;
@@ -207,7 +207,7 @@ struct AioContext {
/* Thread pool for performing work and receiving completion callbacks. /* Thread pool for performing work and receiving completion callbacks.
* Has its own locking. * Has its own locking.
*/ */
struct ThreadPool *thread_pool; struct ThreadPoolAio *thread_pool;
#ifdef CONFIG_LINUX_AIO #ifdef CONFIG_LINUX_AIO
struct LinuxAioState *linux_aio; struct LinuxAioState *linux_aio;
@@ -500,8 +500,8 @@ void aio_set_event_notifier_poll(AioContext *ctx,
*/ */
GSource *aio_get_g_source(AioContext *ctx); GSource *aio_get_g_source(AioContext *ctx);
/* Return the ThreadPool bound to this AioContext */ /* Return the ThreadPoolAio bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx); struct ThreadPoolAio *aio_get_thread_pool(AioContext *ctx);
/* Setup the LinuxAioState bound to this AioContext */ /* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp); struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

View file

@@ -24,10 +24,10 @@
typedef int ThreadPoolFunc(void *opaque); typedef int ThreadPoolFunc(void *opaque);
typedef struct ThreadPool ThreadPool; typedef struct ThreadPoolAio ThreadPoolAio;
ThreadPool *thread_pool_new(struct AioContext *ctx); ThreadPoolAio *thread_pool_new_aio(struct AioContext *ctx);
void thread_pool_free(ThreadPool *pool); void thread_pool_free_aio(ThreadPoolAio *pool);
/* /*
* thread_pool_submit_{aio,co} API: submit I/O requests in the thread's * thread_pool_submit_{aio,co} API: submit I/O requests in the thread's
@@ -36,7 +36,7 @@ void thread_pool_free(ThreadPool *pool);
BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
BlockCompletionFunc *cb, void *opaque); BlockCompletionFunc *cb, void *opaque);
int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg); int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx); void thread_pool_update_params(ThreadPoolAio *pool, struct AioContext *ctx);
#endif #endif

View file

@@ -369,7 +369,7 @@ aio_ctx_finalize(GSource *source)
QEMUBH *bh; QEMUBH *bh;
unsigned flags; unsigned flags;
thread_pool_free(ctx->thread_pool); thread_pool_free_aio(ctx->thread_pool);
#ifdef CONFIG_LINUX_AIO #ifdef CONFIG_LINUX_AIO
if (ctx->linux_aio) { if (ctx->linux_aio) {
@@ -435,10 +435,10 @@ GSource *aio_get_g_source(AioContext *ctx)
return &ctx->source; return &ctx->source;
} }
ThreadPool *aio_get_thread_pool(AioContext *ctx) ThreadPoolAio *aio_get_thread_pool(AioContext *ctx)
{ {
if (!ctx->thread_pool) { if (!ctx->thread_pool) {
ctx->thread_pool = thread_pool_new(ctx); ctx->thread_pool = thread_pool_new_aio(ctx);
} }
return ctx->thread_pool; return ctx->thread_pool;
} }

View file

@@ -23,9 +23,9 @@
#include "block/thread-pool.h" #include "block/thread-pool.h"
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
static void do_spawn_thread(ThreadPool *pool); static void do_spawn_thread(ThreadPoolAio *pool);
typedef struct ThreadPoolElement ThreadPoolElement; typedef struct ThreadPoolElementAio ThreadPoolElementAio;
enum ThreadState { enum ThreadState {
THREAD_QUEUED, THREAD_QUEUED,
@@ -33,9 +33,9 @@ enum ThreadState {
THREAD_DONE, THREAD_DONE,
}; };
struct ThreadPoolElement { struct ThreadPoolElementAio {
BlockAIOCB common; BlockAIOCB common;
ThreadPool *pool; ThreadPoolAio *pool;
ThreadPoolFunc *func; ThreadPoolFunc *func;
void *arg; void *arg;
@@ -47,13 +47,13 @@ struct ThreadPoolElement {
int ret; int ret;
/* Access to this list is protected by lock. */ /* Access to this list is protected by lock. */
QTAILQ_ENTRY(ThreadPoolElement) reqs; QTAILQ_ENTRY(ThreadPoolElementAio) reqs;
/* This list is only written by the thread pool's mother thread. */ /* This list is only written by the thread pool's mother thread. */
QLIST_ENTRY(ThreadPoolElement) all; QLIST_ENTRY(ThreadPoolElementAio) all;
}; };
struct ThreadPool { struct ThreadPoolAio {
AioContext *ctx; AioContext *ctx;
QEMUBH *completion_bh; QEMUBH *completion_bh;
QemuMutex lock; QemuMutex lock;
@@ -62,10 +62,10 @@ struct ThreadPool {
QEMUBH *new_thread_bh; QEMUBH *new_thread_bh;
/* The following variables are only accessed from one AioContext. */ /* The following variables are only accessed from one AioContext. */
QLIST_HEAD(, ThreadPoolElement) head; QLIST_HEAD(, ThreadPoolElementAio) head;
/* The following variables are protected by lock. */ /* The following variables are protected by lock. */
QTAILQ_HEAD(, ThreadPoolElement) request_list; QTAILQ_HEAD(, ThreadPoolElementAio) request_list;
int cur_threads; int cur_threads;
int idle_threads; int idle_threads;
int new_threads; /* backlog of threads we need to create */ int new_threads; /* backlog of threads we need to create */
@@ -76,14 +76,14 @@ struct ThreadPool {
static void *worker_thread(void *opaque) static void *worker_thread(void *opaque)
{ {
ThreadPool *pool = opaque; ThreadPoolAio *pool = opaque;
qemu_mutex_lock(&pool->lock); qemu_mutex_lock(&pool->lock);
pool->pending_threads--; pool->pending_threads--;
do_spawn_thread(pool); do_spawn_thread(pool);
while (pool->cur_threads <= pool->max_threads) { while (pool->cur_threads <= pool->max_threads) {
ThreadPoolElement *req; ThreadPoolElementAio *req;
int ret; int ret;
if (QTAILQ_EMPTY(&pool->request_list)) { if (QTAILQ_EMPTY(&pool->request_list)) {
@@ -131,7 +131,7 @@ static void *worker_thread(void *opaque)
return NULL; return NULL;
} }
static void do_spawn_thread(ThreadPool *pool) static void do_spawn_thread(ThreadPoolAio *pool)
{ {
QemuThread t; QemuThread t;
@@ -148,14 +148,14 @@ static void do_spawn_thread(ThreadPool *pool)
static void spawn_thread_bh_fn(void *opaque) static void spawn_thread_bh_fn(void *opaque)
{ {
ThreadPool *pool = opaque; ThreadPoolAio *pool = opaque;
qemu_mutex_lock(&pool->lock); qemu_mutex_lock(&pool->lock);
do_spawn_thread(pool); do_spawn_thread(pool);
qemu_mutex_unlock(&pool->lock); qemu_mutex_unlock(&pool->lock);
} }
static void spawn_thread(ThreadPool *pool) static void spawn_thread(ThreadPoolAio *pool)
{ {
pool->cur_threads++; pool->cur_threads++;
pool->new_threads++; pool->new_threads++;
@@ -173,8 +173,8 @@ static void spawn_thread(ThreadPool *pool)
static void thread_pool_completion_bh(void *opaque) static void thread_pool_completion_bh(void *opaque)
{ {
ThreadPool *pool = opaque; ThreadPoolAio *pool = opaque;
ThreadPoolElement *elem, *next; ThreadPoolElementAio *elem, *next;
defer_call_begin(); /* cb() may use defer_call() to coalesce work */ defer_call_begin(); /* cb() may use defer_call() to coalesce work */
@@ -184,8 +184,8 @@ restart:
continue; continue;
} }
trace_thread_pool_complete(pool, elem, elem->common.opaque, trace_thread_pool_complete_aio(pool, elem, elem->common.opaque,
elem->ret); elem->ret);
QLIST_REMOVE(elem, all); QLIST_REMOVE(elem, all);
if (elem->common.cb) { if (elem->common.cb) {
@@ -217,10 +217,10 @@ restart:
static void thread_pool_cancel(BlockAIOCB *acb) static void thread_pool_cancel(BlockAIOCB *acb)
{ {
ThreadPoolElement *elem = (ThreadPoolElement *)acb; ThreadPoolElementAio *elem = (ThreadPoolElementAio *)acb;
ThreadPool *pool = elem->pool; ThreadPoolAio *pool = elem->pool;
trace_thread_pool_cancel(elem, elem->common.opaque); trace_thread_pool_cancel_aio(elem, elem->common.opaque);
QEMU_LOCK_GUARD(&pool->lock); QEMU_LOCK_GUARD(&pool->lock);
if (elem->state == THREAD_QUEUED) { if (elem->state == THREAD_QUEUED) {
@@ -234,16 +234,16 @@ static void thread_pool_cancel(BlockAIOCB *acb)
} }
static const AIOCBInfo thread_pool_aiocb_info = { static const AIOCBInfo thread_pool_aiocb_info = {
.aiocb_size = sizeof(ThreadPoolElement), .aiocb_size = sizeof(ThreadPoolElementAio),
.cancel_async = thread_pool_cancel, .cancel_async = thread_pool_cancel,
}; };
BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
BlockCompletionFunc *cb, void *opaque) BlockCompletionFunc *cb, void *opaque)
{ {
ThreadPoolElement *req; ThreadPoolElementAio *req;
AioContext *ctx = qemu_get_current_aio_context(); AioContext *ctx = qemu_get_current_aio_context();
ThreadPool *pool = aio_get_thread_pool(ctx); ThreadPoolAio *pool = aio_get_thread_pool(ctx);
/* Assert that the thread submitting work is the same running the pool */ /* Assert that the thread submitting work is the same running the pool */
assert(pool->ctx == qemu_get_current_aio_context()); assert(pool->ctx == qemu_get_current_aio_context());
@@ -290,7 +290,7 @@ int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg)
return tpc.ret; return tpc.ret;
} }
void thread_pool_update_params(ThreadPool *pool, AioContext *ctx) void thread_pool_update_params(ThreadPoolAio *pool, AioContext *ctx)
{ {
qemu_mutex_lock(&pool->lock); qemu_mutex_lock(&pool->lock);
@@ -317,7 +317,7 @@ void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
qemu_mutex_unlock(&pool->lock); qemu_mutex_unlock(&pool->lock);
} }
static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) static void thread_pool_init_one(ThreadPoolAio *pool, AioContext *ctx)
{ {
if (!ctx) { if (!ctx) {
ctx = qemu_get_aio_context(); ctx = qemu_get_aio_context();
@@ -337,14 +337,14 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
thread_pool_update_params(pool, ctx); thread_pool_update_params(pool, ctx);
} }
ThreadPool *thread_pool_new(AioContext *ctx) ThreadPoolAio *thread_pool_new_aio(AioContext *ctx)
{ {
ThreadPool *pool = g_new(ThreadPool, 1); ThreadPoolAio *pool = g_new(ThreadPoolAio, 1);
thread_pool_init_one(pool, ctx); thread_pool_init_one(pool, ctx);
return pool; return pool;
} }
void thread_pool_free(ThreadPool *pool) void thread_pool_free_aio(ThreadPoolAio *pool)
{ {
if (!pool) { if (!pool) {
return; return;

View file

@@ -15,8 +15,8 @@ reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
# thread-pool.c # thread-pool.c
thread_pool_submit_aio(void *pool, void *req, void *opaque) "pool %p req %p opaque %p" thread_pool_submit_aio(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d" thread_pool_complete_aio(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
thread_pool_cancel(void *req, void *opaque) "req %p opaque %p" thread_pool_cancel_aio(void *req, void *opaque) "req %p opaque %p"
# buffer.c # buffer.c
buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd" buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"