aio: Create AioPolledEvent

In preparation for having multiple adaptive polling states per
AioContext, move the poll_ns field into a separate AioPolledEvent struct.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250307221634.71951-4-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Kevin Wolf 2025-03-07 23:16:32 +01:00
parent 2f3b6e61f6
commit 518db1013c
3 changed files with 23 additions and 17 deletions

@@ -123,6 +123,10 @@ struct BHListSlice {
 typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
 
+typedef struct AioPolledEvent {
+    int64_t ns;        /* current polling time in nanoseconds */
+} AioPolledEvent;
+
 struct AioContext {
     GSource source;
 
@@ -229,7 +233,7 @@ struct AioContext {
     int poll_disable_cnt;
 
     /* Polling mode parameters */
-    int64_t poll_ns;        /* current polling time in nanoseconds */
+    AioPolledEvent poll;
     int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
     int64_t poll_grow;      /* polling time growth factor */
     int64_t poll_shrink;    /* polling time shrink factor */
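
For illustration of the motivation stated in the commit message: once the state
lives in AioPolledEvent, an owner can carry several independent adaptive polling
states simply by embedding more instances. The sketch below is hypothetical and
not part of this commit; DemoContext and the poll_other field are invented names,
and the AioPolledEvent typedef is repeated from the hunk above only to keep the
example self-contained.

/* Hypothetical sketch only -- not part of this commit. */
#include <stdint.h>

typedef struct AioPolledEvent {
    int64_t ns;                 /* current polling time in nanoseconds */
} AioPolledEvent;

typedef struct DemoContext {
    AioPolledEvent poll;        /* the single state this commit introduces */
    AioPolledEvent poll_other;  /* a hypothetical second, independent state */
    int64_t poll_max_ns;        /* shared upper bound, as in AioContext */
} DemoContext;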

@@ -585,7 +585,7 @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
         return false;
     }
 
-    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+    max_ns = qemu_soonest_timeout(*timeout, ctx->poll.ns);
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
         /*
          * Enable poll mode. It pairs with the poll_set_started() in
@@ -683,40 +683,40 @@ bool aio_poll(AioContext *ctx, bool blocking)
     if (ctx->poll_max_ns) {
         int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
 
-        if (block_ns <= ctx->poll_ns) {
+        if (block_ns <= ctx->poll.ns) {
             /* This is the sweet spot, no adjustment needed */
         } else if (block_ns > ctx->poll_max_ns) {
             /* We'd have to poll for too long, poll less */
-            int64_t old = ctx->poll_ns;
+            int64_t old = ctx->poll.ns;
 
             if (ctx->poll_shrink) {
-                ctx->poll_ns /= ctx->poll_shrink;
+                ctx->poll.ns /= ctx->poll_shrink;
             } else {
-                ctx->poll_ns = 0;
+                ctx->poll.ns = 0;
             }
 
-            trace_poll_shrink(ctx, old, ctx->poll_ns);
-        } else if (ctx->poll_ns < ctx->poll_max_ns &&
+            trace_poll_shrink(ctx, old, ctx->poll.ns);
+        } else if (ctx->poll.ns < ctx->poll_max_ns &&
                    block_ns < ctx->poll_max_ns) {
             /* There is room to grow, poll longer */
-            int64_t old = ctx->poll_ns;
+            int64_t old = ctx->poll.ns;
             int64_t grow = ctx->poll_grow;
 
             if (grow == 0) {
                 grow = 2;
             }
 
-            if (ctx->poll_ns) {
-                ctx->poll_ns *= grow;
+            if (ctx->poll.ns) {
+                ctx->poll.ns *= grow;
             } else {
-                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
+                ctx->poll.ns = 4000; /* start polling at 4 microseconds */
             }
 
-            if (ctx->poll_ns > ctx->poll_max_ns) {
-                ctx->poll_ns = ctx->poll_max_ns;
+            if (ctx->poll.ns > ctx->poll_max_ns) {
+                ctx->poll.ns = ctx->poll_max_ns;
             }
 
-            trace_poll_grow(ctx, old, ctx->poll_ns);
+            trace_poll_grow(ctx, old, ctx->poll.ns);
         }
     }
@@ -770,8 +770,9 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
     /* No thread synchronization here, it doesn't matter if an incorrect value
      * is used once.
      */
+    ctx->poll.ns = 0;
+
     ctx->poll_max_ns = max_ns;
-    ctx->poll_ns = 0;
     ctx->poll_grow = grow;
     ctx->poll_shrink = shrink;
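
For readability, the adaptive policy from the aio_poll() hunk above can be read
as a single function over AioPolledEvent. The helper below is a sketch for this
page only: the name adjust_polling_time_sketch() is invented, trace points are
omitted, and this commit keeps the logic inline in aio_poll(); the AioPolledEvent
typedef is repeated just to keep the example self-contained.

#include <stdint.h>

typedef struct AioPolledEvent {
    int64_t ns;                 /* current polling time in nanoseconds */
} AioPolledEvent;

/* block_ns: time the aio_poll() iteration spent waiting for an event,
 * as measured in the hunk above. */
static void adjust_polling_time_sketch(AioPolledEvent *poll, int64_t max_ns,
                                       int64_t grow, int64_t shrink,
                                       int64_t block_ns)
{
    if (block_ns <= poll->ns) {
        /* Sweet spot: the event arrived within the current polling window. */
    } else if (block_ns > max_ns) {
        /* Polling long enough to catch this event would cost too much. */
        poll->ns = shrink ? poll->ns / shrink : 0;
    } else if (poll->ns < max_ns && block_ns < max_ns) {
        /* Room to grow: multiply by 'grow' (default 2), capped at max_ns. */
        if (grow == 0) {
            grow = 2;
        }
        poll->ns = poll->ns ? poll->ns * grow : 4000; /* start at 4 us */
        if (poll->ns > max_ns) {
            poll->ns = max_ns;
        }
    }
}

Worked example with the defaults (grow and shrink unset, poll_max_ns = 32000):
a context that keeps blocking slightly longer than its current window grows
0 -> 4000 -> 8000 -> 16000 -> 32000 ns and then stays capped, while a single
wait longer than 32 us resets the window to 0.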

@@ -609,7 +609,8 @@ AioContext *aio_context_new(Error **errp)
     qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
-    ctx->poll_ns = 0;
+    ctx->poll.ns = 0;
+
     ctx->poll_max_ns = 0;
     ctx->poll_grow = 0;
     ctx->poll_shrink = 0;