mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-03 07:43:54 -06:00
AioContext: export and use aio_dispatch
So far, aio_poll's scheme was dispatch/poll/dispatch, where the first dispatch phase was used only in the GSource case in order to avoid a blocking poll. Earlier patches changed it to dispatch/prepare/poll/dispatch, where prepare is aio_compute_timeout. By making aio_dispatch public, we can remove the first dispatch phase altogether, so that both aio_poll and the GSource use the same prepare/poll/dispatch scheme. This patch breaks the invariant that aio_poll(..., true) will not block the first time it returns false. This used to be fundamental for qemu_aio_flush's implementation as "while (qemu_aio_wait()) {}" but no code in QEMU relies on this invariant anymore. The return value of aio_poll() is now comparable with that of g_main_context_iteration. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
3672fa5083
commit
e4c7e2d12d
4 changed files with 24 additions and 70 deletions
31
aio-win32.c
31
aio-win32.c
|
@@ -130,11 +130,12 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     return progress;
 }
 
-static bool aio_dispatch(AioContext *ctx)
+bool aio_dispatch(AioContext *ctx)
 {
     bool progress;
 
-    progress = aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    progress = aio_bh_poll(ctx);
+    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
     return progress;
 }
@@ -149,23 +150,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     progress = false;
 
-    /*
-     * If there are callbacks left that have been queued, we need to call then.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
-
-    /* Dispatch any pending callbacks from the GSource. */
-    progress |= aio_dispatch(ctx);
-
-    if (progress && !blocking) {
-        return true;
-    }
-
     ctx->walking_handlers++;
 
     /* fill fd sets */
@@ -205,14 +189,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
         events[ret - WAIT_OBJECT_0] = events[--count];
     }
 
-    if (blocking) {
-        /* Run the timers a second time. We do this because otherwise aio_wait
-         * will not note progress - and will stop a drain early - if we have
-         * a timer that was not ready to run entering g_poll but is ready
-         * after g_poll. This will only do anything if a timer has expired.
-         */
-        progress |= timerlistgroup_run_timers(&ctx->tlg);
-    }
+    progress |= timerlistgroup_run_timers(&ctx->tlg);
 
     return progress;
 }
Loading…
Add table
Add a link
Reference in a new issue