mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-04 16:23:55 -06:00
aio: stop using .io_flush()
Now that aio_poll() users check their termination condition themselves, it is no longer necessary to call .io_flush() handlers. The behavior of aio_poll() changes as follows: 1. .io_flush() is no longer invoked and file descriptors are *always* monitored. Previously returning 0 from .io_flush() would skip this file descriptor. Due to this change it is essential to check that requests are pending before calling qemu_aio_wait(). Failure to do so means we block, for example, waiting for an idle iSCSI socket to become readable when there are no requests. Currently all qemu_aio_wait()/aio_poll() callers check before calling. 2. aio_poll() now returns true if progress was made (BH or fd handlers executed) and false otherwise. Previously it would return true whenever 'busy', which means that .io_flush() returned true. The 'busy' concept no longer exists so just progress is returned. Due to this change we need to update tests/test-aio.c which asserts aio_poll() return values. Note that QEMU doesn't actually rely on these return values so only tests/test-aio.c cares. Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is now handled as a special case. This is a little ugly but maintains aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and aio_poll() avoids blocking when the user has not set any fd handlers yet. Patches after this remove .io_flush() handler code until we can finally drop the io_flush arguments to aio_set_fd_handler() and friends. Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
35ecde2601
commit
164a101f28
3 changed files with 28 additions and 45 deletions
|
@@ -254,7 +254,7 @@ static void test_wait_event_notifier(void)
|
|||
EventNotifierTestData data = { .n = 0, .active = 1 };
|
||||
event_notifier_init(&data.e, false);
|
||||
aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
|
||||
g_assert(aio_poll(ctx, false));
|
||||
g_assert(!aio_poll(ctx, false));
|
||||
g_assert_cmpint(data.n, ==, 0);
|
||||
g_assert_cmpint(data.active, ==, 1);
|
||||
|
||||
|
@@ -279,7 +279,7 @@ static void test_flush_event_notifier(void)
|
|||
EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
|
||||
event_notifier_init(&data.e, false);
|
||||
aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
|
||||
g_assert(aio_poll(ctx, false));
|
||||
g_assert(!aio_poll(ctx, false));
|
||||
g_assert_cmpint(data.n, ==, 0);
|
||||
g_assert_cmpint(data.active, ==, 10);
|
||||
|
||||
|
@@ -313,7 +313,7 @@ static void test_wait_event_notifier_noflush(void)
|
|||
/* Until there is an active descriptor, aio_poll may or may not call
|
||||
* event_ready_cb. Still, it must not block. */
|
||||
event_notifier_set(&data.e);
|
||||
g_assert(!aio_poll(ctx, true));
|
||||
g_assert(aio_poll(ctx, true));
|
||||
data.n = 0;
|
||||
|
||||
/* An active event notifier forces aio_poll to look at EventNotifiers. */
|
||||
|
@@ -323,13 +323,13 @@ static void test_wait_event_notifier_noflush(void)
|
|||
event_notifier_set(&data.e);
|
||||
g_assert(aio_poll(ctx, false));
|
||||
g_assert_cmpint(data.n, ==, 1);
|
||||
g_assert(aio_poll(ctx, false));
|
||||
g_assert(!aio_poll(ctx, false));
|
||||
g_assert_cmpint(data.n, ==, 1);
|
||||
|
||||
event_notifier_set(&data.e);
|
||||
g_assert(aio_poll(ctx, false));
|
||||
g_assert_cmpint(data.n, ==, 2);
|
||||
g_assert(aio_poll(ctx, false));
|
||||
g_assert(!aio_poll(ctx, false));
|
||||
g_assert_cmpint(data.n, ==, 2);
|
||||
|
||||
event_notifier_set(&dummy.e);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue