block/linux-aio: convert to blk_io_plug_call() API

Stop using the .bdrv_co_io_plug() API because it is not multi-queue
block layer friendly. Use the new blk_io_plug_call() API to batch I/O
submission instead.
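(For reference, the per-thread API introduced earlier in this series is,
roughly:

    void blk_io_plug(void);
    void blk_io_unplug(void);
    void blk_io_plug_call(void (*fn)(void *), void *opaque);

Inside a blk_io_plug()/blk_io_unplug() region, fn(opaque) is deferred and
deduplicated; outside one, it runs immediately.)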

Note that a dev_max_batch check is dropped in laio_io_unplug() because
the semantics of unplug_fn() are different from .bdrv_co_unplug():
1. unplug_fn() is only called when the last blk_io_unplug() call occurs,
   not every time blk_io_unplug() is called.
2. unplug_fn() is per-thread, not per-BlockDriverState, so there is no
   way to get per-BlockDriverState fields like dev_max_batch.

Therefore this condition cannot be moved to laio_unplug_fn(). It is not
obvious that this condition affects performance in practice, so I am
removing it instead of trying to come up with a more complex mechanism
to preserve the condition.
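
A minimal sketch of the deferred-call semantics described in points 1 and 2
above (illustration only, not part of this patch):

    /* One thread; s is this thread's LinuxAioState. */
    blk_io_plug();                        /* nesting level 1 */
    blk_io_plug();                        /* nesting level 2 */
    blk_io_plug_call(laio_unplug_fn, s);  /* deferred, not called yet */
    blk_io_plug_call(laio_unplug_fn, s);  /* same fn/opaque pair: queued only once */
    blk_io_unplug();                      /* still nested: nothing runs */
    blk_io_unplug();                      /* last unplug: laio_unplug_fn(s) runs once */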

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20230530180959.1108766-6-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

diff --git a/block/linux-aio.c b/block/linux-aio.c
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -15,6 +15,7 @@
 #include "qemu/event_notifier.h"
 #include "qemu/coroutine.h"
 #include "qapi/error.h"
+#include "sysemu/block-backend.h"
 
 /* Only used for assertions. */
 #include "qemu/coroutine_int.h"
@@ -46,7 +47,6 @@ struct qemu_laiocb {
 };
 
 typedef struct {
-    int plugged;
     unsigned int in_queue;
     unsigned int in_flight;
     bool blocked;
@@ -236,7 +236,7 @@ static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
 {
     qemu_laio_process_completions(s);
 
-    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
+    if (!QSIMPLEQ_EMPTY(&s->io_q.pending)) {
         ioq_submit(s);
     }
 }
@@ -277,7 +277,6 @@ static void qemu_laio_poll_ready(EventNotifier *opaque)
 static void ioq_init(LaioQueue *io_q)
 {
     QSIMPLEQ_INIT(&io_q->pending);
-    io_q->plugged = 0;
     io_q->in_queue = 0;
     io_q->in_flight = 0;
     io_q->blocked = false;
@@ -354,31 +353,11 @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
     return max_batch;
 }
 
-void laio_io_plug(void)
+static void laio_unplug_fn(void *opaque)
 {
-    AioContext *ctx = qemu_get_current_aio_context();
-    LinuxAioState *s = aio_get_linux_aio(ctx);
+    LinuxAioState *s = opaque;
 
-    s->io_q.plugged++;
-}
-
-void laio_io_unplug(uint64_t dev_max_batch)
-{
-    AioContext *ctx = qemu_get_current_aio_context();
-    LinuxAioState *s = aio_get_linux_aio(ctx);
-
-    assert(s->io_q.plugged);
-    s->io_q.plugged--;
-
-    /*
-     * Why max batch checking is performed here:
-     * Another BDS may have queued requests with a higher dev_max_batch and
-     * therefore in_queue could now exceed our dev_max_batch. Re-check the max
-     * batch so we can honor our device's dev_max_batch.
-     */
-    if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) ||
-        (!s->io_q.plugged &&
-         !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) {
+    if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
         ioq_submit(s);
     }
 }
@@ -410,10 +389,12 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
 
     QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
     s->io_q.in_queue++;
-    if (!s->io_q.blocked &&
-        (!s->io_q.plugged ||
-         s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) {
-        ioq_submit(s);
+    if (!s->io_q.blocked) {
+        if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
+            ioq_submit(s);
+        } else {
+            blk_io_plug_call(laio_unplug_fn, s);
+        }
     }
 
     return 0;
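
After this change, laio_do_submit() flushes immediately only when in_queue
reaches laio_max_batch(); otherwise it defers submission to laio_unplug_fn()
via blk_io_plug_call(). A hedged caller-side sketch (submit_one_request() is
a hypothetical stand-in for any path that reaches laio_do_submit()):

    blk_io_plug();
    for (int i = 0; i < n; i++) {
        submit_one_request(i);  /* hypothetical helper; queues into io_q.pending */
    }
    blk_io_unplug();            /* outermost unplug: laio_unplug_fn() submits the batch */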