block: rename blk_io_plug_call() API to defer_call()

Prepare to move the blk_io_plug_call() API out of the block layer so
that other subsystems can use this deferred call mechanism. Rename it
to defer_call() but leave the code in block/plug.c.

The next commit will move the code out of the block layer.

Suggested-by: Ilya Maximets <i.maximets@ovn.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20230913200045.1024233-2-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author:    Stefan Hajnoczi, 2023-09-13 16:00:42 -04:00
Committer: Kevin Wolf
Commit:    ccee48aa73
Parent:    302823854b
9 changed files with 76 additions and 79 deletions
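
For orientation, a minimal usage sketch of the renamed API, based on the
documentation in block/plug.c below. The ExampleQueue type, flush_queue() and
handle_requests() are hypothetical stand-ins for a caller; only the
defer_call_begin()/defer_call()/defer_call_end() names come from this patch:

    #include "qemu/osdep.h"
    #include "sysemu/block-backend.h"  /* pulls in the defer_call_*() declarations after this patch */

    typedef struct {
        unsigned pending;  /* requests queued but not yet submitted */
        unsigned flushes;  /* how many times flush_queue() actually ran */
    } ExampleQueue;

    /* Deferred callback: submit everything queued so far */
    static void flush_queue(void *opaque)
    {
        ExampleQueue *q = opaque;

        q->pending = 0;
        q->flushes++;  /* with batching this increments once per section */
    }

    static void handle_requests(ExampleQueue *q)
    {
        defer_call_begin();                /* start of deferred section */

        for (int i = 0; i < 3; i++) {
            q->pending++;                  /* "queue" a request */
            defer_call(flush_queue, q);    /* duplicate fn/opaque pairs coalesce */
        }

        defer_call_end();                  /* flush_queue(q) runs exactly once here */
    }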

--- a/block/blkio.c
+++ b/block/blkio.c
@@ -312,10 +312,10 @@ static void blkio_detach_aio_context(BlockDriverState *bs)
 }
 
 /*
- * Called by blk_io_unplug() or immediately if not plugged. Called without
- * blkio_lock.
+ * Called by defer_call_end() or immediately if not in a deferred section.
+ * Called without blkio_lock.
  */
-static void blkio_unplug_fn(void *opaque)
+static void blkio_deferred_fn(void *opaque)
 {
     BDRVBlkioState *s = opaque;
 
@@ -332,7 +332,7 @@ static void blkio_submit_io(BlockDriverState *bs)
 {
     BDRVBlkioState *s = bs->opaque;
 
-    blk_io_plug_call(blkio_unplug_fn, s);
+    defer_call(blkio_deferred_fn, s);
 }
 
 static int coroutine_fn

--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -306,7 +306,7 @@ static void ioq_init(LuringQueue *io_q)
     io_q->blocked = false;
 }
 
-static void luring_unplug_fn(void *opaque)
+static void luring_deferred_fn(void *opaque)
 {
     LuringState *s = opaque;
     trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
@@ -367,7 +367,7 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
             return ret;
         }
 
-        blk_io_plug_call(luring_unplug_fn, s);
+        defer_call(luring_deferred_fn, s);
     }
     return 0;
 }

--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -353,7 +353,7 @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
     return max_batch;
 }
 
-static void laio_unplug_fn(void *opaque)
+static void laio_deferred_fn(void *opaque)
 {
     LinuxAioState *s = opaque;
 
@@ -393,7 +393,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
     if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
         ioq_submit(s);
     } else {
-        blk_io_plug_call(laio_unplug_fn, s);
+        defer_call(laio_deferred_fn, s);
     }
 }

--- a/block/nvme.c
+++ b/block/nvme.c
@@ -476,7 +476,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }
 
-static void nvme_unplug_fn(void *opaque)
+static void nvme_deferred_fn(void *opaque)
 {
     NVMeQueuePair *q = opaque;
 
@@ -503,7 +503,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
     q->need_kick++;
     qemu_mutex_unlock(&q->lock);
 
-    blk_io_plug_call(nvme_unplug_fn, q);
+    defer_call(nvme_deferred_fn, q);
 }
 
 static void nvme_admin_cmd_sync_cb(void *opaque, int ret)

--- a/block/plug.c
+++ b/block/plug.c
@@ -1,24 +1,21 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
- * Block I/O plugging
+ * Deferred calls
  *
  * Copyright Red Hat.
  *
- * This API defers a function call within a blk_io_plug()/blk_io_unplug()
+ * This API defers a function call within a defer_call_begin()/defer_call_end()
  * section, allowing multiple calls to batch up. This is a performance
  * optimization that is used in the block layer to submit several I/O requests
  * at once instead of individually:
  *
- *   blk_io_plug(); <-- start of plugged region
+ *   defer_call_begin(); <-- start of section
  *   ...
- *   blk_io_plug_call(my_func, my_obj); <-- deferred my_func(my_obj) call
- *   blk_io_plug_call(my_func, my_obj); <-- another
- *   blk_io_plug_call(my_func, my_obj); <-- another
+ *   defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
+ *   defer_call(my_func, my_obj); <-- another
+ *   defer_call(my_func, my_obj); <-- another
  *   ...
- *   blk_io_unplug(); <-- end of plugged region, my_func(my_obj) is called once
- *
- * This code is actually generic and not tied to the block layer. If another
- * subsystem needs this functionality, it could be renamed.
+ *   defer_call_end(); <-- end of section, my_func(my_obj) is called once
  */
 
 #include "qemu/osdep.h"
@@ -27,66 +24,66 @@
 #include "qemu/thread.h"
 #include "sysemu/block-backend.h"
 
-/* A function call that has been deferred until unplug() */
+/* A function call that has been deferred until defer_call_end() */
 typedef struct {
     void (*fn)(void *);
     void *opaque;
-} UnplugFn;
+} DeferredCall;
 
 /* Per-thread state */
 typedef struct {
-    unsigned count;       /* how many times has plug() been called? */
-    GArray *unplug_fns;   /* functions to call at unplug time */
-} Plug;
+    unsigned nesting_level;
+    GArray *deferred_call_array;
+} DeferCallThreadState;
 
-/* Use get_ptr_plug() to fetch this thread-local value */
-QEMU_DEFINE_STATIC_CO_TLS(Plug, plug);
+/* Use get_ptr_defer_call_thread_state() to fetch this thread-local value */
+QEMU_DEFINE_STATIC_CO_TLS(DeferCallThreadState, defer_call_thread_state);
 
 /* Called at thread cleanup time */
-static void blk_io_plug_atexit(Notifier *n, void *value)
+static void defer_call_atexit(Notifier *n, void *value)
 {
-    Plug *plug = get_ptr_plug();
-    g_array_free(plug->unplug_fns, TRUE);
+    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
+    g_array_free(thread_state->deferred_call_array, TRUE);
 }
 
 /* This won't involve coroutines, so use __thread */
-static __thread Notifier blk_io_plug_atexit_notifier;
+static __thread Notifier defer_call_atexit_notifier;
 
 /**
- * blk_io_plug_call:
+ * defer_call:
  * @fn: a function pointer to be invoked
  * @opaque: a user-defined argument to @fn()
  *
- * Call @fn(@opaque) immediately if not within a blk_io_plug()/blk_io_unplug()
- * section.
+ * Call @fn(@opaque) immediately if not within a
+ * defer_call_begin()/defer_call_end() section.
  *
  * Otherwise defer the call until the end of the outermost
- * blk_io_plug()/blk_io_unplug() section in this thread. If the same
+ * defer_call_begin()/defer_call_end() section in this thread. If the same
  * @fn/@opaque pair has already been deferred, it will only be called once upon
- * blk_io_unplug() so that accumulated calls are batched into a single call.
+ * defer_call_end() so that accumulated calls are batched into a single call.
  *
  * The caller must ensure that @opaque is not freed before @fn() is invoked.
  */
-void blk_io_plug_call(void (*fn)(void *), void *opaque)
+void defer_call(void (*fn)(void *), void *opaque)
 {
-    Plug *plug = get_ptr_plug();
+    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
 
-    /* Call immediately if we're not plugged */
-    if (plug->count == 0) {
+    /* Call immediately if we're not deferring calls */
+    if (thread_state->nesting_level == 0) {
         fn(opaque);
         return;
    }
 
-    GArray *array = plug->unplug_fns;
+    GArray *array = thread_state->deferred_call_array;
     if (!array) {
-        array = g_array_new(FALSE, FALSE, sizeof(UnplugFn));
-        plug->unplug_fns = array;
-        blk_io_plug_atexit_notifier.notify = blk_io_plug_atexit;
-        qemu_thread_atexit_add(&blk_io_plug_atexit_notifier);
+        array = g_array_new(FALSE, FALSE, sizeof(DeferredCall));
+        thread_state->deferred_call_array = array;
+        defer_call_atexit_notifier.notify = defer_call_atexit;
+        qemu_thread_atexit_add(&defer_call_atexit_notifier);
     }
 
-    UnplugFn *fns = (UnplugFn *)array->data;
-    UnplugFn new_fn = {
+    DeferredCall *fns = (DeferredCall *)array->data;
+    DeferredCall new_fn = {
         .fn = fn,
         .opaque = opaque,
     };
@@ -106,46 +103,46 @@ void blk_io_plug_call(void (*fn)(void *), void *opaque)
 }
 
 /**
- * blk_io_plug: Defer blk_io_plug_call() functions until blk_io_unplug()
+ * defer_call_begin: Defer defer_call() functions until defer_call_end()
  *
- * blk_io_plug/unplug are thread-local operations. This means that multiple
- * threads can simultaneously call plug/unplug, but the caller must ensure that
- * each unplug() is called in the same thread of the matching plug().
+ * defer_call_begin() and defer_call_end() are thread-local operations. The
+ * caller must ensure that each defer_call_begin() has a matching
+ * defer_call_end() in the same thread.
  *
- * Nesting is supported. blk_io_plug_call() functions are only called at the
- * outermost blk_io_unplug().
+ * Nesting is supported. defer_call() functions are only called at the
+ * outermost defer_call_end().
  */
-void blk_io_plug(void)
+void defer_call_begin(void)
 {
-    Plug *plug = get_ptr_plug();
+    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
 
-    assert(plug->count < UINT32_MAX);
+    assert(thread_state->nesting_level < UINT32_MAX);
 
-    plug->count++;
+    thread_state->nesting_level++;
 }
 
 /**
- * blk_io_unplug: Run any pending blk_io_plug_call() functions
+ * defer_call_end: Run any pending defer_call() functions
  *
- * There must have been a matching blk_io_plug() call in the same thread prior
- * to this blk_io_unplug() call.
+ * There must have been a matching defer_call_begin() call in the same thread
+ * prior to this defer_call_end() call.
  */
-void blk_io_unplug(void)
+void defer_call_end(void)
 {
-    Plug *plug = get_ptr_plug();
+    DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
 
-    assert(plug->count > 0);
+    assert(thread_state->nesting_level > 0);
 
-    if (--plug->count > 0) {
+    if (--thread_state->nesting_level > 0) {
         return;
     }
 
-    GArray *array = plug->unplug_fns;
+    GArray *array = thread_state->deferred_call_array;
     if (!array) {
         return;
     }
 
-    UnplugFn *fns = (UnplugFn *)array->data;
+    DeferredCall *fns = (DeferredCall *)array->data;
 
     for (guint i = 0; i < array->len; i++) {
         fns[i].fn(fns[i].opaque);
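
As a second hedged sketch, reusing the hypothetical ExampleQueue/flush_queue()
from the example near the top of this page, the nesting rule documented above
means deferred functions only run at the outermost defer_call_end():

    static void nested_example(ExampleQueue *q)
    {
        defer_call_begin();              /* nesting_level: 0 -> 1 */
        defer_call(flush_queue, q);      /* deferred */

        defer_call_begin();              /* nesting_level: 1 -> 2 */
        defer_call(flush_queue, q);      /* coalesced with the call above */
        defer_call_end();                /* 2 -> 1: nothing runs yet */

        defer_call_end();                /* 1 -> 0: flush_queue(q) runs once here */
    }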

--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -509,7 +509,7 @@ static int xen_block_get_request(XenBlockDataPlane *dataplane,
 /*
  * Threshold of in-flight requests above which we will start using
- * blk_io_plug()/blk_io_unplug() to batch requests.
+ * defer_call_begin()/defer_call_end() to batch requests.
  */
 #define IO_PLUG_THRESHOLD 1
 
@@ -537,7 +537,7 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
      * is below us.
      */
     if (inflight_atstart > IO_PLUG_THRESHOLD) {
-        blk_io_plug();
+        defer_call_begin();
     }
     while (rc != rp) {
         /* pull request from ring */
@@ -577,12 +577,12 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
         if (inflight_atstart > IO_PLUG_THRESHOLD &&
             batched >= inflight_atstart) {
-            blk_io_unplug();
+            defer_call_end();
         }
         xen_block_do_aio(request);
         if (inflight_atstart > IO_PLUG_THRESHOLD) {
             if (batched >= inflight_atstart) {
-                blk_io_plug();
+                defer_call_begin();
                 batched = 0;
             } else {
                 batched++;
@@ -590,7 +590,7 @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
         }
     }
     if (inflight_atstart > IO_PLUG_THRESHOLD) {
-        blk_io_unplug();
+        defer_call_end();
     }
 
     return done_something;

--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1134,7 +1134,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
     bool suppress_notifications = virtio_queue_get_notification(vq);
 
     aio_context_acquire(blk_get_aio_context(s->blk));
-    blk_io_plug();
+    defer_call_begin();
 
     do {
         if (suppress_notifications) {
@@ -1158,7 +1158,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
         virtio_blk_submit_multireq(s, &mrb);
     }
 
-    blk_io_unplug();
+    defer_call_end();
     aio_context_release(blk_get_aio_context(s->blk));
 }

--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -799,7 +799,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
         return -ENOBUFS;
     }
     scsi_req_ref(req->sreq);
-    blk_io_plug();
+    defer_call_begin();
     object_unref(OBJECT(d));
     return 0;
 }
@@ -810,7 +810,7 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
     if (scsi_req_enqueue(sreq)) {
         scsi_req_continue(sreq);
     }
-    blk_io_unplug();
+    defer_call_end();
     scsi_req_unref(sreq);
 }
 
@@ -836,7 +836,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
     while (!QTAILQ_EMPTY(&reqs)) {
         req = QTAILQ_FIRST(&reqs);
         QTAILQ_REMOVE(&reqs, req, next);
-        blk_io_unplug();
+        defer_call_end();
         scsi_req_unref(req->sreq);
         virtqueue_detach_element(req->vq, &req->elem, 0);
         virtio_scsi_free_req(req);

--- a/include/sysemu/block-backend-io.h
+++ b/include/sysemu/block-backend-io.h
@@ -100,9 +100,9 @@ void blk_iostatus_set_err(BlockBackend *blk, int error);
 int blk_get_max_iov(BlockBackend *blk);
 int blk_get_max_hw_iov(BlockBackend *blk);
 
-void blk_io_plug(void);
-void blk_io_unplug(void);
-void blk_io_plug_call(void (*fn)(void *), void *opaque);
+void defer_call_begin(void);
+void defer_call_end(void);
+void defer_call(void (*fn)(void *), void *opaque);
 
 AioContext *blk_get_aio_context(BlockBackend *blk);
 BlockAcctStats *blk_get_stats(BlockBackend *blk);