Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-17 15:12:07 -06:00)
Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging

Pull request

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmPO6D0ACgkQnKSrs4Gr
# c8jU2wf+O+0JmsRUuCYera0eXA8YfZyFxa7+A5fy6izyNugJMmHx+Nse9IsvLqGo
# pLTMnc0HH7lLG8ofX9M93M1BOT2a3f//CrZQimfWuPAlKWUkpuOGOepEwbBxt247
# DQAvxESjclZ9anVeSuKBmpz8u7S4H9AYuLupFh9bXZW0C+wgmbZp7Ak7+LNqcbaC
# TwasPgbHVji6j9IuKo1yJfr2f2csjb2zpock1m5E/BRCQxomKdtdFGs4LcHdWqNR
# NVBFc89SNDJknaihkgjxxXvDFjtb96DOQaI7UuFxhCfTae+gJMDIdoUoJoSpQh1j
# dMQ8pKRR0zN7ndZg0ozxT7qxJPp6LA==
# =Xju6
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 23 Jan 2023 20:04:13 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  block/blkio: Fix inclusion of required headers
  virtio-blk: simplify virtio_blk_dma_restart_cb()
  util/aio: Defer disabling poll mode as long as possible

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 13356edb87
5 changed files with 43 additions and 45 deletions
diff --git a/block/blkio.c b/block/blkio.c
@@ -19,6 +19,8 @@
 #include "qemu/module.h"
 #include "exec/memory.h" /* for ram_block_discard_disable() */
 
+#include "block/block-io.h"
+
 /*
  * Keep the QEMU BlockDriver names identical to the libblkio driver names.
  * Using macros instead of typing out the string literals avoids typos.
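Note: the added #include "block/block-io.h" makes the driver's dependency on the I/O-path declarations explicit rather than relying on indirect inclusion through other headers, which is what the commit title "Fix inclusion of required headers" refers to. The kept context comment describes the driver-name macro pattern used throughout this file; a minimal sketch of that idea (one macro and two fields only, not the file's full driver table): define the libblkio driver name once and reuse the token everywhere, so a typo becomes a compile error instead of a silently mismatched string.

/* Sketch only; block/blkio.c defines several driver-name macros like this. */
#define DRIVER_IO_URING "io_uring"   /* must equal the libblkio driver name */

static BlockDriver bdrv_io_uring = {
    .format_name   = DRIVER_IO_URING,   /* reused, never retyped */
    .protocol_name = DRIVER_IO_URING,
    /* ... open/close and I/O callbacks ... */
};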
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
@@ -237,9 +237,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         goto fail_aio_context;
     }
 
-    /* Process queued requests before the ones in vring */
-    virtio_blk_process_queued_requests(vblk, false);
-
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
@@ -272,11 +269,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
   fail_host_notifiers:
     k->set_guest_notifiers(qbus->parent, nvqs, false);
   fail_guest_notifiers:
-    /*
-     * If we failed to set up the guest notifiers queued requests will be
-     * processed on the main context.
-     */
-    virtio_blk_process_queued_requests(vblk, false);
     vblk->dataplane_disabled = true;
     s->starting = false;
     vblk->dataplane_started = true;
@@ -325,8 +317,13 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
-    /* Drain and try to switch bs back to the QEMU main loop. If other users
-     * keep the BlockBackend in the iothread, that's ok */
+    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
+    blk_drain(s->conf->conf.blk);
+
+    /*
+     * Try to switch bs back to the QEMU main loop. If other users keep the
+     * BlockBackend in the iothread, that's ok
+     */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
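Note: this stop-path change is the counterpart of the virtio_blk_dma_restart_cb() rework below. blk_drain() returns once blk's in-flight counter reaches zero, which now includes a pending restart bottom half (it holds a blk_inc_in_flight() reference), so nothing is still scheduled when the BlockBackend moves back to the main loop. A hedged sketch of the quiesce-then-move shape, with a hypothetical device type:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

typedef struct {           /* hypothetical, for illustration only */
    AioContext *ctx;       /* the IOThread's AioContext */
    BlockBackend *blk;
} MyDataplane;

static void my_dataplane_stop(MyDataplane *d)
{
    aio_context_acquire(d->ctx);

    /* Wait for in-flight I/O, including BHs that hold in-flight refs. */
    blk_drain(d->blk);

    /* Try to switch blk back to the main loop; other users may keep it
     * in the iothread, and that's ok (see the comment in the hunk above). */
    blk_set_aio_context(d->blk, qemu_get_aio_context(), NULL);

    aio_context_release(d->ctx);
}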
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
@@ -806,8 +806,10 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     virtio_blk_handle_vq(s, vq);
 }
 
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
+static void virtio_blk_dma_restart_bh(void *opaque)
 {
+    VirtIOBlock *s = opaque;
+
     VirtIOBlockReq *req = s->rq;
     MultiReqBuffer mrb = {};
 
@@ -834,43 +836,27 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s, &mrb);
     }
-    if (is_bh) {
-        blk_dec_in_flight(s->conf.conf.blk);
-    }
+
+    /* Paired with inc in virtio_blk_dma_restart_cb() */
+    blk_dec_in_flight(s->conf.conf.blk);
+
     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
-static void virtio_blk_dma_restart_bh(void *opaque)
-{
-    VirtIOBlock *s = opaque;
-
-    qemu_bh_delete(s->bh);
-    s->bh = NULL;
-
-    virtio_blk_process_queued_requests(s, true);
-}
-
 static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                       RunState state)
 {
     VirtIOBlock *s = opaque;
-    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
-    VirtioBusState *bus = VIRTIO_BUS(qbus);
 
     if (!running) {
         return;
     }
 
-    /*
-     * If ioeventfd is enabled, don't schedule the BH here as queued
-     * requests will be processed while starting the data plane.
-     */
-    if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) {
-        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
-                           virtio_blk_dma_restart_bh, s);
-        blk_inc_in_flight(s->conf.conf.blk);
-        qemu_bh_schedule(s->bh);
-    }
+    /* Paired with dec in virtio_blk_dma_restart_bh() */
+    blk_inc_in_flight(s->conf.conf.blk);
+
+    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
+                            virtio_blk_dma_restart_bh, s);
 }
 
 static void virtio_blk_reset(VirtIODevice *vdev)
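Note: the simplification above removes the hand-managed QEMUBH (the s->bh field, qemu_bh_delete(), and the is_bh flag threaded through the old virtio_blk_process_queued_requests()) in favour of aio_bh_schedule_oneshot() plus an in-flight reference that keeps the BlockBackend busy until the bottom half has run; that reference is exactly what the new blk_drain() in the dataplane stop path waits for. A minimal sketch of the idiom, with hypothetical type and helper names:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

typedef struct {           /* hypothetical device, for illustration only */
    BlockBackend *blk;
} MyDev;

static void my_restart_bh(void *opaque)
{
    MyDev *s = opaque;

    /* ... resubmit queued requests here ... */

    /* Paired with the inc below; once this runs, drains stop waiting. */
    blk_dec_in_flight(s->blk);
}

static void my_schedule_restart(MyDev *s)
{
    /* Keep blk busy so a concurrent blk_drain() waits for the BH. */
    blk_inc_in_flight(s->blk);

    /* One-shot BHs are freed by the event loop after running: no QEMUBH
     * field to track, no qemu_bh_delete(), no already-scheduled check. */
    aio_bh_schedule_oneshot(blk_get_aio_context(s->blk), my_restart_bh, s);
}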
@@ -1213,7 +1199,13 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
+    /*
+     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
+     * called after ->start_ioeventfd() has already set blk's AioContext.
+     */
+    s->change =
+        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
+
     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
 
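Note: two things change in realize. The registration moves after virtio_init(), so (per the new comment) virtio_blk_dma_restart_cb() fires only once ->start_ioeventfd() has set blk's AioContext, and it switches to the device-scoped qdev_add_vm_change_state_handler(), which orders change-state callbacks along the qdev hierarchy rather than by bare registration order. Usage shape as a sketch (names hypothetical, signature real):

#include "qemu/osdep.h"
#include "sysemu/runstate.h"

/* Hypothetical callback; the signature is that of VMChangeStateHandler. */
static void my_restart_cb(void *opaque, bool running, RunState state)
{
    if (!running) {
        return;                /* only act on the transition to running */
    }
    /* ... earlier (parent/transport) handlers have already run here ... */
}

/* In realize(), after the device's internal state exists:            */
/*   s->change = qdev_add_vm_change_state_handler(dev, my_restart_cb, s); */
/* In unrealize():                                                    */
/*   qemu_del_vm_change_state_handler(s->change);                     */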
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
@@ -55,7 +55,6 @@ struct VirtIOBlock {
     VirtIODevice parent_obj;
     BlockBackend *blk;
     void *rq;
-    QEMUBH *bh;
     VirtIOBlkConf conf;
     unsigned short sector_mask;
     bool original_wce;
@@ -93,6 +92,5 @@ typedef struct MultiReqBuffer {
 } MultiReqBuffer;
 
 void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
 
 #endif
diff --git a/util/aio-posix.c b/util/aio-posix.c
@@ -585,18 +585,16 @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
 
     max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
+        /*
+         * Enable poll mode. It pairs with the poll_set_started() in
+         * aio_poll() which disables poll mode.
+         */
         poll_set_started(ctx, ready_list, true);
 
         if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
             return true;
         }
     }
-
-    if (poll_set_started(ctx, ready_list, false)) {
-        *timeout = 0;
-        return true;
-    }
-
     return false;
 }
 
@@ -657,6 +655,17 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * system call---a single round of run_poll_handlers_once suffices.
      */
     if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
+        /*
+         * Disable poll mode. poll mode should be disabled before the call
+         * of ctx->fdmon_ops->wait() so that guest's notification can wake
+         * up IO threads when some work becomes pending. It is essential to
+         * avoid hangs or unnecessary latency.
+         */
+        if (poll_set_started(ctx, &ready_list, false)) {
+            timeout = 0;
+            progress = true;
+        }
+
         ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
     }
 
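Note: together the two hunks move the disable half of the pairing. try_poll_mode() previously turned polling back off whenever a poll attempt made no progress, toggling every handler's io_poll_begin()/io_poll_end() on each idle iteration; now the enable persists and aio_poll() drops out of poll mode only immediately before blocking in ctx->fdmon_ops->wait(), where staying in poll mode would leave the guest's fd notification unable to wake the IO thread. A self-contained toy model of the pairing (all names hypothetical; the real logic lives in util/aio-posix.c):

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool poll_started;            /* models the AioContext poll state */
} Ctx;

/* Returns true only on an actual transition, mirroring how the real
 * poll_set_started() return value feeds "progress" in aio_poll(). */
static bool poll_set_started(Ctx *ctx, bool started)
{
    if (ctx->poll_started == started) {
        return false;
    }
    ctx->poll_started = started;  /* real code runs io_poll_begin/end here */
    return true;
}

/* Enable polling; on failure, polling now stays enabled. */
static bool try_poll_mode(Ctx *ctx, bool poll_succeeded)
{
    poll_set_started(ctx, true);  /* pairs with the disable below */
    return poll_succeeded;
}

static void aio_poll_once(Ctx *ctx, bool poll_succeeded, bool will_block)
{
    if (try_poll_mode(ctx, poll_succeeded)) {
        return;                   /* handled by polling, no syscall */
    }
    if (will_block) {
        /* Deferred to the last moment: disable just before wait() so the
         * guest's notification can wake the IO thread. */
        poll_set_started(ctx, false);
        /* ctx->fdmon_ops->wait(...) would block here in the real code. */
    }
}

int main(void)
{
    Ctx ctx = { .poll_started = false };

    aio_poll_once(&ctx, true, false);   /* busy: polling enabled */
    aio_poll_once(&ctx, true, false);   /* still busy: no toggling at all */
    aio_poll_once(&ctx, false, true);   /* idle: disabled right before wait */

    printf("poll_started=%d\n", ctx.poll_started);   /* prints 0 */
    return 0;
}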