block: remove AioContext locking
This is the big patch that removes aio_context_acquire()/aio_context_release()
from the block layer and affected block layer users. There isn't a clean way
to split this patch and the reviewers are likely the same group of people, so
I decided to do it in one patch.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-ID: <20231205182011.1976568-7-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent 6bc30f1949
commit b49f4755c7
41 changed files with 104 additions and 1169 deletions
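For orientation before the hunks: the pattern this commit deletes wrapped every block layer call in the BlockBackend's AioContext lock. Below is a minimal before/after sketch distilled from the diff; the wrapper function names and the bare "blk" argument are illustrative, not from the tree.

/* Sketch only, not a buildable unit. Before this commit, callers had to
 * bracket block layer calls with the BlockBackend's AioContext lock: */
static void drain_with_locking(BlockBackend *blk)
{
    AioContext *ctx = blk_get_aio_context(blk);

    aio_context_acquire(ctx);
    blk_drain(blk);             /* any block layer call went here */
    aio_context_release(ctx);
}

/* After this commit, the acquire/release pair is gone and callers
 * invoke the block layer directly: */
static void drain_without_locking(BlockBackend *blk)
{
    blk_drain(blk);
}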
hw/block/dataplane/virtio-blk.c

@@ -124,7 +124,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     VirtIOBlockDataPlane *s = vblk->dataplane;
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
-    AioContext *old_context;
     unsigned i;
     unsigned nvqs = s->conf->num_queues;
     Error *local_err = NULL;
@@ -178,10 +177,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
 
     trace_virtio_blk_data_plane_start(s);
 
-    old_context = blk_get_aio_context(s->conf->conf.blk);
-    aio_context_acquire(old_context);
     r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
-    aio_context_release(old_context);
     if (r < 0) {
         error_report_err(local_err);
         goto fail_aio_context;
@@ -208,13 +204,11 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
 
     /* Get this show started by hooking up our callbacks */
     if (!blk_in_drain(s->conf->conf.blk)) {
-        aio_context_acquire(s->ctx);
         for (i = 0; i < nvqs; i++) {
             VirtQueue *vq = virtio_get_queue(s->vdev, i);
 
             virtio_queue_aio_attach_host_notifier(vq, s->ctx);
         }
-        aio_context_release(s->ctx);
     }
     return 0;
@@ -314,8 +308,6 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
      */
     vblk->dataplane_started = false;
 
-    aio_context_acquire(s->ctx);
-
     /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
     blk_drain(s->conf->conf.blk);
@@ -325,8 +317,6 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
      */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
-    aio_context_release(s->ctx);
-
     /* Clean up guest notifier (irq) */
     k->set_guest_notifiers(qbus->parent, nvqs, false);
hw/block/dataplane/xen-block.c

@@ -260,8 +260,6 @@ static void xen_block_complete_aio(void *opaque, int ret)
     XenBlockRequest *request = opaque;
     XenBlockDataPlane *dataplane = request->dataplane;
 
-    aio_context_acquire(dataplane->ctx);
-
     if (ret != 0) {
         error_report("%s I/O error",
                      request->req.operation == BLKIF_OP_READ ?
@@ -273,10 +271,10 @@ static void xen_block_complete_aio(void *opaque, int ret)
     if (request->presync) {
         request->presync = 0;
         xen_block_do_aio(request);
-        goto done;
+        return;
     }
     if (request->aio_inflight > 0) {
-        goto done;
+        return;
     }
 
     switch (request->req.operation) {
@@ -318,9 +316,6 @@ static void xen_block_complete_aio(void *opaque, int ret)
     if (dataplane->more_work) {
         qemu_bh_schedule(dataplane->bh);
     }
-
-done:
-    aio_context_release(dataplane->ctx);
 }
 
 static bool xen_block_split_discard(XenBlockRequest *request,
@@ -601,9 +596,7 @@ static void xen_block_dataplane_bh(void *opaque)
 {
     XenBlockDataPlane *dataplane = opaque;
 
-    aio_context_acquire(dataplane->ctx);
     xen_block_handle_requests(dataplane);
-    aio_context_release(dataplane->ctx);
 }
 
 static bool xen_block_dataplane_event(void *opaque)
@@ -703,10 +696,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
         xen_block_dataplane_detach(dataplane);
     }
 
-    aio_context_acquire(dataplane->ctx);
     /* Xen doesn't have multiple users for nodes, so this can't fail */
     blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
-    aio_context_release(dataplane->ctx);
 
     /*
     * Now that the context has been moved onto the main thread, cancel
@@ -752,7 +743,6 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
 {
     ERRP_GUARD();
     XenDevice *xendev = dataplane->xendev;
-    AioContext *old_context;
     unsigned int ring_size;
     unsigned int i;
@@ -836,11 +826,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
         goto stop;
     }
 
-    old_context = blk_get_aio_context(dataplane->blk);
-    aio_context_acquire(old_context);
     /* If other users keep the BlockBackend in the iothread, that's ok */
     blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
-    aio_context_release(old_context);
 
     if (!blk_in_drain(dataplane->blk)) {
         xen_block_dataplane_attach(dataplane);
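A knock-on cleanup worth noting in xen_block_complete_aio() above: the done: label existed only so that early exits could fall through to aio_context_release(). With the lock gone, each goto done collapses into a plain return.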
hw/block/virtio-blk.c

@@ -1210,17 +1210,13 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
 static void virtio_blk_reset(VirtIODevice *vdev)
 {
     VirtIOBlock *s = VIRTIO_BLK(vdev);
-    AioContext *ctx;
     VirtIOBlockReq *req;
 
     /* Dataplane has stopped... */
     assert(!s->dataplane_started);
 
     /* ...but requests may still be in flight. */
-    ctx = blk_get_aio_context(s->blk);
-    aio_context_acquire(ctx);
     blk_drain(s->blk);
-    aio_context_release(ctx);
 
     /* We drop queued requests after blk_drain() because blk_drain() itself can
      * produce them. */
@@ -1250,10 +1246,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
     uint64_t capacity;
     int64_t length;
     int blk_size = conf->logical_block_size;
-    AioContext *ctx;
-
-    ctx = blk_get_aio_context(s->blk);
-    aio_context_acquire(ctx);
 
     blk_get_geometry(s->blk, &capacity);
     memset(&blkcfg, 0, sizeof(blkcfg));
@@ -1277,7 +1269,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
      * per track (cylinder).
      */
     length = blk_getlength(s->blk);
-    aio_context_release(ctx);
     if (length > 0 && length / conf->heads / conf->secs % blk_size) {
         blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
     } else {
@@ -1344,9 +1335,7 @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
 
     memcpy(&blkcfg, config, s->config_size);
 
-    aio_context_acquire(blk_get_aio_context(s->blk));
     blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
-    aio_context_release(blk_get_aio_context(s->blk));
 }
 
 static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
@@ -1414,11 +1403,9 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
      * s->blk would erroneously be placed in writethrough mode.
      */
     if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
-        aio_context_acquire(blk_get_aio_context(s->blk));
         blk_set_enable_write_cache(s->blk,
                                    virtio_vdev_has_feature(vdev,
                                                            VIRTIO_BLK_F_WCE));
-        aio_context_release(blk_get_aio_context(s->blk));
     }
 }