block: remove AioContext locking

This is the big patch that removes
aio_context_acquire()/aio_context_release() from the block layer and
affected block layer users.

There isn't a clean way to split this patch and the reviewers are likely
the same group of people, so I decided to do it in one patch.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-ID: <20231205182011.1976568-7-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Stefan Hajnoczi 2023-12-05 13:20:03 -05:00 committed by Kevin Wolf
parent 6bc30f1949
commit b49f4755c7
41 changed files with 104 additions and 1169 deletions

View file

@@ -66,7 +66,7 @@ typedef struct BlkMigDevState {
/* Protected by block migration lock. */
int64_t completed_sectors;
/* During migration this is protected by iothread lock / AioContext.
/* During migration this is protected by bdrv_dirty_bitmap_lock().
* Allocation and free happen during setup and cleanup respectively.
*/
BdrvDirtyBitmap *dirty_bitmap;
@@ -101,7 +101,7 @@ typedef struct BlkMigState {
int prev_progress;
int bulk_completed;
/* Lock must be taken _inside_ the iothread lock and any AioContexts. */
/* Lock must be taken _inside_ the iothread lock. */
QemuMutex lock;
} BlkMigState;
@@ -270,7 +270,6 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
if (bmds->shared_base) {
qemu_mutex_lock_iothread();
aio_context_acquire(blk_get_aio_context(bb));
/* Skip unallocated sectors; intentionally treats failure or
* partial sector as an allocated sector */
while (cur_sector < total_sectors &&
@@ -281,7 +280,6 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
}
cur_sector += count >> BDRV_SECTOR_BITS;
}
aio_context_release(blk_get_aio_context(bb));
qemu_mutex_unlock_iothread();
}
@@ -313,21 +311,16 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
block_mig_state.submitted++;
blk_mig_unlock();
/* We do not know if bs is under the main thread (and thus does
* not acquire the AioContext when doing AIO) or rather under
* dataplane. Thus acquire both the iothread mutex and the
* AioContext.
*
* This is ugly and will disappear when we make bdrv_* thread-safe,
* without the need to acquire the AioContext.
/*
* The migration thread does not have an AioContext. Lock the BQL so that
* I/O runs in the main loop AioContext (see
* qemu_get_current_aio_context()).
*/
qemu_mutex_lock_iothread();
aio_context_acquire(blk_get_aio_context(bmds->blk));
bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
nr_sectors * BDRV_SECTOR_SIZE);
blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
0, blk_mig_read_cb, blk);
aio_context_release(blk_get_aio_context(bmds->blk));
qemu_mutex_unlock_iothread();
bmds->cur_sector = cur_sector + nr_sectors;
@@ -512,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void)
}
}
/* Called with iothread lock and AioContext taken. */
/* Called with iothread lock taken. */
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
int is_async)
@@ -606,9 +599,7 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
int ret = 1;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
aio_context_acquire(blk_get_aio_context(bmds->blk));
ret = mig_save_device_dirty(f, bmds, is_async);
aio_context_release(blk_get_aio_context(bmds->blk));
if (ret <= 0) {
break;
}
@@ -666,9 +657,9 @@ static int64_t get_remaining_dirty(void)
int64_t dirty = 0;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
aio_context_acquire(blk_get_aio_context(bmds->blk));
bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
aio_context_release(blk_get_aio_context(bmds->blk));
bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
}
return dirty;
@@ -681,7 +672,6 @@ static void block_migration_cleanup_bmds(void)
{
BlkMigDevState *bmds;
BlockDriverState *bs;
AioContext *ctx;
unset_dirty_tracking();
@@ -693,13 +683,7 @@ static void block_migration_cleanup_bmds(void)
bdrv_op_unblock_all(bs, bmds->blocker);
}
error_free(bmds->blocker);
/* Save ctx, because bmds->blk can disappear during blk_unref. */
ctx = blk_get_aio_context(bmds->blk);
aio_context_acquire(ctx);
blk_unref(bmds->blk);
aio_context_release(ctx);
g_free(bmds->blk_name);
g_free(bmds->aio_bitmap);
g_free(bmds);