dma: use current AioContext for dma_blk_io()

In the past a single AioContext was used for block I/O and it was
fetched using blk_get_aio_context(). Nowadays the block layer supports
running I/O from any AioContext and multiple AioContexts at the same
time. Remove the dma_blk_io() AioContext argument and use the current
AioContext instead.

This makes calling the function easier and enables multiple IOThreads to
use dma_blk_io() concurrently for the same block device.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-3-stefanha@redhat.com>
Tested-by: Peter Krempa <pkrempa@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Stefan Hajnoczi 2025-03-11 21:26:05 +08:00 committed by Kevin Wolf
parent b2e3659d0d
commit a89c3c9b2c
5 changed files with 9 additions and 14 deletions

View file

@@ -968,8 +968,7 @@ static void ide_dma_cb(void *opaque, int ret)
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
         break;
     case IDE_DMA_TRIM:
-        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
-                                        &s->sg, offset, BDRV_SECTOR_SIZE,
+        s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, BDRV_SECTOR_SIZE,
                                         ide_issue_trim, s, ide_dma_cb, s,
                                         DMA_DIRECTION_TO_DEVICE);
         break;

View file

@@ -187,8 +187,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
                                         pmac_ide_transfer_cb, io);
         break;
     case IDE_DMA_TRIM:
-        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
-                                        offset, 0x1, ide_issue_trim, s,
+        s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, 0x1, ide_issue_trim, s,
                                         pmac_ide_transfer_cb, io,
                                         DMA_DIRECTION_TO_DEVICE);
         break;

View file

@@ -487,8 +487,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
     if (r->req.sg) {
         dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
         r->req.residual -= r->req.sg->size;
-        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
-                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
+        r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                   BDRV_SECTOR_SIZE,
                                   sdc->dma_readv, r, scsi_dma_complete, r,
                                   DMA_DIRECTION_FROM_DEVICE);
@@ -650,8 +649,7 @@ static void scsi_write_data(SCSIRequest *req)
     if (r->req.sg) {
         dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
         r->req.residual -= r->req.sg->size;
-        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
-                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
+        r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                   BDRV_SECTOR_SIZE,
                                   sdc->dma_writev, r, scsi_dma_complete, r,
                                   DMA_DIRECTION_TO_DEVICE);

View file

@@ -290,8 +290,7 @@ typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                               BlockCompletionFunc *cb, void *cb_opaque,
                               void *opaque);
-BlockAIOCB *dma_blk_io(AioContext *ctx,
-                       QEMUSGList *sg, uint64_t offset, uint32_t align,
+BlockAIOCB *dma_blk_io(QEMUSGList *sg, uint64_t offset, uint32_t align,
                        DMAIOFunc *io_func, void *io_func_opaque,
                        BlockCompletionFunc *cb, void *opaque, DMADirection dir);
 BlockAIOCB *dma_blk_read(BlockBackend *blk,

View file

@@ -211,7 +211,7 @@ static const AIOCBInfo dma_aiocb_info = {
     .cancel_async       = dma_aio_cancel,
 };
 
-BlockAIOCB *dma_blk_io(AioContext *ctx,
+BlockAIOCB *dma_blk_io(
     QEMUSGList *sg, uint64_t offset, uint32_t align,
     DMAIOFunc *io_func, void *io_func_opaque,
     BlockCompletionFunc *cb,
@@ -223,7 +223,7 @@ BlockAIOCB *dma_blk_io(
 
     dbs->acb = NULL;
     dbs->sg = sg;
-    dbs->ctx = ctx;
+    dbs->ctx = qemu_get_current_aio_context();
     dbs->offset = offset;
     dbs->align = align;
     dbs->sg_cur_index = 0;
@@ -251,7 +251,7 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+    return dma_blk_io(sg, offset, align,
                       dma_blk_read_io_func, blk, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
 }
@@ -269,7 +269,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+    return dma_blk_io(sg, offset, align,
                       dma_blk_write_io_func, blk, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
 }