mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-03 07:43:54 -06:00
virtio-blk: restart s->rq reqs in vq AioContexts
A virtio-blk device with the iothread-vq-mapping parameter has per-virtqueue AioContexts. It is not thread-safe to process s->rq requests in the BlockBackend AioContext since that may be different from the virtqueue's AioContext to which this request belongs. The code currently races and could crash. Adapt virtio_blk_dma_restart_cb() to first split s->rq into per-vq lists and then schedule a BH in each vq's AioContext as necessary. This way requests are safely processed in their vq's AioContext. Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Message-ID: <20240119135748.270944-5-stefanha@redhat.com> Reviewed-by: Kevin Wolf <kwolf@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent
3cdaf3dd4a
commit
71ee0cdd14
1 changed file with 33 additions and 11 deletions
|
@ -1156,16 +1156,11 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
|
||||||
|
|
||||||
static void virtio_blk_dma_restart_bh(void *opaque)
|
static void virtio_blk_dma_restart_bh(void *opaque)
|
||||||
{
|
{
|
||||||
VirtIOBlock *s = opaque;
|
VirtIOBlockReq *req = opaque;
|
||||||
|
VirtIOBlock *s = req->dev; /* we're called with at least one request */
|
||||||
|
|
||||||
VirtIOBlockReq *req;
|
|
||||||
MultiReqBuffer mrb = {};
|
MultiReqBuffer mrb = {};
|
||||||
|
|
||||||
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
|
|
||||||
req = s->rq;
|
|
||||||
s->rq = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (req) {
|
while (req) {
|
||||||
VirtIOBlockReq *next = req->next;
|
VirtIOBlockReq *next = req->next;
|
||||||
if (virtio_blk_handle_request(req, &mrb)) {
|
if (virtio_blk_handle_request(req, &mrb)) {
|
||||||
|
@ -1195,16 +1190,43 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
|
||||||
RunState state)
|
RunState state)
|
||||||
{
|
{
|
||||||
VirtIOBlock *s = opaque;
|
VirtIOBlock *s = opaque;
|
||||||
|
uint16_t num_queues = s->conf.num_queues;
|
||||||
|
|
||||||
if (!running) {
|
if (!running) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Paired with dec in virtio_blk_dma_restart_bh() */
|
/* Split the device-wide s->rq request list into per-vq request lists */
|
||||||
blk_inc_in_flight(s->conf.conf.blk);
|
g_autofree VirtIOBlockReq **vq_rq = g_new0(VirtIOBlockReq *, num_queues);
|
||||||
|
VirtIOBlockReq *rq;
|
||||||
|
|
||||||
aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
|
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
|
||||||
virtio_blk_dma_restart_bh, s);
|
rq = s->rq;
|
||||||
|
s->rq = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (rq) {
|
||||||
|
VirtIOBlockReq *next = rq->next;
|
||||||
|
uint16_t idx = virtio_get_queue_index(rq->vq);
|
||||||
|
|
||||||
|
rq->next = vq_rq[idx];
|
||||||
|
vq_rq[idx] = rq;
|
||||||
|
rq = next;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Schedule a BH to submit the requests in each vq's AioContext */
|
||||||
|
for (uint16_t i = 0; i < num_queues; i++) {
|
||||||
|
if (!vq_rq[i]) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Paired with dec in virtio_blk_dma_restart_bh() */
|
||||||
|
blk_inc_in_flight(s->conf.conf.blk);
|
||||||
|
|
||||||
|
aio_bh_schedule_oneshot(s->vq_aio_context[i],
|
||||||
|
virtio_blk_dma_restart_bh,
|
||||||
|
vq_rq[i]);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void virtio_blk_reset(VirtIODevice *vdev)
|
static void virtio_blk_reset(VirtIODevice *vdev)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue