Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-04 16:23:55 -06:00
-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJaoonGAAoJEJykq7OBq3PIuvIH/1sU3LokJ9KroaKaqYyAQnOX
V9ow3x4z3CQ8qOUpFWXA3l3lMLWE3YzGLvSMLsUVXafobX6qmK/LhtmLk3oNrg4j
Q5T+d/JFZFZx+MsO4yqD29yJFi2BN1paZ1dpjo6uY5BtABg3zi/cKHOcwkCQDvBA
XNHCSATt0neew51zZ7xKf2ja8tCPbaeshGY56FW1N118LTCNxIU42JKvK3sCZ8KL
bgWRqg3FDZEF5MY0xZwCuCMwskIpu1nw6xgwXe5UdB42p2QntzGGfd9xzlmAcy2O
nYjBqlL7ACN0kbKcPtTNPsikP7O4huoT+62s4cRkFuIUNssot3NSv+iV+HJ3ESs=
=zmof
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Fri 09 Mar 2018 13:19:02 GMT
# gpg:                using RSA key 9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  vl: introduce vm_shutdown()
  virtio-scsi: fix race between .ioeventfd_stop() and vq handler
  virtio-blk: fix race between .ioeventfd_stop() and vq handler
  block: add aio_wait_bh_oneshot()
  virtio-blk: dataplane: Don't batch notifications if EVENT_IDX is present
  README: Fix typo 'git-publish'
  block: Fix qemu crash when using scsi-block

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit e4ae62b802: 11 changed files with 122 additions and 85 deletions.
hw/block/dataplane/virtio-blk.c

@@ -34,6 +34,7 @@ struct VirtIOBlockDataPlane {
     VirtIODevice *vdev;
     QEMUBH *bh;                     /* bh for guest notification */
     unsigned long *batch_notify_vqs;
+    bool batch_notifications;
 
     /* Note that these EventNotifiers are assigned by value. This is
      * fine as long as you do not call event_notifier_cleanup on them
@@ -47,8 +48,12 @@ struct VirtIOBlockDataPlane {
 /* Raise an interrupt to signal guest, if necessary */
 void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
 {
-    set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
-    qemu_bh_schedule(s->bh);
+    if (s->batch_notifications) {
+        set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
+        qemu_bh_schedule(s->bh);
+    } else {
+        virtio_notify_irqfd(s->vdev, vq);
+    }
 }
 
 static void notify_guest_bh(void *opaque)
@@ -177,6 +182,12 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
 
     s->starting = true;
 
+    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+        s->batch_notifications = true;
+    } else {
+        s->batch_notifications = false;
+    }
+
     /* Set up guest notifier (irq) */
     r = k->set_guest_notifiers(qbus->parent, nvqs, true);
     if (r != 0) {
@@ -229,6 +240,22 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     return -ENOSYS;
 }
 
+/* Stop notifications for new requests from guest.
+ *
+ * Context: BH in IOThread
+ */
+static void virtio_blk_data_plane_stop_bh(void *opaque)
+{
+    VirtIOBlockDataPlane *s = opaque;
+    unsigned i;
+
+    for (i = 0; i < s->conf->num_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
+    }
+}
+
 /* Context: QEMU global mutex held */
 void virtio_blk_data_plane_stop(VirtIODevice *vdev)
 {
@@ -253,13 +280,7 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     trace_virtio_blk_data_plane_stop(s);
 
     aio_context_acquire(s->ctx);
-
-    /* Stop notifications for new requests from guest */
-    for (i = 0; i < nvqs; i++) {
-        VirtQueue *vq = virtio_get_queue(s->vdev, i);
-
-        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
-    }
+    aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
     /* Drain and switch bs back to the QEMU main loop */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
hw/scsi/virtio-scsi-dataplane.c

@@ -107,9 +107,10 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
     return 0;
 }
 
-/* assumes s->ctx held */
-static void virtio_scsi_clear_aio(VirtIOSCSI *s)
+/* Context: BH in IOThread */
+static void virtio_scsi_dataplane_stop_bh(void *opaque)
 {
+    VirtIOSCSI *s = opaque;
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     int i;
 
@@ -171,7 +172,7 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
     return 0;
 
 fail_vrings:
-    virtio_scsi_clear_aio(s);
+    aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
     aio_context_release(s->ctx);
     for (i = 0; i < vs->conf.num_queues + 2; i++) {
         virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
@@ -207,7 +208,7 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
     s->dataplane_stopping = true;
 
     aio_context_acquire(s->ctx);
-    virtio_scsi_clear_aio(s);
+    aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
     aio_context_release(s->ctx);
 
     blk_drain_all(); /* ensure there are no in-flight requests */