mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-04 16:23:55 -06:00
vhost: move iova_tree set to vhost_svq_start
Since we don't know if we will use SVQ at qemu initialization, let's allocate iova_tree only if needed. To do so, accept it at SVQ start, not at initialization. This will avoid creating it if the device does not support SVQ. Signed-off-by: Eugenio Pérez <eperezma@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Message-Id: <20221215113144.322011-5-eperezma@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
3cfb4d069c
commit
5fde952bbd
3 changed files with 8 additions and 11 deletions
|
@ -642,9 +642,10 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
|
||||||
* @svq: Shadow Virtqueue
|
* @svq: Shadow Virtqueue
|
||||||
* @vdev: VirtIO device
|
* @vdev: VirtIO device
|
||||||
* @vq: Virtqueue to shadow
|
* @vq: Virtqueue to shadow
|
||||||
|
* @iova_tree: Tree to perform descriptors translations
|
||||||
*/
|
*/
|
||||||
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
||||||
VirtQueue *vq)
|
VirtQueue *vq, VhostIOVATree *iova_tree)
|
||||||
{
|
{
|
||||||
size_t desc_size, driver_size, device_size;
|
size_t desc_size, driver_size, device_size;
|
||||||
|
|
||||||
|
@ -655,6 +656,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
||||||
svq->last_used_idx = 0;
|
svq->last_used_idx = 0;
|
||||||
svq->vdev = vdev;
|
svq->vdev = vdev;
|
||||||
svq->vq = vq;
|
svq->vq = vq;
|
||||||
|
svq->iova_tree = iova_tree;
|
||||||
|
|
||||||
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
|
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
|
||||||
driver_size = vhost_svq_driver_area_size(svq);
|
driver_size = vhost_svq_driver_area_size(svq);
|
||||||
|
@ -712,18 +714,15 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
|
||||||
* Creates vhost shadow virtqueue, and instructs the vhost device to use the
|
* Creates vhost shadow virtqueue, and instructs the vhost device to use the
|
||||||
* shadow methods and file descriptors.
|
* shadow methods and file descriptors.
|
||||||
*
|
*
|
||||||
* @iova_tree: Tree to perform descriptors translations
|
|
||||||
* @ops: SVQ owner callbacks
|
* @ops: SVQ owner callbacks
|
||||||
* @ops_opaque: ops opaque pointer
|
* @ops_opaque: ops opaque pointer
|
||||||
*/
|
*/
|
||||||
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
|
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
|
||||||
const VhostShadowVirtqueueOps *ops,
|
|
||||||
void *ops_opaque)
|
void *ops_opaque)
|
||||||
{
|
{
|
||||||
VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
|
VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
|
||||||
|
|
||||||
event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
|
event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
|
||||||
svq->iova_tree = iova_tree;
|
|
||||||
svq->ops = ops;
|
svq->ops = ops;
|
||||||
svq->ops_opaque = ops_opaque;
|
svq->ops_opaque = ops_opaque;
|
||||||
return svq;
|
return svq;
|
||||||
|
|
|
@ -126,11 +126,10 @@ size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
|
||||||
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
|
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
|
||||||
|
|
||||||
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
||||||
VirtQueue *vq);
|
VirtQueue *vq, VhostIOVATree *iova_tree);
|
||||||
void vhost_svq_stop(VhostShadowVirtqueue *svq);
|
void vhost_svq_stop(VhostShadowVirtqueue *svq);
|
||||||
|
|
||||||
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
|
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
|
||||||
const VhostShadowVirtqueueOps *ops,
|
|
||||||
void *ops_opaque);
|
void *ops_opaque);
|
||||||
|
|
||||||
void vhost_svq_free(gpointer vq);
|
void vhost_svq_free(gpointer vq);
|
||||||
|
|
|
@ -430,8 +430,7 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
|
||||||
for (unsigned n = 0; n < hdev->nvqs; ++n) {
|
for (unsigned n = 0; n < hdev->nvqs; ++n) {
|
||||||
VhostShadowVirtqueue *svq;
|
VhostShadowVirtqueue *svq;
|
||||||
|
|
||||||
svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
|
svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
|
||||||
v->shadow_vq_ops_opaque);
|
|
||||||
g_ptr_array_add(shadow_vqs, svq);
|
g_ptr_array_add(shadow_vqs, svq);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1063,7 +1062,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
vhost_svq_start(svq, dev->vdev, vq);
|
vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
|
||||||
ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
|
ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
|
||||||
if (unlikely(!ok)) {
|
if (unlikely(!ok)) {
|
||||||
goto err_map;
|
goto err_map;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue