Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-07 01:33:56 -06:00
vhost: Add Shadow VirtQueue kick forwarding capabilities
In this mode no buffer forwarding is performed in SVQ mode: QEMU just forwards the guest's kicks to the device. Host memory notifier regions are left out for simplicity, and they will not be addressed in this series.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
This commit is contained in:
parent 10857ec0ad
commit dff4426fa6
4 changed files with 215 additions and 2 deletions
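The forwarding itself is plain eventfd plumbing: the shadow virtqueue owns the eventfd that the guest's kick reaches, and on each kick it re-signals the eventfd that the vdpa device actually polls. Below is a minimal standalone sketch of that idea using raw Linux eventfds instead of QEMU's EventNotifier API; guest_kick_fd and device_kick_fd are illustrative names, not identifiers from this patch. The equivalent loop lives in the shadow virtqueue code added elsewhere in this series; the hunks below only wire up which file descriptor goes where.

/* Standalone sketch: forward a guest "kick" from one eventfd to another. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* Consume the guest notification, then signal the device-side eventfd. */
static void forward_kick(int guest_kick_fd, int device_kick_fd)
{
    uint64_t n;
    uint64_t one = 1;

    /* A successful read clears the guest-side counter. */
    if (read(guest_kick_fd, &n, sizeof(n)) == sizeof(n)) {
        /* Writing 1 wakes up whoever polls the device-side eventfd. */
        if (write(device_kick_fd, &one, sizeof(one)) != sizeof(one)) {
            perror("write device kick");
        }
    }
}

int main(void)
{
    int guest_kick_fd = eventfd(0, EFD_NONBLOCK);
    int device_kick_fd = eventfd(0, 0);
    uint64_t one = 1, out;

    /* Simulate a guest kick, forward it, and observe it on the device side. */
    write(guest_kick_fd, &one, sizeof(one));
    forward_kick(guest_kick_fd, device_kick_fd);
    read(device_kick_fd, &out, sizeof(out));
    printf("device saw %llu kick(s)\n", (unsigned long long)out);

    close(guest_kick_fd);
    close(device_kick_fd);
    return 0;
}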
hw/virtio/vhost-vdpa.c
@@ -17,12 +17,14 @@
 #include "hw/virtio/vhost.h"
 #include "hw/virtio/vhost-backend.h"
 #include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
 #include "hw/virtio/vhost-vdpa.h"
 #include "exec/address-spaces.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
 #include "trace.h"
 #include "qemu-common.h"
+#include "qapi/error.h"
 
 /*
  * Return one past the end of the end of section. Be careful with uint64_t
@@ -342,6 +344,30 @@ static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
     return v->index != 0;
 }
 
+static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
+                               Error **errp)
+{
+    g_autoptr(GPtrArray) shadow_vqs = NULL;
+
+    if (!v->shadow_vqs_enabled) {
+        return 0;
+    }
+
+    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
+    for (unsigned n = 0; n < hdev->nvqs; ++n) {
+        g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new();
+
+        if (unlikely(!svq)) {
+            error_setg(errp, "Cannot create svq %u", n);
+            return -1;
+        }
+        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
+    }
+
+    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
+    return 0;
+}
+
 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
 {
     struct vhost_vdpa *v;
@@ -364,6 +390,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
     dev->opaque = opaque ;
     v->listener = vhost_vdpa_memory_listener;
     v->msg_type = VHOST_IOTLB_MSG_V2;
+    ret = vhost_vdpa_init_svq(dev, v, errp);
+    if (ret) {
+        goto err;
+    }
 
     vhost_vdpa_get_iova_range(v);
 
@@ -375,6 +405,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
                                VIRTIO_CONFIG_S_DRIVER);
 
     return 0;
+
+err:
+    ram_block_discard_disable(false);
+    return ret;
 }
 
 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
@@ -445,8 +479,14 @@ static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
 
 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
 {
+    struct vhost_vdpa *v = dev->opaque;
     int i;
 
+    if (v->shadow_vqs_enabled) {
+        /* FIXME SVQ is not compatible with host notifiers mr */
+        return;
+    }
+
     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
         if (vhost_vdpa_host_notifier_init(dev, i)) {
             goto err;
@@ -460,6 +500,21 @@ err:
     return;
 }
 
+static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    size_t idx;
+
+    if (!v->shadow_vqs) {
+        return;
+    }
+
+    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
+        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
+    }
+    g_ptr_array_free(v->shadow_vqs, true);
+}
+
 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
 {
     struct vhost_vdpa *v;
@@ -468,6 +523,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     trace_vhost_vdpa_cleanup(dev, v);
     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
     memory_listener_unregister(&v->listener);
+    vhost_vdpa_svq_cleanup(dev);
 
     dev->opaque = NULL;
     ram_block_discard_disable(false);
@@ -559,11 +615,26 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
     return ret;
 }
 
+static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
+{
+    if (!v->shadow_vqs_enabled) {
+        return;
+    }
+
+    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+        vhost_svq_stop(svq);
+    }
+}
+
 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
 {
+    struct vhost_vdpa *v = dev->opaque;
     int ret;
     uint8_t status = 0;
 
+    vhost_vdpa_reset_svq(v);
+
     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
     trace_vhost_vdpa_reset_device(dev, status);
     return ret;
@@ -647,13 +718,74 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
     return ret;
 }
 
+static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
+                                         struct vhost_vring_file *file)
+{
+    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+}
+
+/**
+ * Set the shadow virtqueue descriptors to the device
+ *
+ * @dev: The vhost device model
+ * @svq: The shadow virtqueue
+ * @idx: The index of the virtqueue in the vhost device
+ * @errp: Error
+ */
+static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
+                                 VhostShadowVirtqueue *svq, unsigned idx,
+                                 Error **errp)
+{
+    struct vhost_vring_file file = {
+        .index = dev->vq_index + idx,
+    };
+    const EventNotifier *event_notifier = &svq->hdev_kick;
+    int r;
+
+    file.fd = event_notifier_get_fd(event_notifier);
+    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
+    if (unlikely(r != 0)) {
+        error_setg_errno(errp, -r, "Can't set device kick fd");
+    }
+
+    return r == 0;
+}
+
+static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    Error *err = NULL;
+    unsigned i;
+
+    if (!v->shadow_vqs) {
+        return true;
+    }
+
+    for (i = 0; i < v->shadow_vqs->len; ++i) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
+        if (unlikely(!ok)) {
+            error_reportf_err(err, "Cannot setup SVQ %u: ", i);
+            return false;
+        }
+    }
+
+    return true;
+}
+
 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 {
     struct vhost_vdpa *v = dev->opaque;
+    bool ok;
     trace_vhost_vdpa_dev_start(dev, started);
 
     if (started) {
         vhost_vdpa_host_notifiers_init(dev);
+        ok = vhost_vdpa_svqs_start(dev);
+        if (unlikely(!ok)) {
+            return -1;
+        }
         vhost_vdpa_set_vring_ready(dev);
     } else {
         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
@@ -725,8 +857,16 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                      struct vhost_vring_file *file)
 {
-    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
-    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+    struct vhost_vdpa *v = dev->opaque;
+    int vdpa_idx = file->index - dev->vq_index;
+
+    if (v->shadow_vqs_enabled) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+        vhost_svq_set_svq_kick_fd(svq, file->fd);
+        return 0;
+    } else {
+        return vhost_vdpa_set_vring_dev_kick(dev, file);
+    }
 }
 
 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,