-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJi18PHAAoJEO8Ells5jWIRCEQH+wepXDoT6Q56xmUgxVs+hlAD
CXGy71/cNV08Yu3PTTXo8SYaw+KXxsA9ECgIr2hsfPXarAdoOpJFpZR0HoqIzaXd
kpD6bvwN8bEEOlAHxKcb6/VM+VYntZBfkH9m1WLGx3fHILazLblyL8w2Hkp7NK9J
IBpQQ63uU8Xt0+js96Z/sPOKRjrtbKXFT1bhY2CI8MKZpuqNyED0jZYwbNdnRwZN
fuKbpsaaT4Wxx+mQMg7H7a0e/xx3DNi2F6cAtGLH98WYzbLFgExSSK8G8jnwEVfM
EKWfU7N4zmokq7jN99yvGzjIzLrnLX6yn/ifSs+lQOzdtCA9zEbotI+CDCVdPs4=
=9zus
-----END PGP SIGNATURE-----

Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging

# gpg: Signature made Wed 20 Jul 2022 09:58:47 BST
# gpg:                using RSA key EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 215D 46F4 8246 689E C77F  3562 EF04 965B 398D 6211

* tag 'net-pull-request' of https://github.com/jasowang/qemu: (25 commits)
  net/colo.c: fix segmentation fault when packet is not parsed correctly
  net/colo.c: No need to track conn_list for filter-rewriter
  net/colo: Fix a "double free" crash to clear the conn_list
  softmmu/runstate.c: add RunStateTransition support from COLO to PRELAUNCH
  vdpa: Add x-svq to NetdevVhostVDPAOptions
  vdpa: Add device migration blocker
  vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs
  vdpa: Buffer CVQ support on shadow virtqueue
  vdpa: manual forward CVQ buffers
  vhost-net-vdpa: add stubs for when no virtio-net device is present
  vdpa: Export vhost_vdpa_dma_map and unmap calls
  vhost: Add svq avail_handler callback
  vhost: add vhost_svq_poll
  vhost: Expose vhost_svq_add
  vhost: add vhost_svq_push_elem
  vhost: Track number of descs in SVQDescState
  vhost: Add SVQDescState
  vhost: Decouple vhost_svq_add from VirtQueueElement
  vhost: Check for queue full at vhost_svq_add
  vhost: Move vhost_svq_kick call to vhost_svq_add
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2022-07-20 16:27:57 +01:00
commit 8ec4bc3c8c
15 changed files with 675 additions and 128 deletions

hw/net/virtio-net.c

@@ -49,7 +49,6 @@
#define VIRTIO_NET_VM_VERSION 11
#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
/* previously fixed value */
@@ -1434,57 +1433,71 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
const struct iovec *in_sg, unsigned in_num,
const struct iovec *out_sg,
unsigned out_num)
{
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement *elem;
size_t s;
struct iovec *iov, *iov2;
unsigned int iov_cnt;
if (iov_size(in_sg, in_num) < sizeof(status) ||
iov_size(out_sg, out_num) < sizeof(ctrl)) {
virtio_error(vdev, "virtio-net ctrl missing headers");
return 0;
}
iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &out_num, sizeof(ctrl));
if (s != sizeof(ctrl)) {
status = VIRTIO_NET_ERR;
} else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
} else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
} else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
} else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
} else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
}
s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
g_free(iov2);
return sizeof(status);
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtQueueElement *elem;
for (;;) {
size_t written;
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
virtio_error(vdev, "virtio-net ctrl missing headers");
written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
elem->out_sg, elem->out_num);
if (written > 0) {
virtqueue_push(vq, elem, written);
virtio_notify(vdev, vq);
g_free(elem);
} else {
virtqueue_detach_element(vq, elem, 0);
g_free(elem);
break;
}
iov_cnt = elem->out_num;
iov2 = iov = g_memdup2(elem->out_sg,
sizeof(struct iovec) * elem->out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
status = VIRTIO_NET_ERR;
} else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
}
s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
virtqueue_push(vq, elem, sizeof(status));
virtio_notify(vdev, vq);
g_free(iov2);
g_free(elem);
}
}
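The point of the split above is that virtio_net_handle_ctrl_iov() no longer
needs a VirtQueueElement: any caller able to build iovecs can drive the
control-queue logic directly. A minimal sketch of such a caller; the function
name and its local buffers are illustrative, not part of this commit:

/* Hypothetical caller: a virtio-net control command built from local
 * buffers rather than a guest virtqueue element. The layout follows the
 * virtio spec: header (class/cmd), payload, then a device-writable
 * status byte. Assumes virtio-net.c's usual includes. */
static virtio_net_ctrl_ack send_mq_command(VirtIODevice *vdev,
                                           uint16_t queue_pairs)
{
    struct virtio_net_ctrl_hdr ctrl = {
        .class = VIRTIO_NET_CTRL_MQ,
        .cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
    };
    struct virtio_net_ctrl_mq mq = {
        .virtqueue_pairs = cpu_to_le16(queue_pairs),
    };
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    const struct iovec out_sg[] = {
        { .iov_base = &ctrl, .iov_len = sizeof(ctrl) },
        { .iov_base = &mq, .iov_len = sizeof(mq) },
    };
    const struct iovec in_sg = {
        .iov_base = &status, .iov_len = sizeof(status),
    };

    /* On success it returns sizeof(status) and fills in the ack */
    if (virtio_net_handle_ctrl_iov(vdev, &in_sg, 1,
                                   out_sg, ARRAY_SIZE(out_sg)) !=
        sizeof(status)) {
        return VIRTIO_NET_ERR;
    }
    return status;
}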

hw/virtio/vhost-shadow-virtqueue.c

@@ -122,17 +122,35 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
return true;
}
static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
const struct iovec *iovec, size_t num,
bool more_descs, bool write)
/**
* Write descriptors to SVQ vring
*
* @svq: The shadow virtqueue
* @sg: Cache for hwaddr
* @iovec: The iovec from the guest
* @num: iovec length
* @more_descs: True if more descriptors come in the chain
* @write: True if they are writeable descriptors
*
* Returns true on success, false otherwise; an error is printed on failure.
*/
static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
const struct iovec *iovec, size_t num,
bool more_descs, bool write)
{
uint16_t i = svq->free_head, last = svq->free_head;
unsigned n;
uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
vring_desc_t *descs = svq->vring.desc;
bool ok;
if (num == 0) {
return;
return true;
}
ok = vhost_svq_translate_addr(svq, sg, iovec, num);
if (unlikely(!ok)) {
return false;
}
for (n = 0; n < num; n++) {
@@ -150,39 +168,38 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
}
svq->free_head = le16_to_cpu(svq->desc_next[last]);
return true;
}
static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
VirtQueueElement *elem, unsigned *head)
const struct iovec *out_sg, size_t out_num,
const struct iovec *in_sg, size_t in_num,
unsigned *head)
{
unsigned avail_idx;
vring_avail_t *avail = svq->vring.avail;
bool ok;
g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num));
g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num));
*head = svq->free_head;
/* We need some descriptors here */
if (unlikely(!elem->out_num && !elem->in_num)) {
if (unlikely(!out_num && !in_num)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Guest provided element with no descriptors");
return false;
}
ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num);
if (unlikely(!ok)) {
return false;
}
vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num,
elem->in_num > 0, false);
ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num);
ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
false);
if (unlikely(!ok)) {
return false;
}
vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true);
ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
if (unlikely(!ok)) {
return false;
}
/*
* Put the entry in the available array (but don't update avail->idx until
@@ -199,26 +216,6 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
return true;
}
/**
* Add an element to a SVQ.
*
* The caller must check that there is enough slots for the new element. It
* takes ownership of the element: In case of failure, it is free and the SVQ
* is considered broken.
*/
static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
{
unsigned qemu_head;
bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
if (unlikely(!ok)) {
g_free(elem);
return false;
}
svq->ring_id_maps[qemu_head] = elem;
return true;
}
static void vhost_svq_kick(VhostShadowVirtqueue *svq)
{
/*
@@ -233,6 +230,46 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
event_notifier_set(&svq->hdev_kick);
}
/**
* Add an element to a SVQ.
*
* The caller must check that there are enough slots for the new element. It
* takes ownership of the element: on any failure other than ENOSPC, the
* element is freed.
*
* Returns -EINVAL if the element is invalid, -ENOSPC if the device queue is
* full.
*/
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
size_t out_num, const struct iovec *in_sg, size_t in_num,
VirtQueueElement *elem)
{
unsigned qemu_head;
unsigned ndescs = in_num + out_num;
bool ok;
if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
return -ENOSPC;
}
ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
if (unlikely(!ok)) {
g_free(elem);
return -EINVAL;
}
svq->desc_state[qemu_head].elem = elem;
svq->desc_state[qemu_head].ndescs = ndescs;
vhost_svq_kick(svq);
return 0;
}
/* Convenience wrapper to add a guest's element to SVQ */
static int vhost_svq_add_element(VhostShadowVirtqueue *svq,
VirtQueueElement *elem)
{
return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg,
elem->in_num, elem);
}
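The error convention matters to callers: -ENOSPC leaves ownership of the
element with the caller so it can retry later, while any other failure means
vhost_svq_add() already freed it. A hedged sketch of the resulting caller
pattern (it mirrors what vhost_handle_guest_kick() does below):

static bool svq_try_add_element(VhostShadowVirtqueue *svq,
                                VirtQueueElement *elem)
{
    int r = vhost_svq_add(svq, elem->out_sg, elem->out_num,
                          elem->in_sg, elem->in_num, elem);
    if (r == -ENOSPC) {
        /* Queue full: elem was NOT freed; park it and retry once the
         * device marks some descriptors as used. */
        svq->next_guest_avail_elem = elem;
        return false;
    }
    /* Any other failure freed elem and the SVQ is likely broken */
    return r == 0;
}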
/**
* Forward available buffers.
*
@@ -257,7 +294,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
while (true) {
VirtQueueElement *elem;
bool ok;
int r;
if (svq->next_guest_avail_elem) {
elem = g_steal_pointer(&svq->next_guest_avail_elem);
@@ -269,28 +306,30 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
break;
}
if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
/*
* This condition is possible since a contiguous buffer in GPA
* does not imply a contiguous buffer in qemu's VA
* scatter-gather segments. If that happens, the buffer exposed
* to the device needs to be a chain of descriptors at this
* moment.
*
* SVQ cannot hold more available buffers if we are here:
* queue the current guest descriptor and ignore further kicks
* until some elements are used.
*/
svq->next_guest_avail_elem = elem;
return;
if (svq->ops) {
r = svq->ops->avail_handler(svq, elem, svq->ops_opaque);
} else {
r = vhost_svq_add_element(svq, elem);
}
if (unlikely(r != 0)) {
if (r == -ENOSPC) {
/*
* This condition is possible since a contiguous buffer in
* GPA does not imply a contiguous buffer in qemu's VA
* scatter-gather segments. If that happens, the buffer
* exposed to the device needs to be a chain of descriptors
* at this moment.
*
* SVQ cannot hold more available buffers if we are here:
* queue the current guest descriptor and ignore kicks
* until some elements are used.
*/
svq->next_guest_avail_elem = elem;
}
ok = vhost_svq_add(svq, elem);
if (unlikely(!ok)) {
/* VQ is broken, just return and ignore any other kicks */
/* VQ is full or broken, just return and ignore kicks */
return;
}
vhost_svq_kick(svq);
}
virtio_queue_set_notification(svq->vq, true);
@@ -311,11 +350,12 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n)
static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
{
uint16_t *used_idx = &svq->vring.used->idx;
if (svq->last_used_idx != svq->shadow_used_idx) {
return true;
}
svq->shadow_used_idx = cpu_to_le16(svq->vring.used->idx);
svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx);
return svq->last_used_idx != svq->shadow_used_idx;
}
@@ -376,21 +416,36 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
return NULL;
}
if (unlikely(!svq->ring_id_maps[used_elem.id])) {
if (unlikely(!svq->desc_state[used_elem.id].elem)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Device %s says index %u is used, but it was not available",
svq->vdev->name, used_elem.id);
return NULL;
}
num = svq->ring_id_maps[used_elem.id]->in_num +
svq->ring_id_maps[used_elem.id]->out_num;
num = svq->desc_state[used_elem.id].ndescs;
last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
svq->desc_next[last_used_chain] = svq->free_head;
svq->free_head = used_elem.id;
*len = used_elem.len;
return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
}
/**
* Push an element to SVQ, returning it to the guest.
*/
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len)
{
virtqueue_push(svq->vq, elem, len);
if (svq->next_guest_avail_elem) {
/*
* Avail ring was full when vhost_svq_flush was called, so it's a
* good moment to make more descriptors available if possible.
*/
vhost_handle_guest_kick(svq);
}
}
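vhost_svq_push_elem() is the counterpart an avail_handler needs: once the
owner has completed a buffer out of band, it returns the element to the guest
with the number of bytes the device wrote. A short sketch; the helper name is
illustrative:

static void complete_to_guest(VhostShadowVirtqueue *svq,
                              VirtQueueElement *elem, uint32_t dev_written)
{
    vhost_svq_push_elem(svq, elem, dev_written);
    /* virtqueue_push() unmaps and copies what it needs; the element
     * itself remains the caller's to free */
    g_free(elem);
}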
static void vhost_svq_flush(VhostShadowVirtqueue *svq,
@@ -434,6 +489,33 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
} while (!vhost_svq_enable_notification(svq));
}
/**
* Poll the SVQ for one device used buffer.
*
* This function races with the main event loop's SVQ polling, so extra
* synchronization is needed.
*
* Return the length written by the device.
*/
size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
{
int64_t start_us = g_get_monotonic_time();
do {
uint32_t len;
VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
if (elem) {
return len;
}
if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
return 0;
}
/* Make sure we read new used_idx */
smp_rmb();
} while (true);
}
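Together with vhost_svq_add(), this enables a synchronous request path: queue
a buffer, then spin until the device uses it. A minimal sketch, assuming the
caller is not racing the event-loop handler for the same virtqueue; the
helper name is hypothetical:

static ssize_t svq_add_and_poll(VhostShadowVirtqueue *svq,
                                const struct iovec *out_sg, size_t out_num,
                                const struct iovec *in_sg, size_t in_num,
                                VirtQueueElement *elem)
{
    int r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, elem);
    if (unlikely(r != 0)) {
        return r;
    }
    /* vhost_svq_add() already kicked the device; busy-wait for the used
     * buffer. A return of 0 means the ~10 second timeout expired. */
    return vhost_svq_poll(svq);
}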
/**
* Forward used buffers.
*
@@ -560,7 +642,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
memset(svq->vring.desc, 0, driver_size);
svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
memset(svq->vring.used, 0, device_size);
svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
svq->desc_state = g_new0(SVQDescState, svq->vring.num);
svq->desc_next = g_new0(uint16_t, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
@@ -585,7 +667,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
for (unsigned i = 0; i < svq->vring.num; ++i) {
g_autofree VirtQueueElement *elem = NULL;
elem = g_steal_pointer(&svq->ring_id_maps[i]);
elem = g_steal_pointer(&svq->desc_state[i].elem);
if (elem) {
virtqueue_detach_element(svq->vq, elem, 0);
}
@@ -597,7 +679,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
}
svq->vq = NULL;
g_free(svq->desc_next);
g_free(svq->ring_id_maps);
g_free(svq->desc_state);
qemu_vfree(svq->vring.desc);
qemu_vfree(svq->vring.used);
}
@@ -607,12 +689,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
* shadow methods and file descriptors.
*
* @iova_tree: Tree to perform descriptors translations
* @ops: SVQ owner callbacks
* @ops_opaque: ops opaque pointer
*
* Returns the new virtqueue or NULL.
*
* In case of error, reason is reported through error_report.
*/
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
const VhostShadowVirtqueueOps *ops,
void *ops_opaque)
{
g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
int r;
@@ -634,6 +720,8 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
svq->iova_tree = iova_tree;
svq->ops = ops;
svq->ops_opaque = ops_opaque;
return g_steal_pointer(&svq);
err_init_hdev_call:

hw/virtio/vhost-shadow-virtqueue.h

@@ -15,6 +15,37 @@
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/vhost-iova-tree.h"
typedef struct SVQDescState {
VirtQueueElement *elem;
/*
* Number of descriptors exposed to the device. May or may not match the
* guest's.
*/
unsigned int ndescs;
} SVQDescState;
typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
/**
* Callback to handle an avail buffer.
*
* @svq: Shadow virtqueue
* @elem: Element placed in the queue by the guest
* @vq_callback_opaque: Opaque
*
* Returns 0 if the vq is running as expected.
*
* Note that ownership of elem is transferred to the callback.
*/
typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq,
VirtQueueElement *elem,
void *vq_callback_opaque);
typedef struct VhostShadowVirtqueueOps {
VirtQueueAvailCallback avail_handler;
} VhostShadowVirtqueueOps;
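An owner customizes the SVQ by handing these ops to vhost_svq_new(); the vdpa
CVQ code in this pull request is the first real user. A pass-through sketch
of an avail_handler, with illustrative names:

/* Inspect (or rewrite) every guest buffer before the device sees it */
static int my_avail_handler(VhostShadowVirtqueue *svq,
                            VirtQueueElement *elem,
                            void *vq_callback_opaque)
{
    /* vq_callback_opaque is whatever was passed to vhost_svq_new() */
    return vhost_svq_add(svq, elem->out_sg, elem->out_num,
                         elem->in_sg, elem->in_num, elem);
}

static const VhostShadowVirtqueueOps my_svq_ops = {
    .avail_handler = my_avail_handler,
};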
/* Shadow virtqueue to relay notifications */
typedef struct VhostShadowVirtqueue {
/* Shadow vring */
@@ -47,8 +78,8 @@ typedef struct VhostShadowVirtqueue {
/* IOVA mapping */
VhostIOVATree *iova_tree;
/* Map for use the guest's descriptors */
VirtQueueElement **ring_id_maps;
/* SVQ vring descriptors state */
SVQDescState *desc_state;
/* Next VirtQueue element that guest made available */
VirtQueueElement *next_guest_avail_elem;
@@ -59,6 +90,12 @@
*/
uint16_t *desc_next;
/* Caller callbacks */
const VhostShadowVirtqueueOps *ops;
/* Caller callbacks opaque */
void *ops_opaque;
/* Next head to expose to the device */
uint16_t shadow_avail_idx;
@@ -74,6 +111,13 @@
bool vhost_svq_valid_features(uint64_t features, Error **errp);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
size_t out_num, const struct iovec *in_sg, size_t in_num,
VirtQueueElement *elem);
size_t vhost_svq_poll(VhostShadowVirtqueue *svq);
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
@@ -85,7 +129,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq);
void vhost_svq_stop(VhostShadowVirtqueue *svq);
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree);
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
const VhostShadowVirtqueueOps *ops,
void *ops_opaque);
void vhost_svq_free(gpointer vq);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);

hw/virtio/vhost-vdpa.c

@@ -20,6 +20,7 @@
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
@@ -71,8 +72,8 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
return false;
}
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
void *vaddr, bool readonly)
int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
void *vaddr, bool readonly)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
@@ -97,8 +98,7 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
return ret;
}
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
hwaddr size)
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
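Exporting the map/unmap pair lets other vdpa code, such as the CVQ shadow
buffers later in this series, manage its own device mappings. A hedged
sketch; choosing the IOVA is the caller's job, normally through the IOVA
tree:

static int with_scratch_mapping(struct vhost_vdpa *v, hwaddr iova,
                                void *host_buf, size_t size)
{
    /* readonly=false: the device may write into host_buf */
    int r = vhost_vdpa_dma_map(v, iova, size, host_buf, false);
    if (unlikely(r != 0)) {
        return r;
    }
    /* ... expose descriptors pointing into [iova, iova + size) ... */
    return vhost_vdpa_dma_unmap(v, iova, size);
}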
@@ -418,8 +418,10 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
for (unsigned n = 0; n < hdev->nvqs; ++n) {
g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree);
g_autoptr(VhostShadowVirtqueue) svq;
svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
v->shadow_vq_ops_opaque);
if (unlikely(!svq)) {
error_setg(errp, "Cannot create svq %u", n);
return -1;
@@ -1021,6 +1023,13 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
return true;
}
if (v->migration_blocker) {
int r = migrate_add_blocker(v->migration_blocker, &err);
if (unlikely(r < 0)) {
return false;
}
}
for (i = 0; i < v->shadow_vqs->len; ++i) {
VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
@@ -1063,6 +1072,10 @@ err:
vhost_svq_stop(svq);
}
if (v->migration_blocker) {
migrate_del_blocker(v->migration_blocker);
}
return false;
}
@@ -1082,6 +1095,9 @@ static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
}
}
if (v->migration_blocker) {
migrate_del_blocker(v->migration_blocker);
}
return true;
}
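The blocker itself is only armed by owners whose device state cannot yet be
migrated; vhost_vdpa_svqs_start()/stop() then register and unregister it as
shown above. A sketch of the expected arming in an owner's init path; the
helper name and message are illustrative:

static void vdpa_arm_migration_blocker(struct vhost_vdpa *v)
{
    /* error_setg() on the NULL field allocates the Error object that
     * vhost_vdpa_svqs_start() later passes to migrate_add_blocker() */
    error_setg(&v->migration_blocker,
               "Migration disabled: vhost-vdpa SVQ state is not migratable");
}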