vdpa: Add custom IOTLB translations to SVQ

Use translations added in VhostIOVATree in SVQ.

Only introduce usage here, not allocation and deallocation. As with
previous patches, we use the dead code paths of shadow_vqs_enabled to
avoid committing too many changes at once. These code paths are
impossible to take at the moment.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Author:    Eugenio Pérez, 2022-03-14 18:34:51 +01:00
Committer: Jason Wang
parent     ec6122d882
commit     34e3c94eda
4 changed files with 187 additions and 30 deletions
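
For reference, the core of the change is that descriptors written to the
shadow vring now carry SVQ IOVA addresses instead of qemu VAs. Below is a
minimal sketch of a single lookup against the tree, using the
DMAMap/VhostIOVATree API this patch builds on; va_to_iova itself is an
illustrative helper, not part of the patch:

    /*
     * Sketch: translate one qemu VA buffer to its SVQ IOVA, assuming the
     * mapping was added to the tree beforehand (vhost_iova_tree_map_alloc).
     */
    static bool va_to_iova(const VhostIOVATree *tree, void *vaddr, size_t len,
                           hwaddr *iova)
    {
        const DMAMap needle = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = len,
        };
        const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);

        if (!map) {
            return false; /* no mapping covers this qemu VA */
        }
        /* Keep the same offset into the IOVA range as into the VA range */
        *iova = map->iova + (needle.translated_addr - map->translated_addr);
        return true;
    }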

@@ -70,7 +70,59 @@ static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
     return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
 }
 
-static void vhost_vring_write_descs(VhostShadowVirtqueue *svq,
+/**
+ * Translate addresses between qemu's virtual address space and the SVQ IOVA
+ * space
+ *
+ * @svq: Shadow VirtQueue
+ * @addrs: Destination array of translated SVQ IOVA addresses
+ * @iovec: Source qemu's VA addresses
+ * @num: Length of iovec and minimum length of addrs
+ */
+static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
+                                     hwaddr *addrs, const struct iovec *iovec,
+                                     size_t num)
+{
+    if (num == 0) {
+        return true;
+    }
+
+    for (size_t i = 0; i < num; ++i) {
+        DMAMap needle = {
+            .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
+            .size = iovec[i].iov_len,
+        };
+        Int128 needle_last, map_last;
+        size_t off;
+
+        const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
+        /*
+         * Map cannot be NULL since the iova map contains all guest space and
+         * qemu already has a physical address mapped
+         */
+        if (unlikely(!map)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "Invalid address 0x%"HWADDR_PRIx" given by guest",
+                          needle.translated_addr);
+            return false;
+        }
+
+        off = needle.translated_addr - map->translated_addr;
+        addrs[i] = map->iova + off;
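+        /*
+         * Bounds check: the buffer must not run past the end of this
+         * mapping, since VA contiguity does not guarantee IOVA contiguity
+         * across different mappings.
+         */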
+        needle_last = int128_add(int128_make64(needle.translated_addr),
+                                 int128_make64(iovec[i].iov_len));
+        map_last = int128_make64(map->translated_addr + map->size);
+        if (unlikely(int128_gt(needle_last, map_last))) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "Guest buffer expands over iova range");
+            return false;
+        }
+    }
+
+    return true;
+}
+
+static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
                                     const struct iovec *iovec, size_t num,
                                     bool more_descs, bool write)
 {
@@ -89,7 +141,7 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq,
         } else {
             descs[i].flags = flags;
         }
-        descs[i].addr = cpu_to_le64((hwaddr)(intptr_t)iovec[n].iov_base);
+        descs[i].addr = cpu_to_le64(sg[n]);
         descs[i].len = cpu_to_le32(iovec[n].iov_len);
 
         last = i;
@@ -104,6 +156,8 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
 {
     unsigned avail_idx;
     vring_avail_t *avail = svq->vring.avail;
+    bool ok;
+    g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num));
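+    /* Scratch IOVA array, reused for both out_sg and in_sg translations */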
 
     *head = svq->free_head;
@@ -114,9 +168,20 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
         return false;
     }
 
-    vhost_vring_write_descs(svq, elem->out_sg, elem->out_num, elem->in_num > 0,
-                            false);
-    vhost_vring_write_descs(svq, elem->in_sg, elem->in_num, false, true);
+    ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num);
+    if (unlikely(!ok)) {
+        return false;
+    }
+    vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num,
+                            elem->in_num > 0, false);
+
+    ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num);
+    if (unlikely(!ok)) {
+        return false;
+    }
+
+    vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true);
 
     /*
      * Put the entry in the available array (but don't update avail->idx until
@@ -395,9 +460,9 @@ void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
 void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                               struct vhost_vring_addr *addr)
 {
-    addr->desc_user_addr = (uint64_t)(intptr_t)svq->vring.desc;
-    addr->avail_user_addr = (uint64_t)(intptr_t)svq->vring.avail;
-    addr->used_user_addr = (uint64_t)(intptr_t)svq->vring.used;
+    addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
+    addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
+    addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
 }
 
 size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
@@ -518,11 +583,13 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
  * Creates vhost shadow virtqueue, and instructs the vhost device to use the
  * shadow methods and file descriptors.
  *
+ * @iova_tree: Tree to perform descriptor translations
+ *
  * Returns the new virtqueue or NULL.
  *
  * In case of error, reason is reported through error_report.
  */
-VhostShadowVirtqueue *vhost_svq_new(void)
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
 {
     g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
     int r;
@@ -543,6 +610,7 @@ VhostShadowVirtqueue *vhost_svq_new(void)
     event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
     event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
 
+    svq->iova_tree = iova_tree;
     return g_steal_pointer(&svq);
 
 err_init_hdev_call:
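
With this signature change, callers must supply the IOVA tree at creation
time. A hedged usage sketch follows; iova_first and iova_last are
assumptions here, normally taken from the device's reported IOVA range, and
the real vdpa-side wiring lands later in this series:

    /* Create the tree covering the usable IOVA range, then hand it to
     * each shadow virtqueue at creation time. */
    VhostIOVATree *iova_tree = vhost_iova_tree_new(iova_first, iova_last);
    VhostShadowVirtqueue *svq = vhost_svq_new(iova_tree);
    if (!svq) {
        /* vhost_svq_new reported the reason through error_report() */
    }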