mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-06 09:13:55 -06:00
vdpa: add asid parameter to vhost_vdpa_dma_map/unmap
So the caller can choose to which ASID the mapping is destined. No need to update the batch functions as they will always be called from memory listener updates at the moment. Memory listener updates will always update ASID 0, as it's the passthrough ASID. All vhost devices' ASIDs are 0 at this moment. Signed-off-by: Eugenio Pérez <eperezma@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Message-Id: <20221215113144.322011-10-eperezma@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
273e0003f0
commit
cd831ed5c4
4 changed files with 41 additions and 19 deletions
|
@ -72,22 +72,28 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
|
|||
return false;
|
||||
}
|
||||
|
||||
int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
|
||||
void *vaddr, bool readonly)
|
||||
/*
|
||||
* The caller must set asid = 0 if the device does not support asid.
|
||||
* This is not an ABI break since it is set to 0 by the initializer anyway.
|
||||
*/
|
||||
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
|
||||
hwaddr size, void *vaddr, bool readonly)
|
||||
{
|
||||
struct vhost_msg_v2 msg = {};
|
||||
int fd = v->device_fd;
|
||||
int ret = 0;
|
||||
|
||||
msg.type = v->msg_type;
|
||||
msg.asid = asid;
|
||||
msg.iotlb.iova = iova;
|
||||
msg.iotlb.size = size;
|
||||
msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
|
||||
msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
|
||||
msg.iotlb.type = VHOST_IOTLB_UPDATE;
|
||||
|
||||
trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
|
||||
msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
|
||||
trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
|
||||
msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
|
||||
msg.iotlb.type);
|
||||
|
||||
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
|
||||
error_report("failed to write, fd=%d, errno=%d (%s)",
|
||||
|
@ -98,18 +104,24 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
|
|||
return ret;
|
||||
}
|
||||
|
||||
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
|
||||
/*
|
||||
* The caller must set asid = 0 if the device does not support asid.
|
||||
* This is not an ABI break since it is set to 0 by the initializer anyway.
|
||||
*/
|
||||
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
|
||||
hwaddr size)
|
||||
{
|
||||
struct vhost_msg_v2 msg = {};
|
||||
int fd = v->device_fd;
|
||||
int ret = 0;
|
||||
|
||||
msg.type = v->msg_type;
|
||||
msg.asid = asid;
|
||||
msg.iotlb.iova = iova;
|
||||
msg.iotlb.size = size;
|
||||
msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
|
||||
|
||||
trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
|
||||
trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
|
||||
msg.iotlb.size, msg.iotlb.type);
|
||||
|
||||
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
|
||||
|
@ -229,8 +241,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
|
|||
}
|
||||
|
||||
vhost_vdpa_iotlb_batch_begin_once(v);
|
||||
ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
|
||||
vaddr, section->readonly);
|
||||
ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
|
||||
int128_get64(llsize), vaddr, section->readonly);
|
||||
if (ret) {
|
||||
error_report("vhost vdpa map fail!");
|
||||
goto fail_map;
|
||||
|
@ -303,7 +315,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
|
|||
vhost_iova_tree_remove(v->iova_tree, *result);
|
||||
}
|
||||
vhost_vdpa_iotlb_batch_begin_once(v);
|
||||
ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
|
||||
ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
|
||||
int128_get64(llsize));
|
||||
if (ret) {
|
||||
error_report("vhost_vdpa dma unmap error!");
|
||||
}
|
||||
|
@ -869,7 +882,7 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
|
|||
}
|
||||
|
||||
size = ROUND_UP(result->size, qemu_real_host_page_size());
|
||||
r = vhost_vdpa_dma_unmap(v, result->iova, size);
|
||||
r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
|
||||
if (unlikely(r < 0)) {
|
||||
error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
|
||||
return;
|
||||
|
@ -909,7 +922,8 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
|
|||
return false;
|
||||
}
|
||||
|
||||
r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
|
||||
r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
|
||||
needle->size + 1,
|
||||
(void *)(uintptr_t)needle->translated_addr,
|
||||
needle->perm == IOMMU_RO);
|
||||
if (unlikely(r != 0)) {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue