mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-07-28 04:43:54 -06:00
vfio: Add host side DMA window capabilities
There are going to be multiple IOMMUs per container. This moves the single host IOMMU parameter set to a list of VFIOHostDMAWindow entries. This should cause no behavioral change and will be used later by the SPAPR TCE IOMMU v2, which will also add a vfio_host_win_del() helper. Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
This commit is contained in:
parent
318f67ce13
commit
f4ec5e26ed
2 changed files with 52 additions and 18 deletions
|
@ -28,6 +28,7 @@
|
||||||
#include "exec/memory.h"
|
#include "exec/memory.h"
|
||||||
#include "hw/hw.h"
|
#include "hw/hw.h"
|
||||||
#include "qemu/error-report.h"
|
#include "qemu/error-report.h"
|
||||||
|
#include "qemu/range.h"
|
||||||
#include "sysemu/kvm.h"
|
#include "sysemu/kvm.h"
|
||||||
#ifdef CONFIG_KVM
|
#ifdef CONFIG_KVM
|
||||||
#include "linux/kvm.h"
|
#include "linux/kvm.h"
|
||||||
|
@ -241,6 +242,29 @@ static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
|
||||||
return -errno;
|
return -errno;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void vfio_host_win_add(VFIOContainer *container,
|
||||||
|
hwaddr min_iova, hwaddr max_iova,
|
||||||
|
uint64_t iova_pgsizes)
|
||||||
|
{
|
||||||
|
VFIOHostDMAWindow *hostwin;
|
||||||
|
|
||||||
|
QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
|
||||||
|
if (ranges_overlap(hostwin->min_iova,
|
||||||
|
hostwin->max_iova - hostwin->min_iova + 1,
|
||||||
|
min_iova,
|
||||||
|
max_iova - min_iova + 1)) {
|
||||||
|
hw_error("%s: Overlapped IOMMU are not enabled", __func__);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
hostwin = g_malloc0(sizeof(*hostwin));
|
||||||
|
|
||||||
|
hostwin->min_iova = min_iova;
|
||||||
|
hostwin->max_iova = max_iova;
|
||||||
|
hostwin->iova_pgsizes = iova_pgsizes;
|
||||||
|
QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
|
||||||
|
}
|
||||||
|
|
||||||
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
|
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
|
||||||
{
|
{
|
||||||
return (!memory_region_is_ram(section->mr) &&
|
return (!memory_region_is_ram(section->mr) &&
|
||||||
|
@ -329,6 +353,8 @@ static void vfio_listener_region_add(MemoryListener *listener,
|
||||||
Int128 llend, llsize;
|
Int128 llend, llsize;
|
||||||
void *vaddr;
|
void *vaddr;
|
||||||
int ret;
|
int ret;
|
||||||
|
VFIOHostDMAWindow *hostwin;
|
||||||
|
bool hostwin_found;
|
||||||
|
|
||||||
if (vfio_listener_skipped_section(section)) {
|
if (vfio_listener_skipped_section(section)) {
|
||||||
trace_vfio_listener_region_add_skip(
|
trace_vfio_listener_region_add_skip(
|
||||||
|
@ -354,7 +380,15 @@ static void vfio_listener_region_add(MemoryListener *listener,
|
||||||
}
|
}
|
||||||
end = int128_get64(int128_sub(llend, int128_one()));
|
end = int128_get64(int128_sub(llend, int128_one()));
|
||||||
|
|
||||||
if ((iova < container->min_iova) || (end > container->max_iova)) {
|
hostwin_found = false;
|
||||||
|
QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
|
||||||
|
if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
|
||||||
|
hostwin_found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!hostwin_found) {
|
||||||
error_report("vfio: IOMMU container %p can't map guest IOVA region"
|
error_report("vfio: IOMMU container %p can't map guest IOVA region"
|
||||||
" 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
|
" 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
|
||||||
container, iova, end);
|
container, iova, end);
|
||||||
|
@ -369,10 +403,6 @@ static void vfio_listener_region_add(MemoryListener *listener,
|
||||||
|
|
||||||
trace_vfio_listener_region_add_iommu(iova, end);
|
trace_vfio_listener_region_add_iommu(iova, end);
|
||||||
/*
|
/*
|
||||||
* FIXME: We should do some checking to see if the
|
|
||||||
* capabilities of the host VFIO IOMMU are adequate to model
|
|
||||||
* the guest IOMMU
|
|
||||||
*
|
|
||||||
* FIXME: For VFIO iommu types which have KVM acceleration to
|
* FIXME: For VFIO iommu types which have KVM acceleration to
|
||||||
* avoid bouncing all map/unmaps through qemu this way, this
|
* avoid bouncing all map/unmaps through qemu this way, this
|
||||||
* would be the right place to wire that up (tell the KVM
|
* would be the right place to wire that up (tell the KVM
|
||||||
|
@ -879,17 +909,14 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
|
||||||
* existing Type1 IOMMUs generally support any IOVA we're
|
* existing Type1 IOMMUs generally support any IOVA we're
|
||||||
* going to actually try in practice.
|
* going to actually try in practice.
|
||||||
*/
|
*/
|
||||||
container->min_iova = 0;
|
|
||||||
container->max_iova = (hwaddr)-1;
|
|
||||||
|
|
||||||
/* Assume just 4K IOVA page size */
|
|
||||||
container->iova_pgsizes = 0x1000;
|
|
||||||
info.argsz = sizeof(info);
|
info.argsz = sizeof(info);
|
||||||
ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
|
ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
|
||||||
/* Ignore errors */
|
/* Ignore errors */
|
||||||
if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
|
if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
|
||||||
container->iova_pgsizes = info.iova_pgsizes;
|
/* Assume 4k IOVA page size */
|
||||||
|
info.iova_pgsizes = 4096;
|
||||||
}
|
}
|
||||||
|
vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
|
||||||
} else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
|
} else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
|
||||||
ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
|
ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
|
||||||
struct vfio_iommu_spapr_tce_info info;
|
struct vfio_iommu_spapr_tce_info info;
|
||||||
|
@ -949,11 +976,12 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
|
||||||
}
|
}
|
||||||
goto free_container_exit;
|
goto free_container_exit;
|
||||||
}
|
}
|
||||||
container->min_iova = info.dma32_window_start;
|
|
||||||
container->max_iova = container->min_iova + info.dma32_window_size - 1;
|
|
||||||
|
|
||||||
/* Assume just 4K IOVA pages for now */
|
/* The default table uses 4K pages */
|
||||||
container->iova_pgsizes = 0x1000;
|
vfio_host_win_add(container, info.dma32_window_start,
|
||||||
|
info.dma32_window_start +
|
||||||
|
info.dma32_window_size - 1,
|
||||||
|
0x1000);
|
||||||
} else {
|
} else {
|
||||||
error_report("vfio: No available IOMMU models");
|
error_report("vfio: No available IOMMU models");
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
|
|
|
@ -82,9 +82,8 @@ typedef struct VFIOContainer {
|
||||||
* contiguous IOVA window. We may need to generalize that in
|
* contiguous IOVA window. We may need to generalize that in
|
||||||
* future
|
* future
|
||||||
*/
|
*/
|
||||||
hwaddr min_iova, max_iova;
|
|
||||||
uint64_t iova_pgsizes;
|
|
||||||
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
|
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
|
||||||
|
QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
|
||||||
QLIST_HEAD(, VFIOGroup) group_list;
|
QLIST_HEAD(, VFIOGroup) group_list;
|
||||||
QLIST_ENTRY(VFIOContainer) next;
|
QLIST_ENTRY(VFIOContainer) next;
|
||||||
} VFIOContainer;
|
} VFIOContainer;
|
||||||
|
@ -97,6 +96,13 @@ typedef struct VFIOGuestIOMMU {
|
||||||
QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
|
QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
|
||||||
} VFIOGuestIOMMU;
|
} VFIOGuestIOMMU;
|
||||||
|
|
||||||
|
/*
 * One host IOMMU DMA window: a contiguous IOVA range the host can map.
 * Entries live on VFIOContainer::hostwin_list (see vfio_host_win_add()),
 * replacing the former single min_iova/max_iova pair on the container.
 */
typedef struct VFIOHostDMAWindow {
    hwaddr min_iova;        /* lowest mappable IOVA (inclusive) */
    hwaddr max_iova;        /* highest mappable IOVA (inclusive) */
    /*
     * IOVA page sizes for this window; taken from the kernel's
     * VFIO_IOMMU_GET_INFO iova_pgsizes when reported, else 4K is
     * assumed (presumably a size bitmask — matches the ioctl field).
     */
    uint64_t iova_pgsizes;
    QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
} VFIOHostDMAWindow;
|
||||||
|
|
||||||
typedef struct VFIODeviceOps VFIODeviceOps;
|
typedef struct VFIODeviceOps VFIODeviceOps;
|
||||||
|
|
||||||
typedef struct VFIODevice {
|
typedef struct VFIODevice {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue