libvhost-user: Speedup gpa_to_mem_region() and vu_gpa_to_va()
Let's speed up GPA to memory region / virtual address lookup. Store the
memory regions ordered by guest physical addresses, and use binary
search for address translation, as well as when adding/removing memory
regions.

Most importantly, this will speed up GPA->VA address translation when
we have many memslots.

Reviewed-by: Raphael Norwitz <raphael@enfabrica.net>
Acked-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20240214151701.29906-11-david@redhat.com>
Tested-by: Mario Casquero <mcasquer@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent 60ccdca42d
commit a3c0118c5a
1 changed file with 45 additions and 4 deletions
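For orientation: the hunks below operate on dev->regions[], an array of VuDevRegion entries that this commit keeps sorted by guest physical address. A minimal sketch of the fields the diff actually touches, assuming the layout in libvhost-user.h (the real struct has additional members, e.g. for the mmap offset):

#include <stdint.h>

typedef struct VuDevRegion {
    uint64_t gpa;        /* guest physical address where the region starts */
    uint64_t size;       /* region size in bytes */
    uint64_t qva;        /* QEMU/userspace virtual address of the region */
    uint64_t mmap_addr;  /* address at which this process mapped the region */
} VuDevRegion;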
@@ -199,19 +199,30 @@ vu_panic(VuDev *dev, const char *msg, ...)
 static VuDevRegion *
 vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr)
 {
-    unsigned int i;
+    int low = 0;
+    int high = dev->nregions - 1;
 
     /*
      * Memory regions cannot overlap in guest physical address space. Each
      * GPA belongs to exactly one memory region, so there can only be one
      * match.
+     *
+     * We store our memory regions ordered by GPA and can simply perform a
+     * binary search.
      */
-    for (i = 0; i < dev->nregions; i++) {
-        VuDevRegion *cur = &dev->regions[i];
+    while (low <= high) {
+        unsigned int mid = low + (high - low) / 2;
+        VuDevRegion *cur = &dev->regions[mid];
 
         if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) {
             return cur;
         }
+        if (guest_addr >= cur->gpa + cur->size) {
+            low = mid + 1;
+        }
+        if (guest_addr < cur->gpa) {
+            high = mid - 1;
+        }
     }
     return NULL;
 }
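The loop above is a textbook binary search over sorted, non-overlapping ranges. A standalone, compilable sketch of the same control flow (the Region type, the find_region() helper, and the sample data are hypothetical; only the logic mirrors the new vu_gpa_to_mem_region()):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t gpa, size; } Region;

static Region *find_region(Region *regs, int nregions, uint64_t addr)
{
    int low = 0;
    int high = nregions - 1;

    while (low <= high) {
        int mid = low + (high - low) / 2;  /* avoids overflow of low + high */
        Region *cur = &regs[mid];

        if (addr >= cur->gpa && addr < cur->gpa + cur->size) {
            return cur;                    /* addr falls inside this region */
        }
        if (addr >= cur->gpa + cur->size) {
            low = mid + 1;                 /* continue in the upper half */
        }
        if (addr < cur->gpa) {
            high = mid - 1;                /* continue in the lower half */
        }
    }
    return NULL;                           /* no region contains addr */
}

int main(void)
{
    Region regs[] = { { 0x0, 0x1000 }, { 0x4000, 0x2000 }, { 0x10000, 0x1000 } };

    printf("hit:  %d\n", find_region(regs, 3, 0x4800) == &regs[1]); /* 1 */
    printf("miss: %d\n", find_region(regs, 3, 0x2000) == NULL);     /* 1 */
    return 0;
}

Each miss discards half of the remaining candidates, so a lookup costs O(log nregions) comparisons instead of the old linear scan's O(nregions), which is what makes configurations with many memslots cheap.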
@@ -273,9 +284,14 @@ vu_remove_all_mem_regs(VuDev *dev)
 static void
 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
 {
+    const uint64_t start_gpa = msg_region->guest_phys_addr;
+    const uint64_t end_gpa = start_gpa + msg_region->memory_size;
     int prot = PROT_READ | PROT_WRITE;
     VuDevRegion *r;
     void *mmap_addr;
+    int low = 0;
+    int high = dev->nregions - 1;
+    unsigned int idx;
 
     DPRINT("Adding region %d\n", dev->nregions);
     DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
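The new locals start_gpa and end_gpa describe the incoming region as a half-open interval [start_gpa, end_gpa). The next hunk tests it against each probed region with the standard overlap predicate for half-open ranges; as a hypothetical standalone helper:

#include <stdbool.h>
#include <stdint.h>

/* Two half-open intervals [a_start, a_end) and [b_start, b_end) overlap
 * exactly when each interval begins before the other one ends. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
                           uint64_t b_start, uint64_t b_end)
{
    return a_start < b_end && b_start < a_end;
}

With a = [start_gpa, end_gpa) and b = [cur->gpa, cur->gpa + cur->size), this is exactly the condition guarding the vu_panic() call below.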
@@ -295,6 +311,29 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
         prot = PROT_NONE;
     }
 
+    /*
+     * We will add memory regions into the array sorted by GPA. Perform a
+     * binary search to locate the insertion point: it will be at the low
+     * index.
+     */
+    while (low <= high) {
+        unsigned int mid = low + (high - low) / 2;
+        VuDevRegion *cur = &dev->regions[mid];
+
+        /* Overlap of GPA addresses. */
+        if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) {
+            vu_panic(dev, "regions with overlapping guest physical addresses");
+            return;
+        }
+        if (start_gpa >= cur->gpa + cur->size) {
+            low = mid + 1;
+        }
+        if (start_gpa < cur->gpa) {
+            high = mid - 1;
+        }
+    }
+    idx = low;
+
     /*
      * We don't use offset argument of mmap() since the mapped address has
      * to be page aligned, and we use huge pages.
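A note on why idx = low is correct: each iteration either moves low past a region that ends at or before start_gpa, or moves high below a region that starts at or after end_gpa (anything else would have tripped the overlap panic). When the loop exits, regions[0..low) all lie entirely below the new region and regions[low..nregions) entirely above it, so low is the unique index that keeps the array sorted. A reduced sketch of just the insertion-point search, with a hypothetical helper name and the overlap case omitted:

#include <stdint.h>

typedef struct { uint64_t gpa, size; } Region;

/* Returns the index where a region starting at start_gpa must be inserted
 * to keep regs[] sorted; assumes no overlap with any existing region. */
static unsigned int insertion_index(const Region *regs, unsigned int n,
                                    uint64_t start_gpa)
{
    int low = 0;
    int high = (int)n - 1;

    while (low <= high) {
        int mid = low + (high - low) / 2;

        if (start_gpa >= regs[mid].gpa + regs[mid].size) {
            low = mid + 1;   /* new region lies entirely above regs[mid] */
        } else {
            high = mid - 1;  /* no overlap, so it lies entirely below */
        }
    }
    return (unsigned int)low;
}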
@@ -308,7 +347,9 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
     DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
            (uint64_t)(uintptr_t)mmap_addr);
 
-    r = &dev->regions[dev->nregions];
+    /* Shift all affected entries by 1 to open a hole at idx. */
+    r = &dev->regions[idx];
+    memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx));
     r->gpa = msg_region->guest_phys_addr;
     r->size = msg_region->memory_size;
     r->qva = msg_region->userspace_addr;
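The memmove() in the final hunk is the usual idiom for inserting into a sorted array: shift the tail up by one slot to open a hole, then write the new element into it. A minimal sketch (Region and insert_at() are hypothetical; the caller must guarantee room for n + 1 entries, which libvhost-user satisfies with its fixed-capacity regions array):

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t gpa, size; } Region;

static void insert_at(Region *regs, unsigned int n, unsigned int idx,
                      Region new_reg)
{
    /* Move the n - idx trailing entries one slot to the right. memmove()
     * rather than memcpy() because source and destination overlap. */
    memmove(&regs[idx + 1], &regs[idx], sizeof(Region) * (n - idx));
    regs[idx] = new_reg;
}

When idx == n (insertion at the end, the only case the old code handled), the memmove() degenerates to a harmless zero-byte move.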