mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-31 14:02:05 -06:00
vhost: Clean out old vhost_set_memory and friends
Remove the old update mechanism, vhost_set_memory, and the functions and flags it used. Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
ade6d081fc
commit
06709c120c
2 changed files with 0 additions and 254 deletions
|
@ -155,160 +155,6 @@ static void vhost_log_sync_range(struct vhost_dev *dev,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */

/*
 * Remove the guest-physical range [start_addr, start_addr + size) from
 * dev->mem, compacting the region array in place.
 *
 * Each existing region that overlaps the range is either dropped entirely,
 * shrunk at one end, shifted past the range, or split in two (the split
 * appends the new second half at slot n, so the caller must have reserved
 * room for one extra region — see the nregions == n assert below).
 *
 * The overlap_* / split counters exist purely for sanity checking: since
 * regions in dev->mem never overlap each other, at most one region can be
 * clipped at each end of the removed range, and a split precludes any
 * other overlap.
 */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region: compacts the array when earlier iterations
         * removed a region (to lags behind from) */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region: the removed range covers this region
         * completely, so drop it and re-use slot `to` for the next one. */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region: removed range overlaps the tail of this region
         * (it cannot cover the head here, or the branch above would have
         * taken it), so truncate the region before start_addr. */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region: removed range overlaps the head of this region;
         * advance both the guest-physical and userspace base past it. */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
|
|
||||||
|
|
||||||
/* Called after unassign, so no regions overlap the given range. */

/*
 * Insert the mapping [start_addr, start_addr + size) -> uaddr into
 * dev->mem, merging it with any physically AND userspace adjacent
 * regions in a single pass.
 *
 * `merged` points at the region absorbing the range; once set, further
 * adjacent regions are folded into it and their slots reclaimed by
 * letting `to` lag behind `from`. start_addr/size/uaddr are updated to
 * the merged extent each time so later regions compare against the
 * grown range. If nothing merged, a fresh region is appended at `to`.
 */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;   /* last physical/userspace byte of reg */
        uint64_t pmlast, umlast;   /* last physical/userspace byte of new range */
        uint64_t s, e, u;

        /* clone old region (compaction when a previous merge freed a slot) */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge.
         * Merging requires adjacency in BOTH address spaces so the
         * combined region keeps a single linear phys->user mapping. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        /* Let the backend veto the merge (optional hook). */
        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            /* reg is being folded into `merged`; drop its slot. */
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        /* Grow the working range to the merged extent for later passes. */
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        /* Nothing adjacent: append a brand-new region.
         * Caller guarantees room for one extra slot (see the assert). */
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
|
|
||||||
|
|
||||||
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
|
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
|
||||||
{
|
{
|
||||||
uint64_t log_size = 0;
|
uint64_t log_size = 0;
|
||||||
|
@ -531,95 +377,6 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
|
|
||||||
uint64_t start_addr,
|
|
||||||
uint64_t size)
|
|
||||||
{
|
|
||||||
int i, n = dev->mem->nregions;
|
|
||||||
for (i = 0; i < n; ++i) {
|
|
||||||
struct vhost_memory_region *reg = dev->mem->regions + i;
|
|
||||||
if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
|
|
||||||
start_addr, size)) {
|
|
||||||
return reg;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
|
|
||||||
uint64_t start_addr,
|
|
||||||
uint64_t size,
|
|
||||||
uint64_t uaddr)
|
|
||||||
{
|
|
||||||
struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
|
|
||||||
uint64_t reglast;
|
|
||||||
uint64_t memlast;
|
|
||||||
|
|
||||||
if (!reg) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
|
|
||||||
memlast = range_get_last(start_addr, size);
|
|
||||||
|
|
||||||
/* Need to extend region? */
|
|
||||||
if (start_addr < reg->guest_phys_addr || memlast > reglast) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
/* userspace_addr changed? */
|
|
||||||
return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
 * MemoryListener region add/remove handler: fold one MemoryRegionSection
 * into (add == true) or out of (add == false) dev->mem, then record the
 * touched guest-physical range so the commit step can push one combined
 * update to the backend.
 */
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    /* Any dirty-log client other than migration means vhost must not
     * bypass QEMU for this region, so treat it as a removal. */
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    /* Grow the table up front: unassign may split one region into two,
     * and assign may append one — either needs at most one extra slot. */
    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        /* NOTE(review): this second unassign appears redundant — the call
         * just above already cleared the range — and looks like a no-op
         * kept for safety; confirm before relying on it. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    /* Widen the dirty window used by the commit handler. */
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}
|
|
||||||
|
|
||||||
static bool vhost_section(MemoryRegionSection *section)
|
static bool vhost_section(MemoryRegionSection *section)
|
||||||
{
|
{
|
||||||
return memory_region_is_ram(section->mr) &&
|
return memory_region_is_ram(section->mr) &&
|
||||||
|
@ -630,8 +387,6 @@ static void vhost_begin(MemoryListener *listener)
|
||||||
{
|
{
|
||||||
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
|
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
|
||||||
memory_listener);
|
memory_listener);
|
||||||
dev->mem_changed_end_addr = 0;
|
|
||||||
dev->mem_changed_start_addr = -1;
|
|
||||||
dev->tmp_sections = NULL;
|
dev->tmp_sections = NULL;
|
||||||
dev->n_tmp_sections = 0;
|
dev->n_tmp_sections = 0;
|
||||||
}
|
}
|
||||||
|
@ -707,7 +462,6 @@ static void vhost_commit(MemoryListener *listener)
|
||||||
if (r < 0) {
|
if (r < 0) {
|
||||||
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
|
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
|
||||||
}
|
}
|
||||||
dev->memory_changed = false;
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
log_size = vhost_get_log_size(dev);
|
log_size = vhost_get_log_size(dev);
|
||||||
|
@ -726,7 +480,6 @@ static void vhost_commit(MemoryListener *listener)
|
||||||
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
|
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
|
||||||
vhost_dev_log_resize(dev, log_size);
|
vhost_dev_log_resize(dev, log_size);
|
||||||
}
|
}
|
||||||
dev->memory_changed = false;
|
|
||||||
|
|
||||||
out:
|
out:
|
||||||
/* Deref the old list of sections, this must happen _after_ the
|
/* Deref the old list of sections, this must happen _after_ the
|
||||||
|
@ -817,8 +570,6 @@ static void vhost_region_add(MemoryListener *listener,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
vhost_region_add_section(dev, section);
|
vhost_region_add_section(dev, section);
|
||||||
|
|
||||||
vhost_set_memory(listener, section, true);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Called on regions that have not changed */
|
/* Called on regions that have not changed */
|
||||||
|
@ -842,7 +593,6 @@ static void vhost_region_del(MemoryListener *listener,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
vhost_set_memory(listener, section, false);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
|
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
|
||||||
|
@ -1457,7 +1207,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
|
||||||
hdev->log_size = 0;
|
hdev->log_size = 0;
|
||||||
hdev->log_enabled = false;
|
hdev->log_enabled = false;
|
||||||
hdev->started = false;
|
hdev->started = false;
|
||||||
hdev->memory_changed = false;
|
|
||||||
memory_listener_register(&hdev->memory_listener, &address_space_memory);
|
memory_listener_register(&hdev->memory_listener, &address_space_memory);
|
||||||
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
|
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -75,9 +75,6 @@ struct vhost_dev {
|
||||||
bool log_enabled;
|
bool log_enabled;
|
||||||
uint64_t log_size;
|
uint64_t log_size;
|
||||||
Error *migration_blocker;
|
Error *migration_blocker;
|
||||||
bool memory_changed;
|
|
||||||
hwaddr mem_changed_start_addr;
|
|
||||||
hwaddr mem_changed_end_addr;
|
|
||||||
const VhostOps *vhost_ops;
|
const VhostOps *vhost_ops;
|
||||||
void *opaque;
|
void *opaque;
|
||||||
struct vhost_log *log;
|
struct vhost_log *log;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue