memory-device,vhost: Support memory devices that dynamically consume memslots
We want to support memory devices that have a dynamically managed memory region container as device memory region. This device memory region maps multiple RAM memory subregions (e.g., aliases to the same RAM memory region), whereby these subregions can be (un)mapped on demand.

Each RAM subregion will consume a memslot in KVM and vhost, resulting in such a new device consuming memslots dynamically, and initially usually 0. We already track the number of used vs. required memslots for all memslots. From that, we can derive the number of reserved memslots that must not be used otherwise.

The target use case is virtio-mem and the hyper-v balloon, which will dynamically map aliases to RAM memory regions into their device memory region container.

Properly document what's supported and what's not, and extend the vhost memslot check accordingly.

Message-ID: <20230926185738.277351-10-david@redhat.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
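As background for the commit message above, here is a minimal sketch (not part of this commit) of what "mapping RAM subregions on demand into a device memory region container" looks like with QEMU's generic memory API. The DynDevice structure and the dyn_device_*() helpers are hypothetical names used only for illustration; memory_region_init(), memory_region_init_alias(), memory_region_add_subregion() and memory_region_del_subregion() are the real API. Each alias mapped this way consumes one additional memslot in KVM and vhost, and unmapping it releases that memslot again.

#include "qemu/osdep.h"
#include "exec/memory.h"

/*
 * Hypothetical device state: an initially empty container into which
 * aliases to the device's backing RAM are (un)mapped on demand.
 */
typedef struct DynDevice {
    MemoryRegion container;  /* the device memory region               */
    MemoryRegion alias;      /* one alias into the backing RAM region  */
    MemoryRegion *backing;   /* the actual RAM memory region           */
} DynDevice;

static void dyn_device_init(DynDevice *d, Object *owner, uint64_t region_size)
{
    /* The container starts out empty: 0 memslots consumed initially. */
    memory_region_init(&d->container, owner, "dyn-device", region_size);
}

/* Map one chunk; this consumes one extra memslot in KVM and vhost. */
static void dyn_device_map_chunk(DynDevice *d, Object *owner,
                                 hwaddr container_offset,
                                 hwaddr backing_offset, uint64_t size)
{
    memory_region_init_alias(&d->alias, owner, "dyn-device-chunk",
                             d->backing, backing_offset, size);
    memory_region_add_subregion(&d->container, container_offset, &d->alias);
}

/* Unmap the chunk again; the memslot is released. */
static void dyn_device_unmap_chunk(DynDevice *d)
{
    memory_region_del_subregion(&d->container, &d->alias);
}

This is the pattern virtio-mem and the hyper-v balloon are intended to use, which is why the number of memslots such a device consumes varies at runtime and is usually 0 right after the device is created.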
parent f9716f4b0d
commit 766aa0a654

4 changed files with 53 additions and 6 deletions
hw/virtio/vhost.c:

@@ -23,6 +23,7 @@
 #include "qemu/log.h"
 #include "standard-headers/linux/vhost_types.h"
 #include "hw/virtio/virtio-bus.h"
+#include "hw/mem/memory-device.h"
 #include "migration/blocker.h"
 #include "migration/qemu-file-types.h"
 #include "sysemu/dma.h"
@@ -1423,7 +1424,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout,
                    Error **errp)
 {
-    unsigned int used;
+    unsigned int used, reserved, limit;
     uint64_t features;
     int i, r, n_initialized_vqs = 0;
 
@@ -1529,9 +1530,18 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     } else {
         used = used_memslots;
     }
-    if (used > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_setg(errp, "vhost backend memory slots limit is less"
-                   " than current number of present memory slots");
+    /*
+     * We assume that all reserved memslots actually require a real memslot
+     * in our vhost backend. This might not be true, for example, if the
+     * memslot would be ROM. If ever relevant, we can optimize for that --
+     * but we'll need additional information about the reservations.
+     */
+    reserved = memory_devices_get_reserved_memslots();
+    limit = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
+    if (used + reserved > limit) {
+        error_setg(errp, "vhost backend memory slots limit (%d) is less"
+                   " than current number of used (%d) and reserved (%d)"
+                   " memory slots for memory devices.", limit, used, reserved);
         r = -EINVAL;
         goto fail_busyloop;
     }
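The check added above builds on counters that the memory-device core already maintains. Below is a hedged sketch of the derivation described in the commit message ("we already track the number of used vs. required memslots ... from that, we can derive the number of reserved memslots"); the helper name is made up for illustration, while the real value is exposed to vhost via memory_devices_get_reserved_memslots() as seen in the hunk above.

/*
 * Illustrative only, not the actual QEMU implementation: memslots that
 * dynamic memory devices may still need ("required") but have not mapped
 * yet ("used") must be treated as reserved by vhost and KVM.
 */
static unsigned int sketch_reserved_memslots(unsigned int required,
                                             unsigned int used)
{
    return required > used ? required - used : 0;
}

With that value, vhost_dev_init() now fails with -EINVAL when used + reserved would exceed the backend's memslot limit, instead of only comparing the currently used slots against the limit as before.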