virtio,pc,pci: features, cleanups, fixes
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: features, cleanups, fixes

more memslots support in libvhost-user
support PCIe Gen5/Gen6 link speeds in pcie
more traces in vdpa
network simulation devices support in vdpa
SMBIOS type 9 descriptor implementation
Bump max_cpus to 4096 vcpus in q35
aw-bits and granule options in VIRTIO-IOMMU
Support reporting NUMA nodes for device memory using GI in acpi
Beginning of shutdown event support in pvpanic

fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmXw0TMPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRp8x4H+gLMoGwaGAX7gDGPgn2Ix4j/3kO77ZJ9X9k/
# 1KqZu/9eMS1j2Ei+vZqf05w7qRjxxhwDq3ilEXF/+UFqgAehLqpRRB8j5inqvzYt
# +jv0DbL11PBp/oFjWcytm5CbiVsvq8KlqCF29VNzc162XdtcduUOWagL96y8lJfZ
# uPrOoyeR7SMH9lp3LLLHWgu+9W4nOS03RroZ6Umj40y5B7yR0Rrppz8lMw5AoQtr
# 0gMRnFhYXeiW6CXdz+Tzcr7XfvkkYDi/j7ibiNSURLBfOpZa6Y8+kJGKxz5H1K1G
# 6ZY4PBcOpQzl+NMrktPHogczgJgOK10t+1i/R3bGZYw2Qn/93Eg=
# =C0UU
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 12 Mar 2024 22:03:31 GMT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (68 commits)
  docs/specs/pvpanic: document shutdown event
  hw/cxl: Fix missing reserved data in CXL Device DVSEC
  hmat acpi: Fix out of bounds access due to missing use of indirection
  hmat acpi: Do not add Memory Proximity Domain Attributes Structure targetting non existent memory.
  qemu-options.hx: Document the virtio-iommu-pci aw-bits option
  hw/arm/virt: Set virtio-iommu aw-bits default value to 48
  hw/i386/q35: Set virtio-iommu aw-bits default value to 39
  virtio-iommu: Add an option to define the input range width
  virtio-iommu: Trace domain range limits as unsigned int
  qemu-options.hx: Document the virtio-iommu-pci granule option
  virtio-iommu: Change the default granule to the host page size
  virtio-iommu: Add a granule property
  hw/i386/acpi-build: Add support for SRAT Generic Initiator structures
  hw/acpi: Implement the SRAT GI affinity structure
  qom: new object to associate device to NUMA node
  hw/i386/pc: Inline pc_cmos_init() into pc_cmos_init_late() and remove it
  hw/i386/pc: Set "normal" boot device order in pc_basic_device_init()
  hw/i386/pc: Avoid one use of the current_machine global
  hw/i386/pc: Remove "rtc_state" link again
  Revert "hw/i386/pc: Confine system flash handling to pc_sysfw"
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#	hw/core/machine.c
commit 6fc6931231
56 changed files with 1428 additions and 384 deletions

hw/virtio/trace-events

@@ -30,6 +30,7 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
 vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
 
 # vhost-vdpa.c
+vhost_vdpa_skipped_memory_section(int is_ram, int is_iommu, int is_protected, int is_ram_device, uint64_t first, uint64_t last, int page_mask) "is_ram=%d, is_iommu=%d, is_protected=%d, is_ram_device=%d iova_min=0x%"PRIx64" iova_last=0x%"PRIx64" page_mask=0x%x"
 vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
 vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
 vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
@@ -57,8 +58,8 @@ vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
 vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
 vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
 vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
-vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
-vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_set_dev_vring_base(void *dev, unsigned int index, unsigned int num, bool svq) "dev: %p index: %u num: %u svq: %d"
+vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num, bool svq) "dev: %p index: %u num: %u svq: %d"
 vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
 vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
 vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
@@ -111,7 +112,7 @@ virtio_iommu_device_reset(void) "reset!"
 virtio_iommu_system_reset(void) "system reset!"
 virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64
 virtio_iommu_device_status(uint8_t status) "driver status = %d"
-virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%d domain range end=%d probe_size=0x%x bypass=0x%x"
+virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_start, uint32_t domain_end, uint32_t probe_size, uint8_t bypass) "page_size_mask=0x%"PRIx64" input range start=0x%"PRIx64" input range end=0x%"PRIx64" domain range start=%u domain range end=%u probe_size=0x%x bypass=0x%x"
 virtio_iommu_set_config(uint8_t bypass) "bypass=0x%x"
 virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
 virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
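
These trace points are ordinary QEMU trace events, so they can be enabled by pattern at startup. A minimal sketch, assuming a build with a trace backend such as "log" and an otherwise working vhost-vdpa setup:

    qemu-system-x86_64 -trace 'vhost_vdpa_*' ...

With that, the new vhost_vdpa_skipped_memory_section event and the svq-annotated set_dev_vring_base/get_vring_base events show up in the trace output.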

hw/virtio/vhost-user.c

@@ -1610,11 +1610,27 @@ vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev,
 }
 
 static int
-vhost_user_backend_handle_shared_object_remove(VhostUserShared *object)
+vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev,
+                                               VhostUserShared *object)
 {
     QemuUUID uuid;
 
     memcpy(uuid.data, object->uuid, sizeof(object->uuid));
+    switch (virtio_object_type(&uuid)) {
+    case TYPE_VHOST_DEV:
+    {
+        struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid);
+        if (dev != owner) {
+            /* Not allowed to remove non-owned entries */
+            return 0;
+        }
+        break;
+    }
+    default:
+        /* Not allowed to remove non-owned entries */
+        return 0;
+    }
+
     return virtio_remove_resource(&uuid);
 }
 
@@ -1793,7 +1809,8 @@ static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
         ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object);
         break;
     case VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE:
-        ret = vhost_user_backend_handle_shared_object_remove(&payload.object);
+        ret = vhost_user_backend_handle_shared_object_remove(dev,
+                                                             &payload.object);
         break;
     case VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP:
         ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc,

hw/virtio/vhost-vdpa.c

@@ -47,12 +47,17 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                 int page_mask)
 {
     Int128 llend;
+    bool is_ram = memory_region_is_ram(section->mr);
+    bool is_iommu = memory_region_is_iommu(section->mr);
+    bool is_protected = memory_region_is_protected(section->mr);
+    /* vhost-vDPA doesn't allow MMIO to be mapped */
+    bool is_ram_device = memory_region_is_ram_device(section->mr);
 
-    if ((!memory_region_is_ram(section->mr) &&
-         !memory_region_is_iommu(section->mr)) ||
-        memory_region_is_protected(section->mr) ||
-        /* vhost-vDPA doesn't allow MMIO to be mapped */
-        memory_region_is_ram_device(section->mr)) {
+    if ((!is_ram && !is_iommu) || is_protected || is_ram_device) {
+        trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected,
+                                                is_ram_device, iova_min,
+                                                iova_max, page_mask);
         return true;
     }
 
@@ -69,7 +74,7 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
      * size that maps to the kernel
      */
 
-    if (!memory_region_is_iommu(section->mr)) {
+    if (!is_iommu) {
         llend = vhost_vdpa_section_end(section, page_mask);
         if (int128_gt(llend, int128_make64(iova_max))) {
             error_report("RAM section out of device range (max=0x%" PRIx64
@@ -555,6 +560,11 @@ static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
     return v->index == 0;
 }
 
+static bool vhost_vdpa_last_dev(struct vhost_dev *dev)
+{
+    return dev->vq_index + dev->nvqs == dev->vq_index_end;
+}
+
 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                        uint64_t *features)
 {
@@ -965,7 +975,10 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                          struct vhost_vring_state *ring)
 {
-    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
+    struct vhost_vdpa *v = dev->opaque;
+
+    trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num,
+                                        v->shadow_vqs_enabled);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
 }
 
@@ -1315,7 +1328,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
     }
 
-    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+    if (!vhost_vdpa_last_dev(dev)) {
         return 0;
     }
 
@@ -1337,7 +1350,7 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev)
 {
     struct vhost_vdpa *v = dev->opaque;
 
-    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+    if (!vhost_vdpa_last_dev(dev)) {
         return;
     }
 
@@ -1407,6 +1420,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
 
     if (v->shadow_vqs_enabled) {
         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
+        trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true);
         return 0;
     }
 
@@ -1419,7 +1433,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
     }
 
     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
-    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
+    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false);
     return ret;
 }
 
@@ -1447,7 +1461,15 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
 
         /* Remember last call fd because we can switch to SVQ anytime. */
         vhost_svq_set_svq_call_fd(svq, file->fd);
-        if (v->shadow_vqs_enabled) {
+        /*
+         * When SVQ is transitioning to off, shadow_vqs_enabled has
+         * not been set back to false yet, but the underlying call fd
+         * will have to switch back to the guest notifier to signal the
+         * passthrough virtqueues. In other situations, SVQ's own call
+         * fd shall be used to signal the device model.
+         */
+        if (v->shadow_vqs_enabled &&
+            v->shared->svq_switching != SVQ_TSTATE_DISABLING) {
             return 0;
         }

hw/virtio/virtio-iommu.c

@@ -29,6 +29,7 @@
 #include "sysemu/reset.h"
 #include "sysemu/sysemu.h"
 #include "qemu/reserved-region.h"
+#include "qemu/units.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "trace.h"
@@ -1115,8 +1116,8 @@ static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
 }
 
 /*
- * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule,
- * for example 0xfffffffffffff000. When an assigned device has page size
+ * The default mask depends on the "granule" property. For example, with
+ * 4k granule, it is -(4 * KiB). When an assigned device has page size
  * restrictions due to the hardware IOMMU configuration, apply this restriction
  * to the mask.
  */
@@ -1313,8 +1314,32 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
      * in vfio realize
      */
     s->config.bypass = s->boot_bypass;
-    s->config.page_size_mask = qemu_target_page_mask();
-    s->config.input_range.end = UINT64_MAX;
+    if (s->aw_bits < 32 || s->aw_bits > 64) {
+        error_setg(errp, "aw-bits must be within [32,64]");
+        return;
+    }
+    s->config.input_range.end =
+        s->aw_bits == 64 ? UINT64_MAX : BIT_ULL(s->aw_bits) - 1;
+
+    switch (s->granule_mode) {
+    case GRANULE_MODE_4K:
+        s->config.page_size_mask = -(4 * KiB);
+        break;
+    case GRANULE_MODE_8K:
+        s->config.page_size_mask = -(8 * KiB);
+        break;
+    case GRANULE_MODE_16K:
+        s->config.page_size_mask = -(16 * KiB);
+        break;
+    case GRANULE_MODE_64K:
+        s->config.page_size_mask = -(64 * KiB);
+        break;
+    case GRANULE_MODE_HOST:
+        s->config.page_size_mask = qemu_real_host_page_mask();
+        break;
+    default:
+        error_setg(errp, "Unsupported granule mode");
+    }
     s->config.domain_range.end = UINT32_MAX;
     s->config.probe_size = VIOMMU_PROBE_SIZE;
 
@@ -1522,6 +1547,9 @@ static Property virtio_iommu_properties[] = {
     DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus,
                      TYPE_PCI_BUS, PCIBus *),
     DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true),
+    DEFINE_PROP_GRANULE_MODE("granule", VirtIOIOMMU, granule_mode,
+                             GRANULE_MODE_HOST),
+    DEFINE_PROP_UINT8("aw-bits", VirtIOIOMMU, aw_bits, 64),
     DEFINE_PROP_END_OF_LIST(),
 };
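
Both new knobs are device properties, so they can be set on the command line. A usage sketch (the value spellings are assumed from the qemu-options.hx documentation added in this series; "granule" takes 4k/8k/16k/64k/host and "aw-bits" a value in [32,64]):

    qemu-system-aarch64 ... -device virtio-iommu-pci,granule=host,aw-bits=48

With granule=host (the new default) the advertised page_size_mask follows qemu_real_host_page_mask(), and aw-bits=48 caps config.input_range.end at BIT_ULL(48) - 1.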

hw/virtio/virtio-pci.c

@@ -1442,6 +1442,155 @@ int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy,
     return virtio_pci_add_mem_cap(proxy, &cap.cap);
 }
 
+/* Called within call_rcu(). */
+static void bitmap_free_region_cache(BitmapMemoryRegionCaches *caches)
+{
+    assert(caches != NULL);
+    address_space_cache_destroy(&caches->bitmap);
+    g_free(caches);
+}
+
+static void lm_disable(VirtIODevice *vdev)
+{
+    BitmapMemoryRegionCaches *caches;
+    caches = qatomic_read(&vdev->caches);
+    qatomic_rcu_set(&vdev->caches, NULL);
+    if (caches) {
+        call_rcu(caches, bitmap_free_region_cache, rcu);
+    }
+}
+
+static void lm_enable(VirtIODevice *vdev)
+{
+    BitmapMemoryRegionCaches *old = vdev->caches;
+    BitmapMemoryRegionCaches *new = NULL;
+    hwaddr addr, end, size;
+    int64_t len;
+
+    addr = vdev->lm_base_addr_low | ((hwaddr)(vdev->lm_base_addr_high) << 32);
+    end = vdev->lm_end_addr_low | ((hwaddr)(vdev->lm_end_addr_high) << 32);
+    size = end - addr;
+    if (size <= 0) {
+        error_report("Invalid lm size.");
+        return;
+    }
+
+    new = g_new0(BitmapMemoryRegionCaches, 1);
+    len = address_space_cache_init(&new->bitmap, vdev->dma_as, addr, size,
+                                   true);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map bitmap");
+        goto err_bitmap;
+    }
+    qatomic_rcu_set(&vdev->caches, new);
+
+    if (old) {
+        call_rcu(old, bitmap_free_region_cache, rcu);
+    }
+
+    return;
+
+err_bitmap:
+    address_space_cache_destroy(&new->bitmap);
+    g_free(new);
+}
+
+static uint64_t virtio_pci_lm_read(void *opaque, hwaddr addr,
+                                   unsigned size)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    hwaddr offset_end = LM_VRING_STATE_OFFSET +
+        virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
+    uint32_t val;
+    int qid;
+
+    if (vdev == NULL) {
+        return UINT64_MAX;
+    }
+    switch (addr) {
+    case LM_LOGGING_CTRL:
+        val = vdev->lm_logging_ctrl;
+        break;
+    case LM_BASE_ADDR_LOW:
+        val = vdev->lm_base_addr_low;
+        break;
+    case LM_BASE_ADDR_HIGH:
+        val = vdev->lm_base_addr_high;
+        break;
+    case LM_END_ADDR_LOW:
+        val = vdev->lm_end_addr_low;
+        break;
+    case LM_END_ADDR_HIGH:
+        val = vdev->lm_end_addr_high;
+        break;
+    default:
+        if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) {
+            qid = (addr - LM_VRING_STATE_OFFSET) /
+                  virtio_pci_queue_mem_mult(proxy);
+            val = virtio_queue_get_vring_states(vdev, qid);
+        } else
+            val = 0;
+
+        break;
+    }
+
+    return val;
+}
+
+static void virtio_pci_lm_write(void *opaque, hwaddr addr,
+                                uint64_t val, unsigned size)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    hwaddr offset_end = LM_VRING_STATE_OFFSET +
+        virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
+    int qid;
+
+    if (vdev == NULL) {
+        return;
+    }
+
+    switch (addr) {
+    case LM_LOGGING_CTRL:
+        vdev->lm_logging_ctrl = val;
+        switch (val) {
+        case LM_DISABLE:
+            lm_disable(vdev);
+            break;
+        case LM_ENABLE:
+            lm_enable(vdev);
+            break;
+        default:
+            virtio_error(vdev, "Unsupport LM_LOGGING_CTRL value: %"PRIx64,
+                         val);
+            break;
+        };
+
+        break;
+    case LM_BASE_ADDR_LOW:
+        vdev->lm_base_addr_low = val;
+        break;
+    case LM_BASE_ADDR_HIGH:
+        vdev->lm_base_addr_high = val;
+        break;
+    case LM_END_ADDR_LOW:
+        vdev->lm_end_addr_low = val;
+        break;
+    case LM_END_ADDR_HIGH:
+        vdev->lm_end_addr_high = val;
+        break;
+    default:
+        if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) {
+            qid = (addr - LM_VRING_STATE_OFFSET) /
+                  virtio_pci_queue_mem_mult(proxy);
+            virtio_queue_set_vring_states(vdev, qid, val);
+        } else
+            virtio_error(vdev, "Unsupport addr: %"PRIx64, addr);
+        break;
+    }
+}
+
 static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                        unsigned size)
 {
@@ -1823,6 +1972,15 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
         },
         .endianness = DEVICE_LITTLE_ENDIAN,
     };
+    static const MemoryRegionOps lm_ops = {
+        .read = virtio_pci_lm_read,
+        .write = virtio_pci_lm_write,
+        .impl = {
+            .min_access_size = 1,
+            .max_access_size = 4,
+        },
+        .endianness = DEVICE_LITTLE_ENDIAN,
+    };
     g_autoptr(GString) name = g_string_new(NULL);
 
     g_string_printf(name, "virtio-pci-common-%s", vdev_name);
@@ -1859,6 +2017,14 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                           proxy,
                           name->str,
                           proxy->notify_pio.size);
+    if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
+        g_string_printf(name, "virtio-pci-lm-%s", vdev_name);
+        memory_region_init_io(&proxy->lm.mr, OBJECT(proxy),
+                              &lm_ops,
+                              proxy,
+                              name->str,
+                              proxy->lm.size);
+    }
 }
 
 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
@@ -2021,6 +2187,10 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
         virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
         virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
         virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
+        if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
+            memory_region_add_subregion(&proxy->modern_bar,
+                                        proxy->lm.offset, &proxy->lm.mr);
+        }
 
         if (modern_pio) {
             memory_region_init(&proxy->io_bar, OBJECT(proxy),
@@ -2090,6 +2260,9 @@ static void virtio_pci_device_unplugged(DeviceState *d)
     virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
     virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
     virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
+    if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
+        memory_region_del_subregion(&proxy->modern_bar, &proxy->lm.mr);
+    }
     if (modern_pio) {
        virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
     }
@@ -2144,9 +2317,17 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
     proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
 
     /* subclasses can enforce modern, so do this unconditionally */
-    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
-                       /* PCI BAR regions must be powers of 2 */
-                       pow2ceil(proxy->notify.offset + proxy->notify.size));
+    if (!(proxy->flags & VIRTIO_PCI_FLAG_VDPA)) {
+        memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
+                           /* PCI BAR regions must be powers of 2 */
+                           pow2ceil(proxy->notify.offset + proxy->notify.size));
+    } else {
+        proxy->lm.offset = proxy->notify.offset + proxy->notify.size;
+        proxy->lm.size = 0x20 + VIRTIO_QUEUE_MAX * 4;
+        memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
+                           /* PCI BAR regions must be powers of 2 */
+                           pow2ceil(proxy->lm.offset + proxy->lm.size));
+    }
 
     if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
         proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
@@ -2301,6 +2482,8 @@ static Property virtio_pci_properties[] = {
                     VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
     DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_AER_BIT, false),
+    DEFINE_PROP_BIT("vdpa", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_VDPA_BIT, false),
     DEFINE_PROP_END_OF_LIST(),
 };
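
The "vdpa" flag is a plain bit property on the generic virtio-pci proxy, so any modern virtio-pci device could opt in; a hedged sketch (the device choice here is illustrative, not mandated by the series):

    qemu-system-x86_64 ... -device virtio-net-pci,vdpa=on

When the flag is set, realize() places a 0x20 + VIRTIO_QUEUE_MAX * 4 byte live-migration register block after the notify region and sizes the modern BAR to cover it.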

hw/virtio/virtio.c

@@ -3368,6 +3368,18 @@ static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
     return vdev->vq[n].last_avail_idx;
 }
 
+static uint32_t virtio_queue_split_get_vring_states(VirtIODevice *vdev,
+                                                    int n)
+{
+    struct VirtQueue *vq = &vdev->vq[n];
+    uint16_t avail, used;
+
+    avail = vq->last_avail_idx;
+    used = vq->used_idx;
+
+    return avail | (uint32_t)used << 16;
+}
+
 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
 {
     if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
@@ -3377,6 +3389,33 @@ unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
     }
 }
 
+unsigned int virtio_queue_get_vring_states(VirtIODevice *vdev, int n)
+{
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return -1;
+    } else {
+        return virtio_queue_split_get_vring_states(vdev, n);
+    }
+}
+
+static void virtio_queue_split_set_vring_states(VirtIODevice *vdev,
+                                                int n, uint32_t idx)
+{
+    struct VirtQueue *vq = &vdev->vq[n];
+    vq->last_avail_idx = (uint16_t)(idx & 0xffff);
+    vq->shadow_avail_idx = (uint16_t)(idx & 0xffff);
+    vq->used_idx = (uint16_t)(idx >> 16);
+}
+
+void virtio_queue_set_vring_states(VirtIODevice *vdev, int n, uint32_t idx)
+{
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return;
+    } else {
+        virtio_queue_split_set_vring_states(vdev, n, idx);
+    }
+}
+
 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
                                                    int n, unsigned int idx)
 {
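
The split-ring state word packs both ring indices into 32 bits: last_avail_idx in bits 0..15 and used_idx in bits 16..31, which is exactly what the per-queue LM_VRING_STATE_OFFSET registers in virtio-pci.c read and write. A standalone sketch of the packing, illustrative only and not QEMU code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t avail = 42, used = 7;

    /* pack, as virtio_queue_split_get_vring_states() does */
    uint32_t states = avail | (uint32_t)used << 16;

    /* unpack, as virtio_queue_split_set_vring_states() does */
    uint16_t avail2 = (uint16_t)(states & 0xffff);
    uint16_t used2 = (uint16_t)(states >> 16);

    printf("states=0x%08x avail=%u used=%u\n", states, avail2, used2);
    return 0;
}

For packed virtqueues the getter returns -1 and the setter is a no-op, since VIRTIO_F_RING_PACKED state does not fit this layout.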