Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

virtio,vhost,pci,pc: features, fixes and cleanups

- new stats in virtio balloon
- virtio eventfd rework for boot speedup
- vhost memory rework for boot speedup
- fixes and cleanups all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Tue 13 Feb 2018 16:29:55 GMT
# gpg:                using RSA key 281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (22 commits)
  virtio-balloon: include statistics of disk/file caches
  acpi-test: update FADT
  lpc: drop pcie host dependency
  tests: acpi: fix FADT not being compared to reference table
  hw/pci-bridge: fix pcie root port's IO hints capability
  libvhost-user: Support across-memory-boundary access
  libvhost-user: Fix resource leak
  virtio-balloon: unref the memory region before continuing
  pci: removed the is_express field since a uniform interface was inserted
  virtio-blk: enable multiple vectors when using multiple I/O queues
  pci/bus: let it has higher migration priority
  pci-bridge/i82801b11: clear bridge registers on platform reset
  vhost: Move log_dirty check
  vhost: Merge and delete unused callbacks
  vhost: Clean out old vhost_set_memory and friends
  vhost: Regenerate region list from changed sections list
  vhost: Merge sections added to temporary list
  vhost: Simplify ring verification checks
  vhost: Build temporary section list and deref after commit
  virtio: improve virtio devices initialization time
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit b734ed9de1
Peter Maydell <peter.maydell@linaro.org>, 2018-02-13 16:33:31 +00:00
39 changed files with 475 additions and 400 deletions

@@ -192,6 +192,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
while (i--) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
goto fail_guest_notifiers;
}
@@ -267,6 +268,7 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
for (i = 0; i < nvqs; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
/* Clean up guest notifier (irq) */

@@ -1360,7 +1360,6 @@ static void nvme_class_init(ObjectClass *oc, void *data)
pc->vendor_id = PCI_VENDOR_ID_INTEL;
pc->device_id = 0x5845;
pc->revision = 2;
pc->is_express = 1;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "Non-Volatile Memory Express";

@@ -39,7 +39,6 @@
#include "hw/isa/apm.h"
#include "hw/i386/ioapic.h"
#include "hw/pci/pci.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci_bridge.h"
#include "hw/i386/ich9.h"
#include "hw/acpi/acpi.h"

@@ -675,7 +675,6 @@ static void e1000e_class_init(ObjectClass *class, void *data)
c->revision = 0;
c->romfile = "efi-e1000e.rom";
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
c->is_express = 1;
dc->desc = "Intel 82574L GbE Controller";
dc->reset = e1000e_qdev_reset;

@@ -101,6 +101,7 @@ static void gen_rp_realize(DeviceState *dev, Error **errp)
static const VMStateDescription vmstate_rp_dev = {
.name = "pcie-root-port",
.priority = MIG_PRI_PCI_BUS,
.version_id = 1,
.minimum_version_id = 1,
.post_load = pcie_cap_slot_post_load,

@@ -78,6 +78,7 @@ err_bridge:
static const VMStateDescription i82801b11_bridge_dev_vmstate = {
.name = "i82801b11_bridge",
.priority = MIG_PRI_PCI_BUS,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCIBridge),
VMSTATE_END_OF_LIST()
@@ -96,6 +97,7 @@ static void i82801b11_bridge_class_init(ObjectClass *klass, void *data)
k->realize = i82801b11_bridge_realize;
k->config_write = pci_bridge_write_config;
dc->vmsd = &i82801b11_bridge_dev_vmstate;
dc->reset = pci_bridge_reset;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}

@@ -82,6 +82,7 @@ static void ioh3420_interrupts_uninit(PCIDevice *d)
static const VMStateDescription vmstate_ioh3420 = {
.name = "ioh-3240-express-root-port",
.priority = MIG_PRI_PCI_BUS,
.version_id = 1,
.minimum_version_id = 1,
.post_load = pcie_cap_slot_post_load,

@@ -174,6 +174,7 @@ static bool pci_device_shpc_present(void *opaque, int version_id)
static const VMStateDescription pci_bridge_dev_vmstate = {
.name = "pci_bridge",
.priority = MIG_PRI_PCI_BUS,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCIBridge),
SHPC_VMSTATE(shpc, PCIDevice, pci_device_shpc_present),

@@ -129,6 +129,7 @@ static Property pcie_pci_bridge_dev_properties[] = {
static const VMStateDescription pcie_pci_bridge_dev_vmstate = {
.name = TYPE_PCIE_PCI_BRIDGE_DEV,
.priority = MIG_PRI_PCI_BUS,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCIBridge),
SHPC_VMSTATE(shpc, PCIDevice, NULL),
@@ -169,7 +170,6 @@ static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
k->is_express = 1;
k->is_bridge = 1;
k->vendor_id = PCI_VENDOR_ID_REDHAT;
k->device_id = PCI_DEVICE_ID_REDHAT_PCIE_BRIDGE;
@@ -178,7 +178,6 @@ static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data)
k->config_write = pcie_pci_bridge_write_config;
dc->vmsd = &pcie_pci_bridge_dev_vmstate;
dc->props = pcie_pci_bridge_dev_properties;
dc->vmsd = &pcie_pci_bridge_dev_vmstate;
dc->reset = &pcie_pci_bridge_reset;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
hc->plug = pcie_pci_bridge_hotplug_cb;

@@ -145,7 +145,6 @@ static void rp_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->is_express = 1;
k->is_bridge = 1;
k->config_write = rp_write_config;
k->realize = rp_realize;

@@ -161,6 +161,7 @@ static Property xio3130_downstream_props[] = {
static const VMStateDescription vmstate_xio3130_downstream = {
.name = "xio3130-express-downstream-port",
.priority = MIG_PRI_PCI_BUS,
.version_id = 1,
.minimum_version_id = 1,
.post_load = pcie_cap_slot_post_load,
@@ -177,7 +178,6 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->is_express = 1;
k->is_bridge = 1;
k->config_write = xio3130_downstream_write_config;
k->realize = xio3130_downstream_realize;

@@ -132,6 +132,7 @@ PCIEPort *xio3130_upstream_init(PCIBus *bus, int devfn, bool multifunction,
static const VMStateDescription vmstate_xio3130_upstream = {
.name = "xio3130-express-upstream-port",
.priority = MIG_PRI_PCI_BUS,
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
@@ -147,7 +148,6 @@ static void xio3130_upstream_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->is_express = 1;
k->is_bridge = 1;
k->config_write = xio3130_upstream_write_config;
k->realize = xio3130_upstream_realize;

@@ -297,7 +297,6 @@ static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data)
k->device_id = 0x7021;
k->revision = 0;
k->class_id = PCI_CLASS_BRIDGE_HOST;
k->is_express = true;
k->is_bridge = true;
k->realize = xilinx_pcie_root_realize;
k->exit = pci_bridge_exitfn;

@@ -2007,11 +2007,15 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
PCIDevice *pci_dev = (PCIDevice *)qdev;
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
ObjectClass *klass = OBJECT_CLASS(pc);
Error *local_err = NULL;
bool is_default_rom;
/* initialize cap_present for pci_is_express() and pci_config_size() */
if (pc->is_express) {
/* initialize cap_present for pci_is_express() and pci_config_size(),
* Note that hybrid PCIs are not set automatically and need to manage
* QEMU_PCI_CAP_EXPRESS manually */
if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
!object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

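The pci.c hunk above is the heart of the is_express removal: instead of a
per-class flag, pci_qdev_realize() keys off the QOM interfaces the device
class implements. A minimal sketch of the two patterns device models use from
here on (type and function names below are hypothetical, not from this series):

    /* Pure PCIe device: declaring only INTERFACE_PCIE_DEVICE is enough;
     * pci_qdev_realize() now sets QEMU_PCI_CAP_EXPRESS automatically. */
    static const TypeInfo my_pcie_dev_info = {
        .name       = "my-pcie-dev",
        .parent     = TYPE_PCI_DEVICE,
        .class_init = my_pcie_dev_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { }
        },
    };

    /* Hybrid PCI/PCIe device: declare both INTERFACE_PCIE_DEVICE and
     * INTERFACE_CONVENTIONAL_PCI_DEVICE, and manage the capability bit
     * manually, e.g. in instance_init (as xhci, vfio and xen-pt do below). */
    static void my_hybrid_dev_instance_init(Object *obj)
    {
        PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }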
@@ -412,22 +412,36 @@ void pci_bridge_map_irq(PCIBridge *br, const char* bus_name,
int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset,
uint32_t bus_reserve, uint64_t io_reserve,
uint32_t mem_non_pref_reserve,
uint32_t mem_pref_32_reserve,
uint64_t mem_non_pref_reserve,
uint64_t mem_pref_32_reserve,
uint64_t mem_pref_64_reserve,
Error **errp)
{
if (mem_pref_32_reserve != (uint32_t)-1 &&
if (mem_pref_32_reserve != (uint64_t)-1 &&
mem_pref_64_reserve != (uint64_t)-1) {
error_setg(errp,
"PCI resource reserve cap: PREF32 and PREF64 conflict");
return -EINVAL;
}
if (mem_non_pref_reserve != (uint64_t)-1 &&
mem_non_pref_reserve >= (1ULL << 32)) {
error_setg(errp,
"PCI resource reserve cap: mem-reserve must be less than 4G");
return -EINVAL;
}
if (mem_pref_32_reserve != (uint64_t)-1 &&
mem_pref_32_reserve >= (1ULL << 32)) {
error_setg(errp,
"PCI resource reserve cap: pref32-reserve must be less than 4G");
return -EINVAL;
}
if (bus_reserve == (uint32_t)-1 &&
io_reserve == (uint64_t)-1 &&
mem_non_pref_reserve == (uint32_t)-1 &&
mem_pref_32_reserve == (uint32_t)-1 &&
mem_non_pref_reserve == (uint64_t)-1 &&
mem_pref_32_reserve == (uint64_t)-1 &&
mem_pref_64_reserve == (uint64_t)-1) {
return 0;
}

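Context for the pci_bridge.c hunk above: the bridge resource-reserve hints
come from 64-bit device properties whose "not set" default is (uint64_t)-1,
so taking them through uint32_t parameters truncated the sentinel and broke
unset detection. Widening the prototype fixes that, and the new checks reject
real values that cannot fit the capability's 32-bit MEM/PREF32 fields. The
rule, as a small sketch (helper name is illustrative):

    /* A hint destined for a 32-bit capability field must be either unset
     * ((uint64_t)-1) or strictly below 4G. */
    static bool hint_fits_32bit_field(uint64_t v)
    {
        return v == (uint64_t)-1 || v < (1ULL << 32);
    }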
@@ -2447,7 +2447,6 @@ typedef struct MegasasInfo {
uint16_t subsystem_id;
int ioport_bar;
int mmio_bar;
bool is_express;
int osts;
const VMStateDescription *vmsd;
Property *props;
@@ -2465,7 +2464,6 @@ static struct MegasasInfo megasas_devices[] = {
.ioport_bar = 2,
.mmio_bar = 0,
.osts = MFI_1078_RM | 1,
.is_express = false,
.vmsd = &vmstate_megasas_gen1,
.props = megasas_properties_gen1,
.interfaces = (InterfaceInfo[]) {
@@ -2482,7 +2480,6 @@ static struct MegasasInfo megasas_devices[] = {
.ioport_bar = 0,
.mmio_bar = 1,
.osts = MFI_GEN2_RM,
.is_express = true,
.vmsd = &vmstate_megasas_gen2,
.props = megasas_properties_gen2,
.interfaces = (InterfaceInfo[]) {
@@ -2506,7 +2503,6 @@ static void megasas_class_init(ObjectClass *oc, void *data)
pc->subsystem_vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
pc->subsystem_id = info->subsystem_id;
pc->class_id = PCI_CLASS_STORAGE_RAID;
pc->is_express = info->is_express;
e->mmio_bar = info->mmio_bar;
e->ioport_bar = info->ioport_bar;
e->osts = info->osts;

@@ -175,6 +175,7 @@ fail_vrings:
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
@@ -213,6 +214,7 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
for (i = 0; i < vs->conf.num_queues + 2; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
}
/* Clean up guest notifier (irq) */

@@ -3649,6 +3649,13 @@ static Property xhci_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
static void xhci_instance_init(Object *obj)
{
/* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
* line, therefore, no need to wait to realize like other devices */
PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
static void xhci_class_init(ObjectClass *klass, void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -3661,7 +3668,6 @@ static void xhci_class_init(ObjectClass *klass, void *data)
k->realize = usb_xhci_realize;
k->exit = usb_xhci_exit;
k->class_id = PCI_CLASS_SERIAL_USB;
k->is_express = 1;
}
static const TypeInfo xhci_info = {
@@ -3669,6 +3675,7 @@ static const TypeInfo xhci_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(XHCIState),
.class_init = xhci_class_init,
.instance_init = xhci_instance_init,
.abstract = true,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },

@@ -3114,6 +3114,10 @@ static void vfio_instance_init(Object *obj)
vdev->host.function = ~0U;
vdev->nv_gpudirect_clique = 0xFF;
/* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
* line, therefore, no need to wait to realize like other devices */
pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
static Property vfio_pci_dev_properties[] = {
@@ -3172,7 +3176,6 @@ static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
pdc->exit = vfio_exitfn;
pdc->config_read = vfio_pci_read_config;
pdc->config_write = vfio_pci_write_config;
pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {

@@ -1,5 +1,11 @@
# See docs/devel/tracing.txt for syntax documentation.
# hw/virtio/vhost.c
vhost_commit(bool started, bool changed) "Started: %d Changed: %d"
vhost_region_add_section(const char *name, uint64_t gpa, uint64_t size, uint64_t host) "%s: 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64
vhost_region_add_section_abut(const char *name, uint64_t new_size) "%s: 0x%"PRIx64
vhost_section(const char *name, int r) "%s:%d"
# hw/virtio/virtio.c
virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u"
@@ -25,9 +31,3 @@ virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s g
virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d"
virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d"
virtio_balloon_to_target(uint64_t target, uint32_t num_pages) "balloon target: 0x%"PRIx64" num_pages: %d"
# hw/virtio/vhost.c
vhost_region_add(void *p, const char *mr) "dev %p mr %s"
vhost_region_del(void *p, const char *mr) "dev %p mr %s"
vhost_iommu_region_add(void *p, const char *mr) "dev %p mr %s"
vhost_iommu_region_del(void *p, const char *mr) "dev %p mr %s"

@@ -156,160 +156,6 @@ static void vhost_log_sync_range(struct vhost_dev *dev,
}
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
* memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size)
{
int from, to, n = dev->mem->nregions;
/* Track overlapping/split regions for sanity checking. */
int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
for (from = 0, to = 0; from < n; ++from, ++to) {
struct vhost_memory_region *reg = dev->mem->regions + to;
uint64_t reglast;
uint64_t memlast;
uint64_t change;
/* clone old region */
if (to != from) {
memcpy(reg, dev->mem->regions + from, sizeof *reg);
}
/* No overlap is simple */
if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
start_addr, size)) {
continue;
}
/* Split only happens if supplied region
* is in the middle of an existing one. Thus it can not
* overlap with any other existing region. */
assert(!split);
reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
memlast = range_get_last(start_addr, size);
/* Remove whole region */
if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
--dev->mem->nregions;
--to;
++overlap_middle;
continue;
}
/* Shrink region */
if (memlast >= reglast) {
reg->memory_size = start_addr - reg->guest_phys_addr;
assert(reg->memory_size);
assert(!overlap_end);
++overlap_end;
continue;
}
/* Shift region */
if (start_addr <= reg->guest_phys_addr) {
change = memlast + 1 - reg->guest_phys_addr;
reg->memory_size -= change;
reg->guest_phys_addr += change;
reg->userspace_addr += change;
assert(reg->memory_size);
assert(!overlap_start);
++overlap_start;
continue;
}
/* This only happens if supplied region
* is in the middle of an existing one. Thus it can not
* overlap with any other existing region. */
assert(!overlap_start);
assert(!overlap_end);
assert(!overlap_middle);
/* Split region: shrink first part, shift second part. */
memcpy(dev->mem->regions + n, reg, sizeof *reg);
reg->memory_size = start_addr - reg->guest_phys_addr;
assert(reg->memory_size);
change = memlast + 1 - reg->guest_phys_addr;
reg = dev->mem->regions + n;
reg->memory_size -= change;
assert(reg->memory_size);
reg->guest_phys_addr += change;
reg->userspace_addr += change;
/* Never add more than 1 region */
assert(dev->mem->nregions == n);
++dev->mem->nregions;
++split;
}
}
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size,
uint64_t uaddr)
{
int from, to;
struct vhost_memory_region *merged = NULL;
for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
struct vhost_memory_region *reg = dev->mem->regions + to;
uint64_t prlast, urlast;
uint64_t pmlast, umlast;
uint64_t s, e, u;
/* clone old region */
if (to != from) {
memcpy(reg, dev->mem->regions + from, sizeof *reg);
}
prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
pmlast = range_get_last(start_addr, size);
urlast = range_get_last(reg->userspace_addr, reg->memory_size);
umlast = range_get_last(uaddr, size);
/* check for overlapping regions: should never happen. */
assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
/* Not an adjacent or overlapping region - do not merge. */
if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
(pmlast + 1 != reg->guest_phys_addr ||
umlast + 1 != reg->userspace_addr)) {
continue;
}
if (dev->vhost_ops->vhost_backend_can_merge &&
!dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
reg->userspace_addr,
reg->memory_size)) {
continue;
}
if (merged) {
--to;
assert(to >= 0);
} else {
merged = reg;
}
u = MIN(uaddr, reg->userspace_addr);
s = MIN(start_addr, reg->guest_phys_addr);
e = MAX(pmlast, prlast);
uaddr = merged->userspace_addr = u;
start_addr = merged->guest_phys_addr = s;
size = merged->memory_size = e - s + 1;
assert(merged->memory_size);
}
if (!merged) {
struct vhost_memory_region *reg = dev->mem->regions + to;
memset(reg, 0, sizeof *reg);
reg->memory_size = size;
assert(reg->memory_size);
reg->guest_phys_addr = start_addr;
reg->userspace_addr = uaddr;
++to;
}
assert(to <= dev->mem->nregions + 1);
dev->mem->nregions = to;
}
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
uint64_t log_size = 0;
@@ -456,35 +302,37 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
}
}
static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
void *part,
uint64_t part_addr,
uint64_t part_size,
uint64_t start_addr,
uint64_t size)
static int vhost_verify_ring_part_mapping(void *ring_hva,
uint64_t ring_gpa,
uint64_t ring_size,
void *reg_hva,
uint64_t reg_gpa,
uint64_t reg_size)
{
hwaddr l;
void *p;
int r = 0;
uint64_t hva_ring_offset;
uint64_t ring_last = range_get_last(ring_gpa, ring_size);
uint64_t reg_last = range_get_last(reg_gpa, reg_size);
if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
if (ring_last < reg_gpa || ring_gpa > reg_last) {
return 0;
}
l = part_size;
p = vhost_memory_map(dev, part_addr, &l, 1);
if (!p || l != part_size) {
r = -ENOMEM;
/* check that whole ring's is mapped */
if (ring_last > reg_last) {
return -ENOMEM;
}
if (p != part) {
r = -EBUSY;
/* check that ring's MemoryRegion wasn't replaced */
hva_ring_offset = ring_gpa - reg_gpa;
if (ring_hva != reg_hva + hva_ring_offset) {
return -EBUSY;
}
vhost_memory_unmap(dev, p, l, 0, 0);
return r;
return 0;
}
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size)
void *reg_hva,
uint64_t reg_gpa,
uint64_t reg_size)
{
int i, j;
int r = 0;
@@ -498,22 +346,25 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
struct vhost_virtqueue *vq = dev->vqs + i;
j = 0;
r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
vq->desc_size, start_addr, size);
r = vhost_verify_ring_part_mapping(
vq->desc, vq->desc_phys, vq->desc_size,
reg_hva, reg_gpa, reg_size);
if (r) {
break;
}
j++;
r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
vq->avail_size, start_addr, size);
r = vhost_verify_ring_part_mapping(
vq->desc, vq->desc_phys, vq->desc_size,
reg_hva, reg_gpa, reg_size);
if (r) {
break;
}
j++;
r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
vq->used_size, start_addr, size);
r = vhost_verify_ring_part_mapping(
vq->desc, vq->desc_phys, vq->desc_size,
reg_hva, reg_gpa, reg_size);
if (r) {
break;
}
@@ -527,134 +378,95 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size)
{
int i, n = dev->mem->nregions;
for (i = 0; i < n; ++i) {
struct vhost_memory_region *reg = dev->mem->regions + i;
if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
start_addr, size)) {
return reg;
}
}
return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size,
uint64_t uaddr)
{
struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
uint64_t reglast;
uint64_t memlast;
if (!reg) {
return true;
}
reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
memlast = range_get_last(start_addr, size);
/* Need to extend region? */
if (start_addr < reg->guest_phys_addr || memlast > reglast) {
return true;
}
/* userspace_addr changed? */
return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
static void vhost_set_memory(MemoryListener *listener,
MemoryRegionSection *section,
bool add)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
hwaddr start_addr = section->offset_within_address_space;
ram_addr_t size = int128_get64(section->size);
bool log_dirty =
memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
int s = offsetof(struct vhost_memory, regions) +
(dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
void *ram;
dev->mem = g_realloc(dev->mem, s);
if (log_dirty) {
add = false;
}
assert(size);
/* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
if (add) {
if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
/* Region exists with same address. Nothing to do. */
return;
}
} else {
if (!vhost_dev_find_reg(dev, start_addr, size)) {
/* Removing region that we don't access. Nothing to do. */
return;
}
}
vhost_dev_unassign_memory(dev, start_addr, size);
if (add) {
/* Add given mapping, merging adjacent regions if any */
vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
} else {
/* Remove old mapping for this memory, if any. */
vhost_dev_unassign_memory(dev, start_addr, size);
}
dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
dev->memory_changed = true;
used_memslots = dev->mem->nregions;
}
static bool vhost_section(MemoryRegionSection *section)
{
return memory_region_is_ram(section->mr) &&
bool result;
bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
~(1 << DIRTY_MEMORY_MIGRATION);
result = memory_region_is_ram(section->mr) &&
!memory_region_is_rom(section->mr);
/* Vhost doesn't handle any block which is doing dirty-tracking other
* than migration; this typically fires on VGA areas.
*/
result &= !log_dirty;
trace_vhost_section(section->mr->name, result);
return result;
}
static void vhost_begin(MemoryListener *listener)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
dev->mem_changed_end_addr = 0;
dev->mem_changed_start_addr = -1;
dev->tmp_sections = NULL;
dev->n_tmp_sections = 0;
}
static void vhost_commit(MemoryListener *listener)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
hwaddr start_addr = 0;
ram_addr_t size = 0;
MemoryRegionSection *old_sections;
int n_old_sections;
uint64_t log_size;
size_t regions_size;
int r;
int i;
bool changed = false;
if (!dev->memory_changed) {
return;
/* Note we can be called before the device is started, but then
* starting the device calls set_mem_table, so we need to have
* built the data structures.
*/
old_sections = dev->mem_sections;
n_old_sections = dev->n_mem_sections;
dev->mem_sections = dev->tmp_sections;
dev->n_mem_sections = dev->n_tmp_sections;
if (dev->n_mem_sections != n_old_sections) {
changed = true;
} else {
/* Same size, lets check the contents */
changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
n_old_sections * sizeof(old_sections[0])) != 0;
}
trace_vhost_commit(dev->started, changed);
if (!changed) {
goto out;
}
/* Rebuild the regions list from the new sections list */
regions_size = offsetof(struct vhost_memory, regions) +
dev->n_mem_sections * sizeof dev->mem->regions[0];
dev->mem = g_realloc(dev->mem, regions_size);
dev->mem->nregions = dev->n_mem_sections;
used_memslots = dev->mem->nregions;
for (i = 0; i < dev->n_mem_sections; i++) {
struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
struct MemoryRegionSection *mrs = dev->mem_sections + i;
cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
cur_vmr->memory_size = int128_get64(mrs->size);
cur_vmr->userspace_addr =
(uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
mrs->offset_within_region;
cur_vmr->flags_padding = 0;
}
if (!dev->started) {
return;
}
if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
return;
goto out;
}
if (dev->started) {
start_addr = dev->mem_changed_start_addr;
size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
r = vhost_verify_ring_mappings(dev, start_addr, size);
assert(r >= 0);
for (i = 0; i < dev->mem->nregions; i++) {
if (vhost_verify_ring_mappings(dev,
(void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
dev->mem->regions[i].guest_phys_addr,
dev->mem->regions[i].memory_size)) {
error_report("Verify ring failure on region %d", i);
abort();
}
}
if (!dev->log_enabled) {
@@ -662,8 +474,7 @@ static void vhost_commit(MemoryListener *listener)
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
}
dev->memory_changed = false;
return;
goto out;
}
log_size = vhost_get_log_size(dev);
/* We allocate an extra 4K bytes to log,
@@ -681,51 +492,91 @@ static void vhost_commit(MemoryListener *listener)
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
vhost_dev_log_resize(dev, log_size);
}
dev->memory_changed = false;
out:
/* Deref the old list of sections, this must happen _after_ the
* vhost_set_mem_table to ensure the client isn't still using the
* section we're about to unref.
*/
while (n_old_sections--) {
memory_region_unref(old_sections[n_old_sections].mr);
}
g_free(old_sections);
return;
}
static void vhost_region_add(MemoryListener *listener,
MemoryRegionSection *section)
/* Adds the section data to the tmp_section structure.
* It relies on the listener calling us in memory address order
* and for each region (via the _add and _nop methods) to
* join neighbours.
*/
static void vhost_region_add_section(struct vhost_dev *dev,
MemoryRegionSection *section)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
bool need_add = true;
uint64_t mrs_size = int128_get64(section->size);
uint64_t mrs_gpa = section->offset_within_address_space;
uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
section->offset_within_region;
if (!vhost_section(section)) {
return;
}
trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
mrs_host);
trace_vhost_region_add(dev, section->mr->name ?: NULL);
++dev->n_mem_sections;
dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
dev->n_mem_sections);
dev->mem_sections[dev->n_mem_sections - 1] = *section;
memory_region_ref(section->mr);
vhost_set_memory(listener, section, true);
}
if (dev->n_tmp_sections) {
/* Since we already have at least one section, lets see if
* this extends it; since we're scanning in order, we only
* have to look at the last one, and the FlatView that calls
* us shouldn't have overlaps.
*/
MemoryRegionSection *prev_sec = dev->tmp_sections +
(dev->n_tmp_sections - 1);
uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
uint64_t prev_size = int128_get64(prev_sec->size);
uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
uint64_t prev_host_start =
(uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
prev_sec->offset_within_region;
uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);
static void vhost_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
int i;
if (!vhost_section(section)) {
return;
}
trace_vhost_region_del(dev, section->mr->name ?: NULL);
vhost_set_memory(listener, section, false);
memory_region_unref(section->mr);
for (i = 0; i < dev->n_mem_sections; ++i) {
if (dev->mem_sections[i].offset_within_address_space
== section->offset_within_address_space) {
--dev->n_mem_sections;
memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
(dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
break;
if (prev_gpa_end + 1 == mrs_gpa &&
prev_host_end + 1 == mrs_host &&
section->mr == prev_sec->mr &&
(!dev->vhost_ops->vhost_backend_can_merge ||
dev->vhost_ops->vhost_backend_can_merge(dev,
mrs_host, mrs_size,
prev_host_start, prev_size))) {
/* The two sections abut */
need_add = false;
prev_sec->size = int128_add(prev_sec->size, section->size);
trace_vhost_region_add_section_abut(section->mr->name,
mrs_size + prev_size);
}
}
if (need_add) {
++dev->n_tmp_sections;
dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
dev->n_tmp_sections);
dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
/* The flatview isn't stable and we don't use it, making it NULL
* means we can memcmp the list.
*/
dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
memory_region_ref(section->mr);
}
}
/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
MemoryRegionSection *section)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
if (!vhost_section(section)) {
return;
}
vhost_region_add_section(dev, section);
}
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
@@ -752,8 +603,6 @@ static void vhost_iommu_region_add(MemoryListener *listener,
return;
}
trace_vhost_iommu_region_add(dev, section->mr->name ?: NULL);
iommu = g_malloc0(sizeof(*iommu));
end = int128_add(int128_make64(section->offset_within_region),
section->size);
@@ -782,8 +631,6 @@ static void vhost_iommu_region_del(MemoryListener *listener,
return;
}
trace_vhost_iommu_region_del(dev, section->mr->name ?: NULL);
QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
if (iommu->mr == section->mr &&
iommu->n.start == section->offset_within_region) {
@@ -796,11 +643,6 @@ static void vhost_iommu_region_del(MemoryListener *listener,
}
}
static void vhost_region_nop(MemoryListener *listener,
MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
unsigned idx, bool enable_log)
@@ -1305,9 +1147,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
hdev->memory_listener = (MemoryListener) {
.begin = vhost_begin,
.commit = vhost_commit,
.region_add = vhost_region_add,
.region_del = vhost_region_del,
.region_nop = vhost_region_nop,
.region_add = vhost_region_addnop,
.region_nop = vhost_region_addnop,
.log_start = vhost_log_start,
.log_stop = vhost_log_stop,
.log_sync = vhost_log_sync,
@@ -1349,7 +1190,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
hdev->log_size = 0;
hdev->log_enabled = false;
hdev->started = false;
hdev->memory_changed = false;
memory_listener_register(&hdev->memory_listener, &address_space_memory);
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
return 0;
@@ -1425,6 +1265,7 @@ fail_vq:
error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
}
assert (e >= 0);
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
}
virtio_device_release_ioeventfd(vdev);
fail:
@@ -1448,6 +1289,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
}
assert (r >= 0);
virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
}
virtio_device_release_ioeventfd(vdev);
}

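The vhost.c rewrite above trades the old incremental assign/unassign
bookkeeping for a rebuild-from-scratch model: region_add/region_nop collect
sections into a temporary list (extending the previous entry when neighbours
abut, since the listener walks in address order), and commit swaps the list
in, memcmp()s it against the old one, and only regenerates dev->mem and calls
vhost_set_mem_table when something actually changed. A standalone sketch of
the abut test used for merging (QEMU's real check additionally requires the
same MemoryRegion and consults vhost_backend_can_merge):

    #include <stdbool.h>
    #include <stdint.h>

    /* Two sections can fold into one vhost_memory_region only when both
     * the guest-physical ranges and the host mappings are contiguous. */
    static bool sections_abut(uint64_t prev_gpa_last, uint64_t prev_host_last,
                              uint64_t next_gpa, uint64_t next_host)
    {
        return prev_gpa_last + 1 == next_gpa &&
               prev_host_last + 1 == next_host;
    }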
@@ -51,6 +51,7 @@ static const char *balloon_stat_names[] = {
[VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory",
[VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory",
[VIRTIO_BALLOON_S_AVAIL] = "stat-available-memory",
[VIRTIO_BALLOON_S_CACHES] = "stat-disk-caches",
[VIRTIO_BALLOON_S_NR] = NULL
};
@@ -235,6 +236,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
memory_region_is_rom(section.mr) ||
memory_region_is_romd(section.mr)) {
trace_virtio_balloon_bad_addr(pa);
memory_region_unref(section.mr);
continue;
}

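Usage note for the balloon hunks above: the new counter rides the existing
guest-stats machinery, so once stats polling is enabled the balloon device's
guest-stats QOM property simply grows a "stat-disk-caches" entry next to
"stat-free-memory" and friends (units are whatever the guest driver reports;
Linux reports bytes). Nothing new is needed on the command line.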
@@ -283,20 +283,26 @@ int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
r = k->ioeventfd_assign(proxy, notifier, n, true);
if (r < 0) {
error_report("%s: unable to assign ioeventfd: %d", __func__, r);
goto cleanup_event_notifier;
virtio_bus_cleanup_host_notifier(bus, n);
}
return 0;
} else {
k->ioeventfd_assign(proxy, notifier, n, false);
}
cleanup_event_notifier:
return r;
}
void virtio_bus_cleanup_host_notifier(VirtioBusState *bus, int n)
{
VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtQueue *vq = virtio_get_queue(vdev, n);
EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
/* Test and clear notifier after disabling event,
* in case poll callback didn't have time to run.
*/
virtio_queue_host_notifier_read(notifier);
event_notifier_cleanup(notifier);
return r;
}
static char *virtio_bus_get_dev_path(DeviceState *dev)

@@ -1932,7 +1932,8 @@ static Property virtio_blk_pci_properties[] = {
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1941,6 +1942,10 @@ static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&dev->vdev);
if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
vpci_dev->nvectors = dev->vdev.conf.num_queues + 1;
}
qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}
@@ -1983,7 +1988,8 @@ static const TypeInfo virtio_blk_pci_info = {
static Property vhost_user_blk_pci_properties[] = {
DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1992,6 +1998,10 @@ static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&dev->vdev);
if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
vpci_dev->nvectors = dev->vdev.num_queues + 1;
}
qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

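Usage note for the virtio-pci hunks above: leaving "vectors" unset on
virtio-blk-pci (and vhost-user-blk-pci) now sizes the MSI-X allocation at
realize time to num-queues + 1 (one vector per I/O queue plus one for
configuration interrupts) instead of the old fixed default of 2. For example,
a device configured with num-queues=4 gets 5 vectors; an explicit vectors=
property still overrides.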
@@ -2572,8 +2572,9 @@ static Property virtio_properties[] = {
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int n, r, err;
int i, n, r, err;
memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
@@ -2596,9 +2597,11 @@ static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
}
event_notifier_set(&vq->host_notifier);
}
memory_region_transaction_commit();
return 0;
assign_error:
i = n; /* save n for a second iteration after transaction is committed. */
while (--n >= 0) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
@@ -2609,6 +2612,14 @@ assign_error:
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
memory_region_transaction_commit();
while (--i >= 0) {
if (!virtio_queue_get_num(vdev, i)) {
continue;
}
virtio_bus_cleanup_host_notifier(qbus, i);
}
return err;
}
@@ -2625,6 +2636,7 @@ static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int n, r;
memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
@@ -2635,6 +2647,14 @@ static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
memory_region_transaction_commit();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
virtio_bus_cleanup_host_notifier(qbus, n);
}
}
void virtio_device_stop_ioeventfd(VirtIODevice *vdev)

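The virtio.c changes above are the "improve virtio devices initialization
time" patch from the list: ioeventfd (host notifier) assignment is batched
inside a single memory-region transaction, so the memory map is rebuilt once
per device rather than once per queue, and the eventfd teardown is deferred
until after the commit, when nothing can still reference the fd. The teardown
half of the pattern, sketched from the stop path (the real code also skips
queues with no descriptors):

    /* Deassign every notifier inside one transaction (no per-queue
     * commit), then reclaim the eventfds once the commit has landed. */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        virtio_bus_set_host_notifier(qbus, n, false);
    }
    memory_region_transaction_commit();   /* one memory-map rebuild */
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        virtio_bus_cleanup_host_notifier(qbus, n);
    }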
@@ -937,6 +937,13 @@ static Property xen_pci_passthrough_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
static void xen_pci_passthrough_instance_init(Object *obj)
{
/* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
* line, therefore, no need to wait to realize like other devices */
PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -946,7 +953,6 @@ static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
k->exit = xen_pt_unregister_device;
k->config_read = xen_pt_pci_read_config;
k->config_write = xen_pt_pci_write_config;
k->is_express = 1; /* We might be */
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "Assign an host PCI device with Xen";
dc->props = xen_pci_passthrough_properties;
@@ -965,6 +971,7 @@ static const TypeInfo xen_pci_passthrough_info = {
.instance_size = sizeof(XenPCIPassthroughState),
.instance_finalize = xen_pci_passthrough_finalize,
.class_init = xen_pci_passthrough_class_init,
.instance_init = xen_pci_passthrough_instance_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ INTERFACE_PCIE_DEVICE },