* Prevent reentrant DMA accesses by default
* Only compile hw/rdma code when necessary
* Fix a potential locking issue in the vhost-user-test
* Offer more registers in GDB for s390x TCG
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCAAvFiEEJ7iIR+7gJQEY8+q5LtnXdP5wLbUFAmRLlDkRHHRodXRoQHJl
 ZGhhdC5jb20ACgkQLtnXdP5wLbU98RAApVbehXIXMF4lm0B4/pTRlP7CEjbKvNdQ
 Ca/xuqiCctjisJWQMLf8WDdymzaA02ycSYo0X9diO4uisKog4kN4uYje3VTbSJnx
 7a0MlOpUqP6AR3hZYy5y/wv1M7Yjfhnj7yTUsAo/mQYoZmvqRviNwJ6ekdIU9J4W
 V2EfY+R/ebNami9VvuHTqudcZxmmR+4S5m5JipeHvjFE0DJucES0S8sN3W+iSCxG
 ydqG02CUoZtoorkJZccy3FPyeTVHgvcNqJbhAA5pFBgo30L+mx0llqztzLnWnLCe
 GWvpW2lZ2jC5MOg8oEpbiR807eB3ITpzCu9SX0IUOxMUHpGtl49sx1WNFrrlgD4Z
 b+mZSwJx4grwkU8iD/nF0apQoqEIAH6VSyRAIq1yLAsq1H4/K4c8M+bZ/jAD+gw+
 +DujZIPJ6Gcu3BeqWgOcOY1rRol3ksmRYlU8GrHTR/zIU+q3h7Gk3aH4+Ore1qnE
 hI/YzyyZlQgFMvENqakPJIS9IH+h41gCltwE8YxqJyUWIBfRs7iBMzdxQuUW+c2b
 n6OPYqeDlQt4hEpq4mUtZ+aJV1ey6kyk5ESLTj8QaJiZtcT1+lmvZX2ug2oeyrJm
 ys2458ulKZu0rGjXWWXNFB8u3MM9xN/PLqeD/wKShyLzkit5Nfx4azw5LPbmjrbj
 Twmf1HGeUiY=
 =v84A
 -----END PGP SIGNATURE-----

Merge tag 'pull-request-2023-04-28' of https://gitlab.com/thuth/qemu into staging

* Prevent reentrant DMA accesses by default
* Only compile hw/rdma code when necessary
* Fix a potential locking issue in the vhost-user-test
* Offer more registers in GDB for s390x TCG

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEEJ7iIR+7gJQEY8+q5LtnXdP5wLbUFAmRLlDkRHHRodXRoQHJl
# ZGhhdC5jb20ACgkQLtnXdP5wLbU98RAApVbehXIXMF4lm0B4/pTRlP7CEjbKvNdQ
# Ca/xuqiCctjisJWQMLf8WDdymzaA02ycSYo0X9diO4uisKog4kN4uYje3VTbSJnx
# 7a0MlOpUqP6AR3hZYy5y/wv1M7Yjfhnj7yTUsAo/mQYoZmvqRviNwJ6ekdIU9J4W
# V2EfY+R/ebNami9VvuHTqudcZxmmR+4S5m5JipeHvjFE0DJucES0S8sN3W+iSCxG
# ydqG02CUoZtoorkJZccy3FPyeTVHgvcNqJbhAA5pFBgo30L+mx0llqztzLnWnLCe
# GWvpW2lZ2jC5MOg8oEpbiR807eB3ITpzCu9SX0IUOxMUHpGtl49sx1WNFrrlgD4Z
# b+mZSwJx4grwkU8iD/nF0apQoqEIAH6VSyRAIq1yLAsq1H4/K4c8M+bZ/jAD+gw+
# +DujZIPJ6Gcu3BeqWgOcOY1rRol3ksmRYlU8GrHTR/zIU+q3h7Gk3aH4+Ore1qnE
# hI/YzyyZlQgFMvENqakPJIS9IH+h41gCltwE8YxqJyUWIBfRs7iBMzdxQuUW+c2b
# n6OPYqeDlQt4hEpq4mUtZ+aJV1ey6kyk5ESLTj8QaJiZtcT1+lmvZX2ug2oeyrJm
# ys2458ulKZu0rGjXWWXNFB8u3MM9xN/PLqeD/wKShyLzkit5Nfx4azw5LPbmjrbj
# Twmf1HGeUiY=
# =v84A
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 28 Apr 2023 10:39:05 AM BST
# gpg:                using RSA key 27B88847EEE0250118F3EAB92ED9D774FE702DB5
# gpg:                issuer "thuth@redhat.com"
# gpg: Good signature from "Thomas Huth <th.huth@gmx.de>" [undefined]
# gpg:                 aka "Thomas Huth <thuth@redhat.com>" [undefined]
# gpg:                 aka "Thomas Huth <th.huth@posteo.de>" [unknown]
# gpg:                 aka "Thomas Huth <huth@tuxfamily.org>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 27B8 8847 EEE0 2501 18F3  EAB9 2ED9 D774 FE70 2DB5

* tag 'pull-request-2023-04-28' of https://gitlab.com/thuth/qemu:
  apic: disable reentrancy detection for apic-msi
  raven: disable reentrancy detection for iomem
  bcm2835_property: disable reentrancy detection for iomem
  lsi53c895a: disable reentrancy detection for script RAM
  hw: replace most qemu_bh_new calls with qemu_bh_new_guarded
  checkpatch: add qemu_bh_new/aio_bh_new checks
  async: Add an optional reentrancy guard to the BH API
  memory: prevent dma-reentracy issues
  tests: vhost-user-test: release mutex on protocol violation
  hw/rdma: VMW_PVRDMA should depend on VMXNET3_PCI
  hw/rdma: Compile target-independent parts of the rdma code only once
  hw/rdma: Remove unused macros PG_DIR_SZ and PG_TBL_SZ
  s390x/gdb: Split s390-virt.xml

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2023-04-28 10:55:56 +01:00
commit 2074424ef6
49 changed files with 250 additions and 76 deletions

View file

@ -2,4 +2,4 @@ TARGET_ARCH=s390x
TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall.tbl TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml

View file

@ -1,4 +1,4 @@
TARGET_ARCH=s390x TARGET_ARCH=s390x
TARGET_BIG_ENDIAN=y TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml

View file

@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext:
* LEGACY qemu_aio_set_event_notifier() - monitor an event notifier * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier
* LEGACY timer_new_ms() - create a timer * LEGACY timer_new_ms() - create a timer
* LEGACY qemu_bh_new() - create a BH * LEGACY qemu_bh_new() - create a BH
* LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard
* LEGACY qemu_aio_wait() - run an event loop iteration * LEGACY qemu_aio_wait() - run an event loop iteration
Since they implicitly work on the main loop they cannot be used in code that Since they implicitly work on the main loop they cannot be used in code that
@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h):
* aio_set_event_notifier() - monitor an event notifier * aio_set_event_notifier() - monitor an event notifier
* aio_timer_new() - create a timer * aio_timer_new() - create a timer
* aio_bh_new() - create a BH * aio_bh_new() - create a BH
* aio_bh_new_guarded() - create a BH with a device re-entrancy guard
* aio_poll() - run an event loop iteration * aio_poll() - run an event loop iteration
The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard"
argument, which is used to check for and prevent re-entrancy problems. For
BHs associated with devices, the reentrancy-guard is contained in the
corresponding DeviceState and named "mem_reentrancy_guard".
The AioContext can be obtained from the IOThread using The AioContext can be obtained from the IOThread using
iothread_get_aio_context() or for the main loop using qemu_get_aio_context(). iothread_get_aio_context() or for the main loop using qemu_get_aio_context().
Code that takes an AioContext argument works both in IOThreads or the main Code that takes an AioContext argument works both in IOThreads or the main

14
gdb-xml/s390-virt-kvm.xml Normal file
View file

@ -0,0 +1,14 @@
<?xml version="1.0"?>
<!-- Copyright 2023 IBM Corp.
This work is licensed under the terms of the GNU GPL, version 2 or
(at your option) any later version. See the COPYING file in the
top-level directory. -->
<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<feature name="org.gnu.gdb.s390.virt.kvm">
<reg name="pp" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_token" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_select" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_compare" bitsize="64" type="uint64" group="system"/>
</feature>

View file

@ -11,8 +11,4 @@
<reg name="cputm" bitsize="64" type="uint64" group="system"/> <reg name="cputm" bitsize="64" type="uint64" group="system"/>
<reg name="last_break" bitsize="64" type="code_ptr" group="system"/> <reg name="last_break" bitsize="64" type="code_ptr" group="system"/>
<reg name="prefix" bitsize="64" type="data_ptr" group="system"/> <reg name="prefix" bitsize="64" type="data_ptr" group="system"/>
<reg name="pp" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_token" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_select" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_compare" bitsize="64" type="uint64" group="system"/>
</feature> </feature>

View file

@ -61,6 +61,7 @@ typedef struct Xen9pfsDev {
int num_rings; int num_rings;
Xen9pfsRing *rings; Xen9pfsRing *rings;
MemReentrancyGuard mem_reentrancy_guard;
} Xen9pfsDev; } Xen9pfsDev;
static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev); static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev);
@ -443,7 +444,9 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
XEN_FLEX_RING_SIZE(ring_order); XEN_FLEX_RING_SIZE(ring_order);
xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); xen_9pdev->rings[i].bh = qemu_bh_new_guarded(xen_9pfs_bh,
&xen_9pdev->rings[i],
&xen_9pdev->mem_reentrancy_guard);
xen_9pdev->rings[i].out_cons = 0; xen_9pdev->rings[i].out_cons = 0;
xen_9pdev->rings[i].out_size = 0; xen_9pdev->rings[i].out_size = 0;
xen_9pdev->rings[i].inprogress = false; xen_9pdev->rings[i].inprogress = false;

View file

@ -127,7 +127,8 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
} else { } else {
s->ctx = qemu_get_aio_context(); s->ctx = qemu_get_aio_context();
} }
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); s->bh = aio_bh_new_guarded(s->ctx, notify_guest_bh, s,
&DEVICE(vdev)->mem_reentrancy_guard);
s->batch_notify_vqs = bitmap_new(conf->num_queues); s->batch_notify_vqs = bitmap_new(conf->num_queues);
*dataplane = s; *dataplane = s;

View file

@ -633,8 +633,9 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
} else { } else {
dataplane->ctx = qemu_get_aio_context(); dataplane->ctx = qemu_get_aio_context();
} }
dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh, dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh,
dataplane); dataplane,
&DEVICE(xendev)->mem_reentrancy_guard);
return dataplane; return dataplane;
} }

View file

@ -985,7 +985,8 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp)
return; return;
} }
port->bh = qemu_bh_new(flush_queued_data_bh, port); port->bh = qemu_bh_new_guarded(flush_queued_data_bh, port,
&dev->mem_reentrancy_guard);
port->elem = NULL; port->elem = NULL;
} }

View file

@ -2201,11 +2201,14 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp)
qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl);
qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl); qxl->update_irq = qemu_bh_new_guarded(qxl_update_irq_bh, qxl,
&DEVICE(qxl)->mem_reentrancy_guard);
qxl_reset_state(qxl); qxl_reset_state(qxl);
qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl); qxl->update_area_bh = qemu_bh_new_guarded(qxl_render_update_area_bh, qxl,
qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd); &DEVICE(qxl)->mem_reentrancy_guard);
qxl->ssd.cursor_bh = qemu_bh_new_guarded(qemu_spice_cursor_refresh_bh, &qxl->ssd,
&DEVICE(qxl)->mem_reentrancy_guard);
} }
static void qxl_realize_primary(PCIDevice *dev, Error **errp) static void qxl_realize_primary(PCIDevice *dev, Error **errp)

View file

@ -1339,8 +1339,10 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
g->ctrl_vq = virtio_get_queue(vdev, 0); g->ctrl_vq = virtio_get_queue(vdev, 0);
g->cursor_vq = virtio_get_queue(vdev, 1); g->cursor_vq = virtio_get_queue(vdev, 1);
g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); &qdev->mem_reentrancy_guard);
g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
&qdev->mem_reentrancy_guard);
QTAILQ_INIT(&g->reslist); QTAILQ_INIT(&g->reslist);
QTAILQ_INIT(&g->cmdq); QTAILQ_INIT(&g->cmdq);
QTAILQ_INIT(&g->fenceq); QTAILQ_INIT(&g->fenceq);

View file

@ -1509,7 +1509,8 @@ static void ahci_cmd_done(const IDEDMA *dma)
ahci_write_fis_d2h(ad); ahci_write_fis_d2h(ad);
if (ad->port_regs.cmd_issue && !ad->check_bh) { if (ad->port_regs.cmd_issue && !ad->check_bh) {
ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); ad->check_bh = qemu_bh_new_guarded(ahci_check_cmd_bh, ad,
&ad->mem_reentrancy_guard);
qemu_bh_schedule(ad->check_bh); qemu_bh_schedule(ad->check_bh);
} }
} }

View file

@ -321,6 +321,7 @@ struct AHCIDevice {
bool init_d2h_sent; bool init_d2h_sent;
AHCICmdHdr *cur_cmd; AHCICmdHdr *cur_cmd;
NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; NCQTransferState ncq_tfs[AHCI_MAX_CMDS];
MemReentrancyGuard mem_reentrancy_guard;
}; };
struct AHCIPCIState { struct AHCIPCIState {

View file

@ -513,6 +513,7 @@ BlockAIOCB *ide_issue_trim(
BlockCompletionFunc *cb, void *cb_opaque, void *opaque) BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{ {
IDEState *s = opaque; IDEState *s = opaque;
IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
TrimAIOCB *iocb; TrimAIOCB *iocb;
/* Paired with a decrement in ide_trim_bh_cb() */ /* Paired with a decrement in ide_trim_bh_cb() */
@ -520,7 +521,8 @@ BlockAIOCB *ide_issue_trim(
iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque); iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
iocb->s = s; iocb->s = s;
iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb,
&DEVICE(dev)->mem_reentrancy_guard);
iocb->ret = 0; iocb->ret = 0;
iocb->qiov = qiov; iocb->qiov = qiov;
iocb->i = -1; iocb->i = -1;

View file

@ -885,6 +885,13 @@ static void apic_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
APIC_SPACE_SIZE); APIC_SPACE_SIZE);
/*
* apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can
* write back to apic-msi. As such mark the apic-msi region re-entrancy
* safe.
*/
s->io_memory.disable_reentrancy_guard = true;
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
local_apics[s->id] = s; local_apics[s->id] = s;

View file

@ -382,6 +382,13 @@ static void bcm2835_property_init(Object *obj)
memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_property_ops, s, memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_property_ops, s,
TYPE_BCM2835_PROPERTY, 0x10); TYPE_BCM2835_PROPERTY, 0x10);
/*
* bcm2835_property_ops call into bcm2835_mbox, which in-turn reads from
* iomem. As such, mark iomem as re-entracy safe.
*/
s->iomem.disable_reentrancy_guard = true;
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq); sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq);
} }

View file

@ -228,8 +228,10 @@ static void imx_rngc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem); sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq); sysbus_init_irq(sbd, &s->irq);
s->self_test_bh = qemu_bh_new(imx_rngc_self_test, s); s->self_test_bh = qemu_bh_new_guarded(imx_rngc_self_test, s,
s->seed_bh = qemu_bh_new(imx_rngc_seed, s); &dev->mem_reentrancy_guard);
s->seed_bh = qemu_bh_new_guarded(imx_rngc_seed, s,
&dev->mem_reentrancy_guard);
} }
static void imx_rngc_reset(DeviceState *dev) static void imx_rngc_reset(DeviceState *dev)

View file

@ -914,7 +914,7 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp)
{ {
DBDMAState *s = MAC_DBDMA(dev); DBDMAState *s = MAC_DBDMA(dev);
s->bh = qemu_bh_new(DBDMA_run_bh, s); s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard);
} }
static void mac_dbdma_class_init(ObjectClass *oc, void *data) static void mac_dbdma_class_init(ObjectClass *oc, void *data)

View file

@ -2917,7 +2917,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
n->vqs[index].tx_vq = n->vqs[index].tx_vq =
virtio_add_queue(vdev, n->net_conf.tx_queue_size, virtio_add_queue(vdev, n->net_conf.tx_queue_size,
virtio_net_handle_tx_bh); virtio_net_handle_tx_bh);
n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index],
&DEVICE(vdev)->mem_reentrancy_guard);
} }
n->vqs[index].tx_waiting = 0; n->vqs[index].tx_waiting = 0;

View file

@ -4607,7 +4607,8 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
} }
sq->bh = qemu_bh_new(nvme_process_sq, sq); sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq,
&DEVICE(sq->ctrl)->mem_reentrancy_guard);
if (n->dbbuf_enabled) { if (n->dbbuf_enabled) {
sq->db_addr = n->dbbuf_dbs + (sqid << 3); sq->db_addr = n->dbbuf_dbs + (sqid << 3);
@ -5253,7 +5254,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
} }
} }
n->cq[cqid] = cq; n->cq[cqid] = cq;
cq->bh = qemu_bh_new(nvme_post_cqes, cq); cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq,
&DEVICE(cq->ctrl)->mem_reentrancy_guard);
} }
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)

View file

@ -294,6 +294,13 @@ static void raven_pcihost_initfn(Object *obj)
memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000); memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000);
address_space_init(&s->pci_io_as, &s->pci_io, "raven-io"); address_space_init(&s->pci_io_as, &s->pci_io, "raven-io");
/*
* Raven's raven_io_ops use the address-space API to access pci-conf-idx
* (which is also owned by the raven device). As such, mark the
* pci_io_non_contiguous as re-entrancy safe.
*/
s->pci_io_non_contiguous.disable_reentrancy_guard = true;
/* CPU address space */ /* CPU address space */
memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR, memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR,
&s->pci_io); &s->pci_io);

View file

@ -1,3 +1,3 @@
config VMW_PVRDMA config VMW_PVRDMA
default y if PCI_DEVICES default y if PCI_DEVICES
depends on PVRDMA && PCI && MSI_NONBROKEN depends on PVRDMA && MSI_NONBROKEN && VMXNET3_PCI

View file

@ -1,10 +1,12 @@
specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( softmmu_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files(
'rdma.c', 'rdma.c',
'rdma_backend.c', 'rdma_backend.c',
'rdma_rm.c',
'rdma_utils.c', 'rdma_utils.c',
'vmw/pvrdma_qp_ops.c',
))
specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files(
'rdma_rm.c',
'vmw/pvrdma_cmd.c', 'vmw/pvrdma_cmd.c',
'vmw/pvrdma_dev_ring.c', 'vmw/pvrdma_dev_ring.c',
'vmw/pvrdma_main.c', 'vmw/pvrdma_main.c',
'vmw/pvrdma_qp_ops.c',
)) ))

View file

@ -23,10 +23,6 @@
#include "rdma_backend.h" #include "rdma_backend.h"
#include "rdma_rm.h" #include "rdma_rm.h"
/* Page directory and page tables */
#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf) void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf)
{ {
g_string_append_printf(buf, "\ttx : %" PRId64 "\n", g_string_append_printf(buf, "\ttx : %" PRId64 "\n",

View file

@ -2302,6 +2302,12 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s, memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s,
"lsi-io", 256); "lsi-io", 256);
/*
* Since we use the address-space API to interact with ram_io, disable the
* re-entrancy guard.
*/
s->ram_io.disable_reentrancy_guard = true;
address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io"); address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io");
qdev_init_gpio_out(d, &s->ext_irq, 1); qdev_init_gpio_out(d, &s->ext_irq, 1);

View file

@ -1322,7 +1322,8 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
} }
s->max_devices = MPTSAS_NUM_PORTS; s->max_devices = MPTSAS_NUM_PORTS;
s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); s->request_bh = qemu_bh_new_guarded(mptsas_fetch_requests, s,
&DEVICE(dev)->mem_reentrancy_guard);
scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info);
} }

View file

@ -193,7 +193,8 @@ static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
AioContext *ctx = blk_get_aio_context(s->conf.blk); AioContext *ctx = blk_get_aio_context(s->conf.blk);
/* The reference is dropped in scsi_dma_restart_bh.*/ /* The reference is dropped in scsi_dma_restart_bh.*/
object_ref(OBJECT(s)); object_ref(OBJECT(s));
s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s); s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s,
&DEVICE(s)->mem_reentrancy_guard);
qemu_bh_schedule(s->bh); qemu_bh_schedule(s->bh);
} }
} }

View file

@ -1184,7 +1184,8 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET); pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET);
} }
s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); s->completion_worker = qemu_bh_new_guarded(pvscsi_process_completion_queue, s,
&DEVICE(pci_dev)->mem_reentrancy_guard);
scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info);
/* override default SCSI bus hotplug-handler, with pvscsi's one */ /* override default SCSI bus hotplug-handler, with pvscsi's one */

View file

@ -937,7 +937,8 @@ static void usb_uas_realize(USBDevice *dev, Error **errp)
QTAILQ_INIT(&uas->results); QTAILQ_INIT(&uas->results);
QTAILQ_INIT(&uas->requests); QTAILQ_INIT(&uas->requests);
uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas); uas->status_bh = qemu_bh_new_guarded(usb_uas_send_status_bh, uas,
&d->mem_reentrancy_guard);
dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info); scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info);

View file

@ -1364,7 +1364,8 @@ static void dwc2_realize(DeviceState *dev, Error **errp)
s->fi = USB_FRMINTVL - 1; s->fi = USB_FRMINTVL - 1;
s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s); s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s);
s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s);
s->async_bh = qemu_bh_new(dwc2_work_bh, s); s->async_bh = qemu_bh_new_guarded(dwc2_work_bh, s,
&dev->mem_reentrancy_guard);
sysbus_init_irq(sbd, &s->irq); sysbus_init_irq(sbd, &s->irq);
} }

View file

@ -2533,7 +2533,8 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp)
} }
s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s);
s->async_bh = qemu_bh_new(ehci_work_bh, s); s->async_bh = qemu_bh_new_guarded(ehci_work_bh, s,
&dev->mem_reentrancy_guard);
s->device = dev; s->device = dev;
s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s); s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s);

View file

@ -1190,7 +1190,7 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp)
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
} }
} }
s->bh = qemu_bh_new(uhci_bh, s); s->bh = qemu_bh_new_guarded(uhci_bh, s, &DEVICE(dev)->mem_reentrancy_guard);
s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s);
s->num_ports_vmstate = NB_PORTS; s->num_ports_vmstate = NB_PORTS;
QTAILQ_INIT(&s->queues); QTAILQ_INIT(&s->queues);

View file

@ -1141,7 +1141,8 @@ static void usb_host_nodev_bh(void *opaque)
static void usb_host_nodev(USBHostDevice *s) static void usb_host_nodev(USBHostDevice *s)
{ {
if (!s->bh_nodev) { if (!s->bh_nodev) {
s->bh_nodev = qemu_bh_new(usb_host_nodev_bh, s); s->bh_nodev = qemu_bh_new_guarded(usb_host_nodev_bh, s,
&DEVICE(s)->mem_reentrancy_guard);
} }
qemu_bh_schedule(s->bh_nodev); qemu_bh_schedule(s->bh_nodev);
} }
@ -1739,7 +1740,8 @@ static int usb_host_post_load(void *opaque, int version_id)
USBHostDevice *dev = opaque; USBHostDevice *dev = opaque;
if (!dev->bh_postld) { if (!dev->bh_postld) {
dev->bh_postld = qemu_bh_new(usb_host_post_load_bh, dev); dev->bh_postld = qemu_bh_new_guarded(usb_host_post_load_bh, dev,
&DEVICE(dev)->mem_reentrancy_guard);
} }
qemu_bh_schedule(dev->bh_postld); qemu_bh_schedule(dev->bh_postld);
dev->bh_postld_pending = true; dev->bh_postld_pending = true;

View file

@ -1441,8 +1441,10 @@ static void usbredir_realize(USBDevice *udev, Error **errp)
} }
} }
dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev); dev->chardev_close_bh = qemu_bh_new_guarded(usbredir_chardev_close_bh, dev,
dev->device_reject_bh = qemu_bh_new(usbredir_device_reject_bh, dev); &DEVICE(dev)->mem_reentrancy_guard);
dev->device_reject_bh = qemu_bh_new_guarded(usbredir_device_reject_bh, dev,
&DEVICE(dev)->mem_reentrancy_guard);
dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev); dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev);
packet_id_queue_init(&dev->cancelled, dev, "cancelled"); packet_id_queue_init(&dev->cancelled, dev, "cancelled");

View file

@ -1032,7 +1032,8 @@ static void usbback_alloc(struct XenLegacyDevice *xendev)
QTAILQ_INIT(&usbif->req_free_q); QTAILQ_INIT(&usbif->req_free_q);
QSIMPLEQ_INIT(&usbif->hotplug_q); QSIMPLEQ_INIT(&usbif->hotplug_q);
usbif->bh = qemu_bh_new(usbback_bh, usbif); usbif->bh = qemu_bh_new_guarded(usbback_bh, usbif,
&DEVICE(xendev)->mem_reentrancy_guard);
} }
static int usbback_free(struct XenLegacyDevice *xendev) static int usbback_free(struct XenLegacyDevice *xendev)

View file

@ -886,8 +886,9 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
precopy_add_notifier(&s->free_page_hint_notify); precopy_add_notifier(&s->free_page_hint_notify);
object_ref(OBJECT(s->iothread)); object_ref(OBJECT(s->iothread));
s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread), s->free_page_bh = aio_bh_new_guarded(iothread_get_aio_context(s->iothread),
virtio_ballloon_get_free_page_hints, s); virtio_ballloon_get_free_page_hints, s,
&dev->mem_reentrancy_guard);
} }
if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_REPORTING)) { if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_REPORTING)) {

View file

@ -1074,7 +1074,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
vcrypto->vqs[i].dataq = vcrypto->vqs[i].dataq =
virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
vcrypto->vqs[i].dataq_bh = vcrypto->vqs[i].dataq_bh =
qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]); qemu_bh_new_guarded(virtio_crypto_dataq_bh, &vcrypto->vqs[i],
&dev->mem_reentrancy_guard);
vcrypto->vqs[i].vcrypto = vcrypto; vcrypto->vqs[i].vcrypto = vcrypto;
} }

View file

@ -23,6 +23,8 @@
#include "qemu/thread.h" #include "qemu/thread.h"
#include "qemu/timer.h" #include "qemu/timer.h"
#include "block/graph-lock.h" #include "block/graph-lock.h"
#include "hw/qdev-core.h"
typedef struct BlockAIOCB BlockAIOCB; typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret); typedef void BlockCompletionFunc(void *opaque, int ret);
@ -323,9 +325,11 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
* is opaque and must be allocated prior to its use. * is opaque and must be allocated prior to its use.
* *
* @name: A human-readable identifier for debugging purposes. * @name: A human-readable identifier for debugging purposes.
* @reentrancy_guard: A guard set when entering a cb to prevent
* device-reentrancy issues
*/ */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
const char *name); const char *name, MemReentrancyGuard *reentrancy_guard);
/** /**
* aio_bh_new: Allocate a new bottom half structure * aio_bh_new: Allocate a new bottom half structure
@ -334,7 +338,17 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
* string. * string.
*/ */
#define aio_bh_new(ctx, cb, opaque) \ #define aio_bh_new(ctx, cb, opaque) \
aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb))) aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)
/**
* aio_bh_new_guarded: Allocate a new bottom half structure with a
* reentrancy_guard
*
* A convenience wrapper for aio_bh_new_full() that uses the cb as the name
* string.
*/
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
/** /**
* aio_notify: Force processing of pending events. * aio_notify: Force processing of pending events.

View file

@ -767,6 +767,8 @@ struct MemoryRegion {
bool is_iommu; bool is_iommu;
RAMBlock *ram_block; RAMBlock *ram_block;
Object *owner; Object *owner;
/* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
DeviceState *dev;
const MemoryRegionOps *ops; const MemoryRegionOps *ops;
void *opaque; void *opaque;
@ -791,6 +793,9 @@ struct MemoryRegion {
unsigned ioeventfd_nb; unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds; MemoryRegionIoeventfd *ioeventfds;
RamDiscardManager *rdm; /* Only for RAM */ RamDiscardManager *rdm; /* Only for RAM */
/* For devices designed to perform re-entrant IO into their own IO MRs */
bool disable_reentrancy_guard;
}; };
struct IOMMUMemoryRegion { struct IOMMUMemoryRegion {

View file

@ -162,6 +162,10 @@ struct NamedClockList {
QLIST_ENTRY(NamedClockList) node; QLIST_ENTRY(NamedClockList) node;
}; };
typedef struct {
bool engaged_in_io;
} MemReentrancyGuard;
/** /**
* DeviceState: * DeviceState:
* @realized: Indicates whether the device has been fully constructed. * @realized: Indicates whether the device has been fully constructed.
@ -194,6 +198,9 @@ struct DeviceState {
int alias_required_for_version; int alias_required_for_version;
ResettableState reset; ResettableState reset;
GSList *unplug_blockers; GSList *unplug_blockers;
/* Is the device currently in mmio/pio/dma? Used to prevent re-entrancy */
MemReentrancyGuard mem_reentrancy_guard;
}; };
struct DeviceListener { struct DeviceListener {

View file

@ -387,9 +387,12 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
/* internal interfaces */ /* internal interfaces */
#define qemu_bh_new_guarded(cb, opaque, guard) \
qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard)
#define qemu_bh_new(cb, opaque) \ #define qemu_bh_new(cb, opaque) \
qemu_bh_new_full((cb), (opaque), (stringify(cb))) qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL)
QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name); QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
MemReentrancyGuard *reentrancy_guard);
void qemu_bh_schedule_idle(QEMUBH *bh); void qemu_bh_schedule_idle(QEMUBH *bh);
enum { enum {

View file

@ -2865,6 +2865,14 @@ sub process {
if ($line =~ /\bsignal\s*\(/ && !($line =~ /SIG_(?:IGN|DFL)/)) { if ($line =~ /\bsignal\s*\(/ && !($line =~ /SIG_(?:IGN|DFL)/)) {
ERROR("use sigaction to establish signal handlers; signal is not portable\n" . $herecurr); ERROR("use sigaction to establish signal handlers; signal is not portable\n" . $herecurr);
} }
# recommend qemu_bh_new_guarded instead of qemu_bh_new
if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\bqemu_bh_new\s*\(/) {
ERROR("use qemu_bh_new_guarded() instead of qemu_bh_new() to avoid reentrancy problems\n" . $herecurr);
}
# recommend aio_bh_new_guarded instead of aio_bh_new
if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\baio_bh_new\s*\(/) {
ERROR("use aio_bh_new_guarded() instead of aio_bh_new() to avoid reentrancy problems\n" . $herecurr);
}
# check for module_init(), use category-specific init macros explicitly please # check for module_init(), use category-specific init macros explicitly please
if ($line =~ /^module_init\s*\(/) { if ($line =~ /^module_init\s*\(/) {
ERROR("please use block_init(), type_init() etc. instead of module_init()\n" . $herecurr); ERROR("please use block_init(), type_init() etc. instead of module_init()\n" . $herecurr);

View file

@ -542,6 +542,18 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
access_size_max = 4; access_size_max = 4;
} }
/* Do not allow more than one simultaneous access to a device's IO Regions */
if (mr->dev && !mr->disable_reentrancy_guard &&
!mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
warn_report_once("Blocked re-entrant IO on MemoryRegion: "
"%s at addr: 0x%" HWADDR_PRIX,
memory_region_name(mr), addr);
return MEMTX_ACCESS_ERROR;
}
mr->dev->mem_reentrancy_guard.engaged_in_io = true;
}
/* FIXME: support unaligned access? */ /* FIXME: support unaligned access? */
access_size = MAX(MIN(size, access_size_max), access_size_min); access_size = MAX(MIN(size, access_size_max), access_size_min);
access_mask = MAKE_64BIT_MASK(0, access_size * 8); access_mask = MAKE_64BIT_MASK(0, access_size * 8);
@ -556,6 +568,9 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
access_mask, attrs); access_mask, attrs);
} }
} }
if (mr->dev) {
mr->dev->mem_reentrancy_guard.engaged_in_io = false;
}
return r; return r;
} }
@ -1170,6 +1185,7 @@ static void memory_region_do_init(MemoryRegion *mr,
} }
mr->name = g_strdup(name); mr->name = g_strdup(name);
mr->owner = owner; mr->owner = owner;
mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
mr->ram_block = NULL; mr->ram_block = NULL;
if (name) { if (name) {

View file

@ -206,12 +206,8 @@ static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
#define S390_VIRT_CPUTM_REGNUM 1 #define S390_VIRT_CPUTM_REGNUM 1
#define S390_VIRT_BEA_REGNUM 2 #define S390_VIRT_BEA_REGNUM 2
#define S390_VIRT_PREFIX_REGNUM 3 #define S390_VIRT_PREFIX_REGNUM 3
#define S390_VIRT_PP_REGNUM 4
#define S390_VIRT_PFT_REGNUM 5
#define S390_VIRT_PFS_REGNUM 6
#define S390_VIRT_PFC_REGNUM 7
/* total number of registers in s390-virt.xml */ /* total number of registers in s390-virt.xml */
#define S390_NUM_VIRT_REGS 8 #define S390_NUM_VIRT_REGS 4
static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n) static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
{ {
@ -224,14 +220,6 @@ static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
return gdb_get_regl(mem_buf, env->gbea); return gdb_get_regl(mem_buf, env->gbea);
case S390_VIRT_PREFIX_REGNUM: case S390_VIRT_PREFIX_REGNUM:
return gdb_get_regl(mem_buf, env->psa); return gdb_get_regl(mem_buf, env->psa);
case S390_VIRT_PP_REGNUM:
return gdb_get_regl(mem_buf, env->pp);
case S390_VIRT_PFT_REGNUM:
return gdb_get_regl(mem_buf, env->pfault_token);
case S390_VIRT_PFS_REGNUM:
return gdb_get_regl(mem_buf, env->pfault_select);
case S390_VIRT_PFC_REGNUM:
return gdb_get_regl(mem_buf, env->pfault_compare);
default: default:
return 0; return 0;
} }
@ -256,19 +244,51 @@ static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
env->psa = ldtul_p(mem_buf); env->psa = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env)); cpu_synchronize_post_init(env_cpu(env));
return 8; return 8;
case S390_VIRT_PP_REGNUM: default:
return 0;
}
}
/* the values represent the positions in s390-virt-kvm.xml */
#define S390_VIRT_KVM_PP_REGNUM 0
#define S390_VIRT_KVM_PFT_REGNUM 1
#define S390_VIRT_KVM_PFS_REGNUM 2
#define S390_VIRT_KVM_PFC_REGNUM 3
/* total number of registers in s390-virt-kvm.xml */
#define S390_NUM_VIRT_KVM_REGS 4
static int cpu_read_virt_kvm_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
{
switch (n) {
case S390_VIRT_KVM_PP_REGNUM:
return gdb_get_regl(mem_buf, env->pp);
case S390_VIRT_KVM_PFT_REGNUM:
return gdb_get_regl(mem_buf, env->pfault_token);
case S390_VIRT_KVM_PFS_REGNUM:
return gdb_get_regl(mem_buf, env->pfault_select);
case S390_VIRT_KVM_PFC_REGNUM:
return gdb_get_regl(mem_buf, env->pfault_compare);
default:
return 0;
}
}
static int cpu_write_virt_kvm_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
{
switch (n) {
case S390_VIRT_KVM_PP_REGNUM:
env->pp = ldtul_p(mem_buf); env->pp = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env)); cpu_synchronize_post_init(env_cpu(env));
return 8; return 8;
case S390_VIRT_PFT_REGNUM: case S390_VIRT_KVM_PFT_REGNUM:
env->pfault_token = ldtul_p(mem_buf); env->pfault_token = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env)); cpu_synchronize_post_init(env_cpu(env));
return 8; return 8;
case S390_VIRT_PFS_REGNUM: case S390_VIRT_KVM_PFS_REGNUM:
env->pfault_select = ldtul_p(mem_buf); env->pfault_select = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env)); cpu_synchronize_post_init(env_cpu(env));
return 8; return 8;
case S390_VIRT_PFC_REGNUM: case S390_VIRT_KVM_PFC_REGNUM:
env->pfault_compare = ldtul_p(mem_buf); env->pfault_compare = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env)); cpu_synchronize_post_init(env_cpu(env));
return 8; return 8;
@ -321,10 +341,15 @@ void s390_cpu_gdb_init(CPUState *cs)
cpu_write_c_reg, cpu_write_c_reg,
S390_NUM_C_REGS, "s390-cr.xml", 0); S390_NUM_C_REGS, "s390-cr.xml", 0);
if (kvm_enabled()) {
gdb_register_coprocessor(cs, cpu_read_virt_reg, gdb_register_coprocessor(cs, cpu_read_virt_reg,
cpu_write_virt_reg, cpu_write_virt_reg,
S390_NUM_VIRT_REGS, "s390-virt.xml", 0); S390_NUM_VIRT_REGS, "s390-virt.xml", 0);
if (kvm_enabled()) {
gdb_register_coprocessor(cs, cpu_read_virt_kvm_reg,
cpu_write_virt_kvm_reg,
S390_NUM_VIRT_KVM_REGS, "s390-virt-kvm.xml",
0);
} }
#endif #endif
} }

View file

@ -351,7 +351,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
if (size != msg.size) { if (size != msg.size) {
qos_printf("%s: Wrong message size received %d != %d\n", qos_printf("%s: Wrong message size received %d != %d\n",
__func__, size, msg.size); __func__, size, msg.size);
return; goto out;
} }
} }
@ -509,6 +509,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
break; break;
} }
out:
g_mutex_unlock(&s->data_mutex); g_mutex_unlock(&s->data_mutex);
} }

View file

@ -107,7 +107,8 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
return deadline; return deadline;
} }
QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name) QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
MemReentrancyGuard *reentrancy_guard)
{ {
QEMUBH *bh = g_new(QEMUBH, 1); QEMUBH *bh = g_new(QEMUBH, 1);

View file

@ -65,6 +65,7 @@ struct QEMUBH {
void *opaque; void *opaque;
QSLIST_ENTRY(QEMUBH) next; QSLIST_ENTRY(QEMUBH) next;
unsigned flags; unsigned flags;
MemReentrancyGuard *reentrancy_guard;
}; };
/* Called concurrently from any thread */ /* Called concurrently from any thread */
@ -137,7 +138,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
} }
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
const char *name) const char *name, MemReentrancyGuard *reentrancy_guard)
{ {
QEMUBH *bh; QEMUBH *bh;
bh = g_new(QEMUBH, 1); bh = g_new(QEMUBH, 1);
@ -146,13 +147,28 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
.cb = cb, .cb = cb,
.opaque = opaque, .opaque = opaque,
.name = name, .name = name,
.reentrancy_guard = reentrancy_guard,
}; };
return bh; return bh;
} }
void aio_bh_call(QEMUBH *bh) void aio_bh_call(QEMUBH *bh)
{ {
bool last_engaged_in_io = false;
if (bh->reentrancy_guard) {
last_engaged_in_io = bh->reentrancy_guard->engaged_in_io;
if (bh->reentrancy_guard->engaged_in_io) {
trace_reentrant_aio(bh->ctx, bh->name);
}
bh->reentrancy_guard->engaged_in_io = true;
}
bh->cb(bh->opaque); bh->cb(bh->opaque);
if (bh->reentrancy_guard) {
bh->reentrancy_guard->engaged_in_io = last_engaged_in_io;
}
} }
/* Multiple occurrences of aio_bh_poll cannot be called concurrently. */ /* Multiple occurrences of aio_bh_poll cannot be called concurrently. */

View file

@ -605,9 +605,11 @@ void main_loop_wait(int nonblocking)
/* Functions to operate on the main QEMU AioContext. */ /* Functions to operate on the main QEMU AioContext. */
QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name) QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
MemReentrancyGuard *reentrancy_guard)
{ {
return aio_bh_new_full(qemu_aio_context, cb, opaque, name); return aio_bh_new_full(qemu_aio_context, cb, opaque, name,
reentrancy_guard);
} }
/* /*

View file

@ -11,6 +11,7 @@ poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"
# async.c # async.c
aio_co_schedule(void *ctx, void *co) "ctx %p co %p" aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p" aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
# thread-pool.c # thread-pool.c
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p" thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"