Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-23 10:01:59 -06:00
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

v2:
 * Fixed stray slirp submodule change [Peter]

Fixes for the lock guard macros, code conversions to the lock guard
macros, and support for selecting fuzzer targets with argv[0].

# gpg: Signature made Mon 04 May 2020 16:11:11 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  lockable: Replace locks with lock guard macros
  lockable: replaced locks with lock guard macros where appropriate
  lockable: fix __COUNTER__ macro to be referenced properly
  fuzz: select fuzz target using executable name

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in: commit 5c7c46fea9
21 changed files with 134 additions and 155 deletions
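The pattern repeated throughout the diffs below: QEMU_LOCK_GUARD() holds a mutex until the enclosing scope ends, while WITH_QEMU_LOCK_GUARD() holds it only for the attached block, so error paths no longer need a goto-out label or an unlock before every return. A minimal sketch of the two styles, assuming only the documented behavior of the macros in include/qemu/lockable.h; demo_lock, counter, and both functions are hypothetical names invented for illustration:

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"

    static QemuMutex demo_lock;   /* hypothetical; assume qemu_mutex_init() ran at startup */
    static int counter;           /* hypothetical shared state */

    static int demo_increment(int limit)
    {
        QEMU_LOCK_GUARD(&demo_lock);        /* held until the function returns */
        if (counter >= limit) {
            return -1;                      /* early return: unlock is automatic */
        }
        return ++counter;
    }

    static void demo_reset(void)
    {
        WITH_QEMU_LOCK_GUARD(&demo_lock) {  /* held only for this block */
            counter = 0;
        }
        /* demo_lock is released here */
    }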
block/iscsi.c

@@ -1394,20 +1394,17 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    qemu_mutex_lock(&iscsilun->mutex);
+    QEMU_LOCK_GUARD(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
     } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
         error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
-        goto out;
+        return;
     }
 
     timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
     iscsi_set_events(iscsilun);
-
-out:
-    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
block/nfs.c

@@ -273,15 +273,14 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pread_async(client->context, client->fh,
-                        offset, bytes, nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pread_async(client->context, client->fh,
+                            offset, bytes, nfs_co_generic_cb, &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }

@@ -320,19 +319,18 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pwrite_async(client->context, client->fh,
-                         offset, bytes, buf,
-                         nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        if (my_buffer) {
-            g_free(buf);
-        }
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pwrite_async(client->context, client->fh,
+                             offset, bytes, buf,
+                             nfs_co_generic_cb, &task) != 0) {
+            if (my_buffer) {
+                g_free(buf);
+            }
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }

@@ -355,15 +353,14 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
-                        &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
+                            &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
cpus-common.c

@@ -22,6 +22,7 @@
 #include "exec/cpu-common.h"
 #include "hw/core/cpu.h"
 #include "sysemu/cpus.h"
+#include "qemu/lockable.h"
 
 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;

@@ -71,7 +72,7 @@ static int cpu_get_free_index(void)
 
 void cpu_list_add(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
         cpu->cpu_index = cpu_get_free_index();
         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);

@@ -79,15 +80,13 @@ void cpu_list_add(CPUState *cpu)
         assert(!cpu_index_auto_assigned);
     }
     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 void cpu_list_remove(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (!QTAILQ_IN_USE(cpu, node)) {
         /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
         return;
     }
 

@@ -95,7 +94,6 @@ void cpu_list_remove(CPUState *cpu)
 
     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 struct qemu_work_item {

@@ -237,7 +235,7 @@ void cpu_exec_start(CPUState *cpu)
      * see cpu->running == true, and it will kick the CPU.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (!cpu->has_waiter) {
             /* Not counted in pending_cpus, let the exclusive item
              * run.  Since we have the lock, just set cpu->running to true

@@ -252,7 +250,6 @@ void cpu_exec_start(CPUState *cpu)
              * waiter at cpu_exec_end.
              */
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 

@@ -280,7 +277,7 @@ void cpu_exec_end(CPUState *cpu)
      * next cpu_exec_start.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (cpu->has_waiter) {
             cpu->has_waiter = false;
             atomic_set(&pending_cpus, pending_cpus - 1);

@@ -288,7 +285,6 @@ void cpu_exec_end(CPUState *cpu)
                 qemu_cond_signal(&exclusive_cond);
             }
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 
hw/display/qxl.c

@@ -478,18 +478,19 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
                                      cmd->u.surface_create.stride);
             return 1;
         }
-        qemu_mutex_lock(&qxl->track_lock);
-        if (cmd->type == QXL_SURFACE_CMD_CREATE) {
-            qxl->guest_surfaces.cmds[id] = ext->cmd.data;
-            qxl->guest_surfaces.count++;
-            if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
-                qxl->guest_surfaces.max = qxl->guest_surfaces.count;
-        }
-        if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
-            qxl->guest_surfaces.cmds[id] = 0;
-            qxl->guest_surfaces.count--;
-        }
-        qemu_mutex_unlock(&qxl->track_lock);
+        WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+            if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+                qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+                qxl->guest_surfaces.count++;
+                if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+                    qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+                }
+            }
+            if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+                qxl->guest_surfaces.cmds[id] = 0;
+                qxl->guest_surfaces.count--;
+            }
+        }
         break;
     }
     case QXL_CMD_CURSOR:

@@ -958,10 +959,9 @@ static void interface_update_area_complete(QXLInstance *sin,
     int i;
     int qxl_i;
 
-    qemu_mutex_lock(&qxl->ssd.lock);
+    QEMU_LOCK_GUARD(&qxl->ssd.lock);
     if (surface_id != 0 || !num_updated_rects ||
         !qxl->render_update_cookie_num) {
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,

@@ -980,7 +980,6 @@ static void interface_update_area_complete(QXLInstance *sin,
          * Don't bother copying or scheduling the bh since we will flip
          * the whole area anyway on completion of the update_area async call
          */
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     qxl_i = qxl->num_dirty_rects;

@@ -991,7 +990,6 @@ static void interface_update_area_complete(QXLInstance *sin,
     trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
                                                          qxl->num_dirty_rects);
     qemu_bh_schedule(qxl->update_area_bh);
-    qemu_mutex_unlock(&qxl->ssd.lock);
 }
 
 /* called from spice server thread context only */

@@ -1694,15 +1692,14 @@ static void ioport_write(void *opaque, hwaddr addr,
     case QXL_IO_MONITORS_CONFIG_ASYNC:
 async_common:
         async = QXL_ASYNC;
-        qemu_mutex_lock(&d->async_lock);
-        if (d->current_async != QXL_UNDEFINED_IO) {
-            qxl_set_guest_bug(d, "%d async started before last (%d) complete",
-                io_port, d->current_async);
-            qemu_mutex_unlock(&d->async_lock);
-            return;
-        }
-        d->current_async = orig_io_port;
-        qemu_mutex_unlock(&d->async_lock);
+        WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+            if (d->current_async != QXL_UNDEFINED_IO) {
+                qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+                    io_port, d->current_async);
+                return;
+            }
+            d->current_async = orig_io_port;
+        }
         break;
     default:
         break;
hw/hyperv/hyperv.c

@@ -15,6 +15,7 @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"

@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {

@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 

@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 

@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
     EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {

@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 

@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
hw/rdma/rdma_backend.c

@@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
-    do {
-        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
-
-        trace_rdma_poll_cq(ne, ibcq);
-
-        for (i = 0; i < ne; i++) {
-            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            if (unlikely(!bctx)) {
-                rdma_error_report("No matching ctx for req %"PRId64,
-                                  wc[i].wr_id);
-                continue;
-            }
-
-            comp_handler(bctx->up_ctx, &wc[i]);
-
-            if (bctx->backend_qp) {
-                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
-            } else {
-                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
-            }
-
-            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
-            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            g_free(bctx);
-        }
-        total_ne += ne;
-    } while (ne > 0);
-    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+        do {
+            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+
+            trace_rdma_poll_cq(ne, ibcq);
+
+            for (i = 0; i < ne; i++) {
+                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                if (unlikely(!bctx)) {
+                    rdma_error_report("No matching ctx for req %"PRId64,
+                                      wc[i].wr_id);
+                    continue;
+                }
+
+                comp_handler(bctx->up_ctx, &wc[i]);
+
+                if (bctx->backend_qp) {
+                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+                } else {
+                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+                }
+
+                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                g_free(bctx);
+            }
+            total_ne += ne;
+        } while (ne > 0);
+        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
hw/rdma/rdma_rm.c

@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
 
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
hw/vfio/platform.c

@@ -22,6 +22,7 @@
 #include "hw/vfio/vfio-platform.h"
 #include "migration/vmstate.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/module.h"
 #include "qemu/range.h"

@@ -216,7 +217,7 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
     VFIOPlatformDevice *vdev = intp->vdev;
     bool delay_handling = false;
 
-    qemu_mutex_lock(&vdev->intp_mutex);
+    QEMU_LOCK_GUARD(&vdev->intp_mutex);
     if (intp->state == VFIO_IRQ_INACTIVE) {
         QLIST_FOREACH(tmp, &vdev->intp_list, next) {
             if (tmp->state == VFIO_IRQ_ACTIVE ||

@@ -236,7 +237,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
         QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                              intp, pqnext);
         ret = event_notifier_test_and_clear(intp->interrupt);
-        qemu_mutex_unlock(&vdev->intp_mutex);
         return;
     }
 

@@ -266,7 +266,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
                        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                        vdev->mmap_timeout);
     }
-    qemu_mutex_unlock(&vdev->intp_mutex);
 }
 
 /**
include/qemu/lockable.h

@@ -152,7 +152,7 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
  *   }
  */
 #define WITH_QEMU_LOCK_GUARD(x) \
-    WITH_QEMU_LOCK_GUARD_((x), qemu_lockable_auto##__COUNTER__)
+    WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))
 
 /**
  * QEMU_LOCK_GUARD - Lock an object until the end of the scope

@@ -169,8 +169,9 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
  *   return; <-- mutex is automatically unlocked
  *   }
  */
 #define QEMU_LOCK_GUARD(x) \
-    g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
+    g_autoptr(QemuLockable) \
+    glue(qemu_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
         qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))
 
 #endif
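Why the glue() change above matters: ## pastes tokens before its operands are macro-expanded, so qemu_lockable_auto##__COUNTER__ always yields the literal name qemu_lockable_auto__COUNTER__, and any function taking two guards in one scope, as synchronize_rcu() does in the util/rcu.c conversion below, would declare the same variable twice. Routing the paste through glue(), QEMU's two-level concatenation macro, expands __COUNTER__ first; the added G_GNUC_UNUSED also silences unused-variable warnings for the guard object. A minimal sketch of the two-level trick, assuming a compiler with the common __COUNTER__ extension (GCC/Clang); CONCAT/CONCAT_ are stand-ins for QEMU's glue()/xglue():

    #define CONCAT_(a, b) a##b            /* pastes without expanding */
    #define CONCAT(a, b)  CONCAT_(a, b)   /* expands a and b, then pastes */

    #define BAD_GUARD  int dummy##__COUNTER__           /* always 'dummy__COUNTER__' */
    #define GOOD_GUARD int CONCAT(dummy_, __COUNTER__)  /* dummy_0, dummy_1, ... */

    void two_guards(void)
    {
        GOOD_GUARD;   /* e.g. int dummy_0; */
        GOOD_GUARD;   /* e.g. int dummy_1; distinct names, so this compiles */
        /* Two BAD_GUARDs here would both declare 'dummy__COUNTER__' and fail. */
    }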
include/qemu/rcu.h

@@ -170,7 +170,7 @@ static inline void rcu_read_auto_unlock(RCUReadAuto *r)
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
 
 #define WITH_RCU_READ_LOCK_GUARD() \
-    WITH_RCU_READ_LOCK_GUARD_(_rcu_read_auto##__COUNTER__)
+    WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))
 
 #define WITH_RCU_READ_LOCK_GUARD_(var) \
     for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
migration/migration.c

@@ -1653,11 +1653,10 @@ static void migrate_fd_cleanup_bh(void *opaque)
 
 void migrate_set_error(MigrationState *s, const Error *error)
 {
-    qemu_mutex_lock(&s->error_mutex);
+    QEMU_LOCK_GUARD(&s->error_mutex);
     if (!s->error) {
         s->error = error_copy(error);
     }
-    qemu_mutex_unlock(&s->error_mutex);
 }
 
 void migrate_fd_error(MigrationState *s, const Error *error)
migration/multifd.c

@@ -894,11 +894,11 @@ void multifd_recv_sync_main(void)
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDRecvParams *p = &multifd_recv_state->params[i];
 
-        qemu_mutex_lock(&p->mutex);
-        if (multifd_recv_state->packet_num < p->packet_num) {
-            multifd_recv_state->packet_num = p->packet_num;
-        }
-        qemu_mutex_unlock(&p->mutex);
+        WITH_QEMU_LOCK_GUARD(&p->mutex) {
+            if (multifd_recv_state->packet_num < p->packet_num) {
+                multifd_recv_state->packet_num = p->packet_num;
+            }
+        }
         trace_multifd_recv_sync_main_signal(p->id);
         qemu_sem_post(&p->sem_sync);
     }
migration/ram.c

@@ -1369,7 +1369,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
         return NULL;
     }
 
-    qemu_mutex_lock(&rs->src_page_req_mutex);
+    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
         struct RAMSrcPageRequest *entry =
                                 QSIMPLEQ_FIRST(&rs->src_page_requests);

@@ -1386,7 +1386,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
             migration_consume_urgent_request();
         }
     }
-    qemu_mutex_unlock(&rs->src_page_req_mutex);
 
     return block;
 }
monitor/misc.c

@@ -1473,7 +1473,7 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     MonFdsetFd *mon_fdset_fd;
     AddfdInfo *fdinfo;
 
-    qemu_mutex_lock(&mon_fdsets_lock);
+    QEMU_LOCK_GUARD(&mon_fdsets_lock);
     if (has_fdset_id) {
         QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
             /* Break if match found or match impossible due to ordering by ID */

@@ -1494,7 +1494,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
             if (fdset_id < 0) {
                 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
                            "a non-negative value");
-                qemu_mutex_unlock(&mon_fdsets_lock);
                 return NULL;
             }
             /* Use specified fdset ID */

@@ -1545,7 +1544,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     fdinfo->fdset_id = mon_fdset->id;
     fdinfo->fd = mon_fdset_fd->fd;
 
-    qemu_mutex_unlock(&mon_fdsets_lock);
     return fdinfo;
 }
 
tests/qtest/fuzz/fuzz.c

@@ -91,6 +91,7 @@ static void usage(char *path)
         printf(" * %s : %s\n", tmp->target->name,
                tmp->target->description);
     }
+    printf("Alternatively, add -target-FUZZ_TARGET to the executable name\n");
     exit(0);
 }
 

@@ -143,18 +144,20 @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
     module_call_init(MODULE_INIT_QOM);
     module_call_init(MODULE_INIT_LIBQOS);
 
-    if (*argc <= 1) {
+    target_name = strstr(**argv, "-target-");
+    if (target_name) {        /* The binary name specifies the target */
+        target_name += strlen("-target-");
+    } else if (*argc > 1) {   /* The target is specified as an argument */
+        target_name = (*argv)[1];
+        if (!strstr(target_name, "--fuzz-target=")) {
+            usage(**argv);
+        }
+        target_name += strlen("--fuzz-target=");
+    } else {
         usage(**argv);
     }
 
     /* Identify the fuzz target */
-    target_name = (*argv)[1];
-    if (!strstr(target_name, "--fuzz-target=")) {
-        usage(**argv);
-    }
-
-    target_name += strlen("--fuzz-target=");
-
     fuzz_target = fuzz_get_target(target_name);
     if (!fuzz_target) {
         usage(**argv);
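In practice the new selection logic above lets the build bake the target into the binary name: copying or symlinking the fuzzer to, say, qemu-fuzz-i386-target-virtio-net selects the virtio-net target with no arguments, while --fuzz-target= still works. A self-contained sketch of the selection order, assuming nothing beyond standard C; pick_target and the sample names are invented, and it simplifies the real code by matching --fuzz-target= only as a prefix:

    #include <stdio.h>
    #include <string.h>

    /* Mirrors the order of the new checks: binary name first, then argv[1]. */
    static const char *pick_target(const char *argv0, int argc, const char *argv1)
    {
        const char *name = strstr(argv0, "-target-");
        if (name) {                               /* binary name selects the target */
            return name + strlen("-target-");
        }
        if (argc > 1 &&
            strncmp(argv1, "--fuzz-target=", strlen("--fuzz-target=")) == 0) {
            return argv1 + strlen("--fuzz-target=");  /* explicit argument */
        }
        return NULL;                              /* caller would print usage() */
    }

    int main(void)
    {
        puts(pick_target("qemu-fuzz-i386-target-virtio-net", 1, NULL));  /* virtio-net */
        puts(pick_target("qemu-fuzz-i386", 2, "--fuzz-target=i440fx"));  /* i440fx */
        return 0;
    }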
ui/spice-display.c

@@ -18,6 +18,7 @@
 #include "qemu/osdep.h"
 #include "ui/qemu-spice.h"
 #include "qemu/timer.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"

@@ -483,12 +484,12 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
 {
     graphic_hw_update(ssd->dcl.con);
 
-    qemu_mutex_lock(&ssd->lock);
-    if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
-        qemu_spice_create_update(ssd);
-        ssd->notify++;
-    }
-    qemu_mutex_unlock(&ssd->lock);
+    WITH_QEMU_LOCK_GUARD(&ssd->lock) {
+        if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
+            qemu_spice_create_update(ssd);
+            ssd->notify++;
+        }
+    }
 
     trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
     if (ssd->notify) {

@@ -580,7 +581,7 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
     int ret;
 
-    qemu_mutex_lock(&ssd->lock);
+    QEMU_LOCK_GUARD(&ssd->lock);
     if (ssd->ptr_define) {
         *ext = ssd->ptr_define->ext;
         ssd->ptr_define = NULL;

@@ -592,7 +593,6 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     } else {
         ret = false;
     }
-    qemu_mutex_unlock(&ssd->lock);
     return ret;
 }
 
util/log.c

@@ -25,6 +25,7 @@
 #include "qemu/cutils.h"
 #include "trace/control.h"
 #include "qemu/thread.h"
+#include "qemu/lockable.h"
 
 static char *logfilename;
 static QemuMutex qemu_logfile_mutex;

@@ -94,7 +95,7 @@ void qemu_set_log(int log_flags)
     if (qemu_loglevel && (!is_daemonized() || logfilename)) {
         need_to_open_file = true;
     }
-    qemu_mutex_lock(&qemu_logfile_mutex);
+    QEMU_LOCK_GUARD(&qemu_logfile_mutex);
     if (qemu_logfile && !need_to_open_file) {
         logfile = qemu_logfile;
         atomic_rcu_set(&qemu_logfile, NULL);

@@ -136,7 +137,6 @@ void qemu_set_log(int log_flags)
         }
         atomic_rcu_set(&qemu_logfile, logfile);
     }
-    qemu_mutex_unlock(&qemu_logfile_mutex);
 }
 
 void qemu_log_needs_buffers(void)
util/qemu-timer.c

@@ -459,17 +459,16 @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
     QEMUTimerList *timer_list = ts->timer_list;
     bool rearm;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
-    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
-        if (ts->expire_time != -1) {
-            timer_del_locked(timer_list, ts);
-        }
-        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
-    } else {
-        rearm = false;
-    }
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
-
+    WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
+        if (ts->expire_time == -1 || ts->expire_time > expire_time) {
+            if (ts->expire_time != -1) {
+                timer_del_locked(timer_list, ts);
+            }
+            rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+        } else {
+            rearm = false;
+        }
+    }
     if (rearm) {
         timerlist_rearm(timer_list);
     }
util/rcu.c

@@ -31,6 +31,7 @@
 #include "qemu/atomic.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/lockable.h"
 #if defined(CONFIG_MALLOC_TRIM)
 #include <malloc.h>
 #endif

@@ -141,14 +142,14 @@ static void wait_for_readers(void)
 
 void synchronize_rcu(void)
 {
-    qemu_mutex_lock(&rcu_sync_lock);
+    QEMU_LOCK_GUARD(&rcu_sync_lock);
 
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
      */
     smp_mb_global();
 
-    qemu_mutex_lock(&rcu_registry_lock);
+    QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.

@@ -169,9 +170,6 @@ void synchronize_rcu(void)
 
         wait_for_readers();
     }
-
-    qemu_mutex_unlock(&rcu_registry_lock);
-    qemu_mutex_unlock(&rcu_sync_lock);
 }
 
 
util/thread-pool.c

@@ -210,7 +210,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
 
     trace_thread_pool_cancel(elem, elem->common.opaque);
 
-    qemu_mutex_lock(&pool->lock);
+    QEMU_LOCK_GUARD(&pool->lock);
     if (elem->state == THREAD_QUEUED &&
         /* No thread has yet started working on elem. we can try to "steal"
          * the item from the worker if we can get a signal from the

@@ -225,7 +225,6 @@ static void thread_pool_cancel(BlockAIOCB *acb)
         elem->ret = -ECANCELED;
     }
 
-    qemu_mutex_unlock(&pool->lock);
 }
 
 static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
util/vfio-helpers.c

@@ -21,6 +21,7 @@
 #include "standard-headers/linux/pci_regs.h"
 #include "qemu/event_notifier.h"
 #include "qemu/vfio-helpers.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 #define QEMU_VFIO_DEBUG 0

@@ -667,14 +668,12 @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
         .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
     };
     trace_qemu_vfio_dma_reset_temporary(s);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
-        qemu_mutex_unlock(&s->lock);
         return -errno;
     }
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_mutex_unlock(&s->lock);
    return 0;
 }
 