-Werror=maybe-uninitialized fixes

-----BEGIN PGP SIGNATURE-----
 
 iQJQBAABCAA6FiEEh6m9kz+HxgbSdvYt2ujhCXWWnOUFAmb9PWwcHG1hcmNhbmRy
 ZS5sdXJlYXVAcmVkaGF0LmNvbQAKCRDa6OEJdZac5ebjD/43hDmLUGtq3WvEzG7T
 A9LjvKJ4bZrJkjwuogMUAAq89L65OcHdnXARgueSwt6Mi0qoakj7F2Ymv2oSw6Gq
 33uspja5PySCGkAs3qQ9lb/zsPFNmXZkhR/XaDGqAFjI24w/QTMq+wwiEuqjVC6P
 2C4VEXxz2Qn+WTQQjzpQ7E7CAUE/grHqflm+5JFICHywjj+oyoa5EnqEXHNlMb2J
 b8YVJ3z4SPNkq3VkQMHT0+aVO3+uS0NGxfXxGkVsSTdG1NlvwUr7bSomwZfXiUvP
 C0meotfsZTHZCJRtskCvn3kAd3E5EmIjMyDsbhqB0doCLCElV4AlzWSscy1y/GO+
 xm49Kutc+GRx/pztVMGzmC7aJ3Gwa8gKIrY1C/OvO8G2dZrJmTs2ydD4J9mIGxvC
 1p1XeHZi8UOVshBDyAKRovKGzGiRtoC05SvjPOgB58RYGbUfFYHUOah3qYfRRZSw
 nPOXiwcrqiIlzkPRXB1ACVLfoZAHWzEFhrGxIKVdHABfwg8Rt9SmJq3JX8ysbKUJ
 FUI0/ZExrzGTURWWCm48k2rXZGKG/YSgvdmsSB5QmPEdrrb2jKqp5dmAbg3o/04b
 z4A7AatVNfK3tG69/hD1PwAy50q/sbbRUL9ZbBnc4Fnx1xyAOL4LgZ2tMov/jQWE
 1SXLu8GKi4Yt76hUXFn1anqR0A==
 =zBkM
 -----END PGP SIGNATURE-----

Merge tag 'warn-pull-request' of https://gitlab.com/marcandre.lureau/qemu into staging

-Werror=maybe-uninitialized fixes

# -----BEGIN PGP SIGNATURE-----
#
# iQJQBAABCAA6FiEEh6m9kz+HxgbSdvYt2ujhCXWWnOUFAmb9PWwcHG1hcmNhbmRy
# ZS5sdXJlYXVAcmVkaGF0LmNvbQAKCRDa6OEJdZac5ebjD/43hDmLUGtq3WvEzG7T
# A9LjvKJ4bZrJkjwuogMUAAq89L65OcHdnXARgueSwt6Mi0qoakj7F2Ymv2oSw6Gq
# 33uspja5PySCGkAs3qQ9lb/zsPFNmXZkhR/XaDGqAFjI24w/QTMq+wwiEuqjVC6P
# 2C4VEXxz2Qn+WTQQjzpQ7E7CAUE/grHqflm+5JFICHywjj+oyoa5EnqEXHNlMb2J
# b8YVJ3z4SPNkq3VkQMHT0+aVO3+uS0NGxfXxGkVsSTdG1NlvwUr7bSomwZfXiUvP
# C0meotfsZTHZCJRtskCvn3kAd3E5EmIjMyDsbhqB0doCLCElV4AlzWSscy1y/GO+
# xm49Kutc+GRx/pztVMGzmC7aJ3Gwa8gKIrY1C/OvO8G2dZrJmTs2ydD4J9mIGxvC
# 1p1XeHZi8UOVshBDyAKRovKGzGiRtoC05SvjPOgB58RYGbUfFYHUOah3qYfRRZSw
# nPOXiwcrqiIlzkPRXB1ACVLfoZAHWzEFhrGxIKVdHABfwg8Rt9SmJq3JX8ysbKUJ
# FUI0/ZExrzGTURWWCm48k2rXZGKG/YSgvdmsSB5QmPEdrrb2jKqp5dmAbg3o/04b
# z4A7AatVNfK3tG69/hD1PwAy50q/sbbRUL9ZbBnc4Fnx1xyAOL4LgZ2tMov/jQWE
# 1SXLu8GKi4Yt76hUXFn1anqR0A==
# =zBkM
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 02 Oct 2024 13:32:44 BST
# gpg:                using RSA key 87A9BD933F87C606D276F62DDAE8E10975969CE5
# gpg:                issuer "marcandre.lureau@redhat.com"
# gpg: Good signature from "Marc-André Lureau <marcandre.lureau@redhat.com>" [full]
# gpg:                 aka "Marc-André Lureau <marcandre.lureau@gmail.com>" [full]
# Primary key fingerprint: 87A9 BD93 3F87 C606 D276  F62D DAE8 E109 7596 9CE5

* tag 'warn-pull-request' of https://gitlab.com/marcandre.lureau/qemu: (22 commits)
  qom/object: fix -Werror=maybe-uninitialized
  fsdev/9p: fix -Werror=maybe-uninitialized false-positive
  block: fix -Werror=maybe-uninitialized false-positive
  hw/virtio: freeing leaked memory from vhost_svq_get_buf in vhost_svq_poll
  hw/virtio: fix -Werror=maybe-uninitialized
  tests: fix -Werror=maybe-uninitialized false-positive
  target/loongarch: fix -Werror=maybe-uninitialized false-positive
  linux-user/hppa: fix -Werror=maybe-uninitialized false-positive
  migration: fix -Werror=maybe-uninitialized false-positive
  hw/virtio-blk: fix -Werror=maybe-uninitialized false-positive
  migration: fix -Werror=maybe-uninitialized false-positives
  block/block-copy: fix -Werror=maybe-uninitialized false-positive
  hw/sdhci: fix -Werror=maybe-uninitialized false-positive
  hw/vhost-scsi: fix -Werror=maybe-uninitialized
  hw/ahci: fix -Werror=maybe-uninitialized false-positive
  block/stream: fix -Werror=maybe-uninitialized false-positives
  block/mirror: fix -Werror=maybe-uninitialized false-positive
  block/mirror: fix -Werror=maybe-uninitialized false-positive
  nbd: fix -Werror=maybe-uninitialized false-positive
  hw/qxl: fix -Werror=maybe-uninitialized false-positives
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2024-10-03 10:32:54 +01:00
commit 423be09ab9
22 changed files with 60 additions and 44 deletions

View file

@ -584,7 +584,7 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
BlockCopyState *s = t->s; BlockCopyState *s = t->s;
bool error_is_read = false; bool error_is_read = false;
BlockCopyMethod method = t->method; BlockCopyMethod method = t->method;
int ret; int ret = -1;
WITH_GRAPH_RDLOCK_GUARD() { WITH_GRAPH_RDLOCK_GUARD() {
ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method, ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,

View file

@ -1398,7 +1398,7 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
Error **errp) Error **errp)
{ {
BDRVRawState *s = bs->opaque; BDRVRawState *s = bs->opaque;
BlockZoneModel zoned; BlockZoneModel zoned = BLK_Z_NONE;
int ret; int ret;
ret = get_sysfs_zoned_model(st, &zoned); ret = get_sysfs_zoned_model(st, &zoned);

View file

@ -349,7 +349,7 @@ static void coroutine_fn mirror_co_read(void *opaque)
MirrorOp *op = opaque; MirrorOp *op = opaque;
MirrorBlockJob *s = op->s; MirrorBlockJob *s = op->s;
int nb_chunks; int nb_chunks;
uint64_t ret; int ret = -1;
uint64_t max_bytes; uint64_t max_bytes;
max_bytes = s->granularity * s->max_iov; max_bytes = s->granularity * s->max_iov;
@ -565,7 +565,7 @@ static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s)
bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks); bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
while (nb_chunks > 0 && offset < s->bdev_length) { while (nb_chunks > 0 && offset < s->bdev_length) {
int ret; int ret = -1;
int64_t io_bytes; int64_t io_bytes;
int64_t io_bytes_acct; int64_t io_bytes_acct;
MirrorMethod mirror_method = MIRROR_METHOD_COPY; MirrorMethod mirror_method = MIRROR_METHOD_COPY;
@ -841,7 +841,7 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
int64_t offset; int64_t offset;
BlockDriverState *bs; BlockDriverState *bs;
BlockDriverState *target_bs = blk_bs(s->target); BlockDriverState *target_bs = blk_bs(s->target);
int ret; int ret = -1;
int64_t count; int64_t count;
bdrv_graph_co_rdlock(); bdrv_graph_co_rdlock();
@ -931,7 +931,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque; MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
BlockDriverState *target_bs = blk_bs(s->target); BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true; bool need_drain = true;
BlockDeviceIoStatus iostatus; BlockDeviceIoStatus iostatus = BLOCK_DEVICE_IO_STATUS__MAX;
int64_t length; int64_t length;
int64_t target_length; int64_t target_length;
BlockDriverInfo bdi; BlockDriverInfo bdi;

View file

@ -155,8 +155,8 @@ static void stream_clean(Job *job)
static int coroutine_fn stream_run(Job *job, Error **errp) static int coroutine_fn stream_run(Job *job, Error **errp)
{ {
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockDriverState *unfiltered_bs; BlockDriverState *unfiltered_bs = NULL;
int64_t len; int64_t len = -1;
int64_t offset = 0; int64_t offset = 0;
int error = 0; int error = 0;
int64_t n = 0; /* bytes */ int64_t n = 0; /* bytes */
@ -177,7 +177,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
for ( ; offset < len; offset += n) { for ( ; offset < len; offset += n) {
bool copy; bool copy;
int ret; int ret = -1;
/* Note that even when no rate limit is applied we need to yield /* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns. * with no pending I/O here so that bdrv_drain_all() returns.

View file

@ -84,9 +84,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
break; break;
} }
case 'w': { case 'w': {
uint16_t val, *valp; uint16_t val = 0, *valp;
valp = va_arg(ap, uint16_t *); valp = va_arg(ap, uint16_t *);
copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
if (copied <= 0) {
break;
}
if (bswap) { if (bswap) {
*valp = le16_to_cpu(val); *valp = le16_to_cpu(val);
} else { } else {
@ -95,9 +98,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
break; break;
} }
case 'd': { case 'd': {
uint32_t val, *valp; uint32_t val = 0, *valp;
valp = va_arg(ap, uint32_t *); valp = va_arg(ap, uint32_t *);
copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
if (copied <= 0) {
break;
}
if (bswap) { if (bswap) {
*valp = le32_to_cpu(val); *valp = le32_to_cpu(val);
} else { } else {
@ -106,9 +112,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
break; break;
} }
case 'q': { case 'q': {
uint64_t val, *valp; uint64_t val = 0, *valp;
valp = va_arg(ap, uint64_t *); valp = va_arg(ap, uint64_t *);
copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
if (copied <= 0) {
break;
}
if (bswap) { if (bswap) {
*valp = le64_to_cpu(val); *valp = le64_to_cpu(val);
} else { } else {

View file

@ -1060,7 +1060,7 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
VirtIOBlock *s = opaque; VirtIOBlock *s = opaque;
uint16_t num_queues = s->conf.num_queues; uint16_t num_queues = s->conf.num_queues;
g_autofree VirtIOBlockReq **vq_rq = NULL; g_autofree VirtIOBlockReq **vq_rq = NULL;
VirtIOBlockReq *rq; VirtIOBlockReq *rq = NULL;
if (!running) { if (!running) {
return; return;

View file

@ -1301,8 +1301,8 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
}; };
uint64_t guest_start; uint64_t guest_start;
uint64_t guest_end; uint64_t guest_end;
int pci_region; int pci_region = -1;
pcibus_t pci_start; pcibus_t pci_start = PCI_BAR_UNMAPPED;
pcibus_t pci_end; pcibus_t pci_end;
MemoryRegion *mr; MemoryRegion *mr;
intptr_t virt_start; intptr_t virt_start;

View file

@ -948,7 +948,6 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
uint64_t sum = 0; uint64_t sum = 0;
int off_idx = -1; int off_idx = -1;
int64_t off_pos = -1; int64_t off_pos = -1;
int tbl_entry_size;
IDEBus *bus = &ad->port; IDEBus *bus = &ad->port;
BusState *qbus = BUS(bus); BusState *qbus = BUS(bus);
@ -976,6 +975,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
/* Get entries in the PRDT, init a qemu sglist accordingly */ /* Get entries in the PRDT, init a qemu sglist accordingly */
if (prdtl > 0) { if (prdtl > 0) {
AHCI_SG *tbl = (AHCI_SG *)prdt; AHCI_SG *tbl = (AHCI_SG *)prdt;
int tbl_entry_size = 0;
sum = 0; sum = 0;
for (i = 0; i < prdtl; i++) { for (i = 0; i < prdtl; i++) {
tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);

View file

@ -172,7 +172,7 @@ static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
struct vhost_dev *dev = &vsc->dev; struct vhost_dev *dev = &vsc->dev;
struct vhost_vring_worker vq_worker; struct vhost_vring_worker vq_worker;
struct vhost_worker_state worker; struct vhost_worker_state worker;
int i, ret; int i, ret = 0;
/* Use default worker */ /* Use default worker */
if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) { if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {

View file

@ -747,7 +747,7 @@ static void sdhci_do_adma(SDHCIState *s)
const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
const MemTxAttrs attrs = { .memory = true }; const MemTxAttrs attrs = { .memory = true };
ADMADescr dscr = {}; ADMADescr dscr = {};
MemTxResult res; MemTxResult res = MEMTX_ERROR;
int i; int i;
if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) {

View file

@ -414,6 +414,7 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
return i; return i;
} }
G_GNUC_WARN_UNUSED_RESULT
static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
uint32_t *len) uint32_t *len)
{ {
@ -526,10 +527,11 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num) size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
{ {
size_t len = 0; size_t len = 0;
uint32_t r;
while (num--) { while (num--) {
g_autofree VirtQueueElement *elem = NULL;
int64_t start_us = g_get_monotonic_time(); int64_t start_us = g_get_monotonic_time();
uint32_t r = 0;
do { do {
if (vhost_svq_more_used(svq)) { if (vhost_svq_more_used(svq)) {
@ -541,7 +543,7 @@ size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
} }
} while (true); } while (true);
vhost_svq_get_buf(svq, &r); elem = vhost_svq_get_buf(svq, &r);
len += r; len += r;
} }

View file

@ -99,6 +99,8 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
#endif #endif
} }
break; break;
default:
g_assert_not_reached();
} }
break; break;
} }

View file

@ -149,12 +149,12 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
unsigned int flag, unsigned int flag,
bool one_shot) bool one_shot)
{ {
DirtyPageRecord *records; DirtyPageRecord *records = NULL;
int64_t init_time_ms; int64_t init_time_ms;
int64_t duration; int64_t duration;
int64_t dirtyrate; int64_t dirtyrate;
int i = 0; int i = 0;
unsigned int gen_id; unsigned int gen_id = 0;
retry: retry:
init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

View file

@ -2278,7 +2278,7 @@ static bool migrate_handle_rp_resume_ack(MigrationState *s,
*/ */
static void migration_release_dst_files(MigrationState *ms) static void migration_release_dst_files(MigrationState *ms)
{ {
QEMUFile *file; QEMUFile *file = NULL;
WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
/* /*

View file

@ -1793,7 +1793,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{ {
RAMBlock *block; RAMBlock *block;
ram_addr_t offset; ram_addr_t offset;
bool dirty; bool dirty = false;
do { do {
block = unqueue_page(rs, &offset); block = unqueue_page(rs, &offset);

View file

@ -410,7 +410,7 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
*/ */
void nbd_co_establish_connection_cancel(NBDClientConnection *conn) void nbd_co_establish_connection_cancel(NBDClientConnection *conn)
{ {
Coroutine *wait_co; Coroutine *wait_co = NULL;
WITH_QEMU_LOCK_GUARD(&conn->mutex) { WITH_QEMU_LOCK_GUARD(&conn->mutex) {
wait_co = g_steal_pointer(&conn->wait_co); wait_co = g_steal_pointer(&conn->wait_co);

View file

@ -2226,7 +2226,7 @@ Object *object_resolve_path_at(Object *parent, const char *path)
Object *object_resolve_type_unambiguous(const char *typename, Error **errp) Object *object_resolve_type_unambiguous(const char *typename, Error **errp)
{ {
bool ambig; bool ambig = false;
Object *o = object_resolve_path_type("", typename, &ambig); Object *o = object_resolve_path_type("", typename, &ambig);
if (ambig) { if (ambig) {

View file

@ -34,26 +34,28 @@ void write_fcc(CPULoongArchState *env, uint64_t val)
int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{ {
CPULoongArchState *env = cpu_env(cs); CPULoongArchState *env = cpu_env(cs);
uint64_t val;
if (0 <= n && n < 32) {
val = env->gpr[n];
} else if (n == 32) {
/* orig_a0 */
val = 0;
} else if (n == 33) {
val = env->pc;
} else if (n == 34) {
val = env->CSR_BADV;
}
if (0 <= n && n <= 34) { if (0 <= n && n <= 34) {
uint64_t val;
if (n < 32) {
val = env->gpr[n];
} else if (n == 32) {
/* orig_a0 */
val = 0;
} else if (n == 33) {
val = env->pc;
} else /* if (n == 34) */ {
val = env->CSR_BADV;
}
if (is_la64(env)) { if (is_la64(env)) {
return gdb_get_reg64(mem_buf, val); return gdb_get_reg64(mem_buf, val);
} else { } else {
return gdb_get_reg32(mem_buf, val); return gdb_get_reg32(mem_buf, val);
} }
} }
return 0; return 0;
} }

View file

@ -722,7 +722,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
BlockJob *job; BlockJob *job;
TestBlockJob *tjob; TestBlockJob *tjob;
IOThread *iothread = NULL; IOThread *iothread = NULL;
int ret; int ret = -1;
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR, src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
&error_abort); &error_abort);

View file

@ -745,7 +745,7 @@ static void test_propagate_mirror(void)
AioContext *main_ctx = qemu_get_aio_context(); AioContext *main_ctx = qemu_get_aio_context();
BlockDriverState *src, *target, *filter; BlockDriverState *src, *target, *filter;
BlockBackend *blk; BlockBackend *blk;
Job *job; Job *job = NULL;
Error *local_err = NULL; Error *local_err = NULL;
/* Create src and target*/ /* Create src and target*/

View file

@ -136,7 +136,7 @@ static Coroutine *coroutine_pool_get_local(void)
static void coroutine_pool_refill_local(void) static void coroutine_pool_refill_local(void)
{ {
CoroutinePool *local_pool = get_ptr_local_pool(); CoroutinePool *local_pool = get_ptr_local_pool();
CoroutinePoolBatch *batch; CoroutinePoolBatch *batch = NULL;
WITH_QEMU_LOCK_GUARD(&global_pool_lock) { WITH_QEMU_LOCK_GUARD(&global_pool_lock) {
batch = QSLIST_FIRST(&global_pool); batch = QSLIST_FIRST(&global_pool);

View file

@ -182,7 +182,7 @@ bool qemu_clock_has_timers(QEMUClockType type)
bool timerlist_expired(QEMUTimerList *timer_list) bool timerlist_expired(QEMUTimerList *timer_list)
{ {
int64_t expire_time; int64_t expire_time = 0;
if (!qatomic_read(&timer_list->active_timers)) { if (!qatomic_read(&timer_list->active_timers)) {
return false; return false;
@ -212,7 +212,7 @@ bool qemu_clock_expired(QEMUClockType type)
int64_t timerlist_deadline_ns(QEMUTimerList *timer_list) int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
{ {
int64_t delta; int64_t delta;
int64_t expire_time; int64_t expire_time = 0;
if (!qatomic_read(&timer_list->active_timers)) { if (!qatomic_read(&timer_list->active_timers)) {
return -1; return -1;
@ -451,7 +451,7 @@ void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time) void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
{ {
QEMUTimerList *timer_list = ts->timer_list; QEMUTimerList *timer_list = ts->timer_list;
bool rearm; bool rearm = false;
WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) { WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
if (ts->expire_time == -1 || ts->expire_time > expire_time) { if (ts->expire_time == -1 || ts->expire_time > expire_time) {