Replace "iothread lock" with "BQL" in comments

The term "iothread lock" is obsolete. The APIs use Big QEMU Lock (BQL)
in their names. Update the code comments to use "BQL" instead of
"iothread lock".

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Message-id: 20240102153529.486531-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2024-01-02 10:35:28 -05:00
parent 7c754c787e
commit a4a411fbaf
21 changed files with 47 additions and 47 deletions
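
For readers new to the terminology, the BQL locking API that these comments now reference looks roughly like the sketch below. This is a minimal approximation, not QEMU's actual header: bql_lock() appears in the diff itself, while bql_unlock(), bql_locked(), the include/qemu/main-loop.h location, and the update_global_vm_state() helper are assumptions added here for illustration.

#include <stdbool.h>

/*
 * Simplified sketch of the BQL API naming that replaces the old
 * "iothread lock" wording; the real declarations are assumed to
 * live in QEMU's include/qemu/main-loop.h and may differ.
 */
void bql_lock(void);    /* acquire the Big QEMU Lock */
void bql_unlock(void);  /* release the Big QEMU Lock (assumed counterpart) */
bool bql_locked(void);  /* does this thread hold the BQL? (assumed helper) */

/* Typical caller-side pattern before invoking a function whose
 * comment says "Called with the BQL taken": */
static void update_global_vm_state(void)  /* hypothetical caller */
{
    bql_lock();
    /* ... touch state that only the BQL holder may modify ... */
    bql_unlock();
}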

View file

@@ -464,7 +464,7 @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
g_free(buf);
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
{
SaveBitmapState *dbms;
@@ -479,7 +479,7 @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
}
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
const char *bs_name, GHashTable *alias_map)
{
@@ -598,7 +598,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
return 0;
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int init_dirty_bitmap_migration(DBMSaveState *s)
{
BlockDriverState *bs;
@@ -607,7 +607,7 @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
BlockBackend *blk;
GHashTable *alias_map = NULL;
/* Runs in the migration thread, but holds the iothread lock */
/* Runs in the migration thread, but holds the BQL */
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
@@ -742,7 +742,7 @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
return s->bulk_completed;
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{

View file

@@ -101,7 +101,7 @@ typedef struct BlkMigState {
int prev_progress;
int bulk_completed;
/* Lock must be taken _inside_ the iothread lock. */
/* Lock must be taken _inside_ the BQL. */
QemuMutex lock;
} BlkMigState;
@@ -117,7 +117,7 @@ static void blk_mig_unlock(void)
qemu_mutex_unlock(&block_mig_state.lock);
}
/* Must run outside of the iothread lock during the bulk phase,
/* Must run outside of the BQL during the bulk phase,
* or the VM will stall.
*/
@@ -327,7 +327,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
return (bmds->cur_sector >= total_sectors);
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int set_dirty_tracking(void)
{
@@ -354,7 +354,7 @@ fail:
return ret;
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static void unset_dirty_tracking(void)
{
@@ -505,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void)
}
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
int is_async)
@@ -587,7 +587,7 @@ error:
return ret;
}
/* Called with iothread lock taken.
/* Called with the BQL taken.
*
* return value:
* 0: too much data for max_downtime
@@ -649,7 +649,7 @@ static int flush_blks(QEMUFile *f)
return ret;
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int64_t get_remaining_dirty(void)
{
@@ -667,7 +667,7 @@ static int64_t get_remaining_dirty(void)
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static void block_migration_cleanup_bmds(void)
{
BlkMigDevState *bmds;
@@ -690,7 +690,7 @@ static void block_migration_cleanup_bmds(void)
}
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static void block_migration_cleanup(void *opaque)
{
BlkMigBlock *blk;
@@ -767,7 +767,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
}
ret = 0;
} else {
/* Always called with iothread lock taken for
/* Always called with the BQL taken for
* simplicity, block_save_complete also calls it.
*/
bql_lock();
@@ -795,7 +795,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
return (delta_bytes > 0);
}
/* Called with iothread lock taken. */
/* Called with the BQL taken. */
static int block_save_complete(QEMUFile *f, void *opaque)
{

View file

@@ -945,7 +945,7 @@ int coroutine_fn colo_incoming_co(void)
qemu_thread_join(&th);
bql_lock();
/* We hold the global iothread lock, so it is safe here */
/* We hold the global BQL, so it is safe here */
colo_release_ram_cache();
return 0;

View file

@@ -2551,7 +2551,7 @@ fail:
/**
* migration_maybe_pause: Pause if required to by
* migrate_pause_before_switchover called with the iothread locked
* migrate_pause_before_switchover called with the BQL locked
* Returns: 0 on success
*/
static int migration_maybe_pause(MigrationState *s,

View file

@@ -2395,7 +2395,7 @@ static void ram_save_cleanup(void *opaque)
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
/* caller have hold iothread lock or is in a bh, so there is
/* caller have hold BQL or is in a bh, so there is
* no writing race against the migration bitmap
*/
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
@@ -3131,7 +3131,7 @@ out:
*
* Returns zero to indicate success or negative on error
*
* Called with iothread lock
* Called with the BQL
*
* @f: QEMUFile where to send the data
* @opaque: RAMState pointer