migration: Centralize BH creation and dispatch
Now that the migration state reference counting is correct, further wrap
the bottom half dispatch process to avoid future issues.

Move BH creation and scheduling together and wrap the dispatch with an
intermediary function that will ensure we always keep the ref/unref
balanced.

Also move the responsibility of deleting the BH into the wrapper and
remove the now unnecessary pointers.

Signed-off-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20240119233922.32588-6-farosas@suse.de
Signed-off-by: Peter Xu <peterx@redhat.com>
commit 44d0d456d7
parent 699d9476a0
3 changed files with 40 additions and 37 deletions
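
The change described in the message amounts to one pattern: instead of each caller creating a QEMUBH, taking a reference on the MigrationState and later remembering to delete the BH and drop the reference itself, callers now pass a callback and an opaque pointer to migration_bh_schedule(), and a single dispatcher deletes the BH, runs the callback and drops the reference in one place. A minimal stand-alone sketch of that ref/unref pairing follows; BHFunc, PendingBH, bh_schedule() and state_ref()/state_unref() are illustrative stand-ins rather than QEMU code, and the QEMUBH creation/deletion half of the wrapper is reduced to a direct call.

/* Stand-alone sketch only: state_ref()/state_unref() stand in for
 * object_ref()/object_unref() on the MigrationState, and bh_schedule()
 * for migration_bh_schedule(); none of these names are QEMU's. */
#include <stdio.h>
#include <stdlib.h>

typedef void BHFunc(void *opaque);

typedef struct {
    BHFunc *cb;    /* the bottom half the caller actually wants to run */
    void *opaque;  /* its argument */
} PendingBH;

static int state_refcount = 1;

static void state_ref(void)   { state_refcount++; }
static void state_unref(void) { state_refcount--; }

/* Dispatcher: runs in place of the caller's bottom half, then balances
 * the reference taken at schedule time. */
static void dispatch_bh(void *opaque)
{
    PendingBH *pending = opaque;

    pending->cb(pending->opaque);  /* run the real bottom half */
    state_unref();                 /* pairs with state_ref() below */
    free(pending);
}

/* The single entry point callers use; it owns the ref/unref pairing. */
static void bh_schedule(BHFunc *cb, void *opaque)
{
    PendingBH *pending = malloc(sizeof(*pending));

    pending->cb = cb;
    pending->opaque = opaque;
    state_ref();          /* keep the state alive until dispatch runs */
    dispatch_bh(pending); /* a real event loop would defer this call */
}

static void cleanup_bh(void *opaque)
{
    printf("cleanup bottom half ran for '%s'\n", (const char *)opaque);
}

int main(void)
{
    bh_schedule(cleanup_bh, "migration");
    printf("refcount balanced: %d\n", state_refcount);
    return 0;
}

In the real code, the schedule side additionally allocates the QEMUBH with qemu_bh_new() and the dispatch side deletes it, as the first hunk below shows.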
@@ -199,8 +199,39 @@ void migration_object_init(void)
     dirty_bitmap_mig_init();
 }
 
-static void migration_bh_schedule(MigrationState *s, QEMUBH *bh)
+typedef struct {
+    QEMUBH *bh;
+    QEMUBHFunc *cb;
+    void *opaque;
+} MigrationBH;
+
+static void migration_bh_dispatch_bh(void *opaque)
 {
+    MigrationState *s = migrate_get_current();
+    MigrationBH *migbh = opaque;
+
+    /* cleanup this BH */
+    qemu_bh_delete(migbh->bh);
+    migbh->bh = NULL;
+
+    /* dispatch the other one */
+    migbh->cb(migbh->opaque);
+    object_unref(OBJECT(s));
+
+    g_free(migbh);
+}
+
+void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
+{
+    MigrationState *s = migrate_get_current();
+    MigrationBH *migbh = g_new0(MigrationBH, 1);
+    QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh);
+
+    /* Store these to dispatch when the BH runs */
+    migbh->bh = bh;
+    migbh->cb = cb;
+    migbh->opaque = opaque;
+
     /*
      * Ref the state for bh, because it may be called when
      * there're already no other refs
@@ -656,9 +687,7 @@ static void process_incoming_migration_bh(void *opaque)
      */
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_COMPLETED);
-    qemu_bh_delete(mis->bh);
     migration_incoming_state_destroy();
-    object_unref(OBJECT(migrate_get_current()));
 }
 
 static void coroutine_fn
@@ -723,8 +752,7 @@ process_incoming_migration_co(void *opaque)
         goto fail;
     }
 
-    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
-    migration_bh_schedule(migrate_get_current(), mis->bh);
+    migration_bh_schedule(process_incoming_migration_bh, mis);
     return;
 fail:
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
@@ -1285,9 +1313,6 @@ void migrate_set_state(int *state, int old_state, int new_state)
 
 static void migrate_fd_cleanup(MigrationState *s)
 {
-    qemu_bh_delete(s->cleanup_bh);
-    s->cleanup_bh = NULL;
-
     g_free(s->hostname);
     s->hostname = NULL;
     json_writer_free(s->vmdesc);
@@ -1343,9 +1368,7 @@ static void migrate_fd_cleanup(MigrationState *s)
 
 static void migrate_fd_cleanup_bh(void *opaque)
 {
-    MigrationState *s = opaque;
-    migrate_fd_cleanup(s);
-    object_unref(OBJECT(s));
+    migrate_fd_cleanup(opaque);
 }
 
 void migrate_set_error(MigrationState *s, const Error *error)
@@ -1568,8 +1591,6 @@ int migrate_init(MigrationState *s, Error **errp)
      * parameters/capabilities that the user set, and
      * locks.
      */
-    s->cleanup_bh = 0;
-    s->vm_start_bh = 0;
     s->to_dst_file = NULL;
     s->state = MIGRATION_STATUS_NONE;
     s->rp_state.from_dst_file = NULL;
@@ -3139,7 +3160,8 @@ static void migration_iteration_finish(MigrationState *s)
         error_report("%s: Unknown ending state %d", __func__, s->state);
         break;
     }
-    migration_bh_schedule(s, s->cleanup_bh);
+
+    migration_bh_schedule(migrate_fd_cleanup_bh, s);
     bql_unlock();
 }
 
@@ -3170,7 +3192,7 @@ static void bg_migration_iteration_finish(MigrationState *s)
         break;
     }
 
-    migration_bh_schedule(s, s->cleanup_bh);
+    migration_bh_schedule(migrate_fd_cleanup_bh, s);
     bql_unlock();
 }
 
@@ -3376,12 +3398,8 @@ static void bg_migration_vm_start_bh(void *opaque)
 {
     MigrationState *s = opaque;
 
-    qemu_bh_delete(s->vm_start_bh);
-    s->vm_start_bh = NULL;
-
     vm_resume(s->vm_old_state);
     migration_downtime_end(s);
-    object_unref(OBJECT(s));
 }
 
 /**
@@ -3485,8 +3503,7 @@ static void *bg_migration_thread(void *opaque)
      * calling VM state change notifiers from vm_start() would initiate
      * writes to virtio VQs memory which is in write-protected region.
      */
-    s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
-    migration_bh_schedule(s, s->vm_start_bh);
+    migration_bh_schedule(bg_migration_vm_start_bh, s);
     bql_unlock();
 
     while (migration_is_active(s)) {
@@ -3542,12 +3559,6 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     migrate_error_free(s);
 
     s->expected_downtime = migrate_downtime_limit();
-    if (resume) {
-        assert(s->cleanup_bh);
-    } else {
-        assert(!s->cleanup_bh);
-        s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
-    }
     if (error_in) {
         migrate_fd_error(s, error_in);
         if (resume) {
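
Every call site in the hunks above now funnels through the same two-argument scheduler, e.g. migration_bh_schedule(migrate_fd_cleanup_bh, s) in migration_iteration_finish(). Because the definition in the first hunk dropped its static qualifier, a matching prototype has to appear in one of the two other changed files, which this extract does not show; assuming it sits in the migration header, it would simply mirror the definition:

/* Assumed location (migration header, not shown in this extract); the
 * signature itself is taken from the definition in the first hunk. */
void migration_bh_schedule(QEMUBHFunc *cb, void *opaque);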