migration: Change migrate_fd_ to migration_
Remove all instances of _fd_ from the migration generic code. These functions have grown over time and the _fd_ part is now just confusing.

The one exception is migrate_fd_error(): renaming it to migration_error() would be too vague, and since it is only used for migration_connect() failures, it becomes migration_connect_set_error() instead.

Reviewed-by: Peter Xu <peterx@redhat.com>
Message-ID: <20250213175927.19642-4-farosas@suse.de>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
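For quick reference, the renames in this patch reduce to the following declarations. The signatures are copied from the hunks below (the first three are static to migration.c, the last is the public prototype from the header hunk); the trace event migrate_fd_cleanup is renamed to migration_cleanup in the same way. Types such as MigrationState and Error come from the existing QEMU headers; this is only a summary sketch of the diff, not new code.

static void migration_cleanup(MigrationState *s);            /* was migrate_fd_cleanup() */
static void migration_cleanup_bh(void *opaque);               /* was migrate_fd_cleanup_bh() */
static void migration_connect_set_error(MigrationState *s,
                                        const Error *error);  /* was migrate_fd_error() */
void migration_connect(MigrationState *s, Error *error_in);   /* was migrate_fd_connect() */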
parent 8444d09381
commit 4bbadfc55e
6 changed files with 21 additions and 21 deletions
@@ -74,7 +74,7 @@ void migration_channel_connect(MigrationState *s,
             if (!error) {
                 /* tls_channel_connect will call back to this
                  * function after the TLS handshake,
-                 * so we mustn't call migrate_fd_connect until then
+                 * so we mustn't call migration_connect until then
                  */

                 return;
@@ -89,7 +89,7 @@ void migration_channel_connect(MigrationState *s,
             qemu_mutex_unlock(&s->qemu_file_lock);
         }
     }
-    migrate_fd_connect(s, error);
+    migration_connect(s, error);
     error_free(error);
 }
@@ -1423,12 +1423,12 @@ static void migration_cleanup_json_writer(MigrationState *s)
     g_clear_pointer(&s->vmdesc, json_writer_free);
 }

-static void migrate_fd_cleanup(MigrationState *s)
+static void migration_cleanup(MigrationState *s)
 {
     MigrationEventType type;
     QEMUFile *tmp = NULL;

-    trace_migrate_fd_cleanup();
+    trace_migration_cleanup();

     migration_cleanup_json_writer(s);

@@ -1485,9 +1485,9 @@ static void migrate_fd_cleanup(MigrationState *s)
     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
 }

-static void migrate_fd_cleanup_bh(void *opaque)
+static void migration_cleanup_bh(void *opaque)
 {
-    migrate_fd_cleanup(opaque);
+    migration_cleanup(opaque);
 }

 void migrate_set_error(MigrationState *s, const Error *error)
@@ -1517,7 +1517,7 @@ static void migrate_error_free(MigrationState *s)
     }
 }

-static void migrate_fd_error(MigrationState *s, const Error *error)
+static void migration_connect_set_error(MigrationState *s, const Error *error)
 {
     MigrationStatus current = s->state;
     MigrationStatus next;
@@ -2198,7 +2198,7 @@ void qmp_migrate(const char *uri, bool has_channels,

 out:
     if (local_err) {
-        migrate_fd_error(s, local_err);
+        migration_connect_set_error(s, local_err);
         error_propagate(errp, local_err);
     }
 }
@@ -2243,7 +2243,7 @@ static void qmp_migrate_finish(MigrationAddress *addr, bool resume_requested,
         if (!resume_requested) {
             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
         }
-        migrate_fd_error(s, local_err);
+        migration_connect_set_error(s, local_err);
         error_propagate(errp, local_err);
         return;
     }
@@ -3427,7 +3427,7 @@ static void migration_iteration_finish(MigrationState *s)
         break;
     }

-    migration_bh_schedule(migrate_fd_cleanup_bh, s);
+    migration_bh_schedule(migration_cleanup_bh, s);
     bql_unlock();
 }

@@ -3455,7 +3455,7 @@ static void bg_migration_iteration_finish(MigrationState *s)
         break;
     }

-    migration_bh_schedule(migrate_fd_cleanup_bh, s);
+    migration_bh_schedule(migration_cleanup_bh, s);
     bql_unlock();
 }

@@ -3837,7 +3837,7 @@ fail_setup:
     return NULL;
 }

-void migrate_fd_connect(MigrationState *s, Error *error_in)
+void migration_connect(MigrationState *s, Error *error_in)
 {
     Error *local_err = NULL;
     uint64_t rate_limit;
@@ -3847,24 +3847,24 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     /*
      * If there's a previous error, free it and prepare for another one.
      * Meanwhile if migration completes successfully, there won't have an error
-     * dumped when calling migrate_fd_cleanup().
+     * dumped when calling migration_cleanup().
      */
     migrate_error_free(s);

     s->expected_downtime = migrate_downtime_limit();
     if (error_in) {
-        migrate_fd_error(s, error_in);
+        migration_connect_set_error(s, error_in);
         if (resume) {
             /*
              * Don't do cleanup for resume if channel is invalid, but only dump
              * the error. We wait for another channel connect from the user.
              * The error_report still gives HMP user a hint on what failed.
-             * It's normally done in migrate_fd_cleanup(), but call it here
+             * It's normally done in migration_cleanup(), but call it here
              * explicitly.
              */
             error_report_err(error_copy(s->error));
         } else {
-            migrate_fd_cleanup(s);
+            migration_cleanup(s);
         }
         return;
     }
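The hunk above is where the rename touches the most behavior: migration_connect() is the single entry point an outgoing transport reaches once its channel setup finishes, whether or not setup produced an error. A minimal sketch of that hand-off follows; example_start_outgoing() is a hypothetical caller, while migration_connect(), migration_connect_set_error(), migration_cleanup() and error_free() are the functions visible in this diff (the real callers are migration_channel_connect() and rdma_start_outgoing_migration()).

/*
 * Hypothetical transport setup path, sketching the hand-off after this
 * rename.  Pass NULL on success or the setup error on failure; on
 * failure migration_connect() records the error via
 * migration_connect_set_error() and, except when resuming a paused
 * migration, tears state down through migration_cleanup().
 */
static void example_start_outgoing(MigrationState *s, Error *setup_err)
{
    migration_connect(s, setup_err);
    error_free(setup_err);    /* migration_connect() keeps its own copy */
}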
@@ -3944,7 +3944,7 @@ fail:
     migrate_set_error(s, local_err);
     migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
     error_report_err(local_err);
-    migrate_fd_cleanup(s);
+    migration_cleanup(s);
 }

 static void migration_class_init(ObjectClass *klass, void *data)
@@ -517,7 +517,7 @@ bool migration_has_all_channels(void);
 void migrate_set_error(MigrationState *s, const Error *error);
 bool migrate_has_error(MigrationState *s);

-void migrate_fd_connect(MigrationState *s, Error *error_in);
+void migration_connect(MigrationState *s, Error *error_in);

 int migration_call_notifiers(MigrationState *s, MigrationEventType type,
                              Error **errp);
@@ -444,7 +444,7 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
      * channels have no I/O handler callback registered when reaching
      * here, because migration thread will wait for all multifd channel
      * establishments to complete during setup. Since
-     * migrate_fd_cleanup() will be scheduled in main thread too, all
+     * migration_cleanup() will be scheduled in main thread too, all
      * previous callbacks should guarantee to be completed when
      * reaching here. See multifd_send_state.channels_created and its
      * usage. In the future, we could replace this with an assert
@@ -4174,7 +4174,7 @@ void rdma_start_outgoing_migration(void *opaque,

     s->to_dst_file = rdma_new_output(rdma);
     s->rdma_migration = true;
-    migrate_fd_connect(s, NULL);
+    migration_connect(s, NULL);
     return;
 return_path_err:
     qemu_rdma_cleanup(rdma);
@@ -154,7 +154,7 @@ multifd_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostnam

 # migration.c
 migrate_set_state(const char *new_state) "new state %s"
-migrate_fd_cleanup(void) ""
+migration_cleanup(void) ""
 migrate_error(const char *error_desc) "error=%s"
 migration_cancel(void) ""
 migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in %s at 0x%zx len 0x%zx"