migration: Add Error** argument to .save_setup() handler
The purpose is to record a potential error in the migration stream if
qemu_savevm_state_setup() fails. Most of the current .save_setup() handlers
can be modified to use the Error argument instead of managing their own and
calling error_report() locally.

Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Harsh Prateek Bora <harshpb@linux.ibm.com>
Cc: Halil Pasic <pasic@linux.ibm.com>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Eric Blake <eblake@redhat.com>
Cc: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Cc: John Snow <jsnow@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/r/20240320064911.545001-8-clg@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
This commit is contained in:
parent 057a20099b
commit 01c3ac681b
8 changed files with 29 additions and 35 deletions
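For a handler author, the change amounts to accepting the extra Error ** parameter and filling it with error_setg() instead of printing through error_report(). Below is a minimal sketch against the updated interface, assuming the SaveVMHandlers / register_savevm_live() registration path; the "mydev" device, its state structure and its readiness check are hypothetical and not part of this commit.

/*
 * Minimal sketch of a .save_setup() handler using the new Error ** argument.
 * "mydev", MyDevState and the readiness check are made up for illustration.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/register.h"
#include "migration/qemu-file-types.h"

typedef struct MyDevState {
    bool ready;
} MyDevState;

static int mydev_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    MyDevState *s = opaque;

    if (!s->ready) {
        /* Fill the caller's Error instead of calling error_report(). */
        error_setg(errp, "mydev: device not ready for migration");
        return -1;
    }

    qemu_put_be32(f, 1); /* illustrative stream version marker */
    return 0;
}

static const SaveVMHandlers mydev_handlers = {
    .save_setup = mydev_save_setup,
    /* other handlers omitted */
};

static void mydev_register_migration(MyDevState *s)
{
    register_savevm_live("mydev", 0, 1, &mydev_handlers, s);
}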
migration/block-dirty-bitmap.c
@@ -1213,12 +1213,14 @@ fail:
     return ret;
 }
 
-static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque)
+static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque, Error **errp)
 {
     DBMSaveState *s = &((DBMState *)opaque)->save;
     SaveBitmapState *dbms = NULL;
 
     if (init_dirty_bitmap_migration(s) < 0) {
+        error_setg(errp,
+                   "Failed to initialize dirty tracking bitmap for blocks");
         return -1;
     }
 
migration/block.c
@@ -711,10 +711,9 @@ static void block_migration_cleanup(void *opaque)
     blk_mig_unlock();
 }
 
-static int block_save_setup(QEMUFile *f, void *opaque)
+static int block_save_setup(QEMUFile *f, void *opaque, Error **errp)
 {
     int ret;
-    Error *local_err = NULL;
 
     trace_migration_block_save("setup", block_mig_state.submitted,
                                block_mig_state.transferred);
@@ -722,25 +721,21 @@ static int block_save_setup(QEMUFile *f, void *opaque)
     warn_report("block migration is deprecated;"
                 " use blockdev-mirror with NBD instead");
 
-    ret = init_blk_migration(f, &local_err);
+    ret = init_blk_migration(f, errp);
     if (ret < 0) {
-        error_report_err(local_err);
         return ret;
     }
 
     /* start track dirty blocks */
     ret = set_dirty_tracking();
     if (ret) {
-        error_setg_errno(&local_err, -ret,
-                         "Failed to start block dirty tracking");
-        error_report_err(local_err);
+        error_setg_errno(errp, -ret, "Failed to start block dirty tracking");
         return ret;
     }
 
     ret = flush_blks(f);
     if (ret) {
-        error_setg_errno(&local_err, -ret, "Flushing block failed");
-        error_report_err(local_err);
+        error_setg_errno(errp, -ret, "Flushing block failed");
         return ret;
     }
     blk_mig_reset_dirty_cursor();
migration/ram.c
@@ -3066,22 +3066,23 @@ static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header,
  *
  * @f: QEMUFile where to send the data
  * @opaque: RAMState pointer
+ * @errp: pointer to Error*, to store an error if it happens.
  */
-static int ram_save_setup(QEMUFile *f, void *opaque)
+static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
 {
     RAMState **rsp = opaque;
     RAMBlock *block;
     int ret, max_hg_page_size;
 
     if (compress_threads_save_setup()) {
-        error_report("%s: failed to start compress threads", __func__);
+        error_setg(errp, "%s: failed to start compress threads", __func__);
         return -1;
     }
 
     /* migration has already setup the bitmap, reuse it. */
     if (!migration_in_colo_state()) {
         if (ram_init_all(rsp) != 0) {
-            error_report("%s: failed to setup RAM for migration", __func__);
+            error_setg(errp, "%s: failed to setup RAM for migration", __func__);
             compress_threads_save_cleanup();
             return -1;
         }
@@ -3118,14 +3119,14 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 
     ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
     if (ret < 0) {
-        error_report("%s: failed to start RDMA registration", __func__);
+        error_setg(errp, "%s: failed to start RDMA registration", __func__);
         qemu_file_set_error(f, ret);
         return ret;
     }
 
     ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
     if (ret < 0) {
-        error_report("%s: failed to stop RDMA registration", __func__);
+        error_setg(errp, "%s: failed to stop RDMA registration", __func__);
         qemu_file_set_error(f, ret);
         return ret;
     }
@@ -3142,7 +3143,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     ret = multifd_send_sync_main();
     bql_lock();
     if (ret < 0) {
-        error_report("%s: multifd synchronization failed", __func__);
+        error_setg(errp, "%s: multifd synchronization failed", __func__);
         return ret;
     }
 
@@ -3154,7 +3155,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     ret = qemu_fflush(f);
     if (ret < 0) {
-        error_report("%s failed : %s", __func__, strerror(-ret));
+        error_setg_errno(errp, -ret, "%s failed", __func__);
     }
     return ret;
 }
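A side note on the last ram.c hunk: error_setg_errno() appends the textual errno to the message by itself, so dropping the explicit strerror(-ret) from the format string loses no information. A tiny illustrative sketch (the helper name and the -EIO value are made up):

/*
 * Illustration only: error_setg_errno() appends ": <strerror(errno)>",
 * so this produces e.g. "demo_flush failed: Input/output error".
 */
#include "qemu/osdep.h"
#include "qapi/error.h"

static int demo_flush(Error **errp)
{
    int ret = -EIO;    /* pretend a flush failed */

    error_setg_errno(errp, -ret, "demo_flush failed");
    return ret;
}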
migration/savevm.c
@@ -1342,11 +1342,9 @@ int qemu_savevm_state_setup(QEMUFile *f, Error **errp)
         }
         save_section_header(f, se, QEMU_VM_SECTION_START);
 
-        ret = se->ops->save_setup(f, se->opaque);
+        ret = se->ops->save_setup(f, se->opaque, errp);
         save_section_footer(f, se);
         if (ret < 0) {
-            error_setg(errp, "failed to setup SaveStateEntry with id(name): "
-                       "%d(%s): %d", se->section_id, se->idstr, ret);
             qemu_file_set_error(f, ret);
             break;
         }
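On the consumer side, the Error filled by a handler now travels out of qemu_savevm_state_setup() through its errp argument. The following is a hypothetical caller-side sketch, not code from this commit; the header path and the reporting choice are assumptions.

/*
 * Hypothetical usage sketch: surfacing a .save_setup() failure message
 * returned through qemu_savevm_state_setup().
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/savevm.h"   /* assumed location of the prototype */

static void savevm_setup_example(QEMUFile *f)
{
    Error *local_err = NULL;

    if (qemu_savevm_state_setup(f, &local_err) < 0) {
        /* The handler's message arrives here instead of being printed
         * from deep inside the handler. */
        error_report_err(local_err);
    }
}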