mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-05 16:53:55 -06:00
migration/ram: Implement save_postcopy_prepare()
Implement save_postcopy_prepare(), preparing for the enablement of both multifd and postcopy. Signed-off-by: Peter Xu <peterx@redhat.com> Signed-off-by: Prasad Pandit <pjp@fedoraproject.org> Reviewed-by: Fabiano Rosas <farosas@suse.de> Message-ID: <20250411114534.3370816-5-ppandit@redhat.com> Signed-off-by: Fabiano Rosas <farosas@suse.de>
This commit is contained in:
parent
1d48111601
commit
ad8d82ffbb
1 changed file with 37 additions and 0 deletions
|
@ -4398,6 +4398,42 @@ static int ram_resume_prepare(MigrationState *s, void *opaque)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Hook run on the source side right before the switch to postcopy.
 *
 * Returns true on success; on failure sets *errp and returns false.
 */
static bool ram_save_postcopy_prepare(QEMUFile *f, void *opaque, Error **errp)
{
    if (migrate_multifd()) {
        /*
         * When multifd is enabled, source QEMU needs to make sure all the
         * pages queued before postcopy starts have been flushed.
         *
         * The load of these pages must happen before switching to postcopy.
         * It's because loading of guest pages (so far) in multifd recv
         * threads is still non-atomic, so the load cannot happen with vCPUs
         * running on the destination side.
         *
         * This flush and sync will guarantee that those pages are loaded
         * _before_ postcopy starts on the destination.  The rationale is,
         * this happens before VM stops (and before source QEMU sends all
         * the rest of the postcopy messages).  So when the destination QEMU
         * receives the postcopy messages, it must have received the sync
         * message on the main channel (either RAM_SAVE_FLAG_MULTIFD_FLUSH,
         * or RAM_SAVE_FLAG_EOS), and such message would guarantee that
         * all previous guest pages queued in the multifd channels are
         * completely loaded.
         */
        int ret = multifd_ram_flush_and_sync(f);

        if (ret < 0) {
            error_setg(errp, "%s: multifd flush and sync failed", __func__);
            return false;
        }
    }

    /* Terminate this section of the stream so the destination can proceed. */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return true;
}
|
||||||
|
|
||||||
void postcopy_preempt_shutdown_file(MigrationState *s)
|
void postcopy_preempt_shutdown_file(MigrationState *s)
|
||||||
{
|
{
|
||||||
qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
|
qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
|
||||||
|
@ -4417,6 +4453,7 @@ static SaveVMHandlers savevm_ram_handlers = {
|
||||||
.load_setup = ram_load_setup,
|
.load_setup = ram_load_setup,
|
||||||
.load_cleanup = ram_load_cleanup,
|
.load_cleanup = ram_load_cleanup,
|
||||||
.resume_prepare = ram_resume_prepare,
|
.resume_prepare = ram_resume_prepare,
|
||||||
|
.save_postcopy_prepare = ram_save_postcopy_prepare,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
|
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue