migration: split common postcopy out of ram postcopy

Split the common postcopy code from the RAM-specific postcopy code.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
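
The split leaves migrate_postcopy() as the check for "any form of postcopy is enabled", while migrate_postcopy_ram() continues to guard only the RAM-specific steps (discard bitmap, debug pings). A minimal sketch of how the common helper is meant to grow once a second postcopy user exists, assuming a purely hypothetical migrate_postcopy_new_user() capability check that is not part of this patch:

    #include <stdbool.h>

    /* Existing helper: is the RAM postcopy capability enabled? */
    bool migrate_postcopy_ram(void);

    /* Hypothetical capability check for a second postcopy user
     * (illustration only, not part of this patch). */
    bool migrate_postcopy_new_user(void);

    /* Common check: is any form of postcopy requested?  Callers making
     * protocol-level decisions (e.g. whether to send POSTCOPY_ADVISE or
     * when to enter the postcopy phase) use this; RAM-only actions keep
     * using migrate_postcopy_ram(). */
    bool migrate_postcopy(void)
    {
        return migrate_postcopy_ram() || migrate_postcopy_new_user();
    }

With this patch alone, migrate_postcopy() simply returns migrate_postcopy_ram(), so behaviour is unchanged; the OR-ing of further capability checks above is only a sketch of the intended extension point.
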
Author:    Vladimir Sementsov-Ogievskiy
Date:      2017-07-10 19:30:16 +03:00
Committer: Juan Quintela
Parent:    86e1167e9a
Commit:    58110f0acb

3 changed files with 67 additions and 22 deletions

@@ -1443,6 +1443,11 @@ bool migrate_postcopy_ram(void)
     return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
 }
 
+bool migrate_postcopy(void)
+{
+    return migrate_postcopy_ram();
+}
+
 bool migrate_auto_converge(void)
 {
     MigrationState *s;
@@ -1826,9 +1831,11 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * need to tell the destination to throw any pages it's already received
      * that are dirty
      */
-    if (ram_postcopy_send_discard_bitmap(ms)) {
-        error_report("postcopy send discard bitmap failed");
-        goto fail;
+    if (migrate_postcopy_ram()) {
+        if (ram_postcopy_send_discard_bitmap(ms)) {
+            error_report("postcopy send discard bitmap failed");
+            goto fail;
+        }
     }
 
     /*
@@ -1837,8 +1844,10 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * wrap their state up here
      */
     qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
-    /* Ping just for debugging, helps line traces up */
-    qemu_savevm_send_ping(ms->to_dst_file, 2);
+    if (migrate_postcopy_ram()) {
+        /* Ping just for debugging, helps line traces up */
+        qemu_savevm_send_ping(ms->to_dst_file, 2);
+    }
 
     /*
      * While loading the device state we may trigger page transfer
@@ -1863,7 +1872,9 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
     qemu_savevm_send_postcopy_listen(fb);
 
     qemu_savevm_state_complete_precopy(fb, false, false);
-    qemu_savevm_send_ping(fb, 3);
+    if (migrate_postcopy_ram()) {
+        qemu_savevm_send_ping(fb, 3);
+    }
 
     qemu_savevm_send_postcopy_run(fb);
@@ -1898,11 +1909,13 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
     qemu_mutex_unlock_iothread();
 
-    /*
-     * Although this ping is just for debug, it could potentially be
-     * used for getting a better measurement of downtime at the source.
-     */
-    qemu_savevm_send_ping(ms->to_dst_file, 4);
+    if (migrate_postcopy_ram()) {
+        /*
+         * Although this ping is just for debug, it could potentially be
+         * used for getting a better measurement of downtime at the source.
+         */
+        qemu_savevm_send_ping(ms->to_dst_file, 4);
+    }
 
     if (migrate_release_ram()) {
         ram_postcopy_migrated_memory_release(ms);
@@ -2080,7 +2093,7 @@ static void *migration_thread(void *opaque)
         qemu_savevm_send_ping(s->to_dst_file, 1);
     }
 
-    if (migrate_postcopy_ram()) {
+    if (migrate_postcopy()) {
         /*
          * Tell the destination that we *might* want to do postcopy later;
          * if the other end can't do postcopy it should fail now, nice and
@@ -2113,7 +2126,7 @@
             if (pending_size && pending_size >= threshold_size) {
                 /* Still a significant amount to transfer */
 
-                if (migrate_postcopy_ram() &&
+                if (migrate_postcopy() &&
                     s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                     pend_nonpost <= threshold_size &&
                     atomic_read(&s->start_postcopy)) {