migration: preserve suspended for snapshot
Restoring a snapshot can break a suspended guest. Snapshots suffer from the same suspended-state issues that affect live migration, plus they must handle an additional problematic scenario, which is that a running vm must remain running if it loads a suspended snapshot.

To save, the existing vm_stop call now completely stops the suspended state. Finish with vm_resume to leave the vm in the state it had prior to the save, correctly restoring the suspended state.

To load, if the snapshot is not suspended, then vm_stop + vm_resume correctly handles all states, and leaves the vm in the state it had prior to the load. However, if the snapshot is suspended, restoration is trickier. First, call vm_resume to restore the state to suspended so the current state matches the saved state. Then, if the pre-load state is running, call wakeup to resume running.

Prior to these changes, the vm_stop to RUN_STATE_SAVE_VM and RUN_STATE_RESTORE_VM did not change runstate if the current state was suspended, but now it does, so allow these transitions.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Link: https://lore.kernel.org/r/1704312341-66640-8-git-send-email-steven.sistare@oracle.com
Signed-off-by: Peter Xu <peterx@redhat.com>
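The load path is the subtle part, so here is a minimal, self-contained C model of the behaviour described above. It is a sketch, not QEMU code: the RunState enum is cut down to three values, the vm_was_suspended flag is a stand-in for the machinery that records a suspended machine image, and the helpers only mimic the semantics of the real vm_resume and wakeup request.

/* A reduced model of the save/load resume logic -- not QEMU code.  The
 * names loosely mirror the real ones; vm_was_suspended stands in for the
 * state that records a suspended machine image. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { RUN_STATE_RUNNING, RUN_STATE_SUSPENDED, RUN_STATE_RESTORE_VM } RunState;

static RunState current_state = RUN_STATE_RUNNING;
static bool vm_was_suspended;            /* set when a suspended image is loaded */

static RunState runstate_get(void)
{
    return current_state;
}

/* Starting a VM whose loaded image was suspended leaves it suspended. */
static void vm_start(void)
{
    current_state = vm_was_suspended ? RUN_STATE_SUSPENDED : RUN_STATE_RUNNING;
    vm_was_suspended = false;
}

/* Return the VM to the state recorded before the save or load. */
static void vm_resume(RunState state)
{
    if (state == RUN_STATE_RUNNING) {
        vm_start();
    } else {
        current_state = state;
    }
}

/* A wakeup request moves a suspended guest back to running. */
static void wakeup(void)
{
    if (current_state == RUN_STATE_SUSPENDED) {
        current_state = RUN_STATE_RUNNING;
    }
}

/* Post-load step: restore the pre-load state, then wake the guest if it
 * was running before the load but the loaded image left it suspended. */
static void load_snapshot_resume(RunState pre_load_state)
{
    vm_resume(pre_load_state);
    if (pre_load_state == RUN_STATE_RUNNING &&
        runstate_get() == RUN_STATE_SUSPENDED) {
        wakeup();
    }
}

int main(void)
{
    RunState pre_load = runstate_get();      /* running before the load */

    current_state = RUN_STATE_RESTORE_VM;    /* as vm_stop(RUN_STATE_RESTORE_VM) would */
    vm_was_suspended = true;                 /* the snapshot held a suspended guest */

    load_snapshot_resume(pre_load);
    printf("final state: %s\n",
           current_state == RUN_STATE_RUNNING ? "running" : "not running");
    return 0;
}

Compiled and run, the model ends in the running state, matching the requirement that a running vm stays running after loading a suspended snapshot; with pre_load set to RUN_STATE_SUSPENDED it would remain suspended instead.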
This commit is contained in:
parent b4e9ddccd1
commit 58b105703e

5 changed files with 32 additions and 13 deletions
migration/savevm.c

@@ -3046,7 +3046,7 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
     QEMUSnapshotInfo sn1, *sn = &sn1;
     int ret = -1, ret2;
     QEMUFile *f;
-    int saved_vm_running;
+    RunState saved_state = runstate_get();
     uint64_t vm_state_size;
     g_autoptr(GDateTime) now = g_date_time_new_now_local();

@@ -3092,8 +3092,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
         return false;
     }

-    saved_vm_running = runstate_is_running();
-
     global_state_store();
     vm_stop(RUN_STATE_SAVE_VM);

@@ -3147,9 +3145,7 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
 the_end:
     bdrv_drain_all_end();

-    if (saved_vm_running) {
-        vm_start();
-    }
+    vm_resume(saved_state);
     return ret == 0;
 }

@@ -3317,6 +3313,14 @@ err_drain:
     return false;
 }

+void load_snapshot_resume(RunState state)
+{
+    vm_resume(state);
+    if (state == RUN_STATE_RUNNING && runstate_get() == RUN_STATE_SUSPENDED) {
+        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, &error_abort);
+    }
+}
+
 bool delete_snapshot(const char *name, bool has_devices,
                      strList *devices, Error **errp)
 {
@@ -3381,16 +3385,15 @@ static void snapshot_load_job_bh(void *opaque)
 {
     Job *job = opaque;
     SnapshotJob *s = container_of(job, SnapshotJob, common);
-    int orig_vm_running;
+    RunState orig_state = runstate_get();

     job_progress_set_remaining(&s->common, 1);

-    orig_vm_running = runstate_is_running();
     vm_stop(RUN_STATE_RESTORE_VM);

     s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp);
-    if (s->ret && orig_vm_running) {
-        vm_start();
+    if (s->ret) {
+        load_snapshot_resume(orig_state);
     }

     job_progress_update(&s->common, 1);
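Only the migration/savevm.c hunks loaded on this page; the hunks for the remaining changed files are not shown. The commit message's note about allowing the new transitions refers to QEMU's run-state transition table, and the sketch below models what such an allow-list check looks like. It is an illustration under that assumption, not a copy of the missing hunks; the struct, table, and helper here are toy stand-ins.

/* A toy allow-list check in the spirit of QEMU's runstate_transitions_def[]
 * table (a sketch; the real table lives in system/runstate.c and is not
 * part of the hunks shown above). */
#include <stdbool.h>
#include <stddef.h>

typedef enum { RUN_STATE_RUNNING, RUN_STATE_SUSPENDED,
               RUN_STATE_SAVE_VM, RUN_STATE_RESTORE_VM } RunState;

typedef struct { RunState from, to; } RunStateTransition;

static const RunStateTransition transitions[] = {
    { RUN_STATE_RUNNING,   RUN_STATE_SAVE_VM },
    { RUN_STATE_RUNNING,   RUN_STATE_RESTORE_VM },
    /* The pairs the commit message says are newly allowed: */
    { RUN_STATE_SUSPENDED, RUN_STATE_SAVE_VM },
    { RUN_STATE_SUSPENDED, RUN_STATE_RESTORE_VM },
};

/* Returns true if moving from 'from' to 'to' is permitted by the table. */
bool transition_allowed(RunState from, RunState to)
{
    for (size_t i = 0; i < sizeof(transitions) / sizeof(transitions[0]); i++) {
        if (transitions[i].from == from && transitions[i].to == to) {
            return true;
        }
    }
    return false;
}

In QEMU, entering a state that is not listed for the current one trips an assertion, which is why the suspended-to-SAVE_VM and suspended-to-RESTORE_VM pairs have to be permitted once vm_stop starts changing the runstate of a suspended guest.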