
This patch proposes a flag to maintain disk activation status globally.
It mostly rewrites disk activation management for QEMU, including COLO
and the QMP command xen_save_devices_state.

Backgrounds
===========

We have two problems with disk activations, one resolved, one not.

Problem 1: disk activation recovery (for switchover interruptions)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

When migration is either cancelled or fails during switchover,
especially after the disks have been inactivated, QEMU needs to
remember to re-activate the disks before the VM starts.

It used to be done separately in two paths: one in qmp_migrate_cancel(),
the other in the failure path of migration_completion().  It was fixed
in several commits scattered across QEMU.  These are the relevant
changes I found; the list may not be complete:

- In 2016, commit fe904ea824 ("migration: regain control of images when
  migration fails to complete")

- In 2017, commit 1d2acc3162 ("migration: re-active images while
  migration been canceled after inactive them")

- In 2023, commit 6dab4c93ec ("migration: Attempt disk reactivation in
  more failure scenarios")

Now that we have a slightly better picture, we can unify the
reactivation into a single path.  One side benefit of doing so is that
we can move the disk operation out of the QMP command "migrate_cancel":
in the future we may want to make "migrate_cancel" OOB-compatible,
which requires the command not to need the BQL in the first place.
This change already achieves that and makes the migrate_cancel command
lightweight.

Problem 2: disk invalidation on top of invalidated disks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is an unresolved bug in current QEMU; see the link in "Resolves:"
at the end.  It turns out that besides the source switchover phase
(problem 1 above), QEMU also needs to track block activation on the
destination.

Consider two migrations in a row where the VM stays paused throughout.
In that scenario the disks are never activated, not even by the time
the first migration completes.  When the second migration starts, if
QEMU doesn't know the status of the disks, it will try to inactivate
them again.  The issue is that the block layer API
bdrv_inactivate_all() will crash QEMU when invoked on already-inactive
disks for the second migration.  For details, see the bug link at the
end.

Implementation
==============

This patch proposes to maintain disk activation with a global flag, so
that we know:

- If we inactivated the disks for migration but the migration was then
  cancelled or failed, QEMU knows it should reactivate the disks.

- On the incoming side, if the disks were never activated and another
  migration is triggered, QEMU can tell that inactivation is not needed
  for the second migration.

We used to have disk_inactive, but it only solves the first issue, not
the second.  Also, it was maintained in completely separate paths, so
it was extremely hard to follow either how the flag changes, how long
it stays valid, or when the disks will be reactivated.

Convert the existing disk_inactive flag into that global flag (also
inverting its name), and maintain the disk activation status for the
whole lifecycle of QEMU.  That includes the incoming QEMU.

Put both error cases of source migration (failure, cancellation)
together into migration_iteration_finish(), which is invoked in either
scenario, so that part of QEMU behaves the same as before.  With the
disk activation status maintained globally, however, we not only clean
up quite a few ad-hoc paths that tried to track it (e.g. in the
postcopy code), but also fix the crash of problem 2 in one shot.

For a freshly started QEMU, the flag is initialized to TRUE, showing
that QEMU owns the disks by default.  For an incoming migrated QEMU,
the flag is initialized to FALSE once and for all, showing that the
destination QEMU doesn't own the disks until switchover.  That is
guaranteed by the "once" variable.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2395
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Message-Id: <20241206230838.1111496-7-peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
/*
 * QEMU migration miscellaneous exported functions
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#ifndef MIGRATION_MISC_H
#define MIGRATION_MISC_H

#include "qemu/notify.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-types-net.h"
#include "migration/client-options.h"

/* migration/ram.c */

typedef enum PrecopyNotifyReason {
    PRECOPY_NOTIFY_SETUP = 0,
    PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC = 1,
    PRECOPY_NOTIFY_AFTER_BITMAP_SYNC = 2,
    PRECOPY_NOTIFY_COMPLETE = 3,
    PRECOPY_NOTIFY_CLEANUP = 4,
    PRECOPY_NOTIFY_MAX = 5,
} PrecopyNotifyReason;

typedef struct PrecopyNotifyData {
    enum PrecopyNotifyReason reason;
} PrecopyNotifyData;

void precopy_infrastructure_init(void);
void precopy_add_notifier(NotifierWithReturn *n);
void precopy_remove_notifier(NotifierWithReturn *n);
int precopy_notify(PrecopyNotifyReason reason, Error **errp);

void qemu_guest_free_page_hint(void *addr, size_t len);
bool migrate_ram_is_ignored(RAMBlock *block);

/* migration/block.c */

AnnounceParameters *migrate_announce_params(void);
/* migration/savevm.c */

void dump_vmstate_json_to_file(FILE *out_fp);

/* migration/migration.c */
void migration_object_init(void);
void migration_shutdown(void);

bool migration_is_running(void);
bool migration_thread_is_self(void);

typedef enum MigrationEventType {
    MIG_EVENT_PRECOPY_SETUP,
    MIG_EVENT_PRECOPY_DONE,
    MIG_EVENT_PRECOPY_FAILED,
    MIG_EVENT_MAX
} MigrationEventType;

typedef struct MigrationEvent {
    MigrationEventType type;
} MigrationEvent;

/*
 * A MigrationNotifyFunc may return an error code and an Error object,
 * but only when @e->type is MIG_EVENT_PRECOPY_SETUP. The code is an int
 * to allow for different failure modes and recovery actions.
 */
typedef int (*MigrationNotifyFunc)(NotifierWithReturn *notify,
                                   MigrationEvent *e, Error **errp);

/*
 * Register the notifier @notify to be called when a migration event occurs
 * for MIG_MODE_NORMAL, as specified by the MigrationEvent passed to @func.
 * Notifiers may receive events in any of the following orders:
 * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_DONE
 * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_FAILED
 * - MIG_EVENT_PRECOPY_FAILED
 */
void migration_add_notifier(NotifierWithReturn *notify,
                            MigrationNotifyFunc func);

/*
 * Same as migration_add_notifier, but applies to the specified @mode.
 */
void migration_add_notifier_mode(NotifierWithReturn *notify,
                                 MigrationNotifyFunc func, MigMode mode);

void migration_remove_notifier(NotifierWithReturn *notify);
void migration_file_set_error(int ret, Error *err);

/* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
bool migration_in_incoming_postcopy(void);

/* True if incoming migration entered POSTCOPY_INCOMING_ADVISE */
bool migration_incoming_postcopy_advised(void);

/* True if background snapshot is active */
bool migration_in_bg_snapshot(void);

/* Wrapper for block active/inactive operations */
bool migration_block_activate(Error **errp);
bool migration_block_inactivate(void);

#endif
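
For reference, the event-notifier API declared above is typically
consumed as in the following sketch.  Everything device-side here
(MyDevice, my_device_can_migrate(), the callback and realize/unrealize
helpers) is hypothetical; only the migration_* calls and the notifier
types come from the header.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "migration/misc.h"

    typedef struct MyDevice {
        NotifierWithReturn migration_state; /* embedded notifier node */
        bool migrating;
    } MyDevice;

    /* Hypothetical predicate: can the device be migrated right now? */
    static bool my_device_can_migrate(MyDevice *dev)
    {
        return true;
    }

    /*
     * Called for every migration event.  Per the header comment,
     * returning an error is only honoured for MIG_EVENT_PRECOPY_SETUP.
     */
    static int my_device_migration_notify(NotifierWithReturn *notifier,
                                          MigrationEvent *e, Error **errp)
    {
        MyDevice *dev = container_of(notifier, MyDevice, migration_state);

        switch (e->type) {
        case MIG_EVENT_PRECOPY_SETUP:
            if (!my_device_can_migrate(dev)) {
                error_setg(errp, "device busy, refusing migration");
                return -EBUSY;          /* aborts the migration */
            }
            dev->migrating = true;
            break;
        case MIG_EVENT_PRECOPY_DONE:
        case MIG_EVENT_PRECOPY_FAILED:
            dev->migrating = false;     /* return value ignored here */
            break;
        default:
            break;
        }
        return 0;
    }

    static void my_device_realize(MyDevice *dev)
    {
        migration_add_notifier(&dev->migration_state,
                               my_device_migration_notify);
    }

    static void my_device_unrealize(MyDevice *dev)
    {
        migration_remove_notifier(&dev->migration_state);
    }

Embedding the NotifierWithReturn in the device struct lets the callback
recover its owner with container_of(), which is why registration takes
the notifier node rather than an opaque pointer.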