migration/multifd: Unify RAM_SAVE_FLAG_MULTIFD_FLUSH messages

RAM_SAVE_FLAG_MULTIFD_FLUSH message should always be correlated to a sync
request on src.  Unify such message into one place, and conditionally send
the message only if necessary.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20241206224755.1108686-5-peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
This commit is contained in:
Peter Xu 2024-12-06 17:47:52 -05:00 committed by Fabiano Rosas
parent 604b4749c5
commit e5f14aa5fe
3 changed files with 30 additions and 17 deletions

View file

@@ -20,6 +20,7 @@
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "qemu-file.h"
static MultiFDSendData *multifd_ram_send;
@@ -343,9 +344,10 @@ retry:
return true;
}
int multifd_ram_flush_and_sync(void)
int multifd_ram_flush_and_sync(QEMUFile *f)
{
MultiFDSyncReq req;
int ret;
if (!migrate_multifd()) {
return 0;
@@ -361,7 +363,28 @@ int multifd_ram_flush_and_sync(void)
/* File migrations only need to sync with threads */
req = migrate_mapped_ram() ? MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL;
return multifd_send_sync_main(req);
ret = multifd_send_sync_main(req);
if (ret) {
return ret;
}
/* If we don't need to sync with remote at all, nothing else to do */
if (req == MULTIFD_SYNC_LOCAL) {
return 0;
}
/*
* Old QEMUs don't understand RAM_SAVE_FLAG_MULTIFD_FLUSH, it relies
* on RAM_SAVE_FLAG_EOS instead.
*/
if (migrate_multifd_flush_after_each_section()) {
return 0;
}
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
qemu_fflush(f);
return 0;
}
bool multifd_send_prepare_common(MultiFDSendParams *p)

View file

@@ -354,7 +354,7 @@ static inline uint32_t multifd_ram_page_count(void)
void multifd_ram_save_setup(void);
void multifd_ram_save_cleanup(void);
int multifd_ram_flush_and_sync(void);
int multifd_ram_flush_and_sync(QEMUFile *f);
size_t multifd_ram_payload_size(void);
void multifd_ram_fill_packet(MultiFDSendParams *p);
int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp);

View file

@@ -1306,15 +1306,10 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
(!migrate_multifd_flush_after_each_section() ||
migrate_mapped_ram())) {
QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
int ret = multifd_ram_flush_and_sync();
int ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
if (!migrate_mapped_ram()) {
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
qemu_fflush(f);
}
}
/* Hit the end of the list */
@@ -3044,18 +3039,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
}
bql_unlock();
ret = multifd_ram_flush_and_sync();
ret = multifd_ram_flush_and_sync(f);
bql_lock();
if (ret < 0) {
error_setg(errp, "%s: multifd synchronization failed", __func__);
return ret;
}
if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
&& !migrate_mapped_ram()) {
qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
}
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
ret = qemu_fflush(f);
if (ret < 0) {
@@ -3190,7 +3180,7 @@ out:
if (ret >= 0 && migration_is_running()) {
if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
!migrate_mapped_ram()) {
ret = multifd_ram_flush_and_sync();
ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
@@ -3268,7 +3258,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
* Only the old dest QEMU will need this sync, because each EOS
* will require one SYNC message on each channel.
*/
ret = multifd_ram_flush_and_sync();
ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}