migration/multifd: Implement zero page transmission on the multifd thread.

1. Add a zero_pages field in MultiFDPacket_t.
2. Implement zero page detection and handling on the multifd
threads for the non-compression, zlib and zstd compression backends
(see the sketch below).
3. Add a new value 'multifd' to the ZeroPageDetection enumeration.
4. Add zero page counters and update the multifd send/receive tracing
format to track the newly added counters.

Signed-off-by: Hao Xiang <hao.xiang@bytedance.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20240311180015.3359271-5-hao.xiang@linux.dev
Signed-off-by: Peter Xu <peterx@redhat.com>
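
The helpers referenced below, multifd_send_zero_page_detect() and
multifd_recv_zero_page_process(), are defined elsewhere in this patch and do
not appear in the migration/multifd.c hunks shown here. The idea behind the
send-side detection is to scan every queued page on the multifd thread and
partition the packet's offset[] array in place, so that normal pages occupy
indices [0, normal_num) and zero pages occupy [normal_num, num). What follows
is a minimal, self-contained C sketch of that partition using toy types;
ToyPages, page_is_zero() and zero_page_detect() are illustrative stand-ins,
not QEMU code.

/*
 * Minimal sketch of the zero page detection idea (illustrative only; the
 * real code operates on MultiFDPages_t and uses buf_is_zero() from
 * qemu/cutils.h).
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

typedef struct {
    uint8_t *host;         /* base address of the RAM block */
    uint64_t offset[8];    /* page offsets queued for this packet */
    uint32_t num;          /* total pages queued */
    uint32_t normal_num;   /* set by detection: count of non-zero pages */
} ToyPages;

/* Stand-in for QEMU's buf_is_zero(). */
static bool page_is_zero(const uint8_t *page)
{
    for (size_t i = 0; i < PAGE_SIZE; i++) {
        if (page[i]) {
            return false;
        }
    }
    return true;
}

/*
 * Partition offset[] in place: non-zero pages end up in [0, normal_num),
 * zero pages in [normal_num, num).  Only offsets move, no page data is
 * copied.
 */
static void zero_page_detect(ToyPages *pages)
{
    uint32_t i = 0, j = pages->num;

    while (i < j) {
        if (!page_is_zero(pages->host + pages->offset[i])) {
            i++;                                /* keep normal page in place */
        } else {
            uint64_t tmp = pages->offset[i];    /* move zero page to the tail */
            pages->offset[i] = pages->offset[--j];
            pages->offset[j] = tmp;
        }
    }
    pages->normal_num = i;
}

int main(void)
{
    static uint8_t ram[4 * PAGE_SIZE];
    ToyPages pages = {
        .host = ram, .num = 4,
        .offset = { 0, PAGE_SIZE, 2 * PAGE_SIZE, 3 * PAGE_SIZE },
    };

    memset(ram + PAGE_SIZE, 0xaa, PAGE_SIZE);   /* only page 1 is non-zero */
    zero_page_detect(&pages);
    printf("normal pages: %" PRIu32 ", zero pages: %" PRIu32 "\n",
           pages.normal_num, pages.num - pages.normal_num);
    return 0;
}

With this layout the packet only needs the normal_pages and zero_pages
counts; the offsets stay in a single array, which is why the receive side
below reads zero page offsets at packet->offset[p->normal_num + i].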
Commit 303e6f54f9 (parent 5fdbb1dfcc)
Authored by Hao Xiang on 2024-03-11 18:00:12 +00:00, committed by Peter Xu
10 changed files with 228 additions and 32 deletions

migration/multifd.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/rcu.h"
#include "exec/target_page.h"
#include "sysemu/sysemu.h"
@@ -111,12 +112,17 @@ void multifd_send_channel_created(void)
static void multifd_set_file_bitmap(MultiFDSendParams *p)
{
MultiFDPages_t *pages = p->pages;
uint32_t zero_num = p->pages->num - p->pages->normal_num;
assert(pages->block);
for (int i = 0; i < p->pages->num; i++) {
for (int i = 0; i < p->pages->normal_num; i++) {
ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true);
}
for (int i = p->pages->normal_num; i < p->pages->normal_num + zero_num; i++) {
ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false);
}
}
/* Multifd without compression */
@@ -153,13 +159,13 @@ static void multifd_send_prepare_iovs(MultiFDSendParams *p)
{
MultiFDPages_t *pages = p->pages;
for (int i = 0; i < pages->num; i++) {
for (int i = 0; i < pages->normal_num; i++) {
p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
p->iov[p->iovs_num].iov_len = p->page_size;
p->iovs_num++;
}
p->next_packet_size = pages->num * p->page_size;
p->next_packet_size = pages->normal_num * p->page_size;
}
/**
@@ -178,6 +184,8 @@ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
bool use_zero_copy_send = migrate_zero_copy_send();
int ret;
multifd_send_zero_page_detect(p);
if (!multifd_use_packets()) {
multifd_send_prepare_iovs(p);
multifd_set_file_bitmap(p);
@@ -261,6 +269,13 @@ static int nocomp_recv(MultiFDRecvParams *p, Error **errp)
p->id, flags, MULTIFD_FLAG_NOCOMP);
return -1;
}
multifd_recv_zero_page_process(p);
if (!p->normal_num) {
return 0;
}
for (int i = 0; i < p->normal_num; i++) {
p->iov[i].iov_base = p->host + p->normal[i];
p->iov[i].iov_len = p->page_size;
@@ -295,6 +310,7 @@ static void multifd_pages_reset(MultiFDPages_t *pages)
* overwritten later when reused.
*/
pages->num = 0;
pages->normal_num = 0;
pages->block = NULL;
}
@@ -386,11 +402,13 @@ void multifd_send_fill_packet(MultiFDSendParams *p)
MultiFDPacket_t *packet = p->packet;
MultiFDPages_t *pages = p->pages;
uint64_t packet_num;
uint32_t zero_num = pages->num - pages->normal_num;
int i;
packet->flags = cpu_to_be32(p->flags);
packet->pages_alloc = cpu_to_be32(p->pages->allocated);
packet->normal_pages = cpu_to_be32(pages->num);
packet->normal_pages = cpu_to_be32(pages->normal_num);
packet->zero_pages = cpu_to_be32(zero_num);
packet->next_packet_size = cpu_to_be32(p->next_packet_size);
packet_num = qatomic_fetch_inc(&multifd_send_state->packet_num);
@@ -408,10 +426,11 @@ void multifd_send_fill_packet(MultiFDSendParams *p)
}
p->packets_sent++;
p->total_normal_pages += pages->num;
p->total_normal_pages += pages->normal_num;
p->total_zero_pages += zero_num;
trace_multifd_send(p->id, packet_num, pages->num, p->flags,
p->next_packet_size);
trace_multifd_send(p->id, packet_num, pages->normal_num, zero_num,
p->flags, p->next_packet_size);
}
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
@@ -452,20 +471,29 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
p->normal_num = be32_to_cpu(packet->normal_pages);
if (p->normal_num > packet->pages_alloc) {
error_setg(errp, "multifd: received packet "
"with %u pages and expected maximum pages are %u",
"with %u normal pages and expected maximum pages are %u",
p->normal_num, packet->pages_alloc);
return -1;
}
p->zero_num = be32_to_cpu(packet->zero_pages);
if (p->zero_num > packet->pages_alloc - p->normal_num) {
error_setg(errp, "multifd: received packet "
"with %u zero pages and expected maximum zero pages are %u",
p->zero_num, packet->pages_alloc - p->normal_num);
return -1;
}
p->next_packet_size = be32_to_cpu(packet->next_packet_size);
p->packet_num = be64_to_cpu(packet->packet_num);
p->packets_recved++;
p->total_normal_pages += p->normal_num;
p->total_zero_pages += p->zero_num;
trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->flags,
p->next_packet_size);
trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num,
p->flags, p->next_packet_size);
if (p->normal_num == 0) {
if (p->normal_num == 0 && p->zero_num == 0) {
return 0;
}
@@ -491,6 +519,18 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
p->normal[i] = offset;
}
for (i = 0; i < p->zero_num; i++) {
uint64_t offset = be64_to_cpu(packet->offset[p->normal_num + i]);
if (offset > (p->block->used_length - p->page_size)) {
error_setg(errp, "multifd: offset too long %" PRIu64
" (max " RAM_ADDR_FMT ")",
offset, p->block->used_length);
return -1;
}
p->zero[i] = offset;
}
return 0;
}
@@ -918,6 +958,8 @@ static void *multifd_send_thread(void *opaque)
stat64_add(&mig_stats.multifd_bytes,
p->next_packet_size + p->packet_len);
stat64_add(&mig_stats.normal_pages, pages->normal_num);
stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num);
multifd_pages_reset(p->pages);
p->next_packet_size = 0;
@@ -965,7 +1007,8 @@ out:
rcu_unregister_thread();
migration_threads_remove(thread);
trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages);
trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages,
p->total_zero_pages);
return NULL;
}
@@ -1316,6 +1359,8 @@ static void multifd_recv_cleanup_channel(MultiFDRecvParams *p)
p->iov = NULL;
g_free(p->normal);
p->normal = NULL;
g_free(p->zero);
p->zero = NULL;
multifd_recv_state->ops->recv_cleanup(p);
}
@@ -1449,7 +1494,7 @@ static void *multifd_recv_thread(void *opaque)
flags = p->flags;
/* recv methods don't know how to handle the SYNC flag */
p->flags &= ~MULTIFD_FLAG_SYNC;
has_data = !!p->normal_num;
has_data = p->normal_num || p->zero_num;
qemu_mutex_unlock(&p->mutex);
} else {
/*
@@ -1507,7 +1552,9 @@ static void *multifd_recv_thread(void *opaque)
}
rcu_unregister_thread();
trace_multifd_recv_thread_end(p->id, p->packets_recved, p->total_normal_pages);
trace_multifd_recv_thread_end(p->id, p->packets_recved,
p->total_normal_pages,
p->total_zero_pages);
return NULL;
}
@@ -1559,6 +1606,7 @@ int multifd_recv_setup(Error **errp)
p->name = g_strdup_printf("multifdrecv_%d", i);
p->iov = g_new0(struct iovec, page_count);
p->normal = g_new0(ram_addr_t, page_count);
p->zero = g_new0(ram_addr_t, page_count);
p->page_count = page_count;
p->page_size = qemu_target_page_size();
}
@@ -1633,3 +1681,17 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
QEMU_THREAD_JOINABLE);
qatomic_inc(&multifd_recv_state->count);
}
bool multifd_send_prepare_common(MultiFDSendParams *p)
{
multifd_send_zero_page_detect(p);
if (!p->pages->normal_num) {
p->next_packet_size = 0;
return false;
}
multifd_send_prepare_header(p);
return true;
}
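
multifd_recv_zero_page_process(), called from nocomp_recv() above, is also
defined outside this file, so its body is not in these hunks. Assuming it
simply materializes the zero pages whose offsets were unpacked into p->zero[],
a plausible shape is the sketch below (hypothetical helper name, not the
verbatim upstream function):

/*
 * Hypothetical sketch of the receive-side zero page step.  The fields used
 * (p->host, p->zero, p->zero_num, p->page_size) all appear in the hunks
 * above; buf_is_zero() comes from qemu/cutils.h.
 */
static void recv_zero_page_process_sketch(MultiFDRecvParams *p)
{
    for (int i = 0; i < p->zero_num; i++) {
        void *page = p->host + p->zero[i];

        /* Avoid dirtying pages that are already zero on the destination. */
        if (!buf_is_zero(page, p->page_size)) {
            memset(page, 0, p->page_size);
        }
    }
}

On the send side, the new multifd_send_prepare_common() gives the zlib and
zstd backends the same entry point: zero page detection runs first, and when
a packet holds only zero pages (normal_num == 0) the backend can skip the
compression step and just fill and send the packet header.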