Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-03 07:43:54 -06:00)
migration: do not wait for free thread
Instead of putting the main thread to sleep to wait for a free compression thread, we can post the page out directly as a normal page; this reduces latency and uses CPUs more efficiently.

A parameter, compress-wait-thread, is introduced; it can be enabled if the user really wants the old behavior.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
This commit is contained in: parent 923709896b, commit 1d58872a91
5 changed files with 74 additions and 19 deletions
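Only the migration/ram.c hunks are reproduced below; the parameter plumbing (option parsing, QAPI schema, monitor support) lives in the other changed files. As a usage sketch, the new knob should be settable like any other migration parameter; the monitor syntax below is the standard form for boolean migration parameters and is an assumption, since those hunks are not shown here:

    (qemu) migrate_set_parameter compress-wait-thread off

    { "execute": "migrate-set-parameters",
      "arguments": { "compress-wait-thread": false } }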
migration/ram.c
@@ -1896,30 +1896,34 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                            ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
+    bool wait = migrate_compress_wait_thread();
 
     thread_count = migrate_compress_threads();
     qemu_mutex_lock(&comp_done_lock);
-    while (true) {
-        for (idx = 0; idx < thread_count; idx++) {
-            if (comp_param[idx].done) {
-                comp_param[idx].done = false;
-                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
-                qemu_mutex_lock(&comp_param[idx].mutex);
-                set_compress_params(&comp_param[idx], block, offset);
-                qemu_cond_signal(&comp_param[idx].cond);
-                qemu_mutex_unlock(&comp_param[idx].mutex);
-                pages = 1;
-                ram_counters.normal++;
-                ram_counters.transferred += bytes_xmit;
-                break;
-            }
-        }
-        if (pages > 0) {
+retry:
+    for (idx = 0; idx < thread_count; idx++) {
+        if (comp_param[idx].done) {
+            comp_param[idx].done = false;
+            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
+            qemu_mutex_lock(&comp_param[idx].mutex);
+            set_compress_params(&comp_param[idx], block, offset);
+            qemu_cond_signal(&comp_param[idx].cond);
+            qemu_mutex_unlock(&comp_param[idx].mutex);
+            pages = 1;
+            ram_counters.normal++;
+            ram_counters.transferred += bytes_xmit;
             break;
-        } else {
-            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
         }
     }
+
+    /*
+     * wait for the free thread if the user specifies 'compress-wait-thread',
+     * otherwise we will post the page out in the main thread as normal page.
+     */
+    if (pages < 0 && wait) {
+        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
+        goto retry;
+    }
     qemu_mutex_unlock(&comp_done_lock);
 
     return pages;
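Read as a whole, the new function makes one non-blocking pass over the compression threads and only loops back (via the retry label) when the user has asked to wait. For readers outside the QEMU tree, here is a minimal standalone sketch of the same pattern, assuming plain pthreads instead of QEMU's qemu_mutex/qemu_cond wrappers; NUM_WORKERS, slot, and dispatch_page are invented names, not QEMU API:

    /* Sketch of the "don't wait for a free worker" pattern (hypothetical
     * names; slot mutexes/conds would be set up elsewhere with
     * pthread_mutex_init()/pthread_cond_init()). */
    #include <pthread.h>
    #include <stdbool.h>

    #define NUM_WORKERS 4

    static struct {
        bool done;                /* worker is idle and can take a page */
        pthread_mutex_t mutex;
        pthread_cond_t cond;      /* kicks the worker when work is queued */
    } slot[NUM_WORKERS];

    static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;

    /* Returns 1 if a worker took the page, -1 if all were busy and the
     * caller chose not to wait (it should then send the page itself). */
    static int dispatch_page(bool wait)
    {
        int idx, pages = -1;

        pthread_mutex_lock(&done_lock);
    retry:
        for (idx = 0; idx < NUM_WORKERS; idx++) {
            if (slot[idx].done) {
                slot[idx].done = false;
                pthread_mutex_lock(&slot[idx].mutex);
                /* ...hand the page to worker idx here... */
                pthread_cond_signal(&slot[idx].cond);
                pthread_mutex_unlock(&slot[idx].mutex);
                pages = 1;
                break;
            }
        }

        /* Old behavior: always block until a worker frees up.
         * New behavior: block only when the user opted in. */
        if (pages < 0 && wait) {
            pthread_cond_wait(&done_cond, &done_lock);
            goto retry;
        }
        pthread_mutex_unlock(&done_lock);

        return pages;
    }

A finishing worker would set its done flag and signal done_cond under done_lock, which is what wakes the retry loop in the waiting case.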
@@ -2233,7 +2237,10 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
      * CPU resource.
      */
     if (block == rs->last_sent_block && save_page_use_compression(rs)) {
-        return compress_page_with_multi_thread(rs, block, offset);
+        res = compress_page_with_multi_thread(rs, block, offset);
+        if (res > 0) {
+            return res;
+        }
     } else if (migrate_use_multifd()) {
         return ram_save_multifd_page(rs, block, offset);
     }
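The caller side is where the fallback actually happens: a positive return means a compression thread took the page, while -1 now means "all threads busy and the user did not ask to wait". A hedged sketch of how the tail of ram_save_target_page() then behaves (the ram_save_page() call is not part of the hunk above and is an assumption about the surrounding code of this era of the tree):

    res = compress_page_with_multi_thread(rs, block, offset);
    if (res > 0) {
        return res;               /* a compression thread took the page */
    }
    /* res < 0: every thread was busy and compress-wait-thread is off,
     * so fall through and send the page uncompressed as a normal page. */
    return ram_save_page(rs, pss, last_stage);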