ram.c: Move core compression code into its own file
No functional changes intended.

Signed-off-by: Lukas Straub <lukasstraub2@web.de>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
commit b5ca3368d9
parent ef4f5f5d5a

4 changed files with 344 additions and 262 deletions
migration/ram.c (262 lines changed)
@@ -32,8 +32,8 @@
 #include "qemu/bitmap.h"
 #include "qemu/madvise.h"
 #include "qemu/main-loop.h"
-#include "io/channel-null.h"
 #include "xbzrle.h"
+#include "ram-compress.h"
 #include "ram.h"
 #include "migration.h"
 #include "migration-stats.h"
@@ -480,32 +480,6 @@ typedef struct MigrationOps MigrationOps;
 
 MigrationOps *migration_ops;
 
-CompressionStats compression_counters;
-
-enum CompressResult {
-    RES_NONE = 0,
-    RES_ZEROPAGE = 1,
-    RES_COMPRESS = 2
-};
-typedef enum CompressResult CompressResult;
-
-struct CompressParam {
-    bool done;
-    bool quit;
-    bool trigger;
-    CompressResult result;
-    QEMUFile *file;
-    QemuMutex mutex;
-    QemuCond cond;
-    RAMBlock *block;
-    ram_addr_t offset;
-
-    /* internally used fields */
-    z_stream stream;
-    uint8_t *originbuf;
-};
-typedef struct CompressParam CompressParam;
-
 struct DecompressParam {
     bool done;
     bool quit;
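For context: CompressResult is the tri-state a compression worker reports back, and it is what moves to ram-compress.c here. A page is either all zeroes (sent as a cheap marker, no payload), successfully compressed, or failed. Below is a minimal, self-contained sketch of that classification; the helper names and the fixed 4 KiB page size (standing in for TARGET_PAGE_SIZE) are illustrative, not QEMU APIs:

    /* Pattern illustration (not QEMU code): classify a page the way a
     * compression worker reports its outcome via a tri-state enum. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    enum Result { RES_NONE, RES_ZEROPAGE, RES_COMPRESS };

    #define PAGE_SIZE 4096

    static int buffer_all_zero(const uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            if (buf[i]) {
                return 0;
            }
        }
        return 1;
    }

    static enum Result classify_page(const uint8_t *page)
    {
        if (buffer_all_zero(page, PAGE_SIZE)) {
            return RES_ZEROPAGE;   /* send a cheap marker, no payload */
        }
        return RES_COMPRESS;       /* hand the page to a compressor */
    }

    int main(void)
    {
        uint8_t page[PAGE_SIZE] = { 0 };
        printf("zero page -> %d\n", classify_page(page));
        page[123] = 0xab;
        printf("dirty page -> %d\n", classify_page(page));
        return 0;
    }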
@@ -518,15 +492,6 @@ struct DecompressParam {
 };
 typedef struct DecompressParam DecompressParam;
 
-static CompressParam *comp_param;
-static QemuThread *compress_threads;
-/* comp_done_cond is used to wake up the migration thread when
- * one of the compression threads has finished the compression.
- * comp_done_lock is used to co-work with comp_done_cond.
- */
-static QemuMutex comp_done_lock;
-static QemuCond comp_done_cond;
-
 static QEMUFile *decomp_file;
 static DecompressParam *decomp_param;
 static QemuThread *decompress_threads;
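The comp_done_lock/comp_done_cond pair being moved is a classic completion handshake: a worker flips its done flag under the shared lock and signals; the migration thread sleeps on the condvar until the flag flips. A minimal pthread sketch of that handshake (illustration only, not QEMU code; build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
    static bool done;

    static void *worker(void *arg)
    {
        (void)arg;
        usleep(1000);                    /* stand-in for compressing a page */
        pthread_mutex_lock(&done_lock);
        done = true;
        pthread_cond_signal(&done_cond);
        pthread_mutex_unlock(&done_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        pthread_mutex_lock(&done_lock);
        while (!done) {                  /* guard against spurious wakeups */
            pthread_cond_wait(&done_cond, &done_lock);
        }
        pthread_mutex_unlock(&done_lock);
        pthread_join(t, NULL);
        puts("worker done");
        return 0;
    }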
@@ -535,10 +500,6 @@ static QemuCond decomp_done_cond;
 
 static int ram_save_host_page_urgent(PageSearchStatus *pss);
 
-static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
-                                           RAMBlock *block, ram_addr_t offset,
-                                           uint8_t *source_buf);
-
 /* NOTE: page is the PFN not real ram_addr_t. */
 static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
 {
@@ -557,123 +518,6 @@ static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
            (pss1->host_page_start == pss2->host_page_start);
 }
 
-static void *do_data_compress(void *opaque)
-{
-    CompressParam *param = opaque;
-    RAMBlock *block;
-    ram_addr_t offset;
-    CompressResult result;
-
-    qemu_mutex_lock(&param->mutex);
-    while (!param->quit) {
-        if (param->trigger) {
-            block = param->block;
-            offset = param->offset;
-            param->trigger = false;
-            qemu_mutex_unlock(&param->mutex);
-
-            result = do_compress_ram_page(param->file, &param->stream,
-                                          block, offset, param->originbuf);
-
-            qemu_mutex_lock(&comp_done_lock);
-            param->done = true;
-            param->result = result;
-            qemu_cond_signal(&comp_done_cond);
-            qemu_mutex_unlock(&comp_done_lock);
-
-            qemu_mutex_lock(&param->mutex);
-        } else {
-            qemu_cond_wait(&param->cond, &param->mutex);
-        }
-    }
-    qemu_mutex_unlock(&param->mutex);
-
-    return NULL;
-}
-
-static void compress_threads_save_cleanup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_compress() || !comp_param) {
-        return;
-    }
-
-    thread_count = migrate_compress_threads();
-    for (i = 0; i < thread_count; i++) {
-        /*
-         * we use it as a indicator which shows if the thread is
-         * properly init'd or not
-         */
-        if (!comp_param[i].file) {
-            break;
-        }
-
-        qemu_mutex_lock(&comp_param[i].mutex);
-        comp_param[i].quit = true;
-        qemu_cond_signal(&comp_param[i].cond);
-        qemu_mutex_unlock(&comp_param[i].mutex);
-
-        qemu_thread_join(compress_threads + i);
-        qemu_mutex_destroy(&comp_param[i].mutex);
-        qemu_cond_destroy(&comp_param[i].cond);
-        deflateEnd(&comp_param[i].stream);
-        g_free(comp_param[i].originbuf);
-        qemu_fclose(comp_param[i].file);
-        comp_param[i].file = NULL;
-    }
-    qemu_mutex_destroy(&comp_done_lock);
-    qemu_cond_destroy(&comp_done_cond);
-    g_free(compress_threads);
-    g_free(comp_param);
-    compress_threads = NULL;
-    comp_param = NULL;
-}
-
-static int compress_threads_save_setup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_compress()) {
-        return 0;
-    }
-    thread_count = migrate_compress_threads();
-    compress_threads = g_new0(QemuThread, thread_count);
-    comp_param = g_new0(CompressParam, thread_count);
-    qemu_cond_init(&comp_done_cond);
-    qemu_mutex_init(&comp_done_lock);
-    for (i = 0; i < thread_count; i++) {
-        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
-        if (!comp_param[i].originbuf) {
-            goto exit;
-        }
-
-        if (deflateInit(&comp_param[i].stream,
-                        migrate_compress_level()) != Z_OK) {
-            g_free(comp_param[i].originbuf);
-            goto exit;
-        }
-
-        /* comp_param[i].file is just used as a dummy buffer to save data,
-         * set its ops to empty.
-         */
-        comp_param[i].file = qemu_file_new_output(
-            QIO_CHANNEL(qio_channel_null_new()));
-        comp_param[i].done = true;
-        comp_param[i].quit = false;
-        qemu_mutex_init(&comp_param[i].mutex);
-        qemu_cond_init(&comp_param[i].cond);
-        qemu_thread_create(compress_threads + i, "compress",
-                           do_data_compress, comp_param + i,
-                           QEMU_THREAD_JOINABLE);
-    }
-    return 0;
-
-exit:
-    compress_threads_save_cleanup();
-    return -1;
-}
-
 /**
  * save_page_header: write page header to wire
  *
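do_data_compress, just moved above, is a trigger/quit worker loop: sleep on the per-worker condvar, snapshot the queued (block, offset) under the lock, compress with the lock dropped, then report completion. The sketch below reduces it to a single pthread worker; unlike the QEMU loop it drains a pending trigger before honoring quit (QEMU instead relies on callers flushing before cleanup). Illustration only, not QEMU code; build with -pthread:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct Param {
        pthread_mutex_t mutex;
        pthread_cond_t cond;
        bool trigger;   /* coordinator set: work is queued */
        bool quit;      /* coordinator set: exit the loop  */
        int job;        /* stand-in for (block, offset)    */
    };

    static void *worker(void *opaque)
    {
        struct Param *p = opaque;

        pthread_mutex_lock(&p->mutex);
        for (;;) {
            if (p->trigger) {
                int job = p->job;            /* snapshot inputs under lock */
                p->trigger = false;
                pthread_mutex_unlock(&p->mutex);
                printf("compressing job %d\n", job); /* work, lock dropped */
                pthread_mutex_lock(&p->mutex);
            } else if (p->quit) {
                break;
            } else {
                pthread_cond_wait(&p->cond, &p->mutex);
            }
        }
        pthread_mutex_unlock(&p->mutex);
        return NULL;
    }

    int main(void)
    {
        struct Param p = { PTHREAD_MUTEX_INITIALIZER,
                           PTHREAD_COND_INITIALIZER, false, false, 0 };
        pthread_t t;
        pthread_create(&t, NULL, worker, &p);

        pthread_mutex_lock(&p.mutex);
        p.job = 42;
        p.trigger = true;                    /* queue one job            */
        pthread_cond_signal(&p.cond);
        pthread_mutex_unlock(&p.mutex);

        pthread_mutex_lock(&p.mutex);
        p.quit = true;                       /* then ask the thread out  */
        pthread_cond_signal(&p.cond);
        pthread_mutex_unlock(&p.mutex);
        pthread_join(t, NULL);
        return 0;
    }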
@@ -1461,32 +1305,6 @@ static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
     return 1;
 }
 
-static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
-                                           RAMBlock *block, ram_addr_t offset,
-                                           uint8_t *source_buf)
-{
-    uint8_t *p = block->host + offset;
-    int ret;
-
-    if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
-        return RES_ZEROPAGE;
-    }
-
-    /*
-     * copy it to a internal buffer to avoid it being modified by VM
-     * so that we can catch up the error during compression and
-     * decompression
-     */
-    memcpy(source_buf, p, TARGET_PAGE_SIZE);
-    ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
-    if (ret < 0) {
-        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
-        error_report("compressed data failed!");
-        return RES_NONE;
-    }
-    return RES_COMPRESS;
-}
-
 static void
 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
 {
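Underneath, do_compress_ram_page copies the page into a per-worker bounce buffer (so the running guest cannot mutate it mid-compress) and deflates the copy with zlib. QEMU keeps a long-lived z_stream per worker and writes through a dummy QEMUFile; the one-shot compress2() below is a deliberate simplification, and the zero-page fast path (handled before this point) is elided. Illustration only; build with: cc page-compress.c -lz

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        static uint8_t page[PAGE_SIZE];      /* stand-in for a guest page */
        static uint8_t bounce[PAGE_SIZE];    /* private snapshot buffer   */
        static uint8_t out[PAGE_SIZE * 2];   /* compressed output         */

        memset(page, 'x', sizeof(page));     /* a compressible dirty page */

        memcpy(bounce, page, PAGE_SIZE);     /* stable copy: the guest can
                                              * keep writing to `page`    */

        uLongf out_len = sizeof(out);
        int ret = compress2(out, &out_len, bounce, PAGE_SIZE,
                            Z_BEST_SPEED);   /* cf. migrate_compress_level() */
        if (ret != Z_OK) {
            fprintf(stderr, "compression failed: %d\n", ret);
            return 1;                        /* QEMU latches such errors on
                                              * the migration stream       */
        }
        printf("page: %d -> %lu bytes\n", PAGE_SIZE, (unsigned long)out_len);
        return 0;
    }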
@@ -1504,13 +1322,6 @@ update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
 
 static bool save_page_use_compression(RAMState *rs);
 
-static inline void compress_reset_result(CompressParam *param)
-{
-    param->result = RES_NONE;
-    param->block = NULL;
-    param->offset = 0;
-}
-
 static int send_queued_data(CompressParam *param)
 {
     PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
@@ -1545,31 +1356,6 @@ static int send_queued_data(CompressParam *param)
     return len;
 }
 
-static void flush_compressed_data(int (send_queued_data(CompressParam *)))
-{
-    int idx, thread_count;
-
-    thread_count = migrate_compress_threads();
-
-    qemu_mutex_lock(&comp_done_lock);
-    for (idx = 0; idx < thread_count; idx++) {
-        while (!comp_param[idx].done) {
-            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
-        }
-    }
-    qemu_mutex_unlock(&comp_done_lock);
-
-    for (idx = 0; idx < thread_count; idx++) {
-        qemu_mutex_lock(&comp_param[idx].mutex);
-        if (!comp_param[idx].quit) {
-            CompressParam *param = &comp_param[idx];
-            send_queued_data(param);
-            compress_reset_result(param);
-        }
-        qemu_mutex_unlock(&comp_param[idx].mutex);
-    }
-}
-
 static void ram_flush_compressed_data(RAMState *rs)
 {
     if (!save_page_use_compression(rs)) {
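flush_compressed_data is a drain barrier: it waits for every worker slot to report done before sending and resetting the queued results, so no half-finished page can be interleaved onto the wire. The drain loop in miniature, with three stub workers (illustration only, not QEMU code; build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NWORKERS 3

    static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
    static bool done[NWORKERS];

    static void *worker(void *arg)
    {
        int idx = *(int *)arg;
        usleep(1000 * (idx + 1));        /* stand-in for compression work */
        pthread_mutex_lock(&done_lock);
        done[idx] = true;
        pthread_cond_signal(&done_cond); /* wake the drainer to re-check  */
        pthread_mutex_unlock(&done_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NWORKERS];
        int ids[NWORKERS];

        for (int i = 0; i < NWORKERS; i++) {
            ids[i] = i;
            pthread_create(&t[i], NULL, worker, &ids[i]);
        }

        /* Drain: wait for each slot in turn, like the idx loop in
         * flush_compressed_data. Slots that finished early cost no wait. */
        pthread_mutex_lock(&done_lock);
        for (int i = 0; i < NWORKERS; i++) {
            while (!done[i]) {
                pthread_cond_wait(&done_cond, &done_lock);
            }
        }
        pthread_mutex_unlock(&done_lock);
        puts("all workers drained");

        for (int i = 0; i < NWORKERS; i++) {
            pthread_join(t[i], NULL);
        }
        return 0;
    }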
@@ -1579,52 +1365,6 @@ static void ram_flush_compressed_data(RAMState *rs)
     flush_compressed_data(send_queued_data);
 }
 
-static inline void set_compress_params(CompressParam *param, RAMBlock *block,
-                                       ram_addr_t offset)
-{
-    param->block = block;
-    param->offset = offset;
-    param->trigger = true;
-}
-
-static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
-                                           int (send_queued_data(CompressParam *)))
-{
-    int idx, thread_count, pages = -1;
-    bool wait = migrate_compress_wait_thread();
-
-    thread_count = migrate_compress_threads();
-    qemu_mutex_lock(&comp_done_lock);
-retry:
-    for (idx = 0; idx < thread_count; idx++) {
-        if (comp_param[idx].done) {
-            CompressParam *param = &comp_param[idx];
-            qemu_mutex_lock(&param->mutex);
-            param->done = false;
-            send_queued_data(param);
-            compress_reset_result(param);
-            set_compress_params(param, block, offset);
-
-            qemu_cond_signal(&param->cond);
-            qemu_mutex_unlock(&param->mutex);
-            pages = 1;
-            break;
-        }
-    }
-
-    /*
-     * wait for the free thread if the user specifies 'compress-wait-thread',
-     * otherwise we will post the page out in the main thread as normal page.
-     */
-    if (pages < 0 && wait) {
-        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
-        goto retry;
-    }
-    qemu_mutex_unlock(&comp_done_lock);
-
-    return pages;
-}
-
 #define PAGE_ALL_CLEAN 0
 #define PAGE_TRY_AGAIN 1
 #define PAGE_DIRTY_FOUND 2
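compress_page_with_multi_thread encodes the compress-wait-thread policy: scan for an idle worker; if none is free, either block on comp_done_cond and rescan, or return -1 so the caller posts the page uncompressed from the main thread. The claim-or-wait loop in miniature (illustration only, not QEMU code; build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>

    #define NWORKERS 4

    static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
    static bool idle[NWORKERS] = { true, true, true, true };

    /* Returns the claimed worker slot, or -1 when none free and !wait. */
    static int claim_worker(bool wait)
    {
        int found = -1;

        pthread_mutex_lock(&done_lock);
    retry:
        for (int idx = 0; idx < NWORKERS; idx++) {
            if (idle[idx]) {
                idle[idx] = false;   /* hand the page to this worker */
                found = idx;
                break;
            }
        }
        if (found < 0 && wait) {
            /* workers signal done_cond when they finish, cf. the drain
             * sketch above; re-scan after each wakeup */
            pthread_cond_wait(&done_cond, &done_lock);
            goto retry;
        }
        pthread_mutex_unlock(&done_lock);
        return found;
    }

    int main(void)
    {
        return claim_worker(true) < 0;   /* claims slot 0, exits 0 */
    }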