ram: Move dup_pages into RAMState

Once there, rename it to its actual meaning, zero_pages.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Author: Juan Quintela <quintela@redhat.com>
Date:   2017-03-13 20:30:21 +01:00
parent 36040d9cb2
commit f7ccd61b4c
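The whole patch boils down to one pattern: an accounting counter that used to live in the global AccountingInfo moves into the per-migration RAMState, while the externally visible accessors keep their old names. A minimal stand-alone sketch of that pattern (illustrative C only, not QEMU source; TARGET_PAGE_SIZE is assumed to be 4 KiB here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096

/* Per-migration state; the patch moves the counter in here. */
struct RAMState {
    /* number of zero pages; it used to be acct_info.dup_pages */
    uint64_t zero_pages;
};
static struct RAMState ram_state;

/* The accessors keep their historical "dup" names but now read RAMState. */
static uint64_t dup_mig_pages_transferred(void)
{
    return ram_state.zero_pages;
}

static uint64_t dup_mig_bytes_transferred(void)
{
    return ram_state.zero_pages * TARGET_PAGE_SIZE;
}

int main(void)
{
    ram_state.zero_pages += 3;   /* what save_zero_page() does per zero page */
    printf("zero pages: %" PRIu64 ", bytes: %" PRIu64 "\n",
           dup_mig_pages_transferred(), dup_mig_bytes_transferred());
    return 0;
}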

migration/ram.c
@@ -165,6 +165,9 @@ struct RAMState {
     uint64_t xbzrle_cache_miss_prev;
     /* number of iterations at the beginning of period */
     uint64_t iterations_prev;
+    /* Accounting fields */
+    /* number of zero pages.  It used to be pages filled by the same char. */
+    uint64_t zero_pages;
 };
 typedef struct RAMState RAMState;
@@ -172,7 +175,6 @@ static RAMState ram_state;

 /* accounting for migration statistics */
 typedef struct AccountingInfo {
-    uint64_t dup_pages;
     uint64_t skipped_pages;
     uint64_t norm_pages;
     uint64_t iterations;
@@ -192,12 +194,12 @@ static void acct_clear(void)

 uint64_t dup_mig_bytes_transferred(void)
 {
-    return acct_info.dup_pages * TARGET_PAGE_SIZE;
+    return ram_state.zero_pages * TARGET_PAGE_SIZE;
 }

 uint64_t dup_mig_pages_transferred(void)
 {
-    return acct_info.dup_pages;
+    return ram_state.zero_pages;
 }

 uint64_t skipped_mig_bytes_transferred(void)
@ -737,19 +739,21 @@ static void migration_bitmap_sync(RAMState *rs)
* *
* Returns the number of pages written. * Returns the number of pages written.
* *
* @rs: current RAM state
* @f: QEMUFile where to send the data * @f: QEMUFile where to send the data
* @block: block that contains the page we want to send * @block: block that contains the page we want to send
* @offset: offset inside the block for the page * @offset: offset inside the block for the page
* @p: pointer to the page * @p: pointer to the page
* @bytes_transferred: increase it with the number of transferred bytes * @bytes_transferred: increase it with the number of transferred bytes
*/ */
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset, static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block,
ram_addr_t offset,
uint8_t *p, uint64_t *bytes_transferred) uint8_t *p, uint64_t *bytes_transferred)
{ {
int pages = -1; int pages = -1;
if (is_zero_range(p, TARGET_PAGE_SIZE)) { if (is_zero_range(p, TARGET_PAGE_SIZE)) {
acct_info.dup_pages++; rs->zero_pages++;
*bytes_transferred += save_page_header(f, block, *bytes_transferred += save_page_header(f, block,
offset | RAM_SAVE_FLAG_COMPRESS); offset | RAM_SAVE_FLAG_COMPRESS);
qemu_put_byte(f, 0); qemu_put_byte(f, 0);
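Outside the QEMU tree, the shape of the converted save_zero_page() can be approximated as follows. This is a sketch only: the byte loop stands in for QEMU's is_zero_range(), the helper names are made up for illustration, and the actual wire format (save_page_header(), the trailing zero byte) is left out.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096

struct RAMState {
    uint64_t zero_pages;
};

/* Stand-in for is_zero_range(): true if the whole buffer is zero bytes. */
static bool buffer_all_zero(const uint8_t *p, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (p[i] != 0) {
            return false;
        }
    }
    return true;
}

/*
 * Mirrors the contract above: returns 1 if the page was handled as a zero
 * page, -1 if the caller must send it by other means.  The counter is
 * bumped on the RAMState handed in by the caller, not on a global.
 */
static int save_zero_page_sketch(struct RAMState *rs, const uint8_t *page)
{
    if (buffer_all_zero(page, TARGET_PAGE_SIZE)) {
        rs->zero_pages++;            /* was: acct_info.dup_pages++ */
        return 1;
    }
    return -1;
}

int main(void)
{
    static uint8_t page[TARGET_PAGE_SIZE];   /* zero-initialized */
    struct RAMState rs = { 0 };

    save_zero_page_sketch(&rs, page);        /* counted as a zero page */
    page[100] = 0xff;
    save_zero_page_sketch(&rs, page);        /* not zero, not counted */
    printf("zero pages seen: %llu\n", (unsigned long long)rs.zero_pages);
    return 0;
}

The important part is the first parameter: the counter is bumped on whatever RAMState the caller passes in, which is what lets the remaining acct_info fields be moved the same way in follow-up patches.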
@ -822,11 +826,11 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
if (bytes_xmit > 0) { if (bytes_xmit > 0) {
acct_info.norm_pages++; acct_info.norm_pages++;
} else if (bytes_xmit == 0) { } else if (bytes_xmit == 0) {
acct_info.dup_pages++; rs->zero_pages++;
} }
} }
} else { } else {
pages = save_zero_page(f, block, offset, p, bytes_transferred); pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
if (pages > 0) { if (pages > 0) {
/* Must let xbzrle know, otherwise a previous (now 0'd) cached /* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale * page would be stale
@ -998,7 +1002,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
if (bytes_xmit > 0) { if (bytes_xmit > 0) {
acct_info.norm_pages++; acct_info.norm_pages++;
} else if (bytes_xmit == 0) { } else if (bytes_xmit == 0) {
acct_info.dup_pages++; rs->zero_pages++;
} }
} }
} else { } else {
@@ -1010,7 +1014,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
          */
         if (block != rs->last_sent_block) {
             flush_compressed_data(f);
-            pages = save_zero_page(f, block, offset, p, bytes_transferred);
+            pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
             if (pages == -1) {
                 /* Make sure the first page is sent out before other pages */
                 bytes_xmit = save_page_header(f, block, offset |
@@ -1031,7 +1035,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
             }
         } else {
             offset |= RAM_SAVE_FLAG_CONTINUE;
-            pages = save_zero_page(f, block, offset, p, bytes_transferred);
+            pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
             if (pages == -1) {
                 pages = compress_page_with_multi_thread(f, block, offset,
                                                         bytes_transferred);
@@ -1463,8 +1467,10 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
 void acct_update_position(QEMUFile *f, size_t size, bool zero)
 {
     uint64_t pages = size / TARGET_PAGE_SIZE;
+    RAMState *rs = &ram_state;
+
     if (zero) {
-        acct_info.dup_pages += pages;
+        rs->zero_pages += pages;
     } else {
         acct_info.norm_pages += pages;
         bytes_transferred += size;
@@ -2006,6 +2012,7 @@ static int ram_save_init_globals(RAMState *rs)
     rs->dirty_rate_high_cnt = 0;
     rs->bitmap_sync_count = 0;
+    rs->zero_pages = 0;
     migration_bitmap_sync_init(rs);
     qemu_mutex_init(&migration_bitmap_mutex);
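For consumers of the migration statistics nothing visible should change: the duplicate-page figures still come through dup_mig_bytes_transferred() and dup_mig_pages_transferred(); only their backing storage moves from acct_info into ram_state, which is reset again in ram_save_init_globals() at the start of each migration.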