migration/next for 20170421
-----BEGIN PGP SIGNATURE-----
iQIcBAABCAAGBQJY+d69AAoJEPSH7xhYctcj/4oQAIFFEyWaqrL9ve5ySiJgdtcY
zYtiIhZQ+nPuy2i1oDSX+vbMcmkJDDyfO5qLovxyHGkZHniR8HtxNHP+MkZQa07p
DiSIvd51HvcixIouhbGcoUCU63AYxqNL3o5/TyNpUI72nvsgwl3yfOot7PtutE/F
r384j8DrOJ9VwC5GGPg27mJvRPvyfDQWfxDCyMYVw153HTuwVYtgiu/layWojJDV
D2L1KV45ezBuGckZTHt9y6K4J5qz8qHb/dJc+whBBjj4j9T9XOILU9NPDAEuvjFZ
gHbrUyxj7EiApjHcDZoQm9Raez422ALU30yc9Kn7ik7vSqTxk2Ejq6Gz7y9MJrDn
KdMj75OETJNjBL+0T9MmbtWts28+aalpTUXtBpmi3eWQV5Hcox2NF1RP42jtD9Pa
lkrM6jv0nsdNfBPlQ+ZmBTJxysWECcMqy487nrzmPNC8vZfokjXL5be12puho9fh
ziU4gx9C6/k82S+/H6WD/AUtRiXJM7j4oTU2mnjrsSXQC1JNWqODBOFUo9zsDufl
vtcrxfPhSD1DwOInFSIBHf/RylcgTkPCL0rPoJ8npNDly6rHFYJ+oIbsn84Z4uYY
RWvH8xB9wgRlK9L1WdRgOd2q7PaeHQoSSdPOiS9YVEVMVvSW8Es5CRlhcAsw/M/T
1Tl65cNrjETAuZKL3dLH
=EsZ5
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20170421' into staging

migration/next for 20170421

# gpg: Signature made Fri 21 Apr 2017 11:28:13 BST
# gpg:                using RSA key 0xF487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>"
# gpg:                 aka "Juan Quintela <quintela@trasno.org>"
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723

* remotes/juanquintela/tags/migration/20170421: (65 commits)
  hmp: info migrate_parameters format tunes
  hmp: info migrate_capability format tunes
  migration: rename max_size to threshold_size
  migration: set current_active_state once
  virtio-rng: stop virtqueue while the CPU is stopped
  migration: don't close a file descriptor while it can be in use
  ram: Remove migration_bitmap_extend()
  migration: Disable hotplug/unplug during migration
  qdev: Move qdev_unplug() to qdev-monitor.c
  qdev: Export qdev_hot_removed
  qdev: qdev_hotplug is really a bool
  migration: Remove MigrationState parameter from migration_is_idle()
  ram: Use RAMBitmap type for coherence
  ram: rename last_ram_offset() last_ram_pages()
  ram: Use ramblock and page offset instead of absolute offset
  ram: Change offset field in PageSearchStatus to page
  ram: Remember last_page instead of last_offset
  ram: Use page number instead of an address for the bitmap operations
  ram: reorganize last_sent_block
  ram: ram_discard_range() don't use the mis parameter
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 32c7e0ab75
18 changed files with 839 additions and 779 deletions
migration/migration.c
@@ -109,7 +109,6 @@ MigrationState *migrate_get_current(void)
     };
 
     if (!once) {
-        qemu_mutex_init(&current_migration.src_page_req_mutex);
         current_migration.parameters.tls_creds = g_strdup("");
         current_migration.parameters.tls_hostname = g_strdup("");
         once = true;
@@ -436,9 +435,6 @@ static void process_incoming_migration_co(void *opaque)
         qemu_thread_join(&mis->colo_incoming_thread);
     }
 
-    qemu_fclose(f);
-    free_xbzrle_decoded_buf();
-
     if (ret < 0) {
         migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                           MIGRATION_STATUS_FAILED);
@@ -447,6 +443,9 @@ static void process_incoming_migration_co(void *opaque)
         exit(EXIT_FAILURE);
     }
 
+    qemu_fclose(f);
+    free_xbzrle_decoded_buf();
+
     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
     qemu_bh_schedule(mis->bh);
 }
@@ -651,16 +650,19 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->transferred = ram_bytes_transferred();
     info->ram->total = ram_bytes_total();
     info->ram->duplicate = dup_mig_pages_transferred();
-    info->ram->skipped = skipped_mig_pages_transferred();
+    /* legacy value. It is not used anymore */
+    info->ram->skipped = 0;
     info->ram->normal = norm_mig_pages_transferred();
-    info->ram->normal_bytes = norm_mig_bytes_transferred();
+    info->ram->normal_bytes = norm_mig_pages_transferred() *
+        qemu_target_page_size();
     info->ram->mbps = s->mbps;
-    info->ram->dirty_sync_count = s->dirty_sync_count;
-    info->ram->postcopy_requests = s->postcopy_requests;
+    info->ram->dirty_sync_count = ram_dirty_sync_count();
+    info->ram->postcopy_requests = ram_postcopy_requests();
+    info->ram->page_size = qemu_target_page_size();
 
     if (s->state != MIGRATION_STATUS_COMPLETED) {
         info->ram->remaining = ram_bytes_remaining();
-        info->ram->dirty_pages_rate = s->dirty_pages_rate;
+        info->ram->dirty_pages_rate = ram_dirty_pages_rate();
     }
 }
 
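The skipped counter is retired here and normal_bytes becomes a derived value. A minimal standalone sketch of that arithmetic, with invented numbers (QEMU takes the real ones from norm_mig_pages_transferred() and qemu_target_page_size()):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t norm_pages = 123456;  /* stand-in for norm_mig_pages_transferred() */
        uint64_t page_size = 4096;     /* stand-in for qemu_target_page_size() */

        /* normal_bytes is no longer a separate counter; it is derived. */
        uint64_t normal_bytes = norm_pages * page_size;

        printf("normal=%" PRIu64 " pages, normal_bytes=%" PRIu64 "\n",
               norm_pages, normal_bytes);
        return 0;
    }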
@@ -955,7 +957,7 @@ static void migrate_fd_cleanup(void *opaque)
     qemu_bh_delete(s->cleanup_bh);
     s->cleanup_bh = NULL;
 
-    flush_page_queue(s);
+    migration_page_queue_free();
 
     if (s->to_dst_file) {
         trace_migrate_fd_cleanup();
@@ -1061,21 +1063,21 @@ bool migration_has_failed(MigrationState *s)
             s->state == MIGRATION_STATUS_FAILED);
 }
 
-bool migration_in_postcopy(MigrationState *s)
+bool migration_in_postcopy(void)
 {
+    MigrationState *s = migrate_get_current();
+
     return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
 }
 
 bool migration_in_postcopy_after_devices(MigrationState *s)
 {
-    return migration_in_postcopy(s) && s->postcopy_after_devices;
+    return migration_in_postcopy() && s->postcopy_after_devices;
 }
 
-bool migration_is_idle(MigrationState *s)
+bool migration_is_idle(void)
 {
-    if (!s) {
-        s = migrate_get_current();
-    }
+    MigrationState *s = migrate_get_current();
 
     switch (s->state) {
     case MIGRATION_STATUS_NONE:
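These predicates now fetch the single global MigrationState themselves. A hypothetical miniature of the pattern, using toy types rather than QEMU's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical miniature of the refactor; these are not QEMU's types. */
    typedef struct { int state; } MigState;
    enum { STATUS_ACTIVE, STATUS_POSTCOPY_ACTIVE };

    static MigState current_migration;

    static MigState *migrate_get_current(void)
    {
        return &current_migration;
    }

    /* New-style predicate: the accessor looks up the one global state
     * itself instead of taking a MigrationState parameter. */
    static bool migration_in_postcopy(void)
    {
        return migrate_get_current()->state == STATUS_POSTCOPY_ACTIVE;
    }

    int main(void)
    {
        current_migration.state = STATUS_POSTCOPY_ACTIVE;
        /* Call sites shrink from migration_in_postcopy(migrate_get_current())
         * to a bare migration_in_postcopy(). */
        printf("in postcopy: %d\n", migration_in_postcopy());
        return 0;
    }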
@@ -1116,22 +1118,15 @@ MigrationState *migrate_init(const MigrationParams *params)
     s->mbps = 0.0;
     s->downtime = 0;
     s->expected_downtime = 0;
-    s->dirty_pages_rate = 0;
-    s->dirty_bytes_rate = 0;
     s->setup_time = 0;
-    s->dirty_sync_count = 0;
     s->start_postcopy = false;
     s->postcopy_after_devices = false;
-    s->postcopy_requests = 0;
     s->migration_thread_running = false;
-    s->last_req_rb = NULL;
     error_free(s->error);
     s->error = NULL;
 
     migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
 
-    QSIMPLEQ_INIT(&s->src_page_requests);
-
     s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     return s;
 }
@@ -1147,7 +1142,7 @@ int migrate_add_blocker(Error *reason, Error **errp)
         return -EACCES;
     }
 
-    if (migration_is_idle(NULL)) {
+    if (migration_is_idle()) {
         migration_blockers = g_slist_prepend(migration_blockers, reason);
         return 0;
     }
@@ -1485,7 +1480,7 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
         return;
     }
 
-    if (ram_save_queue_pages(ms, rbname, start, len)) {
+    if (ram_save_queue_pages(rbname, start, len)) {
         mark_source_rp_bad(ms);
     }
 }
@@ -1915,7 +1910,12 @@ static void *migration_thread(void *opaque)
     int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
     int64_t initial_bytes = 0;
-    int64_t max_size = 0;
+    /*
+     * The final stage happens when the remaining data is smaller than
+     * this threshold; it's calculated from the requested downtime and
+     * measured bandwidth
+     */
+    int64_t threshold_size = 0;
     int64_t start_time = initial_time;
     int64_t end_time;
     bool old_vm_running = false;
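A worked example of the new comment's claim, with made-up numbers; bandwidth here is measured in bytes per millisecond, so multiplying by a downtime limit in milliseconds yields a byte threshold:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Made-up numbers: ~1 Gbit/s is 125000 bytes per millisecond. */
        double bandwidth = 125000.0;     /* measured, bytes/ms */
        int64_t downtime_limit = 300;    /* user-requested limit, ms */

        /* Remaining data below this can be flushed within the allowed
         * downtime, so migration can enter its final stage. */
        int64_t threshold_size = bandwidth * downtime_limit;

        printf("threshold_size = %" PRId64 " bytes\n", threshold_size);
        return 0;
    }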
@@ -1946,7 +1946,6 @@ static void *migration_thread(void *opaque)
     qemu_savevm_state_begin(s->to_dst_file, &s->params);
 
     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
-    current_active_state = MIGRATION_STATUS_ACTIVE;
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                       MIGRATION_STATUS_ACTIVE);
 
@@ -1960,17 +1959,17 @@ static void *migration_thread(void *opaque)
         if (!qemu_file_rate_limit(s->to_dst_file)) {
             uint64_t pend_post, pend_nonpost;
 
-            qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
-                                      &pend_post);
+            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
+                                      &pend_nonpost, &pend_post);
             pending_size = pend_nonpost + pend_post;
-            trace_migrate_pending(pending_size, max_size,
+            trace_migrate_pending(pending_size, threshold_size,
                                   pend_post, pend_nonpost);
-            if (pending_size && pending_size >= max_size) {
+            if (pending_size && pending_size >= threshold_size) {
                 /* Still a significant amount to transfer */
 
                 if (migrate_postcopy_ram() &&
                     s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
-                    pend_nonpost <= max_size &&
+                    pend_nonpost <= threshold_size &&
                     atomic_read(&s->start_postcopy)) {
 
                     if (!postcopy_start(s, &old_vm_running)) {
@@ -2002,17 +2001,18 @@ static void *migration_thread(void *opaque)
                                          initial_bytes;
             uint64_t time_spent = current_time - initial_time;
             double bandwidth = (double)transferred_bytes / time_spent;
-            max_size = bandwidth * s->parameters.downtime_limit;
+            threshold_size = bandwidth * s->parameters.downtime_limit;
 
             s->mbps = (((double) transferred_bytes * 8.0) /
                     ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
 
             trace_migrate_transferred(transferred_bytes, time_spent,
-                                      bandwidth, max_size);
+                                      bandwidth, threshold_size);
             /* if we haven't sent anything, we don't want to recalculate
                10000 is a small enough number for our purposes */
-            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
-                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
+            if (ram_dirty_pages_rate() && transferred_bytes > 10000) {
+                s->expected_downtime = ram_dirty_pages_rate() *
+                    qemu_target_page_size() / bandwidth;
             }
 
             qemu_file_reset_rate_limit(s->to_dst_file);
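The expected-downtime estimate now reconstructs a byte rate from the page-based counter. A hedged, self-contained sketch with invented inputs (the real values come from ram_dirty_pages_rate(), qemu_target_page_size() and the measured bandwidth; units are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented inputs; QEMU reads these from ram_dirty_pages_rate(),
         * qemu_target_page_size() and its measured transfer rate. */
        uint64_t dirty_pages_rate = 50000;  /* pages dirtied per interval */
        uint64_t page_size = 4096;          /* bytes per target page */
        double bandwidth = 125000.0;        /* bytes transferred per ms */

        /* The dirty *byte* rate is no longer tracked directly; it is
         * reconstructed as pages * page size before dividing by bandwidth. */
        double expected_downtime = dirty_pages_rate * page_size / bandwidth;

        printf("expected downtime: %.1f\n", expected_downtime);
        return 0;
    }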
migration/postcopy-ram.c
@@ -123,7 +123,7 @@ bool postcopy_ram_supported_by_host(void)
     struct uffdio_range range_struct;
     uint64_t feature_mask;
 
-    if ((1ul << qemu_target_page_bits()) > pagesize) {
+    if (qemu_target_page_size() > pagesize) {
         error_report("Target page size bigger than host page size");
         goto out;
     }
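Several hunks in this series swap the open-coded shift for the helper. A tiny sketch of why the two spellings agree, using stand-in functions rather than QEMU's:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for qemu_target_page_bits()/qemu_target_page_size(); only
     * the relationship between them matters here. */
    static int target_page_bits(void) { return 12; }
    static size_t target_page_size(void) { return (size_t)1 << target_page_bits(); }

    int main(void)
    {
        /* "1ul << qemu_target_page_bits()" and "qemu_target_page_size()"
         * spell the same value; the helper just says what it means. */
        assert((1ul << target_page_bits()) == target_page_size());
        printf("target page size: %zu bytes\n", target_page_size());
        return 0;
    }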
@@ -213,8 +213,6 @@ out:
 static int init_range(const char *block_name, void *host_addr,
                       ram_addr_t offset, ram_addr_t length, void *opaque)
 {
-    MigrationIncomingState *mis = opaque;
-
     trace_postcopy_init_range(block_name, host_addr, offset, length);
 
     /*
@@ -223,7 +221,7 @@ static int init_range(const char *block_name, void *host_addr,
      * - we're going to get the copy from the source anyway.
      * (Precopy will just overwrite this data, so doesn't need the discard)
      */
-    if (ram_discard_range(mis, block_name, 0, length)) {
+    if (ram_discard_range(block_name, 0, length)) {
         return -1;
     }
 
@@ -271,7 +269,7 @@ static int cleanup_range(const char *block_name, void *host_addr,
  */
 int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
 {
-    if (qemu_ram_foreach_block(init_range, mis)) {
+    if (qemu_ram_foreach_block(init_range, NULL)) {
         return -1;
     }
 
@@ -745,10 +743,10 @@ PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
 void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                  unsigned long start, unsigned long length)
 {
-    size_t tp_bits = qemu_target_page_bits();
+    size_t tp_size = qemu_target_page_size();
     /* Convert to byte offsets within the RAM block */
-    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
-    pds->length_list[pds->cur_entry] = length << tp_bits;
+    pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size;
+    pds->length_list[pds->cur_entry] = length * tp_size;
     trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
     pds->cur_entry++;
     pds->nsentwords++;
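A worked example of the page-to-byte conversion above, with invented values; the multiply is equivalent to the old shift by qemu_target_page_bits():

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented values: a discard run starting at page 10 of a block
         * whose first page is 2, covering 4 target pages of 4 KiB. */
        unsigned long start = 10, length = 4;  /* in target pages */
        unsigned long block_offset = 2;        /* pds->offset, in pages */
        size_t tp_size = 4096;                 /* qemu_target_page_size() */

        unsigned long start_bytes = (start - block_offset) * tp_size;  /* 32768 */
        unsigned long length_bytes = length * tp_size;                 /* 16384 */

        printf("discard %lu bytes at offset %lu\n", length_bytes, start_bytes);
        return 0;
    }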
migration/ram.c (1284 changes)
File diff suppressed because it is too large.
migration/savevm.c
@@ -871,7 +871,7 @@ void qemu_savevm_send_postcopy_advise(QEMUFile *f)
 {
     uint64_t tmp[2];
     tmp[0] = cpu_to_be64(ram_pagesize_summary());
-    tmp[1] = cpu_to_be64(1ul << qemu_target_page_bits());
+    tmp[1] = cpu_to_be64(qemu_target_page_size());
 
     trace_qemu_savevm_send_postcopy_advise();
     qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 16, (uint8_t *)tmp);
@@ -1062,7 +1062,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
 static bool should_send_vmdesc(void)
 {
     MachineState *machine = MACHINE(qdev_get_machine());
-    bool in_postcopy = migration_in_postcopy(migrate_get_current());
+    bool in_postcopy = migration_in_postcopy();
     return !machine->suppress_vmdesc && !in_postcopy;
 }
 
@@ -1111,7 +1111,7 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
     int vmdesc_len;
     SaveStateEntry *se;
     int ret;
-    bool in_postcopy = migration_in_postcopy(migrate_get_current());
+    bool in_postcopy = migration_in_postcopy();
 
     trace_savevm_state_complete_precopy();
 
@@ -1197,7 +1197,7 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
  * the result is split into the amount for units that can and
  * for units that can't do postcopy.
  */
-void qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size,
+void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
                                uint64_t *res_non_postcopiable,
                                uint64_t *res_postcopiable)
 {
@@ -1216,7 +1216,7 @@ void qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size,
                 continue;
             }
         }
-        se->ops->save_live_pending(f, se->opaque, max_size,
+        se->ops->save_live_pending(f, se->opaque, threshold_size,
                                    res_non_postcopiable, res_postcopiable);
     }
 }
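For context, each registered handler's save_live_pending hook accumulates its remaining data into the two buckets that migration_thread compares against threshold_size. A hypothetical miniature of such a handler, not QEMU's actual RAM implementation:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical miniature of a save_live_pending handler: it reports
     * remaining data split into postcopiable and non-postcopiable parts.
     * Real handlers also receive a QEMUFile and their opaque pointer. */
    static void toy_ram_pending(uint64_t threshold_size,
                                uint64_t *res_non_postcopiable,
                                uint64_t *res_postcopiable)
    {
        (void)threshold_size;           /* may be used to trigger a dirty sync */
        *res_postcopiable += 32u << 20; /* invented: 32 MiB of dirty RAM */
    }

    int main(void)
    {
        uint64_t nonpost = 0, post = 0;
        toy_ram_pending(37500000, &nonpost, &post);
        /* migration_thread compares nonpost + post against threshold_size. */
        printf("pending: %llu postcopiable, %llu not\n",
               (unsigned long long)post, (unsigned long long)nonpost);
        return 0;
    }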
@@ -1390,13 +1390,13 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis)
     }
 
     remote_tps = qemu_get_be64(mis->from_src_file);
-    if (remote_tps != (1ul << qemu_target_page_bits())) {
+    if (remote_tps != qemu_target_page_size()) {
         /*
          * Again, some differences could be dealt with, but for now keep it
          * simple.
          */
-        error_report("Postcopy needs matching target page sizes (s=%d d=%d)",
-                     (int)remote_tps, 1 << qemu_target_page_bits());
+        error_report("Postcopy needs matching target page sizes (s=%d d=%zd)",
+                     (int)remote_tps, qemu_target_page_size());
         return -1;
     }
 
@@ -1479,8 +1479,7 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
         block_length = qemu_get_be64(mis->from_src_file);
 
         len -= 16;
-        int ret = ram_discard_range(mis, ramid, start_addr,
-                                    block_length);
+        int ret = ram_discard_range(ramid, start_addr, block_length);
         if (ret) {
             return ret;
         }
migration/trace-events
@@ -63,8 +63,8 @@ put_qtailq_end(const char *name, const char *reason) "%s %s"
 qemu_file_fclose(void) ""
 
 # migration/ram.c
-get_queued_page(const char *block_name, uint64_t tmp_offset, uint64_t ram_addr) "%s/%" PRIx64 " ram_addr=%" PRIx64
-get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, uint64_t ram_addr, int sent) "%s/%" PRIx64 " ram_addr=%" PRIx64 " (sent=%d)"
+get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/%" PRIx64 " page_abs=%lx"
+get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs, int sent) "%s/%" PRIx64 " page_abs=%lx (sent=%d)"
 migration_bitmap_sync_start(void) ""
 migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
 migration_throttle(void) ""
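The last trace argument is now an absolute page number formatted with %lx rather than a byte address. A small standalone sketch that only mirrors the new format string, with invented values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented values; this just reproduces the new trace layout. */
        const char *block_name = "pc.ram";
        uint64_t tmp_offset = 0x200000;
        unsigned long page_abs = 0x1234;  /* absolute page number, not bytes */

        printf("get_queued_page %s/%" PRIx64 " page_abs=%lx\n",
               block_name, tmp_offset, page_abs);
        return 0;
    }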