Mirror of https://github.com/Motorhead1991/qemu.git (synced 2026-01-06 06:27:41 -07:00)
Migration pull 2019-10-11
Mostly cleanups and minor fixes
[Note I'm seeing a hang on the aarch64 hosted x86-64 tcg migration test
in xbzrle; but I'm seeing that on current head as well]

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEERfXHG0oMt/uXep+pBRYzHrxb/ecFAl2g1JcACgkQBRYzHrxb
/efl1RAAjYukmf+kCFCw4Ws6nJ4000O85mpj0117SJpgTck1ivTC968REpl5pD0C
aHDzamNW82fiqjRxwF6KJRWic217NrmR1Z/j++SDyIjOc1ERQdB+RdCc7T2NkBT5
2HiPaceNiu9wOpqX/bto/xAug9vAxq5/1jeq+vhKxd+IcvAZII0SwKWn9mWA2209
H4i3v8OCv9isT6MRNitfWT/giYkI5HwFzA9a13S+zXioEGnoAmqzrrAQs2/MkyDt
bIeLbZyonH9hKbdrwmIXCvNEHA32BOPQyrsRp9CPZwRKVP2AzRYU9K9UjKncmYJS
bPdLYFmqEQm8ILQI6lyJ+pW1r/cyAUQBQii6NA+9ZfimxCSB06ArU+JeM0csl7HV
b4cG/bENFmtOzaoc3SrE6t1APlTiS9nxW6iH8zW3ozMEQGGihru7/6VIlwKTOfeX
kXKF92FTiTBpJ1u3/t05TPnxo4c2bKWM+Gj1okDAUsP8HovQpvJa8r92n1cC0+l8
l3pkFnrejzTcrexWIiKXYnPnO7Ez/Dm+0aCzlQkX7DSFxDnwI2T/BYk21FNlcI/L
rCHnkSLjYMWPelTLo9ZNuFaKL9UMeMtLPaIU9NBSSmsQ32/d8EXpDQwe8uAq+9Z/
qBir/mKyDe7I/InumtWQS46SS1/E1VyxDG2dxRWK9lN8DDOXRlM=
=Jouv
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20191011a' into staging

Migration pull 2019-10-11

Mostly cleanups and minor fixes
[Note I'm seeing a hang on the aarch64 hosted x86-64 tcg migration test
in xbzrle; but I'm seeing that on current head as well]

# gpg: Signature made Fri 11 Oct 2019 20:14:31 BST
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20191011a: (21 commits)
  migration: Support gtree migration
  migration/multifd: pages->used would be cleared when attach to multifd_send_state
  migration/multifd: initialize packet->magic/version once at setup stage
  migration/multifd: use pages->allocated instead of the static max
  migration/multifd: fix a typo in comment of multifd_recv_unfill_packet()
  migration/postcopy: check PostcopyState before setting to POSTCOPY_INCOMING_RUNNING
  migration/postcopy: rename postcopy_ram_enable_notify to postcopy_ram_incoming_setup
  migration/postcopy: postpone setting PostcopyState to END
  migration/postcopy: mis->have_listen_thread check will never be touched
  migration: report SaveStateEntry id and name on failure
  migration: pass in_postcopy instead of check state again
  migration/postcopy: fix typo in mark_postcopy_blocktime_begin's comment
  migration/postcopy: map large zero page in postcopy_ram_incoming_setup()
  migration/postcopy: allocate tmp_page in setup stage
  migration: Don't try and recover return path in non-postcopy
  rcu: Use automatic rc_read unlock in core memory/exec code
  migration: Use automatic rcu_read unlock in rdma.c
  migration: Use automatic rcu_read unlock in ram.c
  migration: Fix missing rcu_read_unlock
  rcu: Add automatically released rcu_read_lock variants
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit c760cb77e5
16 changed files with 995 additions and 447 deletions
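The hunks shown below come from the series' RCU cleanup ("rcu: Add automatically released rcu_read_lock variants" and "rcu: Use automatic rc_read unlock in core memory/exec code"): manually paired rcu_read_lock()/rcu_read_unlock() calls are replaced with two guard macros, RCU_READ_LOCK_GUARD(), which keeps the RCU read section open until the end of the enclosing scope, and WITH_RCU_READ_LOCK_GUARD() { ... }, which keeps it open for an explicit block. The point of the conversion is that the unlock can no longer be forgotten on an early return or error path. The following is a minimal, self-contained sketch of how such guards can be built on the GCC/Clang cleanup attribute; the two macro names mirror the diff, but the helper names and the implementation are illustrative assumptions, not QEMU's actual code.

/* rcu_guard_sketch.c - illustrative only, not QEMU's implementation.
 * Build: gcc -Wall rcu_guard_sketch.c && ./a.out
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in RCU primitives: just track nesting depth for the demo. */
static int rcu_depth;
static void rcu_read_lock(void)   { rcu_depth++; }
static void rcu_read_unlock(void) { rcu_depth--; }

typedef struct { bool held; } RcuReadGuard;

static inline RcuReadGuard rcu_guard_acquire(void)
{
    rcu_read_lock();
    return (RcuReadGuard){ .held = true };
}

/* Called automatically when a guard variable goes out of scope. */
static inline void rcu_guard_release(RcuReadGuard *g)
{
    if (g->held) {
        rcu_read_unlock();
        g->held = false;
    }
}

/* Keep the RCU read section open until the end of the enclosing scope. */
#define RCU_READ_LOCK_GUARD() \
    __attribute__((cleanup(rcu_guard_release), unused)) \
    RcuReadGuard rcu_guard__ = rcu_guard_acquire()

/* Keep the RCU read section open for the braced block that follows.
 * The one-iteration for loop releases the lock when the block ends,
 * and the cleanup attribute covers early returns out of the block. */
#define WITH_RCU_READ_LOCK_GUARD() \
    for (__attribute__((cleanup(rcu_guard_release))) \
         RcuReadGuard rcu_guard__ = rcu_guard_acquire(); \
         rcu_guard__.held; rcu_guard_release(&rcu_guard__))

int main(void)
{
    WITH_RCU_READ_LOCK_GUARD() {
        printf("inside block, depth=%d\n", rcu_depth);   /* prints 1 */
    }
    printf("after block,  depth=%d\n", rcu_depth);        /* prints 0 */
    return 0;
}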
@@ -193,30 +193,29 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
-
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
-
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-        unsigned long num = next - base;
-        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
-        if (found < num) {
-            dirty = true;
-            break;
-        }
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
-    }
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            unsigned long num = next - base;
+            unsigned long found = find_next_bit(blocks->blocks[idx],
+                                                num, offset);
+            if (found < num) {
+                dirty = true;
+                break;
+            }
 
-    rcu_read_unlock();
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
+        }
+    }
 
     return dirty;
 }
 
@@ -234,7 +233,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
@@ -256,8 +255,6 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
         base += DIRTY_MEMORY_BLOCK_SIZE;
     }
 
-    rcu_read_unlock();
-
     return dirty;
 }
 
@@ -309,13 +306,11 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
     idx = page / DIRTY_MEMORY_BLOCK_SIZE;
     offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
     set_bit_atomic(offset, blocks->blocks[idx]);
-
-    rcu_read_unlock();
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -334,39 +329,37 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
-
-    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
-    }
+    WITH_RCU_READ_LOCK_GUARD() {
+        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        }
 
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
 
-        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
-                              offset, next - page);
-        }
+            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+                                  offset, next - page);
+            }
 
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
-    }
-
-    rcu_read_unlock();
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
+        }
+    }
 
     xen_hvm_modified_memory(start, length);
 }
 
@@ -396,36 +389,35 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                           DIRTY_MEMORY_BLOCK_SIZE);
 
-        rcu_read_lock();
-
-        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
-        }
-
-        for (k = 0; k < nr; k++) {
-            if (bitmap[k]) {
-                unsigned long temp = leul_to_cpu(bitmap[k]);
-
-                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
-
-                if (global_dirty_log) {
-                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
-                              temp);
-                }
-
-                if (tcg_enabled()) {
-                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
-                }
-            }
+        WITH_RCU_READ_LOCK_GUARD() {
+            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+                blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+            }
 
-            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
-                offset = 0;
-                idx++;
+            for (k = 0; k < nr; k++) {
+                if (bitmap[k]) {
+                    unsigned long temp = leul_to_cpu(bitmap[k]);
+
+                    atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+                    if (global_dirty_log) {
+                        atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+                                  temp);
+                    }
+
+                    if (tcg_enabled()) {
+                        atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+                                  temp);
+                    }
+                }
+
+                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                    offset = 0;
+                    idx++;
+                }
             }
         }
 
-        rcu_read_unlock();
-
         xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
     } else {
         uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
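As the hunks illustrate, the conversion picks between the two forms case by case: where the lock/unlock pair bracketed the whole remainder of a function (cpu_physical_memory_all_dirty, cpu_physical_memory_set_dirty_flag), the plain RCU_READ_LOCK_GUARD() declaration is enough; where only part of the body was protected or the section has its own control flow (cpu_physical_memory_get_dirty, cpu_physical_memory_set_dirty_range, cpu_physical_memory_set_dirty_lebitmap), the explicit WITH_RCU_READ_LOCK_GUARD() block keeps the critical section visible. A compressed illustration of the two patterns, reusing the hypothetical sketch above (not code from this commit; the function names are made up):

/* Illustrative only; assumes the RcuReadGuard sketch shown earlier. */

int read_counter;                 /* stand-in for RCU-protected state */

/* Rest-of-scope form: replaces a lock/unlock pair spanning the whole body. */
int demo_rest_of_scope(void)
{
    RCU_READ_LOCK_GUARD();
    if (read_counter < 0) {
        return -1;                /* unlock still happens automatically */
    }
    return read_counter;          /* ...and here as well */
}

/* Block form: replaces a pair that only bracketed part of the body. */
bool demo_explicit_block(void)
{
    bool dirty = false;

    WITH_RCU_READ_LOCK_GUARD() {
        dirty = (read_counter != 0);
    }                             /* lock released when the block is left */

    return dirty;
}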