mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-02 07:13:54 -06:00
migration: fix deadlock
Release the qemu global mutex before calling synchronize_rcu(). synchronize_rcu() waits for all readers to finish their critical sections. There is at least one critical section in which we try to get the QGM (the critical section is in address_space_rw(), and prepare_mmio_access() is trying to acquire the QGM). Both functions (migration_end() and migration_bitmap_extend()) are called from the main thread, which is holding the QGM. Thus there is a race condition that ends up in a deadlock: main thread working thread Lock QGM | | Call KVM_EXIT_IO handler | | | Open rcu reader's critical section Migration cleanup bh | | | synchronize_rcu() is | waiting for readers | | prepare_mmio_access() is waiting for QGM \ / deadlock The patch changes the bitmap freeing from a direct g_free after synchronize_rcu to freeing inside call_rcu. Signed-off-by: Denis V. Lunev <den@openvz.org> Reported-by: Igor Redko <redkoi@virtuozzo.com> Tested-by: Igor Redko <redkoi@virtuozzo.com> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Juan Quintela <quintela@redhat.com> Signed-off-by: Juan Quintela <quintela@redhat.com> CC: Anna Melekhova <annam@virtuozzo.com> CC: Juan Quintela <quintela@redhat.com> CC: Amit Shah <amit.shah@redhat.com> CC: Paolo Bonzini <pbonzini@redhat.com> CC: Wen Congyang <wency@cn.fujitsu.com>
This commit is contained in:
parent
92e3762237
commit
60be634079
1 changed file with 27 additions and 17 deletions
|
@ -219,7 +219,6 @@ static RAMBlock *last_seen_block;
|
||||||
/* This is the last block from where we have sent data */
|
/* This is the last block from where we have sent data */
|
||||||
static RAMBlock *last_sent_block;
|
static RAMBlock *last_sent_block;
|
||||||
static ram_addr_t last_offset;
|
static ram_addr_t last_offset;
|
||||||
static unsigned long *migration_bitmap;
|
|
||||||
static QemuMutex migration_bitmap_mutex;
|
static QemuMutex migration_bitmap_mutex;
|
||||||
static uint64_t migration_dirty_pages;
|
static uint64_t migration_dirty_pages;
|
||||||
static uint32_t last_version;
|
static uint32_t last_version;
|
||||||
|
@ -236,6 +235,11 @@ struct PageSearchStatus {
|
||||||
};
|
};
|
||||||
typedef struct PageSearchStatus PageSearchStatus;
|
typedef struct PageSearchStatus PageSearchStatus;
|
||||||
|
|
||||||
|
static struct BitmapRcu {
|
||||||
|
struct rcu_head rcu;
|
||||||
|
unsigned long *bmap;
|
||||||
|
} *migration_bitmap_rcu;
|
||||||
|
|
||||||
struct CompressParam {
|
struct CompressParam {
|
||||||
bool start;
|
bool start;
|
||||||
bool done;
|
bool done;
|
||||||
|
@ -540,7 +544,7 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(RAMBlock *rb,
|
||||||
|
|
||||||
unsigned long next;
|
unsigned long next;
|
||||||
|
|
||||||
bitmap = atomic_rcu_read(&migration_bitmap);
|
bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
|
||||||
if (ram_bulk_stage && nr > base) {
|
if (ram_bulk_stage && nr > base) {
|
||||||
next = nr + 1;
|
next = nr + 1;
|
||||||
} else {
|
} else {
|
||||||
|
@ -558,7 +562,7 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(RAMBlock *rb,
|
||||||
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
|
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
|
||||||
{
|
{
|
||||||
unsigned long *bitmap;
|
unsigned long *bitmap;
|
||||||
bitmap = atomic_rcu_read(&migration_bitmap);
|
bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
|
||||||
migration_dirty_pages +=
|
migration_dirty_pages +=
|
||||||
cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
|
cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
|
||||||
}
|
}
|
||||||
|
@ -1090,17 +1094,22 @@ void free_xbzrle_decoded_buf(void)
|
||||||
xbzrle_decoded_buf = NULL;
|
xbzrle_decoded_buf = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void migration_bitmap_free(struct BitmapRcu *bmap)
|
||||||
|
{
|
||||||
|
g_free(bmap->bmap);
|
||||||
|
g_free(bmap);
|
||||||
|
}
|
||||||
|
|
||||||
static void migration_end(void)
|
static void migration_end(void)
|
||||||
{
|
{
|
||||||
/* caller have hold iothread lock or is in a bh, so there is
|
/* caller have hold iothread lock or is in a bh, so there is
|
||||||
* no writing race against this migration_bitmap
|
* no writing race against this migration_bitmap
|
||||||
*/
|
*/
|
||||||
unsigned long *bitmap = migration_bitmap;
|
struct BitmapRcu *bitmap = migration_bitmap_rcu;
|
||||||
atomic_rcu_set(&migration_bitmap, NULL);
|
atomic_rcu_set(&migration_bitmap_rcu, NULL);
|
||||||
if (bitmap) {
|
if (bitmap) {
|
||||||
memory_global_dirty_log_stop();
|
memory_global_dirty_log_stop();
|
||||||
synchronize_rcu();
|
call_rcu(bitmap, migration_bitmap_free, rcu);
|
||||||
g_free(bitmap);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
XBZRLE_cache_lock();
|
XBZRLE_cache_lock();
|
||||||
|
@ -1136,9 +1145,10 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
|
||||||
/* called in qemu main thread, so there is
|
/* called in qemu main thread, so there is
|
||||||
* no writing race against this migration_bitmap
|
* no writing race against this migration_bitmap
|
||||||
*/
|
*/
|
||||||
if (migration_bitmap) {
|
if (migration_bitmap_rcu) {
|
||||||
unsigned long *old_bitmap = migration_bitmap, *bitmap;
|
struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
|
||||||
bitmap = bitmap_new(new);
|
bitmap = g_new(struct BitmapRcu, 1);
|
||||||
|
bitmap->bmap = bitmap_new(new);
|
||||||
|
|
||||||
/* prevent migration_bitmap content from being set bit
|
/* prevent migration_bitmap content from being set bit
|
||||||
* by migration_bitmap_sync_range() at the same time.
|
* by migration_bitmap_sync_range() at the same time.
|
||||||
|
@ -1146,13 +1156,12 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
|
||||||
* at the same time.
|
* at the same time.
|
||||||
*/
|
*/
|
||||||
qemu_mutex_lock(&migration_bitmap_mutex);
|
qemu_mutex_lock(&migration_bitmap_mutex);
|
||||||
bitmap_copy(bitmap, old_bitmap, old);
|
bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
|
||||||
bitmap_set(bitmap, old, new - old);
|
bitmap_set(bitmap->bmap, old, new - old);
|
||||||
atomic_rcu_set(&migration_bitmap, bitmap);
|
atomic_rcu_set(&migration_bitmap_rcu, bitmap);
|
||||||
qemu_mutex_unlock(&migration_bitmap_mutex);
|
qemu_mutex_unlock(&migration_bitmap_mutex);
|
||||||
migration_dirty_pages += new - old;
|
migration_dirty_pages += new - old;
|
||||||
synchronize_rcu();
|
call_rcu(old_bitmap, migration_bitmap_free, rcu);
|
||||||
g_free(old_bitmap);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1210,8 +1219,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||||
reset_ram_globals();
|
reset_ram_globals();
|
||||||
|
|
||||||
ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
|
ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
|
||||||
migration_bitmap = bitmap_new(ram_bitmap_pages);
|
migration_bitmap_rcu = g_new(struct BitmapRcu, 1);
|
||||||
bitmap_set(migration_bitmap, 0, ram_bitmap_pages);
|
migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
|
||||||
|
bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Count the total number of pages used by ram blocks not including any
|
* Count the total number of pages used by ram blocks not including any
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue