migration: run pending/iterate callbacks out of big lock

This makes it possible to do blocking writes directly to the socket,
with no buffer in the middle.  For RAM, only the migration_bitmap_sync()
call needs the iothread lock.  For block migration, it is needed by
the block layer (including bdrv_drain_all and dirty bitmap access),
but because some code is shared between iterate and complete, all of
mig_save_device_dirty is run with the lock taken.
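
As an illustration of that split, here is a minimal sketch of the RAM
pending callback with only the bitmap sync bracketed by the big lock.
The shape follows arch_init.c of this period, but the helper names
(ram_save_pending, ram_save_remaining) and the surrounding arithmetic
should be read as assumptions, not as the literal patch:

    static uint64_t ram_save_pending(QEMUFile *f, void *opaque,
                                     uint64_t max_size)
    {
        uint64_t remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

        if (remaining_size < max_size) {
            /* Only the bitmap sync touches state shared with the
             * iothread, so only this call runs under the big lock.
             */
            qemu_mutex_lock_iothread();
            migration_bitmap_sync();
            qemu_mutex_unlock_iothread();
            remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
        }
        return remaining_size;
    }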

In the savevm case, the iterate callback runs within the big lock.
This is annoying because it complicates the rules.  Luckily we do not
need to do anything about it: the RAM iterate callback does not need
the iothread lock, and block migration never runs during savevm.
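
For the block-migration side, a condensed, hypothetical sketch of how
the iterate callback can keep the whole dirty phase under the lock;
the function names (block_save_iterate, blk_mig_save_bulked_block,
blk_mig_save_dirty_block) and the control flow are simplified
assumptions about block-migration.c, not the literal patch:

    static int block_save_iterate(QEMUFile *f, void *opaque)
    {
        int ret;

        /* The bulk phase can proceed without the big lock... */
        ret = blk_mig_save_bulked_block(f);
        if (ret == 0) {
            /* ...but the dirty phase is shared with the complete stage
             * and touches the block layer's dirty bitmaps, so all of
             * mig_save_device_dirty() (called underneath) runs with
             * the lock taken.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        return ret;
    }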

Reviewed-by: Orit Wasserman <owasserm@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
commit 32c835ba39
parent 8c8de19d93
Author: Paolo Bonzini, 2013-02-22 17:36:27 +01:00
Committed by: Juan Quintela

4 changed files with 52 additions and 4 deletions


@@ -670,7 +670,6 @@ static void *buffered_file_thread(void *opaque)
         uint64_t pending_size;
 
         if (s->bytes_xfer < s->xfer_limit) {
-            qemu_mutex_lock_iothread();
             DPRINTF("iterate\n");
             pending_size = qemu_savevm_state_pending(s->file, max_size);
             DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
@@ -678,6 +677,7 @@ static void *buffered_file_thread(void *opaque)
                 qemu_savevm_state_iterate(s->file);
             } else {
                 DPRINTF("done iterating\n");
+                qemu_mutex_lock_iothread();
                 start_time = qemu_get_clock_ms(rt_clock);
                 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                 old_vm_running = runstate_is_running();
@@ -685,8 +685,8 @@ static void *buffered_file_thread(void *opaque)
                 s->xfer_limit = INT_MAX;
                 qemu_savevm_state_complete(s->file);
                 last_round = true;
+                qemu_mutex_unlock_iothread();
             }
-            qemu_mutex_unlock_iothread();
         }
 
         current_time = qemu_get_clock_ms(rt_clock);
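
Read together, the three hunks leave buffered_file_thread() holding the
big lock only on the completion path. A condensed sketch of the
resulting loop body, with declarations and the VM-stop details elided:

    if (s->bytes_xfer < s->xfer_limit) {
        /* pending/iterate now run outside the big lock */
        pending_size = qemu_savevm_state_pending(s->file, max_size);
        if (pending_size && pending_size >= max_size) {
            qemu_savevm_state_iterate(s->file);
        } else {
            /* completion stops the VM, so it still takes the lock */
            qemu_mutex_lock_iothread();
            /* ... wake up, stop the VM, lift the rate limit ... */
            qemu_savevm_state_complete(s->file);
            last_round = true;
            qemu_mutex_unlock_iothread();
        }
    }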