dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel

If DMA's owning thread cancels the IO while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.
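
For reference, the interface change this implies in include/exec/cpu-common.h
(one of the three files touched; shown here as a sketch, since that hunk is
not reproduced below) is:

-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);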

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Fam Zheng, 2015-03-16 17:03:37 +08:00; committed by Paolo Bonzini
parent 33b6c2edf6
commit e95205e1f9
3 changed files with 31 additions and 23 deletions

exec.c (34 changed lines)

@@ -2479,8 +2479,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2488,31 +2487,34 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
+
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2523,12 +2525,18 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)
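
The caller side (dma-helpers.c, the third file in this commit, also not shown
above) is expected to create the BH in its own AioContext and to own its
lifetime. A sketch of the intended usage pattern, with surrounding code elided
and the field and helper names (dbs, dbs->ctx, reschedule_dma) taken as
assumptions from that file:

    /* On bounce-buffer map failure: create the BH in the AIOCB's own
     * AioContext, then register it; cpu_notify_map_clients_locked() will
     * schedule it once the bounce buffer is free, in the right context. */
    dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
    cpu_register_map_client(dbs->bh);

    /* On cancel: unregister first (this takes map_client_list_lock, so it
     * cannot race with the notifier), and only then delete the BH. */
    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }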