async: update documentation of the memory barriers

Ever since commit 8c6b0356b5 ("util/async: make bh_aio_poll() O(1)",
2020-02-22), synchronization between qemu_bh_schedule() and aio_bh_poll()
has happened when the bottom half is enqueued in the bh_list, not
when the flags are set.  Update the documentation to match.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
author Paolo Bonzini <pbonzini@redhat.com> 2023-03-06 10:15:06 +01:00
parent 33828ca11d
commit 8dd48650b4
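Before the diff itself, the pairing the message describes can be sketched
outside of QEMU with plain C11 atomics and pthreads.  The names publish(),
take_all() and the payload field below are made up for illustration, standing
in for QSLIST_INSERT_HEAD_ATOMIC, QSLIST_MOVE_ATOMIC and whatever data the
callback reads; this is a sketch of the idea, not QEMU code.  The producer's
writes become visible to the consumer because of the release insertion into
the list and the acquire exchange that empties it, not because of any flag
update.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
    int payload;                     /* written before publication          */
    struct node *next;
};

static _Atomic(struct node *) list_head;     /* models ctx->bh_list         */

static void publish(struct node *n)
{
    struct node *old = atomic_load_explicit(&list_head, memory_order_relaxed);
    do {
        n->next = old;
    } while (!atomic_compare_exchange_weak_explicit(&list_head, &old, n,
                                                    memory_order_release,
                                                    memory_order_relaxed));
}

static struct node *take_all(void)
{
    /* Models QSLIST_MOVE_ATOMIC: grab the whole list in one atomic step.   */
    return atomic_exchange_explicit(&list_head, NULL, memory_order_acquire);
}

static void *producer(void *arg)
{
    static struct node n;
    (void)arg;
    n.payload = 42;                  /* plain store, ordered by the release  */
    publish(&n);                     /* models qemu_bh_schedule()            */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, producer, NULL);
    pthread_join(t, NULL);

    /* Models aio_bh_poll(): after the acquire exchange, payload is visible. */
    for (struct node *n = take_all(); n; n = n->next) {
        printf("payload = %d\n", n->payload);
    }
    return 0;
}

The diff below makes the same point in the real code: the comment next to the
fetch_or no longer claims to order the callback's data, and the ordering
claims move next to the list operations that actually provide them.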


@@ -74,14 +74,21 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
     unsigned old_flags;
 
     /*
-     * The memory barrier implicit in qatomic_fetch_or makes sure that:
-     * 1. idle & any writes needed by the callback are done before the
-     *    locations are read in the aio_bh_poll.
-     * 2. ctx is loaded before the callback has a chance to execute and bh
-     *    could be freed.
+     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
+     * insertion starts after BH_PENDING is set.
      */
     old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
+
     if (!(old_flags & BH_PENDING)) {
+        /*
+         * At this point the bottom half becomes visible to aio_bh_poll().
+         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
+         * aio_bh_poll(), ensuring that:
+         * 1. any writes needed by the callback are visible from the callback
+         *    after aio_bh_dequeue() returns bh.
+         * 2. ctx is loaded before the callback has a chance to execute and bh
+         *    could be freed.
+         */
         QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
     }
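The BH_PENDING guard in this hunk can also be sketched with C11 atomics and
pthreads (schedule_bh() and the insertion counter are hypothetical stand-ins,
not QEMU code): whichever thread flips BH_PENDING from 0 to 1 with the
fetch_or is the only one that performs the insertion, so concurrent
qemu_bh_schedule() calls enqueue the bottom half at most once per pending
cycle.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { BH_PENDING = 1, BH_SCHEDULED = 2 };

static atomic_uint flags;
static atomic_int insertions;            /* how many schedulers inserted     */

static void schedule_bh(void)
{
    unsigned old = atomic_fetch_or_explicit(&flags, BH_PENDING | BH_SCHEDULED,
                                            memory_order_acq_rel);
    if (!(old & BH_PENDING)) {
        /* Only the thread that flipped BH_PENDING from 0 to 1 gets here.   */
        atomic_fetch_add_explicit(&insertions, 1, memory_order_relaxed);
    }
}

static void *racer(void *arg)
{
    (void)arg;
    schedule_bh();
    return NULL;
}

int main(void)
{
    pthread_t t[4];

    for (int i = 0; i < 4; i++) {
        pthread_create(&t[i], NULL, racer, NULL);
    }
    for (int i = 0; i < 4; i++) {
        pthread_join(t[i], NULL);
    }

    /* Always 1: exactly one of the concurrent schedulers inserted; the
     * others saw BH_PENDING already set and backed off.                    */
    printf("insertions = %d\n", atomic_load(&insertions));
    return 0;
}
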
@@ -107,11 +114,8 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
     QSLIST_REMOVE_HEAD(head, next);
 
     /*
-     * The qatomic_and is paired with aio_bh_enqueue().  The implicit memory
-     * barrier ensures that the callback sees all writes done by the scheduling
-     * thread.  It also ensures that the scheduling thread sees the cleared
-     * flag before bh->cb has run, and thus will call aio_notify again if
-     * necessary.
+     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
+     * the removal finishes before BH_PENDING is reset.
      */
     *flags = qatomic_fetch_and(&bh->flags,
                                ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
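The dequeue side can be sketched the same way (illustrative C11 code, not
QEMU's; dequeue() and the single-node list are hypothetical stand-ins for
aio_bh_dequeue() and the poller's private slice): the node is unlinked before
the fetch_and clears BH_PENDING, and the release half of that fetch_and is
what lets a later scheduler, whose fetch_or observes the cleared flag, also
observe that the removal has completed before it inserts the node again.

#include <stdatomic.h>
#include <stdio.h>

enum { BH_PENDING = 1, BH_SCHEDULED = 2, BH_IDLE = 4 };

struct bh {
    atomic_uint flags;
    struct bh *next;
};

static struct bh *head;              /* private slice owned by the poller    */

static struct bh *dequeue(unsigned *out_flags)
{
    struct bh *bh = head;

    if (!bh) {
        return NULL;
    }
    head = bh->next;                 /* the removal: must complete first     */
    bh->next = NULL;

    /* ...then clear the flags; the release half of acq_rel publishes the
     * removal to whichever thread later does a fetch_or on these flags.    */
    *out_flags = atomic_fetch_and_explicit(&bh->flags,
                                           ~(BH_PENDING | BH_SCHEDULED | BH_IDLE),
                                           memory_order_acq_rel);
    return bh;
}

int main(void)
{
    static struct bh one;
    unsigned flags;
    struct bh *bh;

    atomic_init(&one.flags, BH_PENDING | BH_SCHEDULED);
    head = &one;

    bh = dequeue(&flags);
    printf("dequeued node, old flags 0x%x, new flags 0x%x\n",
           flags, atomic_load(&bh->flags));
    return 0;
}
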
@@ -158,6 +162,7 @@ int aio_bh_poll(AioContext *ctx)
     BHListSlice *s;
     int ret = 0;
 
+    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue().  */
     QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
     QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
@@ -448,15 +453,15 @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
 void aio_notify(AioContext *ctx)
 {
     /*
-     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
-     * aio_notify_accept.
+     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
+     * smp_mb() in aio_notify_accept().
      */
     smp_wmb();
     qatomic_set(&ctx->notified, true);
 
     /*
-     * Write ctx->notified before reading ctx->notify_me.  Pairs
-     * with smp_mb in aio_ctx_prepare or aio_poll.
+     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
+     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
     if (qatomic_read(&ctx->notify_me)) {
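The aio_notify() comments above describe the classic store-buffering (Dekker)
handshake.  A sketch with C11 fences standing in for smp_wmb()/smp_mb()
(notifier() and poller() are illustrative stand-ins for aio_notify() and the
aio_poll() path, not QEMU code): each side stores its own flag, executes a
full barrier, then loads the other side's flag, so at least one side observes
the other's store and a wakeup is never lost.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool notified;         /* models ctx->notified  */
static atomic_bool notify_me;        /* models ctx->notify_me */

static void *notifier(void *arg)     /* models aio_notify()                  */
{
    (void)arg;
    atomic_store_explicit(&notified, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);          /* smp_mb()          */
    if (atomic_load_explicit(&notify_me, memory_order_relaxed)) {
        printf("notifier: poller is about to block, kick it\n");
    }
    return NULL;
}

static void *poller(void *arg)       /* models aio_poll() before blocking    */
{
    (void)arg;
    atomic_store_explicit(&notify_me, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);          /* smp_mb()          */
    if (atomic_load_explicit(&notified, memory_order_relaxed)) {
        printf("poller: work already pending, do not block\n");
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, notifier, NULL);
    pthread_create(&b, NULL, poller, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    /* Whatever the interleaving, at least one of the two messages prints.  */
    return 0;
}

This is also why, in the hunk above, aio_notify() only touches the event
notifier when it sees notify_me set after the smp_mb(): the barrier pair
guarantees that if it skips the kick, the poller will see ctx->notified (and
the new bh_list entry) before it commits to blocking.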