Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-03)
atomics: eliminate mb_read/mb_set
qatomic_mb_read and qatomic_mb_set were the very first atomic primitives introduced for QEMU; their semantics are unclear and they provide a false sense of safety.

The last use of qatomic_mb_read() has been removed, so delete it. qatomic_mb_set() instead can survive as an optimized qatomic_set()+smp_mb(), similar to Linux's smp_store_mb(), but rename it to qatomic_set_mb() to match the order of the two operations.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
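For orientation, the renamed primitive amounts to a plain store followed by a full memory barrier. The sketch below approximates that with C11 atomics under a hypothetical macro name (sketch_set_mb); it is not the actual QEMU macro from include/qemu/atomic.h, which may use an architecture-specific optimization instead of a separate fence.

/* Rough illustration only: qatomic_set_mb() semantics as a relaxed store
 * followed by a full barrier (qatomic_set() + smp_mb()).
 * sketch_set_mb is an assumed name for this sketch, not a QEMU API. */
#include <stdatomic.h>

#define sketch_set_mb(ptr, val)                                      \
    do {                                                             \
        atomic_store_explicit((ptr), (val), memory_order_relaxed);   \
        atomic_thread_fence(memory_order_seq_cst);                   \
    } while (0)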
parent 09a49afeae
commit 06831001ac

11 changed files with 20 additions and 46 deletions
@@ -202,7 +202,7 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
     push_waiter(mutex, &w);
 
     /*
-     * Add waiter before reading mutex->handoff. Pairs with qatomic_mb_set
+     * Add waiter before reading mutex->handoff. Pairs with qatomic_set_mb
      * in qemu_co_mutex_unlock.
      */
     smp_mb__after_rmw();
@@ -310,7 +310,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
 
         our_handoff = mutex->sequence;
         /* Set handoff before checking for waiters. */
-        qatomic_mb_set(&mutex->handoff, our_handoff);
+        qatomic_set_mb(&mutex->handoff, our_handoff);
         if (!has_waiters(mutex)) {
             /* The concurrent lock has not added itself yet, so it
              * will be able to pick our handoff.
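The two hunks are the two halves of a Dekker-style pairing: the unlock side publishes mutex->handoff with qatomic_set_mb() before checking for waiters, while the lock slow path registers its waiter before reading mutex->handoff, with smp_mb__after_rmw() ordering the read-modify-write against the later load. A minimal, self-contained sketch of that pattern with C11 atomics follows; the names (handoff, waiters, unlock_side, lock_slowpath_side) and the simplified state are assumptions for illustration, not the actual CoMutex layout.

/* Sketch of the store/load pairing between qemu_co_mutex_unlock and
 * qemu_co_mutex_lock_slowpath, on simplified state. Each side makes its
 * own write globally visible with a full barrier before reading the other
 * side's variable, so at least one of the two threads is guaranteed to
 * observe the other's write. */
#include <stdatomic.h>
#include <stdbool.h>

atomic_uint handoff;   /* stands in for mutex->handoff */
atomic_uint waiters;   /* stands in for the waiter count behind has_waiters() */

/* Unlock side: publish the handoff, then look for waiters. */
bool unlock_side(unsigned seq)
{
    atomic_store_explicit(&handoff, seq, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* role of qatomic_set_mb() */
    return atomic_load_explicit(&waiters, memory_order_relaxed) != 0;
}

/* Lock slow path: register as a waiter, then look for a handoff. */
bool lock_slowpath_side(void)
{
    atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* role of smp_mb__after_rmw() */
    return atomic_load_explicit(&handoff, memory_order_relaxed) != 0;
}

Without the barriers, both loads could miss both stores: the unlocker would see no waiters while the waiter sees no handoff and sleeps forever. The full barriers rule that interleaving out.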