rcu: make memory barriers more explicit

Prepare for introducing smp_mb_placeholder() and smp_mb_global().
The new smp_mb() in synchronize_rcu() is not strictly necessary, since
the first atomic_mb_set for rcu_gp_ctr provides the required ordering.
However, synchronize_rcu is not performance critical, and it *will* be
necessary to introduce a smp_mb_global before calling wait_for_readers().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2018-02-16 09:23:31 +01:00
parent 729c0ddd3c
commit 77a8b8462b
2 changed files with 22 additions and 5 deletions

View file

@@ -92,8 +92,9 @@ static void wait_for_readers(void)
             atomic_set(&index->waiting, true);
         }
 
-        /* Here, order the stores to index->waiting before the
-         * loads of index->ctr.
+        /* Here, order the stores to index->waiting before the loads of
+         * index->ctr.  Pairs with smp_mb() in rcu_read_unlock(),
+         * ensuring that the loads of index->ctr are sequentially consistent.
          */
         smp_mb();
 
@@ -142,8 +143,13 @@ static void wait_for_readers(void)
 void synchronize_rcu(void)
 {
     qemu_mutex_lock(&rcu_sync_lock);
-    qemu_mutex_lock(&rcu_registry_lock);
 
+    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
+     * Pairs with smp_mb() in rcu_read_lock().
+     */
+    smp_mb();
+
+    qemu_mutex_lock(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.