exec: RCUify AddressSpaceDispatch
Note that even after this patch, most callers of address_space_* functions must still be under the big QEMU lock, otherwise the memory region returned by address_space_translate can disappear as soon as address_space_translate returns. This will be fixed in the next part of this series.

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
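To make the note above concrete, the reader side of an RCU-managed dispatch pointer looks roughly like the sketch below. This is an illustrative sketch in terms of QEMU's RCU helpers (rcu_read_lock()/rcu_read_unlock(), atomic_rcu_read()), not code taken from the patch: the function address_space_lookup_sketch() is hypothetical, and the snippet assumes QEMU-internal types and headers rather than being a standalone program.

/* Hypothetical reader-side sketch (not part of this patch).  Assumes
 * QEMU-internal types such as CPUState and AddressSpaceDispatch. */
#include "qemu/rcu.h"       /* rcu_read_lock(), rcu_read_unlock() */
#include "qemu/atomic.h"    /* atomic_rcu_read() */

static void address_space_lookup_sketch(CPUState *cpu)
{
    AddressSpaceDispatch *d;

    rcu_read_lock();

    /* The dispatch table can be replaced concurrently by a memory-layout
     * update; atomic_rcu_read() pairs with the writer's atomic_rcu_set(). */
    d = atomic_rcu_read(&cpu->as->dispatch);

    /* ... translate an address through d and access the MemoryRegion ... */

    rcu_read_unlock();

    /* Past this point the MemoryRegion found above may be freed at any
     * time; until the rest of the series lands, callers therefore still
     * rely on the big QEMU lock to keep it alive. */
}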
parent 9d82b5a792
commit 79e2b9aecc

8 changed files with 63 additions and 12 deletions
cpu-exec.c | 25
@@ -26,6 +26,7 @@
 #include "qemu/timer.h"
 #include "exec/address-spaces.h"
 #include "exec/memory-internal.h"
+#include "qemu/rcu.h"
 
 /* -icount align implementation. */
@@ -146,8 +147,27 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
 
 void cpu_reload_memory_map(CPUState *cpu)
 {
+    AddressSpaceDispatch *d;
+
+    if (qemu_in_vcpu_thread()) {
+        /* Do not let the guest prolong the critical section as much as it
+         * desires.
+         *
+         * Currently, this is prevented by the I/O thread's periodic kicking
+         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
+         * but this will go away once TCG's execution moves out of the global
+         * mutex.
+         *
+         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
+         * only protects cpu->as->dispatch.  Since we reload it below, we can
+         * split the critical section.
+         */
+        rcu_read_unlock();
+        rcu_read_lock();
+    }
+
     /* The CPU and TLB are protected by the iothread lock.  */
-    AddressSpaceDispatch *d = cpu->as->dispatch;
+    d = atomic_rcu_read(&cpu->as->dispatch);
     cpu->memory_dispatch = d;
     tlb_flush(cpu, 1);
 }
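The comment in the hunk above refers to the update side of this commit, which lives in exec.c and is not shown on this page. Below is a rough, hypothetical sketch of the publish-then-reclaim pattern it relies on, expressed with QEMU's RCU helpers; the names switch_dispatch_sketch() and free_dispatch_sketch() are illustrative, not the identifiers used by the patch.

/* Hypothetical writer-side sketch, not the literal code of this commit.
 * It assumes AddressSpaceDispatch embeds a 'struct rcu_head rcu' as its
 * first member, which is what QEMU's call_rcu() macro expects. */
static void free_dispatch_sketch(AddressSpaceDispatch *d);

static void switch_dispatch_sketch(AddressSpace *as, AddressSpaceDispatch *next)
{
    AddressSpaceDispatch *cur = as->dispatch;

    /* Publish the new table; pairs with atomic_rcu_read() in
     * cpu_reload_memory_map() above. */
    atomic_rcu_set(&as->dispatch, next);

    if (cur) {
        /* Defer freeing until every reader that may still hold a pointer
         * into the old table (e.g. a vCPU inside cpu_exec's read-side
         * critical section) has left its critical section. */
        call_rcu(cur, free_dispatch_sketch, rcu);
    }
}

Using call_rcu() rather than synchronize_rcu() keeps the memory-layout update from blocking on readers; the old table is reclaimed asynchronously once a grace period has elapsed, and that grace period is exactly what cpu_reload_memory_map() avoids stalling by briefly dropping the read lock.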
@@ -362,6 +382,8 @@ int cpu_exec(CPUArchState *env)
      * an instruction scheduling constraint on modern architectures.  */
     smp_mb();
 
+    rcu_read_lock();
+
     if (unlikely(exit_request)) {
         cpu->exit_request = 1;
     }
@@ -564,6 +586,7 @@ int cpu_exec(CPUArchState *env)
     } /* for(;;) */
 
     cc->cpu_exec_exit(cpu);
+    rcu_read_unlock();
 
     /* fail safe : never use current_cpu outside cpu_exec() */
     current_cpu = NULL;