tcg: drop global lock during TCG code execution
This finally allows TCG to benefit from the iothread introduction: drop the global mutex while running pure TCG CPU code. Reacquire the lock when entering MMIO or PIO emulation, or when leaving the TCG loop.

We have to revert a few optimizations for the current TCG threading model, namely kicking the TCG thread in qemu_mutex_lock_iothread and not kicking it in qemu_cpu_kick. We also need to disable RAM block reordering until we have a more efficient locking mechanism at hand.

Still, a Linux x86 UP guest and my Musicpal ARM model boot fine here. These numbers demonstrate where we gain something:

20338 jan       20   0  331m  75m 6904 R   99  0.9   0:50.95 qemu-system-arm
20337 jan       20   0  331m  75m 6904 S   20  0.9   0:26.50 qemu-system-arm

The guest CPU was fully loaded, but the iothread could still run mostly independently on a second core. Without the patch we don't get beyond

32206 jan       20   0  330m  73m 7036 R   82  0.9   1:06.00 qemu-system-arm
32204 jan       20   0  330m  73m 7036 S   21  0.9   0:17.03 qemu-system-arm

We don't benefit significantly, though, when the guest is not fully loading a host CPU.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Message-Id: <1439220437-23957-10-git-send-email-fred.konrad@greensocs.com>
[FK: Rebase, fix qemu_devices_reset deadlock, rm address_space_* mutex]
Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
[EGC: fixed iothread lock for cpu-exec IRQ handling]
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: -smp single-threaded fix, clean commit msg, BQL fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
[PM: target-arm changes]
Acked-by: Peter Maydell <peter.maydell@linaro.org>
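In code, the rule the commit message describes is simply that a vCPU thread executes translated code without the BQL and brackets any access to BQL-protected state with the iothread lock helpers. Below is a minimal sketch of that bracketing, assuming a QEMU tree of this era: the qemu_mutex_lock_iothread(), qemu_mutex_unlock_iothread() and qemu_mutex_iothread_locked() helpers from "qemu/main-loop.h" are real APIs, while vcpu_touch_device_state() and do_bql_protected_work() are hypothetical placeholders, not part of this commit.

#include "qemu/osdep.h"
#include "qemu/main-loop.h"

/* Hypothetical work that still relies on BQL-protected device state. */
static void do_bql_protected_work(void)
{
    /* device/MMIO/PIO emulation would go here */
}

/* Called from a TCG vCPU thread, which now runs without the BQL. */
static void vcpu_touch_device_state(void)
{
    bool was_locked = qemu_mutex_iothread_locked();

    if (!was_locked) {
        qemu_mutex_lock_iothread();    /* reacquire the BQL for device emulation */
    }
    do_bql_protected_work();
    if (!was_locked) {
        qemu_mutex_unlock_iothread();  /* drop it again before resuming TCG code */
    }
}

The cpu-exec.c diff below applies exactly this bracketing, and in addition releases the lock on every early-return path out of the interrupt handling code.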
parent 791158d93b
commit 8d04fb55de

18 changed files with 166 additions and 49 deletions
cpu-exec.c | 23 +++++++++++++++++++++--
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -29,6 +29,7 @@
 #include "qemu/rcu.h"
 #include "exec/tb-hash.h"
 #include "exec/log.h"
+#include "qemu/main-loop.h"
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
 #include "hw/i386/apic.h"
 #endif
@@ -388,8 +389,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
         if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
             && replay_interrupt()) {
             X86CPU *x86_cpu = X86_CPU(cpu);
+            qemu_mutex_lock_iothread();
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
+            qemu_mutex_unlock_iothread();
         }
 #endif
         if (!cpu_has_work(cpu)) {
@@ -443,7 +446,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #else
             if (replay_exception()) {
                 CPUClass *cc = CPU_GET_CLASS(cpu);
+                qemu_mutex_lock_iothread();
                 cc->do_interrupt(cpu);
+                qemu_mutex_unlock_iothread();
                 cpu->exception_index = -1;
             } else if (!replay_has_interrupt()) {
                 /* give a chance to iothread in replay mode */
@@ -469,9 +474,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                                         TranslationBlock **last_tb)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    int interrupt_request = cpu->interrupt_request;
 
-    if (unlikely(interrupt_request)) {
+    if (unlikely(atomic_read(&cpu->interrupt_request))) {
+        int interrupt_request;
+        qemu_mutex_lock_iothread();
+        interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
             interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
@@ -479,6 +486,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
+            qemu_mutex_unlock_iothread();
             return true;
         }
         if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
@@ -488,6 +496,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
+            qemu_mutex_unlock_iothread();
             return true;
         }
 #if defined(TARGET_I386)
@@ -498,12 +507,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
+            qemu_mutex_unlock_iothread();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
+            qemu_mutex_unlock_iothread();
             return true;
         }
 #endif
@@ -526,7 +537,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                the program flow was changed */
             *last_tb = NULL;
         }
+
+        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
+        qemu_mutex_unlock_iothread();
     }
+
+
     if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
         atomic_set(&cpu->exit_request, 0);
         cpu->exception_index = EXCP_INTERRUPT;
@@ -643,6 +659,9 @@ int cpu_exec(CPUState *cpu)
 #endif /* buggy compiler */
         cpu->can_do_io = 1;
         tb_lock_reset();
+        if (qemu_mutex_iothread_locked()) {
+            qemu_mutex_unlock_iothread();
+        }
     }
 
     /* if an exception is pending, we execute it here */
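The cpu_handle_interrupt() hunks above follow a common pattern: peek at cpu->interrupt_request without the lock, and only when it looks non-zero take the BQL, re-read the value under the lock, and unlock again on every path that returns early. Here is a self-contained sketch of that pattern using C11 atomics and a pthread mutex instead of QEMU's helpers; pending_irq, handle_irq() and the flag value are illustrative only, not QEMU code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for the BQL */
static atomic_int pending_irq;                    /* stands in for cpu->interrupt_request */

static void handle_irq(int req)
{
    printf("handling interrupt_request 0x%x\n", req);
}

/* Mirrors the shape of cpu_handle_interrupt(): cheap lock-free check first,
 * then lock, re-read under the lock, and unlock on every exit path. */
static bool poll_interrupts(void)
{
    if (atomic_load_explicit(&pending_irq, memory_order_relaxed) == 0) {
        return false;                        /* fast path: nothing pending, no lock taken */
    }

    pthread_mutex_lock(&big_lock);
    int req = atomic_load(&pending_irq);     /* re-read now that we hold the lock */
    if (req == 0) {
        pthread_mutex_unlock(&big_lock);     /* another thread already handled it */
        return false;
    }
    handle_irq(req);
    atomic_store(&pending_irq, 0);
    pthread_mutex_unlock(&big_lock);
    return true;
}

int main(void)
{
    atomic_store(&pending_irq, 0x4);          /* pretend a device raised an interrupt */
    poll_interrupts();
    return 0;
}

Re-reading under the lock matters because another thread may raise or clear bits between the unlocked check and the lock acquisition; the unlocked read only serves to keep the common no-interrupt path lock-free.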