icount: fix cpu_restore_state_from_tb for non-tb-exit cases
In icount mode, instructions that access io memory spaces in the middle of a translation block invoke TB recompilation. After recompilation, such instructions become the last in the TB and are allowed to access io memory spaces.

When the code includes an instruction like the i386 'xchg eax, 0xffffd080', which accesses the APIC, QEMU goes into an infinite recompilation loop. This instruction performs two memory accesses - one read and one write. After the first access, the APIC calls cpu_report_tpr_access, which restores the CPU state to get the current eip. But cpu_restore_state_from_tb resets the cpu->can_do_io flag, which makes the second memory access invalid. Therefore the second memory access causes a recompilation of the block, and these operations repeat again and again.

This patch moves the resetting of the cpu->can_do_io flag from cpu_restore_state_from_tb to the cpu_loop_exit* functions. It also adds a parameter to cpu_restore_state which controls whether icount is restored. There is no need to restore icount when we only query the CPU state without breaking the TB; restoring it in such cases leads to an incorrect flow of virtual time. In most cases the new parameter is true (icount should be recalculated), but there are two cases, in i386 and openrisc, where the CPU state is only queried without the need to break the TB. This patch fixes both of these cases.

Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgaluk@ispras.ru>
Message-Id: <20180409091320.12504.35329.stgit@pasha-VirtualBox>
[rth: Make can_do_io setting unconditional; move from cpu_exec;
make cpu_loop_exit_{noexc,restore} call cpu_loop_exit.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
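A minimal sketch of the resulting call pattern, assuming the new flag is a plain boolean third argument as this patch adds (the helper functions below are hypothetical, the parameter name is only illustrative, and the authoritative prototype lives in include/exec/exec-all.h):

/* Restore guest state for the insn at host return address 'retaddr'.
 * The new flag tells the core whether the caller is about to leave the
 * TB, i.e. whether the instruction counter must be rolled back too.   */
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr, bool will_exit);

/* Hypothetical helper that raises a fault: it will leave the TB, so it
 * passes true and lets the loop-exit path (which now resets
 * cpu->can_do_io) do the unwinding.                                    */
static void hypothetical_raise_fault(CPUState *cs, uintptr_t retaddr)
{
    cpu_restore_state(cs, retaddr, true);   /* pc and icount rolled back */
    cs->exception_index = EXCP_INTERRUPT;   /* any exception number      */
    cpu_loop_exit(cs);                      /* resets can_do_io on exit  */
}

/* Hypothetical helper that only needs an up-to-date pc and keeps
 * executing inside the same TB (e.g. the i386 TPR report or the
 * openrisc NPC/PPC reads): it passes false so icount is left alone and
 * virtual time does not drift.                                         */
static uint64_t hypothetical_read_pc(CPUState *cs, CPUArchState *env,
                                     uintptr_t retaddr)
{
    cpu_restore_state(cs, retaddr, false);  /* pc only, icount untouched */
    return env->pc;                         /* arch-specific pc field    */
}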
Parent: 26d6a7c87b
Commit: afd46fcad2
17 changed files with 45 additions and 45 deletions
@@ -482,7 +482,7 @@ void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
     cs->exception_index = excp;
     env->error_code = error;
     if (retaddr) {
-        cpu_restore_state(cs, retaddr);
+        cpu_restore_state(cs, retaddr, true);
         /* Floating-point exceptions (our only users) point to the next PC.  */
         env->pc += 4;
     }
@@ -34,7 +34,7 @@ void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     uint64_t pc;
     uint32_t insn;
 
-    cpu_restore_state(cs, retaddr);
+    cpu_restore_state(cs, retaddr, true);
 
     pc = env->pc;
     insn = cpu_ldl_code(env, pc);
@@ -56,13 +56,11 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     AlphaCPU *cpu = ALPHA_CPU(cs);
     CPUAlphaState *env = &cpu->env;
 
-    cpu_restore_state(cs, retaddr);
-
     env->trap_arg0 = addr;
     env->trap_arg1 = access_type == MMU_DATA_STORE ? 1 : 0;
     cs->exception_index = EXCP_MCHK;
     env->error_code = 0;
-    cpu_loop_exit(cs);
+    cpu_loop_exit_restore(cs, retaddr);
 }
 
 /* try to fill the TLB and return an exception if error. If retaddr is
@@ -180,7 +180,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
         ARMCPU *cpu = ARM_CPU(cs);
 
         /* now we have a real cpu fault */
-        cpu_restore_state(cs, retaddr);
+        cpu_restore_state(cs, retaddr, true);
 
         deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
     }
@@ -195,7 +195,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     ARMMMUFaultInfo fi = {};
 
     /* now we have a real cpu fault */
-    cpu_restore_state(cs, retaddr);
+    cpu_restore_state(cs, retaddr, true);
 
     fi.type = ARMFault_Alignment;
     deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
@@ -215,7 +215,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     ARMMMUFaultInfo fi = {};
 
     /* now we have a real cpu fault */
-    cpu_restore_state(cs, retaddr);
+    cpu_restore_state(cs, retaddr, true);
 
     fi.ea = arm_extabort_type(response);
     fi.type = ARMFault_SyncExternal;
@@ -54,8 +54,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, int size,
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            if (cpu_restore_state(cs, retaddr)) {
-                /* Evaluate flags after retranslation. */
+            if (cpu_restore_state(cs, retaddr, true)) {
+                /* Evaluate flags after retranslation. */
                 helper_top_evaluate_flags(env);
             }
         }
@@ -991,7 +991,7 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
 
         cpu_interrupt(cs, CPU_INTERRUPT_TPR);
     } else if (tcg_enabled()) {
-        cpu_restore_state(cs, cs->mem_io_pc);
+        cpu_restore_state(cs, cs->mem_io_pc, false);
 
         apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
     }
@@ -584,7 +584,7 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
 {
     CPUState *cs = CPU(x86_env_get_cpu(env));
 
-    cpu_restore_state(cs, retaddr);
+    cpu_restore_state(cs, retaddr, true);
 
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                   PRIx64 ", " TARGET_FMT_lx ")!\n",
@@ -1056,7 +1056,7 @@ void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub)
         CPUState *cs = CPU(m68k_env_get_cpu(env));
 
         /* Recover PC and CC_OP for the beginning of the insn.  */
-        cpu_restore_state(cs, GETPC());
+        cpu_restore_state(cs, GETPC(), true);
 
         /* flags have been modified by gen_flush_flags() */
         env->cc_op = CC_OP_FLAGS;
@@ -1087,7 +1087,7 @@ void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
         CPUState *cs = CPU(m68k_env_get_cpu(env));
 
         /* Recover PC and CC_OP for the beginning of the insn.  */
-        cpu_restore_state(cs, GETPC());
+        cpu_restore_state(cs, GETPC(), true);
 
         /* flags have been modified by gen_flush_flags() */
         env->cc_op = CC_OP_FLAGS;
@@ -48,7 +48,7 @@ void helper_raise_exception(CPUMoxieState *env, int ex)
     /* Stash the exception type.  */
    env->sregs[2] = ex;
     /* Stash the address where the exception occurred.  */
-    cpu_restore_state(cs, GETPC());
+    cpu_restore_state(cs, GETPC(), true);
     env->sregs[5] = env->pc;
     /* Jump to the exception handline routine.  */
     env->pc = env->sregs[1];
@@ -46,7 +46,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
         break;
 
     case TO_SPR(0, 16): /* NPC */
-        cpu_restore_state(cs, GETPC());
+        cpu_restore_state(cs, GETPC(), true);
         /* ??? Mirror or1ksim in not trashing delayed branch state
            when "jumping" to the current instruction.  */
         if (env->pc != rb) {
@@ -146,7 +146,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
     case TO_SPR(8, 0):  /* PMR */
         env->pmr = rb;
         if (env->pmr & PMR_DME || env->pmr & PMR_SME) {
-            cpu_restore_state(cs, GETPC());
+            cpu_restore_state(cs, GETPC(), true);
             env->pc += 4;
             cs->halted = 1;
             raise_exception(cpu, EXCP_HALTED);
@@ -230,14 +230,14 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
         return env->evbar;
 
     case TO_SPR(0, 16): /* NPC (equals PC) */
-        cpu_restore_state(cs, GETPC());
+        cpu_restore_state(cs, GETPC(), false);
         return env->pc;
 
     case TO_SPR(0, 17): /* SR */
         return cpu_get_sr(env);
 
     case TO_SPR(0, 18): /* PPC */
-        cpu_restore_state(cs, GETPC());
+        cpu_restore_state(cs, GETPC(), false);
         return env->ppc;
 
     case TO_SPR(0, 32): /* EPCR */
@@ -31,7 +31,7 @@ raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin,
 {
     CPUState *cs = CPU(tricore_env_get_cpu(env));
     /* in case we come from a helper-call we need to restore the PC */
-    cpu_restore_state(cs, pc);
+    cpu_restore_state(cs, pc, true);
 
     /* Tin is loaded into d[15] */
     env->gpr_d[15] = tin;
@@ -52,7 +52,7 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
 
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
         !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
-        cpu_restore_state(CPU(cpu), retaddr);
+        cpu_restore_state(CPU(cpu), retaddr, true);
         HELPER(exception_cause_vaddr)(env,
                                       env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
     }
@@ -78,7 +78,7 @@ void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
                      paddr & TARGET_PAGE_MASK,
                      access, mmu_idx, page_size);
     } else {
-        cpu_restore_state(cs, retaddr);
+        cpu_restore_state(cs, retaddr, true);
         HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
     }
 }