accel/tcg: Introduce tb_pc and log_pc

The availability of tb->pc will shortly be conditional.
Introduce accessor functions to minimize ifdefs.

Pass around a known pc to places like tcg_gen_code,
where the caller must already have the value.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2022-08-15 15:16:06 -05:00
Commit: fbf59aad17 (parent: e4fdf9df5b)
21 changed files with 82 additions and 61 deletions
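
The hunks below use two new helpers whose definitions live in other files of
this patch and are not part of the excerpt. As a rough sketch of the intent
described above (not the verbatim definitions from the commit), they amount to:

/* Sketch only: hide the read of tb->pc so it can later become conditional. */
static inline target_ulong tb_pc(const TranslationBlock *tb)
{
    return tb->pc;
}

/*
 * Sketch only: the pc to use when logging or tracing a TB.  While tb->pc
 * is still unconditionally available, this can simply defer to tb_pc().
 */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    return tb_pc(tb);
}

Along the same lines, tcg_gen_code() gains an explicit pc argument (something
like tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)),
so callers that already know the pc pass it down rather than reading tb->pc again.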

accel/tcg/cpu-exec.c (excerpt; the remaining changed files are not shown here):

@@ -186,7 +186,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
     const TranslationBlock *tb = p;
     const struct tb_desc *desc = d;
 
-    if (tb->pc == desc->pc &&
+    if (tb_pc(tb) == desc->pc &&
         tb->page_addr[0] == desc->page_addr0 &&
         tb->cs_base == desc->cs_base &&
         tb->flags == desc->flags &&
@@ -271,12 +271,10 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     return tb;
 }
 
-static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
-                                const TranslationBlock *tb)
+static void log_cpu_exec(target_ulong pc, CPUState *cpu,
+                         const TranslationBlock *tb)
 {
-    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
-        && qemu_log_in_addr_range(pc)) {
+    if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [" TARGET_FMT_lx
                       "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
@@ -400,7 +398,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
         return tcg_code_gen_epilogue;
     }
 
-    log_cpu_exec(pc, cpu, tb);
+    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
+        log_cpu_exec(pc, cpu, tb);
+    }
 
     return tb->tc.ptr;
 }
@@ -423,7 +423,9 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
 
-    log_cpu_exec(itb->pc, cpu, itb);
+    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
+        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
+    }
 
     qemu_thread_jit_execute();
     ret = tcg_qemu_tb_exec(env, tb_ptr);
@@ -447,16 +449,20 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
          * of the start of the TB.
          */
         CPUClass *cc = CPU_GET_CLASS(cpu);
-        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
-                               "Stopped execution of TB chain before %p ["
-                               TARGET_FMT_lx "] %s\n",
-                               last_tb->tc.ptr, last_tb->pc,
-                               lookup_symbol(last_tb->pc));
         if (cc->tcg_ops->synchronize_from_tb) {
             cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
             assert(cc->set_pc);
-            cc->set_pc(cpu, last_tb->pc);
+            cc->set_pc(cpu, tb_pc(last_tb));
+        }
+
+        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+            target_ulong pc = log_pc(cpu, last_tb);
+            if (qemu_log_in_addr_range(pc)) {
+                qemu_log("Stopped execution of TB chain before %p ["
+                         TARGET_FMT_lx "] %s\n",
+                         last_tb->tc.ptr, pc, lookup_symbol(pc));
+            }
         }
     }
@@ -598,11 +604,8 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     qemu_spin_unlock(&tb_next->jmp_lock);
 
-    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
-                           "Linking TBs %p [" TARGET_FMT_lx
-                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
-                           tb->tc.ptr, tb->pc, n,
-                           tb_next->tc.ptr, tb_next->pc);
+    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
+                  tb->tc.ptr, n, tb_next->tc.ptr);
     return;
 
  out_unlock_next:
@@ -848,11 +851,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 }
 
 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
+                                    target_ulong pc,
                                     TranslationBlock **last_tb, int *tb_exit)
 {
     int32_t insns_left;
 
-    trace_exec_tb(tb, tb->pc);
+    trace_exec_tb(tb, pc);
     tb = cpu_tb_exec(cpu, tb, tb_exit);
     if (*tb_exit != TB_EXIT_REQUESTED) {
         *last_tb = tb;
@@ -1017,7 +1021,7 @@ int cpu_exec(CPUState *cpu)
                 tb_add_jump(last_tb, tb_exit, tb);
             }
 
-            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
 
             /* Try to align the host and virtual clocks
                if the guest is in advance */
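
For context on this last hunk: the pc being passed in is the value that
cpu_exec() has already computed in order to find or build the TB, so
forwarding it to cpu_loop_exec_tb() costs nothing extra. A simplified sketch
of that surrounding caller code (not part of the diff above, details elided):

    target_ulong pc, cs_base;
    uint32_t flags, cflags;

    /* The guest pc is read from CPU state up front ... */
    cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
    cflags = curr_cflags(cpu);   /* simplified; cflags_next_tb handling omitted */

    /* ... used to look up or translate the TB ... */
    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
        mmap_unlock();
    }

    /* ... and is therefore already on hand for the new parameter. */
    cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);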