target/cris: Convert to TranslatorOps

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2021-06-19 20:49:26 -07:00
parent 06188c8981
commit 330ca14bce


@@ -3114,17 +3114,12 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
  *
  */
 
-/* generate intermediate code for basic block 'tb'.  */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUCRISState *env = cs->env_ptr;
+    uint32_t tb_flags = dc->base.tb->flags;
     uint32_t pc_start;
-    unsigned int insn_len;
-    struct DisasContext ctx;
-    struct DisasContext *dc = &ctx;
-    uint32_t page_start;
-    target_ulong npc;
-    int num_insns;
 
     if (env->pregs[PR_VR] == 32) {
         dc->decoder = crisv32_decoder;
@@ -3134,150 +3129,174 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
         dc->clear_locked_irq = 1;
     }
 
-    /* Odd PC indicates that branch is rexecuting due to exception in the
+    /*
+     * Odd PC indicates that branch is rexecuting due to exception in the
      * delayslot, like in real hw.
      */
-    pc_start = tb->pc & ~1;
-    dc->base.tb = tb;
+    pc_start = dc->base.pc_first & ~1;
     dc->base.pc_first = pc_start;
     dc->base.pc_next = pc_start;
-    dc->base.is_jmp = DISAS_NEXT;
-    dc->base.singlestep_enabled = cs->singlestep_enabled;
 
     dc->cpu = env_archcpu(env);
     dc->ppc = pc_start;
     dc->pc = pc_start;
     dc->flags_uptodate = 1;
     dc->flagx_known = 1;
-    dc->flags_x = tb->flags & X_FLAG;
+    dc->flags_x = tb_flags & X_FLAG;
     dc->cc_x_uptodate = 0;
     dc->cc_mask = 0;
     dc->update_cc = 0;
     dc->clear_prefix = 0;
+    dc->cpustate_changed = 0;
 
     cris_update_cc_op(dc, CC_OP_FLAGS, 4);
     dc->cc_size_uptodate = -1;
 
     /* Decode TB flags.  */
-    dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
-            | X_FLAG | PFIX_FLAG);
-    dc->delayed_branch = !!(tb->flags & 7);
+    dc->tb_flags = tb_flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG);
+    dc->delayed_branch = !!(tb_flags & 7);
     if (dc->delayed_branch) {
         dc->jmp = JMP_INDIRECT;
     } else {
         dc->jmp = JMP_NOJMP;
     }
+}
 
-    dc->cpustate_changed = 0;
-
-    page_start = pc_start & TARGET_PAGE_MASK;
-    num_insns = 0;
-
-    gen_tb_start(tb);
-    do {
-        tcg_gen_insn_start(dc->delayed_branch == 1
-                           ? dc->ppc | 1 : dc->pc);
-        num_insns++;
-
-        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
-            cris_evaluate_flags(dc);
-            tcg_gen_movi_tl(env_pc, dc->pc);
-            t_gen_raise_exception(EXCP_DEBUG);
-            dc->base.is_jmp = DISAS_NORETURN;
-            /* The address covered by the breakpoint must be included in
-               [tb->pc, tb->pc + tb->size) in order to for it to be
-               properly cleared -- thus we increment the PC here so that
-               the logic setting tb->size below does the right thing.  */
-            dc->pc += 2;
-            break;
-        }
-
-        /* Pretty disas.  */
-        LOG_DIS("%8.8x:\t", dc->pc);
-
-        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
-            gen_io_start();
-        }
-        dc->clear_x = 1;
-
-        insn_len = dc->decoder(env, dc);
-        dc->ppc = dc->pc;
-        dc->pc += insn_len;
-        if (dc->clear_x) {
-            cris_clear_x_flag(dc);
-        }
-
-        /* Check for delayed branches here. If we do it before
-           actually generating any host code, the simulator will just
-           loop doing nothing for on this program location.  */
-        if (dc->delayed_branch) {
-            dc->delayed_branch--;
-            if (dc->delayed_branch == 0) {
-                if (tb->flags & 7) {
-                    t_gen_movi_env_TN(dslot, 0);
-                }
-                if (dc->cpustate_changed || !dc->flagx_known
-                    || (dc->flags_x != (tb->flags & X_FLAG))) {
-                    cris_store_direct_jmp(dc);
-                }
-
-                if (dc->clear_locked_irq) {
-                    dc->clear_locked_irq = 0;
-                    t_gen_movi_env_TN(locked_irq, 0);
-                }
-
-                if (dc->jmp == JMP_DIRECT_CC) {
-                    TCGLabel *l1 = gen_new_label();
-                    cris_evaluate_flags(dc);
-
-                    /* Conditional jmp.  */
-                    tcg_gen_brcondi_tl(TCG_COND_EQ,
-                                       env_btaken, 0, l1);
-                    gen_goto_tb(dc, 1, dc->jmp_pc);
-                    gen_set_label(l1);
-                    gen_goto_tb(dc, 0, dc->pc);
-                    dc->base.is_jmp = DISAS_NORETURN;
-                    dc->jmp = JMP_NOJMP;
-                } else if (dc->jmp == JMP_DIRECT) {
-                    cris_evaluate_flags(dc);
-                    gen_goto_tb(dc, 0, dc->jmp_pc);
-                    dc->base.is_jmp = DISAS_NORETURN;
-                    dc->jmp = JMP_NOJMP;
-                } else {
-                    TCGv c = tcg_const_tl(dc->pc);
-                    t_gen_cc_jmp(env_btarget, c);
-                    tcg_temp_free(c);
-                    dc->base.is_jmp = DISAS_JUMP;
-                }
-                break;
-            }
-        }
-
-        /* If we are rexecuting a branch due to exceptions on
-           delay slots don't break.  */
-        if (!(tb->pc & 1) && cs->singlestep_enabled) {
-            break;
-        }
-    } while (!dc->base.is_jmp && !dc->cpustate_changed
-             && !tcg_op_buf_full()
-             && !singlestep
-             && (dc->pc - page_start < TARGET_PAGE_SIZE)
-             && num_insns < max_insns);
+static void cris_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+    tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
+}
+
+static bool cris_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+                                     const CPUBreakpoint *bp)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+    cris_evaluate_flags(dc);
+    tcg_gen_movi_tl(env_pc, dc->pc);
+    t_gen_raise_exception(EXCP_DEBUG);
+    dc->base.is_jmp = DISAS_NORETURN;
+    /*
+     * The address covered by the breakpoint must be included in
+     * [tb->pc, tb->pc + tb->size) in order to for it to be
+     * properly cleared -- thus we increment the PC here so that
+     * the logic setting tb->size below does the right thing.
+     */
+    dc->pc += 2;
+    return true;
+}
+
+static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+    CPUCRISState *env = cs->env_ptr;
+    unsigned int insn_len;
+
+    /* Pretty disas.  */
+    LOG_DIS("%8.8x:\t", dc->pc);
+
+    dc->clear_x = 1;
+
+    insn_len = dc->decoder(env, dc);
+    dc->ppc = dc->pc;
+    dc->pc += insn_len;
+    dc->base.pc_next += insn_len;
+
+    if (dc->base.is_jmp == DISAS_NORETURN) {
+        return;
+    }
+
+    if (dc->clear_x) {
+        cris_clear_x_flag(dc);
+    }
+
+    /*
+     * Check for delayed branches here. If we do it before
+     * actually generating any host code, the simulator will just
+     * loop doing nothing for on this program location.
+     */
+    if (dc->delayed_branch && --dc->delayed_branch == 0) {
+        if (dc->base.tb->flags & 7) {
+            t_gen_movi_env_TN(dslot, 0);
+        }
+
+        if (dc->cpustate_changed
+            || !dc->flagx_known
+            || (dc->flags_x != (dc->base.tb->flags & X_FLAG))) {
+            cris_store_direct_jmp(dc);
+        }
+
+        if (dc->clear_locked_irq) {
+            dc->clear_locked_irq = 0;
+            t_gen_movi_env_TN(locked_irq, 0);
+        }
+
+        if (dc->jmp == JMP_DIRECT_CC) {
+            TCGLabel *l1 = gen_new_label();
+            cris_evaluate_flags(dc);
+
+            /* Conditional jmp.  */
+            tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
+            gen_goto_tb(dc, 1, dc->jmp_pc);
+            gen_set_label(l1);
+            gen_goto_tb(dc, 0, dc->pc);
+            dc->base.is_jmp = DISAS_NORETURN;
+            dc->jmp = JMP_NOJMP;
+        } else if (dc->jmp == JMP_DIRECT) {
+            cris_evaluate_flags(dc);
+            gen_goto_tb(dc, 0, dc->jmp_pc);
+            dc->base.is_jmp = DISAS_NORETURN;
+            dc->jmp = JMP_NOJMP;
+        } else {
+            TCGv c = tcg_const_tl(dc->pc);
+            t_gen_cc_jmp(env_btarget, c);
+            tcg_temp_free(c);
+            dc->base.is_jmp = DISAS_JUMP;
+        }
+    }
+
+    /* Force an update if the per-tb cpu state has changed.  */
+    if (dc->base.is_jmp == DISAS_NEXT
+        && (dc->cpustate_changed
+            || !dc->flagx_known
+            || (dc->flags_x != (dc->base.tb->flags & X_FLAG)))) {
+        dc->base.is_jmp = DISAS_UPDATE;
+        tcg_gen_movi_tl(env_pc, dc->pc);
+    }
+
+    /*
+     * FIXME: Only the first insn in the TB should cross a page boundary.
+     * If we can detect the length of the next insn easily, we should.
+     * In the meantime, simply stop when we do cross.
+     */
+    if (dc->base.is_jmp == DISAS_NEXT
+        && ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) != 0) {
+        dc->base.is_jmp = DISAS_TOO_MANY;
+    }
+}
+
+static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+    DisasJumpType is_jmp = dc->base.is_jmp;
+    target_ulong npc = dc->pc;
+
+    if (is_jmp == DISAS_NORETURN) {
+        /* If we have a broken branch+delayslot sequence, it's too late. */
+        assert(dc->delayed_branch != 1);
+        return;
+    }
 
     if (dc->clear_locked_irq) {
         t_gen_movi_env_TN(locked_irq, 0);
     }
 
-    npc = dc->pc;
-
-    /* Force an update if the per-tb cpu state has changed.  */
-    if (dc->base.is_jmp == DISAS_NEXT
-        && (dc->cpustate_changed || !dc->flagx_known
-            || (dc->flags_x != (tb->flags & X_FLAG)))) {
-        dc->base.is_jmp = DISAS_UPDATE;
-        tcg_gen_movi_tl(env_pc, npc);
-    }
     /* Broken branch+delayslot sequence.  */
     if (dc->delayed_branch == 1) {
         /* Set env->dslot to the size of the branch insn.  */
@@ -3287,45 +3306,57 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
     cris_evaluate_flags(dc);
 
-    if (unlikely(cs->singlestep_enabled)) {
-        if (dc->base.is_jmp == DISAS_NEXT) {
+    if (unlikely(dc->base.singlestep_enabled)) {
+        switch (is_jmp) {
+        case DISAS_TOO_MANY:
             tcg_gen_movi_tl(env_pc, npc);
-        }
-        t_gen_raise_exception(EXCP_DEBUG);
-    } else {
-        switch (dc->base.is_jmp) {
-        case DISAS_NEXT:
-            gen_goto_tb(dc, 1, npc);
-            break;
-        default:
+            /* fall through */
         case DISAS_JUMP:
         case DISAS_UPDATE:
-            /* indicate that the hash table must be used
-                   to find the next TB */
-            tcg_gen_exit_tb(NULL, 0);
-            break;
-        case DISAS_NORETURN:
-            /* nothing more to generate */
+            t_gen_raise_exception(EXCP_DEBUG);
+            return;
+        default:
             break;
         }
+        g_assert_not_reached();
     }
-    gen_tb_end(tb, num_insns);
 
-    tb->size = dc->pc - pc_start;
-    tb->icount = num_insns;
-
-#ifdef DEBUG_DISAS
-#if !DISAS_CRIS
-    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
-        && qemu_log_in_addr_range(pc_start)) {
-        FILE *logfile = qemu_log_lock();
-        qemu_log("--------------\n");
-        qemu_log("IN: %s\n", lookup_symbol(pc_start));
-        log_target_disas(cs, pc_start, dc->pc - pc_start);
-        qemu_log_unlock(logfile);
+    switch (is_jmp) {
+    case DISAS_TOO_MANY:
+        gen_goto_tb(dc, 0, npc);
+        break;
+    case DISAS_JUMP:
+    case DISAS_UPDATE:
+        /* Indicate that interupts must be re-evaluated before the next TB. */
+        tcg_gen_exit_tb(NULL, 0);
+        break;
+    default:
+        g_assert_not_reached();
     }
-#endif
-#endif
+}
+
+static void cris_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+    if (!DISAS_CRIS) {
+        qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+        log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+    }
+}
+
+static const TranslatorOps cris_tr_ops = {
+    .init_disas_context = cris_tr_init_disas_context,
+    .tb_start           = cris_tr_tb_start,
+    .insn_start         = cris_tr_insn_start,
+    .breakpoint_check   = cris_tr_breakpoint_check,
+    .translate_insn     = cris_tr_translate_insn,
+    .tb_stop            = cris_tr_tb_stop,
+    .disas_log          = cris_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+    DisasContext dc;
+    translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
 }
 
 void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)