target/sh4: convert to TranslatorOps

This was fairly straightforward since it had already been converted
to DisasContextBase; just had to add DISAS_TOO_MANY to the switch
in tb_stop.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Cc: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author:    Emilio G. Cota <cota@braap.org>
Date:      2018-02-15 21:15:25 -05:00
Committer: Richard Henderson <richard.henderson@linaro.org>
Parent:    b542683d77
Commit:    fd1b3d3864

@@ -2258,126 +2258,127 @@ static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
 }
 #endif
 
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
     CPUSH4State *env = cs->env_ptr;
-    DisasContext ctx;
-    target_ulong pc_start;
-    int num_insns;
-    int max_insns;
+    int bound;
 
-    pc_start = tb->pc;
-    ctx.base.pc_next = pc_start;
-    ctx.tbflags = (uint32_t)tb->flags;
-    ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
-    ctx.base.is_jmp = DISAS_NEXT;
-    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
+    ctx->tbflags = (uint32_t)ctx->base.tb->flags;
+    ctx->envflags = ctx->base.tb->flags & TB_FLAG_ENVFLAGS_MASK;
+    ctx->memidx = (ctx->tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
     /* We don't know if the delayed pc came from a dynamic or static branch,
        so assume it is a dynamic branch.  */
-    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
-    ctx.base.tb = tb;
-    ctx.base.singlestep_enabled = cs->singlestep_enabled;
-    ctx.features = env->features;
-    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
-    ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
-                 (ctx.tbflags & (1 << SR_RB))) * 0x10;
-    ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
-
-    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    max_insns = MIN(max_insns, TCG_MAX_INSNS);
+    ctx->delayed_pc = -1; /* use delayed pc from env pointer */
+    ctx->features = env->features;
+    ctx->has_movcal = (ctx->tbflags & TB_FLAG_PENDING_MOVCA);
+    ctx->gbank = ((ctx->tbflags & (1 << SR_MD)) &&
+                  (ctx->tbflags & (1 << SR_RB))) * 0x10;
+    ctx->fbank = ctx->tbflags & FPSCR_FR ? 0x10 : 0;
 
     /* Since the ISA is fixed-width, we can bound by the number
        of instructions remaining on the page.  */
-    num_insns = -(ctx.base.pc_next | TARGET_PAGE_MASK) / 2;
-    max_insns = MIN(max_insns, num_insns);
-
-    /* Single stepping means just that.  */
-    if (ctx.base.singlestep_enabled || singlestep) {
-        max_insns = 1;
-    }
-
-    gen_tb_start(tb);
-    num_insns = 0;
+    bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
+    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
+}
 
+static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
 #ifdef CONFIG_USER_ONLY
-    if (ctx.tbflags & GUSA_MASK) {
-        num_insns = decode_gusa(&ctx, env, &max_insns);
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    CPUSH4State *env = cs->env_ptr;
+
+    if (ctx->tbflags & GUSA_MASK) {
+        ctx->base.num_insns = decode_gusa(ctx, env, &ctx->base.max_insns);
     }
 #endif
+}
 
-    while (ctx.base.is_jmp == DISAS_NEXT
-           && num_insns < max_insns
-           && !tcg_op_buf_full()) {
-        tcg_gen_insn_start(ctx.base.pc_next, ctx.envflags);
-        num_insns++;
+static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-        if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) {
-            /* We have hit a breakpoint - make sure PC is up-to-date */
-            gen_save_cpu_state(&ctx, true);
-            gen_helper_debug(cpu_env);
-            ctx.base.is_jmp = DISAS_NORETURN;
-            /* The address covered by the breakpoint must be included in
-               [tb->pc, tb->pc + tb->size) in order to for it to be
-               properly cleared -- thus we increment the PC here so that
-               the logic setting tb->size below does the right thing.  */
-            ctx.base.pc_next += 2;
-            break;
-        }
+    tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
+}
 
-        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
-            gen_io_start();
-        }
+static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
+                                    const CPUBreakpoint *bp)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-        ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
-        decode_opc(&ctx);
-        ctx.base.pc_next += 2;
-    }
-    if (tb_cflags(tb) & CF_LAST_IO) {
-        gen_io_end();
-    }
+    /* We have hit a breakpoint - make sure PC is up-to-date */
+    gen_save_cpu_state(ctx, true);
+    gen_helper_debug(cpu_env);
+    ctx->base.is_jmp = DISAS_NORETURN;
+    /* The address covered by the breakpoint must be included in
+       [tb->pc, tb->pc + tb->size) in order to for it to be
+       properly cleared -- thus we increment the PC here so that
+       the logic setting tb->size below does the right thing.  */
+    ctx->base.pc_next += 2;
+    return true;
+}
 
-    if (ctx.tbflags & GUSA_EXCLUSIVE) {
+static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    CPUSH4State *env = cs->env_ptr;
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
+    decode_opc(ctx);
+    ctx->base.pc_next += 2;
+}
+
+static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    if (ctx->tbflags & GUSA_EXCLUSIVE) {
         /* Ending the region of exclusivity.  Clear the bits.  */
-        ctx.envflags &= ~GUSA_MASK;
+        ctx->envflags &= ~GUSA_MASK;
     }
 
-    switch (ctx.base.is_jmp) {
+    switch (ctx->base.is_jmp) {
     case DISAS_STOP:
-        gen_save_cpu_state(&ctx, true);
-        if (ctx.base.singlestep_enabled) {
+        gen_save_cpu_state(ctx, true);
+        if (ctx->base.singlestep_enabled) {
             gen_helper_debug(cpu_env);
         } else {
             tcg_gen_exit_tb(0);
         }
         break;
     case DISAS_NEXT:
-        gen_save_cpu_state(&ctx, false);
-        gen_goto_tb(&ctx, 0, ctx.base.pc_next);
+    case DISAS_TOO_MANY:
+        gen_save_cpu_state(ctx, false);
+        gen_goto_tb(ctx, 0, ctx->base.pc_next);
         break;
     case DISAS_NORETURN:
         break;
     default:
         g_assert_not_reached();
     }
+}
 
-    gen_tb_end(tb, num_insns);
+static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
+    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
 
-    tb->size = ctx.base.pc_next - pc_start;
-    tb->icount = num_insns;
+static const TranslatorOps sh4_tr_ops = {
+    .init_disas_context = sh4_tr_init_disas_context,
+    .tb_start           = sh4_tr_tb_start,
+    .insn_start         = sh4_tr_insn_start,
+    .breakpoint_check   = sh4_tr_breakpoint_check,
+    .translate_insn     = sh4_tr_translate_insn,
+    .tb_stop            = sh4_tr_tb_stop,
+    .disas_log          = sh4_tr_disas_log,
+};
 
-#ifdef DEBUG_DISAS
-    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
-        && qemu_log_in_addr_range(pc_start)) {
-        qemu_log_lock();
-        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
-        log_target_disas(cs, pc_start, ctx.base.pc_next - pc_start);
-        qemu_log("\n");
-        qemu_log_unlock();
-    }
-#endif
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    DisasContext ctx;
+
+    translator_loop(&sh4_tr_ops, &ctx.base, cs, tb);
 }
 
 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
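
For context: after this conversion the per-target hooks above are driven by
the common translator_loop() in accel/tcg/translator.c, rather than by a
hand-written loop in the target. The sketch below is a heavily simplified,
self-contained model of that dispatch (all names are hypothetical; only the
control flow is meant to mirror QEMU). It illustrates why tb_stop now has to
handle DISAS_TOO_MANY: the shared loop, not the target, decides when the TB
has hit its instruction budget, and it reports that decision through is_jmp.

/* toy_translator.c -- simplified model, not the real QEMU implementation */
#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for QEMU's DisasJumpType. */
typedef enum {
    DISAS_NEXT,      /* keep translating */
    DISAS_TOO_MANY,  /* the common loop hit max_insns */
    DISAS_NORETURN,  /* target already emitted an exit */
} DisasJumpType;

/* Stand-in for DisasContextBase. */
typedef struct {
    uint64_t pc_next;
    int num_insns;
    int max_insns;
    DisasJumpType is_jmp;
} DisasBase;

/* Stand-in for TranslatorOps: the hook table a target fills in. */
typedef struct {
    void (*init_disas_context)(DisasBase *db);
    void (*tb_start)(DisasBase *db);
    void (*insn_start)(DisasBase *db);
    void (*translate_insn)(DisasBase *db);
    void (*tb_stop)(DisasBase *db);
} TranslatorOpsModel;

/* The shared loop: the control flow this commit delegates to. */
static void translator_loop_model(const TranslatorOpsModel *ops, DisasBase *db)
{
    ops->init_disas_context(db);
    ops->tb_start(db);
    do {
        ops->insn_start(db);
        ops->translate_insn(db);
        db->num_insns++;
        /* The loop, not the target, decides the TB is full; the target
           then sees DISAS_TOO_MANY in its tb_stop hook. */
        if (db->is_jmp == DISAS_NEXT && db->num_insns >= db->max_insns) {
            db->is_jmp = DISAS_TOO_MANY;
        }
    } while (db->is_jmp == DISAS_NEXT);
    ops->tb_stop(db);
}

/* Minimal target hooks for a fixed-width, 2-byte ISA like SH-4. */
static void toy_init(DisasBase *db)       { db->pc_next = 0x1000; }
static void toy_tb_start(DisasBase *db)   { (void)db; }
static void toy_insn_start(DisasBase *db) { (void)db; }
static void toy_translate(DisasBase *db)  { db->pc_next += 2; }

static void toy_tb_stop(DisasBase *db)
{
    switch (db->is_jmp) {
    case DISAS_TOO_MANY: /* the new case this commit adds for sh4 */
        printf("TB full after %d insns; chain to pc=0x%llx\n",
               db->num_insns, (unsigned long long)db->pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        break;
    }
}

int main(void)
{
    const TranslatorOpsModel ops = {
        .init_disas_context = toy_init,
        .tb_start           = toy_tb_start,
        .insn_start         = toy_insn_start,
        .translate_insn     = toy_translate,
        .tb_stop            = toy_tb_stop,
    };
    DisasBase db = { .max_insns = 4, .is_jmp = DISAS_NEXT };

    translator_loop_model(&ops, &db);
    return 0;
}

In this model, four 2-byte instructions starting at 0x1000 exhaust
max_insns, so tb_stop runs with is_jmp == DISAS_TOO_MANY and prints
"TB full after 4 insns; chain to pc=0x1008" -- the same situation the
sh4 tb_stop hook now handles by falling through to gen_goto_tb().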