Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210629' into staging

TranslatorOps conversion for target/avr
TranslatorOps conversion for target/cris
TranslatorOps conversion for target/nios2
Simple vector operations on TCGv_i32
Host signal fixes for *BSD
Improvements to tcg bswap operations

# gpg: Signature made Tue 29 Jun 2021 19:51:03 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210629: (63 commits)
  tcg/riscv: Remove MO_BSWAP handling
  tcg/aarch64: Unset TCG_TARGET_HAS_MEMORY_BSWAP
  tcg/arm: Unset TCG_TARGET_HAS_MEMORY_BSWAP
  target/mips: Fix gen_mxu_s32ldd_s32lddr
  target/sh4: Improve swap.b translation
  target/i386: Improve bswap translation
  target/arm: Improve REVSH
  target/arm: Improve vector REV
  target/arm: Improve REV32
  tcg: Make use of bswap flags in tcg_gen_qemu_st_*
  tcg: Make use of bswap flags in tcg_gen_qemu_ld_*
  tcg: Add flags argument to tcg_gen_bswap16_*, tcg_gen_bswap32_i64
  tcg: Handle new bswap flags during optimize
  tcg/tci: Support bswap flags
  tcg/mips: Support bswap flags in tcg_out_bswap32
  tcg/mips: Support bswap flags in tcg_out_bswap16
  tcg/s390: Support bswap flags
  tcg/ppc: Use power10 byte-reverse instructions
  tcg/ppc: Support bswap flags
  tcg/ppc: Split out tcg_out_bswap64
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 67e25eed97
Peter Maydell, 2021-07-01 20:29:33 +01:00
32 changed files with 1458 additions and 1094 deletions

@ -5430,22 +5430,13 @@ static void handle_rev32(DisasContext *s, unsigned int sf,
unsigned int rn, unsigned int rd)
{
TCGv_i64 tcg_rd = cpu_reg(s, rd);
TCGv_i64 tcg_rn = cpu_reg(s, rn);
if (sf) {
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
/* bswap32_i64 requires zero high word */
tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
tcg_temp_free_i64(tcg_tmp);
tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
} else {
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
}
}
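/*
 * Quick standalone check of the identity used above, not QEMU code: a 64-bit
 * byteswap followed by a 32-bit rotate gives the same result as byteswapping
 * each 32-bit half in place, which is what REV32 requires.  Plain C using
 * GCC/Clang builtins.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rotr64(uint64_t x, unsigned r)
{
    return (x >> r) | (x << (64 - r));
}

int main(void)
{
    uint64_t x = 0x0011223344556677ull;
    uint64_t via_bswap64 = rotr64(__builtin_bswap64(x), 32);
    uint64_t per_word = ((uint64_t)__builtin_bswap32((uint32_t)(x >> 32)) << 32)
                      | __builtin_bswap32((uint32_t)x);

    printf("%016llx %016llx\n",
           (unsigned long long)via_bswap64, (unsigned long long)per_word);
    return via_bswap64 == per_word ? 0 : 1;
}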
@ -12453,10 +12444,10 @@ static void handle_rev(DisasContext *s, int opcode, bool u,
read_vec_element(s, tcg_tmp, rn, i, grp_size);
switch (grp_size) {
case MO_16:
tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
break;
case MO_32:
tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
break;
case MO_64:
tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);

@ -354,9 +354,7 @@ void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
tcg_gen_ext16u_i32(var, var);
tcg_gen_bswap16_i32(var, var);
tcg_gen_ext16s_i32(dest, var);
tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
}
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
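/*
 * For reference (my reading of the flags introduced by this series, not a
 * quote of the QEMU headers): TCG_BSWAP_IZ promises the input is already
 * zero-extended above the swapped width, TCG_BSWAP_OZ asks for a
 * zero-extended result and TCG_BSWAP_OS for a sign-extended one, which is
 * why gen_revsh's ext16u/bswap16/ext16s sequence collapses into a single
 * flagged call.  A standalone model with placeholder flag values:
 */
#include <stdint.h>

#define BSWAP_IZ  1   /* placeholder value: input already zero-extended */
#define BSWAP_OZ  2   /* placeholder value: zero-extend the output */
#define BSWAP_OS  4   /* placeholder value: sign-extend the output */

static uint32_t bswap16_flagged(uint32_t src, int flags)
{
    /* IZ only licenses skipping this masking; the model always masks. */
    uint32_t swapped = ((src & 0xff) << 8) | ((src >> 8) & 0xff);

    if (flags & BSWAP_OS) {
        return (uint32_t)(int32_t)(int16_t)swapped;   /* REVSH-style result */
    }
    if (flags & BSWAP_OZ) {
        return swapped & 0xffff;
    }
    return swapped;   /* bits 31..16 otherwise unspecified by the contract */
}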

@ -80,7 +80,7 @@ typedef struct DisasContext DisasContext;
/* This is the state at translation time. */
struct DisasContext {
TranslationBlock *tb;
DisasContextBase base;
CPUAVRState *env;
CPUState *cs;
@ -90,8 +90,6 @@ struct DisasContext {
/* Routine used to access memory */
int memidx;
int bstate;
int singlestep;
/*
* Some AVR instructions can cause the following instruction to be skipped
@ -106,7 +104,7 @@ struct DisasContext {
* used in the following manner (sketch)
*
* TCGLabel *skip_label = NULL;
* if (ctx.skip_cond != TCG_COND_NEVER) {
* if (ctx->skip_cond != TCG_COND_NEVER) {
* skip_label = gen_new_label();
* tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label);
* }
@ -116,7 +114,7 @@ struct DisasContext {
* free_skip_var0 = false;
* }
*
* translate(&ctx);
* translate(ctx);
*
* if (skip_label) {
* gen_set_label(skip_label);
@ -191,7 +189,7 @@ static bool avr_have_feature(DisasContext *ctx, int feature)
{
if (!avr_feature(ctx->env, feature)) {
gen_helper_unsupported(cpu_env);
ctx->bstate = DISAS_NORETURN;
ctx->base.is_jmp = DISAS_NORETURN;
return false;
}
return true;
@ -1011,13 +1009,13 @@ static void gen_jmp_ez(DisasContext *ctx)
{
tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
tcg_gen_or_tl(cpu_pc, cpu_pc, cpu_eind);
ctx->bstate = DISAS_LOOKUP;
ctx->base.is_jmp = DISAS_LOOKUP;
}
static void gen_jmp_z(DisasContext *ctx)
{
tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
ctx->bstate = DISAS_LOOKUP;
ctx->base.is_jmp = DISAS_LOOKUP;
}
static void gen_push_ret(DisasContext *ctx, int ret)
@ -1083,9 +1081,9 @@ static void gen_pop_ret(DisasContext *ctx, TCGv ret)
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
TranslationBlock *tb = ctx->tb;
const TranslationBlock *tb = ctx->base.tb;
if (ctx->singlestep == 0) {
if (!ctx->base.singlestep_enabled) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_exit_tb(tb, n);
@ -1094,7 +1092,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
gen_helper_debug(cpu_env);
tcg_gen_exit_tb(NULL, 0);
}
ctx->bstate = DISAS_NORETURN;
ctx->base.is_jmp = DISAS_NORETURN;
}
/*
@ -1254,7 +1252,7 @@ static bool trans_RET(DisasContext *ctx, arg_RET *a)
{
gen_pop_ret(ctx, cpu_pc);
ctx->bstate = DISAS_LOOKUP;
ctx->base.is_jmp = DISAS_LOOKUP;
return true;
}
@ -1272,7 +1270,7 @@ static bool trans_RETI(DisasContext *ctx, arg_RETI *a)
tcg_gen_movi_tl(cpu_If, 1);
/* Need to return to main loop to re-evaluate interrupts. */
ctx->bstate = DISAS_EXIT;
ctx->base.is_jmp = DISAS_EXIT;
return true;
}
@ -1484,7 +1482,7 @@ static bool trans_BRBC(DisasContext *ctx, arg_BRBC *a)
gen_goto_tb(ctx, 0, ctx->npc + a->imm);
gen_set_label(not_taken);
ctx->bstate = DISAS_CHAIN;
ctx->base.is_jmp = DISAS_CHAIN;
return true;
}
@ -1533,7 +1531,7 @@ static bool trans_BRBS(DisasContext *ctx, arg_BRBS *a)
gen_goto_tb(ctx, 0, ctx->npc + a->imm);
gen_set_label(not_taken);
ctx->bstate = DISAS_CHAIN;
ctx->base.is_jmp = DISAS_CHAIN;
return true;
}
@ -1610,7 +1608,7 @@ static TCGv gen_get_zaddr(void)
*/
static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
{
if (ctx->tb->flags & TB_FLAGS_FULL_ACCESS) {
if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
gen_helper_fullwr(cpu_env, data, addr);
} else {
tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
@ -1619,7 +1617,7 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
{
if (ctx->tb->flags & TB_FLAGS_FULL_ACCESS) {
if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
gen_helper_fullrd(data, cpu_env, addr);
} else {
tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
@ -2793,7 +2791,7 @@ static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
#ifdef BREAKPOINT_ON_BREAK
tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
gen_helper_debug(cpu_env);
ctx->bstate = DISAS_EXIT;
ctx->base.is_jmp = DISAS_EXIT;
#else
/* NOP */
#endif
@ -2819,7 +2817,7 @@ static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
{
gen_helper_sleep(cpu_env);
ctx->bstate = DISAS_NORETURN;
ctx->base.is_jmp = DISAS_NORETURN;
return true;
}
@ -2850,7 +2848,7 @@ static void translate(DisasContext *ctx)
if (!decode_insn(ctx, opcode)) {
gen_helper_unsupported(cpu_env);
ctx->bstate = DISAS_NORETURN;
ctx->base.is_jmp = DISAS_NORETURN;
}
}
@ -2899,112 +2897,134 @@ static bool canonicalize_skip(DisasContext *ctx)
return true;
}
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
static void gen_breakpoint(DisasContext *ctx)
{
CPUAVRState *env = cs->env_ptr;
DisasContext ctx = {
.tb = tb,
.cs = cs,
.env = env,
.memidx = 0,
.bstate = DISAS_NEXT,
.skip_cond = TCG_COND_NEVER,
.singlestep = cs->singlestep_enabled,
};
target_ulong pc_start = tb->pc / 2;
int num_insns = 0;
canonicalize_skip(ctx);
tcg_gen_movi_tl(cpu_pc, ctx->npc);
gen_helper_debug(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
if (tb->flags & TB_FLAGS_FULL_ACCESS) {
static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUAVRState *env = cs->env_ptr;
uint32_t tb_flags = ctx->base.tb->flags;
ctx->cs = cs;
ctx->env = env;
ctx->npc = ctx->base.pc_first / 2;
ctx->skip_cond = TCG_COND_NEVER;
if (tb_flags & TB_FLAGS_SKIP) {
ctx->skip_cond = TCG_COND_ALWAYS;
ctx->skip_var0 = cpu_skip;
}
if (tb_flags & TB_FLAGS_FULL_ACCESS) {
/*
* This flag is set by an ST/LD instruction; the TB will be regenerated ONLY
* with full mem/cpu access helpers instead of plain memory accesses
*/
max_insns = 1;
ctx->base.max_insns = 1;
}
if (ctx.singlestep) {
max_insns = 1;
}
static void avr_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
tcg_gen_insn_start(ctx->npc);
}
static bool avr_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_breakpoint(ctx);
return true;
}
static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
TCGLabel *skip_label = NULL;
/*
* This is due to some strange GDB behavior
* Let's assume main has address 0x100:
* b main - sets breakpoint at address 0x00000100 (code)
* b *0x100 - sets breakpoint at address 0x00800100 (data)
*
* The translator driver has already taken care of the code pointer.
*/
if (!ctx->base.singlestep_enabled &&
cpu_breakpoint_test(cs, OFFSET_DATA + ctx->base.pc_next, BP_ANY)) {
gen_breakpoint(ctx);
return;
}
gen_tb_start(tb);
ctx.npc = pc_start;
if (tb->flags & TB_FLAGS_SKIP) {
ctx.skip_cond = TCG_COND_ALWAYS;
ctx.skip_var0 = cpu_skip;
}
do {
TCGLabel *skip_label = NULL;
/* translate current instruction */
tcg_gen_insn_start(ctx.npc);
num_insns++;
/*
* this is due to some strange GDB behavior
* let's assume main has address 0x100
* b main - sets breakpoint at address 0x00000100 (code)
* b *0x100 - sets breakpoint at address 0x00800100 (data)
*/
if (unlikely(!ctx.singlestep &&
(cpu_breakpoint_test(cs, OFFSET_CODE + ctx.npc * 2, BP_ANY) ||
cpu_breakpoint_test(cs, OFFSET_DATA + ctx.npc * 2, BP_ANY)))) {
canonicalize_skip(&ctx);
tcg_gen_movi_tl(cpu_pc, ctx.npc);
gen_helper_debug(cpu_env);
goto done_generating;
/* Conditionally skip the next instruction, if indicated. */
if (ctx->skip_cond != TCG_COND_NEVER) {
skip_label = gen_new_label();
if (ctx->skip_var0 == cpu_skip) {
/*
* Copy cpu_skip so that we may zero it before the branch.
* This ensures that cpu_skip is non-zero after the label
* if and only if the skipped insn itself sets a skip.
*/
ctx->free_skip_var0 = true;
ctx->skip_var0 = tcg_temp_new();
tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
tcg_gen_movi_tl(cpu_skip, 0);
}
/* Conditionally skip the next instruction, if indicated. */
if (ctx.skip_cond != TCG_COND_NEVER) {
skip_label = gen_new_label();
if (ctx.skip_var0 == cpu_skip) {
/*
* Copy cpu_skip so that we may zero it before the branch.
* This ensures that cpu_skip is non-zero after the label
* if and only if the skipped insn itself sets a skip.
*/
ctx.free_skip_var0 = true;
ctx.skip_var0 = tcg_temp_new();
tcg_gen_mov_tl(ctx.skip_var0, cpu_skip);
tcg_gen_movi_tl(cpu_skip, 0);
}
if (ctx.skip_var1 == NULL) {
tcg_gen_brcondi_tl(ctx.skip_cond, ctx.skip_var0, 0, skip_label);
} else {
tcg_gen_brcond_tl(ctx.skip_cond, ctx.skip_var0,
ctx.skip_var1, skip_label);
ctx.skip_var1 = NULL;
}
if (ctx.free_skip_var0) {
tcg_temp_free(ctx.skip_var0);
ctx.free_skip_var0 = false;
}
ctx.skip_cond = TCG_COND_NEVER;
ctx.skip_var0 = NULL;
if (ctx->skip_var1 == NULL) {
tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0, 0, skip_label);
} else {
tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
ctx->skip_var1, skip_label);
ctx->skip_var1 = NULL;
}
translate(&ctx);
if (skip_label) {
canonicalize_skip(&ctx);
gen_set_label(skip_label);
if (ctx.bstate == DISAS_NORETURN) {
ctx.bstate = DISAS_CHAIN;
}
if (ctx->free_skip_var0) {
tcg_temp_free(ctx->skip_var0);
ctx->free_skip_var0 = false;
}
} while (ctx.bstate == DISAS_NEXT
&& num_insns < max_insns
&& (ctx.npc - pc_start) * 2 < TARGET_PAGE_SIZE - 4
&& !tcg_op_buf_full());
if (tb->cflags & CF_LAST_IO) {
gen_io_end();
ctx->skip_cond = TCG_COND_NEVER;
ctx->skip_var0 = NULL;
}
bool nonconst_skip = canonicalize_skip(&ctx);
translate(ctx);
switch (ctx.bstate) {
ctx->base.pc_next = ctx->npc * 2;
if (skip_label) {
canonicalize_skip(ctx);
gen_set_label(skip_label);
if (ctx->base.is_jmp == DISAS_NORETURN) {
ctx->base.is_jmp = DISAS_CHAIN;
}
}
if (ctx->base.is_jmp == DISAS_NEXT) {
target_ulong page_first = ctx->base.pc_first & TARGET_PAGE_MASK;
if ((ctx->base.pc_next - page_first) >= TARGET_PAGE_SIZE - 4) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
}
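/*
 * Aside, not part of the patch: AVR opcodes are 2 or 4 bytes, so ending the
 * TB once pc_next is within 4 bytes of the page end (the TARGET_PAGE_SIZE - 4
 * test above) guarantees that a following opcode, which may be 4 bytes long,
 * is never translated across the guest page boundary within this TB.
 */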
static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
bool nonconst_skip = canonicalize_skip(ctx);
switch (ctx->base.is_jmp) {
case DISAS_NORETURN:
assert(!nonconst_skip);
break;
@ -3013,19 +3033,19 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
case DISAS_CHAIN:
if (!nonconst_skip) {
/* Note gen_goto_tb checks singlestep. */
gen_goto_tb(&ctx, 1, ctx.npc);
gen_goto_tb(ctx, 1, ctx->npc);
break;
}
tcg_gen_movi_tl(cpu_pc, ctx.npc);
tcg_gen_movi_tl(cpu_pc, ctx->npc);
/* fall through */
case DISAS_LOOKUP:
if (!ctx.singlestep) {
if (!ctx->base.singlestep_enabled) {
tcg_gen_lookup_and_goto_ptr();
break;
}
/* fall through */
case DISAS_EXIT:
if (ctx.singlestep) {
if (ctx->base.singlestep_enabled) {
gen_helper_debug(cpu_env);
} else {
tcg_gen_exit_tb(NULL, 0);
@ -3034,24 +3054,28 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
default:
g_assert_not_reached();
}
}
done_generating:
gen_tb_end(tb, num_insns);
static void avr_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
tb->size = (ctx.npc - pc_start) * 2;
tb->icount = num_insns;
static const TranslatorOps avr_tr_ops = {
.init_disas_context = avr_tr_init_disas_context,
.tb_start = avr_tr_tb_start,
.insn_start = avr_tr_insn_start,
.breakpoint_check = avr_tr_breakpoint_check,
.translate_insn = avr_tr_translate_insn,
.tb_stop = avr_tr_tb_stop,
.disas_log = avr_tr_disas_log,
};
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(tb->pc)) {
FILE *fd;
fd = qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(tb->pc));
log_target_disas(cs, tb->pc, tb->size);
qemu_log("\n");
qemu_log_unlock(fd);
}
#endif
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext dc = { };
translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns);
}
void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,

@ -1,4 +1,4 @@
DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_2(raise_exception, noreturn, env, i32)
DEF_HELPER_2(tlb_flush_pid, void, env, i32)
DEF_HELPER_2(spc_write, void, env, i32)
DEF_HELPER_1(rfe, void, env)

@ -52,11 +52,17 @@
#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
#define BUG_ON(x) ({if (x) BUG();})
/* is_jmp field values */
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
#define DISAS_SWI DISAS_TARGET_3
/*
* Target-specific is_jmp field values
*/
/* Only pc was modified dynamically */
#define DISAS_JUMP DISAS_TARGET_0
/* Cpu state was modified dynamically, including pc */
#define DISAS_UPDATE DISAS_TARGET_1
/* Cpu state was modified dynamically, excluding pc -- use npc */
#define DISAS_UPDATE_NEXT DISAS_TARGET_2
/* PC update for delayed branch, see cpustate_changed otherwise */
#define DISAS_DBRANCH DISAS_TARGET_3
/* Used by the decoder. */
#define EXTRACT_FIELD(src, start, end) \
@ -85,6 +91,8 @@ static TCGv env_pc;
/* This is the state at translation time. */
typedef struct DisasContext {
DisasContextBase base;
CRISCPU *cpu;
target_ulong pc, ppc;
@ -112,8 +120,6 @@ typedef struct DisasContext {
int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not up-to-date. */
int flags_uptodate; /* Whether or not $ccs is up-to-date. */
int flagx_known; /* Whether or not flags_x has the x flag known at
translation time. */
int flags_x;
int clear_x; /* Clear x after this insn? */
@ -121,7 +127,6 @@ typedef struct DisasContext {
int clear_locked_irq; /* Clear the irq lockout. */
int cpustate_changed;
unsigned int tb_flags; /* tb dependent flags. */
int is_jmp;
#define JMP_NOJMP 0
#define JMP_DIRECT 1
@ -131,9 +136,6 @@ typedef struct DisasContext {
uint32_t jmp_pc;
int delayed_branch;
TranslationBlock *tb;
int singlestep_enabled;
} DisasContext;
static void gen_BUG(DisasContext *dc, const char *file, int line)
@ -141,14 +143,15 @@ static void gen_BUG(DisasContext *dc, const char *file, int line)
cpu_abort(CPU(dc->cpu), "%s:%d pc=%x\n", file, line, dc->pc);
}
static const char *regnames_v32[] =
static const char * const regnames_v32[] =
{
"$r0", "$r1", "$r2", "$r3",
"$r4", "$r5", "$r6", "$r7",
"$r8", "$r9", "$r10", "$r11",
"$r12", "$r13", "$sp", "$acr",
};
static const char *pregnames_v32[] =
static const char * const pregnames_v32[] =
{
"$bz", "$vr", "$pid", "$srs",
"$wz", "$exs", "$eda", "$mof",
@ -157,7 +160,7 @@ static const char *pregnames_v32[] =
};
/* We need this table to handle preg-moves with implicit width. */
static int preg_sizes[] = {
static const int preg_sizes[] = {
1, /* bz. */
1, /* vr. */
4, /* pid. */
@ -372,66 +375,26 @@ static inline void t_gen_add_flag(TCGv d, int flag)
static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
{
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
tcg_gen_andi_tl(c, c, C_FLAG);
tcg_gen_add_tl(d, d, c);
tcg_temp_free(c);
}
} else {
TCGv x, c;
if (dc->flags_x) {
TCGv c = tcg_temp_new();
x = tcg_temp_new();
c = tcg_temp_new();
t_gen_mov_TN_preg(x, PR_CCS);
tcg_gen_mov_tl(c, x);
/* Propagate carry into d if X is set. Branch free. */
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
tcg_gen_andi_tl(c, c, C_FLAG);
tcg_gen_andi_tl(x, x, X_FLAG);
tcg_gen_shri_tl(x, x, 4);
tcg_gen_and_tl(x, x, c);
tcg_gen_add_tl(d, d, x);
tcg_temp_free(x);
tcg_gen_add_tl(d, d, c);
tcg_temp_free(c);
}
}
static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
{
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
tcg_gen_andi_tl(c, c, C_FLAG);
tcg_gen_sub_tl(d, d, c);
tcg_temp_free(c);
}
} else {
TCGv x, c;
if (dc->flags_x) {
TCGv c = tcg_temp_new();
x = tcg_temp_new();
c = tcg_temp_new();
t_gen_mov_TN_preg(x, PR_CCS);
tcg_gen_mov_tl(c, x);
/* Propagate carry into d if X is set. Branch free. */
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
tcg_gen_andi_tl(c, c, C_FLAG);
tcg_gen_andi_tl(x, x, X_FLAG);
tcg_gen_shri_tl(x, x, 4);
tcg_gen_and_tl(x, x, c);
tcg_gen_sub_tl(d, d, x);
tcg_temp_free(x);
tcg_gen_sub_tl(d, d, c);
tcg_temp_free(c);
}
}
@ -479,9 +442,9 @@ static inline void t_gen_swapw(TCGv d, TCGv s)
((T0 >> 5) & 0x02020202) |
((T0 >> 7) & 0x01010101));
*/
static inline void t_gen_swapr(TCGv d, TCGv s)
static void t_gen_swapr(TCGv d, TCGv s)
{
struct {
static const struct {
int shift; /* LSL when positive, LSR when negative. */
uint32_t mask;
} bitrev[] = {
@ -517,25 +480,9 @@ static inline void t_gen_swapr(TCGv d, TCGv s)
tcg_temp_free(org_s);
}
static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
static bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
TCGLabel *l1 = gen_new_label();
/* Conditional jmp. */
tcg_gen_mov_tl(env_pc, pc_false);
tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
tcg_gen_mov_tl(env_pc, pc_true);
gen_set_label(l1);
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
(dc->ppc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
return ((dest ^ dc->base.pc_first) & TARGET_PAGE_MASK) == 0;
}
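/*
 * Standalone check of the XOR/mask idiom above, not QEMU code; it assumes a
 * 4 KiB TARGET_PAGE_MASK.  The test is true exactly when the jump target and
 * pc_first fall in the same guest page.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t mask = 0xfffff000u;            /* assumed page mask */
    assert(((0x1234u ^ 0x1ffcu) & mask) == 0);    /* same page: goto_tb is fine */
    assert(((0x1ffcu ^ 0x2000u) & mask) != 0);    /* crosses a page boundary */
    return 0;
}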
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
@ -543,20 +490,18 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(dc->tb, n);
tcg_gen_exit_tb(dc->base.tb, n);
} else {
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(NULL, 0);
tcg_gen_lookup_and_goto_ptr();
}
}
static inline void cris_clear_x_flag(DisasContext *dc)
{
if (dc->flagx_known && dc->flags_x) {
if (dc->flags_x) {
dc->flags_uptodate = 0;
}
dc->flagx_known = 1;
dc->flags_x = 0;
}
@ -641,12 +586,10 @@ static void cris_evaluate_flags(DisasContext *dc)
break;
}
if (dc->flagx_known) {
if (dc->flags_x) {
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
} else if (dc->cc_op == CC_OP_FLAGS) {
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
}
if (dc->flags_x) {
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
} else if (dc->cc_op == CC_OP_FLAGS) {
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
}
dc->flags_uptodate = 1;
}
@ -681,16 +624,11 @@ static void cris_update_cc_op(DisasContext *dc, int op, int size)
static inline void cris_update_cc_x(DisasContext *dc)
{
/* Save the x flag state at the time of the cc snapshot. */
if (dc->flagx_known) {
if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
return;
}
tcg_gen_movi_tl(cc_x, dc->flags_x);
dc->cc_x_uptodate = 2 | dc->flags_x;
} else {
tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
dc->cc_x_uptodate = 1;
if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
return;
}
tcg_gen_movi_tl(cc_x, dc->flags_x);
dc->cc_x_uptodate = 2 | dc->flags_x;
}
/* Update cc prior to executing ALU op. Needs source operands untouched. */
@ -1142,7 +1080,7 @@ static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
/* Conditional writes. We only support the kind where X and P are known
at translation time. */
if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
if (dc->flags_x && (dc->tb_flags & P_FLAG)) {
dc->postinc = 0;
cris_evaluate_flags(dc);
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
@ -1151,7 +1089,7 @@ static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
tcg_gen_qemu_st_tl(val, addr, mem_index, MO_TE + ctz32(size));
if (dc->flagx_known && dc->flags_x) {
if (dc->flags_x) {
cris_evaluate_flags(dc);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
}
@ -1288,7 +1226,7 @@ static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc,
#if DISAS_CRIS
static const char *cc_name(int cc)
{
static const char *cc_names[16] = {
static const char * const cc_names[16] = {
"cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
"ls", "hi", "ge", "lt", "gt", "le", "a", "p"
};
@ -1738,8 +1676,8 @@ static int dec_addc_r(CPUCRISState *env, DisasContext *dc)
LOG_DIS("addc $r%u, $r%u\n",
dc->op1, dc->op2);
cris_evaluate_flags(dc);
/* Set for this insn. */
dc->flagx_known = 1;
dc->flags_x = X_FLAG;
cris_cc_mask(dc, CC_MASK_NZVC);
@ -2026,7 +1964,6 @@ static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
}
if (flags & X_FLAG) {
dc->flagx_known = 1;
if (set) {
dc->flags_x = X_FLAG;
} else {
@ -2037,14 +1974,14 @@ static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
/* Break the TB if any of the SPI flag changes. */
if (flags & (P_FLAG | S_FLAG)) {
tcg_gen_movi_tl(env_pc, dc->pc + 2);
dc->is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_UPDATE;
dc->cpustate_changed = 1;
}
/* For the I flag, only act on posedge. */
if ((flags & I_FLAG)) {
tcg_gen_movi_tl(env_pc, dc->pc + 2);
dc->is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_UPDATE;
dc->cpustate_changed = 1;
}
@ -2490,7 +2427,6 @@ static int dec_addc_mr(CPUCRISState *env, DisasContext *dc)
cris_evaluate_flags(dc);
/* Set for this insn. */
dc->flagx_known = 1;
dc->flags_x = X_FLAG;
cris_alu_m_alloc_temps(t);
@ -2877,6 +2813,7 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
-offsetof(CRISCPU, env) + offsetof(CPUState, halted));
tcg_gen_movi_tl(env_pc, dc->pc + 2);
t_gen_raise_exception(EXCP_HLT);
dc->base.is_jmp = DISAS_NORETURN;
return 2;
}
@ -2886,14 +2823,16 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
LOG_DIS("rfe\n");
cris_evaluate_flags(dc);
gen_helper_rfe(cpu_env);
dc->is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_UPDATE;
dc->cpustate_changed = true;
break;
case 5:
/* rfn. */
LOG_DIS("rfn\n");
cris_evaluate_flags(dc);
gen_helper_rfn(cpu_env);
dc->is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_UPDATE;
dc->cpustate_changed = true;
break;
case 6:
LOG_DIS("break %d\n", dc->op1);
@ -2904,7 +2843,7 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
/* Breaks start at 16 in the exception vector. */
t_gen_movi_env_TN(trap_vector, dc->op1 + 16);
t_gen_raise_exception(EXCP_BREAK);
dc->is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_NORETURN;
break;
default:
printf("op2=%x\n", dc->op2);
@ -2934,7 +2873,7 @@ static int dec_null(CPUCRISState *env, DisasContext *dc)
return 2;
}
static struct decoder_info {
static const struct decoder_info {
struct {
uint32_t bits;
uint32_t mask;
@ -3122,17 +3061,12 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
*
*/
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUCRISState *env = cs->env_ptr;
uint32_t tb_flags = dc->base.tb->flags;
uint32_t pc_start;
unsigned int insn_len;
struct DisasContext ctx;
struct DisasContext *dc = &ctx;
uint32_t page_start;
target_ulong npc;
int num_insns;
if (env->pregs[PR_VR] == 32) {
dc->decoder = crisv32_decoder;
@ -3142,147 +3076,139 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
dc->clear_locked_irq = 1;
}
/* Odd PC indicates that branch is rexecuting due to exception in the
/*
* Odd PC indicates that the branch is re-executing due to an exception in the
* delay slot, like on real hardware.
*/
pc_start = tb->pc & ~1;
dc->cpu = env_archcpu(env);
dc->tb = tb;
pc_start = dc->base.pc_first & ~1;
dc->base.pc_first = pc_start;
dc->base.pc_next = pc_start;
dc->is_jmp = DISAS_NEXT;
dc->cpu = env_archcpu(env);
dc->ppc = pc_start;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->flags_uptodate = 1;
dc->flagx_known = 1;
dc->flags_x = tb->flags & X_FLAG;
dc->flags_x = tb_flags & X_FLAG;
dc->cc_x_uptodate = 0;
dc->cc_mask = 0;
dc->update_cc = 0;
dc->clear_prefix = 0;
dc->cpustate_changed = 0;
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
dc->cc_size_uptodate = -1;
/* Decode TB flags. */
dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
| X_FLAG | PFIX_FLAG);
dc->delayed_branch = !!(tb->flags & 7);
dc->tb_flags = tb_flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG);
dc->delayed_branch = !!(tb_flags & 7);
if (dc->delayed_branch) {
dc->jmp = JMP_INDIRECT;
} else {
dc->jmp = JMP_NOJMP;
}
}
dc->cpustate_changed = 0;
static void cris_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
page_start = pc_start & TARGET_PAGE_MASK;
num_insns = 0;
static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
gen_tb_start(tb);
do {
tcg_gen_insn_start(dc->delayed_branch == 1
? dc->ppc | 1 : dc->pc);
num_insns++;
tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
}
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc);
t_gen_raise_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
dc->pc += 2;
break;
}
static bool cris_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc);
t_gen_raise_exception(EXCP_DEBUG);
dc->base.is_jmp = DISAS_NORETURN;
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
dc->pc += 2;
return true;
}
if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
dc->clear_x = 1;
static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUCRISState *env = cs->env_ptr;
unsigned int insn_len;
insn_len = dc->decoder(env, dc);
dc->ppc = dc->pc;
dc->pc += insn_len;
if (dc->clear_x) {
cris_clear_x_flag(dc);
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
/* Check for delayed branches here. If we do it before
actually generating any host code, the simulator will just
loop doing nothing for on this program location. */
if (dc->delayed_branch) {
dc->delayed_branch--;
if (dc->delayed_branch == 0) {
if (tb->flags & 7) {
t_gen_movi_env_TN(dslot, 0);
}
if (dc->cpustate_changed || !dc->flagx_known
|| (dc->flags_x != (tb->flags & X_FLAG))) {
cris_store_direct_jmp(dc);
}
dc->clear_x = 1;
if (dc->clear_locked_irq) {
dc->clear_locked_irq = 0;
t_gen_movi_env_TN(locked_irq, 0);
}
insn_len = dc->decoder(env, dc);
dc->ppc = dc->pc;
dc->pc += insn_len;
dc->base.pc_next += insn_len;
if (dc->jmp == JMP_DIRECT_CC) {
TCGLabel *l1 = gen_new_label();
cris_evaluate_flags(dc);
if (dc->base.is_jmp == DISAS_NORETURN) {
return;
}
/* Conditional jmp. */
tcg_gen_brcondi_tl(TCG_COND_EQ,
env_btaken, 0, l1);
gen_goto_tb(dc, 1, dc->jmp_pc);
gen_set_label(l1);
gen_goto_tb(dc, 0, dc->pc);
dc->is_jmp = DISAS_TB_JUMP;
dc->jmp = JMP_NOJMP;
} else if (dc->jmp == JMP_DIRECT) {
cris_evaluate_flags(dc);
gen_goto_tb(dc, 0, dc->jmp_pc);
dc->is_jmp = DISAS_TB_JUMP;
dc->jmp = JMP_NOJMP;
} else {
TCGv c = tcg_const_tl(dc->pc);
t_gen_cc_jmp(env_btarget, c);
tcg_temp_free(c);
dc->is_jmp = DISAS_JUMP;
}
break;
}
}
if (dc->clear_x) {
cris_clear_x_flag(dc);
}
/* If we are rexecuting a branch due to exceptions on
delay slots don't break. */
if (!(tb->pc & 1) && cs->singlestep_enabled) {
break;
}
} while (!dc->is_jmp && !dc->cpustate_changed
&& !tcg_op_buf_full()
&& !singlestep
&& (dc->pc - page_start < TARGET_PAGE_SIZE)
&& num_insns < max_insns);
/*
* All branches are delayed branches, handled immediately below.
* We don't expect to see odd combinations of exit conditions.
*/
assert(dc->base.is_jmp == DISAS_NEXT || dc->cpustate_changed);
if (dc->delayed_branch && --dc->delayed_branch == 0) {
dc->base.is_jmp = DISAS_DBRANCH;
return;
}
if (dc->base.is_jmp != DISAS_NEXT) {
return;
}
/* Force an update if the per-tb cpu state has changed. */
if (dc->cpustate_changed) {
dc->base.is_jmp = DISAS_UPDATE_NEXT;
return;
}
/*
* FIXME: Only the first insn in the TB should cross a page boundary.
* If we can detect the length of the next insn easily, we should.
* In the meantime, simply stop when we do cross.
*/
if ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
}
static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
DisasJumpType is_jmp = dc->base.is_jmp;
target_ulong npc = dc->pc;
if (is_jmp == DISAS_NORETURN) {
/* If we have a broken branch+delayslot sequence, it's too late. */
assert(dc->delayed_branch != 1);
return;
}
if (dc->clear_locked_irq) {
t_gen_movi_env_TN(locked_irq, 0);
}
npc = dc->pc;
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
&& (dc->cpustate_changed || !dc->flagx_known
|| (dc->flags_x != (tb->flags & X_FLAG)))) {
dc->is_jmp = DISAS_UPDATE;
tcg_gen_movi_tl(env_pc, npc);
}
/* Broken branch+delayslot sequence. */
if (dc->delayed_branch == 1) {
/* Set env->dslot to the size of the branch insn. */
@ -3292,54 +3218,123 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
cris_evaluate_flags(dc);
if (unlikely(cs->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
tcg_gen_movi_tl(env_pc, npc);
/* Evaluate delayed branch destination and fold to another is_jmp case. */
if (is_jmp == DISAS_DBRANCH) {
if (dc->base.tb->flags & 7) {
t_gen_movi_env_TN(dslot, 0);
}
t_gen_raise_exception(EXCP_DEBUG);
} else {
switch (dc->is_jmp) {
case DISAS_NEXT:
gen_goto_tb(dc, 1, npc);
switch (dc->jmp) {
case JMP_DIRECT:
npc = dc->jmp_pc;
is_jmp = dc->cpustate_changed ? DISAS_UPDATE_NEXT : DISAS_TOO_MANY;
break;
case JMP_DIRECT_CC:
/*
* Use a conditional branch if either taken or not-taken path
* can use goto_tb. If neither can, then treat it as indirect.
*/
if (likely(!dc->base.singlestep_enabled)
&& likely(!dc->cpustate_changed)
&& (use_goto_tb(dc, dc->jmp_pc) || use_goto_tb(dc, npc))) {
TCGLabel *not_taken = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, not_taken);
gen_goto_tb(dc, 1, dc->jmp_pc);
gen_set_label(not_taken);
/* not-taken case handled below. */
is_jmp = DISAS_TOO_MANY;
break;
}
tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
/* fall through */
case JMP_INDIRECT:
tcg_gen_movcond_tl(TCG_COND_NE, env_pc,
env_btaken, tcg_constant_tl(0),
env_btarget, tcg_constant_tl(npc));
is_jmp = dc->cpustate_changed ? DISAS_UPDATE : DISAS_JUMP;
/*
* We have now consumed btaken and btarget. Hint to the
* tcg compiler that the writeback to env may be dropped.
*/
tcg_gen_discard_tl(env_btaken);
tcg_gen_discard_tl(env_btarget);
break;
default:
g_assert_not_reached();
}
}
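/*
 * Rough C model of the branch-free select above (not QEMU code):
 *     pc = (btaken != 0) ? btarget : npc;
 * after which btaken and btarget are dead, hence the discards.
 */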
if (unlikely(dc->base.singlestep_enabled)) {
switch (is_jmp) {
case DISAS_TOO_MANY:
case DISAS_UPDATE_NEXT:
tcg_gen_movi_tl(env_pc, npc);
/* fall through */
case DISAS_JUMP:
case DISAS_UPDATE:
/* indicate that the hash table must be used
to find the next TB */
tcg_gen_exit_tb(NULL, 0);
break;
case DISAS_SWI:
case DISAS_TB_JUMP:
/* nothing more to generate */
t_gen_raise_exception(EXCP_DEBUG);
return;
default:
break;
}
g_assert_not_reached();
}
gen_tb_end(tb, num_insns);
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
#if !DISAS_CRIS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
FILE *logfile = qemu_log_lock();
qemu_log("--------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start);
qemu_log_unlock(logfile);
switch (is_jmp) {
case DISAS_TOO_MANY:
gen_goto_tb(dc, 0, npc);
break;
case DISAS_UPDATE_NEXT:
tcg_gen_movi_tl(env_pc, npc);
/* fall through */
case DISAS_JUMP:
tcg_gen_lookup_and_goto_ptr();
break;
case DISAS_UPDATE:
/* Indicate that interrupts must be re-evaluated before the next TB. */
tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
}
#endif
#endif
}
static void cris_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
if (!DISAS_CRIS) {
qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
}
static const TranslatorOps cris_tr_ops = {
.init_disas_context = cris_tr_init_disas_context,
.tb_start = cris_tr_tb_start,
.insn_start = cris_tr_insn_start,
.breakpoint_check = cris_tr_breakpoint_check,
.translate_insn = cris_tr_translate_insn,
.tb_stop = cris_tr_tb_stop,
.disas_log = cris_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext dc;
translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
}
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
const char **regnames;
const char **pregnames;
const char * const *regnames;
const char * const *pregnames;
int i;
if (!env) {

@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "crisv10-decode.h"
static const char *regnames_v10[] =
static const char * const regnames_v10[] =
{
"$r0", "$r1", "$r2", "$r3",
"$r4", "$r5", "$r6", "$r7",
@ -29,7 +29,7 @@ static const char *regnames_v10[] =
"$r12", "$r13", "$sp", "$pc",
};
static const char *pregnames_v10[] =
static const char * const pregnames_v10[] =
{
"$bz", "$vr", "$p2", "$p3",
"$wz", "$ccr", "$p6-prefix", "$mof",
@ -38,7 +38,7 @@ static const char *pregnames_v10[] =
};
/* We need this table to handle preg-moves with implicit width. */
static int preg_sizes_v10[] = {
static const int preg_sizes_v10[] = {
1, /* bz. */
1, /* vr. */
1, /* pid. */
@ -61,6 +61,7 @@ static inline void cris_illegal_insn(DisasContext *dc)
{
qemu_log_mask(LOG_GUEST_ERROR, "illegal insn at pc=%x\n", dc->pc);
t_gen_raise_exception(EXCP_BREAK);
dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
@ -105,9 +106,8 @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
cris_store_direct_jmp(dc);
}
/* Conditional writes. We only support the kind were X is known
at translation time. */
if (dc->flagx_known && dc->flags_x) {
/* Conditional writes. */
if (dc->flags_x) {
gen_store_v10_conditional(dc, addr, val, size, mem_index);
return;
}
@ -375,7 +375,6 @@ static unsigned int dec10_setclrf(DisasContext *dc)
if (flags & X_FLAG) {
dc->flagx_known = 1;
if (set)
dc->flags_x = X_FLAG;
else
@ -1169,7 +1168,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
t_gen_mov_env_TN(trap_vector, c);
tcg_temp_free(c);
t_gen_raise_exception(EXCP_BREAK);
dc->is_jmp = DISAS_UPDATE;
dc->base.is_jmp = DISAS_NORETURN;
return insn_len;
}
LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
@ -1277,7 +1276,7 @@ static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
if (dc->clear_prefix && dc->tb_flags & PFIX_FLAG) {
dc->tb_flags &= ~PFIX_FLAG;
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~PFIX_FLAG);
if (dc->tb_flags != dc->tb->flags) {
if (dc->tb_flags != dc->base.tb->flags) {
dc->cpustate_changed = 1;
}
}

@ -7195,17 +7195,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == MO_64) {
gen_op_mov_v_reg(s, MO_64, s->T0, reg);
tcg_gen_bswap64_i64(s->T0, s->T0);
gen_op_mov_reg_v(s, MO_64, reg, s->T0);
} else
#endif
{
gen_op_mov_v_reg(s, MO_32, s->T0, reg);
tcg_gen_ext32u_tl(s->T0, s->T0);
tcg_gen_bswap32_tl(s->T0, s->T0);
gen_op_mov_reg_v(s, MO_32, reg, s->T0);
tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
break;
}
#endif
tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
break;
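/*
 * Rough C model of the 32-bit case (not QEMU code): a 32-bit destination
 * write zero-extends to 64 bits in long mode, which is exactly the
 * TCG_BSWAP_OZ guarantee requested above:
 *     reg = (uint64_t)__builtin_bswap32((uint32_t)reg);
 */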
case 0xd6: /* salc */
if (CODE64(s))

@ -857,12 +857,8 @@ static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx)
tcg_gen_ori_tl(t1, t1, 0xFFFFF000);
}
tcg_gen_add_tl(t1, t0, t1);
tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_SL);
tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP));
if (sel == 1) {
/* S32LDDR */
tcg_gen_bswap32_tl(t1, t1);
}
gen_store_mxu_gpr(t1, XRa);
tcg_temp_free(t0);
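/*
 * Reading of the MemOp trick above (not part of the patch): sel is 0 for
 * S32LDD and 1 for S32LDDR, so XOR-ing in sel * MO_BSWAP flips the
 * endianness of the load only for the reversed variant, replacing the
 * separate tcg_gen_bswap32_tl that previously followed the load.
 */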

@ -37,7 +37,6 @@
/* is_jmp field values */
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
#define INSTRUCTION_FLG(func, flags) { (func), (flags) }
#define INSTRUCTION(func) \
@ -98,16 +97,14 @@
}
typedef struct DisasContext {
TCGv_ptr cpu_env;
TCGv *cpu_R;
DisasContextBase base;
TCGv_i32 zero;
int is_jmp;
target_ulong pc;
TranslationBlock *tb;
int mem_idx;
bool singlestep_enabled;
} DisasContext;
static TCGv cpu_R[NUM_CORE_REGS];
typedef struct Nios2Instruction {
void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags);
uint32_t flags;
@ -136,7 +133,7 @@ static TCGv load_zero(DisasContext *dc)
static TCGv load_gpr(DisasContext *dc, uint8_t reg)
{
if (likely(reg != R_ZERO)) {
return dc->cpu_R[reg];
return cpu_R[reg];
} else {
return load_zero(dc);
}
@ -147,20 +144,20 @@ static void t_gen_helper_raise_exception(DisasContext *dc,
{
TCGv_i32 tmp = tcg_const_i32(index);
tcg_gen_movi_tl(dc->cpu_R[R_PC], dc->pc);
gen_helper_raise_exception(dc->cpu_env, tmp);
tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
dc->is_jmp = DISAS_NORETURN;
dc->base.is_jmp = DISAS_NORETURN;
}
static bool use_goto_tb(DisasContext *dc, uint32_t dest)
{
if (unlikely(dc->singlestep_enabled)) {
if (unlikely(dc->base.singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
@ -168,14 +165,14 @@ static bool use_goto_tb(DisasContext *dc, uint32_t dest)
static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
{
TranslationBlock *tb = dc->tb;
const TranslationBlock *tb = dc->base.tb;
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
tcg_gen_movi_tl(cpu_R[R_PC], dest);
tcg_gen_exit_tb(tb, n);
} else {
tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
tcg_gen_movi_tl(cpu_R[R_PC], dest);
tcg_gen_exit_tb(NULL, 0);
}
}
@ -187,7 +184,7 @@ static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags)
static void gen_check_supervisor(DisasContext *dc)
{
if (dc->tb->flags & CR_STATUS_U) {
if (dc->base.tb->flags & CR_STATUS_U) {
/* CPU in user mode, privileged instruction called, stop. */
t_gen_helper_raise_exception(dc, EXCP_SUPERI);
}
@ -209,12 +206,12 @@ static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
{
J_TYPE(instr, code);
gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2));
dc->is_jmp = DISAS_TB_JUMP;
dc->base.is_jmp = DISAS_NORETURN;
}
static void call(DisasContext *dc, uint32_t code, uint32_t flags)
{
tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4);
tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
jmpi(dc, code, flags);
}
@ -236,7 +233,7 @@ static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags)
* the Nios2 CPU.
*/
if (likely(instr.b != R_ZERO)) {
data = dc->cpu_R[instr.b];
data = cpu_R[instr.b];
} else {
data = tcg_temp_new();
}
@ -268,8 +265,8 @@ static void br(DisasContext *dc, uint32_t code, uint32_t flags)
{
I_TYPE(instr, code);
gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16.s & -4));
dc->is_jmp = DISAS_TB_JUMP;
gen_goto_tb(dc, 0, dc->base.pc_next + (instr.imm16.s & -4));
dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
@ -277,11 +274,11 @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
I_TYPE(instr, code);
TCGLabel *l1 = gen_new_label();
tcg_gen_brcond_tl(flags, dc->cpu_R[instr.a], dc->cpu_R[instr.b], l1);
gen_goto_tb(dc, 0, dc->pc + 4);
tcg_gen_brcond_tl(flags, cpu_R[instr.a], cpu_R[instr.b], l1);
gen_goto_tb(dc, 0, dc->base.pc_next);
gen_set_label(l1);
gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16.s & -4));
dc->is_jmp = DISAS_TB_JUMP;
gen_goto_tb(dc, 1, dc->base.pc_next + (instr.imm16.s & -4));
dc->base.is_jmp = DISAS_NORETURN;
}
/* Comparison instructions */
@ -289,8 +286,7 @@ static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
{ \
I_TYPE(instr, (code)); \
tcg_gen_setcondi_tl(flags, (dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \
(op3)); \
tcg_gen_setcondi_tl(flags, cpu_R[instr.b], cpu_R[instr.a], (op3)); \
}
gen_i_cmpxx(gen_cmpxxsi, instr.imm16.s)
@ -304,10 +300,9 @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \
return; \
} else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \
tcg_gen_movi_tl(dc->cpu_R[instr.b], (resimm) ? (op3) : 0); \
tcg_gen_movi_tl(cpu_R[instr.b], (resimm) ? (op3) : 0); \
} else { \
tcg_gen_##insn##_tl((dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \
(op3)); \
tcg_gen_##insn##_tl(cpu_R[instr.b], cpu_R[instr.a], (op3)); \
} \
}
@ -402,26 +397,26 @@ static const Nios2Instruction i_type_instructions[] = {
*/
static void eret(DisasContext *dc, uint32_t code, uint32_t flags)
{
tcg_gen_mov_tl(dc->cpu_R[CR_STATUS], dc->cpu_R[CR_ESTATUS]);
tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_EA]);
tcg_gen_mov_tl(cpu_R[CR_STATUS], cpu_R[CR_ESTATUS]);
tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_EA]);
dc->is_jmp = DISAS_JUMP;
dc->base.is_jmp = DISAS_JUMP;
}
/* PC <- ra */
static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
{
tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_RA]);
tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_RA]);
dc->is_jmp = DISAS_JUMP;
dc->base.is_jmp = DISAS_JUMP;
}
/* PC <- ba */
static void bret(DisasContext *dc, uint32_t code, uint32_t flags)
{
tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_BA]);
tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_BA]);
dc->is_jmp = DISAS_JUMP;
dc->base.is_jmp = DISAS_JUMP;
}
/* PC <- rA */
@ -429,9 +424,9 @@ static void jmp(DisasContext *dc, uint32_t code, uint32_t flags)
{
R_TYPE(instr, code);
tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a));
tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
dc->is_jmp = DISAS_JUMP;
dc->base.is_jmp = DISAS_JUMP;
}
/* rC <- PC + 4 */
@ -440,7 +435,7 @@ static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags)
R_TYPE(instr, code);
if (likely(instr.c != R_ZERO)) {
tcg_gen_movi_tl(dc->cpu_R[instr.c], dc->pc + 4);
tcg_gen_movi_tl(cpu_R[instr.c], dc->base.pc_next);
}
}
@ -452,10 +447,10 @@ static void callr(DisasContext *dc, uint32_t code, uint32_t flags)
{
R_TYPE(instr, code);
tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a));
tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4);
tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
dc->is_jmp = DISAS_JUMP;
dc->base.is_jmp = DISAS_JUMP;
}
/* rC <- ctlN */
@ -472,10 +467,10 @@ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
{
#if !defined(CONFIG_USER_ONLY)
if (likely(instr.c != R_ZERO)) {
tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]);
#ifdef DEBUG_MMU
TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
gen_helper_mmu_read_debug(dc->cpu_R[instr.c], dc->cpu_env, tmp);
gen_helper_mmu_read_debug(cpu_R[instr.c], cpu_env, tmp);
tcg_temp_free_i32(tmp);
#endif
}
@ -485,7 +480,7 @@ static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
default:
if (likely(instr.c != R_ZERO)) {
tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]);
}
break;
}
@ -505,25 +500,25 @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
{
#if !defined(CONFIG_USER_ONLY)
TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
gen_helper_mmu_write(dc->cpu_env, tmp, load_gpr(dc, instr.a));
gen_helper_mmu_write(cpu_env, tmp, load_gpr(dc, instr.a));
tcg_temp_free_i32(tmp);
#endif
break;
}
default:
tcg_gen_mov_tl(dc->cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a));
tcg_gen_mov_tl(cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a));
break;
}
/* If interrupts were enabled using WRCTL, trigger them. */
#if !defined(CONFIG_USER_ONLY)
if ((instr.imm5 + CR_BASE) == CR_STATUS) {
if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_check_interrupts(dc->cpu_env);
dc->is_jmp = DISAS_UPDATE;
gen_helper_check_interrupts(cpu_env);
dc->base.is_jmp = DISAS_UPDATE;
}
#endif
}
@ -533,8 +528,8 @@ static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags)
{
R_TYPE(instr, code);
if (likely(instr.c != R_ZERO)) {
tcg_gen_setcond_tl(flags, dc->cpu_R[instr.c], dc->cpu_R[instr.a],
dc->cpu_R[instr.b]);
tcg_gen_setcond_tl(flags, cpu_R[instr.c], cpu_R[instr.a],
cpu_R[instr.b]);
}
}
@ -544,8 +539,7 @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
{ \
R_TYPE(instr, (code)); \
if (likely(instr.c != R_ZERO)) { \
tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), \
(op3)); \
tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), (op3)); \
} \
}
@ -569,8 +563,8 @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
R_TYPE(instr, (code)); \
if (likely(instr.c != R_ZERO)) { \
TCGv t0 = tcg_temp_new(); \
tcg_gen_##insn(t0, dc->cpu_R[instr.c], \
load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
tcg_gen_##insn(t0, cpu_R[instr.c], \
load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
tcg_temp_free(t0); \
} \
}
@ -586,7 +580,7 @@ static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
if (likely(instr.c != R_ZERO)) { \
TCGv t0 = tcg_temp_new(); \
tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \
tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
tcg_temp_free(t0); \
} \
}
@ -620,8 +614,8 @@ static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
tcg_gen_or_tl(t2, t2, t3);
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(dc->cpu_R[instr.c], t0, t1);
tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]);
tcg_gen_div_tl(cpu_R[instr.c], t0, t1);
tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
tcg_temp_free(t3);
tcg_temp_free(t2);
@ -646,8 +640,8 @@ static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a));
tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b));
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_tl(dc->cpu_R[instr.c], t0, t1);
tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]);
tcg_gen_divu_tl(cpu_R[instr.c], t0, t1);
tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
tcg_temp_free(t3);
tcg_temp_free(t2);
@ -741,41 +735,6 @@ illegal_op:
t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
}
static void handle_instruction(DisasContext *dc, CPUNios2State *env)
{
uint32_t code;
uint8_t op;
const Nios2Instruction *instr;
#if defined(CONFIG_USER_ONLY)
/* FIXME: Is this needed ? */
if (dc->pc >= 0x1000 && dc->pc < 0x2000) {
env->regs[R_PC] = dc->pc;
t_gen_helper_raise_exception(dc, 0xaa);
return;
}
#endif
code = cpu_ldl_code(env, dc->pc);
op = get_opcode(code);
if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) {
goto illegal_op;
}
dc->zero = NULL;
instr = &i_type_instructions[op];
instr->handler(dc, code, instr->flags);
if (dc->zero) {
tcg_temp_free(dc->zero);
}
return;
illegal_op:
t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
}
static const char * const regnames[] = {
"zero", "at", "r2", "r3",
"r4", "r5", "r6", "r7",
@ -796,8 +755,6 @@ static const char * const regnames[] = {
"rpc"
};
static TCGv cpu_R[NUM_CORE_REGS];
#include "exec/gen-icount.h"
static void gen_exception(DisasContext *dc, uint32_t excp)
@ -807,104 +764,135 @@ static void gen_exception(DisasContext *dc, uint32_t excp)
tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
dc->is_jmp = DISAS_NORETURN;
dc->base.is_jmp = DISAS_NORETURN;
}
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUNios2State *env = cs->env_ptr;
DisasContext dc1, *dc = &dc1;
int num_insns;
int page_insns;
/* Initialize DC */
dc->cpu_env = cpu_env;
dc->cpu_R = cpu_R;
dc->is_jmp = DISAS_NEXT;
dc->pc = tb->pc;
dc->tb = tb;
dc->mem_idx = cpu_mmu_index(env, false);
dc->singlestep_enabled = cs->singlestep_enabled;
/* Set up instruction counts */
num_insns = 0;
if (max_insns > 1) {
int page_insns = (TARGET_PAGE_SIZE - (tb->pc & ~TARGET_PAGE_MASK)) / 4;
if (max_insns > page_insns) {
max_insns = page_insns;
}
/* Bound the number of insns to execute to those left on the page. */
page_insns = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
dc->base.max_insns = MIN(page_insns, dc->base.max_insns);
}
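/*
 * Standalone check of the page arithmetic above, not QEMU code; it assumes
 * 4 KiB pages, i.e. TARGET_PAGE_MASK == 0xfffff000 for a 32-bit target.
 * -(pc | mask) is the number of bytes left before the next page boundary,
 * and dividing by 4 gives the remaining 4-byte Nios II insn slots.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t page_mask = 0xfffff000u;
    uint32_t pc = 0x00001ff8;              /* 8 bytes before the page end */
    uint32_t left = -(pc | page_mask);     /* -(0xfffffff8) == 8 */

    assert(left == 8);
    assert(left / 4 == 2);                 /* room for two more insns */
    return 0;
}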
static void nios2_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
static void nios2_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
tcg_gen_insn_start(dcbase->pc_next);
}
static bool nios2_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
gen_exception(dc, EXCP_DEBUG);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
dc->base.pc_next += 4;
return true;
}
static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUNios2State *env = cs->env_ptr;
const Nios2Instruction *instr;
uint32_t code, pc;
uint8_t op;
pc = dc->base.pc_next;
dc->pc = pc;
dc->base.pc_next = pc + 4;
/* Decode an instruction */
#if defined(CONFIG_USER_ONLY)
/* FIXME: Is this needed ? */
if (pc >= 0x1000 && pc < 0x2000) {
t_gen_helper_raise_exception(dc, 0xaa);
return;
}
#endif
code = cpu_ldl_code(env, pc);
op = get_opcode(code);
if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) {
t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
return;
}
gen_tb_start(tb);
do {
tcg_gen_insn_start(dc->pc);
num_insns++;
dc->zero = NULL;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
gen_exception(dc, EXCP_DEBUG);
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
dc->pc += 4;
break;
}
instr = &i_type_instructions[op];
instr->handler(dc, code, instr->flags);
if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
if (dc->zero) {
tcg_temp_free(dc->zero);
}
}
/* Decode an instruction */
handle_instruction(dc, env);
dc->pc += 4;
/* Translation stops when a conditional branch is encountered.
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
} while (!dc->is_jmp &&
!tcg_op_buf_full() &&
num_insns < max_insns);
static void nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
/* Indicate where the next block should start */
switch (dc->is_jmp) {
case DISAS_NEXT:
switch (dc->base.is_jmp) {
case DISAS_TOO_MANY:
case DISAS_UPDATE:
/* Save the current PC back into the CPU register */
tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
tcg_gen_movi_tl(cpu_R[R_PC], dc->base.pc_next);
tcg_gen_exit_tb(NULL, 0);
break;
default:
case DISAS_JUMP:
/* The jump will already have updated the PC register */
tcg_gen_exit_tb(NULL, 0);
break;
case DISAS_NORETURN:
case DISAS_TB_JUMP:
/* nothing more to generate */
break;
default:
g_assert_not_reached();
}
}
/* End off the block */
gen_tb_end(tb, num_insns);
static void nios2_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
/* Mark instruction starts for the final generated instruction */
tb->size = dc->pc - tb->pc;
tb->icount = num_insns;
static const TranslatorOps nios2_tr_ops = {
.init_disas_context = nios2_tr_init_disas_context,
.tb_start = nios2_tr_tb_start,
.insn_start = nios2_tr_insn_start,
.breakpoint_check = nios2_tr_breakpoint_check,
.translate_insn = nios2_tr_translate_insn,
.tb_stop = nios2_tr_tb_stop,
.disas_log = nios2_tr_disas_log,
};
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(tb->pc)) {
FILE *logfile = qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(tb->pc));
log_target_disas(cs, tb->pc, dc->pc - tb->pc);
qemu_log("\n");
qemu_log_unlock(logfile);
}
#endif
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext dc;
translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
}
void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)

@ -3939,13 +3939,13 @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
tcg_gen_bswap16_i64(o->out, o->in2);
tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
return DISAS_NEXT;
}
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
tcg_gen_bswap32_i64(o->out, o->in2);
tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
return DISAS_NEXT;
}

@ -676,8 +676,7 @@ static void _decode_opc(DisasContext * ctx)
case 0x6008: /* swap.b Rm,Rn */
{
TCGv low = tcg_temp_new();
tcg_gen_ext16u_i32(low, REG(B7_4));
tcg_gen_bswap16_i32(low, low);
tcg_gen_bswap16_i32(low, REG(B7_4), 0);
tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
tcg_temp_free(low);
}
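/*
 * Standalone model of the swap.b semantics generated above, not QEMU code:
 * only the low halfword of Rm is byteswapped and the high halfword is
 * carried over by the deposit, which is why the flagged bswap16 can be
 * called with flags == 0 and leave bits 31..16 of its result unspecified.
 */
#include <stdint.h>

static uint32_t sh4_swap_b(uint32_t rm)
{
    uint32_t low = ((rm & 0xff) << 8) | ((rm >> 8) & 0xff);  /* swap low bytes */
    return (rm & 0xffff0000u) | low;                         /* keep high half */
}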