tcg: Merge INDEX_op_ld*_{i32,i64}
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 0de5c9d1f5
commit e996804d40

6 changed files with 83 additions and 122 deletions
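The merge is easiest to read from the hunks below: where callers previously picked between an _i32 and an _i64 opcode by name (see the liveness_pass_2 and backend tcg_out_ld hunks), they now emit a single opcode and the width travels with the op's TCGType; only the widening ld32u/ld32s loads keep dedicated opcodes. A minimal standalone sketch of that selection change, using illustrative stand-in enums rather than the real TCG headers:

#include <stdio.h>

/* Illustrative stand-ins only; the real values come from tcg-opc.h. */
typedef enum { TCG_TYPE_I32, TCG_TYPE_I64 } TCGType;
typedef enum { INDEX_op_ld_i32, INDEX_op_ld_i64, INDEX_op_ld } TCGOpcode;

/* Before the merge: the opcode name itself encoded the width. */
static TCGOpcode pick_load_old(TCGType type)
{
    return type == TCG_TYPE_I32 ? INDEX_op_ld_i32 : INDEX_op_ld_i64;
}

/* After the merge: one opcode; the width is the TCGType recorded on the op. */
static TCGOpcode pick_load_new(TCGType type)
{
    (void)type;   /* carried alongside the op, not in the opcode */
    return INDEX_op_ld;
}

int main(void)
{
    printf("old: i32=%d i64=%d\n",
           pick_load_old(TCG_TYPE_I32), pick_load_old(TCG_TYPE_I64));
    printf("new: i32=%d i64=%d\n",
           pick_load_new(TCG_TYPE_I32), pick_load_new(TCG_TYPE_I64));
    return 0;
}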
include/tcg/tcg-opc.h
@@ -57,6 +57,13 @@ DEF(divu2, 2, 3, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(extract, 1, 1, 2, TCG_OPF_INT)
 DEF(extract2, 1, 2, 1, TCG_OPF_INT)
+DEF(ld8u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld8s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld16u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld16s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld32u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld32s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld, 1, 1, 1, TCG_OPF_INT)
 DEF(movcond, 1, 4, 1, TCG_OPF_INT)
 DEF(mul, 1, 2, 0, TCG_OPF_INT)
 DEF(muls2, 2, 2, 0, TCG_OPF_INT)
@@ -93,11 +100,6 @@ DEF(subbi, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
 DEF(subbio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
 
 /* load/store */
-DEF(ld8u_i32, 1, 1, 1, 0)
-DEF(ld8s_i32, 1, 1, 1, 0)
-DEF(ld16u_i32, 1, 1, 1, 0)
-DEF(ld16s_i32, 1, 1, 1, 0)
-DEF(ld_i32, 1, 1, 1, 0)
 DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
@@ -106,13 +108,6 @@ DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
 DEF(setcond2_i32, 1, 4, 1, 0)
 
 /* load/store */
-DEF(ld8u_i64, 1, 1, 1, 0)
-DEF(ld8s_i64, 1, 1, 1, 0)
-DEF(ld16u_i64, 1, 1, 1, 0)
-DEF(ld16s_i64, 1, 1, 1, 0)
-DEF(ld32u_i64, 1, 1, 1, 0)
-DEF(ld32s_i64, 1, 1, 1, 0)
-DEF(ld_i64, 1, 1, 1, 0)
 DEF(st8_i64, 0, 2, 1, 0)
 DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
tcg/optimize.c
@@ -2880,22 +2880,22 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
-    CASE_OP_32_64(ld8s):
+    case INDEX_op_ld8s:
         s_mask = INT8_MIN;
         break;
-    CASE_OP_32_64(ld8u):
+    case INDEX_op_ld8u:
         z_mask = MAKE_64BIT_MASK(0, 8);
         break;
-    CASE_OP_32_64(ld16s):
+    case INDEX_op_ld16s:
         s_mask = INT16_MIN;
         break;
-    CASE_OP_32_64(ld16u):
+    case INDEX_op_ld16u:
         z_mask = MAKE_64BIT_MASK(0, 16);
         break;
-    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld32s:
         s_mask = INT32_MIN;
         break;
-    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32u:
         z_mask = MAKE_64BIT_MASK(0, 32);
         break;
     default:
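For reference on the masks recorded above: z_mask bounds the result bits that can still be non-zero after a zero-extending load, while s_mask = INT8_MIN/INT16_MIN/INT32_MIN marks the high bits guaranteed to replicate the loaded sign bit. A small standalone check, with a local stand-in for QEMU's MAKE_64BIT_MASK macro (an assumption made so the snippet compiles on its own):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's MAKE_64BIT_MASK(shift, length). */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    /* Unsigned loads: only the low 'length' bits can be non-zero. */
    printf("ld8u  z_mask = 0x%016" PRIx64 "\n", (uint64_t)MAKE_64BIT_MASK(0, 8));
    printf("ld16u z_mask = 0x%016" PRIx64 "\n", (uint64_t)MAKE_64BIT_MASK(0, 16));
    printf("ld32u z_mask = 0x%016" PRIx64 "\n", (uint64_t)MAKE_64BIT_MASK(0, 32));

    /* Signed loads: the high bits replicate the sign bit of the value. */
    printf("ld8s  s_mask = 0x%016" PRIx64 "\n", (uint64_t)(int64_t)INT8_MIN);
    printf("ld16s s_mask = 0x%016" PRIx64 "\n", (uint64_t)(int64_t)INT16_MIN);
    printf("ld32s s_mask = 0x%016" PRIx64 "\n", (uint64_t)(int64_t)INT32_MIN);
    return 0;
}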
@@ -3126,16 +3126,15 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             done = fold_extu(&ctx, op);
             break;
-        CASE_OP_32_64(ld8s):
-        CASE_OP_32_64(ld8u):
-        CASE_OP_32_64(ld16s):
-        CASE_OP_32_64(ld16u):
-        case INDEX_op_ld32s_i64:
-        case INDEX_op_ld32u_i64:
+        case INDEX_op_ld8s:
+        case INDEX_op_ld8u:
+        case INDEX_op_ld16s:
+        case INDEX_op_ld16u:
+        case INDEX_op_ld32s:
+        case INDEX_op_ld32u:
             done = fold_tcg_ld(&ctx, op);
             break;
-        case INDEX_op_ld_i32:
-        case INDEX_op_ld_i64:
+        case INDEX_op_ld:
         case INDEX_op_ld_vec:
             done = fold_tcg_ld_memcopy(&ctx, op);
             break;
tcg/tcg-op.c (24 lines changed)
@@ -1379,27 +1379,27 @@ void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
 
 void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld8u, ret, arg2, offset);
 }
 
 void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld8s, ret, arg2, offset);
 }
 
 void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld16u, ret, arg2, offset);
 }
 
 void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld16s, ret, arg2, offset);
 }
 
 void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld, ret, arg2, offset);
 }
 
 void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
@@ -1463,7 +1463,7 @@ void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
 void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld8u, ret, arg2, offset);
     } else {
         tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -1473,7 +1473,7 @@ void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld8s, ret, arg2, offset);
     } else {
         tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -1483,7 +1483,7 @@ void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld16u, ret, arg2, offset);
     } else {
         tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -1493,7 +1493,7 @@ void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld16s, ret, arg2, offset);
     } else {
         tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -1503,7 +1503,7 @@ void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld32u, ret, arg2, offset);
     } else {
         tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -1513,7 +1513,7 @@ void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld32s, ret, arg2, offset);
     } else {
         tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -1527,7 +1527,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
      * they cannot be the same temporary -- no chance of overlap.
      */
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld, ret, arg2, offset);
     } else if (HOST_BIG_ENDIAN) {
         tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
         tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
tcg/tcg.c (64 lines changed)
@@ -1184,16 +1184,11 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
     OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
-    OUTOP(INDEX_op_ld8u_i32, TCGOutOpLoad, outop_ld8u),
-    OUTOP(INDEX_op_ld8u_i64, TCGOutOpLoad, outop_ld8u),
-    OUTOP(INDEX_op_ld8s_i32, TCGOutOpLoad, outop_ld8s),
-    OUTOP(INDEX_op_ld8s_i64, TCGOutOpLoad, outop_ld8s),
-    OUTOP(INDEX_op_ld16u_i32, TCGOutOpLoad, outop_ld16u),
-    OUTOP(INDEX_op_ld16u_i64, TCGOutOpLoad, outop_ld16u),
-    OUTOP(INDEX_op_ld16s_i32, TCGOutOpLoad, outop_ld16s),
-    OUTOP(INDEX_op_ld16s_i64, TCGOutOpLoad, outop_ld16s),
-    OUTOP(INDEX_op_ld_i32, TCGOutOpLoad, outop_ld),
-    OUTOP(INDEX_op_ld_i64, TCGOutOpLoad, outop_ld),
+    OUTOP(INDEX_op_ld8u, TCGOutOpLoad, outop_ld8u),
+    OUTOP(INDEX_op_ld8s, TCGOutOpLoad, outop_ld8s),
+    OUTOP(INDEX_op_ld16u, TCGOutOpLoad, outop_ld16u),
+    OUTOP(INDEX_op_ld16s, TCGOutOpLoad, outop_ld16s),
+    OUTOP(INDEX_op_ld, TCGOutOpLoad, outop_ld),
     OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
     OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
     OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
@@ -1235,8 +1230,8 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
     OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32),
     OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
-    OUTOP(INDEX_op_ld32u_i64, TCGOutOpLoad, outop_ld32u),
-    OUTOP(INDEX_op_ld32s_i64, TCGOutOpLoad, outop_ld32s),
+    OUTOP(INDEX_op_ld32u, TCGOutOpLoad, outop_ld32u),
+    OUTOP(INDEX_op_ld32s, TCGOutOpLoad, outop_ld32s),
 #endif
 };
 
@@ -2443,6 +2438,11 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_brcond:
     case INDEX_op_deposit:
     case INDEX_op_extract:
+    case INDEX_op_ld8u:
+    case INDEX_op_ld8s:
+    case INDEX_op_ld16u:
+    case INDEX_op_ld16s:
+    case INDEX_op_ld:
     case INDEX_op_mov:
     case INDEX_op_movcond:
     case INDEX_op_negsetcond:
@@ -2452,11 +2452,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_xor:
         return has_type;
 
-    case INDEX_op_ld8u_i32:
-    case INDEX_op_ld8s_i32:
-    case INDEX_op_ld16u_i32:
-    case INDEX_op_ld16s_i32:
-    case INDEX_op_ld_i32:
     case INDEX_op_st8_i32:
     case INDEX_op_st16_i32:
     case INDEX_op_st_i32:
@@ -2466,13 +2461,8 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_setcond2_i32:
         return TCG_TARGET_REG_BITS == 32;
 
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-    case INDEX_op_ld_i64:
+    case INDEX_op_ld32u:
+    case INDEX_op_ld32s:
     case INDEX_op_st8_i64:
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
@@ -4428,10 +4418,7 @@ liveness_pass_2(TCGContext *s)
             arg_ts = arg_temp(op->args[i]);
             dir_ts = arg_ts->state_ptr;
             if (dir_ts && arg_ts->state == TS_DEAD) {
-                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
-                                  ? INDEX_op_ld_i32
-                                  : INDEX_op_ld_i64);
-                TCGOp *lop = tcg_op_insert_before(s, op, lopc,
+                TCGOp *lop = tcg_op_insert_before(s, op, INDEX_op_ld,
                                                   arg_ts->type, 3);
 
                 lop->args[0] = temp_arg(dir_ts);
@@ -5763,20 +5750,13 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;
 
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-        tcg_debug_assert(type == TCG_TYPE_I64);
-        /* fall through */
-    case INDEX_op_ld8u_i32:
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i32:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i32:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i32:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld_i32:
-    case INDEX_op_ld_i64:
+    case INDEX_op_ld8u:
+    case INDEX_op_ld8s:
+    case INDEX_op_ld16u:
+    case INDEX_op_ld16s:
+    case INDEX_op_ld32u:
+    case INDEX_op_ld32s:
+    case INDEX_op_ld:
         {
             const TCGOutOpLoad *out =
                 container_of(all_outop[op->opc], TCGOutOpLoad, base);
tcg/tci.c (43 lines changed)
@@ -466,31 +466,30 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 
         /* Load/store operations (32 bit). */
 
-        CASE_32_64(ld8u)
+        case INDEX_op_ld8u:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(uint8_t *)ptr;
             break;
-        CASE_32_64(ld8s)
+        case INDEX_op_ld8s:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int8_t *)ptr;
             break;
-        CASE_32_64(ld16u)
+        case INDEX_op_ld16u:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(uint16_t *)ptr;
             break;
-        CASE_32_64(ld16s)
+        case INDEX_op_ld16s:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int16_t *)ptr;
             break;
-        case INDEX_op_ld_i32:
-        CASE_64(ld32u)
+        case INDEX_op_ld:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
-            regs[r0] = *(uint32_t *)ptr;
+            regs[r0] = *(tcg_target_ulong *)ptr;
             break;
         CASE_32_64(st8)
             tci_args_rrs(insn, &r0, &r1, &ofs);
@@ -716,16 +715,16 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 #if TCG_TARGET_REG_BITS == 64
         /* Load/store operations (64 bit). */
 
-        case INDEX_op_ld32s_i64:
+        case INDEX_op_ld32u:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
+            regs[r0] = *(uint32_t *)ptr;
+            break;
+        case INDEX_op_ld32s:
+            tci_args_rrs(insn, &r0, &r1, &ofs);
+            ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int32_t *)ptr;
             break;
-        case INDEX_op_ld_i64:
-            tci_args_rrs(insn, &r0, &r1, &ofs);
-            ptr = (void *)(regs[r1] + ofs);
-            regs[r0] = *(uint64_t *)ptr;
-            break;
         case INDEX_op_st_i64:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
@@ -970,18 +969,12 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
         info->fprintf_func(info->stream, "%-12s", op_name);
         break;
 
-    case INDEX_op_ld8u_i32:
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i32:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i32:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i32:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-    case INDEX_op_ld_i32:
-    case INDEX_op_ld_i64:
+    case INDEX_op_ld8u:
+    case INDEX_op_ld8s:
+    case INDEX_op_ld16u:
+    case INDEX_op_ld16s:
+    case INDEX_op_ld32u:
+    case INDEX_op_ld:
     case INDEX_op_st8_i32:
     case INDEX_op_st8_i64:
     case INDEX_op_st16_i32:
tcg/tci/tcg-target.c.inc
@@ -339,18 +339,12 @@ static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                        intptr_t offset)
 {
-    switch (type) {
-    case TCG_TYPE_I32:
-        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
-        break;
-#if TCG_TARGET_REG_BITS == 64
-    case TCG_TYPE_I64:
-        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
-        break;
-#endif
-    default:
-        g_assert_not_reached();
-    }
+    TCGOpcode op = INDEX_op_ld;
+
+    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+        op = INDEX_op_ld32u;
+    }
+    tcg_out_ldst(s, op, val, base, offset);
 }
 
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
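The rewritten tcg_out_ld above encodes a single rule: plain INDEX_op_ld now loads a full host register, so a TCG_TYPE_I32 value on a 64-bit host must instead use the explicit zero-extending INDEX_op_ld32u. A minimal sketch of that dispatch, with illustrative stand-ins for the real TCG definitions:

#include <stdio.h>

/* Illustrative stand-ins; the real enums come from the TCG headers. */
typedef enum { TCG_TYPE_I32, TCG_TYPE_I64 } TCGType;
typedef enum { INDEX_op_ld, INDEX_op_ld32u } TCGOpcode;

/* Mirrors the selection rule in the new tcg_out_ld above:
 * a full-register load by default, a 32-bit zero-extending load
 * when a 32-bit value sits in a 64-bit host register. */
static TCGOpcode pick_load(int host_reg_bits, TCGType type)
{
    if (host_reg_bits == 64 && type == TCG_TYPE_I32) {
        return INDEX_op_ld32u;
    }
    return INDEX_op_ld;
}

int main(void)
{
    printf("64-bit host, I32 value -> %s\n",
           pick_load(64, TCG_TYPE_I32) == INDEX_op_ld32u ? "ld32u" : "ld");
    printf("64-bit host, I64 value -> %s\n",
           pick_load(64, TCG_TYPE_I64) == INDEX_op_ld32u ? "ld32u" : "ld");
    printf("32-bit host, I32 value -> %s\n",
           pick_load(32, TCG_TYPE_I32) == INDEX_op_ld32u ? "ld32u" : "ld");
    return 0;
}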
@@ -1132,7 +1126,7 @@ static void tcg_out_br(TCGContext *s, TCGLabel *l)
 static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld8u_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld8u, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld8u = {
@@ -1143,7 +1137,7 @@ static const TCGOutOpLoad outop_ld8u = {
 static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld8s_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld8s, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld8s = {
@@ -1154,7 +1148,7 @@ static const TCGOutOpLoad outop_ld8s = {
 static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld16u_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld16u, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld16u = {
@@ -1165,7 +1159,7 @@ static const TCGOutOpLoad outop_ld16u = {
 static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld16s_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld16s, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld16s = {
@@ -1177,7 +1171,7 @@ static const TCGOutOpLoad outop_ld16s = {
 static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld32u_i64, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld32u, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld32u = {
@@ -1188,7 +1182,7 @@ static const TCGOutOpLoad outop_ld32u = {
 static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld32s_i64, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld32s, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld32s = {