tcg: Merge INDEX_op_shr_{i32,i64}

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2025-01-07 22:52:10 -08:00
parent edd6ba8a6b
commit 74dbd36f1f
7 changed files with 18 additions and 29 deletions
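The change replaces the separate 32-bit and 64-bit logical-right-shift opcodes with a single INDEX_op_shr whose operand width is taken from the op's TCGType. A self-contained sketch of the width-selection pattern this enables (illustrative only, not code from the patch):

```c
#include <stdint.h>
#include <stdbool.h>

/* With one opcode, width-dependent behavior keys off the op's type rather
 * than off the opcode name: mask the shift count by 31 or 63 accordingly. */
static uint64_t shr_by_type(bool is_32bit, uint64_t x, uint64_t y)
{
    if (is_32bit) {
        return (uint32_t)x >> (y & 31);   /* 32-bit logical shift */
    }
    return x >> (y & 63);                 /* 64-bit logical shift */
}
```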

View file

@@ -384,10 +384,10 @@ Shifts/Rotates
      - | *t0* = *t1* << *t2*
        | Unspecified behavior for negative or out-of-range shifts.
-   * - shr_i32/i64 *t0*, *t1*, *t2*
+   * - shr *t0*, *t1*, *t2*
      - | *t0* = *t1* >> *t2* (unsigned)
-       | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+       | Unspecified behavior for negative or out-of-range shifts.
    * - sar_i32/i64 *t0*, *t1*, *t2*
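Because the documented behavior is unspecified for negative or out-of-range shift counts, a frontend that cannot prove the count is in range typically masks it before emitting the shift. A minimal sketch using the existing public helpers (gen_shr_guarded_i32 is a hypothetical wrapper; the usual tcg/tcg-op.h declarations are assumed):

```c
/* Hypothetical wrapper, not part of this commit: clamp the shift count so
 * the emitted shr never sees an out-of-range value. */
static void gen_shr_guarded_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 count)
{
    TCGv_i32 masked = tcg_temp_new_i32();

    tcg_gen_andi_i32(masked, count, 31);   /* count is now 0..31 */
    tcg_gen_shr_i32(dst, src, masked);     /* dst = src >> masked (unsigned) */
}
```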

View file

@@ -59,6 +59,7 @@ DEF(orc, 1, 2, 0, TCG_OPF_INT)
 DEF(rems, 1, 2, 0, TCG_OPF_INT)
 DEF(remu, 1, 2, 0, TCG_OPF_INT)
 DEF(shl, 1, 2, 0, TCG_OPF_INT)
+DEF(shr, 1, 2, 0, TCG_OPF_INT)
 DEF(sub, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
@@ -75,7 +76,6 @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* shifts/rotates */
-DEF(shr_i32, 1, 2, 0, 0)
 DEF(sar_i32, 1, 2, 0, 0)
 DEF(rotl_i32, 1, 2, 0, 0)
 DEF(rotr_i32, 1, 2, 0, 0)
@@ -115,7 +115,6 @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* shifts/rotates */
-DEF(shr_i64, 1, 2, 0, 0)
 DEF(sar_i64, 1, 2, 0, 0)
 DEF(rotl_i64, 1, 2, 0, 0)
 DEF(rotr_i64, 1, 2, 0, 0)

View file

@@ -452,10 +452,10 @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         }
         return (uint64_t)x << (y & 63);
-    case INDEX_op_shr_i32:
-        return (uint32_t)x >> (y & 31);
-    case INDEX_op_shr_i64:
+    case INDEX_op_shr:
+        if (type == TCG_TYPE_I32) {
+            return (uint32_t)x >> (y & 31);
+        }
         return (uint64_t)x >> (y & 63);
     case INDEX_op_sar_i32:
@ -2342,7 +2342,6 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
TCGOpcode shr_opc;
TCGOpcode uext_opc = 0, sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
@@ -2364,7 +2363,6 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
     switch (ctx->type) {
     case TCG_TYPE_I32:
-        shr_opc = INDEX_op_shr_i32;
         if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
             uext_opc = INDEX_op_extract_i32;
         }
@@ -2373,7 +2371,6 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         }
         break;
     case TCG_TYPE_I64:
-        shr_opc = INDEX_op_shr_i64;
         if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
             uext_opc = INDEX_op_extract_i64;
         }
@@ -2402,7 +2399,7 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         op->args[3] = 1;
     } else {
         if (sh) {
-            op2 = opt_insert_before(ctx, op, shr_opc, 3);
+            op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3);
             op2->args[0] = ret;
             op2->args[1] = src1;
             op2->args[2] = arg_new_constant(ctx, sh);
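For context, the transformation performed by fold_setcond_tst_pow2 is the usual power-of-two bit-test rewrite: a setcond testing a single bit (1 << sh) becomes a right shift by sh followed by extracting bit 0. A self-contained check of that equivalence (plain C, not QEMU code):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t x = 0xdeadbeef;

    for (unsigned sh = 0; sh < 32; sh++) {
        uint32_t tst = (x & (UINT32_C(1) << sh)) != 0;   /* bit-test form */
        uint32_t via_shift = (x >> sh) & 1;              /* shr + extract */
        assert(tst == via_shift);
    }
    return 0;
}
```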
@@ -2609,7 +2606,7 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
          * input sign repetitions.
          */
         return fold_masks_s(ctx, op, s_mask);
-    CASE_OP_32_64(shr):
+    case INDEX_op_shr:
         /*
          * If the sign bit is known zero, then logical right shift
          * will not reduce the number of input sign repetitions.
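The comment's claim, that a logical right shift of a value whose sign bit is known zero preserves the sign-repetition count, follows because logical and arithmetic right shifts coincide on non-negative values. A small standalone check (plain C, not QEMU code):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    int32_t x = 0x40001234;   /* sign bit clear */

    for (unsigned sh = 0; sh < 32; sh++) {
        /* For non-negative values, shr and sar give identical results. */
        assert((uint32_t)x >> sh == (uint32_t)(x >> sh));
    }
    return 0;
}
```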
@@ -3032,7 +3029,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(rotr):
         CASE_OP_32_64(sar):
         case INDEX_op_shl:
-        CASE_OP_32_64(shr):
+        case INDEX_op_shr:
             done = fold_shift(&ctx, op);
             break;
         CASE_OP_32_64(setcond):

View file

@@ -496,7 +496,7 @@ void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
+    tcg_gen_op3_i32(INDEX_op_shr, ret, arg1, arg2);
 }
 
 void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -1615,7 +1615,7 @@ void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_shr, ret, arg1, arg2);
     } else {
         gen_helper_shr_i64(ret, arg1, arg2);
     }

View file

@@ -1043,8 +1043,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
     OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
     OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
-    OUTOP(INDEX_op_shr_i32, TCGOutOpBinary, outop_shr),
-    OUTOP(INDEX_op_shr_i64, TCGOutOpBinary, outop_shr),
+    OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 };
@@ -5423,8 +5422,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_rems:
     case INDEX_op_remu:
     case INDEX_op_shl:
-    case INDEX_op_shr_i32:
-    case INDEX_op_shr_i64:
+    case INDEX_op_shr:
     case INDEX_op_xor:
         {
             const TCGOutOpBinary *out =

View file

@@ -621,9 +621,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] << (regs[r2] % TCG_TARGET_REG_BITS);
             break;
-        case INDEX_op_shr_i32:
+        case INDEX_op_shr:
             tci_args_rrr(insn, &r0, &r1, &r2);
-            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
+            regs[r0] = regs[r1] >> (regs[r2] % TCG_TARGET_REG_BITS);
             break;
         case INDEX_op_sar_i32:
             tci_args_rrr(insn, &r0, &r1, &r2);
@@ -787,10 +787,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         /* Shift/rotate operations (64 bit). */
-        case INDEX_op_shr_i64:
-            tci_args_rrr(insn, &r0, &r1, &r2);
-            regs[r0] = regs[r1] >> (regs[r2] & 63);
-            break;
         case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
@@ -1078,10 +1074,9 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_rems:
     case INDEX_op_remu:
     case INDEX_op_shl:
+    case INDEX_op_shr:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_shr_i32:
-    case INDEX_op_shr_i64:
     case INDEX_op_sar_i32:
     case INDEX_op_sar_i64:
     case INDEX_op_rotl_i32:

View file

@@ -792,7 +792,7 @@ static void tgen_shr(TCGContext *s, TCGType type,
         tcg_out_ext32u(s, TCG_REG_TMP, a1);
         a1 = TCG_REG_TMP;
     }
-    tcg_out_op_rrr(s, glue(INDEX_op_shr_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
 }
 
 static const TCGOutOpBinary outop_shr = {
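Note the division of labour that lets the interpreter drop its explicit 32-bit cast: tgen_shr zero-extends the 32-bit input before emitting the op, so a single full-register shift in the interpreter serves both widths. A self-contained sketch of that reasoning (function names here are illustrative, not QEMU APIs):

```c
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

/* Backend side (cf. tgen_shr): zero-extend a 32-bit input first. */
static uint64_t prepare_input(bool is_32bit, uint64_t val)
{
    return is_32bit ? (uint32_t)val : val;
}

/* Interpreter side (cf. tci.c): one full-register shift is correct for both
 * widths, because a zero-extended input has no stray bits above bit 31. */
static uint64_t interp_shr(uint64_t val, uint64_t count, unsigned reg_bits)
{
    return val >> (count % reg_bits);
}

int main(void)
{
    uint64_t reg = 0xffffffff9abcdef0ull;   /* high half holds stale data */

    /* 32-bit op on a 64-bit host register matches a true 32-bit shift. */
    assert(interp_shr(prepare_input(true, reg), 8, 64) == (uint32_t)reg >> 8);
    return 0;
}
```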