tcg: Merge INDEX_op_andc_{i32,i64}

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2025-01-06 12:37:02 -08:00
Commit: 46f96bff16
Parent: a341c84e81

9 changed files with 17 additions and 19 deletions

diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst

@@ -319,7 +319,7 @@ Logical
      - | *t0* = ~\ *t1*
 
-   * - andc_i32/i64 *t0*, *t1*, *t2*
+   * - andc *t0*, *t1*, *t2*
 
      - | *t0* = *t1* & ~\ *t2*
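The operation keeps its old and-with-complement semantics under the shorter, type-agnostic name. As a purely illustrative aside (not part of the patch), the equivalent C computation is:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t t1 = 0xF0F0F0F0u;   /* source value  */
        uint32_t t2 = 0x0000FFFFu;   /* bits to clear */
        uint32_t t0 = t1 & ~t2;      /* andc: t0 = t1 & ~t2 -> 0xF0F00000 */

        printf("0x%08" PRIX32 "\n", t0);
        return 0;
    }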

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h

@@ -41,6 +41,7 @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
 DEF(add, 1, 2, 0, TCG_OPF_INT)
 DEF(and, 1, 2, 0, TCG_OPF_INT)
+DEF(andc, 1, 2, 0, TCG_OPF_INT)
 
 DEF(setcond_i32, 1, 2, 1, 0)
 DEF(negsetcond_i32, 1, 2, 1, 0)
@@ -91,7 +92,6 @@ DEF(bswap16_i32, 1, 1, 1, 0)
 DEF(bswap32_i32, 1, 1, 1, 0)
 DEF(not_i32, 1, 1, 0, 0)
 DEF(neg_i32, 1, 1, 0, 0)
-DEF(andc_i32, 1, 2, 0, 0)
 DEF(orc_i32, 1, 2, 0, 0)
 DEF(eqv_i32, 1, 2, 0, 0)
 DEF(nand_i32, 1, 2, 0, 0)
@@ -149,7 +149,6 @@ DEF(bswap32_i64, 1, 1, 1, 0)
 DEF(bswap64_i64, 1, 1, 1, 0)
 DEF(not_i64, 1, 1, 0, 0)
 DEF(neg_i64, 1, 1, 0, 0)
-DEF(andc_i64, 1, 2, 0, 0)
 DEF(orc_i64, 1, 2, 0, 0)
 DEF(eqv_i64, 1, 2, 0, 0)
 DEF(nand_i64, 1, 2, 0, 0)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c

@@ -8600,7 +8600,7 @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
     tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
 
     nzcv = a->nzcv;
-    has_andc = tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0);
+    has_andc = tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0);
     if (nzcv & 8) { /* N */
         tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
     } else {

diff --git a/target/tricore/translate.c b/target/tricore/translate.c

@@ -3981,7 +3981,7 @@ static void decode_bit_andacc(DisasContext *ctx)
                     pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
         break;
     case OPC2_32_BIT_AND_NOR_T:
-        if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
+        if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
             gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                         pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
         } else {

diff --git a/tcg/optimize.c b/tcg/optimize.c

@@ -479,7 +479,8 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     CASE_OP_32_64(neg):
         return -x;
 
-    CASE_OP_32_64_VEC(andc):
+    case INDEX_op_andc:
+    case INDEX_op_andc_vec:
         return x & ~y;
 
     CASE_OP_32_64_VEC(orc):
@@ -2852,7 +2853,8 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_and_vec:
             done = fold_and(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(andc):
+        case INDEX_op_andc:
+        case INDEX_op_andc_vec:
             done = fold_andc(&ctx, op);
             break;
         CASE_OP_32_64(brcond):

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c

@@ -668,8 +668,8 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_andc, ret, arg1, arg2);
     } else {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_not_i32(t0, arg2);
@@ -2264,8 +2264,8 @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
         tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-    } else if (tcg_op_supported(INDEX_op_andc_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_andc, ret, arg1, arg2);
     } else {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_not_i64(t0, arg2);

diff --git a/tcg/tcg.c b/tcg/tcg.c

@@ -1006,8 +1006,7 @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
 static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
     OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
-    OUTOP(INDEX_op_andc_i32, TCGOutOpBinary, outop_andc),
-    OUTOP(INDEX_op_andc_i64, TCGOutOpBinary, outop_andc),
+    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
 };
 
 #undef OUTOP
@@ -5441,8 +5440,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_add:
     case INDEX_op_and:
-    case INDEX_op_andc_i32:
-    case INDEX_op_andc_i64:
+    case INDEX_op_andc:
         {
             const TCGOutOpBinary *out =
                 container_of(all_outop[op->opc], TCGOutOpBinary, base);

diff --git a/tcg/tci.c b/tcg/tci.c

@@ -547,7 +547,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = regs[r1] ^ regs[r2];
         break;
-    CASE_32_64(andc)
+    case INDEX_op_andc:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = regs[r1] & ~regs[r2];
         break;
@@ -1082,6 +1082,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_add:
     case INDEX_op_and:
+    case INDEX_op_andc:
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
@@ -1090,8 +1091,6 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_or_i64:
     case INDEX_op_xor_i32:
     case INDEX_op_xor_i64:
-    case INDEX_op_andc_i32:
-    case INDEX_op_andc_i64:
     case INDEX_op_orc_i32:
     case INDEX_op_orc_i64:
     case INDEX_op_eqv_i32:

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc

@@ -660,7 +660,7 @@ static const TCGOutOpBinary outop_and = {
 static void tgen_andc(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_andc_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
 }
 
 static const TCGOutOpBinary outop_andc = {
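
Usage note (not part of the patch): frontends that want their own expansion when the backend lacks andc probe the merged opcode with tcg_op_supported(INDEX_op_andc, ...), as the target/arm and target/tricore hunks above do; otherwise tcg_gen_andc_i32/i64 falls back to not + and on its own. A minimal sketch of that fallback, mirroring the tcg/tcg-op.c hunk (the wrapper name gen_andc_i32_fallback is hypothetical):

    /* Assumes the usual tcg/tcg-op.h context; illustrative only. */
    static void gen_andc_i32_fallback(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();

        tcg_gen_not_i32(t0, arg2);        /* t0  = ~arg2        */
        tcg_gen_and_i32(ret, arg1, t0);   /* ret = arg1 & ~arg2 */
        tcg_temp_free_i32(t0);
    }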