tcg: Merge INDEX_op_divu_{i32,i64}
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 961b80aecd
parent 6d1a2365ea

7 changed files with 20 additions and 23 deletions
@@ -282,7 +282,7 @@ Arithmetic
      - | *t0* = *t1* / *t2* (signed)
        | Undefined behavior if division by zero or overflow.
 
-   * - divu_i32/i64 *t0*, *t1*, *t2*
+   * - divu *t0*, *t1*, *t2*
 
      - | *t0* = *t1* / *t2* (unsigned)
        | Undefined behavior if division by zero.
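
The documentation entry above only renames the opcode; the semantics stay as stated. As a plain-C illustration (not part of the patch, helper name made up for this note), divu is ordinary unsigned division with division by zero left undefined:

    #include <stdint.h>

    /* Illustrative only: mirrors the documented semantics of divu. */
    uint32_t divu_semantics(uint32_t t1, uint32_t t2)
    {
        return t1 / t2;   /* undefined behavior if t2 == 0, as the table states */
    }
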
@@ -43,6 +43,7 @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
 DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
 DEF(divs, 1, 2, 0, TCG_OPF_INT)
+DEF(divu, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(mul, 1, 2, 0, TCG_OPF_INT)
 DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
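
For context, the opcode list is consumed as an X-macro: each DEF() line becomes one opcode enumerator, so the single divu entry above replaces the old divu_i32/divu_i64 pair. A minimal sketch of that pattern (assumed and simplified, with hypothetical names; DEF arguments follow the order used in the lines above: name, outputs, inputs, constant args, flags):

    /* Simplified sketch of the X-macro expansion, not the real header. */
    #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_##name,
    enum ExampleTCGOpcode {
        DEF(divs, 1, 2, 0, 0)
        DEF(divu, 1, 2, 0, 0)
        EXAMPLE_NB_OPS
    };
    #undef DEF
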
@@ -69,7 +70,6 @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* arith */
-DEF(divu_i32, 1, 2, 0, 0)
 DEF(rem_i32, 1, 2, 0, 0)
 DEF(remu_i32, 1, 2, 0, 0)
 DEF(div2_i32, 2, 3, 0, 0)
@@ -116,7 +116,6 @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* arith */
-DEF(divu_i64, 1, 2, 0, 0)
 DEF(rem_i64, 1, 2, 0, 0)
 DEF(remu_i64, 1, 2, 0, 0)
 DEF(div2_i64, 2, 3, 0, 0)
@@ -563,9 +563,10 @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         }
         return (int64_t)x / ((int64_t)y ? : 1);
 
-    case INDEX_op_divu_i32:
-        return (uint32_t)x / ((uint32_t)y ? : 1);
-    case INDEX_op_divu_i64:
+    case INDEX_op_divu:
+        if (type == TCG_TYPE_I32) {
+            return (uint32_t)x / ((uint32_t)y ? : 1);
+        }
        return (uint64_t)x / ((uint64_t)y ? : 1);
 
     case INDEX_op_rem_i32:
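
With a single opcode, the constant-folding case above now selects the width from the operation type rather than from two separate opcodes. A standalone sketch of the computation (hypothetical helper, not the patch itself; the GNU "?:" shorthand from the patch is written out as a full ternary, which keeps the host division from trapping when the folded divisor is zero, a case the guest-facing definition already leaves undefined):

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative fold of an unsigned division at a given width. */
    uint64_t fold_divu_example(uint64_t x, uint64_t y, bool is_i32)
    {
        if (is_i32) {
            uint32_t d = (uint32_t)y;
            return (uint32_t)x / (d ? d : 1);   /* avoid a host trap on 0 */
        }
        return x / (y ? y : 1);
    }
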
@@ -2908,7 +2909,7 @@ void tcg_optimize(TCGContext *s)
             done = fold_deposit(&ctx, op);
             break;
         case INDEX_op_divs:
-        CASE_OP_32_64(divu):
+        case INDEX_op_divu:
             done = fold_divide(&ctx, op);
             break;
         case INDEX_op_dup_vec:
tcg/tcg-op.c (16 changed lines)
@@ -635,8 +635,8 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_divu_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_divu, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i32) {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         TCGv_i32 zero = tcg_constant_i32(0);
@@ -651,9 +651,9 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     if (TCG_TARGET_HAS_rem_i32) {
         tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_divu_i32, TCG_TYPE_I32, 0)) {
+    } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
-        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_divu, t0, arg1, arg2);
         tcg_gen_mul_i32(t0, t0, arg2);
         tcg_gen_sub_i32(ret, arg1, t0);
         tcg_temp_free_i32(t0);
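
The remu fallback above changes only in the opcode name: when the backend offers unsigned division but no remainder op, the remainder is rebuilt as arg1 - (arg1 / arg2) * arg2. A plain-C sketch of that identity (illustrative helper, not QEMU code):

    #include <stdint.h>

    /* Remainder recovered from division, as in the divu + mul + sub sequence above. */
    uint32_t remu_via_divu(uint32_t arg1, uint32_t arg2)
    {
        uint32_t q = arg1 / arg2;   /* divu */
        return arg1 - q * arg2;     /* mul, then sub */
    }
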
@@ -2003,8 +2003,8 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 
 void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    if (tcg_op_supported(INDEX_op_divu_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_divu, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i64) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         TCGv_i64 zero = tcg_constant_i64(0);
@@ -2019,9 +2019,9 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_HAS_rem_i64) {
         tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_divu_i64, TCG_TYPE_I64, 0)) {
+    } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
-        tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_divu, t0, arg1, arg2);
         tcg_gen_mul_i64(t0, t0, arg2);
         tcg_gen_sub_i64(ret, arg1, t0);
         tcg_temp_free_i64(t0);
@@ -1021,8 +1021,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
-    OUTOP(INDEX_op_divu_i32, TCGOutOpBinary, outop_divu),
-    OUTOP(INDEX_op_divu_i64, TCGOutOpBinary, outop_divu),
+    OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
     OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -5415,8 +5414,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_divs:
-    case INDEX_op_divu_i32:
-    case INDEX_op_divu_i64:
+    case INDEX_op_divu:
     case INDEX_op_eqv:
     case INDEX_op_mul:
     case INDEX_op_mulsh:
@@ -724,7 +724,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
         break;
-    case INDEX_op_divu_i64:
+    case INDEX_op_divu:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
         break;
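
In the interpreter loop above, the merged opcode simply takes over the 64-bit unsigned-divide slot. A minimal sketch of what that case does (hypothetical function and register file, not the actual interpreter):

    #include <stdint.h>

    /* Decode three register indexes, divide unsigned, store the result. */
    void interp_divu(uint64_t *regs, unsigned r0, unsigned r1, unsigned r2)
    {
        regs[r0] = regs[r1] / regs[r2];   /* guest code must ensure regs[r2] != 0 */
    }
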
@@ -1072,6 +1072,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_divs:
+    case INDEX_op_divu:
     case INDEX_op_eqv:
     case INDEX_op_mul:
     case INDEX_op_nand:
@@ -1082,8 +1083,6 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_xor:
     case INDEX_op_rem_i32:
     case INDEX_op_rem_i64:
-    case INDEX_op_divu_i32:
-    case INDEX_op_divu_i64:
     case INDEX_op_remu_i32:
     case INDEX_op_remu_i64:
     case INDEX_op_shl_i32:
@@ -663,7 +663,7 @@ static void tgen_divu(TCGContext *s, TCGType type,
 {
     TCGOpcode opc = (type == TCG_TYPE_I32
                      ? INDEX_op_tci_divu32
-                     : INDEX_op_divu_i64);
+                     : INDEX_op_divu);
     tcg_out_op_rrr(s, opc, a0, a1, a2);
 }