tcg: Merge INDEX_op_muluh_{i32,i64}
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

commit aa28c9ef8e (parent 937246f2ee)

5 changed files with 22 additions and 25 deletions
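Conceptually, the merge replaces the two width-specific opcodes with a single INDEX_op_muluh whose width is taken from the operation's TCGType, as the new do_constant_folding_2() case below shows. The following is a minimal standalone sketch of that folding rule, not QEMU code: the names MulType, TYPE_I32/TYPE_I64 and fold_muluh are illustrative stand-ins, and a GCC/Clang __int128 multiply is used in place of QEMU's mulu64() helper.

#include <inttypes.h>
#include <stdio.h>

/* Stand-ins for TCG_TYPE_I32 / TCG_TYPE_I64; illustrative only. */
typedef enum { TYPE_I32, TYPE_I64 } MulType;

/*
 * High part of an unsigned multiply, selected by operand width --
 * the behaviour the single INDEX_op_muluh case now folds to.
 */
static uint64_t fold_muluh(MulType type, uint64_t x, uint64_t y)
{
    if (type == TYPE_I32) {
        /* 32x32 -> 64 product; the high part is bits [63:32]. */
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    }
    /* 64x64 -> 128 product; keep the high 64 bits (QEMU uses mulu64()). */
    return (uint64_t)(((unsigned __int128)x * y) >> 64);
}

int main(void)
{
    /* 0xffffffff^2 = 0xfffffffe00000001, so the 32-bit high part is 0xfffffffe. */
    printf("muluh i32: %" PRIx64 "\n", fold_muluh(TYPE_I32, 0xffffffff, 0xffffffff));
    printf("muluh i64: %" PRIx64 "\n", fold_muluh(TYPE_I64, UINT64_MAX, UINT64_MAX));
    return 0;
}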
docs/devel/tcg-ops.rst
@@ -591,7 +591,7 @@ Multiword arithmetic support
 
    * - mulsh_i32/i64 *t0*, *t1*, *t2*
 
-      muluh_i32/i64 *t0*, *t1*, *t2*
+      muluh *t0*, *t1*, *t2*
 
      - | Provide the high part of a signed or unsigned multiply, respectively.
       |
include/tcg/tcg-opc.h
@@ -44,6 +44,7 @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(mul, 1, 2, 0, TCG_OPF_INT)
+DEF(muluh, 1, 2, 0, TCG_OPF_INT)
 DEF(nand, 1, 2, 0, TCG_OPF_INT)
 DEF(neg, 1, 1, 0, TCG_OPF_INT)
 DEF(nor, 1, 2, 0, TCG_OPF_INT)
@@ -89,7 +90,6 @@ DEF(add2_i32, 2, 4, 0, 0)
 DEF(sub2_i32, 2, 4, 0, 0)
 DEF(mulu2_i32, 2, 2, 0, 0)
 DEF(muls2_i32, 2, 2, 0, 0)
-DEF(muluh_i32, 1, 2, 0, 0)
 DEF(mulsh_i32, 1, 2, 0, 0)
 DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
 DEF(setcond2_i32, 1, 4, 1, 0)
@@ -151,7 +151,6 @@ DEF(add2_i64, 2, 4, 0, 0)
 DEF(sub2_i64, 2, 4, 0, 0)
 DEF(mulu2_i64, 2, 2, 0, 0)
 DEF(muls2_i64, 2, 2, 0, 0)
-DEF(muluh_i64, 1, 2, 0, 0)
 DEF(mulsh_i64, 1, 2, 0, 0)
 
 #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
tcg/optimize.c
@@ -419,7 +419,8 @@ static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
     return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
 }
 
-static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
+static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
+                                      uint64_t x, uint64_t y)
 {
     uint64_t l64, h64;
 
@@ -541,14 +542,16 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_extrh_i64_i32:
         return (uint64_t)x >> 32;
 
-    case INDEX_op_muluh_i32:
-        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
+    case INDEX_op_muluh:
+        if (type == TCG_TYPE_I32) {
+            return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
+        }
+        mulu64(&l64, &h64, x, y);
+        return h64;
+
     case INDEX_op_mulsh_i32:
         return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
 
-    case INDEX_op_muluh_i64:
-        mulu64(&l64, &h64, x, y);
-        return h64;
     case INDEX_op_mulsh_i64:
         muls64(&l64, &h64, x, y);
         return h64;
@@ -580,7 +583,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
 static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                     uint64_t x, uint64_t y)
 {
-    uint64_t res = do_constant_folding_2(op, x, y);
+    uint64_t res = do_constant_folding_2(op, type, x, y);
     if (type == TCG_TYPE_I32) {
         res = (int32_t)res;
     }
@@ -2967,7 +2970,7 @@ void tcg_optimize(TCGContext *s)
             done = fold_mul(&ctx, op);
             break;
         CASE_OP_32_64(mulsh):
-        CASE_OP_32_64(muluh):
+        case INDEX_op_muluh:
             done = fold_mul_highpart(&ctx, op);
             break;
         CASE_OP_32_64(muls2):
tcg/tcg-op.c (10 changed lines)
@@ -1132,10 +1132,10 @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     if (TCG_TARGET_HAS_mulu2_i32) {
         tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_muluh_i32, TCG_TYPE_I32, 0)) {
+    } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) {
         TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
-        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_muluh, rh, arg1, arg2);
         tcg_gen_mov_i32(rl, t);
         tcg_temp_free_i32(t);
     } else if (TCG_TARGET_REG_BITS == 64) {
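The fallback path in tcg_gen_mulu2_i32() above builds the full double-width product from two single-width ops when the backend lacks mulu2: a plain mul for the low half and muluh for the high half. Below is a self-contained sketch of that decomposition in plain C; mul_low32 and mul_high32 are hypothetical helper names, not QEMU API.

#include <inttypes.h>
#include <stdio.h>

/* Low half of the product -- what INDEX_op_mul produces. */
static uint32_t mul_low32(uint32_t a, uint32_t b)
{
    return a * b;
}

/* High half of the product -- what INDEX_op_muluh produces. */
static uint32_t mul_high32(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) >> 32);
}

int main(void)
{
    uint32_t a = 0xdeadbeef, b = 0xcafebabe;

    /* Reference: one widening multiply. */
    uint64_t full = (uint64_t)a * b;

    /* Fallback: assemble the same 64-bit product from the two halves. */
    uint64_t split = ((uint64_t)mul_high32(a, b) << 32) | mul_low32(a, b);

    printf("full  = %016" PRIx64 "\nsplit = %016" PRIx64 "\n", full, split);
    return 0;
}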
@@ -2842,10 +2842,10 @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_HAS_mulu2_i64) {
         tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_muluh_i64, TCG_TYPE_I64, 0)) {
+    } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
         TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
-        tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_muluh, rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t);
         tcg_temp_free_i64(t);
     } else {
@@ -2868,7 +2868,7 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
         tcg_gen_mov_i64(rl, t);
         tcg_temp_free_i64(t);
     } else if (TCG_TARGET_HAS_mulu2_i64 ||
-               tcg_op_supported(INDEX_op_muluh_i64, TCG_TYPE_I64, 0)) {
+               tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg/tcg.c (13 changed lines)
@@ -1022,8 +1022,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
-    OUTOP(INDEX_op_muluh_i32, TCGOutOpBinary, outop_muluh),
-    OUTOP(INDEX_op_muluh_i64, TCGOutOpBinary, outop_muluh),
+    OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
     OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
     OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
@@ -4030,17 +4029,14 @@ liveness_pass_1(TCGContext *s)
             }
             goto do_not_remove;
 
-        case INDEX_op_mulu2_i32:
-            opc_new = INDEX_op_mul;
-            opc_new2 = INDEX_op_muluh_i32;
-            goto do_mul2;
         case INDEX_op_muls2_i32:
             opc_new = INDEX_op_mul;
             opc_new2 = INDEX_op_mulsh_i32;
             goto do_mul2;
+        case INDEX_op_mulu2_i32:
         case INDEX_op_mulu2_i64:
             opc_new = INDEX_op_mul;
-            opc_new2 = INDEX_op_muluh_i64;
+            opc_new2 = INDEX_op_muluh;
             goto do_mul2;
         case INDEX_op_muls2_i64:
             opc_new = INDEX_op_mul;
@@ -5430,8 +5426,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         case INDEX_op_andc:
         case INDEX_op_eqv:
         case INDEX_op_mul:
-        case INDEX_op_muluh_i32:
-        case INDEX_op_muluh_i64:
+        case INDEX_op_muluh:
         case INDEX_op_nand:
         case INDEX_op_nor:
         case INDEX_op_or: