tcg: Merge INDEX_op_qemu_{ld,st}_{i32,i64,i128}

Merge into INDEX_op_qemu_{ld,st,ld2,st2}, where the "2" suffix indicates
that two data inputs or outputs are required. This simplifies the handling
of i64 and i128, whose host register count depends on the host word size.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit aae2456ac0 (parent 33aba058c8)
Richard Henderson, 2025-02-09 12:55:15 -08:00
15 changed files with 200 additions and 306 deletions
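
For orientation: the value type no longer selects the opcode; it travels with the op as a TCGType, and the only remaining distinction is whether the data occupies one or two host registers. Concretely, i32 always uses qemu_ld/qemu_st; i64 uses qemu_ld/qemu_st on 64-bit hosts and qemu_ld2/qemu_st2 on 32-bit hosts; i128 always uses qemu_ld2/qemu_st2 (still 64-bit hosts only). A minimal sketch of the selection logic, lifted from the new gen_ld_i64() in tcg/tcg-op-ldst.c below, with comments added:

static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* i64 spans two 32-bit host registers: use the two-output op. */
        gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64,
                  tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
                  addr, oi);
    } else {
        /* i64 fits in one 64-bit host register: use the one-output op. */
        gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
    }
}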

include/tcg/tcg-opc.h

@@ -124,18 +124,10 @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
 DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT)
 
-DEF(qemu_ld_i32, 1, 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i32, 0, 1 + 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_i64, DATA64_ARGS, 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i64, 0, DATA64_ARGS + 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-
-/* Only for 64-bit hosts at the moment. */
-DEF(qemu_ld_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_st, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_ld2, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_st2, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
 
 /* Host vector support. */
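
For reference, assuming tcg-opc.h's DEF(name, oargs, iargs, cargs, flags) signature, the new entries read directly as register counts, e.g.:

DEF(qemu_ld2, 2, 1, 1, ...)  /* 2 output regs (data lo/hi), 1 input reg
                                (address), 1 constant arg (the MemOpIdx) */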

tcg/aarch64/tcg-target.c.inc

@@ -2875,18 +2875,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
     TCGArg a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld:
         tcg_out_qemu_ld(s, a0, a1, a2, ext);
         break;
-    case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st:
         tcg_out_qemu_st(s, a0, a1, a2, ext);
         break;
-    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_ld2:
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
         break;
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_st2:
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
         break;
@@ -3342,15 +3340,13 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_ld2:
         return C_O2_I1(r, r, r);
-    case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st:
         return C_O0_I2(rz, r);
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_st2:
         return C_O0_I3(rz, rz, r);
 
     case INDEX_op_add_vec:

tcg/arm/tcg-target.c.inc

@@ -2570,17 +2570,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld:
         tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
         break;
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld2:
         tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
         break;
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
         break;
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st2:
         tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
         break;
@@ -2596,13 +2596,13 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, q);
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld2:
         return C_O2_I1(e, p, q);
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         return C_O0_I2(q, q);
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st2:
         return C_O0_I3(Q, p, q);
 
     case INDEX_op_st_vec:

tcg/i386/tcg-target.c.inc

@@ -2457,7 +2457,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     switch (memop & MO_SIZE) {
     case MO_8:
-        /* This is handled with constraints on INDEX_op_qemu_st_i32. */
+        /* This is handled with constraints on INDEX_op_qemu_st. */
         tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
         tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
                                  datalo, h.base, h.index, 0, h.ofs);
@@ -3552,34 +3552,18 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, a0, -1, a1, a2, type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I64);
-        } else {
-            tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
-        }
-        break;
-    case INDEX_op_qemu_ld_i128:
-        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
-        tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I128);
+    case INDEX_op_qemu_ld2:
+        tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
         break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, a0, -1, a1, a2, type);
         break;
-    case INDEX_op_qemu_st_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I64);
-        } else {
-            tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
-        }
-        break;
-    case INDEX_op_qemu_st_i128:
-        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
-        tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
+    case INDEX_op_qemu_st2:
+        tcg_out_qemu_st(s, a0, a1, a2, args[3], type);
         break;
 
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -4135,25 +4119,17 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, L);
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         return (TCG_TARGET_REG_BITS == 32 && flags == MO_8
                 ? C_O0_I2(s, L)
                 : C_O0_I2(L, L));
-    case INDEX_op_qemu_ld_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L);
-    case INDEX_op_qemu_st_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
-    case INDEX_op_qemu_ld_i128:
-        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+    case INDEX_op_qemu_ld2:
         return C_O2_I1(r, r, L);
-    case INDEX_op_qemu_st_i128:
-        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+    case INDEX_op_qemu_st2:
         return C_O0_I3(L, L, L);
 
     case INDEX_op_ld_vec:

tcg/loongarch64/tcg-target.c.inc

@@ -2020,22 +2020,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a3 = args[3];
 
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, a0, a1, a2, type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
-        break;
-    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_ld2:
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
         break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, a0, a1, a2, type);
         break;
-    case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
-        break;
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_st2:
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
         break;
@@ -2541,18 +2535,16 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st:
         return C_O0_I2(rz, r);
-    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_ld2:
         return C_N2_I1(r, r, r);
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_st2:
         return C_O0_I3(r, r, r);
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
 
     case INDEX_op_ld_vec:

tcg/mips/tcg-target.c.inc

@@ -2381,26 +2381,20 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, a0, 0, a1, a2, type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I64);
-        } else {
-            tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
-        }
+    case INDEX_op_qemu_ld2:
+        tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+        tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
         break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, a0, 0, a1, a2, type);
         break;
-    case INDEX_op_qemu_st_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I64);
-        } else {
-            tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
-        }
+    case INDEX_op_qemu_st2:
+        tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+        tcg_out_qemu_st(s, a0, a1, a2, args[3], type);
         break;
 
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -2415,14 +2409,14 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         return C_O0_I2(rz, r);
-    case INDEX_op_qemu_ld_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
-    case INDEX_op_qemu_st_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rz, r) : C_O0_I3(rz, rz, r);
+    case INDEX_op_qemu_ld2:
+        return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
+    case INDEX_op_qemu_st2:
+        return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(rz, rz, r);
 
     default:
         return C_NotImplemented;

tcg/optimize.c

@@ -3180,21 +3180,14 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_orc_vec:
             done = fold_orc(&ctx, op);
             break;
-        case INDEX_op_qemu_ld_i32:
+        case INDEX_op_qemu_ld:
            done = fold_qemu_ld_1reg(&ctx, op);
             break;
-        case INDEX_op_qemu_ld_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                done = fold_qemu_ld_1reg(&ctx, op);
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        case INDEX_op_qemu_ld_i128:
+        case INDEX_op_qemu_ld2:
             done = fold_qemu_ld_2reg(&ctx, op);
             break;
-        case INDEX_op_qemu_st_i32:
-        case INDEX_op_qemu_st_i64:
-        case INDEX_op_qemu_st_i128:
+        case INDEX_op_qemu_st:
+        case INDEX_op_qemu_st2:
             done = fold_qemu_st(&ctx, op);
             break;
         case INDEX_op_rems:

tcg/ppc/tcg-target.c.inc

@@ -3779,35 +3779,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
-        } else {
+    case INDEX_op_qemu_ld2:
+        if (TCG_TARGET_REG_BITS == 32) {
             tcg_out_qemu_ld(s, args[0], args[1], args[2],
                             args[3], TCG_TYPE_I64);
+            break;
         }
-        break;
-    case INDEX_op_qemu_ld_i128:
-        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
         tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
         break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, args[0], -1, args[1], args[2], type);
         break;
-    case INDEX_op_qemu_st_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
-        } else {
+    case INDEX_op_qemu_st2:
+        if (TCG_TARGET_REG_BITS == 32) {
             tcg_out_qemu_st(s, args[0], args[1], args[2],
                             args[3], TCG_TYPE_I64);
+            break;
         }
-        break;
-    case INDEX_op_qemu_st_i128:
-        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
         tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
         break;
@@ -4426,20 +4418,17 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_ld_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
+    case INDEX_op_qemu_ld2:
+        return TCG_TARGET_REG_BITS == 64
+               ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r);
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         return C_O0_I2(r, r);
-    case INDEX_op_qemu_st_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
-    case INDEX_op_qemu_ld_i128:
-        return C_N1O1_I1(o, m, r);
-    case INDEX_op_qemu_st_i128:
-        return C_O0_I3(o, m, r);
+    case INDEX_op_qemu_st2:
+        return TCG_TARGET_REG_BITS == 64
+               ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r);
 
     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:

tcg/riscv/tcg-target.c.inc

@@ -2633,17 +2633,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, a0, a1, a2, type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
-        break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
-        break;
-    case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, a0, a1, a2, type);
         break;
 
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -2875,11 +2869,9 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st:
         return C_O0_I2(rz, r);
 
     case INDEX_op_st_vec:

tcg/s390x/tcg-target.c.inc

@@ -3133,22 +3133,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, args[0], args[1], args[2], type);
         break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
-        break;
-    case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
-        break;
-    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_ld2:
         tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
         break;
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_st2:
         tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
         break;
@@ -3600,15 +3594,13 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_st_i64:
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         return C_O0_I2(r, r);
-    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_ld2:
         return C_O2_I1(o, m, r);
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_st2:
         return C_O0_I3(o, m, r);
 
     case INDEX_op_st_vec:

tcg/sparc64/tcg-target.c.inc

@@ -2068,17 +2068,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+    case INDEX_op_qemu_ld:
+        tcg_out_qemu_ld(s, a0, a1, a2, type);
         break;
-    case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
-        break;
-    case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
-        break;
-    case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
+    case INDEX_op_qemu_st:
+        tcg_out_qemu_st(s, a0, a1, a2, type);
         break;
 
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -2093,12 +2087,10 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st:
         return C_O0_I2(rz, r);
 
     default:

tcg/tcg-op-ldst.c

@@ -88,28 +88,40 @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
     return op;
 }
 
-static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
-                     TCGTemp *addr, MemOpIdx oi)
+static void gen_ldst1(TCGOpcode opc, TCGType type, TCGTemp *v,
+                      TCGTemp *addr, MemOpIdx oi)
 {
-    TCGOp *op;
-
-    if (vh) {
-        op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
-                         temp_arg(addr), oi);
-    } else {
-        op = tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
-    }
+    TCGOp *op = tcg_gen_op3(opc, type, temp_arg(v), temp_arg(addr), oi);
     TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
 }
 
-static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+static void gen_ldst2(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
+                      TCGTemp *addr, MemOpIdx oi)
 {
+    TCGOp *op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
+                            temp_arg(addr), oi);
+    TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
+}
+
+static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+{
     if (TCG_TARGET_REG_BITS == 32) {
-        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
-        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
-        gen_ldst(opc, TCG_TYPE_I64, vl, vh, addr, oi);
+        gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64,
+                  tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
+                  addr, oi);
     } else {
-        gen_ldst(opc, TCG_TYPE_I64, tcgv_i64_temp(v), NULL, addr, oi);
+        gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
    }
 }
+
+static void gen_st_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+{
+    if (TCG_TARGET_REG_BITS == 32) {
+        gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I64,
+                  tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
+                  addr, oi);
+    } else {
+        gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
+    }
+}
@@ -236,8 +248,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
     }
 
     copy_addr = plugin_maybe_preserve_addr(addr);
-    gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
-             tcgv_i32_temp(val), NULL, addr, oi);
+    gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
     plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
@@ -292,8 +303,7 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
-    gen_ldst(INDEX_op_qemu_st_i32, TCG_TYPE_I32,
-             tcgv_i32_temp(val), NULL, addr, oi);
+    gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
     plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
 
     if (swap) {
@@ -340,7 +350,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
     }
 
     copy_addr = plugin_maybe_preserve_addr(addr);
-    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
+    gen_ld_i64(val, addr, oi);
     plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
                                  QEMU_PLUGIN_MEM_R);
@@ -407,7 +417,7 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
-    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
+    gen_st_i64(val, addr, oi);
     plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
 
     if (swap) {
@@ -546,8 +556,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             hi = TCGV128_HIGH(val);
         }
-        gen_ldst(INDEX_op_qemu_ld_i128, TCG_TYPE_I128, tcgv_i64_temp(lo),
-                 tcgv_i64_temp(hi), addr, oi);
+        gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I128, tcgv_i64_temp(lo),
+                  tcgv_i64_temp(hi), addr, oi);
 
         if (need_bswap) {
             tcg_gen_bswap64_i64(lo, lo);
@@ -575,8 +585,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
            y = TCGV128_LOW(val);
         }
 
-        gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
-                     make_memop_idx(mop[0], idx));
+        gen_ld_i64(x, addr, make_memop_idx(mop[0], idx));
 
         if (need_bswap) {
             tcg_gen_bswap64_i64(x, x);
@@ -592,8 +601,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             addr_p8 = tcgv_i64_temp(t);
         }
 
-        gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
-                     make_memop_idx(mop[1], idx));
+        gen_ld_i64(y, addr_p8, make_memop_idx(mop[1], idx));
         tcg_temp_free_internal(addr_p8);
 
         if (need_bswap) {
@@ -657,8 +665,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
            hi = TCGV128_HIGH(val);
         }
-        gen_ldst(INDEX_op_qemu_st_i128, TCG_TYPE_I128,
-                 tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
+        gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I128,
+                  tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
 
        if (need_bswap) {
            tcg_temp_free_i64(lo);
@@ -685,8 +693,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
            x = b;
         }
 
-        gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
-                     make_memop_idx(mop[0], idx));
+        gen_st_i64(x, addr, make_memop_idx(mop[0], idx));
 
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -700,12 +707,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
         if (b) {
             tcg_gen_bswap64_i64(b, y);
-            gen_ldst_i64(INDEX_op_qemu_st_i64, b, addr_p8,
-                         make_memop_idx(mop[1], idx));
+            gen_st_i64(b, addr_p8, make_memop_idx(mop[1], idx));
             tcg_temp_free_i64(b);
         } else {
-            gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
-                         make_memop_idx(mop[1], idx));
+            gen_st_i64(y, addr_p8, make_memop_idx(mop[1], idx));
         }
         tcg_temp_free_internal(addr_p8);
     } else {

tcg/tcg.c

@@ -2432,14 +2432,20 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_exit_tb:
     case INDEX_op_goto_tb:
     case INDEX_op_goto_ptr:
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_ld_i64:
-    case INDEX_op_qemu_st_i64:
         return true;
 
-    case INDEX_op_qemu_ld_i128:
-    case INDEX_op_qemu_st_i128:
+    case INDEX_op_qemu_ld:
+    case INDEX_op_qemu_st:
+        tcg_debug_assert(type <= TCG_TYPE_REG);
+        return true;
+
+    case INDEX_op_qemu_ld2:
+    case INDEX_op_qemu_st2:
+        if (TCG_TARGET_REG_BITS == 32) {
+            tcg_debug_assert(type == TCG_TYPE_I64);
+            return true;
+        }
+        tcg_debug_assert(type == TCG_TYPE_I128);
         return TCG_TARGET_HAS_qemu_ldst_i128;
 
     case INDEX_op_add:
@@ -3007,12 +3013,10 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
             }
             i = 1;
             break;
-        case INDEX_op_qemu_ld_i32:
-        case INDEX_op_qemu_st_i32:
-        case INDEX_op_qemu_ld_i64:
-        case INDEX_op_qemu_st_i64:
-        case INDEX_op_qemu_ld_i128:
-        case INDEX_op_qemu_st_i128:
+        case INDEX_op_qemu_ld:
+        case INDEX_op_qemu_st:
+        case INDEX_op_qemu_ld2:
+        case INDEX_op_qemu_st2:
             {
                 const char *s_al, *s_op, *s_at;
                 MemOpIdx oi = op->args[k++];

tcg/tci.c

@@ -789,46 +789,33 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tb_ptr = ptr;
             break;
-        case INDEX_op_qemu_ld_i32:
+        case INDEX_op_qemu_ld:
             tci_args_rrm(insn, &r0, &r1, &oi);
             taddr = regs[r1];
             regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
             break;
-        case INDEX_op_qemu_ld_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = regs[r2];
-                oi = regs[r3];
-            }
-            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
-            if (TCG_TARGET_REG_BITS == 32) {
-                tci_write_reg64(regs, r1, r0, tmp64);
-            } else {
-                regs[r0] = tmp64;
-            }
-            break;
-        case INDEX_op_qemu_st_i32:
+        case INDEX_op_qemu_st:
             tci_args_rrm(insn, &r0, &r1, &oi);
             taddr = regs[r1];
             tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
             break;
-        case INDEX_op_qemu_st_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                tmp64 = regs[r0];
-                taddr = regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                tmp64 = tci_uint64(regs[r1], regs[r0]);
-                taddr = regs[r2];
-                oi = regs[r3];
-            }
+        case INDEX_op_qemu_ld2:
+            tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+            taddr = regs[r2];
+            oi = regs[r3];
+            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
+            tci_write_reg64(regs, r1, r0, tmp64);
+            break;
+        case INDEX_op_qemu_st2:
+            tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+            tmp64 = tci_uint64(regs[r1], regs[r0]);
+            taddr = regs[r2];
+            oi = regs[r3];
             tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
             break;
@@ -1056,23 +1043,21 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                            str_r(r2), str_r(r3));
         break;
-    case INDEX_op_qemu_ld_i64:
-    case INDEX_op_qemu_st_i64:
-        if (TCG_TARGET_REG_BITS == 32) {
-            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
-                               op_name, str_r(r0), str_r(r1),
-                               str_r(r2), str_r(r3));
-            break;
-        }
-        /* fall through */
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_ld:
+    case INDEX_op_qemu_st:
         tci_args_rrm(insn, &r0, &r1, &oi);
         info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                            op_name, str_r(r0), str_r(r1), oi);
         break;
+    case INDEX_op_qemu_ld2:
+    case INDEX_op_qemu_st2:
+        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+                           op_name, str_r(r0), str_r(r1),
+                           str_r(r2), str_r(r3));
+        break;
 
     case 0:
         /* tcg_out_nop_fill uses zeros */
         if (insn == 0) {

tcg/tci/tcg-target.c.inc

@@ -40,14 +40,14 @@ static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld:
         return C_O1_I1(r, r);
-    case INDEX_op_qemu_ld_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
-    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st:
         return C_O0_I2(r, r);
-    case INDEX_op_qemu_st_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
+    case INDEX_op_qemu_ld2:
+        return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
+    case INDEX_op_qemu_st2:
+        return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r);
 
     default:
         return C_NotImplemented;
@@ -1203,22 +1203,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_qemu_ld_i64:
-    case INDEX_op_qemu_st_i64:
-        if (TCG_TARGET_REG_BITS == 32) {
-            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
-            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
-            break;
-        }
-        /* fall through */
-    case INDEX_op_qemu_ld_i32:
-    case INDEX_op_qemu_st_i32:
-        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
-            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
-            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
-        } else {
-            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
-        }
+    case INDEX_op_qemu_ld:
+    case INDEX_op_qemu_st:
+        tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
         break;
+    case INDEX_op_qemu_ld2:
+    case INDEX_op_qemu_st2:
+        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
+        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
+        break;
 
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
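
One hedged reading of the qemu_ld2/qemu_st2 arm in the TCI hunk above: with four register operands packed into the instruction word there is no field left for the MemOpIdx immediate, so it is materialized into TCG_REG_TMP and passed as the fourth register, which the interpreter reads back with oi = regs[r3]:

    case INDEX_op_qemu_ld2:
    case INDEX_op_qemu_st2:
        /* No room to encode oi alongside four registers; pass it in a reg. */
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
        break;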