tcg: Split INDEX_op_qemu_{ld,st}* for guest address size

For 32-bit hosts, we cannot simply rely on TCGContext.addr_bits,
as we need one or two host registers to represent the guest address.

Create the new opcodes and update all users.  Since we have not
yet eliminated TARGET_LONG_BITS, only one of the two opcodes will
ever be used, so we can get away with treating them the same in
the backends.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2023-05-16 20:07:20 -07:00
commit fecccfcc54
parent 0700ceb393
15 changed files with 436 additions and 248 deletions
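
As the message above notes, only one guest-address variant is live in any
given build while TARGET_LONG_BITS survives.  A minimal, self-contained
sketch of that selection (the helper qemu_ld_opc() and the enum subset are
hypothetical stand-ins, not code from this patch):

    /*
     * Illustrative sketch only.  The opcode names are the real ones
     * introduced by this commit; everything else is a stand-in.
     */
    #include <stdio.h>

    typedef enum {
        INDEX_op_qemu_ld_a32_i32,   /* 32-bit guest addr, 32-bit data */
        INDEX_op_qemu_ld_a64_i32,   /* 64-bit guest addr, 32-bit data */
        INDEX_op_qemu_ld_a32_i64,   /* 32-bit guest addr, 64-bit data */
        INDEX_op_qemu_ld_a64_i64,   /* 64-bit guest addr, 64-bit data */
    } TCGOpcode;

    /* Stand-in for the real per-build compile-time constant. */
    #define TARGET_LONG_BITS 64

    /*
     * TARGET_LONG_BITS is fixed per build, so exactly one of the
     * _a32/_a64 variants is ever emitted for each data size, which
     * is why a backend may still treat the pair identically.
     */
    static TCGOpcode qemu_ld_opc(int data_bits)
    {
        if (TARGET_LONG_BITS == 32) {
            return data_bits == 64 ? INDEX_op_qemu_ld_a32_i64
                                   : INDEX_op_qemu_ld_a32_i32;
        }
        return data_bits == 64 ? INDEX_op_qemu_ld_a64_i64
                               : INDEX_op_qemu_ld_a64_i32;
    }

    int main(void)
    {
        /* With a 64-bit guest, both sizes select an _a64 opcode. */
        printf("ld i32 -> opcode %d\n", qemu_ld_opc(32));
        printf("ld i64 -> opcode %d\n", qemu_ld_opc(64));
        return 0;
    }

On a 32-bit host with a 64-bit guest, the address then occupies two of the
opcode's input arguments (a lo/hi register pair), which is exactly what the
backend hunks below handle via the extra args[] slots.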

tcg/ppc/tcg-target.c.inc
@@ -2832,43 +2832,58 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
         break;
-    case INDEX_op_qemu_ld_i32:
-        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
-            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
-                            args[2], TCG_TYPE_I32);
-        } else {
+    case INDEX_op_qemu_ld_a64_i32:
+        if (TCG_TARGET_REG_BITS == 32) {
             tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
                             args[3], TCG_TYPE_I32);
+            break;
         }
+        /* fall through */
+    case INDEX_op_qemu_ld_a32_i32:
+        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
         break;
-    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_ld_a32_i64:
         if (TCG_TARGET_REG_BITS == 64) {
             tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
                             args[2], TCG_TYPE_I64);
-        } else if (TARGET_LONG_BITS == 32) {
+        } else {
             tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
                             args[3], TCG_TYPE_I64);
+        }
+        break;
+    case INDEX_op_qemu_ld_a64_i64:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
         } else {
             tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
                             args[4], TCG_TYPE_I64);
         }
         break;
-    case INDEX_op_qemu_st_i32:
-        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
-            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
-                            args[2], TCG_TYPE_I32);
-        } else {
+    case INDEX_op_qemu_st_a64_i32:
+        if (TCG_TARGET_REG_BITS == 32) {
             tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
                             args[3], TCG_TYPE_I32);
+            break;
         }
+        /* fall through */
+    case INDEX_op_qemu_st_a32_i32:
+        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
         break;
-    case INDEX_op_qemu_st_i64:
+    case INDEX_op_qemu_st_a32_i64:
         if (TCG_TARGET_REG_BITS == 64) {
             tcg_out_qemu_st(s, args[0], -1, args[1], -1,
                             args[2], TCG_TYPE_I64);
-        } else if (TARGET_LONG_BITS == 32) {
+        } else {
             tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
                             args[3], TCG_TYPE_I64);
+        }
+        break;
+    case INDEX_op_qemu_st_a64_i64:
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
         } else {
             tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
                             args[4], TCG_TYPE_I64);
@@ -3689,25 +3704,23 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_sub2_i32:
         return C_O2_I4(r, r, rI, rZM, r, r);
-    case INDEX_op_qemu_ld_i32:
-        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
-                ? C_O1_I1(r, r)
-                : C_O1_I2(r, r, r));
+    case INDEX_op_qemu_ld_a32_i32:
+        return C_O1_I1(r, r);
+    case INDEX_op_qemu_ld_a64_i32:
+        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
+    case INDEX_op_qemu_ld_a32_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
+    case INDEX_op_qemu_ld_a64_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
-    case INDEX_op_qemu_st_i32:
-        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
-                ? C_O0_I2(r, r)
-                : C_O0_I3(r, r, r));
-    case INDEX_op_qemu_ld_i64:
-        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
-                : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, r)
-                : C_O2_I2(r, r, r, r));
-    case INDEX_op_qemu_st_i64:
-        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
-                : TARGET_LONG_BITS == 32 ? C_O0_I3(r, r, r)
-                : C_O0_I4(r, r, r, r));
+    case INDEX_op_qemu_st_a32_i32:
+        return C_O0_I2(r, r);
+    case INDEX_op_qemu_st_a64_i32:
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
+    case INDEX_op_qemu_st_a32_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
+    case INDEX_op_qemu_st_a64_i64:
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
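
The constraint sets in the second hunk fall out of simple register counting:
a value wider than a host register needs two registers, loads output the data
and input the address, stores input both.  A hypothetical sketch of that
arithmetic (not QEMU code; regs_for() is invented for illustration):

    #include <stdio.h>

    /* Registers needed to hold a value of value_bits on this host. */
    static int regs_for(int value_bits, int host_bits)
    {
        return value_bits > host_bits ? 2 : 1;
    }

    int main(void)
    {
        int host = 32;  /* a 32-bit host, the interesting case */

        /* qemu_ld_a64_i64: outputs = data regs, inputs = address regs. */
        printf("ld_a64_i64: C_O%d_I%d\n",
               regs_for(64, host), regs_for(64, host));   /* C_O2_I2 */

        /* qemu_st_a64_i64: no outputs; inputs = data + address regs. */
        printf("st_a64_i64: C_O0_I%d\n",
               regs_for(64, host) + regs_for(64, host));  /* C_O0_I4 */
        return 0;
    }

On a 64-bit host both counts collapse to 1, which is why every 64-bit-host
arm of tcg_target_op_def() above returns C_O1_I1 or C_O0_I2 regardless of
the guest address size.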