tcg: Introduce tcg_out_movext2

This is common code in most qemu_{ld,st} slow paths, moving two
registers when there may be overlap between sources and destinations.
At present, this is only used by 32-bit hosts for 64-bit data,
but will shortly be used for more than that.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2023-04-05 22:27:03 -07:00
commit: 129f1f9ee7
parent: c6a98619f7
3 changed files with 90 additions and 42 deletions
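The helper itself is added in tcg/tcg.c and does not appear in the hunks below. As a rough illustration of what it must implement, here is a minimal, self-contained C sketch of the two-move overlap rule; emit_mov and movext2_sketch are hypothetical stand-ins, and the real TCGMovExtend machinery additionally handles type and sign extension:

#include <stdio.h>

typedef int Reg;

/* Stand-in for the backend's register-to-register move emitter. */
static void emit_mov(Reg dst, Reg src)
{
    printf("mov r%d, r%d\n", dst, src);
}

/*
 * Move s1->d1 and s2->d2 "in parallel": order the two moves so that
 * neither clobbers the other's source, falling back to a scratch
 * register only for a full swap.
 */
static void movext2_sketch(Reg d1, Reg s1, Reg d2, Reg s2, Reg scratch)
{
    if (d1 != s2) {
        /* The first move cannot clobber the second move's source. */
        emit_mov(d1, s1);
        emit_mov(d2, s2);
    } else if (d2 != s1) {
        /* Reversing the order avoids the clobber. */
        emit_mov(d2, s2);
        emit_mov(d1, s1);
    } else {
        /* d1 == s2 and d2 == s1: a full swap needs the scratch. */
        emit_mov(scratch, s1);
        emit_mov(d2, s2);
        emit_mov(d1, scratch);
    }
}

int main(void)
{
    /* Worst case from the first hunk below: datalo == R1, datahi == R0;
       12 stands in for a scratch such as the Arm backend's TCG_REG_TMP. */
    movext2_sketch(1, 0, 0, 1, 12);
    return 0;
}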

tcg/arm/tcg-target.c.inc

@@ -1545,7 +1545,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
 
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGReg argreg, datalo, datahi;
+    TCGReg argreg;
     MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
 
@@ -1565,22 +1565,16 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     /* Use the canonical unsigned helpers and minimize icache usage. */
     tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
 
-    datalo = lb->datalo_reg;
-    datahi = lb->datahi_reg;
     if ((opc & MO_SIZE) == MO_64) {
-        if (datalo != TCG_REG_R1) {
-            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
-            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
-        } else if (datahi != TCG_REG_R0) {
-            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
-            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
-        } else {
-            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
-            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
-            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
-        }
+        TCGMovExtend ext[2] = {
+            { .dst = lb->datalo_reg, .dst_type = TCG_TYPE_I32,
+              .src = TCG_REG_R0, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+            { .dst = lb->datahi_reg, .dst_type = TCG_TYPE_I32,
+              .src = TCG_REG_R1, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+        };
+        tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP);
     } else {
-        tcg_out_movext(s, TCG_TYPE_I32, datalo,
+        tcg_out_movext(s, TCG_TYPE_I32, lb->datalo_reg,
                        TCG_TYPE_I32, opc & MO_SSIZE, TCG_REG_R0);
     }
 
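Note that with both types TCG_TYPE_I32 and .src_ext = MO_UL, each TCGMovExtend entry degenerates to a plain 32-bit move, so this form emits the same instructions as the deleted if/else ladder; the gain is that the clobber analysis now lives once in tcg_out_movext2 rather than being open-coded in each backend's slow path.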
@@ -1663,17 +1657,15 @@ static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
     if (TARGET_LONG_BITS == 64) {
         /* 64-bit target address is aligned into R2:R3. */
-        if (l->addrhi_reg != TCG_REG_R2) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
-        } else if (l->addrlo_reg != TCG_REG_R3) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
-        }
+        TCGMovExtend ext[2] = {
+            { .dst = TCG_REG_R2, .dst_type = TCG_TYPE_I32,
+              .src = l->addrlo_reg,
+              .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+            { .dst = TCG_REG_R3, .dst_type = TCG_TYPE_I32,
+              .src = l->addrhi_reg,
+              .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+        };
+        tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP);
     } else {
         tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
     }
 
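Design note: the deleted rotation pressed TCG_REG_R1 into service as its temporary, whereas the new call passes TCG_REG_TMP, the Arm backend's reserved scratch register. Either way, the extra register is only written in the full-swap case, so the common cases still emit exactly two moves.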