Merge tag 'pull-tcg-20230505' of https://gitlab.com/rth7680/qemu into staging

softfloat: Fix the incorrect computation in float32_exp2
tcg: Remove compatibility helpers for qemu ld/st
target/alpha: Remove TARGET_ALIGNED_ONLY
target/hppa: Remove TARGET_ALIGNED_ONLY
target/sparc: Remove TARGET_ALIGNED_ONLY
tcg: Cleanups preparing to unify calls to qemu_ld/st helpers

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmRVc9UdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9OiAgAgwc6wFOzFtSnYrvH
# b9YgcJLPX8urgx9g1Exv553hbVtt2J0lsLAhlgwKpms3Os4p6znKhUWcGosHFixO
# eBQFqcS22Cu/ZM2s6299GOGDpxCpjx0/bX7JJTjW805SdSgDAuEUIbKe0ZqQT5tx
# ++F9is2+plp95/BeQz2+hbkbbpdktUkkk288Adoz3KRHqt/zd8cer0WrqR2uVAuX
# swpEluwtCfaewc0iPcNjlp9rLzO882wCFm0RG1EC2j9NHtq8O8xyamM9PPEaRXLv
# MiMA2nB6hsGMz33Wuec8cZTMaCLB+Oqhbq7eYPbCA4SmJBE3V9Rgc7GL4B7yCsyI
# OXSK+Q==
# =GIXd
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 05 May 2023 10:23:33 PM BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230505' of https://gitlab.com/rth7680/qemu: (42 commits)
  tcg: Widen helper_*_st[bw]_mmu val arguments
  tcg: Introduce arg_slot_stk_ofs
  tcg: Replace REG_P with arg_loc_reg_p
  tcg: Move TCGLabelQemuLdst to tcg.c
  tcg/sparc64: Pass TCGType to tcg_out_qemu_{ld,st}
  tcg/sparc64: Drop is_64 test from tcg_out_qemu_ld data return
  tcg/s390x: Introduce HostAddress
  tcg/s390x: Pass TCGType to tcg_out_qemu_{ld,st}
  tcg/riscv: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/riscv: Require TCG_TARGET_REG_BITS == 64
  tcg/ppc: Introduce HostAddress
  tcg/ppc: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/mips: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/loongarch64: Introduce HostAddress
  tcg/loongarch64: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/arm: Introduce HostAddress
  tcg/arm: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/aarch64: Introduce HostAddress
  tcg/aarch64: Rationalize args to tcg_out_qemu_{ld,st}
  tcg/i386: Introduce tcg_out_testi
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Commit 47d3878422 by Richard Henderson, 2023-05-05 22:29:28 +01:00
42 changed files with 1120 additions and 1291 deletions

accel/tcg/cputlb.c:

@@ -2508,7 +2508,7 @@ full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
     store_helper(env, addr, val, oi, retaddr, MO_UB);
 }
 
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                         MemOpIdx oi, uintptr_t retaddr)
 {
     full_stb_mmu(env, addr, val, oi, retaddr);
@@ -2521,7 +2521,7 @@ static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
 }
 
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
     full_le_stw_mmu(env, addr, val, oi, retaddr);
@@ -2534,7 +2534,7 @@ static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
 }
 
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
     full_be_stw_mmu(env, addr, val, oi, retaddr);

configs/targets/alpha-linux-user.mak:

@@ -1,4 +1,3 @@
 TARGET_ARCH=alpha
 TARGET_SYSTBL_ABI=common
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y

configs/targets/alpha-softmmu.mak:

@@ -1,3 +1,2 @@
 TARGET_ARCH=alpha
-TARGET_ALIGNED_ONLY=y
 TARGET_SUPPORTS_MTTCG=y

configs/targets/hppa-linux-user.mak:

@@ -1,5 +1,4 @@
 TARGET_ARCH=hppa
 TARGET_SYSTBL_ABI=common,32
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y

configs/targets/hppa-softmmu.mak:

@@ -1,4 +1,3 @@
 TARGET_ARCH=hppa
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y
 TARGET_SUPPORTS_MTTCG=y

configs/targets/sparc-linux-user.mak:

@@ -1,5 +1,4 @@
 TARGET_ARCH=sparc
 TARGET_SYSTBL_ABI=common,32
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y

configs/targets/sparc-softmmu.mak:

@@ -1,3 +1,2 @@
 TARGET_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y

configs/targets/sparc32plus-linux-user.mak:

@@ -4,5 +4,4 @@ TARGET_BASE_ARCH=sparc
 TARGET_ABI_DIR=sparc
 TARGET_SYSTBL_ABI=common,32
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y

configs/targets/sparc64-linux-user.mak:

@@ -3,5 +3,4 @@ TARGET_BASE_ARCH=sparc
 TARGET_ABI_DIR=sparc
 TARGET_SYSTBL_ABI=common,64
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y

configs/targets/sparc64-softmmu.mak:

@@ -1,4 +1,3 @@
 TARGET_ARCH=sparc64
 TARGET_BASE_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y

fpu/softfloat.c:

@@ -5135,7 +5135,7 @@ float32 float32_exp2(float32 a, float_status *status)
     float64_unpack_canonical(&rp, float64_one, status);
     for (i = 0 ; i < 15 ; i++) {
         float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
-        rp = *parts_muladd(&tp, &xp, &rp, 0, status);
+        rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
         xnp = *parts_mul(&xnp, &xp, status);
     }
 
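
The fix is visible in the loop above: each coefficient must be scaled by the running power xnp (x, x^2, x^3, ...), not by the fixed first power xp. A standalone sketch of the corrected accumulation, with plain doubles standing in for QEMU's FloatParts64:

#include <stdio.h>

int main(void)
{
    /* First three terms of 2^x = 1 + x*ln2 + x^2*ln2^2/2! + ... */
    const double c[] = { 0.69314718, 0.24022651, 0.05550411 };
    double x = 0.5, r = 1.0, xn = x;

    for (int i = 0; i < 3; i++) {
        r += c[i] * xn;   /* the bug multiplied by x here every time */
        xn *= x;          /* advance x^n to x^(n+1), as xnp does */
    }
    printf("2^%.1f ~= %f (exact: 1.414214)\n", x, r);
    return 0;
}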

include/tcg/tcg-ldst.h:

@@ -55,15 +55,19 @@ tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr);
 
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+/*
+ * Value extended to at least uint32_t, so that some ABIs do not require
+ * zero-extension from uint8_t or uint16_t.
+ */
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                         MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr);
 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr);
 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr);
 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr);
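
The new comment states the motivation: on ABIs where sub-word arguments are not implicitly zero-extended by the caller, a uint8_t or uint16_t parameter forces extra extension code at every call site. Widening the prototype is safe because the store path only uses the low bits; a minimal standalone check of that equivalence (stand-in functions, not the real helpers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the old and new signatures: the callee derives the
 * width from the MemOp, so only the low byte of 'val' matters. */
static uint8_t stb_old(uint8_t val)  { return val; }
static uint8_t stb_new(uint32_t val) { return (uint8_t)val; }

int main(void)
{
    for (uint32_t v = 0; v < 0x10000; v += 0xff) {
        assert(stb_old((uint8_t)v) == stb_new(v));
    }
    puts("widened argument stores the same byte");
    return 0;
}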

include/tcg/tcg-op.h:

@@ -841,61 +841,6 @@ void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);
 void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp);
 void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp);
 
-static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
-}
-
-static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
-}
-
-static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
-}
-
-static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
-}
-
-static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
-{
-    tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
-}
-
 void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
                                 TCGArg, MemOp);
 void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
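
Out-of-tree code that still spells loads and stores the old way can keep a local shim; the bodies below are exactly two of the deleted inlines (a sketch assuming this tree's "tcg/tcg-op.h", with renamed functions so they do not collide):

#include "tcg/tcg-op.h"

/* Local equivalents of two removed wrappers, kept out of the tree. */
static inline void local_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
    tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
}

static inline void local_qemu_st32(TCGv arg, TCGv addr, int mem_index)
{
    tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
}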

target/alpha/translate.c:

@@ -72,7 +72,7 @@ struct DisasContext {
 #ifdef CONFIG_USER_ONLY
 #define UNALIGN(C)  (C)->unalign
 #else
-#define UNALIGN(C)  0
+#define UNALIGN(C)  MO_ALIGN
 #endif
 
 /* Target-specific return values from translate_one, indicating the
@@ -2399,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         switch ((insn >> 12) & 0xF) {
         case 0x0:
             /* Longword physical access (hw_ldl/p) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
             break;
         case 0x1:
             /* Quadword physical access (hw_ldq/p) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
             break;
         case 0x2:
             /* Longword physical access with lock (hw_ldl_l/p) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
             tcg_gen_mov_i64(cpu_lock_addr, addr);
             tcg_gen_mov_i64(cpu_lock_value, va);
             break;
         case 0x3:
             /* Quadword physical access with lock (hw_ldq_l/p) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
             tcg_gen_mov_i64(cpu_lock_addr, addr);
             tcg_gen_mov_i64(cpu_lock_value, va);
             break;
@@ -2438,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             goto invalid_opc;
         case 0xA:
             /* Longword virtual access with protection check (hw_ldl/w) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+                                MO_LESL | MO_ALIGN);
             break;
         case 0xB:
             /* Quadword virtual access with protection check (hw_ldq/w) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+                                MO_LEUQ | MO_ALIGN);
             break;
         case 0xC:
             /* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2453,12 +2455,14 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         case 0xE:
             /* Longword virtual access with alternate access mode and
                protection checks (hw_ldl/wa) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+                                MO_LESL | MO_ALIGN);
             break;
         case 0xF:
             /* Quadword virtual access with alternate access mode and
                protection checks (hw_ldq/wa) */
-            tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+            tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+                                MO_LEUQ | MO_ALIGN);
             break;
         }
         break;
@@ -2659,7 +2663,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             vb = load_gpr(ctx, rb);
             tmp = tcg_temp_new();
             tcg_gen_addi_i64(tmp, vb, disp12);
-            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
             break;
         case 0x1:
             /* Quadword physical access */
@@ -2667,17 +2671,17 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             vb = load_gpr(ctx, rb);
             tmp = tcg_temp_new();
             tcg_gen_addi_i64(tmp, vb, disp12);
-            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
             break;
         case 0x2:
             /* Longword physical access with lock */
             ret = gen_store_conditional(ctx, ra, rb, disp12,
-                                        MMU_PHYS_IDX, MO_LESL);
+                                        MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
             break;
         case 0x3:
             /* Quadword physical access with lock */
             ret = gen_store_conditional(ctx, ra, rb, disp12,
-                                        MMU_PHYS_IDX, MO_LEUQ);
+                                        MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
             break;
         case 0x4:
             /* Longword virtual access */
@@ -2771,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         break;
     case 0x2A:
         /* LDL_L */
-        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
     case 0x2B:
         /* LDQ_L */
-        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
         break;
     case 0x2C:
         /* STL */
@@ -2788,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
     case 0x2E:
         /* STL_C */
         ret = gen_store_conditional(ctx, ra, rb, disp16,
-                                    ctx->mem_idx, MO_LESL);
+                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
         break;
     case 0x2F:
         /* STQ_C */
         ret = gen_store_conditional(ctx, ra, rb, disp16,
-                                    ctx->mem_idx, MO_LEUQ);
+                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
         break;
     case 0x30:
         /* BR */
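
With TARGET_ALIGNED_ONLY=y gone, alignment is no longer implied globally for the target, so every access that must trap on misalignment now ORs MO_ALIGN into its MemOp, as above. A standalone sketch of the flag composition (the enum values are stand-ins, not QEMU's real MemOp constants):

#include <stdio.h>

enum { MO_32 = 2, MO_SIGN = 8, MO_LE = 0, MO_ALIGN = 16 }; /* stand-ins */

int main(void)
{
    int mo_lesl = MO_32 | MO_SIGN | MO_LE;   /* stand-in for MO_LESL */
    printf("memop 0x%x, with alignment 0x%x\n",
           mo_lesl, mo_lesl | MO_ALIGN);
    return 0;
}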

target/avr/translate.c:

@@ -1492,7 +1492,7 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
         gen_helper_fullwr(cpu_env, data, addr);
     } else {
-        tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
+        tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
 }
 
@@ -1501,7 +1501,7 @@ static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
         gen_helper_fullrd(data, cpu_env, addr);
     } else {
-        tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
+        tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
 }
 
@@ -1979,7 +1979,7 @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -1996,7 +1996,7 @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -2013,7 +2013,7 @@ static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
     tcg_gen_andi_tl(L, addr, 0xff);
     tcg_gen_shri_tl(addr, addr, 8);
@@ -2045,7 +2045,7 @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
     TCGv Rd = cpu_r[0];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -2058,7 +2058,7 @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -2071,7 +2071,7 @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
     gen_set_zaddr(addr);
     return true;

target/cris/translate_v10.c.inc:

@@ -80,13 +80,9 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
     /* Store only if F flag isn't set */
     tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
     tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
-    if (size == 1) {
-        tcg_gen_qemu_st8(tval, taddr, mem_index);
-    } else if (size == 2) {
-        tcg_gen_qemu_st16(tval, taddr, mem_index);
-    } else {
-        tcg_gen_qemu_st32(tval, taddr, mem_index);
-    }
+
+    tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE);
+
     gen_set_label(l1);
     tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */
     tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
@@ -109,13 +105,7 @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
         return;
     }
 
-    if (size == 1) {
-        tcg_gen_qemu_st8(val, addr, mem_index);
-    } else if (size == 2) {
-        tcg_gen_qemu_st16(val, addr, mem_index);
-    } else {
-        tcg_gen_qemu_st32(val, addr, mem_index);
-    }
+    tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE);
 }
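
The ctz32(size) trick above works because the MemOp size field is the log2 of the byte count, and counting the trailing zeros of a power of two is exactly its log2. A standalone check:

#include <stdio.h>

int main(void)
{
    /* size 1/2/4 -> MemOp size field 0/1/2 (MO_8/MO_16/MO_32) */
    for (int size = 1; size <= 4; size <<= 1) {
        printf("size %d -> ctz32(size) = %d -> %d-byte memop\n",
               size, __builtin_ctz(size), 1 << __builtin_ctz(size));
    }
    return 0;
}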

target/hexagon/genptr.c:

@@ -320,14 +320,14 @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
 static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
 {
-    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
+    tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL);
     tcg_gen_mov_tl(hex_llsc_addr, vaddr);
     tcg_gen_mov_tl(hex_llsc_val, dest);
 }
 
 static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
 {
-    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
+    tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ);
     tcg_gen_mov_tl(hex_llsc_addr, vaddr);
     tcg_gen_mov_i64(hex_llsc_val_i64, dest);
 }
@@ -678,7 +678,7 @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
 {
     Insn *insn = ctx->insn;  /* Needed for CHECK_NOSHUF */
     CHECK_NOSHUF(EA, 8);
-    tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx);
+    tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ);
 }
 
 static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
@@ -1019,7 +1019,7 @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
         tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
     }
     for (int i = 0; i < sizeof(MMVector) / 8; i++) {
-        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
+        tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
         tcg_gen_addi_tl(src, src, 8);
         tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
     }

target/hexagon/idef-parser/parser-helpers.c:

@@ -1737,36 +1737,34 @@ void gen_load_cancel(Context *c, YYLTYPE *locp)
 void gen_load(Context *c, YYLTYPE *locp, HexValue *width,
               HexSignedness signedness, HexValue *ea, HexValue *dst)
 {
-    char size_suffix[4] = {0};
-    const char *sign_suffix;
+    unsigned dst_bit_width;
+    unsigned src_bit_width;
+
     /* Memop width is specified in the load macro */
     assert_signedness(c, locp, signedness);
-    sign_suffix = (width->imm.value > 4)
-                  ? ""
-                  : ((signedness == UNSIGNED) ? "u" : "s");
+
     /* If dst is a variable, assert that is declared and load the type info */
     if (dst->type == VARID) {
         find_variable(c, locp, dst, dst);
     }
 
-    snprintf(size_suffix, 4, "%" PRIu64, width->imm.value * 8);
+    src_bit_width = width->imm.value * 8;
+    dst_bit_width = MAX(dst->bit_width, 32);
+
     /* Lookup the effective address EA */
     find_variable(c, locp, ea, ea);
     OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
     OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n");
     OUT(c, locp, "process_store(ctx, 1);\n");
     OUT(c, locp, "}\n");
-    OUT(c, locp, "tcg_gen_qemu_ld", size_suffix, sign_suffix);
+
+    OUT(c, locp, "tcg_gen_qemu_ld_i", &dst_bit_width);
     OUT(c, locp, "(");
-    if (dst->bit_width > width->imm.value * 8) {
-        /*
-         * Cast to the correct TCG type if necessary, to avoid implict cast
-         * warnings. This is needed when the width of the destination var is
-         * larger than the size of the requested load.
-         */
-        OUT(c, locp, "(TCGv) ");
+    OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx, MO_", &src_bit_width);
+    if (signedness == SIGNED) {
+        OUT(c, locp, " | MO_SIGN");
     }
-    OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n");
+    OUT(c, locp, " | MO_TE);\n");
 }
 
 void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea,
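
With this change the parser emits the MemOp-based call directly instead of pasting together a sized helper name. For a 2-byte signed load into a 32-bit destination, the generated code now looks roughly like this (illustrative; the temporary names depend on the macro being compiled):

if (insn->slot == 0 && pkt->pkt_has_store_s1) {
    probe_noshuf_load(EA, 2, ctx->mem_idx);
    process_store(ctx, 1);
}
tcg_gen_qemu_ld_i32(tmp_0, EA, ctx->mem_idx, MO_16 | MO_SIGN | MO_TE);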

target/hexagon/macros.h:

@@ -99,37 +99,37 @@
 #define MEM_LOAD1s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 1); \
-        tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_SB); \
     } while (0)
 #define MEM_LOAD1u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 1); \
-        tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_UB); \
     } while (0)
 #define MEM_LOAD2s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 2); \
-        tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \
     } while (0)
 #define MEM_LOAD2u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 2); \
-        tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \
     } while (0)
 #define MEM_LOAD4s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 4); \
-        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \
     } while (0)
 #define MEM_LOAD4u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 4); \
-        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \
     } while (0)
 #define MEM_LOAD8u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 8); \
-        tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \
     } while (0)
 
 #define MEM_STORE1_FUNC(X) \

target/hexagon/translate.c:

@@ -627,27 +627,27 @@ void process_store(DisasContext *ctx, int slot_num)
         switch (ctx->store_width[slot_num]) {
         case 1:
             gen_check_store_width(ctx, slot_num);
-            tcg_gen_qemu_st8(hex_store_val32[slot_num],
-                             hex_store_addr[slot_num],
-                             ctx->mem_idx);
+            tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                               hex_store_addr[slot_num],
+                               ctx->mem_idx, MO_UB);
             break;
         case 2:
             gen_check_store_width(ctx, slot_num);
-            tcg_gen_qemu_st16(hex_store_val32[slot_num],
-                              hex_store_addr[slot_num],
-                              ctx->mem_idx);
+            tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                               hex_store_addr[slot_num],
+                               ctx->mem_idx, MO_TEUW);
             break;
         case 4:
             gen_check_store_width(ctx, slot_num);
-            tcg_gen_qemu_st32(hex_store_val32[slot_num],
-                              hex_store_addr[slot_num],
-                              ctx->mem_idx);
+            tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                               hex_store_addr[slot_num],
+                               ctx->mem_idx, MO_TEUL);
             break;
         case 8:
             gen_check_store_width(ctx, slot_num);
-            tcg_gen_qemu_st64(hex_store_val64[slot_num],
-                              hex_store_addr[slot_num],
-                              ctx->mem_idx);
+            tcg_gen_qemu_st_i64(hex_store_val64[slot_num],
+                                hex_store_addr[slot_num],
+                                ctx->mem_idx, MO_TEUQ);
             break;
         default:
             {
@@ -693,13 +693,13 @@ static void process_dczeroa(DisasContext *ctx)
         TCGv_i64 zero = tcg_constant_i64(0);
 
         tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
     }
 }
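
Note the dczeroa stores above use plain MO_UQ with no explicit endianness flag; that is harmless here because the value written is zero, which is byte-order invariant. A trivial standalone check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t zero = 0, swapped = __builtin_bswap64(zero);
    printf("zero is endian-invariant: %d\n",
           memcmp(&zero, &swapped, sizeof(zero)) == 0);
    return 0;
}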

target/hppa/translate.c:

@@ -271,7 +271,7 @@ typedef struct DisasContext {
 #ifdef CONFIG_USER_ONLY
 #define UNALIGN(C)  (C)->unalign
 #else
-#define UNALIGN(C)  0
+#define UNALIGN(C)  MO_ALIGN
 #endif
 
 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */

target/m68k/translate.c:

@@ -304,23 +304,14 @@ static inline void gen_addr_fault(DisasContext *s)
 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                             int sign, int index)
 {
-    TCGv tmp;
-    tmp = tcg_temp_new_i32();
-    switch(opsize) {
+    TCGv tmp = tcg_temp_new_i32();
+
+    switch (opsize) {
     case OS_BYTE:
-        if (sign)
-            tcg_gen_qemu_ld8s(tmp, addr, index);
-        else
-            tcg_gen_qemu_ld8u(tmp, addr, index);
-        break;
     case OS_WORD:
-        if (sign)
-            tcg_gen_qemu_ld16s(tmp, addr, index);
-        else
-            tcg_gen_qemu_ld16u(tmp, addr, index);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index,
+                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
         break;
     default:
         g_assert_not_reached();
@@ -332,15 +323,11 @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                              int index)
 {
-    switch(opsize) {
+    switch (opsize) {
     case OS_BYTE:
-        tcg_gen_qemu_st8(val, addr, index);
-        break;
     case OS_WORD:
-        tcg_gen_qemu_st16(val, addr, index);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_st32(val, addr, index);
+        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
         break;
     default:
         g_assert_not_reached();
@@ -971,23 +958,16 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     tmp = tcg_temp_new();
     switch (opsize) {
     case OS_BYTE:
-        tcg_gen_qemu_ld8s(tmp, addr, index);
-        gen_helper_exts32(cpu_env, fp, tmp);
-        break;
     case OS_WORD:
-        tcg_gen_qemu_ld16s(tmp, addr, index);
-        gen_helper_exts32(cpu_env, fp, tmp);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
         gen_helper_exts32(cpu_env, fp, tmp);
         break;
     case OS_SINGLE:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
         gen_helper_extf32(cpu_env, fp, tmp);
         break;
     case OS_DOUBLE:
-        tcg_gen_qemu_ld64(t64, addr, index);
+        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
         gen_helper_extf64(cpu_env, fp, t64);
         break;
     case OS_EXTENDED:
@@ -995,11 +975,11 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
             gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
             break;
         }
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
         tcg_gen_shri_i32(tmp, tmp, 16);
         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
         tcg_gen_addi_i32(tmp, addr, 4);
-        tcg_gen_qemu_ld64(t64, tmp, index);
+        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
         break;
     case OS_PACKED:
@@ -1024,24 +1004,18 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     tmp = tcg_temp_new();
     switch (opsize) {
     case OS_BYTE:
-        gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st8(tmp, addr, index);
-        break;
     case OS_WORD:
-        gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st16(tmp, addr, index);
-        break;
     case OS_LONG:
         gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
         break;
     case OS_SINGLE:
         gen_helper_redf32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
         break;
     case OS_DOUBLE:
         gen_helper_redf64(t64, cpu_env, fp);
-        tcg_gen_qemu_st64(t64, addr, index);
+        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
         break;
     case OS_EXTENDED:
         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
@@ -1050,10 +1024,10 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
         }
         tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
         tcg_gen_shli_i32(tmp, tmp, 16);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
         tcg_gen_addi_i32(tmp, addr, 4);
         tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
-        tcg_gen_qemu_st64(t64, tmp, index);
+        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
         break;
     case OS_PACKED:
         /*
@@ -2079,14 +2053,14 @@ DISAS_INSN(movep)
     if (insn & 0x80) {
         for ( ; i > 0 ; i--) {
             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
-            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
+            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
             if (i > 1) {
                 tcg_gen_addi_i32(abuf, abuf, 2);
             }
         }
     } else {
         for ( ; i > 0 ; i--) {
-            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
+            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
             if (i > 1) {
                 tcg_gen_addi_i32(abuf, abuf, 2);
@@ -4337,14 +4311,14 @@ static void m68k_copy_line(TCGv dst, TCGv src, int index)
     t1 = tcg_temp_new_i64();
 
     tcg_gen_andi_i32(addr, src, ~15);
-    tcg_gen_qemu_ld64(t0, addr, index);
+    tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
     tcg_gen_addi_i32(addr, addr, 8);
-    tcg_gen_qemu_ld64(t1, addr, index);
+    tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
 
     tcg_gen_andi_i32(addr, dst, ~15);
-    tcg_gen_qemu_st64(t0, addr, index);
+    tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
     tcg_gen_addi_i32(addr, addr, 8);
-    tcg_gen_qemu_st64(t1, addr, index);
+    tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
 }
 
 DISAS_INSN(move16_reg)
@@ -4767,7 +4741,7 @@ static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
 
     tmp = tcg_temp_new();
     gen_load_fcr(s, tmp, reg);
-    tcg_gen_qemu_st32(tmp, addr, index);
+    tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
 }
 
@@ -4776,7 +4750,7 @@ static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
     TCGv tmp;
 
     tmp = tcg_temp_new();
-    tcg_gen_qemu_ld32u(tmp, addr, index);
+    tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
     gen_store_fcr(s, tmp, reg);
 }
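
gen_load() and gen_store() above can feed opsize straight into the MemOp because OS_BYTE, OS_WORD and OS_LONG are 0, 1 and 2, i.e. the log2 access sizes that the MemOp size field expects (the merged cases would not compile correctly otherwise). A standalone sketch:

#include <stdio.h>

enum { OS_BYTE, OS_WORD, OS_LONG }; /* 0, 1, 2 as in translate.c */

int main(void)
{
    for (int opsize = OS_BYTE; opsize <= OS_LONG; opsize++) {
        printf("opsize %d -> %d-byte access\n", opsize, 1 << opsize);
    }
    return 0;
}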

target/mips/tcg/nanomips_translate.c.inc:

@@ -998,7 +998,7 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
     TCGv tmp2 = tcg_temp_new();
 
     gen_base_offset_addr(ctx, taddr, base, offset);
-    tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx);
+    tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ);
     if (cpu_is_bigendian(ctx)) {
         tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
     } else {

target/mips/tcg/translate.c:

@@ -1949,13 +1949,13 @@ FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
 /* load/store instructions. */
 #ifdef CONFIG_USER_ONLY
-#define OP_LD_ATOMIC(insn, fname)                                          \
+#define OP_LD_ATOMIC(insn, memop)                                          \
 static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx,          \
                                 DisasContext *ctx)                         \
 {                                                                          \
     TCGv t0 = tcg_temp_new();                                              \
     tcg_gen_mov_tl(t0, arg1);                                              \
-    tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx);                         \
+    tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, memop);                    \
     tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr));            \
     tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval));            \
 }
@@ -1967,9 +1967,9 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx,          \
     gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx));      \
 }
 #endif
-OP_LD_ATOMIC(ll, ld32s);
+OP_LD_ATOMIC(ll, MO_TESL);
 #if defined(TARGET_MIPS64)
-OP_LD_ATOMIC(lld, ld64);
+OP_LD_ATOMIC(lld, MO_TEUQ);
 #endif
 #undef OP_LD_ATOMIC
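
For clarity, expanding OP_LD_ATOMIC(ll, MO_TESL) with the new macro yields the following (an illustrative preprocessor expansion of the user-only variant above, not separate code in the tree):

static inline void op_ld_ll(TCGv ret, TCGv arg1, int mem_idx,
                            DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_mov_tl(t0, arg1);
    tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, MO_TESL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr));
    tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval));
}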

target/s390x/tcg/translate.c:

@@ -1973,32 +1973,24 @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
 {
     int l = get_field(s, l1);
     TCGv_i32 vl;
+    MemOp mop;
 
     switch (l + 1) {
     case 1:
-        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
-        break;
     case 2:
-        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
-        break;
     case 4:
-        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
-        break;
     case 8:
-        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
-        break;
+        mop = ctz32(l + 1) | MO_TE;
+        tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
+        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
+        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+        return DISAS_NEXT;
     default:
         vl = tcg_constant_i32(l);
         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
         set_cc_static(s);
         return DISAS_NEXT;
     }
-    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
-    return DISAS_NEXT;
 }
 
 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
@@ -2199,7 +2191,7 @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
     TCGv_i32 t2 = tcg_temp_new_i32();
     tcg_gen_extrl_i64_i32(t2, o->in1);
     gen_helper_cvd(t1, t2);
-    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
     return DISAS_NEXT;
 }
 
@@ -2457,7 +2449,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
     switch (m3) {
     case 0xf:
         /* Effectively a 32-bit load. */
-        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
         len = 32;
         goto one_insert;
 
@@ -2465,7 +2457,7 @@
     case 0x6:
     case 0x3:
         /* Effectively a 16-bit load. */
-        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
         len = 16;
         goto one_insert;
 
@@ -2474,7 +2466,7 @@
     case 0x2:
     case 0x1:
         /* Effectively an 8-bit load. */
-        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
         len = 8;
         goto one_insert;
 
@@ -2490,7 +2482,7 @@
         ccm = 0;
         while (m3) {
             if (m3 & 0x8) {
-                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                 tcg_gen_addi_i64(o->in2, o->in2, 1);
                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                 ccm |= 0xffull << pos;
@@ -2746,25 +2738,25 @@ static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
     return DISAS_NEXT;
 }
 
@@ -2803,7 +2795,7 @@ static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
 {
     TCGLabel *lab = gen_new_label();
-    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
     /* The value is stored even in case of trap. */
     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
     gen_trap(s);
@@ -2825,7 +2817,8 @@ static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
 {
     TCGLabel *lab = gen_new_label();
-    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
     /* The value is stored even in case of trap. */
     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
     gen_trap(s);
@@ -2942,7 +2935,7 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                         MO_TEUQ | MO_ALIGN_8);
     tcg_gen_addi_i64(o->in2, o->in2, 8);
-    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
     gen_helper_load_psw(cpu_env, t1, t2);
     return DISAS_NORETURN;
 }
@@ -2966,7 +2959,7 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
     /* Only one register to read. */
     t1 = tcg_temp_new_i64();
     if (unlikely(r1 == r3)) {
-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
         store_reg32_i64(r1, t1);
         return DISAS_NEXT;
     }
@@ -2974,9 +2967,9 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
     /* First load the values of the first and last registers to trigger
        possible page faults. */
     t2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
-    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
     store_reg32_i64(r1, t1);
     store_reg32_i64(r3, t2);
 
@@ -2991,7 +2984,7 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
     while (r1 != r3) {
         r1 = (r1 + 1) & 15;
         tcg_gen_add_i64(o->in2, o->in2, t2);
-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
         store_reg32_i64(r1, t1);
     }
     return DISAS_NEXT;
@@ -3006,7 +2999,7 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
     /* Only one register to read. */
     t1 = tcg_temp_new_i64();
     if (unlikely(r1 == r3)) {
-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
         store_reg32h_i64(r1, t1);
         return DISAS_NEXT;
     }
@@ -3014,9 +3007,9 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
     /* First load the values of the first and last registers to trigger
        possible page faults. */
     t2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
-    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
     store_reg32h_i64(r1, t1);
     store_reg32h_i64(r3, t2);
 
@@ -3031,7 +3024,7 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
     while (r1 != r3) {
         r1 = (r1 + 1) & 15;
         tcg_gen_add_i64(o->in2, o->in2, t2);
-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
         store_reg32h_i64(r1, t1);
     }
     return DISAS_NEXT;
@@ -3045,7 +3038,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
     /* Only one register to read. */
     if (unlikely(r1 == r3)) {
-        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
         return DISAS_NEXT;
     }
 
@@ -3053,9 +3046,9 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
        possible page faults. */
     t1 = tcg_temp_new_i64();
     t2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
-    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
     tcg_gen_mov_i64(regs[r1], t1);
 
     /* Only two registers to read. */
@@ -3069,7 +3062,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
     while (r1 != r3) {
         r1 = (r1 + 1) & 15;
         tcg_gen_add_i64(o->in2, o->in2, t1);
-        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
     }
     return DISAS_NEXT;
 }
@@ -3923,15 +3916,15 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
     switch (s->insn->data) {
     case 1: /* STOCG */
-        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
+        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
         break;
     case 0: /* STOC */
-        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
+        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
         break;
     case 2: /* STOCFH */
         h = tcg_temp_new_i64();
         tcg_gen_shri_i64(h, regs[r1], 32);
-        tcg_gen_qemu_st32(h, a, get_mem_index(s));
+        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
         break;
     default:
         g_assert_not_reached();
@@ -4050,7 +4043,7 @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
 
     /* load the third operand into r3 before modifying anything */
-    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
 
     /* subtract CPU timer from first operand and store in GR0 */
     gen_helper_stpt(tmp, cpu_env);
@@ -4128,9 +4121,9 @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
     tcg_gen_shri_i64(c1, c1, 8);
     tcg_gen_ori_i64(c2, c2, 0x10000);
     tcg_gen_or_i64(c2, c2, todpr);
-    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
     tcg_gen_addi_i64(o->in2, o->in2, 8);
-    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
     /* ??? We don't implement clock states. */
     gen_op_movi_cc(s, 0);
     return DISAS_NEXT;
@@ -4343,7 +4336,7 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
        restart, we'll have the wrong SYSTEM MASK in place. */
     t = tcg_temp_new_i64();
     tcg_gen_shri_i64(t, psw_mask, 56);
-    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
 
     if (s->fields.op == 0xac) {
         tcg_gen_andi_i64(psw_mask, psw_mask,
@@ -4380,13 +4373,13 @@ static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
     return DISAS_NEXT;
 }
 
@@ -4424,7 +4417,7 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
     case 0xf:
         /* Effectively a 32-bit store. */
         tcg_gen_shri_i64(tmp, o->in1, pos);
-        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
         break;
 
     case 0xc:
@@ -4432,7 +4425,7 @@
     case 0x3:
         /* Effectively a 16-bit store. */
         tcg_gen_shri_i64(tmp, o->in1, pos);
-        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
         break;
 
     case 0x8:
@@ -4441,7 +4434,7 @@
     case 0x1:
         /* Effectively an 8-bit store. */
         tcg_gen_shri_i64(tmp, o->in1, pos);
-        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
         break;
 
     default:
@@ -4450,7 +4443,7 @@
         while (m3) {
             if (m3 & 0x8) {
                 tcg_gen_shri_i64(tmp, o->in1, pos);
-                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                 tcg_gen_addi_i64(o->in2, o->in2, 1);
             }
             m3 = (m3 << 1) & 0xf;
@@ -4469,11 +4462,8 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
     TCGv_i64 tsize = tcg_constant_i64(size);
 
     while (1) {
-        if (size == 8) {
-            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
-        } else {
-            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
-        }
+        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
+                            size == 8 ? MO_TEUQ : MO_TEUL);
         if (r1 == r3) {
             break;
         }
@@ -4494,7 +4484,7 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
     while (1) {
         tcg_gen_shl_i64(t, regs[r1], t32);
-        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
+        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
         if (r1 == r3) {
             break;
         }
@@ -4804,28 +4794,28 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
     l++;
     while (l >= 8) {
-        tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
         l -= 8;
         if (l > 0) {
             tcg_gen_addi_i64(o->addr1, o->addr1, 8);
         }
     }
     if (l >= 4) {
-        tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
        l -= 4;
         if (l > 0) {
             tcg_gen_addi_i64(o->addr1, o->addr1, 4);
         }
     }
     if (l >= 2) {
-        tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
         l -= 2;
         if (l > 0) {
             tcg_gen_addi_i64(o->addr1, o->addr1, 2);
         }
     }
     if (l) {
-        tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
     }
     gen_op_movi_cc(s, 0);
     return DISAS_NEXT;
@@ -5314,13 +5304,13 @@ static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
 static void wout_m1_8(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
 }
 #define SPEC_wout_m1_8 0
 
 static void wout_m1_16(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
 }
 #define SPEC_wout_m1_16 0
 
@@ -5334,7 +5324,7 @@ static void wout_m1_16a(DisasContext *s, DisasOps *o)
 static void wout_m1_32(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
 }
 #define SPEC_wout_m1_32 0
 
@@ -5348,7 +5338,7 @@ static void wout_m1_32a(DisasContext *s, DisasOps *o)
 static void wout_m1_64(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
 }
 #define SPEC_wout_m1_64 0
 
@@ -5362,7 +5352,7 @@ static void wout_m1_64a(DisasContext *s, DisasOps *o)
 static void wout_m2_32(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
 }
 #define SPEC_wout_m2_32 0
 
@@ -5557,7 +5547,7 @@ static void in1_m1_8u(DisasContext *s, DisasOps *o)
 {
     in1_la1(s, o);
     o->in1 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
 }
 #define SPEC_in1_m1_8u 0
 
@@ -5565,7 +5555,7 @@ static void in1_m1_16s(DisasContext *s, DisasOps *o)
 {
     in1_la1(s, o);
     o->in1 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
 }
 #define SPEC_in1_m1_16s 0
 
@@ -5573,7 +5563,7 @@ static void in1_m1_16u(DisasContext *s, DisasOps *o)
 {
     in1_la1(s, o);
     o->in1 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
 }
 #define SPEC_in1_m1_16u 0
 
@@ -5581,7 +5571,7 @@ static void in1_m1_32s(DisasContext *s, DisasOps *o)
 {
     in1_la1(s, o);
     o->in1 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
 }
 #define SPEC_in1_m1_32s 0
@ -5589,7 +5579,7 @@ static void in1_m1_32u(DisasContext *s, DisasOps *o)
{ {
in1_la1(s, o); in1_la1(s, o);
o->in1 = tcg_temp_new_i64(); o->in1 = tcg_temp_new_i64();
tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
} }
#define SPEC_in1_m1_32u 0 #define SPEC_in1_m1_32u 0
@ -5597,7 +5587,7 @@ static void in1_m1_64(DisasContext *s, DisasOps *o)
{ {
in1_la1(s, o); in1_la1(s, o);
o->in1 = tcg_temp_new_i64(); o->in1 = tcg_temp_new_i64();
tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
} }
#define SPEC_in1_m1_64 0 #define SPEC_in1_m1_64 0
@ -5811,35 +5801,35 @@ static void in2_sh(DisasContext *s, DisasOps *o)
static void in2_m2_8u(DisasContext *s, DisasOps *o) static void in2_m2_8u(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
} }
#define SPEC_in2_m2_8u 0 #define SPEC_in2_m2_8u 0
static void in2_m2_16s(DisasContext *s, DisasOps *o) static void in2_m2_16s(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
} }
#define SPEC_in2_m2_16s 0 #define SPEC_in2_m2_16s 0
static void in2_m2_16u(DisasContext *s, DisasOps *o) static void in2_m2_16u(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
} }
#define SPEC_in2_m2_16u 0 #define SPEC_in2_m2_16u 0
static void in2_m2_32s(DisasContext *s, DisasOps *o) static void in2_m2_32s(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
} }
#define SPEC_in2_m2_32s 0 #define SPEC_in2_m2_32s 0
static void in2_m2_32u(DisasContext *s, DisasOps *o) static void in2_m2_32u(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
} }
#define SPEC_in2_m2_32u 0 #define SPEC_in2_m2_32u 0
@ -5855,14 +5845,14 @@ static void in2_m2_32ua(DisasContext *s, DisasOps *o)
static void in2_m2_64(DisasContext *s, DisasOps *o) static void in2_m2_64(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
} }
#define SPEC_in2_m2_64 0 #define SPEC_in2_m2_64 0
static void in2_m2_64w(DisasContext *s, DisasOps *o) static void in2_m2_64w(DisasContext *s, DisasOps *o)
{ {
in2_a2(s, o); in2_a2(s, o);
tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->in2, o->in2, 0); gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
} }
#define SPEC_in2_m2_64w 0 #define SPEC_in2_m2_64w 0
@ -5879,14 +5869,14 @@ static void in2_m2_64a(DisasContext *s, DisasOps *o)
static void in2_mri2_16s(DisasContext *s, DisasOps *o) static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{ {
o->in2 = tcg_temp_new_i64(); o->in2 = tcg_temp_new_i64();
tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
} }
#define SPEC_in2_mri2_16s 0 #define SPEC_in2_mri2_16s 0
static void in2_mri2_16u(DisasContext *s, DisasOps *o) static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{ {
o->in2 = tcg_temp_new_i64(); o->in2 = tcg_temp_new_i64();
tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s)); tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
} }
#define SPEC_in2_mri2_16u 0 #define SPEC_in2_mri2_16u 0
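
Every hunk above follows one mechanical rule: a legacy helper whose name encoded size, signedness and endianness becomes a generic _i64 access with that information passed as an explicit MemOp. A self-contained model of the mapping (the constants mirror the spirit of QEMU's MemOp; the exact bit layout here is illustrative, not QEMU's):

#include <stdio.h>

enum {
    MO_SIZE_8 = 0, MO_SIZE_16 = 1, MO_SIZE_32 = 2, MO_SIZE_64 = 3,
    MO_SIGN_BIT = 1 << 2,   /* sign-extend the loaded value */
    MO_TE_BIT   = 1 << 3,   /* target-endian multi-byte access */
};

struct legacy { const char *name; int op; };

int main(void)
{
    /* The removed compatibility helpers were just fixed spellings: */
    static const struct legacy tbl[] = {
        { "tcg_gen_qemu_ld8u",  MO_SIZE_8 },
        { "tcg_gen_qemu_ld16s", MO_SIZE_16 | MO_SIGN_BIT | MO_TE_BIT },
        { "tcg_gen_qemu_ld32u", MO_SIZE_32 | MO_TE_BIT },
        { "tcg_gen_qemu_st64",  MO_SIZE_64 | MO_TE_BIT },
    };
    for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
        printf("%-20s -> %2d bits, %s, %s\n", tbl[i].name,
               8 << (tbl[i].op & 3),
               (tbl[i].op & MO_SIGN_BIT) ? "signed" : "unsigned",
               (tbl[i].op & MO_TE_BIT) ? "target-endian" : "single byte");
    }
    return 0;
}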

--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c

@@ -593,6 +593,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
     uint32_t last_addr = addr;
 #endif
+    MemOpIdx oi;
 
     do_check_align(env, addr, size - 1, GETPC());
     switch (asi) {
@@ -692,19 +693,20 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
     case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
         break;
     case ASI_KERNELTXT: /* Supervisor code access */
+        oi = make_memop_idx(memop, cpu_mmu_index(env, true));
         switch (size) {
         case 1:
-            ret = cpu_ldub_code(env, addr);
+            ret = cpu_ldb_code_mmu(env, addr, oi, GETPC());
             break;
         case 2:
-            ret = cpu_lduw_code(env, addr);
+            ret = cpu_ldw_code_mmu(env, addr, oi, GETPC());
             break;
         default:
         case 4:
-            ret = cpu_ldl_code(env, addr);
+            ret = cpu_ldl_code_mmu(env, addr, oi, GETPC());
             break;
         case 8:
-            ret = cpu_ldq_code(env, addr);
+            ret = cpu_ldq_code_mmu(env, addr, oi, GETPC());
             break;
         }
         break;
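
The cpu_ld*_code_mmu variants differ from the plain cpu_ld*_code calls in taking a MemOpIdx, which packs the MemOp and the MMU index into a single scalar so both survive the trip through the helpers. A sketch of that packing, modeled on include/exec/memopidx.h (the four-bit index field matches current QEMU but is an assumption of this sketch, not a stable ABI):

#include <assert.h>
#include <stdint.h>

typedef uint32_t MemOpIdx;

static inline MemOpIdx make_memop_idx(unsigned op, unsigned idx)
{
    assert(idx <= 15);              /* mmu index lives in the low 4 bits */
    return (op << 4) | idx;
}

static inline unsigned get_memop(MemOpIdx oi)  { return oi >> 4; }
static inline unsigned get_mmuidx(MemOpIdx oi) { return oi & 15; }

int main(void)
{
    /* e.g. a 4-byte code fetch in the supervisor MMU context */
    MemOpIdx oi = make_memop_idx(2, 1);
    assert(get_memop(oi) == 2);
    assert(get_mmuidx(oi) == 1);
    return 0;
}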

--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c

@@ -1899,7 +1899,7 @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                      TCGv addr, int mmu_idx, MemOp memop)
 {
     gen_address_mask(dc, addr);
-    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
 }
 
 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
@@ -2155,12 +2155,12 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
         break;
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
     default:
         {
             TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(memop);
+            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
             save_state(dc);
 #ifdef TARGET_SPARC64
@@ -2201,7 +2201,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
         /* fall through */
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
     case GET_ASI_BCOPY:
@@ -2233,7 +2233,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
     default:
         {
             TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE);
+            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
             save_state(dc);
 #ifdef TARGET_SPARC64
@@ -2283,7 +2283,7 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
     case GET_ASI_DIRECT:
         oldv = tcg_temp_new();
         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-                                  da.mem_idx, da.memop);
+                                  da.mem_idx, da.memop | MO_ALIGN);
         gen_store_gpr(dc, rd, oldv);
         break;
     default:
@@ -2347,7 +2347,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
         switch (size) {
         case 4:
             d32 = gen_dest_fpr_F(dc);
-            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
             gen_store_fpr_F(dc, rd, d32);
             break;
         case 8:
@@ -2397,7 +2397,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
         /* Valid for lddfa only. */
         if (size == 8) {
             gen_address_mask(dc, addr);
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -2406,7 +2407,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
     default:
         {
             TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+            TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
 
             save_state(dc);
             /* According to the table in the UA2011 manual, the only
@@ -2454,7 +2455,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
         switch (size) {
         case 4:
             d32 = gen_load_fpr_F(dc, rd);
-            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
             break;
         case 8:
             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
@@ -2506,7 +2507,8 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
         /* Valid for stdfa only. */
         if (size == 8) {
             gen_address_mask(dc, addr);
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -2543,7 +2545,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
             TCGv_i64 tmp = tcg_temp_new_i64();
 
             gen_address_mask(dc, addr);
-            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
 
             /* Note that LE ldda acts as if each 32-bit register
                result is byte swapped.  Having just performed one
@@ -2613,7 +2615,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                 tcg_gen_concat32_i64(t64, hi, lo);
             }
             gen_address_mask(dc, addr);
-            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
         }
         break;
@@ -2651,7 +2653,7 @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
     case GET_ASI_DIRECT:
         oldv = tcg_temp_new();
         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-                                  da.mem_idx, da.memop);
+                                  da.mem_idx, da.memop | MO_ALIGN);
         gen_store_gpr(dc, rd, oldv);
         break;
     default:
@@ -2678,7 +2680,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
         return;
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
     default:
         {
@@ -2710,7 +2712,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
         break;
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
     case GET_ASI_BFILL:
         /* Store 32 bytes of T64 to ADDR. */
@@ -5179,15 +5181,18 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             switch (xop) {
             case 0x0:   /* ld, V9 lduw, load unsigned word */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TEUL | MO_ALIGN);
                 break;
             case 0x1:   /* ldub, load unsigned byte */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_UB);
                 break;
             case 0x2:   /* lduh, load unsigned halfword */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TEUW | MO_ALIGN);
                 break;
             case 0x3:   /* ldd, load double word */
                 if (rd & 1)
@@ -5197,7 +5202,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                     gen_address_mask(dc, cpu_addr);
                     t64 = tcg_temp_new_i64();
-                    tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+                    tcg_gen_qemu_ld_i64(t64, cpu_addr,
+                                        dc->mem_idx, MO_TEUQ | MO_ALIGN);
                     tcg_gen_trunc_i64_tl(cpu_val, t64);
                     tcg_gen_ext32u_tl(cpu_val, cpu_val);
                     gen_store_gpr(dc, rd + 1, cpu_val);
@@ -5208,11 +5214,12 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 break;
             case 0x9:   /* ldsb, load signed byte */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
                 break;
             case 0xa:   /* ldsh, load signed halfword */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TESW | MO_ALIGN);
                 break;
             case 0xd:   /* ldstub */
                 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
@@ -5266,11 +5273,13 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
 #ifdef TARGET_SPARC64
             case 0x08: /* V9 ldsw */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TESL | MO_ALIGN);
                 break;
             case 0x0b: /* V9 ldx */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TEUQ | MO_ALIGN);
                 break;
             case 0x18: /* V9 ldswa */
                 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
@@ -5321,7 +5330,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_address_mask(dc, cpu_addr);
                 cpu_dst_32 = gen_dest_fpr_F(dc);
                 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
-                                    dc->mem_idx, MO_TEUL);
+                                    dc->mem_idx, MO_TEUL | MO_ALIGN);
                 gen_store_fpr_F(dc, rd, cpu_dst_32);
                 break;
             case 0x21:  /* ldfsr, V9 ldxfsr */
@@ -5330,14 +5339,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 if (rd == 1) {
                     TCGv_i64 t64 = tcg_temp_new_i64();
                     tcg_gen_qemu_ld_i64(t64, cpu_addr,
-                                        dc->mem_idx, MO_TEUQ);
+                                        dc->mem_idx, MO_TEUQ | MO_ALIGN);
                     gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                     break;
                 }
 #endif
                 cpu_dst_32 = tcg_temp_new_i32();
                 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
-                                    dc->mem_idx, MO_TEUL);
+                                    dc->mem_idx, MO_TEUL | MO_ALIGN);
                 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                 break;
             case 0x22:      /* ldqf, load quad fpreg */
@@ -5369,15 +5378,17 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             switch (xop) {
             case 0x4: /* st, store word */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TEUL | MO_ALIGN);
                 break;
             case 0x5: /* stb, store byte */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
                 break;
             case 0x6: /* sth, store halfword */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TEUW | MO_ALIGN);
                 break;
             case 0x7: /* std, store double word */
                 if (rd & 1)
@@ -5390,7 +5401,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                     lo = gen_load_gpr(dc, rd + 1);
                     t64 = tcg_temp_new_i64();
                     tcg_gen_concat_tl_i64(t64, lo, cpu_val);
-                    tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+                    tcg_gen_qemu_st_i64(t64, cpu_addr,
+                                        dc->mem_idx, MO_TEUQ | MO_ALIGN);
                 }
                 break;
 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
@@ -5413,7 +5425,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
 #ifdef TARGET_SPARC64
             case 0x0e: /* V9 stx */
                 gen_address_mask(dc, cpu_addr);
-                tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+                tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+                                   dc->mem_idx, MO_TEUQ | MO_ALIGN);
                 break;
             case 0x1e: /* V9 stxa */
                 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
@@ -5431,18 +5444,20 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_address_mask(dc, cpu_addr);
                 cpu_src1_32 = gen_load_fpr_F(dc, rd);
                 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
-                                    dc->mem_idx, MO_TEUL);
+                                    dc->mem_idx, MO_TEUL | MO_ALIGN);
                 break;
             case 0x25: /* stfsr, V9 stxfsr */
                 {
 #ifdef TARGET_SPARC64
                     gen_address_mask(dc, cpu_addr);
                     if (rd == 1) {
-                        tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+                        tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+                                           dc->mem_idx, MO_TEUQ | MO_ALIGN);
                         break;
                     }
 #endif
-                    tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+                    tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                 }
                 break;
             case 0x26:
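
These sparc hunks all add MO_ALIGN for the same reason: with TARGET_ALIGNED_ONLY gone, alignment is requested per access instead of being implied globally for the target. What the flag ultimately asks the memory subsystem to verify is a simple mask test against the access size; a self-contained sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a_bits is log2 of the required alignment, e.g. 2 for a 4-byte access,
 * as get_alignment_bits() would derive from a MemOp carrying MO_ALIGN. */
static bool misaligned(uint64_t addr, unsigned a_bits)
{
    uint64_t a_mask = (UINT64_C(1) << a_bits) - 1;
    return (addr & a_mask) != 0;
}

int main(void)
{
    printf("0x1002 as 4-byte access: %s\n",
           misaligned(0x1002, 2) ? "trap" : "ok");   /* trap */
    printf("0x1004 as 4-byte access: %s\n",
           misaligned(0x1004, 2) ? "trap" : "ok");   /* ok */
    return 0;
}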

--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c

@@ -1549,7 +1549,7 @@ static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
     TCGv_i32 res = tcg_temp_new_i32();
 
     tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
-    tcg_gen_qemu_ld8u(res, addr, dc->cring);
+    tcg_gen_qemu_ld_i32(res, addr, dc->cring, MO_UB);
 }
 
 static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
@@ -1726,7 +1726,7 @@ static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
     } else {
         tmp = tcg_constant_i32(arg[1].imm);
     }
-    tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
+    tcg_gen_qemu_ld_i32(arg[0].out, tmp, dc->cring, MO_TEUL);
 }
 
 static void translate_loop(DisasContext *dc, const OpcodeArg arg[],

--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc

@@ -1587,6 +1587,12 @@ static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
     tcg_out_insn(s, 3406, ADR, rd, offset);
 }
 
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+    TCGType index_ext;
+} HostAddress;
+
 #ifdef CONFIG_SOFTMMU
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     MemOpIdx oi, uintptr_t ra)
@@ -1796,32 +1802,31 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 #endif /* CONFIG_SOFTMMU */
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
-                                   TCGReg data_r, TCGReg addr_r,
-                                   TCGType otype, TCGReg off_r)
+                                   TCGReg data_r, HostAddress h)
 {
     switch (memop & MO_SSIZE) {
     case MO_UB:
-        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index);
         break;
     case MO_SB:
         tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
-                       data_r, addr_r, otype, off_r);
+                       data_r, h.base, h.index_ext, h.index);
         break;
     case MO_UW:
-        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index);
         break;
     case MO_SW:
         tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
-                       data_r, addr_r, otype, off_r);
+                       data_r, h.base, h.index_ext, h.index);
         break;
     case MO_UL:
-        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index);
        break;
     case MO_SL:
-        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index);
        break;
     case MO_UQ:
-        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index);
        break;
     default:
         g_assert_not_reached();
@@ -1829,21 +1834,20 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
 }
 
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
-                                   TCGReg data_r, TCGReg addr_r,
-                                   TCGType otype, TCGReg off_r)
+                                   TCGReg data_r, HostAddress h)
 {
     switch (memop & MO_SIZE) {
     case MO_8:
-        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index);
         break;
     case MO_16:
-        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index);
         break;
     case MO_32:
-        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index);
         break;
     case MO_64:
-        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index);
         break;
     default:
         g_assert_not_reached();
@@ -1851,22 +1855,28 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
 }
 
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
-                            MemOpIdx oi, TCGType ext)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp memop = get_memop(oi);
-    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    HostAddress h;
 
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((memop & MO_BSWAP) == 0);
 
 #ifdef CONFIG_SOFTMMU
-    unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
 
-    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
-    tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                           TCG_REG_X1, otype, addr_reg);
-    add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
+    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1);
+
+    h = (HostAddress){
+        .base = TCG_REG_X1,
+        .index = addr_reg,
+        .index_ext = addr_type
+    };
+    tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     unsigned a_bits = get_alignment_bits(memop);
@@ -1874,45 +1884,65 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     if (a_bits) {
         tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
     if (USE_GUEST_BASE) {
-        tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                               TCG_REG_GUEST_BASE, otype, addr_reg);
+        h = (HostAddress){
+            .base = TCG_REG_GUEST_BASE,
+            .index = addr_reg,
+            .index_ext = addr_type
+        };
     } else {
-        tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+        h = (HostAddress){
+            .base = addr_reg,
+            .index = TCG_REG_XZR,
+            .index_ext = TCG_TYPE_I64
+        };
     }
+    tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
 #endif /* CONFIG_SOFTMMU */
 }
 
 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
-                            MemOpIdx oi)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp memop = get_memop(oi);
-    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    HostAddress h;
 
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((memop & MO_BSWAP) == 0);
 
 #ifdef CONFIG_SOFTMMU
-    unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
 
-    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
-    tcg_out_qemu_st_direct(s, memop, data_reg,
-                           TCG_REG_X1, otype, addr_reg);
-    add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
-                        data_reg, addr_reg, s->code_ptr, label_ptr);
+    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0);
+
+    h = (HostAddress){
+        .base = TCG_REG_X1,
+        .index = addr_reg,
+        .index_ext = addr_type
+    };
+    tcg_out_qemu_st_direct(s, memop, data_reg, h);
+    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     unsigned a_bits = get_alignment_bits(memop);
     if (a_bits) {
         tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
     if (USE_GUEST_BASE) {
-        tcg_out_qemu_st_direct(s, memop, data_reg,
-                               TCG_REG_GUEST_BASE, otype, addr_reg);
+        h = (HostAddress){
+            .base = TCG_REG_GUEST_BASE,
+            .index = addr_reg,
+            .index_ext = addr_type
+        };
     } else {
-        tcg_out_qemu_st_direct(s, memop, data_reg,
-                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+        h = (HostAddress){
+            .base = addr_reg,
+            .index = TCG_REG_XZR,
+            .index_ext = TCG_TYPE_I64
+        };
     }
+    tcg_out_qemu_st_direct(s, memop, data_reg, h);
 #endif /* CONFIG_SOFTMMU */
 }
@@ -2249,7 +2279,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, REG0(0), a1, a2);
+        tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
         break;
 
     case INDEX_op_bswap64_i64:
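
HostAddress folds what were three loose parameters (base register, index register, index width) into one value, so the softmmu and user-only paths can feed the same emitters. A compilable model of how the three cases above fill it (the register numbers are illustrative stand-ins, not QEMU's TCGReg values):

#include <stdio.h>

typedef enum { TYPE_I32, TYPE_I64 } Type;

typedef struct {
    int base;       /* register holding the resolved base address */
    int index;      /* register added to base (the zero reg when unused) */
    Type index_ext; /* whether index is a 32- or 64-bit quantity */
} HostAddress;

#define REG_X1          1
#define REG_XZR         31
#define REG_GUEST_BASE  28

static HostAddress make_host_addr(int softmmu, int use_guest_base,
                                  int addr_reg, Type addr_type)
{
    if (softmmu) {
        /* TLB hit: X1 holds the addend loaded from the TLB entry. */
        return (HostAddress){ REG_X1, addr_reg, addr_type };
    } else if (use_guest_base) {
        return (HostAddress){ REG_GUEST_BASE, addr_reg, addr_type };
    } else {
        /* Guest address is the host address; index by the zero register. */
        return (HostAddress){ addr_reg, REG_XZR, TYPE_I64 };
    }
}

int main(void)
{
    HostAddress h = make_host_addr(0, 1, 3, TYPE_I64);
    printf("base=x%d index=x%d\n", h.base, h.index);
    return 0;
}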

--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc

@@ -1337,6 +1337,13 @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
     tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
 }
 
+typedef struct {
+    ARMCond cond;
+    TCGReg base;
+    int index;
+    bool index_scratch;
+} HostAddress;
+
 #ifdef CONFIG_SOFTMMU
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
@@ -1526,15 +1533,18 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
-                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
-                                TCGReg addrhi, tcg_insn_unit *raddr,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+                                MemOpIdx oi, TCGType type,
+                                TCGReg datalo, TCGReg datahi,
+                                TCGReg addrlo, TCGReg addrhi,
+                                tcg_insn_unit *raddr,
                                 tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
     label->oi = oi;
+    label->type = type;
     label->datalo_reg = datalo;
     label->datahi_reg = datahi;
     label->addrlo_reg = addrlo;
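
Recording the TCGType in the label is what lets the shared slow path later know how wide the destination value is without re-deriving it from the opcode. A rough model of the record being filled in here (field set abbreviated from QEMU's TCGLabelQemuLdst):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { TYPE_I32, TYPE_I64 } Type;

typedef struct {
    bool is_ld;                  /* load or store? */
    uint32_t oi;                 /* MemOpIdx: MemOp plus mmu index */
    Type type;                   /* new field: width of the data value */
    int datalo_reg, datahi_reg;  /* value register (pair) */
    int addrlo_reg, addrhi_reg;  /* guest address register (pair) */
} LdstLabel;

int main(void)
{
    /* What add_qemu_ldst_label now captures for a 64-bit load: */
    LdstLabel l = { .is_ld = true, .type = TYPE_I64,
                    .datalo_reg = 0, .datahi_reg = 1 };
    printf("slow path extends the result into %s\n",
           l.type == TYPE_I64 ? "a 64-bit register pair" : "one 32-bit reg");
    return 0;
}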
@@ -1693,29 +1703,49 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 }
 #endif /* SOFTMMU */
 
-static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
-                                  TCGReg datalo, TCGReg datahi,
-                                  TCGReg addrlo, TCGReg addend,
-                                  bool scratch_addend)
+static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
+                                   TCGReg datahi, HostAddress h)
 {
+    TCGReg base;
+
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SSIZE) {
     case MO_UB:
-        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_SB:
-        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_UW:
-        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_SW:
-        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_UL:
-        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_UQ:
         /* We used pair allocation for datalo, so already should be aligned. */
@@ -1723,182 +1753,112 @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
         tcg_debug_assert(datahi == datalo + 1);
         /* LDRD requires alignment; double-check that. */
         if (get_alignment_bits(opc) >= MO_64) {
+            if (h.index < 0) {
+                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
+                break;
+            }
             /*
              * Rm (the second address op) must not overlap Rt or Rt + 1.
              * Since datalo is aligned, we can simplify the test via alignment.
              * Flip the two address arguments if that works.
              */
-            if ((addend & ~1) != datalo) {
-                tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
+            if ((h.index & ~1) != datalo) {
+                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                 break;
             }
-            if ((addrlo & ~1) != datalo) {
-                tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
+            if ((h.base & ~1) != datalo) {
+                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                 break;
             }
         }
-        if (scratch_addend) {
-            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
-            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
+        if (h.index < 0) {
+            base = h.base;
+            if (datalo == h.base) {
+                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
+                base = TCG_REG_TMP;
+            }
+        } else if (h.index_scratch) {
+            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
+            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
+            break;
         } else {
-            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
-                            addend, addrlo, SHIFT_IMM_LSL(0));
-            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
-            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
+            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
+                            h.base, h.index, SHIFT_IMM_LSL(0));
+            base = TCG_REG_TMP;
         }
+        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
+        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
         break;
     default:
         g_assert_not_reached();
     }
 }
 
-#ifndef CONFIG_SOFTMMU
-static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
-                                   TCGReg datahi, TCGReg addrlo)
-{
-    /* Byte swapping is left to middle-end expansion. */
-    tcg_debug_assert((opc & MO_BSWAP) == 0);
-
-    switch (opc & MO_SSIZE) {
-    case MO_UB:
-        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_SB:
-        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_UW:
-        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_SW:
-        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_UL:
-        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_UQ:
-        /* We used pair allocation for datalo, so already should be aligned. */
-        tcg_debug_assert((datalo & 1) == 0);
-        tcg_debug_assert(datahi == datalo + 1);
-        /* LDRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
-            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
-        } else if (datalo == addrlo) {
-            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
-            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-        } else {
-            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-}
-#endif
-
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
-{
-    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc;
-#ifdef CONFIG_SOFTMMU
-    int mem_index;
-    TCGReg addend;
-    tcg_insn_unit *label_ptr;
-#else
-    unsigned a_bits;
-#endif
-
-    datalo = *args++;
-    datahi = (is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
+{
+    MemOp opc = get_memop(oi);
+    HostAddress h;
 
 #ifdef CONFIG_SOFTMMU
-    mem_index = get_mmuidx(oi);
-    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
+    h.cond = COND_AL;
+    h.base = addrlo;
+    h.index_scratch = true;
+    h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);
 
-    /* This a conditional BL only to load a pointer within this opcode into LR
-       for the slow path.  We will not be using the value for a tail call.  */
-    label_ptr = s->code_ptr;
+    /*
+     * This a conditional BL only to load a pointer within this opcode into
+     * LR for the slow path.  We will not be using the value for a tail call.
+     */
+    tcg_insn_unit *label_ptr = s->code_ptr;
     tcg_out_bl_imm(s, COND_NE, 0);
 
-    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
+    tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
 
-    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
-#else /* !CONFIG_SOFTMMU */
-    a_bits = get_alignment_bits(opc);
+    add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
+#else
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
     }
-    if (guest_base) {
-        tcg_out_qemu_ld_index(s, opc, datalo, datahi,
-                              addrlo, TCG_REG_GUEST_BASE, false);
-    } else {
-        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
-    }
+
+    h.cond = COND_AL;
+    h.base = addrlo;
+    h.index = guest_base ? TCG_REG_GUEST_BASE : -1;
+    h.index_scratch = false;
+    tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
 #endif
 }
 
-static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
-                                  TCGReg datalo, TCGReg datahi,
-                                  TCGReg addrlo, TCGReg addend,
-                                  bool scratch_addend)
-{
-    /* Byte swapping is left to middle-end expansion. */
-    tcg_debug_assert((opc & MO_BSWAP) == 0);
-
-    switch (opc & MO_SIZE) {
-    case MO_8:
-        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
-        break;
-    case MO_16:
-        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
-        break;
-    case MO_32:
-        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
-        break;
-    case MO_64:
-        /* We used pair allocation for datalo, so already should be aligned. */
-        tcg_debug_assert((datalo & 1) == 0);
-        tcg_debug_assert(datahi == datalo + 1);
-        /* STRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
-            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
-        } else if (scratch_addend) {
-            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
-            tcg_out_st32_12(s, cond, datahi, addend, 4);
-        } else {
-            tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
-                            addend, addrlo, SHIFT_IMM_LSL(0));
-            tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
-            tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-#ifndef CONFIG_SOFTMMU
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
-                                   TCGReg datahi, TCGReg addrlo)
+                                   TCGReg datahi, HostAddress h)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SIZE) {
     case MO_8:
-        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
+        if (h.index < 0) {
+            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_16:
-        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
+        if (h.index < 0) {
+            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_32:
-        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
+        if (h.index < 0) {
+            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_64:
         /* We used pair allocation for datalo, so already should be aligned. */
@@ -1906,62 +1866,59 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
         tcg_debug_assert(datahi == datalo + 1);
         /* STRD requires alignment; double-check that. */
         if (get_alignment_bits(opc) >= MO_64) {
-            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
+            if (h.index < 0) {
+                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
+            } else {
+                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
+            }
+        } else if (h.index_scratch) {
+            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
+            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
         } else {
-            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
-            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
+            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
+                            h.base, h.index, SHIFT_IMM_LSL(0));
+            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
+            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
         }
         break;
     default:
         g_assert_not_reached();
     }
 }
-#endif
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc;
-#ifdef CONFIG_SOFTMMU
-    int mem_index;
-    TCGReg addend;
-    tcg_insn_unit *label_ptr;
-#else
-    unsigned a_bits;
-#endif
-
-    datalo = *args++;
-    datahi = (is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
+    MemOp opc = get_memop(oi);
+    HostAddress h;
 
 #ifdef CONFIG_SOFTMMU
-    mem_index = get_mmuidx(oi);
-    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
-
-    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
-                          addrlo, addend, true);
+    h.cond = COND_EQ;
+    h.base = addrlo;
+    h.index_scratch = true;
+    h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);
+    tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
 
     /* The conditional call must come last, as we're going to return here. */
-    label_ptr = s->code_ptr;
+    tcg_insn_unit *label_ptr = s->code_ptr;
     tcg_out_bl_imm(s, COND_NE, 0);
 
-    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
-#else /* !CONFIG_SOFTMMU */
-    a_bits = get_alignment_bits(opc);
+    add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
+#else
+    unsigned a_bits = get_alignment_bits(opc);
+
+    h.cond = COND_AL;
     if (a_bits) {
         tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
+        h.cond = COND_EQ;
     }
-    if (guest_base) {
-        tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
-                              addrlo, TCG_REG_GUEST_BASE, false);
-    } else {
-        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
-    }
+
+    h.base = addrlo;
+    h.index = guest_base ? TCG_REG_GUEST_BASE : -1;
+    h.index_scratch = false;
+    tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
 #endif
 }
@@ -2245,16 +2202,40 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, 0);
+        if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
+                            args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, 1);
+        if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
+                            args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
+                            args[4], TCG_TYPE_I64);
+        }
         break;
     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, 0);
+        if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
+                            args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, 1);
+        if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
+                            args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
+                            args[4], TCG_TYPE_I64);
+        }
         break;
 
     case INDEX_op_bswap16_i32:
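
On arm the HostAddress index doubles as a flag: tcg_out_tlb_read hands back a real addend register, while the user-only path stores -1 to mean "no index register, use a zero immediate offset". A small model of how the emitters branch on that convention:

#include <stdio.h>

typedef struct {
    int cond;           /* ARM condition code (COND_AL, COND_EQ, ...) */
    int base;
    int index;          /* -1: no index register, address is just [base] */
    int index_scratch;  /* nonzero: index may be clobbered (rwb forms) */
} HostAddress;

static void emit_ld32(HostAddress h, int data)
{
    if (h.index < 0) {
        printf("ldr r%d, [r%d]\n", data, h.base);        /* ld32_12 form */
    } else {
        printf("ldr r%d, [r%d, r%d]\n", data, h.base, h.index); /* ld32_r */
    }
}

int main(void)
{
    emit_ld32((HostAddress){ .base = 0, .index = -1 }, 2); /* user-only   */
    emit_ld32((HostAddress){ .base = 0, .index = 1 }, 2);  /* tlb addend  */
    return 0;
}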

--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc

@@ -1751,6 +1751,30 @@ static void tcg_out_nopn(TCGContext *s, int n)
     tcg_out8(s, 0x90);
 }
 
+/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */
+static void __attribute__((unused))
+tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i)
+{
+    /*
+     * This is used for testing alignment, so we can usually use testb.
+     * For i686, we have to use testl for %esi/%edi.
+     */
+    if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) {
+        tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r);
+        tcg_out8(s, i);
+    } else {
+        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r);
+        tcg_out32(s, i);
+    }
+}
+
+typedef struct {
+    TCGReg base;
+    int index;
+    int ofs;
+    int seg;
+} HostAddress;
+
 #if defined(CONFIG_SOFTMMU)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
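
The factored-out tcg_out_testi keeps the old encoding trick: with an immediate that fits in 8 bits, TEST can be issued as testb, but without a REX prefix only registers 0-3 (AL/CL/DL/BL) have byte forms, so %esi/%edi on i686 must fall back to testl. The predicate, restated as a standalone check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool can_use_testb(unsigned reg, uint32_t imm, bool host_is_64bit)
{
    /* A REX prefix (64-bit hosts) makes every low byte register reachable. */
    return imm <= 0xff && (host_is_64bit || reg < 4);
}

int main(void)
{
    assert(can_use_testb(0 /* eax */, 0x3, false));  /* testb $3, %al  */
    assert(!can_use_testb(6 /* esi */, 0x3, false)); /* needs testl    */
    assert(can_use_testb(6 /* rsi */, 0x3, true));   /* testb $3, %sil */
    return 0;
}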
@ -1803,8 +1827,6 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
int mem_index, MemOp opc, int mem_index, MemOp opc,
tcg_insn_unit **label_ptr, int which) tcg_insn_unit **label_ptr, int which)
{ {
const TCGReg r0 = TCG_REG_L0;
const TCGReg r1 = TCG_REG_L1;
TCGType ttype = TCG_TYPE_I32; TCGType ttype = TCG_TYPE_I32;
TCGType tlbtype = TCG_TYPE_I32; TCGType tlbtype = TCG_TYPE_I32;
int trexw = 0, hrexw = 0, tlbrexw = 0; int trexw = 0, hrexw = 0, tlbrexw = 0;
@ -1828,15 +1850,15 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
} }
} }
tcg_out_mov(s, tlbtype, r0, addrlo); tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0, tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0, tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
TLB_MASK_TABLE_OFS(mem_index) + TLB_MASK_TABLE_OFS(mem_index) +
offsetof(CPUTLBDescFast, mask)); offsetof(CPUTLBDescFast, mask));
tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0, tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
TLB_MASK_TABLE_OFS(mem_index) + TLB_MASK_TABLE_OFS(mem_index) +
offsetof(CPUTLBDescFast, table)); offsetof(CPUTLBDescFast, table));
@ -1844,19 +1866,21 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
copy the address and mask. For lesser alignments, check that we don't copy the address and mask. For lesser alignments, check that we don't
cross pages for the complete access. */ cross pages for the complete access. */
if (a_bits >= s_bits) { if (a_bits >= s_bits) {
tcg_out_mov(s, ttype, r1, addrlo); tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
} else { } else {
tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask); tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
addrlo, s_mask - a_mask);
} }
tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0); tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
/* cmp 0(r0), r1 */ /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which); tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
TCG_REG_L1, TCG_REG_L0, which);
/* Prepare for both the fast path add of the tlb addend, and the slow /* Prepare for both the fast path add of the tlb addend, and the slow
path function argument setup. */ path function argument setup. */
tcg_out_mov(s, ttype, r1, addrlo); tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
/* jne slow_path */ /* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@ -1864,8 +1888,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
s->code_ptr += 4; s->code_ptr += 4;
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
/* cmp 4(r0), addrhi */ /* cmp 4(TCG_REG_L0), addrhi */
tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4); tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, which + 4);
/* jne slow_path */ /* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@ -1875,8 +1899,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
/* TLB Hit. */ /* TLB Hit. */
/* add addend(r0), r1 */ /* add addend(TCG_REG_L0), TCG_REG_L1 */
tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0, tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L1, TCG_REG_L0,
offsetof(CPUTLBEntry, addend)); offsetof(CPUTLBEntry, addend));
} }
@ -1884,8 +1908,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
* Record the context of a call to the out of line helper code for the slow path * Record the context of a call to the out of line helper code for the slow path
* for a load or store, so that we can later generate the correct helper code * for a load or store, so that we can later generate the correct helper code
*/ */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
MemOpIdx oi, TCGType type, MemOpIdx oi,
TCGReg datalo, TCGReg datahi, TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi, TCGReg addrlo, TCGReg addrhi,
tcg_insn_unit *raddr, tcg_insn_unit *raddr,
@ -1895,7 +1919,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
label->is_ld = is_ld; label->is_ld = is_ld;
label->oi = oi; label->oi = oi;
label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32; label->type = type;
label->datalo_reg = datalo; label->datalo_reg = datalo;
label->datahi_reg = datahi; label->datahi_reg = datahi;
label->addrlo_reg = addrlo; label->addrlo_reg = addrlo;
@ -2044,18 +2068,7 @@ static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
unsigned a_mask = (1 << a_bits) - 1; unsigned a_mask = (1 << a_bits) - 1;
TCGLabelQemuLdst *label; TCGLabelQemuLdst *label;
/* tcg_out_testi(s, addrlo, a_mask);
* We are expecting a_bits to max out at 7, so we can usually use testb.
* For i686, we have to use testl for %esi/%edi.
*/
if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
tcg_out8(s, a_mask);
} else {
tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
tcg_out32(s, a_mask);
}
/* jne slow_path */ /* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -2113,15 +2126,11 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return tcg_out_fail_alignment(s, l);
 }
 
-#if TCG_TARGET_REG_BITS == 32
-# define x86_guest_base_seg     0
-# define x86_guest_base_index   -1
-# define x86_guest_base_offset  guest_base
-#else
-static int x86_guest_base_seg;
-static int x86_guest_base_index = -1;
-static int32_t x86_guest_base_offset;
-# if defined(__x86_64__) && defined(__linux__)
-#  include <asm/prctl.h>
-#  include <sys/prctl.h>
+static HostAddress x86_guest_base = {
+    .index = -1
+};
+
+#if defined(__x86_64__) && defined(__linux__)
+# include <asm/prctl.h>
+# include <sys/prctl.h>
 int arch_prctl(int code, unsigned long addr);
@@ -2132,7 +2141,8 @@ static inline int setup_guest_base_seg(void)
     }
     return 0;
 }
-# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
-#  include <machine/sysarch.h>
+#elif defined(__x86_64__) && \
+    (defined (__FreeBSD__) || defined (__FreeBSD_kernel__))
+# include <machine/sysarch.h>
 static inline int setup_guest_base_seg(void)
 {
@@ -2141,22 +2151,19 @@ static inline int setup_guest_base_seg(void)
     }
     return 0;
 }
-# else
+#else
 static inline int setup_guest_base_seg(void)
 {
     return 0;
 }
-# endif
-#endif
+#endif /* setup_guest_base_seg */
 #endif /* SOFTMMU */
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                                   TCGReg base, int index, intptr_t ofs,
-                                   int seg, bool is64, MemOp memop)
+                                   HostAddress h, TCGType type, MemOp memop)
 {
-    TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
     bool use_movbe = false;
-    int rexw = is64 * P_REXW;
+    int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
     int movop = OPC_MOVL_GvEv;
 
     /* Do big-endian loads with movbe.  */
@@ -2168,77 +2175,78 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     switch (memop & MO_SSIZE) {
     case MO_UB:
-        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
-                                 base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_SB:
-        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
-                                 base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_UW:
         if (use_movbe) {
             /* There is no extending movbe; only low 16-bits are modified.  */
-            if (datalo != base && datalo != index) {
+            if (datalo != h.base && datalo != h.index) {
                 /* XOR breaks dependency chains.  */
                 tgen_arithr(s, ARITH_XOR, datalo, datalo);
-                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
-                                         datalo, base, index, 0, ofs);
+                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
+                                         datalo, h.base, h.index, 0, h.ofs);
             } else {
-                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
-                                         datalo, base, index, 0, ofs);
+                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
+                                         datalo, h.base, h.index, 0, h.ofs);
                 tcg_out_ext16u(s, datalo, datalo);
             }
         } else {
-            tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVZWL + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
         }
         break;
     case MO_SW:
         if (use_movbe) {
-            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
-                                     datalo, base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
+                                     datalo, h.base, h.index, 0, h.ofs);
             tcg_out_ext16s(s, type, datalo, datalo);
         } else {
-            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
-                                     datalo, base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + h.seg,
+                                     datalo, h.base, h.index, 0, h.ofs);
         }
         break;
     case MO_UL:
-        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
 #if TCG_TARGET_REG_BITS == 64
     case MO_SL:
         if (use_movbe) {
-            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
             tcg_out_ext32s(s, datalo, datalo);
         } else {
-            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
         }
         break;
 #endif
     case MO_UQ:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
-                                     base, index, 0, ofs);
-        } else {
-            if (use_movbe) {
-                TCGReg t = datalo;
-                datalo = datahi;
-                datahi = t;
-            }
-            if (base != datalo) {
-                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                         base, index, 0, ofs);
-                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                         base, index, 0, ofs + 4);
-            } else {
-                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                         base, index, 0, ofs + 4);
-                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                         base, index, 0, ofs);
-            }
+            tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
+            break;
+        }
+        if (use_movbe) {
+            TCGReg t = datalo;
+            datalo = datahi;
+            datahi = t;
+        }
+        if (h.base == datalo || h.index == datalo) {
+            tcg_out_modrm_sib_offset(s, OPC_LEA, datahi,
+                                     h.base, h.index, 0, h.ofs);
+            tcg_out_modrm_offset(s, movop + h.seg, datalo, datahi, 0);
+            tcg_out_modrm_offset(s, movop + h.seg, datahi, datahi, 4);
+        } else {
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datahi,
+                                     h.base, h.index, 0, h.ofs + 4);
         }
         break;
     default:
@@ -2246,56 +2254,43 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     }
 }
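
Worth noting for the 32-bit MO_UQ path above: the old code only ordered the two loads so the low word would not clobber the base register, which is not sufficient once an index register can also alias datalo. The new sequence sinks the whole effective address first. An annotated restatement (identifiers as in the diff, comments added here):

    if (h.base == datalo || h.index == datalo) {
        /* datahi = &mem; safe, datahi cannot alias both halves */
        tcg_out_modrm_sib_offset(s, OPC_LEA, datahi,
                                 h.base, h.index, 0, h.ofs);
        tcg_out_modrm_offset(s, movop + h.seg, datalo, datahi, 0); /* low */
        tcg_out_modrm_offset(s, movop + h.seg, datahi, datahi, 4); /* high */
    }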
 
-/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
-   EAX. It will be useful once fixed registers globals are less
-   common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc;
+    MemOp opc = get_memop(oi);
+    HostAddress h;
+
 #if defined(CONFIG_SOFTMMU)
-    int mem_index;
     tcg_insn_unit *label_ptr[2];
-#else
-    unsigned a_bits;
-#endif
 
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    mem_index = get_mmuidx(oi);
-    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+    tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
                      label_ptr, offsetof(CPUTLBEntry, addr_read));
 
     /* TLB Hit.  */
-    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+    h.base = TCG_REG_L1;
+    h.index = -1;
+    h.ofs = 0;
+    h.seg = 0;
+    tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc);
 
     /* Record the current context of a load into ldst label */
-    add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
         tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
     }
 
-    tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
-                           x86_guest_base_offset, x86_guest_base_seg,
-                           is64, opc);
+    h = x86_guest_base;
+    h.base = addrlo;
+    tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc);
 #endif
 }
 
 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                                   TCGReg base, int index, intptr_t ofs,
-                                   int seg, MemOp memop)
+                                   HostAddress h, MemOp memop)
 {
     bool use_movbe = false;
     int movop = OPC_MOVL_EvGv;
@@ -2314,30 +2309,31 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     case MO_8:
         /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
         tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
-        tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
-                                 datalo, base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
+                                 datalo, h.base, h.index, 0, h.ofs);
         break;
     case MO_16:
-        tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
-                                 base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, movop + P_DATA16 + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_32:
-        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_64:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
         } else {
             if (use_movbe) {
                 TCGReg t = datalo;
                 datalo = datahi;
                 datahi = t;
             }
-            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                     base, index, 0, ofs);
-            tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                     base, index, 0, ofs + 4);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datahi,
+                                     h.base, h.index, 0, h.ofs + 4);
         }
         break;
     default:
@@ -2345,46 +2341,39 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc;
+    MemOp opc = get_memop(oi);
+    HostAddress h;
+
 #if defined(CONFIG_SOFTMMU)
-    int mem_index;
     tcg_insn_unit *label_ptr[2];
-#else
-    unsigned a_bits;
-#endif
 
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    mem_index = get_mmuidx(oi);
-    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+    tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
                      label_ptr, offsetof(CPUTLBEntry, addr_write));
 
     /* TLB Hit.  */
-    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
+    h.base = TCG_REG_L1;
+    h.index = -1;
+    h.ofs = 0;
+    h.seg = 0;
+    tcg_out_qemu_st_direct(s, datalo, datahi, h, opc);
 
     /* Record the current context of a store into ldst label */
-    add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
         tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
     }
 
-    tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
-                           x86_guest_base_offset, x86_guest_base_seg, opc);
+    h = x86_guest_base;
+    h.base = addrlo;
+    tcg_out_qemu_st_direct(s, datalo, datahi, h, opc);
 #endif
 }
@@ -2673,17 +2662,37 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, 0);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, 1);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+        }
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st8_i32:
-        tcg_out_qemu_st(s, args, 0);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, 1);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+        }
         break;
 
     OP_32_64(mulu2):
@@ -4070,18 +4079,18 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                  (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                  + stack_addend);
 #else
-# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
+# if !defined(CONFIG_SOFTMMU)
     if (guest_base) {
         int seg = setup_guest_base_seg();
         if (seg != 0) {
-            x86_guest_base_seg = seg;
+            x86_guest_base.seg = seg;
         } else if (guest_base == (int32_t)guest_base) {
-            x86_guest_base_offset = guest_base;
+            x86_guest_base.ofs = guest_base;
         } else {
            /* Choose R12 because, as a base, it requires a SIB byte. */
-            x86_guest_base_index = TCG_REG_R12;
-            tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
-            tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
+            x86_guest_base.index = TCG_REG_R12;
+            tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base);
+            tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index);
         }
     }
 # endif
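
The prologue now fills the file-scope HostAddress instead of three separate globals. The decision order, restated with comments (identifiers from the diff; this is an annotation, not new behavior):

    if (seg != 0) {
        x86_guest_base.seg = seg;            /* %gs-relative: one prefix byte */
    } else if (guest_base == (int32_t)guest_base) {
        x86_guest_base.ofs = guest_base;     /* fits a signed 32-bit disp    */
    } else {
        x86_guest_base.index = TCG_REG_R12;  /* reserved reg; as a base R12
                                                needs a SIB byte anyway      */
    }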

--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc

@@ -1013,135 +1013,124 @@ static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
     return addr;
 }
 
-static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
-                                    TCGReg rk, MemOp opc, TCGType type)
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+} HostAddress;
+
+static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
+                                    TCGReg rd, HostAddress h)
 {
     /* Byte swapping is left to middle-end expansion.  */
     tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SSIZE) {
     case MO_UB:
-        tcg_out_opc_ldx_bu(s, rd, rj, rk);
+        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
         break;
     case MO_SB:
-        tcg_out_opc_ldx_b(s, rd, rj, rk);
+        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
         break;
     case MO_UW:
-        tcg_out_opc_ldx_hu(s, rd, rj, rk);
+        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
     case MO_SW:
-        tcg_out_opc_ldx_h(s, rd, rj, rk);
+        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
     case MO_UL:
         if (type == TCG_TYPE_I64) {
-            tcg_out_opc_ldx_wu(s, rd, rj, rk);
+            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
             break;
         }
         /* fallthrough */
     case MO_SL:
-        tcg_out_opc_ldx_w(s, rd, rj, rk);
+        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
         break;
     case MO_UQ:
-        tcg_out_opc_ldx_d(s, rd, rj, rk);
+        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
         break;
     default:
         g_assert_not_reached();
     }
 }
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl;
-    TCGReg data_regl;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
+    MemOp opc = get_memop(oi);
+    HostAddress h;
+
+#ifdef CONFIG_SOFTMMU
     tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
-    TCGReg base;
 
-    data_regl = *args++;
-    addr_regl = *args++;
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
-    add_qemu_ldst_label(s, 1, oi, type,
-                        data_regl, addr_regl,
-                        s->code_ptr, label_ptr);
+    tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
+    h.index = TCG_REG_TMP2;
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
-        tcg_out_test_alignment(s, true, addr_regl, a_bits);
+        tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-    tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
+    h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+#endif
+
+    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+    tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h);
+
+#ifdef CONFIG_SOFTMMU
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #endif
 }
 
-static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
-                                    TCGReg rj, TCGReg rk, MemOp opc)
+static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
+                                    TCGReg rd, HostAddress h)
 {
     /* Byte swapping is left to middle-end expansion.  */
     tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SIZE) {
     case MO_8:
-        tcg_out_opc_stx_b(s, data, rj, rk);
+        tcg_out_opc_stx_b(s, rd, h.base, h.index);
         break;
     case MO_16:
-        tcg_out_opc_stx_h(s, data, rj, rk);
+        tcg_out_opc_stx_h(s, rd, h.base, h.index);
         break;
     case MO_32:
-        tcg_out_opc_stx_w(s, data, rj, rk);
+        tcg_out_opc_stx_w(s, rd, h.base, h.index);
         break;
     case MO_64:
-        tcg_out_opc_stx_d(s, data, rj, rk);
+        tcg_out_opc_stx_d(s, rd, h.base, h.index);
         break;
     default:
         g_assert_not_reached();
     }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl;
-    TCGReg data_regl;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
+    MemOp opc = get_memop(oi);
+    HostAddress h;
+
+#ifdef CONFIG_SOFTMMU
     tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
-    TCGReg base;
 
-    data_regl = *args++;
-    addr_regl = *args++;
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
-    add_qemu_ldst_label(s, 0, oi, type,
-                        data_regl, addr_regl,
-                        s->code_ptr, label_ptr);
+    tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
+    h.index = TCG_REG_TMP2;
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
-        tcg_out_test_alignment(s, false, addr_regl, a_bits);
+        tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-    tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
+    h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+#endif
+
+    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+    tcg_out_qemu_st_indexed(s, opc, data_reg, h);
+
+#ifdef CONFIG_SOFTMMU
+    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #endif
 }
@@ -1564,16 +1553,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, TCG_TYPE_I32);
+        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, TCG_TYPE_I64);
+        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
         break;
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
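
How the two configurations fill the new HostAddress here, as a summary sketch (identifiers from the diff):

    /* softmmu:   index = TLB addend produced by tcg_out_tlb_load   */
    /* user-only: index = guest_base register, or $zero if unused   */
    /* both:      base  = guest address, zero-extended if 32-bit    */
    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
    /* every access then becomes a single reg+reg form, e.g.:       */
    tcg_out_opc_ldx_d(s, rd, h.base, h.index); /* rd = mem[base + index] */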

--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc

@@ -1479,7 +1479,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 #endif /* SOFTMMU */
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
-                                   TCGReg base, MemOp opc, bool is_64)
+                                   TCGReg base, MemOp opc, TCGType type)
 {
     switch (opc & (MO_SSIZE | MO_BSWAP)) {
     case MO_UB:
@@ -1503,7 +1503,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
         break;
     case MO_UL | MO_BSWAP:
-        if (TCG_TARGET_REG_BITS == 64 && is_64) {
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
             if (use_mips32r2_instructions) {
                 tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
                 tcg_out_bswap32(s, lo, lo, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
@@ -1528,7 +1528,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         }
         break;
     case MO_UL:
-        if (TCG_TARGET_REG_BITS == 64 && is_64) {
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
             tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
             break;
         }
@@ -1583,7 +1583,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 }
 
 static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
-                                    TCGReg base, MemOp opc, bool is_64)
+                                    TCGReg base, MemOp opc, TCGType type)
 {
     const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
     const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
@@ -1623,7 +1623,7 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
     case MO_UL:
         tcg_out_opc_imm(s, lw1, lo, base, 0);
         tcg_out_opc_imm(s, lw2, lo, base, 3);
-        if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn) {
             tcg_out_ext32u(s, lo, lo);
         }
         break;
@@ -1634,18 +1634,18 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
             tcg_out_opc_imm(s, lw1, lo, base, 0);
             tcg_out_opc_imm(s, lw2, lo, base, 3);
             tcg_out_bswap32(s, lo, lo,
-                            TCG_TARGET_REG_BITS == 64 && is_64
+                            TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64
                             ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
         } else {
             const tcg_insn_unit *subr =
-                (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
+                (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn
                  ? bswap32u_addr : bswap32_addr);
 
             tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
             tcg_out_bswap_subr(s, subr);
             /* delay slot */
             tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
-            tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
+            tcg_out_mov(s, type, lo, TCG_TMP3);
         }
         break;
@@ -1702,68 +1702,59 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
     }
 }
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[2];
-#else
-#endif
-    unsigned a_bits, s_bits;
-    TCGReg base = TCG_REG_A0;
-
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    a_bits = get_alignment_bits(opc);
-    s_bits = opc & MO_SIZE;
+    MemOp opc = get_memop(oi);
+    unsigned a_bits = get_alignment_bits(opc);
+    unsigned s_bits = opc & MO_SIZE;
+    TCGReg base;
 
     /*
      * R6 removes the left/right instructions but requires the
      * system to support misaligned memory accesses.
      */
 #if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
+    tcg_insn_unit *label_ptr[2];
+
+    base = TCG_REG_A0;
+    tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 1);
     if (use_mips32r6_instructions || a_bits >= s_bits) {
-        tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+        tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
     } else {
-        tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
+        tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
     }
-    add_qemu_ldst_label(s, 1, oi,
-                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_regl, data_regh, addr_regl, addr_regh,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
+    base = addrlo;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, base, addr_regl);
-        addr_regl = base;
+        tcg_out_ext32u(s, TCG_REG_A0, base);
+        base = TCG_REG_A0;
     }
-    if (guest_base == 0 && data_regl != addr_regl) {
-        base = addr_regl;
-    } else if (guest_base == (int16_t)guest_base) {
-        tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
-    } else {
-        tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
+    if (guest_base) {
+        if (guest_base == (int16_t)guest_base) {
+            tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
+        } else {
+            tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
+                            TCG_GUEST_BASE_REG);
+        }
+        base = TCG_REG_A0;
     }
     if (use_mips32r6_instructions) {
         if (a_bits) {
-            tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+            tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
         }
-        tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+        tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
     } else {
         if (a_bits && a_bits != s_bits) {
-            tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+            tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
         }
         if (a_bits >= s_bits) {
-            tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+            tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
         } else {
-            tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
+            tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
         }
     }
 #endif
@@ -1902,67 +1893,60 @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
         g_assert_not_reached();
     }
 }
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
-{
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[2];
-#endif
-    unsigned a_bits, s_bits;
-    TCGReg base = TCG_REG_A0;
 
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    a_bits = get_alignment_bits(opc);
-    s_bits = opc & MO_SIZE;
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
+{
+    MemOp opc = get_memop(oi);
+    unsigned a_bits = get_alignment_bits(opc);
+    unsigned s_bits = opc & MO_SIZE;
+    TCGReg base;
 
     /*
      * R6 removes the left/right instructions but requires the
      * system to support misaligned memory accesses.
      */
 #if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
+    tcg_insn_unit *label_ptr[2];
+
+    base = TCG_REG_A0;
+    tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 0);
     if (use_mips32r6_instructions || a_bits >= s_bits) {
-        tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+        tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
     } else {
-        tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
+        tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc);
     }
-    add_qemu_ldst_label(s, 0, oi,
-                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_regl, data_regh, addr_regl, addr_regh,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
+    base = addrlo;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, base, addr_regl);
-        addr_regl = base;
+        tcg_out_ext32u(s, TCG_REG_A0, base);
+        base = TCG_REG_A0;
     }
-    if (guest_base == 0) {
-        base = addr_regl;
-    } else if (guest_base == (int16_t)guest_base) {
-        tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
-    } else {
-        tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
+    if (guest_base) {
+        if (guest_base == (int16_t)guest_base) {
+            tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
+        } else {
+            tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
+                            TCG_GUEST_BASE_REG);
+        }
+        base = TCG_REG_A0;
     }
     if (use_mips32r6_instructions) {
         if (a_bits) {
-            tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+            tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
         }
-        tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+        tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
     } else {
         if (a_bits && a_bits != s_bits) {
-            tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+            tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
         }
         if (a_bits >= s_bits) {
-            tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+            tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
         } else {
-            tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
+            tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc);
         }
     }
 #endif
@@ -2425,16 +2409,36 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, false);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, true);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+        }
         break;
     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, false);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, true);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+        }
         break;
 
     case INDEX_op_add2_i32:
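
A compact restatement of the alignment-driven selection used by the mips load and store paths above (identifiers from the diff; the in-tree code nests the pre-R6 alignment check slightly differently):

    /* a_bits = guaranteed alignment, s_bits = access size, both log2. */
    if (use_mips32r6_instructions) {
        /* R6 requires misaligned access support; only trap if the
         * guest insisted on alignment. */
        tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
    } else if (a_bits >= s_bits) {
        /* access cannot straddle a word boundary at this alignment */
        tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
    } else {
        /* pre-R6: LWL/LWR-based sequence tolerates any alignment */
        tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
    }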

--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc

@@ -2118,7 +2118,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+                                TCGType type, MemOpIdx oi,
                                 TCGReg datalo_reg, TCGReg datahi_reg,
                                 TCGReg addrlo_reg, TCGReg addrhi_reg,
                                 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
@@ -2126,6 +2127,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
+    label->type = type;
     label->oi = oi;
     label->datalo_reg = datalo_reg;
     label->datahi_reg = datahi_reg;
@@ -2285,160 +2287,140 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     return tcg_out_fail_alignment(s, l);
 }
 #endif /* SOFTMMU */
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+} HostAddress;
+
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo, rbase;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc, s_bits;
+    MemOp opc = get_memop(oi);
+    MemOp s_bits = opc & MO_SIZE;
+    HostAddress h;
+
 #ifdef CONFIG_SOFTMMU
-    int mem_index;
     tcg_insn_unit *label_ptr;
-#else
-    unsigned a_bits;
-#endif
 
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    s_bits = opc & MO_SIZE;
-
-#ifdef CONFIG_SOFTMMU
-    mem_index = get_mmuidx(oi);
-    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
+    h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
+    h.base = TCG_REG_R3;
 
     /* Load a pointer into the current opcode w/conditional branch-link. */
     label_ptr = s->code_ptr;
     tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
-
-    rbase = TCG_REG_R3;
 #else  /* !CONFIG_SOFTMMU */
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
         tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
     }
-    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
+    h.base = guest_base ? TCG_GUEST_BASE_REG : 0;
+    h.index = addrlo;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
         tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
-        addrlo = TCG_REG_TMP1;
+        h.index = TCG_REG_TMP1;
     }
 #endif
 
     if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
         if (opc & MO_BSWAP) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
-            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
-        } else if (rbase != 0) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
-            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
-        } else if (addrlo == datahi) {
-            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
-            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
+            tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
+        } else if (h.base != 0) {
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
+            tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
+        } else if (h.index == datahi) {
+            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
+            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
         } else {
-            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
-            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
+            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
+            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
         }
     } else {
         uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
         if (!have_isa_2_06 && insn == LDBRX) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
-            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
+            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
             tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
         } else if (insn) {
-            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
+            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
         } else {
             insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
-            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
+            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
             tcg_out_movext(s, TCG_TYPE_REG, datalo,
                            TCG_TYPE_REG, opc & MO_SSIZE, datalo);
         }
     }
 
 #ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #endif
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo, rbase;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc, s_bits;
+    MemOp opc = get_memop(oi);
+    MemOp s_bits = opc & MO_SIZE;
+    HostAddress h;
+
 #ifdef CONFIG_SOFTMMU
-    int mem_index;
     tcg_insn_unit *label_ptr;
-#else
-    unsigned a_bits;
-#endif
 
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    s_bits = opc & MO_SIZE;
-
-#ifdef CONFIG_SOFTMMU
-    mem_index = get_mmuidx(oi);
-    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
+    h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
+    h.base = TCG_REG_R3;
 
     /* Load a pointer into the current opcode w/conditional branch-link. */
     label_ptr = s->code_ptr;
     tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
-
-    rbase = TCG_REG_R3;
 #else  /* !CONFIG_SOFTMMU */
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
         tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
     }
-    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
+    h.base = guest_base ? TCG_GUEST_BASE_REG : 0;
+    h.index = addrlo;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
         tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
-        addrlo = TCG_REG_TMP1;
+        h.index = TCG_REG_TMP1;
     }
 #endif
 
     if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
         if (opc & MO_BSWAP) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
-            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
-        } else if (rbase != 0) {
-            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
-            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
-            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
+            tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
+        } else if (h.base != 0) {
+            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
+            tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
+            tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
         } else {
-            tcg_out32(s, STW | TAI(datahi, addrlo, 0));
-            tcg_out32(s, STW | TAI(datalo, addrlo, 4));
+            tcg_out32(s, STW | TAI(datahi, h.index, 0));
+            tcg_out32(s, STW | TAI(datalo, h.index, 4));
         }
     } else {
         uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
         if (!have_isa_2_06 && insn == STDBRX) {
-            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
-            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
+            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
+            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
             tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
-            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
+            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
         } else {
-            tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
+            tcg_out32(s, insn | SAB(datalo, h.base, h.index));
         }
     }
 
 #ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #endif
 }
@@ -2972,16 +2954,46 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, false);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
+                            args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, true);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
+                            args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
+                            args[4], TCG_TYPE_I64);
+        }
         break;
     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, false);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
+                            args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, true);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+                            args[2], TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
+                            args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
+                            args[4], TCG_TYPE_I64);
+        }
        break;
 
     case INDEX_op_setcond_i32:
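
The argument layout being unpacked by all of these dispatchers, summarized as a C comment (this mirrors the convention visible in the diff rather than quoting a header):

    /*
     * qemu_ld_i64 on a 32-bit host with a 64-bit guest address:
     *   args[0] = datalo   args[1] = datahi
     *   args[2] = addrlo   args[3] = addrhi
     *   args[4] = MemOpIdx oi
     * so the widest call is:
     */
    tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
                    args[4], TCG_TYPE_I64);
    /* On a 64-bit host the hi halves do not exist; callers pass a
     * placeholder for the unused registers (-1 here, 0 in the mips
     * backend). */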

--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h

@@ -13,18 +13,10 @@ C_O0_I1(r)
 C_O0_I2(LZ, L)
 C_O0_I2(rZ, r)
 C_O0_I2(rZ, rZ)
-C_O0_I3(LZ, L, L)
-C_O0_I3(LZ, LZ, L)
-C_O0_I4(LZ, LZ, L, L)
-C_O0_I4(rZ, rZ, rZ, rZ)
 C_O1_I1(r, L)
 C_O1_I1(r, r)
-C_O1_I2(r, L, L)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
 C_O1_I2(r, rZ, rN)
 C_O1_I2(r, rZ, rZ)
-C_O1_I4(r, rZ, rZ, rZ, rZ)
-C_O2_I1(r, r, L)
-C_O2_I2(r, r, L, L)
 C_O2_I4(r, r, rZ, rZ, rM, rM)
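
For readers unfamiliar with this file, roughly how the macros read (a convention note, not quoted from a header):

    /* C_On_Im(...) declares a constraint set with n outputs and m inputs;
     * each letter is a register constraint: "r" any GPR, "L" the
     * qemu_ld/st class that avoids helper-clobbered registers, and a
     * trailing "Z" permits the zero register for constant 0.  The deleted
     * entries served only 32-bit-host operand shapes, e.g.
     * C_O0_I4(rZ, rZ, rZ, rZ) fed brcond2 and goes away with it. */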

--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc

@@ -137,15 +137,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define SOFTMMU_RESERVE_REGS  0
 #endif
 
-static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
-{
-    if (TCG_TARGET_REG_BITS == 32) {
-        return sextract32(val, pos, len);
-    } else {
-        return sextract64(val, pos, len);
-    }
-}
+#define sextreg  sextract64
 
 /* test if a constant matches the constraint */
 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
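
With 32-bit hosts dropped, sextreg collapses to the 64-bit extract. What that helper computes, as a self-contained toy (QEMU's real sextract64 lives in its bitops header and differs in form):

    #include <stdint.h>
    #include <assert.h>

    /* Extract "len" bits of "value" starting at "start", sign-extended. */
    static int64_t toy_sextract64(uint64_t value, int start, int len)
    {
        return ((int64_t)(value << (64 - len - start))) >> (64 - len);
    }

    int main(void)
    {
        /* The short-jump range check used below:
         * offset == sextreg(offset, 0, 20) means it fits 20 signed bits. */
        assert(toy_sextract64(0x7ffff, 0, 20) == 0x7ffff);
        assert(toy_sextract64((uint64_t)-0x80000, 0, 20) == -0x80000);
        assert(toy_sextract64(0x80000, 0, 20) != 0x80000); /* out of range */
        return 0;
    }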
@@ -235,7 +227,6 @@ typedef enum {
     OPC_XOR = 0x4033,
     OPC_XORI = 0x4013,
 
-#if TCG_TARGET_REG_BITS == 64
     OPC_ADDIW = 0x1b,
     OPC_ADDW = 0x3b,
     OPC_DIVUW = 0x200503b,
@@ -250,23 +241,6 @@ typedef enum {
     OPC_SRLIW = 0x501b,
     OPC_SRLW = 0x503b,
     OPC_SUBW = 0x4000003b,
-#else
-    /* Simplify code throughout by defining aliases for RV32. */
-    OPC_ADDIW = OPC_ADDI,
-    OPC_ADDW = OPC_ADD,
-    OPC_DIVUW = OPC_DIVU,
-    OPC_DIVW = OPC_DIV,
-    OPC_MULW = OPC_MUL,
-    OPC_REMUW = OPC_REMU,
-    OPC_REMW = OPC_REM,
-    OPC_SLLIW = OPC_SLLI,
-    OPC_SLLW = OPC_SLL,
-    OPC_SRAIW = OPC_SRAI,
-    OPC_SRAW = OPC_SRA,
-    OPC_SRLIW = OPC_SRLI,
-    OPC_SRLW = OPC_SRL,
-    OPC_SUBW = OPC_SUB,
-#endif
 
     OPC_FENCE = 0x0000000f,
     OPC_NOP   = OPC_ADDI,   /* nop = addi r0,r0,0 */
@@ -500,7 +474,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     tcg_target_long lo, hi, tmp;
     int shift, ret;
 
-    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+    if (type == TCG_TYPE_I32) {
         val = (int32_t)val;
     }
 
@@ -511,7 +485,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     }
 
     hi = val - lo;
-    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
+    if (val == (int32_t)val) {
         tcg_out_opc_upper(s, OPC_LUI, rd, hi);
         if (lo != 0) {
             tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
@@ -519,7 +493,6 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
         return;
     }
 
-    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
     tmp = tcg_pcrel_diff(s, (void *)val);
     if (tmp == (int32_t)tmp) {
         tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
@@ -668,15 +641,15 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                        TCGReg arg1, intptr_t arg2)
 {
-    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
-    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
+    tcg_out_ldst(s, insn, arg, arg1, arg2);
 }
 
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                        TCGReg arg1, intptr_t arg2)
 {
-    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
-    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
+    tcg_out_ldst(s, insn, arg, arg1, arg2);
 }
 
 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
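
The LUI+ADDIW split used by tcg_out_movi above, checked as a standalone toy: lo is the sign-extended low 12 bits, hi the remainder, so LUI(hi) followed by ADDIW(lo) reconstructs any 32-bit value.

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        int32_t val = 0x12345FFF;
        /* sextract(val, 0, 12): sign-extend the low 12 bits */
        int32_t lo = (int32_t)((uint32_t)val << 20) >> 20;   /* -1 here */
        int32_t hi = val - lo;                  /* 0x12346000: LUI operand */
        assert((hi & 0xfff) == 0);              /* multiple of 0x1000 */
        assert(hi + lo == val);                 /* LUI + ADDIW round-trips */
        return 0;
    }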
@@ -829,20 +802,6 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
     }
 }
 
-static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
-                            TCGReg bl, TCGReg bh, TCGLabel *l)
-{
-    /* todo */
-    g_assert_not_reached();
-}
-
-static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
-                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
-{
-    /* todo */
-    g_assert_not_reached();
-}
-
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
 {
     TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -853,20 +812,18 @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
     if (offset == sextreg(offset, 0, 20)) {
         /* short jump: -2097150 to 2097152 */
         tcg_out_opc_jump(s, OPC_JAL, link, offset);
-    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
+    } else if (offset == (int32_t)offset) {
         /* long jump: -2147483646 to 2147483648 */
         tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
         tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
         ret = reloc_call(s->code_ptr - 2, arg);
         tcg_debug_assert(ret == true);
-    } else if (TCG_TARGET_REG_BITS == 64) {
+    } else {
         /* far jump: 64-bit */
         tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
         tcg_target_long base = (tcg_target_long)arg - imm;
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
         tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
-    } else {
-        g_assert_not_reached();
     }
 }
 
@@ -942,9 +899,6 @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
 #endif
 };
 
-/* We don't support oversize guests */
-QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
-
 /* We expect to use a 12-bit negative offset from ENV.  */
 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
@@ -956,8 +910,7 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
     tcg_debug_assert(ok);
 }
 
-static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
-                               TCGReg addrh, MemOpIdx oi,
+static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi,
                                tcg_insn_unit **label_ptr, bool is_load)
 {
     MemOp opc = get_memop(oi);
@@ -973,7 +926,7 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
 
-    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
+    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
     tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -992,10 +945,10 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
     /* Clear the non-page, non-alignment bits from the address.  */
     compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     if (compare_mask == sextreg(compare_mask, 0, 12)) {
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
+        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask);
     } else {
         tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
-        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
+        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr);
     }
 
     /* Compare masked address with the TLB entry. */
@@ -1003,29 +956,26 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
     tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
 
     /* TLB Hit - translate address using addend.  */
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
-        addrl = TCG_REG_TMP0;
+    if (TARGET_LONG_BITS == 32) {
+        tcg_out_ext32u(s, TCG_REG_TMP0, addr);
+        addr = TCG_REG_TMP0;
     }
-    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
+    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr);
     return TCG_REG_TMP0;
 }
 
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
-                                TCGType ext,
-                                TCGReg datalo, TCGReg datahi,
-                                TCGReg addrlo, TCGReg addrhi,
-                                void *raddr, tcg_insn_unit **label_ptr)
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
+                                TCGType data_type, TCGReg data_reg,
+                                TCGReg addr_reg, void *raddr,
+                                tcg_insn_unit **label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
     label->oi = oi;
-    label->type = ext;
-    label->datalo_reg = datalo;
-    label->datahi_reg = datahi;
-    label->addrlo_reg = addrlo;
-    label->addrhi_reg = addrhi;
+    label->type = data_type;
+    label->datalo_reg = data_reg;
+    label->addrlo_reg = addr_reg;
     label->raddr = tcg_splitwx_to_rx(raddr);
     label->label_ptr[0] = label_ptr[0];
 }
@@ -1039,11 +989,6 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     TCGReg a2 = tcg_target_call_iarg_regs[2];
     TCGReg a3 = tcg_target_call_iarg_regs[3];
 
-    /* We don't support oversize guests */
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        g_assert_not_reached();
-    }
-
     /* resolve label address */
     if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
         return false;
@@ -1073,11 +1018,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     TCGReg a3 = tcg_target_call_iarg_regs[3];
     TCGReg a4 = tcg_target_call_iarg_regs[4];
 
-    /* We don't support oversize guests */
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        g_assert_not_reached();
-    }
-
     /* resolve label address */
     if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
         return false;
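
The softmmu fast path emitted by tcg_out_tlb_load above, restated in pseudo-assembly (a summary of the diff, not authoritative):

    /*
     *   srli  tmp2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS
     *   and   tmp2, tmp2, tmp0     # tmp0 = fast->mask: TLB index
     *   add   tmp2, tmp2, tmp1     # tmp1 = fast->table: &tlb_entry
     *   ld    tmp0, addr_read/addr_write(tmp2)
     *   andi  tmp1, addr, page-and-alignment mask
     *   bne   tmp0, tmp1, slow_path      # tag compare
     *   add   tmp0, addend, addr         # host address on hit
     */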
@@ -1146,85 +1086,62 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
 #endif /* CONFIG_SOFTMMU */
 
-static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
-                                   TCGReg base, MemOp opc, bool is_64)
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
+                                   TCGReg base, MemOp opc, TCGType type)
 {
     /* Byte swapping is left to middle-end expansion.  */
     tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & (MO_SSIZE)) {
     case MO_UB:
-        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
         break;
     case MO_SB:
-        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
         break;
     case MO_UW:
-        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
         break;
     case MO_SW:
-        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
         break;
     case MO_UL:
-        if (TCG_TARGET_REG_BITS == 64 && is_64) {
-            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
+        if (type == TCG_TYPE_I64) {
+            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
             break;
         }
         /* FALLTHRU */
     case MO_SL:
-        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
         break;
     case MO_UQ:
-        /* Prefer to load from offset 0 first, but allow for overlap.  */
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
-        } else if (lo != base) {
-            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
-            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
-        } else {
-            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
-            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
-        }
+        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
         break;
     default:
         g_assert_not_reached();
     }
 }
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
+    MemOp opc = get_memop(oi);
     TCGReg base;
 
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-
 #if defined(CONFIG_SOFTMMU)
-    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
-    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
-    add_qemu_ldst_label(s, 1, oi,
-                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_regl, data_regh, addr_regl, addr_regh,
-                        s->code_ptr, label_ptr);
+    tcg_insn_unit *label_ptr[1];
+
+    base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
+    tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
+
     if (a_bits) {
-        tcg_out_test_alignment(s, true, addr_regl, a_bits);
+        tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    base = addr_regl;
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+    base = addr_reg;
+    if (TARGET_LONG_BITS == 32) {
         tcg_out_ext32u(s, TCG_REG_TMP0, base);
         base = TCG_REG_TMP0;
     }
@@ -1232,11 +1149,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
         tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
         base = TCG_REG_TMP0;
     }
-    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+    tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
 #endif
 }
 
-static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                    TCGReg base, MemOp opc)
 {
     /* Byte swapping is left to middle-end expansion.  */
@@ -1244,61 +1161,42 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
     switch (opc & (MO_SSIZE)) {
     case MO_8:
-        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
+        tcg_out_opc_store(s, OPC_SB, base, val, 0);
         break;
     case MO_16:
-        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
+        tcg_out_opc_store(s, OPC_SH, base, val, 0);
         break;
     case MO_32:
-        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
+        tcg_out_opc_store(s, OPC_SW, base, val, 0);
         break;
     case MO_64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
-        } else {
-            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
-            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
-        }
+        tcg_out_opc_store(s, OPC_SD, base, val, 0);
         break;
     default:
         g_assert_not_reached();
     }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
-    MemOpIdx oi;
-    MemOp opc;
+    MemOp opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[1];
#else
unsigned a_bits;
#endif
TCGReg base; TCGReg base;
data_regl = *args++;
data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
addr_regl = *args++;
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
oi = *args++;
opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU) #if defined(CONFIG_SOFTMMU)
base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0); tcg_insn_unit *label_ptr[1];
tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
add_qemu_ldst_label(s, 0, oi, base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), tcg_out_qemu_st_direct(s, data_reg, base, opc);
data_regl, data_regh, addr_regl, addr_regh, add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr); s->code_ptr, label_ptr);
#else #else
a_bits = get_alignment_bits(opc); unsigned a_bits = get_alignment_bits(opc);
if (a_bits) { if (a_bits) {
tcg_out_test_alignment(s, false, addr_regl, a_bits); tcg_out_test_alignment(s, false, addr_reg, a_bits);
} }
base = addr_regl; base = addr_reg;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_TMP0, base); tcg_out_ext32u(s, TCG_REG_TMP0, base);
base = TCG_REG_TMP0; base = TCG_REG_TMP0;
} }
@ -1306,7 +1204,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base); tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
base = TCG_REG_TMP0; base = TCG_REG_TMP0;
} }
tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); tcg_out_qemu_st_direct(s, data_reg, base, opc);
#endif #endif
} }
@ -1585,29 +1483,23 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_brcond_i64: case INDEX_op_brcond_i64:
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
break; break;
case INDEX_op_brcond2_i32:
tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
break;
case INDEX_op_setcond_i32: case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64: case INDEX_op_setcond_i64:
tcg_out_setcond(s, args[3], a0, a1, a2); tcg_out_setcond(s, args[3], a0, a1, a2);
break; break;
case INDEX_op_setcond2_i32:
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
break;
case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args, false); tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break; break;
case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, args, true); tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break; break;
case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, args, false); tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
break; break;
case INDEX_op_qemu_st_i64: case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, args, true); tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break; break;
case INDEX_op_extrh_i64_i32: case INDEX_op_extrh_i64_i32:
@ -1748,26 +1640,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sub2_i64: case INDEX_op_sub2_i64:
return C_O2_I4(r, r, rZ, rZ, rM, rM); return C_O2_I4(r, r, rZ, rZ, rM, rM);
case INDEX_op_brcond2_i32:
return C_O0_I4(rZ, rZ, rZ, rZ);
case INDEX_op_setcond2_i32:
return C_O1_I4(r, rZ, rZ, rZ, rZ);
case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i32:
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O1_I1(r, L) : C_O1_I2(r, L, L));
case INDEX_op_qemu_st_i32:
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_ld_i64:
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) return C_O1_I1(r, L);
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) case INDEX_op_qemu_st_i32:
: C_O2_I2(r, r, L, L));
case INDEX_op_qemu_st_i64: case INDEX_op_qemu_st_i64:
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L) return C_O0_I2(LZ, L);
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
: C_O0_I4(LZ, LZ, L, L));
default: default:
g_assert_not_reached(); g_assert_not_reached();
@ -1843,9 +1721,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
static void tcg_target_init(TCGContext *s) static void tcg_target_init(TCGContext *s)
{ {
tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
if (TCG_TARGET_REG_BITS == 64) {
tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
}
tcg_target_call_clobber_regs = -1u; tcg_target_call_clobber_regs = -1u;
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
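The riscv changes above replace the old `(const TCGArg *args, bool is_64)` unpacking with explicit registers plus a MemOpIdx and a TCGType. As a reader's aid, here is a minimal self-contained C sketch of the MemOpIdx packing that lets one scalar `oi` carry both the MemOp and the mmu index; the 4-bit layout mirrors qemu's include/exec/memopidx.h as of this series, but treat the exact constants as an assumption rather than a spec:

    /* Sketch only: MO_* values and the shift/mask layout are assumptions. */
    #include <assert.h>
    #include <stdio.h>

    typedef unsigned MemOp;
    typedef unsigned MemOpIdx;

    enum { MO_UB = 0, MO_UW = 1, MO_UL = 2, MO_UQ = 3 };

    static MemOpIdx make_memop_idx(MemOp op, unsigned idx)
    {
        assert(idx <= 15);            /* mmu index fits in the low 4 bits */
        return (op << 4) | idx;
    }

    static MemOp get_memop(MemOpIdx oi)     { return oi >> 4; }
    static unsigned get_mmuidx(MemOpIdx oi) { return oi & 15; }

    int main(void)
    {
        MemOpIdx oi = make_memop_idx(MO_UL, 3);
        /* One scalar argument carries both the access type and the TLB index. */
        printf("memop=%u mmuidx=%u\n", get_memop(oi), get_mmuidx(oi));
        assert(get_memop(oi) == MO_UL && get_mmuidx(oi) == 3);
        return 0;
    }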

View file

@@ -25,11 +25,14 @@
 #ifndef RISCV_TCG_TARGET_H
 #define RISCV_TCG_TARGET_H
 
-#if __riscv_xlen == 32
-# define TCG_TARGET_REG_BITS 32
-#elif __riscv_xlen == 64
-# define TCG_TARGET_REG_BITS 64
+/*
+ * We don't support oversize guests.
+ * Since we will only build tcg once, this in turn requires a 64-bit host.
+ */
+#if __riscv_xlen != 64
+#error "unsupported code generation mode"
 #endif
+#define TCG_TARGET_REG_BITS 64
 
 #define TCG_TARGET_INSN_UNIT_SIZE 4
 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 20
@@ -83,13 +86,8 @@
 #define TCG_TARGET_STACK_ALIGN          16
 #define TCG_TARGET_CALL_STACK_OFFSET    0
 #define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
-#if TCG_TARGET_REG_BITS == 32
-#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_EVEN
-#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_EVEN
-#else
 #define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
 #define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
-#endif
 #define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
 
 /* optional instructions */
@@ -106,8 +104,8 @@
 #define TCG_TARGET_HAS_sub2_i32         1
 #define TCG_TARGET_HAS_mulu2_i32        0
 #define TCG_TARGET_HAS_muls2_i32        0
-#define TCG_TARGET_HAS_muluh_i32        (TCG_TARGET_REG_BITS == 32)
-#define TCG_TARGET_HAS_mulsh_i32        (TCG_TARGET_REG_BITS == 32)
+#define TCG_TARGET_HAS_muluh_i32        0
+#define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_ext8s_i32        1
 #define TCG_TARGET_HAS_ext16s_i32       1
 #define TCG_TARGET_HAS_ext8u_i32        1
@@ -128,7 +126,6 @@
 #define TCG_TARGET_HAS_setcond2         1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
-#if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_movcond_i64      0
 #define TCG_TARGET_HAS_div_i64          1
 #define TCG_TARGET_HAS_rem_i64          1
@@ -165,7 +162,6 @@
 #define TCG_TARGET_HAS_muls2_i64        0
 #define TCG_TARGET_HAS_muluh_i64        1
 #define TCG_TARGET_HAS_mulsh_i64        1
-#endif
 
 #define TCG_TARGET_DEFAULT_MO (0)
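For context on the comment above: a guest is "oversize" when its virtual addresses are wider than one host register, which would force guest addresses to travel in register pairs. A hypothetical compile-time probe of that predicate (macro names invented here, not qemu's, so the sketch does not clash with the real headers):

    /* Illustrative only; HOST_REG_BITS/GUEST_LONG_BITS are made-up names. */
    #define HOST_REG_BITS   64   /* riscv64 host, per the guard above */
    #define GUEST_LONG_BITS 64   /* e.g. a 64-bit guest */

    #if HOST_REG_BITS < GUEST_LONG_BITS
    #error "oversize guest: a guest address no longer fits one host register"
    #endif

    int main(void) { return 0; }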

View file

@@ -1606,58 +1606,64 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
     tcg_out_call_int(s, dest);
 }
 
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+    int disp;
+} HostAddress;
+
 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
-                                   TCGReg base, TCGReg index, int disp)
+                                   HostAddress h)
 {
     switch (opc & (MO_SSIZE | MO_BSWAP)) {
     case MO_UB:
-        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
+        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
         break;
     case MO_SB:
-        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
+        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
         break;
 
     case MO_UW | MO_BSWAP:
         /* swapped unsigned halfword load with upper bits zeroed */
-        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
         tcg_out_ext16u(s, data, data);
         break;
     case MO_UW:
-        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
         break;
 
     case MO_SW | MO_BSWAP:
         /* swapped sign-extended halfword load */
-        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
         tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
         break;
     case MO_SW:
-        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
         break;
 
     case MO_UL | MO_BSWAP:
         /* swapped unsigned int load with upper bits zeroed */
-        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
         tcg_out_ext32u(s, data, data);
         break;
     case MO_UL:
-        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
+        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
         break;
 
     case MO_SL | MO_BSWAP:
         /* swapped sign-extended int load */
-        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
         tcg_out_ext32s(s, data, data);
         break;
     case MO_SL:
-        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
+        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
         break;
 
     case MO_UQ | MO_BSWAP:
-        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
         break;
     case MO_UQ:
-        tcg_out_insn(s, RXY, LG, data, base, index, disp);
+        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
         break;
 
     default:
@@ -1666,44 +1672,44 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
 }
 
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
-                                   TCGReg base, TCGReg index, int disp)
+                                   HostAddress h)
 {
     switch (opc & (MO_SIZE | MO_BSWAP)) {
     case MO_UB:
-        if (disp >= 0 && disp < 0x1000) {
-            tcg_out_insn(s, RX, STC, data, base, index, disp);
+        if (h.disp >= 0 && h.disp < 0x1000) {
+            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
         } else {
-            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
+            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
         }
         break;
 
     case MO_UW | MO_BSWAP:
-        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
+        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
         break;
     case MO_UW:
-        if (disp >= 0 && disp < 0x1000) {
-            tcg_out_insn(s, RX, STH, data, base, index, disp);
+        if (h.disp >= 0 && h.disp < 0x1000) {
+            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
         } else {
-            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
+            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
         }
         break;
 
     case MO_UL | MO_BSWAP:
-        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
+        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
         break;
     case MO_UL:
-        if (disp >= 0 && disp < 0x1000) {
-            tcg_out_insn(s, RX, ST, data, base, index, disp);
+        if (h.disp >= 0 && h.disp < 0x1000) {
+            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
         } else {
-            tcg_out_insn(s, RXY, STY, data, base, index, disp);
+            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
         }
         break;
 
     case MO_UQ | MO_BSWAP:
-        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
+        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
         break;
     case MO_UQ:
-        tcg_out_insn(s, RXY, STG, data, base, index, disp);
+        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
         break;
 
     default:
@@ -1770,13 +1776,14 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
 }
 
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
-                                TCGReg data, TCGReg addr,
+                                TCGType type, TCGReg data, TCGReg addr,
                                 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
     label->oi = oi;
+    label->type = type;
     label->datalo_reg = data;
     label->addrlo_reg = addr;
     label->raddr = tcg_splitwx_to_rx(raddr);
@@ -1882,82 +1889,89 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return tcg_out_fail_alignment(s, l);
 }
 
-static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
-                                  TCGReg *index_reg, tcg_target_long *disp)
+static HostAddress tcg_prepare_user_ldst(TCGContext *s, TCGReg addr_reg)
 {
+    TCGReg index;
+    int disp;
+
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_ext32u(s, TCG_TMP0, *addr_reg);
-        *addr_reg = TCG_TMP0;
+        tcg_out_ext32u(s, TCG_TMP0, addr_reg);
+        addr_reg = TCG_TMP0;
     }
     if (guest_base < 0x80000) {
-        *index_reg = TCG_REG_NONE;
-        *disp = guest_base;
+        index = TCG_REG_NONE;
+        disp = guest_base;
     } else {
-        *index_reg = TCG_GUEST_BASE_REG;
-        *disp = 0;
+        index = TCG_GUEST_BASE_REG;
+        disp = 0;
     }
+    return (HostAddress){ .base = addr_reg, .index = index, .disp = disp };
 }
 #endif /* CONFIG_SOFTMMU */
 
 static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
-                            MemOpIdx oi)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;
 
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
-    TCGReg base_reg;
 
-    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+    h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+    h.index = TCG_REG_R2;
+    h.disp = 0;
 
     tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
     label_ptr = s->code_ptr;
     s->code_ptr += 1;
 
-    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+    tcg_out_qemu_ld_direct(s, opc, data_reg, h);
 
-    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
-    TCGReg index_reg;
-    tcg_target_long disp;
     unsigned a_bits = get_alignment_bits(opc);
 
     if (a_bits) {
         tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
-    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+    h = tcg_prepare_user_ldst(s, addr_reg);
+    tcg_out_qemu_ld_direct(s, opc, data_reg, h);
 #endif
 }
 
 static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
-                            MemOpIdx oi)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;
 
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
-    TCGReg base_reg;
 
-    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+    h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+    h.index = TCG_REG_R2;
+    h.disp = 0;
 
     tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
     label_ptr = s->code_ptr;
     s->code_ptr += 1;
 
-    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+    tcg_out_qemu_st_direct(s, opc, data_reg, h);
 
-    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
-    TCGReg index_reg;
-    tcg_target_long disp;
     unsigned a_bits = get_alignment_bits(opc);
 
     if (a_bits) {
         tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
-    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
-    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+    h = tcg_prepare_user_ldst(s, addr_reg);
+    tcg_out_qemu_st_direct(s, opc, data_reg, h);
 #endif
 }
@@ -2307,13 +2321,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        /* ??? Technically we can use a non-extending instruction. */
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
+        break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
+        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
+        break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2]);
+        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
         break;
 
     case INDEX_op_ld16s_i64:
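The HostAddress struct introduced above bundles the s390x base/index/displacement triple. A standalone sketch (not qemu code; register values are stand-in integers here) of the effective-address computation it models, showing why one struct argument can replace three scalars in every ld/st helper:

    /* Sketch with hypothetical values; real code passes register numbers. */
    #include <inttypes.h>
    #include <stdio.h>

    typedef struct {
        uint64_t base;   /* value held by the base register */
        uint64_t index;  /* value held by the index register; 0 models TCG_REG_NONE */
        int disp;        /* signed 20-bit displacement on s390x */
    } HostAddress;

    static uint64_t effective_addr(HostAddress h)
    {
        /* The RX/RXY forms compute base + index + disp in hardware. */
        return h.base + h.index + (int64_t)h.disp;
    }

    int main(void)
    {
        HostAddress h = { .base = 0x10000, .index = 0x200, .disp = 8 };
        printf("effective address = 0x%" PRIx64 "\n", effective_addr(h));
        return 0;
    }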

View file

@@ -1178,7 +1178,7 @@ static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
 };
 
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
-                            MemOpIdx oi, bool is_64)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp memop = get_memop(oi);
     tcg_insn_unit *label_ptr;
@@ -1220,7 +1220,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);
 
     /* We let the helper sign-extend SB and SW, but leave SL for here. */
-    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
+    if ((memop & MO_SSIZE) == MO_SL) {
         tcg_out_ext32s(s, data, TCG_REG_O0);
     } else {
         tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
@@ -1636,10 +1636,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, a1, a2, false);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, true);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
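The sparc64 hunk above drops the `is_64 &&` guard: a signed 32-bit load only needs explicit widening of the helper's return value when the destination is a 64-bit temp, which the MO_SL case now implies. What that MO_SL extension does, as a tiny standalone check (the values are arbitrary, chosen only for the assertion):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t raw = 0xfffffff6u;          /* helper returns zero-extended bits */
        int64_t widened = (int32_t)raw;      /* MO_SL: sign-extend 32 -> 64 */
        assert(widened == -10);
        return 0;
    }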

View file

@@ -58,10 +58,6 @@ typedef struct TCGCallArgumentLoc {
     unsigned tmp_subindex : 2;
 } TCGCallArgumentLoc;
 
-/* Avoid "unsigned < 0 is always false" Werror, when iarg_regs is empty. */
-#define REG_P(L) \
-    ((int)(L)->arg_slot < (int)ARRAY_SIZE(tcg_target_call_iarg_regs))
-
 typedef struct TCGHelperInfo {
     void *func;
     const char *name;
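REG_P and its casts are deleted above in favor of arg_slot_reg_p() in tcg.c below. A compilable sketch of the diagnostic being dodged, using an invented slot_is_reg() stand-in rather than the real helper:

    #include <stdio.h>

    /* iarg_regs can be empty on some hosts, making the bound 0. */
    enum { NREG = 0 };

    static int slot_is_reg(unsigned arg_slot)
    {
        /*
         * Writing 'arg_slot < NREG' directly trips gcc's -Wtype-limits
         * ("comparison of unsigned expression < 0 is always false");
         * hoisting the bound into a variable, as arg_slot_reg_p() does,
         * avoids the diagnostic without REG_P's int casts.
         */
        unsigned nreg = NREG;
        return arg_slot < nreg;
    }

    int main(void)
    {
        printf("slot 0 in reg? %d\n", slot_is_reg(0));   /* 0: no registers */
        return 0;
    }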

View file

@@ -20,20 +20,6 @@
  * THE SOFTWARE.
  */
 
-typedef struct TCGLabelQemuLdst {
-    bool is_ld;             /* qemu_ld: true, qemu_st: false */
-    MemOpIdx oi;
-    TCGType type;           /* result type of a load */
-    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
-    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
-    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
-    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
-    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
-    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
-    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
-} TCGLabelQemuLdst;
-
 /*
  * Generate TB finalization at the end of block
  */
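The TCGLabelQemuLdst definition removed here moves into tcg.c (next file). Its QSIMPLEQ_ENTRY member is an intrusive link; a toy analogue (hand-rolled, not qemu's queue.h macros) of how such a tail queue chains slow-path labels in insertion order:

    #include <stddef.h>
    #include <stdio.h>

    struct label {
        int id;
        struct label *next;          /* plays the role of QSIMPLEQ_ENTRY */
    };

    struct queue {
        struct label *head;
        struct label **tail;         /* points at the last 'next' slot */
    };

    static void queue_init(struct queue *q)
    {
        q->head = NULL;
        q->tail = &q->head;
    }

    static void queue_insert_tail(struct queue *q, struct label *l)
    {
        l->next = NULL;              /* link lives inside the element itself */
        *q->tail = l;
        q->tail = &l->next;
    }

    int main(void)
    {
        struct queue q;
        struct label a = { .id = 1 }, b = { .id = 2 };

        queue_init(&q);
        queue_insert_tail(&q, &a);
        queue_insert_tail(&q, &b);
        for (struct label *l = q.head; l; l = l->next) {
            printf("label %d\n", l->id);   /* prints 1 then 2 */
        }
        return 0;
    }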

View file

@@ -94,6 +94,19 @@ typedef struct QEMU_PACKED {
     DebugFrameFDEHeader fde;
 } DebugFrameHeader;
 
+typedef struct TCGLabelQemuLdst {
+    bool is_ld;             /* qemu_ld: true, qemu_st: false */
+    MemOpIdx oi;
+    TCGType type;           /* result type of a load */
+    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
+    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
+    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
+    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
+    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
+    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
+    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
+} TCGLabelQemuLdst;
+
 static void tcg_register_jit_int(const void *buf, size_t size,
                                  const void *debug_frame,
                                  size_t debug_frame_size)
@@ -793,6 +806,25 @@ static void init_ffi_layouts(void)
 }
 #endif /* CONFIG_TCG_INTERPRETER */
 
+static inline bool arg_slot_reg_p(unsigned arg_slot)
+{
+    /*
+     * Split the sizeof away from the comparison to avoid Werror from
+     * "unsigned < 0 is always false", when iarg_regs is empty.
+     */
+    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
+    return arg_slot < nreg;
+}
+
+static inline int arg_slot_stk_ofs(unsigned arg_slot)
+{
+    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
+    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
+
+    tcg_debug_assert(stk_slot < max);
+    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
+}
+
 typedef struct TCGCumulativeArgs {
     int arg_idx;                /* tcg_gen_callN args[] */
     int info_in_idx;            /* TCGHelperInfo in[] */
@@ -1032,6 +1064,7 @@ static void init_call_layout(TCGHelperInfo *info)
         }
     }
     assert(ref_base + cum.ref_slot <= max_stk_slots);
+    ref_base += max_reg_slots;
 
     if (ref_base != 0) {
         for (int i = cum.info_in_idx - 1; i >= 0; --i) {
@@ -3218,7 +3251,7 @@ liveness_pass_1(TCGContext *s)
             case TCG_CALL_ARG_NORMAL:
             case TCG_CALL_ARG_EXTEND_U:
             case TCG_CALL_ARG_EXTEND_S:
-                if (REG_P(loc)) {
+                if (arg_slot_reg_p(loc->arg_slot)) {
                     *la_temp_pref(ts) = 0;
                     break;
                 }
@@ -3245,7 +3278,7 @@ liveness_pass_1(TCGContext *s)
             case TCG_CALL_ARG_NORMAL:
             case TCG_CALL_ARG_EXTEND_U:
             case TCG_CALL_ARG_EXTEND_S:
-                if (REG_P(loc)) {
+                if (arg_slot_reg_p(loc->arg_slot)) {
                     tcg_regset_set_reg(*la_temp_pref(ts),
                                        tcg_target_call_iarg_regs[loc->arg_slot]);
                 }
@@ -4803,7 +4836,7 @@ static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
     }
 }
 
-static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
+static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
                          TCGRegSet allocated_regs)
 {
     /*
@@ -4813,30 +4846,27 @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
      */
     temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
     tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
-               TCG_TARGET_CALL_STACK_OFFSET +
-               stk_slot * sizeof(tcg_target_long));
+               arg_slot_stk_ofs(arg_slot));
 }
 
 static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                             TCGTemp *ts, TCGRegSet *allocated_regs)
 {
-    if (REG_P(l)) {
+    if (arg_slot_reg_p(l->arg_slot)) {
         TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
         load_arg_reg(s, reg, ts, *allocated_regs);
         tcg_regset_set_reg(*allocated_regs, reg);
     } else {
-        load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
-                     ts, *allocated_regs);
+        load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
     }
 }
 
-static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
+static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
                          intptr_t ref_off, TCGRegSet *allocated_regs)
 {
     TCGReg reg;
-    int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
 
-    if (stk_slot < 0) {
+    if (arg_slot_reg_p(arg_slot)) {
         reg = tcg_target_call_iarg_regs[arg_slot];
         tcg_reg_free(s, reg, *allocated_regs);
         tcg_out_addi_ptr(s, reg, ref_base, ref_off);
@@ -4846,8 +4876,7 @@ static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
                   *allocated_regs, 0, false);
         tcg_out_addi_ptr(s, reg, ref_base, ref_off);
         tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
-                   TCG_TARGET_CALL_STACK_OFFSET
-                   + stk_slot * sizeof(tcg_target_long));
+                   arg_slot_stk_ofs(arg_slot));
     }
 }
 
@@ -4877,8 +4906,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
         case TCG_CALL_ARG_BY_REF:
             load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
             load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
-                         TCG_TARGET_CALL_STACK_OFFSET
-                         + loc->ref_slot * sizeof(tcg_target_long),
+                         arg_slot_stk_ofs(loc->ref_slot),
                          &allocated_regs);
             break;
         case TCG_CALL_ARG_BY_REF_N:
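To make the new slot arithmetic concrete: arg_slot_reg_p() says whether a call-argument slot lives in a register, and arg_slot_stk_ofs() converts the remaining slots into stack offsets. A standalone sketch with invented constants (six register slots, 8-byte stack words, offset base 0), not the real target values:

    #include <assert.h>
    #include <stdio.h>

    enum { NREG = 6, STACK_OFFSET = 0, WORD = 8 };   /* hypothetical host */

    static int slot_is_reg(unsigned slot)
    {
        return slot < NREG;
    }

    static int slot_stk_ofs(unsigned slot)
    {
        assert(slot >= NREG);                 /* only stack slots map here */
        return STACK_OFFSET + (slot - NREG) * WORD;
    }

    int main(void)
    {
        assert(slot_is_reg(5));               /* sixth argument register */
        assert(!slot_is_reg(6));              /* first stack slot ...     */
        assert(slot_stk_ofs(6) == 0);         /* ... lands at offset 0    */
        assert(slot_stk_ofs(8) == 16);        /* two words further up     */
        printf("slot 8 -> stack offset %d\n", slot_stk_ofs(8));
        return 0;
    }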