tcg: Add host memory barriers to cpu_ldst.h interfaces
Bring the helpers into line with the rest of tcg in respecting
guest memory ordering.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent c914d46d0a
commit f86e8f3d13
3 changed files with 54 additions and 0 deletions
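
The change is mechanical: every cpu_ldst.h load helper now requests ordering against earlier loads and stores (TCG_MO_LD_LD | TCG_MO_ST_LD) before it reads, and every store helper requests TCG_MO_LD_ST | TCG_MO_ST_ST before it writes, through a new cpu_req_mo() macro. A minimal standalone sketch of that shape, not QEMU code — the TCGBar bit values mirror tcg/tcg.h, while model_ld1/model_st1 and the trivialized cpu_req_mo() stand-in are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* TCGBar bits, mirroring tcg/tcg.h (SPARC membar-style masks). */
enum {
    TCG_MO_LD_LD = 0x01,   /* loads ordered after earlier loads   */
    TCG_MO_ST_LD = 0x02,   /* loads ordered after earlier stores  */
    TCG_MO_LD_ST = 0x04,   /* stores ordered after earlier loads  */
    TCG_MO_ST_ST = 0x08,   /* stores ordered after earlier stores */
};

/* Stand-in for the real macro: here it only records the request. */
static unsigned last_req;
static void cpu_req_mo(unsigned type) { last_req = type; }

/* Shape of a load helper after this patch: barrier request first,
 * then the (here trivialized) translation and access. */
static uint8_t model_ld1(const uint8_t *guest_mem, uintptr_t addr)
{
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    return guest_mem[addr];
}

static void model_st1(uint8_t *guest_mem, uintptr_t addr, uint8_t val)
{
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    guest_mem[addr] = val;
}

int main(void)
{
    uint8_t mem[16] = { 0 };
    model_st1(mem, 0, 42);
    printf("store requested barrier mask 0x%x\n", last_req);
    printf("loaded %u, ", (unsigned)model_ld1(mem, 0));
    printf("load requested barrier mask 0x%x\n", last_req);
    return 0;
}

The real macro, added to accel/tcg/internal.h below, issues an actual host barrier only when the requested mask survives filtering against what guest and host already guarantee.
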
accel/tcg/cputlb.c
@@ -2339,6 +2339,7 @@ static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
     MMULookupLocals l;
     bool crosspage;
 
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2360,6 +2361,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     uint8_t a, b;
 
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2390,6 +2392,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint32_t ret;
 
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2417,6 +2420,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint64_t ret;
 
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2469,6 +2473,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
     Int128 ret;
     int first;
 
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
     if (likely(!crosspage)) {
         /* Perform the load host endian. */
@@ -2802,6 +2807,7 @@ void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
     bool crosspage;
 
     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2815,6 +2821,7 @@ static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
     bool crosspage;
     uint8_t a, b;
 
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2843,6 +2850,7 @@ static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
     MMULookupLocals l;
     bool crosspage;
 
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2870,6 +2878,7 @@ static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
     MMULookupLocals l;
     bool crosspage;
 
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2899,6 +2908,7 @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
     uint64_t a, b;
     int first;
 
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         /* Swap to host endian if necessary, then store. */
accel/tcg/internal.h
@@ -78,4 +78,38 @@ extern int64_t max_advance;
 
 extern bool one_insn_per_tb;
 
+/**
+ * tcg_req_mo:
+ * @type: TCGBar
+ *
+ * Filter @type to the barrier that is required for the guest
+ * memory ordering vs the host memory ordering.  A non-zero
+ * result indicates that some barrier is required.
+ *
+ * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
+ * guest requires strict ordering.
+ *
+ * This is a macro so that it's constant even without optimization.
+ */
+#ifdef TCG_GUEST_DEFAULT_MO
+# define tcg_req_mo(type) \
+    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
+#else
+# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
+#endif
+
+/**
+ * cpu_req_mo:
+ * @type: TCGBar
+ *
+ * If tcg_req_mo indicates a barrier for @type is required
+ * for the guest memory model, issue a host memory barrier.
+ */
+#define cpu_req_mo(type)          \
+    do {                          \
+        if (tcg_req_mo(type)) {   \
+            smp_mb();             \
+        }                         \
+    } while (0)
+
 #endif /* ACCEL_TCG_INTERNAL_H */
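
To see what tcg_req_mo() filters, consider a TSO guest (everything but store-load implicitly ordered) running on a weakly ordered host. The sketch below reuses the macro logic from the hunk above with assumed example values — in QEMU, TCG_GUEST_DEFAULT_MO comes from the target (e.g. x86 defines TCG_MO_ALL & ~TCG_MO_ST_LD) and TCG_TARGET_DEFAULT_MO from the TCG backend (0 for the aarch64 backend); smp_mb() is modeled here with a C11 seq-cst fence:

#include <stdio.h>
#include <stdatomic.h>

enum {
    TCG_MO_LD_LD = 0x01,
    TCG_MO_ST_LD = 0x02,
    TCG_MO_LD_ST = 0x04,
    TCG_MO_ST_ST = 0x08,
    TCG_MO_ALL   = 0x0f,
};

/* Assumed example configuration: x86-like (TSO) guest on a weak host. */
#define TCG_GUEST_DEFAULT_MO  (TCG_MO_ALL & ~TCG_MO_ST_LD)
#define TCG_TARGET_DEFAULT_MO 0

/* Same logic as the patch: keep only the bits the guest needs
 * and the host does not already provide. */
#define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)

#define cpu_req_mo(type)                                  \
    do {                                                  \
        if (tcg_req_mo(type)) {                           \
            atomic_thread_fence(memory_order_seq_cst);    \
        }                                                 \
    } while (0)

int main(void)
{
    /* Load helper mask: 0x03 & 0x0d & ~0 = 0x01 -> barrier needed. */
    printf("load  barrier mask: 0x%x\n",
           (unsigned)tcg_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD));
    /* Store helper mask: 0x0c & 0x0d & ~0 = 0x0c -> barrier needed. */
    printf("store barrier mask: 0x%x\n",
           (unsigned)tcg_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST));
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);  /* issues a full fence here */
    return 0;
}

With TCG_TARGET_DEFAULT_MO set to the same TSO mask (an x86 host), both masks filter to zero, the condition is a compile-time constant zero, and no fence is ever executed — which is why same-ordering configurations pay nothing for this change.
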
accel/tcg/user-exec.c
@@ -914,6 +914,7 @@ static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
     uint8_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_8);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
@@ -947,6 +948,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
     uint16_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_16);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_2(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -984,6 +986,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
     uint32_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_32);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_4(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1021,6 +1024,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
     uint64_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_64);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_8(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1052,6 +1056,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
     Int128 ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_16(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1087,6 +1092,7 @@ static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_8);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     clear_helper_retaddr();
@@ -1111,6 +1117,7 @@ static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_16);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1139,6 +1146,7 @@ static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_32);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1167,6 +1175,7 @@ static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_64);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
    if (mop & MO_BSWAP) {
@@ -1195,6 +1204,7 @@ static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
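
The guest-visible effect is easiest to see with a message-passing litmus test. When a strongly ordered guest runs on a weakly ordered host and the helpers issue no barriers, the host CPU may reorder the two relaxed accesses on either side, so the consumer can observe flag == 1 with data still 0; the cpu_req_mo() calls added above forbid that. An illustrative standalone C11 version — not QEMU code; the seq-cst fences stand in for the barriers the helpers now emit before each access:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int data, flag;

static void *producer(void *arg)
{
    (void)arg;
    atomic_store_explicit(&data, 42, memory_order_relaxed);
    /* Stands in for cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST),
     * issued by the store helper before writing flag. */
    atomic_thread_fence(memory_order_seq_cst);
    atomic_store_explicit(&flag, 1, memory_order_relaxed);
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    while (!atomic_load_explicit(&flag, memory_order_relaxed)) {
        /* spin until the producer publishes */
    }
    /* Stands in for cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD),
     * issued by the load helper before reading data. */
    atomic_thread_fence(memory_order_seq_cst);
    printf("data = %d\n", atomic_load_explicit(&data, memory_order_relaxed));
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);   /* with the fences, always prints data = 42 */
    return 0;
}
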