cpu: Replace ENV_GET_CPU with env_cpu
Now that we have both ArchCPU and CPUArchState, we can define this
generically instead of via macro in each target's cpu.h.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 2161a612b4
commit 29a0af618d
59 changed files with 175 additions and 197 deletions
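For context, the generic replacement works because each target's ArchCPU now embeds its CPUState and its CPUArchState, so the CPUState can be recovered from the env pointer with container_of. The definition itself is not part of the hunks shown below; the following is only a minimal sketch of what such a helper looks like, assuming the embedded CPUState field is named parent_obj and the embedded CPUArchState field is named env.

```c
/* Sketch only: assumes ArchCPU embeds CPUState as "parent_obj" and
 * CPUArchState as "env", as arranged earlier in this series. */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    /* Recover the containing ArchCPU from its embedded env field. */
    return container_of(env, ArchCPU, env);
}

static inline CPUState *env_cpu(CPUArchState *env)
{
    /* The ArchCPU begins with a CPUState, so take its address. */
    return &env_archcpu(env)->parent_obj;
}
```

With this in place, every ENV_GET_CPU(env) macro use below becomes a plain env_cpu(env) call, independent of the target.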
@@ -62,21 +62,21 @@
 #define ATOMIC_TRACE_RMW do {                                           \
         uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
                                                                         \
-        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);     \
-        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr,            \
+        trace_guest_mem_before_exec(env_cpu(env), addr, info);         \
+        trace_guest_mem_before_exec(env_cpu(env), addr,                \
                                     info | TRACE_MEM_ST);              \
     } while (0)
 
 #define ATOMIC_TRACE_LD do {                                            \
         uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
                                                                         \
-        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);     \
+        trace_guest_mem_before_exec(env_cpu(env), addr, info);         \
     } while (0)
 
 # define ATOMIC_TRACE_ST do {                                           \
         uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true); \
                                                                         \
-        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);     \
+        trace_guest_mem_before_exec(env_cpu(env), addr, info);         \
     } while (0)
 
 /* Define host-endian atomic operations. Note that END is used within
@@ -415,7 +415,7 @@ static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
     int k;
 
-    assert_cpu_is_self(ENV_GET_CPU(env));
+    assert_cpu_is_self(env_cpu(env));
     for (k = 0; k < CPU_VTLB_SIZE; k++) {
         if (tlb_flush_entry_locked(&d->vtable[k], page)) {
             tlb_n_used_entries_dec(env, mmu_idx);
@@ -883,7 +883,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, int size)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     hwaddr mr_offset;
     MemoryRegionSection *section;
     MemoryRegion *mr;
@@ -927,7 +927,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                       int mmu_idx, uint64_t val, target_ulong addr,
                       uintptr_t retaddr, int size)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     hwaddr mr_offset;
     MemoryRegionSection *section;
     MemoryRegion *mr;
@@ -979,7 +979,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
 {
     size_t vidx;
 
-    assert_cpu_is_self(ENV_GET_CPU(env));
+    assert_cpu_is_self(env_cpu(env));
     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
         target_ulong cmp;
@@ -1029,7 +1029,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 
     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
         if (!VICTIM_TLB_HIT(addr_code, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
+            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
         }
@@ -1067,7 +1067,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     if (!tlb_hit(tlb_addr_write(entry), addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }
     }
@@ -1101,7 +1101,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
     uintptr_t index = tlb_index(env, mmu_idx, addr);
 
     if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
-        CPUState *cs = ENV_GET_CPU(env);
+        CPUState *cs = env_cpu(env);
         CPUClass *cc = CPU_GET_CLASS(cs);
 
         if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
@@ -1144,7 +1144,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     /* Enforce guest required alignment. */
     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
 
@@ -1160,7 +1160,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     /* Check TLB entry and enforce page permissions. */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             tlbe = tlb_entry(env, mmu_idx, addr);
@@ -1177,7 +1177,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
     /* Let the guest notice RMW on a write-only page. */
     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
-        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
+        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                  mmu_idx, retaddr);
         /* Since we don't support reads and writes to different addresses,
            and we do have the proper page loaded for write, this shouldn't
@@ -1190,7 +1190,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     ndi->active = false;
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         ndi->active = true;
-        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
+        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
                                       qemu_ram_addr_from_host_nofail(hostaddr),
                                       1 << s_bits);
     }
@@ -1198,7 +1198,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     return hostaddr;
 
  stop_the_world:
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
+    cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }
 
 #ifdef TARGET_WORDS_BIGENDIAN
@@ -1263,7 +1263,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
+        cpu_unaligned_access(env_cpu(env), addr, access_type,
                              mmu_idx, retaddr);
     }
 
@@ -1271,7 +1271,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(ENV_GET_CPU(env), addr, size,
+            tlb_fill(env_cpu(env), addr, size,
                      access_type, mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1292,7 +1292,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
              * repeat the MMU check here. This tlb_fill() call might
              * longjump out if this access should cause a guest exception.
              */
-            tlb_fill(ENV_GET_CPU(env), addr, size,
+            tlb_fill(env_cpu(env), addr, size,
                      access_type, mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1511,7 +1511,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
 
@@ -1519,7 +1519,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1540,7 +1540,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              * repeat the MMU check here. This tlb_fill() call might
              * longjump out if this access should cause a guest exception.
              */
-            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
@@ -1580,7 +1580,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         if (!tlb_hit_page(tlb_addr2, page2)
             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
                                page2 & TARGET_PAGE_MASK)) {
-            tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
+            tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }
 
@@ -146,7 +146,7 @@ uint64_t HELPER(ctpop_i64)(uint64_t arg)
 
 void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
@@ -165,5 +165,5 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 
 void HELPER(exit_atomic)(CPUArchState *env)
 {
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
+    cpu_loop_exit_atomic(env_cpu(env), GETPC());
 }
@@ -1732,7 +1732,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
     tcg_func_start(tcg_ctx);
 
-    tcg_ctx->cpu = ENV_GET_CPU(env);
+    tcg_ctx->cpu = env_cpu(env);
     gen_intermediate_code(cpu, tb, max_insns);
     tcg_ctx->cpu = NULL;
 
@@ -680,7 +680,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 {
     /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
+        cpu_loop_exit_atomic(env_cpu(env), retaddr);
     }
     helper_retaddr = retaddr;
     return g2h(addr);