cputlb: Move env->vtlb_index to env->tlb_d.vindex
The rest of the tlb victim cache is per-tlb; the next-use index should be as well.

Tested-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
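For context, the change hangs the victim cache's next-use index off the per-mmuidx TLB descriptor instead of keeping a single copy in CPUArchState. A minimal sketch of that shape is below; the field names large_page_addr, large_page_mask, and vindex come straight from the diff, while the struct name, the field order, and the target_ulong typedef are assumptions, not the actual definition in include/exec/cpu-defs.h.

/*
 * Sketch (not QEMU's actual header): the per-mmuidx descriptor that
 * gains the vindex field.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t target_ulong;   /* assumption: 64-bit guest addresses */

typedef struct CPUTLBDesc {
    /* Large-page region whose flush forces a full TLB flush. */
    target_ulong large_page_addr;
    target_ulong large_page_mask;
    /* Next victim-tlb slot to evict into, now tracked per mmuidx. */
    size_t vindex;
} CPUTLBDesc;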
This commit is contained in:
parent 1308e02671
commit d5363e5849

2 changed files with 5 additions and 5 deletions
accel/tcg/cputlb.c:

@@ -119,6 +119,7 @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
     env->tlb_d[mmu_idx].large_page_addr = -1;
     env->tlb_d[mmu_idx].large_page_mask = -1;
+    env->tlb_d[mmu_idx].vindex = 0;
 }
 
 /* This is OK because CPU architectures generally permit an
@@ -149,8 +150,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
     qemu_spin_unlock(&env->tlb_c.lock);
 
     cpu_tb_jmp_cache_clear(cpu);
-
-    env->vtlb_index = 0;
 }
 
 static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
@@ -667,7 +666,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * different page; otherwise just overwrite the stale data.
      */
     if (!tlb_hit_page_anyprot(te, vaddr_page)) {
-        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
         /* Evict the old entry into the victim tlb. */
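To make the round-robin indexing in the last hunk concrete, here is a small, self-contained demo of the pattern: each mmu index keeps its own next-use counter and overwrites victim slots modulo the cache size. All names here (VTLB_SIZE, NB_MMU_MODES, VictimEntry, tlb_evict) are hypothetical stand-ins for QEMU's CPU_VTLB_SIZE, CPUTLBEntry, and the surrounding code, not its real API.

/* Hypothetical stand-alone demo of per-mmuidx round-robin eviction;
 * only the indexing pattern matches the diff above. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>

#define VTLB_SIZE    8      /* stand-in for CPU_VTLB_SIZE */
#define NB_MMU_MODES 2

typedef struct {
    uint64_t addr;          /* stand-in for a CPUTLBEntry */
} VictimEntry;

typedef struct {
    size_t vindex;                    /* per-mmuidx next-use index */
    VictimEntry table[VTLB_SIZE];
} VictimTLB;

static VictimTLB vtlb[NB_MMU_MODES];

/* Evict one entry into the victim cache of the given mmu index. */
static void tlb_evict(int mmu_idx, uint64_t addr)
{
    unsigned vidx = vtlb[mmu_idx].vindex++ % VTLB_SIZE;
    vtlb[mmu_idx].table[vidx].addr = addr;
}

int main(void)
{
    /* Ten evictions into mmu_idx 0 wrap around: slots 0 and 1 end up
     * holding the two newest entries (0x9000 and 0xa000). */
    for (uint64_t a = 1; a <= 10; a++) {
        tlb_evict(0, a * 0x1000);
    }
    for (int i = 0; i < VTLB_SIZE; i++) {
        printf("mmu_idx 0, slot %d: 0x%" PRIx64 "\n",
               i, vtlb[0].table[i].addr);
    }
    return 0;
}

A plausible reading of the commit's rationale, given that the victim table itself is already indexed by mmu_idx: with a per-mmuidx counter, eviction traffic in one mmu mode no longer rotates the victim pointer used by another.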