cputlb: used cached CPUClass in our hot-paths

Before: 35.912 s ±  0.168 s
  After: 35.565 s ±  0.087 s

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220811151413.3350684-5-alex.bennee@linaro.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20220923084803.498337-5-clg@kaod.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Alex Bennée 2022-09-23 10:48:01 +02:00 committed by Richard Henderson
parent b404ca370e
commit 8810ee2ac0

View file

@@ -1291,15 +1291,14 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     bool ok;
 
     /*
      * This is not a probe, so only valid return is success; failure
      * should result in exception + longjmp to the cpu loop.
      */
-    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
-                               access_type, mmu_idx, false, retaddr);
+    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
+                                    access_type, mmu_idx, false, retaddr);
     assert(ok);
 }
@@ -1307,9 +1306,8 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                         MMUAccessType access_type,
                                         int mmu_idx, uintptr_t retaddr)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
-    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
+                                          mmu_idx, retaddr);
 }
 
 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
@@ -1539,10 +1537,9 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
             CPUState *cs = env_cpu(env);
-            CPUClass *cc = CPU_GET_CLASS(cs);
 
-            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
-                                       mmu_idx, nonfault, retaddr)) {
+            if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+                                           mmu_idx, nonfault, retaddr)) {
                 /* Non-faulting page table read failed.  */
                 *phost = NULL;
                 return TLB_INVALID_MASK;