accel/tcg: Add TCGCPUOps.tlb_fill_align

Add a new callback to handle softmmu paging.  Return the page
details directly, instead of passing them indirectly to
tlb_set_page.  Handle alignment simultaneously with paging so
that faults are handled with target-specific priority.

Route all calls to the two hooks through a tlb_fill_align
function local to cputlb.c.

No targets implement the new hook yet, and cputlb.c does not
yet use the new alignment check.

Reviewed-by: Helge Deller <deller@gmx.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2024-10-07 16:34:06 -07:00
Commit: f168808d7d (parent e5b063e81f)
4 changed files with 67 additions and 25 deletions
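
For illustration only (not code from this commit): the message above says the
alignment check now happens together with paging so that faults get
target-specific priority.  The standalone sketch below contrasts the two
orderings for an unaligned access to an unmapped page; both functions are
made up for the example.

/* Standalone illustration; compiles with any C compiler. */
#include <stdbool.h>
#include <stdio.h>

enum fault { FAULT_NONE, FAULT_ALIGN, FAULT_TRANSLATE };

/* Legacy behaviour: alignment is checked before paging, so the
 * alignment fault always wins. */
static enum fault legacy_order(bool misaligned, bool unmapped)
{
    if (misaligned) {
        return FAULT_ALIGN;
    }
    return unmapped ? FAULT_TRANSLATE : FAULT_NONE;
}

/* A hypothetical target whose architecture ranks translation faults
 * above alignment faults; tlb_fill_align lets it choose this order. */
static enum fault target_order(bool misaligned, bool unmapped)
{
    if (unmapped) {
        return FAULT_TRANSLATE;
    }
    return misaligned ? FAULT_ALIGN : FAULT_NONE;
}

int main(void)
{
    /* Prints "legacy 1 target 2": FAULT_ALIGN vs FAULT_TRANSLATE. */
    printf("legacy %d target %d\n",
           legacy_order(true, true), target_order(true, true));
    return 0;
}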

@@ -1221,22 +1221,35 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
 }
 
 /*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
  */
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
-                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+                           int mmu_idx, MemOp memop, int size,
+                           bool probe, uintptr_t ra)
 {
-    bool ok;
+    const TCGCPUOps *ops = cpu->cc->tcg_ops;
+    CPUTLBEntryFull full;
 
-    /*
-     * This is not a probe, so only valid return is success; failure
-     * should result in exception + longjmp to the cpu loop.
-     */
-    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
-                                    access_type, mmu_idx, false, retaddr);
-    assert(ok);
+    if (ops->tlb_fill_align) {
+        if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+                                memop, size, probe, ra)) {
+            tlb_set_page_full(cpu, mmu_idx, addr, &full);
+            return true;
+        }
+    } else {
+        /* Legacy behaviour is alignment before paging. */
+        if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+            ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+        }
+        if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+            return true;
+        }
+    }
+    assert(probe);
+    return false;
 }
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
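
For illustration only (not code from this commit): the legacy branch of
tlb_fill_align() above checks alignment with a power-of-two mask.  In the
sketch below, is_misaligned is a made-up stand-in for the inline expression
and align_bits stands in for memop_alignment_bits(memop); a MemOp of 0 maps
to zero alignment bits, so the callers converted later in this patch, which
pass 0, can never trigger do_unaligned_access().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_misaligned(uint64_t addr, unsigned align_bits)
{
    /* align_bits == 2 means a 4-byte alignment requirement. */
    return (addr & ((UINT64_C(1) << align_bits) - 1)) != 0;
}

int main(void)
{
    printf("%d\n", is_misaligned(0x1002, 2)); /* 1: misaligned 4-byte access */
    printf("%d\n", is_misaligned(0x1002, 0)); /* 0: mask is zero, never faults */
    return 0;
}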
@@ -1351,22 +1364,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
-            if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
-                                            mmu_idx, nonfault, retaddr)) {
+            if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                                0, fault_size, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
                 *phost = NULL;
                 *pfull = NULL;
                 return TLB_INVALID_MASK;
             }
 
-            /* TLB resize via tlb_fill may have moved the entry. */
+            /* TLB resize via tlb_fill_align may have moved the entry. */
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
 
             /*
              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
-             * to force the next access through tlb_fill.  We've just
-             * called tlb_fill, so we know that this entry *is* valid.
+             * to force the next access through tlb_fill_align.  We've just
+             * called tlb_fill_align, so we know that this entry *is* valid.
              */
             flags &= ~TLB_INVALID_MASK;
         }
@@ -1613,7 +1626,7 @@ typedef struct MMULookupLocals {
  *
  * Resolve the translation for the one page at @data.addr, filling in
  * the rest of @data with the results.  If the translation fails,
- * tlb_fill will longjmp out.  Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out.  Return true if the softmmu tlb for
  * @mmu_idx may have resized.
  */
 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
@@ -1631,7 +1644,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+            tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                           0, data->size, false, ra);
             maybe_resized = true;
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1821,8 +1835,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, size,
-                     MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+                           0, size, false, retaddr);
             index = tlb_index(cpu, mmu_idx, addr);
             tlbe = tlb_entry(cpu, mmu_idx, addr);
         }
@@ -1836,7 +1850,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      * but addr_read will only be -1 if PAGE_READ was unset.
      */
     if (unlikely(tlbe->addr_read == -1)) {
-        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+        tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+                       0, size, false, retaddr);
         /*
          * Since we don't support reads and writes to different
          * addresses, and we do have the proper page loaded for
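
For illustration only (not code from this commit): the callers converted
above rely on the return contract of tlb_fill_align(): true on success,
false only for a failed non-faulting probe, and no return at all when a
real fault is raised.  The toy model below mirrors that contract; walk()
and fill() are made-up names and the identity page table is invented for
the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a page table walk: only the first 16 pages are mapped. */
static bool walk(uint64_t addr, uint64_t *phys)
{
    if (addr < 16 * 4096) {
        *phys = addr;
        return true;
    }
    return false;
}

/* Mirrors the tlb_fill_align contract. */
static bool fill(uint64_t addr, bool probe, uint64_t *phys)
{
    if (walk(addr, phys)) {
        return true;
    }
    if (probe) {
        return false;              /* non-faulting page table read failed */
    }
    fprintf(stderr, "fault at 0x%llx\n", (unsigned long long)addr);
    exit(EXIT_FAILURE);            /* stand-in for exception + cpu loop longjmp */
}

int main(void)
{
    uint64_t phys;
    printf("probe mapped   -> %d\n", fill(0x1000, true, &phys));   /* 1 */
    printf("probe unmapped -> %d\n", fill(0x100000, true, &phys)); /* 0 */
    return 0;
}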

@@ -205,7 +205,7 @@ struct CPUClass {
  * so the layout is not as critical as that of CPUTLBEntry. This is
  * also why we don't want to combine the two structs.
  */
-typedef struct CPUTLBEntryFull {
+struct CPUTLBEntryFull {
     /*
      * @xlat_section contains:
      *     - in the lower TARGET_PAGE_BITS, a physical section number
@@ -261,7 +261,7 @@ typedef struct CPUTLBEntryFull {
             bool guarded;
         } arm;
     } extra;
-} CPUTLBEntryFull;
+};
 
 /*
  * Data elements that are per MMU mode, minus the bits accessed by

@@ -13,6 +13,7 @@
 #include "exec/breakpoint.h"
 #include "exec/hwaddr.h"
 #include "exec/memattrs.h"
+#include "exec/memop.h"
 #include "exec/mmu-access-type.h"
 #include "exec/vaddr.h"
 
@@ -131,6 +132,31 @@ struct TCGCPUOps {
      * same function signature.
      */
     bool (*cpu_exec_halt)(CPUState *cpu);
+    /**
+     * @tlb_fill_align: Handle a softmmu tlb miss
+     * @cpu: cpu context
+     * @out: output page properties
+     * @addr: virtual address
+     * @access_type: read, write or execute
+     * @mmu_idx: mmu context
+     * @memop: memory operation for the access
+     * @size: memory access size, or 0 for whole page
+     * @probe: test only, no fault
+     * @ra: host return address for exception unwind
+     *
+     * If the access is valid, fill in @out and return true.
+     * Otherwise if probe is true, return false.
+     * Otherwise raise an exception and do not return.
+     *
+     * The alignment check for the access is deferred to this hook,
+     * so that the target can determine the priority of any alignment
+     * fault with respect to other potential faults from paging.
+     * Zero may be passed for @memop to skip any alignment check
+     * for non-memory-access operations such as probing.
+     */
+    bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
+                           MMUAccessType access_type, int mmu_idx,
+                           MemOp memop, int size, bool probe, uintptr_t ra);
     /**
      * @tlb_fill: Handle a softmmu tlb miss
      *
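
For orientation only (no target implements the hook in this commit): the
sketch below shows roughly what a target-side implementation could look
like under the contract documented above.  Everything named foo_*, the
fault code, and the translate-before-alignment ordering are hypothetical;
only the hook signature, memop_alignment_bits() and the CPUTLBEntryFull
fields are taken from QEMU.  The usual target includes (qemu/osdep.h,
cpu.h, exec/memop.h, hw/core/tcg-cpu-ops.h) are assumed.

static bool foo_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
                                   vaddr addr, MMUAccessType access_type,
                                   int mmu_idx, MemOp memop, int size,
                                   bool probe, uintptr_t ra)
{
    hwaddr phys;
    int prot;
    int fault = foo_cpu_walk_page_table(cs, addr, access_type, mmu_idx,
                                        &phys, &prot);

    /* This hypothetical target ranks translation faults above alignment. */
    if (fault == 0 && (addr & ((1u << memop_alignment_bits(memop)) - 1))) {
        fault = FOO_FAULT_ALIGNMENT;
    }
    if (fault != 0) {
        if (probe) {
            return false;
        }
        foo_raise_mmu_fault(cs, fault, addr, ra);   /* does not return */
    }

    memset(out, 0, sizeof(*out));
    out->phys_addr = phys;
    out->prot = prot;
    out->lg_page_size = TARGET_PAGE_BITS;
    out->attrs = MEMTXATTRS_UNSPECIFIED;
    return true;
}

A target opting in would then point .tlb_fill_align at such a function in
its TCGCPUOps; the legacy tlb_fill/do_unaligned_access pair keeps working
unchanged for everyone else.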

@@ -40,6 +40,7 @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
 typedef struct CPUArchState CPUArchState;
 typedef struct CPUPluginState CPUPluginState;
 typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
 typedef struct DeviceState DeviceState;
 typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
 typedef struct DisasContextBase DisasContextBase;