Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-03 15:53:54 -06:00)
cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
We had two different mechanisms to force a recheck of the tlb.

Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit that would immediately set TLB_INVALID_MASK, which automatically means that a second check of the tlb entry fails. We can use the same mechanism to handle small pages. Conserve TLB_* bits by removing TLB_RECHECK.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
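To make the mechanism concrete, here is an illustrative C sketch (not the literal patch text) of how the existing PAGE_WRITE_INV path forces a recheck. The names follow the shape of tlb_set_page_with_attrs() in accel/tcg/cputlb.c, but are used here only for illustration: because the fast-path comparison masks in TLB_INVALID_MASK, an entry whose comparand has that bit set can never match, so the access falls back to the slow path and the MMU lookup is repeated.

/* Sketch only: setting TLB_INVALID_MASK in the write comparand
 * guarantees the next fast-path comparison misses, so tlb_fill()
 * redoes the MMU lookup.  Small pages can reuse this same trick
 * instead of needing a dedicated TLB_RECHECK bit. */
if (prot & PAGE_WRITE_INV) {
    tn.addr_write |= TLB_INVALID_MASK;  /* force a miss on the next write */
}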
This commit is contained in:
parent 0026348b48
commit 30d7e098d5
2 changed files with 24 additions and 67 deletions
include/exec/cpu-all.h
@@ -329,14 +329,11 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_NOTDIRTY    (1 << (TARGET_PAGE_BITS - 2))
 /* Set if TLB entry is an IO callback. */
 #define TLB_MMIO        (1 << (TARGET_PAGE_BITS - 3))
-/* Set if TLB entry must have MMU lookup repeated for every access */
-#define TLB_RECHECK     (1 << (TARGET_PAGE_BITS - 4))
 
 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
-#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-                        | TLB_RECHECK)
+#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
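Dropping TLB_RECHECK from TLB_FLAGS_MASK is safe because the hit test already folds TLB_INVALID_MASK into its comparison mask, so an entry carrying that bit can never produce a fast-path hit. A sketch of the helper whose comment the hunk truncates above, matching the shape of QEMU's tlb_hit_page() and assumed here for illustration:

static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    /* Any flag bit set in tlb_addr below TARGET_PAGE_BITS, including
     * TLB_INVALID_MASK, makes this page-aligned comparison fail and
     * sends the access back through the slow path. */
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}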