include/exec: Redefine tlb-flags with absolute values

Don't base the values on TARGET_PAGE_BITS_MIN, but do verify
that TLB_FLAGS_MASK does not overlap minimum page size.
All targets now have the same placement for these flags,
simplifying mmu management when we enable heterogeneous systems.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2025-03-27 15:05:12 -05:00
parent 33646c72c7
commit 6effa87475
2 changed files with 34 additions and 36 deletions

View file

@@ -49,6 +49,8 @@
#endif
#include "tcg/tcg-ldst.h"
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ((1u << TARGET_PAGE_BITS_MIN) - 1));
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

View file

@@ -19,54 +19,29 @@
#ifndef TLB_FLAGS_H
#define TLB_FLAGS_H
#include "exec/cpu-defs.h"
/*
* Flags returned for lookup of a TLB virtual address.
*/
#ifdef CONFIG_USER_ONLY
/*
* Allow some level of source compatibility with softmmu. We do not
* support any of the more exotic features, so only invalid pages may
* be signaled by probe_access_flags().
* Allow some level of source compatibility with softmmu.
* Invalid is set when the page does not have requested permissions.
* MMIO is set when we want the target helper to use the functional
* interface for load/store so that plugins see the access.
*/
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT 0
#define TLB_INVALID_MASK (1 << 0)
#define TLB_MMIO (1 << 1)
#define TLB_WATCHPOINT 0
#else
/*
* Flags stored in the low bits of the TLB virtual address.
* These are defined so that fast path ram access is all zeros.
* The flags all must be between TARGET_PAGE_BITS and
* maximum address alignment bit.
*
* Use TARGET_PAGE_BITS_MIN so that these bits are constant
* when TARGET_PAGE_BITS_VARY is in effect.
*
* The count, if not the placement of these bits is known
* to tcg/tcg-op-ldst.c, check_max_alignment().
*/
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
/*
* Set if TLB entry references a clean RAM page. The iotlb entry will
* contain the page physical address.
*/
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 3))
/*
* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_FORCE_SLOW)
/*
* Flags stored in CPUTLBEntryFull.slow_flags[x].
* TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
*/
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << 0)
/* Set if TLB entry contains a watchpoint. */
@@ -82,6 +57,27 @@
(TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED | \
TLB_DISCARD_WRITE | TLB_MMIO)
/*
* Flags stored in CPUTLBEntry.addr_idx[x].
* These must be above the largest alignment (64 bytes),
* and below the smallest page size (1024 bytes).
* This leaves bits [9:6] available for use.
*/
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 6)
/* Set if TLB entry references a clean RAM page. */
#define TLB_NOTDIRTY (1 << 7)
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW (1 << 8)
/*
* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_FORCE_SLOW)
/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);