tcg: introduce dynamic TLB sizing

Disabled in all TCG backends for now.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20190116170114.26802-3-cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author:    Emilio G. Cota <cota@braap.org>, 2019-01-16 12:01:13 -05:00
Committer: Richard Henderson <richard.henderson@linaro.org>
Parent:    3cea94bbc9
Commit:    86e1eff8bc
12 changed files with 282 additions and 7 deletions

@@ -67,6 +67,28 @@ typedef uint64_t target_ulong;
#define CPU_TLB_ENTRY_BITS 5
#endif
#if TCG_TARGET_IMPLEMENTS_DYN_TLB
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8
# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
* Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
* 2**34 == 16G of address space. This is roughly what one would expect a
* TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
* Skylake's Level-2 STLB has 16 1G entries.
* Also, make sure we do not size the TLB past the guest's address space.
*/
# define CPU_TLB_DYN_MAX_BITS \
MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
# endif
#else /* !TCG_TARGET_IMPLEMENTS_DYN_TLB */
/* TCG_TARGET_TLB_DISPLACEMENT_BITS is used in CPU_TLB_BITS to ensure that
* the TLB is not unnecessarily small, but still small enough for the
* TLB lookup instruction sequence used by the TCG target.
@@ -98,6 +120,7 @@ typedef uint64_t target_ulong;
NB_MMU_MODES <= 8 ? 3 : 4))
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
#endif /* TCG_TARGET_IMPLEMENTS_DYN_TLB */
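
To make the CPU_TLB_DYN_MAX_BITS bound above concrete, here is a worked example; the 48-bit guest address-space figure is an illustrative assumption, not something fixed by this patch:

/*
 * Example (illustrative values): a 64-bit host running a guest with
 * TARGET_PAGE_BITS == 12 and TARGET_VIRT_ADDR_SPACE_BITS == 48:
 *
 *   CPU_TLB_DYN_MAX_BITS = MIN(22, 48 - 12) = MIN(22, 36) = 22
 *
 * so each MMU mode's TLB may grow to at most 2**22 entries, covering
 * 2**(22 + 12) bytes == 16 GiB of guest virtual address space, while
 * CPU_TLB_DYN_MIN_BITS == 6 keeps it from shrinking below 64 entries.
 */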
typedef struct CPUTLBEntry {
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
@@ -141,6 +164,18 @@ typedef struct CPUIOTLBEntry {
MemTxAttrs attrs;
} CPUIOTLBEntry;
/**
* struct CPUTLBWindow
* @begin_ns: host time (in ns) at the beginning of the time window
* @max_entries: maximum number of entries observed in the window
*
* See also: tlb_mmu_resize_locked()
*/
typedef struct CPUTLBWindow {
int64_t begin_ns;
size_t max_entries;
} CPUTLBWindow;
typedef struct CPUTLBDesc {
/*
* Describe a region covering all of the large pages allocated
@@ -152,6 +187,10 @@ typedef struct CPUTLBDesc {
target_ulong large_page_mask;
/* The next index to use in the tlb victim table. */
size_t vindex;
#if TCG_TARGET_IMPLEMENTS_DYN_TLB
CPUTLBWindow window;
size_t n_used_entries;
#endif
} CPUTLBDesc;
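
The window and n_used_entries fields feed the resizing decision in tlb_mmu_resize_locked(), which this hunk only references. The following is a simplified, self-contained sketch of how such a window-based heuristic can work; the thresholds, the 100 ms window length, and all names (Window, TlbDesc, pick_new_size) are illustrative assumptions, not the values used by the actual patch:

/* Simplified sketch only -- not the code added by this commit. */
#include <stddef.h>
#include <stdint.h>

#define DYN_MIN_BITS   6                      /* cf. CPU_TLB_DYN_MIN_BITS */
#define DYN_MAX_BITS   22                     /* cf. CPU_TLB_DYN_MAX_BITS */
#define WINDOW_LEN_NS  (100 * 1000 * 1000)    /* assumed 100 ms window */

typedef struct Window {
    int64_t begin_ns;       /* host time when the current window started */
    size_t max_entries;     /* peak number of used entries seen so far */
} Window;

typedef struct TlbDesc {
    Window window;
    size_t n_used_entries;  /* entries currently valid in the table */
    size_t n_entries;       /* current table size, always a power of two */
} TlbDesc;

/*
 * Pick a new table size from the occupancy observed during the window:
 * grow when the table is mostly full, shrink when a whole window passed
 * with the table mostly empty.
 */
static size_t pick_new_size(TlbDesc *d, int64_t now_ns)
{
    size_t new_size = d->n_entries;
    size_t used_pct;

    /* Track the high-water mark of occupancy within this window. */
    if (d->n_used_entries > d->window.max_entries) {
        d->window.max_entries = d->n_used_entries;
    }
    used_pct = d->window.max_entries * 100 / d->n_entries;

    if (used_pct > 70 && d->n_entries < ((size_t)1 << DYN_MAX_BITS)) {
        new_size = d->n_entries << 1;              /* mostly full: grow */
    } else if (now_ns - d->window.begin_ns > WINDOW_LEN_NS) {
        if (used_pct < 35 && d->n_entries > ((size_t)1 << DYN_MIN_BITS)) {
            new_size = d->n_entries >> 1;          /* mostly idle: shrink */
        }
        /* Start a new observation window either way. */
        d->window.begin_ns = now_ns;
        d->window.max_entries = d->n_used_entries;
    }
    return new_size;
}

On a size change, the caller would then reallocate the per-MMU-index tlb_table and iotlb arrays and recompute tlb_mask accordingly.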
/*
@@ -176,6 +215,20 @@ typedef struct CPUTLBCommon {
size_t elide_flush_count;
} CPUTLBCommon;
#if TCG_TARGET_IMPLEMENTS_DYN_TLB
# define CPU_TLB \
/* tlb_mask[i] contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ \
uintptr_t tlb_mask[NB_MMU_MODES]; \
CPUTLBEntry *tlb_table[NB_MMU_MODES];
# define CPU_IOTLB \
CPUIOTLBEntry *iotlb[NB_MMU_MODES];
#else
# define CPU_TLB \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];
# define CPU_IOTLB \
CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];
#endif
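
Because tlb_mask[i] stores (n_entries - 1) << CPU_TLB_ENTRY_BITS, a lookup can recover the entry index with one shift and one AND. A minimal sketch of that arithmetic follows; the helper name and its parameters are hypothetical, for illustration only:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch only: tlb_mask is assumed to hold (n_entries - 1) << entry_bits,
 * exactly as the comment on tlb_mask[] above describes.
 */
static inline size_t tlb_entry_index(uintptr_t tlb_mask, uint64_t addr,
                                     unsigned page_bits, unsigned entry_bits)
{
    size_t size_mask = tlb_mask >> entry_bits;   /* == n_entries - 1 */

    return (addr >> page_bits) & size_mask;      /* index into tlb_table[i] */
}

Keeping the mask pre-shifted also lets a backend AND the page-shifted address directly against tlb_mask to obtain a byte offset into tlb_table[i], which is presumably why the mask is stored in this form rather than as a plain entry count.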
/*
* The meaning of each of the MMU modes is defined in the target code.
* Note that NB_MMU_MODES is not yet defined; we can only reference it
@@ -184,9 +237,9 @@ typedef struct CPUTLBCommon {
#define CPU_COMMON_TLB \
CPUTLBCommon tlb_c; \
CPUTLBDesc tlb_d[NB_MMU_MODES]; \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
CPU_TLB \
CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
CPU_IOTLB \
CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];
#else