Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-04 08:13:54 -06:00
exec: Declare tlb_flush*() in 'exec/cputlb.h'
Move CPU TLB related methods to "exec/cputlb.h".

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Message-ID: <20241114011310.3615-19-philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent bcde46f57d
commit 6ff5da1600
34 changed files with 224 additions and 211 deletions
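For code that previously picked these prototypes up through "exec/exec-all.h", the visible effect is an include change. A minimal consumer sketch (the helper name is invented for illustration):

/* Hypothetical consumer: after this commit, take the TLB flush
 * declarations from "exec/cputlb.h". */
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "exec/cputlb.h"

static void my_reset_mmu(CPUState *cs)    /* invented helper */
{
    tlb_flush(cs);    /* drop all TLB entries for this CPU */
}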
@@ -27,190 +27,6 @@
 #include "exec/mmu-access-type.h"
 #include "exec/translation-block.h"
 
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-/* cputlb.c */
-/**
- * tlb_flush_page:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of the specified CPU, for all
- * MMU indexes.
- */
-void tlb_flush_page(CPUState *cpu, vaddr addr);
-/**
- * tlb_flush_page_all_cpus_synced:
- * @cpu: src CPU of the flush
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of all CPUs, for all
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
-/**
- * tlb_flush:
- * @cpu: CPU whose TLB should be flushed
- *
- * Flush the entire TLB for the specified CPU. Most CPU architectures
- * allow the implementation to drop entries from the TLB at any time
- * so this is generally safe. If more selective flushing is required
- * use one of the other functions for efficiency.
- */
-void tlb_flush(CPUState *cpu);
-/**
- * tlb_flush_all_cpus_synced:
- * @cpu: src CPU of the flush
- *
- * Flush the entire TLB for all CPUs, for all MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_all_cpus_synced(CPUState *src_cpu);
-/**
- * tlb_flush_page_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
-                              uint16_t idxmap);
-/**
- * tlb_flush_page_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush one page from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx:
- * @cpu: CPU whose TLB should be flushed
- * @wait: If true ensure synchronisation by exiting the cpu_loop
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of the specified CPU, for the specified
- * MMU indexes.
- */
-void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from the TLB of all CPUs, for the specified
- * MMU indexes.
- *
- * When this function returns, no CPUs will subsequently perform
- * translations using the flushed TLBs.
- */
-void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
-
-/**
- * tlb_flush_page_bits_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
- */
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
-                                   uint16_t idxmap, unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
-    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
-
-/**
- * tlb_flush_range_by_mmuidx
- * @cpu: CPU whose TLB should be flushed
- * @addr: virtual address of the start of the range to be flushed
- * @len: length of range to be flushed
- * @idxmap: bitmap of mmu indexes to flush
- * @bits: number of significant bits in address
- *
- * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
- * comparing only the low @bits worth of each virtual page.
- */
-void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                               vaddr len, uint16_t idxmap,
-                               unsigned bits);
-
-/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                               vaddr addr,
-                                               vaddr len,
-                                               uint16_t idxmap,
-                                               unsigned bits);
-
-#else
-static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
-{
-}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
-{
-}
-static inline void tlb_flush(CPUState *cpu)
-{
-}
-static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
-{
-}
-static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            vaddr addr, uint16_t idxmap)
-{
-}
-
-static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                            vaddr addr,
-                                                            uint16_t idxmap)
-{
-}
-static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                       uint16_t idxmap)
-{
-}
-static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
-                                                 vaddr addr,
-                                                 uint16_t idxmap,
-                                                 unsigned bits)
-{
-}
-static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
-                                              uint16_t idxmap, unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
-                                             vaddr len, uint16_t idxmap,
-                                             unsigned bits)
-{
-}
-static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                             vaddr addr,
-                                                             vaddr len,
-                                                             uint16_t idxmap,
-                                                             unsigned bits)
-{
-}
-#endif
-
 #if defined(CONFIG_TCG)
 
 /**
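As a usage sketch only (not part of this commit; the MY_MMU_IDX_* names and the helper are invented), a target with two MMU indexes could flush one guest page from both at once:

#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "exec/cputlb.h"

#define MY_MMU_IDX_USER   0    /* invented index name */
#define MY_MMU_IDX_KERNEL 1    /* invented index name */

/* Flush one guest page from both MMU indexes of @cs only. */
static void my_invalidate_page(CPUState *cs, vaddr page)
{
    uint16_t idxmap = (1 << MY_MMU_IDX_USER) | (1 << MY_MMU_IDX_KERNEL);
    tlb_flush_page_by_mmuidx(cs, page, idxmap);
}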
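The range variants additionally take a length and a significant-bit count. A hedged sketch of a synced, cross-CPU flush of a 64 KiB window on MMU index 0, assuming a target whose virtual addresses carry 48 significant bits:

/* Hypothetical: flush [start, start + 64 KiB) from MMU index 0 on all
 * CPUs; only the low 48 bits of each virtual page are compared. */
static void my_invalidate_window(CPUState *cs, vaddr start)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(cs, start, 64 * 1024,
                                              1 << 0 /* MMU index 0 */, 48);
}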