tcg/cputlb: Remove non-synced variants of global TLB flushes

These are no longer used.

  tlb_flush_all_cpus: removed by previous commit.
  tlb_flush_page_all_cpus: removed by previous commit.

  tlb_flush_page_by_mmuidx_all_cpus: never used.
  tlb_flush_page_bits_by_mmuidx_all_cpus: never used, thus:
    tlb_flush_range_by_mmuidx_all_cpus: never used.
  tlb_flush_by_mmuidx_all_cpus: never used.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Nicholas Piggin 2024-03-27 00:04:20 +10:00
parent 82676f1fc4
commit 99cd12ced1
3 changed files with 21 additions and 196 deletions
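
Editorial aside, not part of the patch: a minimal sketch of how a hypothetical target helper would migrate from the removed broadcast flushes to the _synced variants that remain declared below. The helper names and the include path are assumptions, not code from this commit.

/* Hypothetical example, not from this commit: a target TLB-maintenance
 * helper that previously used the removed non-synced broadcast flushes
 * and now uses the _synced variants kept by this patch.
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"   /* assumed header for the declarations shown in this diff */

static void demo_invalidate_page_everywhere(CPUState *cs, vaddr addr)
{
    /* Was: tlb_flush_page_all_cpus(cs, addr);
     * Per the doc comment below, once the synced variant returns no CPU
     * will perform translations using the flushed entries.
     */
    tlb_flush_page_all_cpus_synced(cs, addr);
}

static void demo_invalidate_everything(CPUState *cs)
{
    /* Was: tlb_flush_all_cpus(cs); */
    tlb_flush_all_cpus_synced(cs);
}

With only the synced flavour left, callers get the stronger ordering guarantee by default and the duplicate non-synced entry points can go away.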


@@ -67,25 +67,16 @@ void tlb_destroy(CPUState *cpu);
* MMU indexes.
*/
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
* tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
* tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of the specified CPU, for all MMU
* indexes like tlb_flush_page_all_cpus except the source vCPUs work
* is scheduled as safe work meaning all flushes will be complete once
* the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB.
* Flush one page from the TLB of all CPUs, for all
* MMU indexes.
*
* When this function returns, no CPUs will subsequently perform
* translations using the flushed TLBs.
*/
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
@@ -98,19 +89,14 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
* use one of the other functions for efficiency.
*/
void tlb_flush(CPUState *cpu);
/**
* tlb_flush_all_cpus:
* @cpu: src CPU of the flush
*/
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
* tlb_flush_all_cpus_synced:
* @cpu: src CPU of the flush
*
* Like tlb_flush_all_cpus except this except the source vCPUs work is
* scheduled as safe work meaning all flushes will be complete once
* the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB.
* Flush the entire TLB for all CPUs, for all MMU indexes.
*
* When this function returns, no CPUs will subsequently perform
* translations using the flushed TLBs.
*/
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
@@ -125,27 +111,16 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus:
* tlb_flush_page_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @addr: virtual address of page to be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
*/
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @addr: virtual address of page to be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush one page from the TLB of all CPUs, for the specified MMU
* indexes like tlb_flush_page_by_mmuidx_all_cpus except the source
* vCPUs work is scheduled as safe work meaning all flushes will be
* complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB.
* When this function returns, no CPUs will subsequently perform
* translations using the flushed TLBs.
*/
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap);
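
A purely illustrative sketch (not part of the patch) of calling the surviving synced variant with an @idxmap; the DEMO_MMU_IDX_* values are placeholders, not real QEMU or target constants.

/* Hypothetical usage: flush one page from two assumed MMU indexes on all
 * CPUs.  DEMO_MMU_IDX_* are made-up placeholders for a target's own
 * MMU-index definitions.
 */
#define DEMO_MMU_IDX_KERNEL 0
#define DEMO_MMU_IDX_USER   1

static void demo_flush_page_both_modes(CPUState *cs, vaddr addr)
{
    uint16_t idxmap = (1u << DEMO_MMU_IDX_KERNEL) | (1u << DEMO_MMU_IDX_USER);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, addr, idxmap);
}
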
@@ -159,25 +134,16 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
* MMU indexes.
*/
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
* tlb_flush_by_mmuidx_all_cpus:
* @cpu: Originating CPU of the flush
* @idxmap: bitmap of MMU indexes to flush
*
* Flush all entries from all TLBs of all CPUs, for the specified
* MMU indexes.
*/
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
* tlb_flush_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @idxmap: bitmap of MMU indexes to flush
*
* Flush all entries from all TLBs of all CPUs, for the specified
* MMU indexes like tlb_flush_by_mmuidx_all_cpus except except the source
* vCPUs work is scheduled as safe work meaning all flushes will be
* complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB.
* Flush all entries from the TLB of all CPUs, for the specified
* MMU indexes.
*
* When this function returns, no CPUs will subsequently perform
* translations using the flushed TLBs.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
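
Another illustrative sketch, not from the commit: flushing every entry for a single assumed MMU index on all CPUs, as a target might do after a translation-table base change.

/* Hypothetical sketch: drop every entry for one assumed MMU index on all
 * CPUs.  The index value 0 is an arbitrary placeholder.
 */
static void demo_flush_one_regime(CPUState *cs)
{
    tlb_flush_by_mmuidx_all_cpus_synced(cs, 1u << 0);
}
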
@@ -194,8 +160,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
(CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
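
One more hedged sketch (not part of the patch): the _bits_ variant matches pages while ignoring virtual-address bits at or above @bits, which a target could use when the high bits of an address are not translated. The value 56 below is an assumed, illustrative choice.

/* Hypothetical sketch: flush a page on all CPUs while ignoring address
 * bits 56 and above (e.g. a target that does not translate the top byte).
 */
static void demo_flush_page_ignore_top_byte(CPUState *cs, vaddr addr,
                                            uint16_t idxmap)
{
    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, addr, idxmap, 56);
}
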
@@ -215,9 +179,6 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
unsigned bits);
/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
vaddr len, uint16_t idxmap,
unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
vaddr len,
@@ -290,18 +251,12 @@ static inline void tlb_destroy(CPUState *cpu)
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
@@ -313,20 +268,11 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
vaddr addr,
uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
@@ -337,12 +283,6 @@ static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
vaddr addr,
uint16_t idxmap,
unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits)
@@ -353,13 +293,6 @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
vaddr addr,
vaddr len,
uint16_t idxmap,
unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
vaddr len,