tcg/cputlb: Remove non-synced variants of global TLB flushes

These are no longer used.

  tlb_flush_all_cpus: removed by previous commit.
  tlb_flush_page_all_cpus: removed by previous commit.

  tlb_flush_page_by_mmuidx_all_cpus: never used.
  tlb_flush_page_bits_by_mmuidx_all_cpus: never used, thus:
    tlb_flush_range_by_mmuidx_all_cpus: never used.
    tlb_flush_by_mmuidx_all_cpus: never used.
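
For callers, only the *_synced variants remain. A minimal sketch of the
resulting usage, for illustration only (the helper name is hypothetical and
the include paths are assumed rather than taken from this patch):

    #include "qemu/osdep.h"
    #include "exec/exec-all.h"   /* assumed home of the *_synced declarations */

    /*
     * Hypothetical target helper: broadcast a one-page flush to every vCPU.
     * With the non-synced variants gone, the flush is guaranteed to have
     * completed on all vCPUs before the source vCPU executes its next TB.
     */
    static void demo_broadcast_page_flush(CPUState *cs, vaddr addr)
    {
        tlb_flush_page_all_cpus_synced(cs, addr);
    }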

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Nicholas Piggin 2024-03-27 00:04:20 +10:00
parent 82676f1fc4
commit 99cd12ced1
3 changed files with 21 additions and 196 deletions


@@ -431,21 +431,6 @@ void tlb_flush(CPUState *cpu)
     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
 }
 
-void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
-{
-    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
-
-    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
-
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
-    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
-}
-
-void tlb_flush_all_cpus(CPUState *src_cpu)
-{
-    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
-}
-
 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
 {
     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
@@ -656,46 +641,6 @@ void tlb_flush_page(CPUState *cpu, vaddr addr)
     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
 }
 
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
-                                       uint16_t idxmap)
-{
-    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
-
-    /* This should already be page aligned */
-    addr &= TARGET_PAGE_MASK;
-
-    /*
-     * Allocate memory to hold addr+idxmap only when needed.
-     * See tlb_flush_page_by_mmuidx for details.
-     */
-    if (idxmap < TARGET_PAGE_SIZE) {
-        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
-                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
-    } else {
-        CPUState *dst_cpu;
-
-        /* Allocate a separate data block for each destination cpu. */
-        CPU_FOREACH(dst_cpu) {
-            if (dst_cpu != src_cpu) {
-                TLBFlushPageByMMUIdxData *d
-                    = g_new(TLBFlushPageByMMUIdxData, 1);
-
-                d->addr = addr;
-                d->idxmap = idxmap;
-                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
-                                 RUN_ON_CPU_HOST_PTR(d));
-            }
-        }
-    }
-
-    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
-}
-
-void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
-{
-    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
-}
-
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
                                               uint16_t idxmap)
@@ -887,54 +832,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
 }
 
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
-                                        vaddr addr, vaddr len,
-                                        uint16_t idxmap, unsigned bits)
-{
-    TLBFlushRangeData d;
-    CPUState *dst_cpu;
-
-    /*
-     * If all bits are significant, and len is small,
-     * this devolves to tlb_flush_page.
-     */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
-        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
-        return;
-    }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
-        return;
-    }
-
-    /* This should already be page aligned */
-    d.addr = addr & TARGET_PAGE_MASK;
-    d.len = len;
-    d.idxmap = idxmap;
-    d.bits = bits;
-
-    /* Allocate a separate data block for each destination cpu. */
-    CPU_FOREACH(dst_cpu) {
-        if (dst_cpu != src_cpu) {
-            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
-            async_run_on_cpu(dst_cpu,
-                             tlb_flush_range_by_mmuidx_async_1,
-                             RUN_ON_CPU_HOST_PTR(p));
-        }
-    }
-
-    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
-}
-
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
-                                            vaddr addr, uint16_t idxmap,
-                                            unsigned bits)
-{
-    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
-                                       idxmap, bits);
-}
-
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                vaddr addr,
                                                vaddr len,


@@ -205,15 +205,10 @@ DESIGN REQUIREMENTS:
 
 (Current solution)
 
-We have updated cputlb.c to defer operations when a cross-vCPU
-operation with async_run_on_cpu() which ensures each vCPU sees a
-coherent state when it next runs its work (in a few instructions
-time).
-
-A new set up operations (tlb_flush_*_all_cpus) take an additional flag
-which when set will force synchronisation by setting the source vCPUs
-work as "safe work" and exiting the cpu run loop. This ensure by the
-time execution restarts all flush operations have completed.
+A new set of tlb flush operations (tlb_flush_*_all_cpus_synced) force
+synchronisation by setting the source vCPUs work as "safe work" and
+exiting the cpu run loop. This ensures that by the time execution
+restarts all flush operations have completed.
 
 TLB flag updates are all done atomically and are also protected by the
 corresponding page lock.
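
For reference, the pattern described above boils down to the following
sketch, modelled on the remaining tlb_flush_by_mmuidx_all_cpus_synced() in
cputlb.c; the function name is illustrative, while flush_all_helper() and
tlb_flush_by_mmuidx_async_work are the cputlb.c internals visible in the
hunks above:

    /*
     * Illustrative restatement of the *_synced pattern: queue the flush on
     * every other vCPU, then queue it on the source vCPU as "safe work".
     * Safe work forces an exit from the CPU run loop, so all flushes have
     * completed before the source vCPU resumes executing guest code.
     */
    static void sketch_flush_all_synced(CPUState *src, uint16_t idxmap)
    {
        const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

        flush_all_helper(src, fn, RUN_ON_CPU_HOST_INT(idxmap));
        async_safe_run_on_cpu(src, fn, RUN_ON_CPU_HOST_INT(idxmap));
    }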


@@ -67,25 +67,16 @@ void tlb_destroy(CPUState *cpu);
  * MMU indexes.
  */
 void tlb_flush_page(CPUState *cpu, vaddr addr);
-/**
- * tlb_flush_page_all_cpus:
- * @cpu: src CPU of the flush
- * @addr: virtual address of page to be flushed
- *
- * Flush one page from the TLB of the specified CPU, for all
- * MMU indexes.
- */
-void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
 /**
  * tlb_flush_page_all_cpus_synced:
  * @cpu: src CPU of the flush
  * @addr: virtual address of page to be flushed
  *
- * Flush one page from the TLB of the specified CPU, for all MMU
- * indexes like tlb_flush_page_all_cpus except the source vCPUs work
- * is scheduled as safe work meaning all flushes will be complete once
- * the source vCPUs safe work is complete. This will depend on when
- * the guests translation ends the TB.
+ * Flush one page from the TLB of all CPUs, for all
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
  */
 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
 /**
@@ -98,19 +89,14 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
  * use one of the other functions for efficiency.
  */
 void tlb_flush(CPUState *cpu);
-/**
- * tlb_flush_all_cpus:
- * @cpu: src CPU of the flush
- */
-void tlb_flush_all_cpus(CPUState *src_cpu);
 /**
  * tlb_flush_all_cpus_synced:
  * @cpu: src CPU of the flush
  *
- * Like tlb_flush_all_cpus except this except the source vCPUs work is
- * scheduled as safe work meaning all flushes will be complete once
- * the source vCPUs safe work is complete. This will depend on when
- * the guests translation ends the TB.
+ * Flush the entire TLB for all CPUs, for all MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
  */
 void tlb_flush_all_cpus_synced(CPUState *src_cpu);
 /**
@@ -125,27 +111,16 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                               uint16_t idxmap);
 /**
- * tlb_flush_page_by_mmuidx_all_cpus:
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
  * @cpu: Originating CPU of the flush
  * @addr: virtual address of page to be flushed
  * @idxmap: bitmap of MMU indexes to flush
  *
  * Flush one page from the TLB of all CPUs, for the specified
  * MMU indexes.
- */
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
-                                       uint16_t idxmap);
-/**
- * tlb_flush_page_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
  *
- * Flush one page from the TLB of all CPUs, for the specified MMU
- * indexes like tlb_flush_page_by_mmuidx_all_cpus except the source
- * vCPUs work is scheduled as safe work meaning all flushes will be
- * complete once the source vCPUs safe work is complete. This will
- * depend on when the guests translation ends the TB.
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
  */
 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                               uint16_t idxmap);
@@ -159,25 +134,16 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
  * MMU indexes.
  */
 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx_all_cpus:
- * @cpu: Originating CPU of the flush
- * @idxmap: bitmap of MMU indexes to flush
- *
- * Flush all entries from all TLBs of all CPUs, for the specified
- * MMU indexes.
- */
-void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
 /**
  * tlb_flush_by_mmuidx_all_cpus_synced:
  * @cpu: Originating CPU of the flush
  * @idxmap: bitmap of MMU indexes to flush
  *
- * Flush all entries from all TLBs of all CPUs, for the specified
- * MMU indexes like tlb_flush_by_mmuidx_all_cpus except except the source
- * vCPUs work is scheduled as safe work meaning all flushes will be
- * complete once the source vCPUs safe work is complete. This will
- * depend on when the guests translation ends the TB.
+ * Flush all entries from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
  */
 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
 /**
@@ -194,8 +160,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                    uint16_t idxmap, unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
-                                            uint16_t idxmap, unsigned bits);
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
     (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
 
@@ -215,9 +179,6 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                unsigned bits);
 
 /* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
-                                        vaddr len, uint16_t idxmap,
-                                        unsigned bits);
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                vaddr addr,
                                                vaddr len,
@@ -290,18 +251,12 @@ static inline void tlb_destroy(CPUState *cpu)
 static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
 {
 }
-static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
-{
-}
 static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
 {
 }
 static inline void tlb_flush(CPUState *cpu)
 {
 }
-static inline void tlb_flush_all_cpus(CPUState *src_cpu)
-{
-}
 static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 {
 }
@@ -313,20 +268,11 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
 static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 {
 }
-static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
-                                                     vaddr addr,
-                                                     uint16_t idxmap)
-{
-}
 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             uint16_t idxmap)
 {
 }
-static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu,
-                                                uint16_t idxmap)
-{
-}
 static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                        uint16_t idxmap)
 {
@@ -337,12 +283,6 @@ static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                  unsigned bits)
 {
 }
-static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
-                                                          vaddr addr,
-                                                          uint16_t idxmap,
-                                                          unsigned bits)
-{
-}
 static inline void
 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                               uint16_t idxmap, unsigned bits)
@@ -353,13 +293,6 @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                              unsigned bits)
 {
 }
-static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
-                                                      vaddr addr,
-                                                      vaddr len,
-                                                      uint16_t idxmap,
-                                                      unsigned bits)
-{
-}
 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                              vaddr addr,
                                                              vaddr len,