accel/tcg: Convert TARGET_HAS_PRECISE_SMC to TCGCPUOps.precise_smc

Instead of having a compile-time TARGET_HAS_PRECISE_SMC definition,
have each target set the 'precise_smc' field in the TCGCPUOps
structure.
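
To make the shape of the change concrete: guards of the form

    #ifdef TARGET_HAS_PRECISE_SMC
    ... precise-SMC handling, compiled in per target ...
    #endif

become runtime tests on the per-CPU ops table. The following is a minimal
compilable sketch, not code from this commit: the QEMU structures are
reduced to just the fields involved, and use_precise_smc() is a made-up
helper mirroring the new test in tb_invalidate_phys_page_unwind().

    #include <stdbool.h>
    #include <stdint.h>

    /* Reduced stand-ins for the real QEMU structures. */
    typedef struct TCGCPUOps {
        bool mttcg_supported;
        bool precise_smc;       /* set by targets such as i386 and s390x */
    } TCGCPUOps;

    typedef struct CPUClass {
        const TCGCPUOps *tcg_ops;
    } CPUClass;

    typedef struct CPUState {
        CPUClass *cc;
    } CPUState;

    /* A target opts in by setting the field in its ops table ... */
    static const TCGCPUOps x86_like_ops = {
        .mttcg_supported = true,
        .precise_smc = true,
    };

    /* ... and common code tests the flag at run time instead of
     * being compiled per target under an #ifdef. */
    static bool use_precise_smc(const CPUState *cpu, uintptr_t pc)
    {
        return pc && cpu && cpu->cc->tcg_ops->precise_smc;
    }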

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2025-04-05 08:43:44 -07:00
commit 77ad412b32
parent 7fa0f4a70c
8 changed files with 27 additions and 31 deletions

--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -28,6 +28,7 @@
 #include "exec/mmap-lock.h"
 #include "exec/tb-flush.h"
 #include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
 #include "tb-internal.h"
 #include "system/tcg.h"
 #include "tcg/tcg.h"
@@ -1042,9 +1043,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr)
 /*
  * Called with mmap_lock held. If pc is not 0 then it indicates the
  * host PC of the faulting store instruction that caused this invalidate.
- * Returns true if the caller needs to abort execution of the current
- * TB (because it was modified by this store and the guest CPU has
- * precise-SMC semantics).
+ * Returns true if the caller needs to abort execution of the current TB.
  */
 bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
                                     uintptr_t pc)
@@ -1059,10 +1058,7 @@ bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
      * Without precise smc semantics, or when outside of a TB,
      * we can skip to invalidate.
      */
-#ifndef TARGET_HAS_PRECISE_SMC
-    pc = 0;
-#endif
-    if (!pc) {
+    if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) {
         tb_invalidate_phys_page(addr);
         return false;
     }
@@ -1113,14 +1109,16 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
 {
     TranslationBlock *tb;
     PageForEachNext n;
-#ifdef TARGET_HAS_PRECISE_SMC
     bool current_tb_modified = false;
-    TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
-#endif /* TARGET_HAS_PRECISE_SMC */
+    TranslationBlock *current_tb = NULL;
 
     /* Range may not cross a page. */
     tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
 
+    if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) {
+        current_tb = tcg_tb_lookup(retaddr);
+    }
+
     /*
      * We remove all the TBs in the range [start, last].
      * XXX: see if in some cases it could be faster to invalidate all the code
@@ -1138,8 +1136,7 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
             tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
         }
         if (!(tb_last < start || tb_start > last)) {
-#ifdef TARGET_HAS_PRECISE_SMC
-            if (current_tb == tb &&
+            if (unlikely(current_tb == tb) &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                 /*
                  * If we are modifying the current TB, we must stop
@@ -1149,9 +1146,8 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
                  * restore the CPU state.
                  */
                 current_tb_modified = true;
-                cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr);
             }
-#endif /* TARGET_HAS_PRECISE_SMC */
             tb_phys_invalidate__locked(tb);
         }
     }
@@ -1161,15 +1157,13 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu,
         tlb_unprotect_code(start);
     }
 
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (current_tb_modified) {
+    if (unlikely(current_tb_modified)) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time. */
-        current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
         mmap_unlock();
-        cpu_loop_exit_noexc(current_cpu);
+        cpu_loop_exit_noexc(cpu);
     }
-#endif
 }
 
 /*

--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -733,12 +733,12 @@ int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc)
          * this thread raced with another one which got here first and
          * set the page to PAGE_WRITE and did the TB invalidate for us.
          */
-#ifdef TARGET_HAS_PRECISE_SMC
+        if (pc && cpu->cc->tcg_ops->precise_smc) {
             TranslationBlock *current_tb = tcg_tb_lookup(pc);
             if (current_tb) {
                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
             }
-#endif
+        }
     } else {
         int host_page_size = qemu_real_host_page_size();
         target_ulong start, len, i;

--- a/include/accel/tcg/cpu-ops.h
+++ b/include/accel/tcg/cpu-ops.h
@@ -28,6 +28,13 @@ struct TCGCPUOps {
      */
     bool mttcg_supported;
 
+    /**
+     * @precise_smc: Stores which modify code within the current TB force
+     *               the TB to exit; the next executed instruction will see
+     *               the result of the store.
+     */
+    bool precise_smc;
+
     /**
      * @guest_default_memory_order: default barrier that is required
      * for the guest memory ordering.

--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -37,7 +37,6 @@
 #pragma GCC poison TARGET_NAME
 #pragma GCC poison TARGET_BIG_ENDIAN
 #pragma GCC poison TCG_GUEST_DEFAULT_MO
-#pragma GCC poison TARGET_HAS_PRECISE_SMC
 #pragma GCC poison TARGET_LONG_BITS
 #pragma GCC poison TARGET_FMT_lx

--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -35,10 +35,6 @@
 #define XEN_NR_VIRQS 24
 
-/* support for self modifying code even if the modified instruction is
-   close to the modifying instruction */
-#define TARGET_HAS_PRECISE_SMC
-
 #ifdef TARGET_X86_64
 #define I386_ELF_MACHINE EM_X86_64
 #define ELF_MACHINE_UNAME "x86_64"

--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -126,6 +126,7 @@ static bool x86_debug_check_breakpoint(CPUState *cs)
 
 const TCGCPUOps x86_tcg_ops = {
     .mttcg_supported = true,
+    .precise_smc = true,
     /*
      * The x86 has a strong memory model with some store-after-load re-ordering
      */

--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -346,6 +346,7 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
 
 static const TCGCPUOps s390_tcg_ops = {
     .mttcg_supported = true,
+    .precise_smc = true,
     /*
      * The z/Architecture has a strong memory model with some
      * store-after-load re-ordering.

--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -35,8 +35,6 @@
 #define ELF_MACHINE_UNAME "S390X"
 
-#define TARGET_HAS_PRECISE_SMC
-
 #define MMU_USER_IDX 0
 
 #define S390_MAX_CPUS 248