cpu: tcg_ops: move to tcg-cpu-ops.h, keep a pointer in CPUClass
We cannot, in principle, make the TCG operations field definitions conditional on CONFIG_TCG in code that is included by both common_ss and specific_ss modules.

Therefore, what we can safely do to restrict the TCG fields to TCG-only builds is to move all TCG cpu operations into a separate header file, which is only included by TCG, target-specific code. This leaves just a NULL pointer in cpu.h for the non-TCG builds.

This also tidies up the code in all targets a bit, having all TCG cpu operations neatly contained by a dedicated data struct.

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210204163931.7358-16-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
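For orientation, the shape of the resulting split is sketched below: the TCG hooks live in a dedicated struct defined in hw/core/tcg-cpu-ops.h, and CPUClass keeps only a pointer to it, which stays NULL in non-TCG builds. This is an abbreviated sketch reconstructed from the hunks in this commit, not the verbatim header; the real struct carries more hooks and documentation.

/* hw/core/tcg-cpu-ops.h (sketch, abbreviated) */
struct TCGCPUOps {
    void (*initialize)(void);
    void (*synchronize_from_tb)(CPUState *cpu, const struct TranslationBlock *tb);
    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
    void (*do_interrupt)(CPUState *cpu);
    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
    void (*debug_excp_handler)(CPUState *cpu);
    /* ... plus optional hooks such as do_unaligned_access and
     * do_transaction_failed, used by the cputlb.c hunks below. */
};

/* hw/core/cpu.h (sketch): CPUClass now holds just a pointer,
 * left NULL when the build does not include TCG. */
struct CPUClass {
    /* ... */
    struct TCGCPUOps *tcg_ops;
};

Common code then tests each hook through the pointer before calling it, for example if (cc->tcg_ops->cpu_exec_enter) { ... }, exactly as the accel/tcg hunks below show.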
Parent: c73bdb35a9
Commit: 7827168471
36 changed files with 582 additions and 306 deletions
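The hunks below only show the accel/tcg consumers switching from the embedded struct (cc->tcg_ops.hook) to the pointer (cc->tcg_ops->hook); the per-target half of the change is not reproduced on this page. As a rough illustration of the pattern described in the commit message, a target's class_init points tcg_ops at a statically allocated ops table under CONFIG_TCG. All foo_* names here are hypothetical and not taken from this commit:

/* target/foo/cpu.c (hypothetical target, illustration only) */
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static struct TCGCPUOps foo_tcg_ops = {
    .initialize = foo_translate_init,             /* hypothetical helpers */
    .cpu_exec_interrupt = foo_cpu_exec_interrupt,
    .do_interrupt = foo_cpu_do_interrupt,
    .tlb_fill = foo_cpu_tlb_fill,
};
#endif /* CONFIG_TCG */

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* ... other CPUClass hooks ... */
#ifdef CONFIG_TCG
    cc->tcg_ops = &foo_tcg_ops;    /* remains NULL in non-TCG builds */
#endif
}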
accel/tcg/cpu-exec.c
@@ -21,6 +21,7 @@
 #include "qemu-common.h"
 #include "qemu/qemu-print.h"
 #include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "trace.h"
 #include "disas/disas.h"
 #include "exec/exec-all.h"
@@ -213,8 +214,8 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
                                TARGET_FMT_lx "] %s\n",
                                last_tb->tc.ptr, last_tb->pc,
                                lookup_symbol(last_tb->pc));
-        if (cc->tcg_ops.synchronize_from_tb) {
-            cc->tcg_ops.synchronize_from_tb(cpu, last_tb);
+        if (cc->tcg_ops->synchronize_from_tb) {
+            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
             assert(cc->set_pc);
             cc->set_pc(cpu, last_tb->pc);
@@ -262,8 +263,8 @@ static void cpu_exec_enter(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    if (cc->tcg_ops.cpu_exec_enter) {
-        cc->tcg_ops.cpu_exec_enter(cpu);
+    if (cc->tcg_ops->cpu_exec_enter) {
+        cc->tcg_ops->cpu_exec_enter(cpu);
     }
 }
 
@@ -271,8 +272,8 @@ static void cpu_exec_exit(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    if (cc->tcg_ops.cpu_exec_exit) {
-        cc->tcg_ops.cpu_exec_exit(cpu);
+    if (cc->tcg_ops->cpu_exec_exit) {
+        cc->tcg_ops->cpu_exec_exit(cpu);
     }
 }
 
@@ -512,8 +513,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
         }
     }
 
-    if (cc->tcg_ops.debug_excp_handler) {
-        cc->tcg_ops.debug_excp_handler(cpu);
+    if (cc->tcg_ops->debug_excp_handler) {
+        cc->tcg_ops->debug_excp_handler(cpu);
     }
 }
 
@@ -547,7 +548,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
            loop */
 #if defined(TARGET_I386)
         CPUClass *cc = CPU_GET_CLASS(cpu);
-        cc->tcg_ops.do_interrupt(cpu);
+        cc->tcg_ops->do_interrupt(cpu);
 #endif
         *ret = cpu->exception_index;
         cpu->exception_index = -1;
@@ -556,7 +557,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         if (replay_exception()) {
             CPUClass *cc = CPU_GET_CLASS(cpu);
             qemu_mutex_lock_iothread();
-            cc->tcg_ops.do_interrupt(cpu);
+            cc->tcg_ops->do_interrupt(cpu);
             qemu_mutex_unlock_iothread();
             cpu->exception_index = -1;
 
@@ -655,8 +656,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
            True when it is, and we should restart on a new TB,
            and via longjmp via cpu_loop_exit.  */
         else {
-            if (cc->tcg_ops.cpu_exec_interrupt &&
-                cc->tcg_ops.cpu_exec_interrupt(cpu, interrupt_request)) {
+            if (cc->tcg_ops->cpu_exec_interrupt &&
+                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                 if (need_replay_interrupt(interrupt_request)) {
                     replay_interrupt();
                 }
@@ -834,7 +835,7 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
     if (!tcg_target_initialized) {
-        cc->tcg_ops.initialize();
+        cc->tcg_ops->initialize();
         tcg_target_initialized = true;
     }
     tlb_init(cpu);
accel/tcg/cputlb.c
@@ -20,6 +20,7 @@
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "exec/exec-all.h"
 #include "exec/memory.h"
 #include "exec/address-spaces.h"
@@ -1305,11 +1306,37 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
      * This is not a probe, so only valid return is success; failure
      * should result in exception + longjmp to the cpu loop.
      */
-    ok = cc->tcg_ops.tlb_fill(cpu, addr, size,
-                              access_type, mmu_idx, false, retaddr);
+    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
+                               access_type, mmu_idx, false, retaddr);
     assert(ok);
 }
 
+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+                                        MMUAccessType access_type,
+                                        int mmu_idx, uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+}
+
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
+                                          vaddr addr, unsigned size,
+                                          MMUAccessType access_type,
+                                          int mmu_idx, MemTxAttrs attrs,
+                                          MemTxResult response,
+                                          uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    if (!cpu->ignore_memory_transaction_failures &&
+        cc->tcg_ops->do_transaction_failed) {
+        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+                                           access_type, mmu_idx, attrs,
+                                           response, retaddr);
+    }
+}
+
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
@@ -1577,8 +1604,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
         CPUState *cs = env_cpu(env);
         CPUClass *cc = CPU_GET_CLASS(cs);
 
-        if (!cc->tcg_ops.tlb_fill(cs, addr, fault_size, access_type,
-                                  mmu_idx, nonfault, retaddr)) {
+        if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+                                   mmu_idx, nonfault, retaddr)) {
             /* Non-faulting page table read failed. */
             *phost = NULL;
             return TLB_INVALID_MASK;
accel/tcg/user-exec.c
@@ -18,6 +18,7 @@
  */
 #include "qemu/osdep.h"
 #include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "disas/disas.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
@@ -187,8 +188,8 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
     clear_helper_retaddr();
 
     cc = CPU_GET_CLASS(cpu);
-    cc->tcg_ops.tlb_fill(cpu, address, 0, access_type,
-                         MMU_USER_IDX, false, pc);
+    cc->tcg_ops->tlb_fill(cpu, address, 0, access_type,
+                          MMU_USER_IDX, false, pc);
     g_assert_not_reached();
 }
 
@@ -218,8 +219,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     } else {
         CPUState *cpu = env_cpu(env);
         CPUClass *cc = CPU_GET_CLASS(cpu);
-        cc->tcg_ops.tlb_fill(cpu, addr, fault_size, access_type,
-                             MMU_USER_IDX, false, ra);
+        cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
+                              MMU_USER_IDX, false, ra);
         g_assert_not_reached();
     }
 }