Merge tag 'pull-tcg-20250501-v2' of https://gitlab.com/rth7680/qemu into staging

include: Remove 'exec/exec-all.h'
accel/tcg: Build tb-maint.c twice
accel/tcg: Build cpu-exec.c twice
accel/tcg: Build translate-all.c twice
accel/tcg: Build tcg-all.c twice
accel/tcg: Build cputlb.c once
accel/tcg: Build user-exec.c once

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmgZFdYdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/8RAf8C2NTtUNlBrjtPrQD
# hP2YiNVfI+c9e3x3Bivx++9YUYfynWyPO774axnyhqYg3cJONWs+4HJ/MQHNSG/G
# qT+7EihGIDwnjWxTvu9wp5XucvaGKBqGEQ2IZrr0JBEnvrrpuhiauqP7Bjb37eAj
# kxw50NUxxz4wqk5Ql4UZyJ0h1peH5PFNr9uozhr6HJSEET7GxPMfUy611jAa/eXc
# MDkiDwd+0JGSKkMSQaCocMO2vL4OQGr3sTBNHQZ/RalEdMp+AJiQgjJ0fFfCInwK
# 4w8/8we8MKUBIwTn5kTUBjPrI7nlhJk5mFm5aV7fNvSClGf5Yb62SfPesQKm5qkE
# z3aApA==
# =Lpyu
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 05 May 2025 15:47:34 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20250501-v2' of https://gitlab.com/rth7680/qemu: (59 commits)
  accel/tcg: Build user-exec.c once
  accel/tcg: Avoid abi_ptr in user-exec.c
  accel/tcg: Remove TARGET_PAGE_DATA_SIZE
  accel/tcg: Move TARGET_TAGGED_ADDRESSES to TCGCPUOps.untagged_addr
  include/user: Use vaddr in guest-host.h
  include/user: Convert GUEST_ADDR_MAX to a variable
  accel/tcg: Build cputlb.c once
  accel/tcg: Use vaddr for plugin_{load,store}_cb
  accel/tcg: Use target_long_bits() in cputlb.c
  accel/tcg: Move tlb_vaddr_to_host declaration to probe.h
  accel/tcg: Move user-only tlb_vaddr_to_host out of line
  accel/tcg: Use vaddr in cpu_loop.h
  accel/tcg: Build tcg-all.c twice
  accel/tcg: Build translate-all.c twice
  accel/tcg: Use target_long_bits() in translate-all.c
  accel/tcg: Don't use TARGET_LONG_BITS in decode_sleb128
  tcg: Define INSN_START_WORDS as constant 3
  qemu: Introduce target_long_bits()
  qemu/target_info: Add %target_cpu_type field to TargetInfo
  system/vl: Filter machine list available for a particular target binary
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2025-05-06 11:03:30 -04:00
commit 7cef6d6863
226 changed files with 1232 additions and 1366 deletions


@ -168,7 +168,7 @@ F: include/exec/helper*.h.inc
F: include/exec/helper-info.c.inc
F: include/exec/page-protection.h
F: include/system/tcg.h
F: include/accel/tcg/cpu-ops.h
F: include/accel/tcg/
F: host/include/*/host/cpuinfo.h
F: util/cpuinfo-*.c
F: include/tcg/
@ -493,7 +493,6 @@ M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/target_long.h
F: include/qemu/accel.h
F: include/system/accel-*.h


@ -51,7 +51,6 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "system/address-spaces.h"
#include "exec/exec-all.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "system/accel-ops.h"


@ -22,8 +22,8 @@
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/cpu-ops.h"
#include "accel/tcg/helper-retaddr.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/cpu-common.h"
@ -36,7 +36,6 @@
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/icount.h"
#include "exec/replay-core.h"
#include "system/tcg.h"
@ -46,7 +45,6 @@
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
#include "internal-target.h"
/* -icount align implementation. */
@ -151,12 +149,9 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
#endif /* CONFIG_USER_ONLY */
struct tb_desc {
vaddr pc;
uint64_t cs_base;
TCGTBCPUState s;
CPUArchState *env;
tb_page_addr_t page_addr0;
uint32_t flags;
uint32_t cflags;
};
static bool tb_lookup_cmp(const void *p, const void *d)
@ -164,11 +159,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb_cflags(tb) == desc->cflags) {
tb->cs_base == desc->s.cs_base &&
tb->flags == desc->s.flags &&
tb_cflags(tb) == desc->s.cflags) {
/* check next page if needed */
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
if (tb_phys_page1 == -1) {
@ -186,7 +181,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
virt_page1 = TARGET_PAGE_ALIGN(desc->s.pc);
phys_page1 = get_page_addr_code(desc->env, virt_page1);
if (tb_phys_page1 == phys_page1) {
return true;
@ -196,26 +191,21 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false;
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
uint32_t cflags)
static TranslationBlock *tb_htable_lookup(CPUState *cpu, TCGTBCPUState s)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
desc.s = s;
desc.env = cpu_env(cpu);
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
phys_pc = get_page_addr_code(desc.env, s.pc);
if (phys_pc == -1) {
return NULL;
}
desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
flags, cs_base, cflags);
h = tb_hash_func(phys_pc, (s.cflags & CF_PCREL ? 0 : s.pc),
s.flags, s.cs_base, s.cflags);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@ -233,35 +223,33 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
*
* Returns: an existing translation block or NULL.
*/
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
uint32_t cflags)
static inline TranslationBlock *tb_lookup(CPUState *cpu, TCGTBCPUState s)
{
TranslationBlock *tb;
CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
tcg_debug_assert(!(cflags & CF_INVALID));
tcg_debug_assert(!(s.cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
hash = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
tb = qatomic_read(&jc->array[hash].tb);
if (likely(tb &&
jc->array[hash].pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb_cflags(tb) == cflags)) {
jc->array[hash].pc == s.pc &&
tb->cs_base == s.cs_base &&
tb->flags == s.flags &&
tb_cflags(tb) == s.cflags)) {
goto hit;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
tb = tb_htable_lookup(cpu, s);
if (tb == NULL) {
return NULL;
}
jc->array[hash].pc = pc;
jc->array[hash].pc = s.pc;
qatomic_set(&jc->array[hash].tb, tb);
hit:
@ -269,7 +257,7 @@ hit:
* As long as tb is not NULL, the contents are consistent. Therefore,
* the virtual PC has to match for non-CF_PCREL translations.
*/
assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
assert((tb_cflags(tb) & CF_PCREL) || tb->pc == s.pc);
return tb;
}
@ -286,14 +274,11 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
int flags = 0;
int flags = CPU_DUMP_CCOP;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
#if defined(TARGET_I386)
flags |= CPU_DUMP_CCOP;
#endif
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
flags |= CPU_DUMP_VPU;
}
@ -389,9 +374,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags;
/*
* By definition we've just finished a TB, so I/O is OK.
@ -401,20 +383,21 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
* The next TB, if we chain to it, will clear the flag again.
*/
cpu->neg.can_do_io = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
cflags = curr_cflags(cpu);
if (check_for_breakpoints(cpu, pc, &cflags)) {
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
s.cflags = curr_cflags(cpu);
if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
cpu_loop_exit(cpu);
}
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
tb = tb_lookup(cpu, s);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
log_cpu_exec(pc, cpu, tb);
log_cpu_exec(s.pc, cpu, tb);
}
return tb->tc.ptr;
@ -564,11 +547,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
void cpu_exec_step_atomic(CPUState *cpu)
{
CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@ -577,13 +556,13 @@ void cpu_exec_step_atomic(CPUState *cpu)
g_assert(!cpu->running);
cpu->running = true;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
s.cflags = curr_cflags(cpu);
cflags = curr_cflags(cpu);
/* Execute in a serial context. */
cflags &= ~CF_PARALLEL;
s.cflags &= ~CF_PARALLEL;
/* After 1 insn, return and release the exclusive lock. */
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
s.cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
/*
* No need to check_for_breakpoints here.
* We only arrive in cpu_exec_step_atomic after beginning execution
@ -591,16 +570,16 @@ void cpu_exec_step_atomic(CPUState *cpu)
* Any breakpoint for this insn will have been recognized earlier.
*/
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
tb = tb_lookup(cpu, s);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
tb = tb_gen_code(cpu, s);
mmap_unlock();
}
cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
trace_exec_tb(tb, s.pc);
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
@ -733,10 +712,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
* If user mode only, we simulate a fake exception which will be
* handled outside the cpu execution loop.
*/
#if defined(TARGET_I386)
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (tcg_ops->fake_user_interrupt) {
tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
}
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@ -823,33 +802,22 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->exception_index = EXCP_HLT;
bql_unlock();
return true;
}
#if defined(TARGET_I386)
else if (interrupt_request & CPU_INTERRUPT_INIT) {
X86CPU *x86_cpu = X86_CPU(cpu);
CPUArchState *env = &x86_cpu->env;
replay_interrupt();
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
bql_unlock();
return true;
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
bql_unlock();
return true;
}
#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
} else {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
tcg_ops->cpu_exec_reset(cpu);
bql_unlock();
return true;
}
/*
* The target hook has 3 exit conditions:
* False when the interrupt isn't processed,
* True when it is, and we should restart on a new TB,
* and via longjmp via cpu_loop_exit.
*/
if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (!tcg_ops->need_replay_interrupt ||
tcg_ops->need_replay_interrupt(interrupt_request)) {
@ -956,11 +924,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags;
cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
s.cflags = cpu->cflags_next_tb;
/*
* When requested, use an exact setting for cflags for the next
@ -969,33 +934,32 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
* have CF_INVALID set, -1 is a convenient invalid value that
* does not require tcg headers for cpu_common_reset.
*/
cflags = cpu->cflags_next_tb;
if (cflags == -1) {
cflags = curr_cflags(cpu);
if (s.cflags == -1) {
s.cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
if (check_for_breakpoints(cpu, pc, &cflags)) {
if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
break;
}
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
tb = tb_lookup(cpu, s);
if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h;
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
tb = tb_gen_code(cpu, s);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
h = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
jc->array[h].pc = pc;
jc->array[h].pc = s.pc;
qatomic_set(&jc->array[h].tb, tb);
}
@ -1015,7 +979,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb_add_jump(last_tb, tb_exit, tb);
}
cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
cpu_loop_exec_tb(cpu, tb, s.pc, &last_tb, &tb_exit);
/* Try to align the host and virtual clocks
if the guest is in advance */
@ -1074,8 +1038,10 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
#ifndef CONFIG_USER_ONLY
assert(tcg_ops->cpu_exec_halt);
assert(tcg_ops->cpu_exec_interrupt);
assert(tcg_ops->cpu_exec_reset);
#endif /* !CONFIG_USER_ONLY */
assert(tcg_ops->translate_code);
assert(tcg_ops->get_tb_cpu_state);
assert(tcg_ops->mmu_index);
tcg_ops->initialize();
tcg_target_initialized = true;

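The recurring change in cpu-exec.c above is that the loose (pc, cs_base, flags, cflags) quartet now travels as one value. Judging from the fields accessed in these hunks (s.pc, s.cs_base, s.flags, s.cflags), the type behind the new "accel/tcg/tb-cpu-state.h" header is essentially a plain struct along these lines; this is a sketch inferred from usage, not the verbatim header:

/* Sketch inferred from the accesses above; the real definition
 * lives in include/accel/tcg/tb-cpu-state.h. */
typedef struct TCGTBCPUState {
    vaddr    pc;
    uint64_t cs_base;
    uint32_t flags;
    uint32_t cflags;
} TCGTBCPUState;

Passing one small struct by value also keeps the tb_lookup() and tb_htable_lookup() signatures stable if more per-TB state is ever added behind the get_tb_cpu_state() hook.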

@ -19,11 +19,14 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/target-info.h"
#include "accel/tcg/cpu-ops.h"
#include "exec/exec-all.h"
#include "accel/tcg/iommu.h"
#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "system/memory.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/cpu-ldst-common.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "system/ram_addr.h"
@ -43,7 +46,6 @@
#include "tb-internal.h"
#include "tlb-bounds.h"
#include "internal-common.h"
#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
@ -771,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
assert_cpu_is_self(cpu);
/* If no page bits are significant, this devolves to tlb_flush. */
if (bits < TARGET_PAGE_BITS) {
tlb_flush_by_mmuidx(cpu, idxmap);
return;
}
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
return;
}
/* If no page bits are significant, this devolves to tlb_flush. */
if (bits < TARGET_PAGE_BITS) {
tlb_flush_by_mmuidx(cpu, idxmap);
return;
}
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@ -809,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
TLBFlushRangeData d, *p;
CPUState *dst_cpu;
/* If no page bits are significant, this devolves to tlb_flush. */
if (bits < TARGET_PAGE_BITS) {
tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
return;
}
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
/* If no page bits are significant, this devolves to tlb_flush. */
if (bits < TARGET_PAGE_BITS) {
tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
return;
}
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@ -1340,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
/*

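The hunks above also show the device that lets cputlb.c be built once: the compile-time TARGET_LONG_BITS macro gives way to a runtime query. A hypothetical sketch of that query follows; only the function name target_long_bits() is taken from the commit list, the accessor and field names are assumptions for illustration:

/*
 * Sketch: returns 32 or 64 for the target this binary serves, so
 * generic code can ask at run time instead of using a per-target
 * macro. Assumed to be backed by TargetInfo ("qemu/target-info.h");
 * the accessor and field names below are illustrative.
 */
int target_long_bits(void)
{
    return target_info()->long_bits;
}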

@ -11,6 +11,8 @@
#include "exec/cpu-common.h"
#include "exec/translation-block.h"
#include "exec/mmap-lock.h"
#include "accel/tcg/tb-cpu-state.h"
extern int64_t max_delay;
extern int64_t max_advance;
@ -45,9 +47,7 @@ static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
#endif
}
TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
uint64_t cs_base, uint32_t flags,
int cflags);
TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
@ -108,4 +108,35 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
return get_page_addr_code_hostp(env, addr, NULL);
}
/*
* Access to the various translations structures need to be serialised
* via locks for consistency. In user-mode emulation access to the
* memory related structures are protected with mmap_lock.
* In !user-mode we use per-page locks.
*/
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif
#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif
#ifndef CONFIG_USER_ONLY
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_USER_ONLY */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
#endif


@ -1,46 +0,0 @@
/*
* Internal execution defines for qemu (target specific)
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H
#include "cpu-param.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tb-internal.h"
#include "exec/mmap-lock.h"
/*
* Access to the various translations structures need to be serialised
* via locks for consistency. In user-mode emulation access to the
* memory related structures are protected with mmap_lock.
* In !user-mode we use per-page locks.
*/
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif
#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif
#ifndef CONFIG_USER_ONLY
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_USER_ONLY */
#endif /* ACCEL_TCG_INTERNAL_H */


@ -123,7 +123,7 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
* Load helpers for cpu_ldst.h
*/
static void plugin_load_cb(CPUArchState *env, abi_ptr addr,
static void plugin_load_cb(CPUArchState *env, vaddr addr,
uint64_t value_low,
uint64_t value_high,
MemOpIdx oi)
@ -193,7 +193,7 @@ Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr,
* Store helpers for cpu_ldst.h
*/
static void plugin_store_cb(CPUArchState *env, abi_ptr addr,
static void plugin_store_cb(CPUArchState *env, vaddr addr,
uint64_t value_low,
uint64_t value_high,
MemOpIdx oi)


@ -5,9 +5,13 @@ endif
tcg_ss = ss.source_set()
tcg_ss.add(files(
'cpu-exec.c',
'cpu-exec-common.c',
'tcg-runtime.c',
'tcg-runtime-gvec.c',
'tb-maint.c',
'tcg-all.c',
'translate-all.c',
'translator.c',
))
if get_option('plugins')
@ -17,25 +21,13 @@ endif
libuser_ss.add_all(tcg_ss)
libsystem_ss.add_all(tcg_ss)
tcg_specific_ss = ss.source_set()
tcg_specific_ss.add(files(
'tcg-all.c',
'cpu-exec.c',
'tb-maint.c',
'translate-all.c',
))
tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
))
libuser_ss.add(files(
'user-exec.c',
'user-exec-stub.c',
))
libsystem_ss.add(files(
'cputlb.c',
'icount-common.c',
'monitor.c',
'tcg-accel-ops.c',


@ -20,8 +20,7 @@
#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H
#include "exec/cpu-defs.h"
#include "exec/exec-all.h"
#include "exec/vaddr.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "qemu/xxhash.h"


@ -45,11 +45,11 @@ void tb_unlock_pages(TranslationBlock *);
#endif
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
uintptr_t retaddr);
void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t ram_addr,
unsigned size, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
uintptr_t pc);
#endif


@ -20,14 +20,13 @@
#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "qemu/qtree.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "exec/log.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "exec/tb-flush.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-internal.h"
#include "system/tcg.h"
#include "tcg/tcg.h"
@ -35,7 +34,6 @@
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
#include "internal-target.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
@ -159,11 +157,7 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
/*
* In system mode we want L1_MAP to be based on ram offsets.
*/
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
@ -1012,7 +1006,8 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
@ -1035,17 +1030,16 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr)
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
tb_invalidate_phys_range(start, last);
tb_invalidate_phys_range(NULL, start, last);
}
/*
* Called with mmap_lock held. If pc is not 0 then it indicates the
* host PC of the faulting store instruction that caused this invalidate.
* Returns true if the caller needs to abort execution of the current
* TB (because it was modified by this store and the guest CPU has
* precise-SMC semantics).
* Returns true if the caller needs to abort execution of the current TB.
*/
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
uintptr_t pc)
{
TranslationBlock *current_tb;
bool current_tb_modified;
@ -1057,10 +1051,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* Without precise smc semantics, or when outside of a TB,
* we can skip to invalidate.
*/
#ifndef TARGET_HAS_PRECISE_SMC
pc = 0;
#endif
if (!pc) {
if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) {
tb_invalidate_phys_page(addr);
return false;
}
@ -1083,15 +1074,14 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* the CPU state.
*/
current_tb_modified = true;
cpu_restore_state_from_tb(current_cpu, current_tb, pc);
cpu_restore_state_from_tb(cpu, current_tb, pc);
}
tb_phys_invalidate__locked(tb);
}
if (current_tb_modified) {
/* Force execution of one insn next time. */
CPUState *cpu = current_cpu;
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
return true;
}
return false;
@ -1100,23 +1090,28 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
/*
* @p must be non-NULL.
* Call with all @pages locked.
* (@cpu, @retaddr) may be (NULL, 0) outside of a cpu context,
* in which case precise_smc need not be detected.
*/
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tb_invalidate_phys_page_range__locked(CPUState *cpu,
struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */
TranslationBlock *current_tb = NULL;
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) {
current_tb = tcg_tb_lookup(retaddr);
}
/*
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
@ -1134,8 +1129,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
if (unlikely(current_tb == tb) &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
* If we are modifying the current TB, we must stop
@ -1145,9 +1139,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* restore the CPU state.
*/
current_tb_modified = true;
cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
cpu_restore_state_from_tb(cpu, current_tb, retaddr);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate__locked(tb);
}
}
@ -1157,15 +1150,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tlb_unprotect_code(start);
}
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
if (unlikely(current_tb_modified)) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(current_cpu);
cpu_loop_exit_noexc(cpu);
}
#endif
}
/*
@ -1175,7 +1166,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
tb_page_addr_t last)
{
struct page_collection *pages;
tb_page_addr_t index, index_last;
@ -1194,44 +1186,30 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
tb_invalidate_phys_page_range__locked(pages, pd,
tb_invalidate_phys_page_range__locked(cpu, pages, pd,
page_start, page_last, 0);
}
page_collection_unlock(pages);
}
/*
* Call with all @pages in the range [@start, @start + len[ locked.
*/
static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
tb_page_addr_t start,
unsigned len, uintptr_t ra)
{
PageDesc *p;
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}
assert_page_locked(p);
tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
}
/*
* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with
* iothread mutex not held.
*/
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
uintptr_t retaddr)
void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t start,
unsigned len, uintptr_t ra)
{
struct page_collection *pages;
PageDesc *p = page_find(start >> TARGET_PAGE_BITS);
pages = page_collection_lock(ram_addr, ram_addr + size - 1);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
if (p) {
ram_addr_t last = start + len - 1;
struct page_collection *pages = page_collection_lock(start, last);
tb_invalidate_phys_page_range__locked(cpu, pages, p,
start, last, ra);
page_collection_unlock(pages);
}
}
#endif /* CONFIG_USER_ONLY */


@ -36,15 +36,11 @@
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#include "qemu/target-info.h"
#if defined(CONFIG_USER_ONLY)
#include "hw/qdev-core.h"
#else
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "system/tcg.h"
#endif
#include "accel/tcg/cpu-ops.h"
#include "internal-common.h"
#include "cpu-param.h"
struct TCGState {


@ -7,26 +7,7 @@
#define ACCEL_TCG_TLB_BOUNDS_H
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
#define CPU_TLB_DYN_DEFAULT_BITS 8
# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
* Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
* 2**34 == 16G of address space. This is roughly what one would expect a
* TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
* Skylake's Level-2 STLB has 16 1G entries.
* Also, make sure we do not size the TLB past the guest's address space.
*/
# ifdef TARGET_PAGE_BITS_VARY
# define CPU_TLB_DYN_MAX_BITS \
MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
# else
# define CPU_TLB_DYN_MAX_BITS \
MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
# endif
# endif
#endif /* ACCEL_TCG_TLB_BOUNDS_H */


@ -21,55 +21,23 @@
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "system/ram_addr.h"
#endif
#include "cpu-param.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
#include "tlb-bounds.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "qemu/target-info.h"
#include "exec/log.h"
#include "exec/icount.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"
#include "cpu.h"
TBContext tb_ctx;
@ -110,7 +78,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
val |= (int64_t)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
if (shift < 64 && (byte & 0x40)) {
val |= -(int64_t)1 << shift;
}
@ -121,7 +89,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
/* Encode the data collected about the instructions while compiling TB.
Place the data at BLOCK, and return the number of bytes consumed.
The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
The logical table consists of INSN_START_WORDS uint64_t's,
which come from the target's insn_start data, followed by a uintptr_t
which comes from the host pc of the end of the code implementing the insn.
@ -141,13 +109,13 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (i = 0, n = tb->icount; i < n; ++i) {
uint64_t prev, curr;
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
for (j = 0; j < INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
} else {
prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
prev = insn_data[(i - 1) * INSN_START_WORDS + j];
}
curr = insn_data[i * TARGET_INSN_START_WORDS + j];
curr = insn_data[i * INSN_START_WORDS + j];
p = encode_sleb128(p, curr - prev);
}
prev = (i == 0 ? 0 : insn_end_off[i - 1]);
@ -179,7 +147,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
return -1;
}
memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
memset(data, 0, sizeof(uint64_t) * INSN_START_WORDS);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
}
@ -189,7 +157,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
* at which the end of the insn exceeds host_pc.
*/
for (i = 0; i < num_insns; ++i) {
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
for (j = 0; j < INSN_START_WORDS; ++j) {
data[j] += decode_sleb128(&p);
}
iter_pc += decode_sleb128(&p);
@ -207,7 +175,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc)
{
uint64_t data[TARGET_INSN_START_WORDS];
uint64_t data[INSN_START_WORDS];
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
@ -291,9 +259,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
vaddr pc, uint64_t cs_base,
uint32_t flags, int cflags)
TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
{
CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb, *existing_tb;
@ -306,14 +272,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
assert_memory_lock();
qemu_thread_jit_write();
phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
phys_pc = get_page_addr_code_hostp(env, s.pc, &host_pc);
if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */
cflags = (cflags & ~CF_COUNT_MASK) | 1;
s.cflags = (s.cflags & ~CF_COUNT_MASK) | 1;
}
max_insns = cflags & CF_COUNT_MASK;
max_insns = s.cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = TCG_MAX_INSNS;
}
@ -333,12 +299,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
if (!(cflags & CF_PCREL)) {
tb->pc = pc;
if (!(s.cflags & CF_PCREL)) {
tb->pc = s.pc;
}
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
tb->cs_base = s.cs_base;
tb->flags = s.flags;
tb->cflags = s.cflags;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
@ -346,19 +312,18 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
tcg_ctx->page_bits = TARGET_PAGE_BITS;
tcg_ctx->page_mask = TARGET_PAGE_MASK;
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;
restart_translate:
trace_translate_block(tb, pc, tb->tc.ptr);
trace_translate_block(tb, s.pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
gen_code_size = setjmp_gen_code(env, tb, s.pc, host_pc, &max_insns, &ti);
if (unlikely(gen_code_size < 0)) {
switch (gen_code_size) {
case -1:
@ -435,10 +400,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
perf_report_code(s.pc, tb, tcg_splitwx_to_rx(gen_code_buf));
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) {
qemu_log_in_addr_range(s.pc)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
int code_size, data_size;
@ -460,7 +425,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@ -473,7 +438,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
chunk_start = chunk_end;
@ -591,15 +556,11 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu_env(cpu);
vaddr pc;
uint64_t cs_base;
tb_page_addr_t addr;
uint32_t flags;
TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
tb_page_addr_t addr = get_page_addr_code(env, s.pc);
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
if (addr != -1) {
tb_invalidate_phys_range(addr, addr);
tb_invalidate_phys_range(cpu, addr, addr);
}
}
}

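One detail from the translate-all.c hunks deserves a closer look: the unwind data is a stream of signed LEB128 deltas, and the sign-extension test in decode_sleb128() now checks against the fixed width 64 (the width of int64_t) instead of TARGET_LONG_BITS. For reference, a self-contained sketch of the decoder shown above, paired with a matching encoder (the encoder here is the textbook SLEB128 algorithm, not copied from this patch):

#include <stdint.h>

static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more;

    do {
        uint8_t byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && !(byte & 0x40)) ||
                 (val == -1 && (byte & 0x40)));
        if (more) {
            byte |= 0x80;       /* continuation bit */
        }
        *p++ = byte;
    } while (more);
    return p;
}

static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int shift = 0;
    uint8_t byte;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;    /* sign-extend */
    }

    *pp = p;
    return val;
}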

@ -19,19 +19,20 @@
#include "qemu/osdep.h"
#include "accel/tcg/cpu-ops.h"
#include "disas/disas.h"
#include "cpu.h"
#include "exec/vaddr.h"
#include "exec/exec-all.h"
#include "exec/tlb-flags.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/cpu-ldst-common.h"
#include "accel/tcg/helper-retaddr.h"
#include "accel/tcg/probe.h"
#include "user/cpu_loop.h"
#include "user/guest-host.h"
#include "qemu/main-loop.h"
#include "user/page-protection.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic128.h"
#include "qemu/bswap.h"
#include "qemu/int128.h"
@ -39,7 +40,6 @@
#include "tcg/tcg-ldst.h"
#include "backend-ldst.h"
#include "internal-common.h"
#include "internal-target.h"
#include "tb-internal.h"
__thread uintptr_t helper_retaddr;
@ -126,9 +126,9 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
* guest, we'd end up in an infinite loop of retrying the faulting access.
*/
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
uintptr_t host_pc, abi_ptr guest_addr)
uintptr_t host_pc, vaddr guest_addr)
{
switch (page_unprotect(guest_addr, host_pc)) {
switch (page_unprotect(cpu, guest_addr, host_pc)) {
case 0:
/*
* Fault not caused by a page marked unwritable to protect
@ -162,7 +162,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root;
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
static PageFlagsNode *pageflags_find(vaddr start, vaddr last)
{
IntervalTreeNode *n;
@ -170,8 +170,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
target_ulong last)
static PageFlagsNode *pageflags_next(PageFlagsNode *p, vaddr start, vaddr last)
{
IntervalTreeNode *n;
@ -200,13 +199,22 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
return rc;
}
static int dump_region(void *priv, target_ulong start,
target_ulong end, unsigned long prot)
static int dump_region(void *opaque, vaddr start, vaddr end, int prot)
{
FILE *f = (FILE *)priv;
FILE *f = opaque;
uint64_t mask;
int width;
fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
start, end, end - start,
if (guest_addr_max <= UINT32_MAX) {
mask = UINT32_MAX, width = 8;
} else {
mask = UINT64_MAX, width = 16;
}
fprintf(f, "%0*" PRIx64 "-%0*" PRIx64 " %0*" PRIx64 " %c%c%c\n",
width, start & mask,
width, end & mask,
width, (end - start) & mask,
((prot & PAGE_READ) ? 'r' : '-'),
((prot & PAGE_WRITE) ? 'w' : '-'),
((prot & PAGE_EXEC) ? 'x' : '-'));
@ -216,14 +224,14 @@ static int dump_region(void *priv, target_ulong start,
/* dump memory mappings */
void page_dump(FILE *f)
{
const int length = sizeof(target_ulong) * 2;
int width = guest_addr_max <= UINT32_MAX ? 8 : 16;
fprintf(f, "%-*s %-*s %-*s %s\n",
length, "start", length, "end", length, "size", "prot");
width, "start", width, "end", width, "size", "prot");
walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
int page_get_flags(vaddr address)
{
PageFlagsNode *p = pageflags_find(address, address);
@ -246,7 +254,7 @@ int page_get_flags(target_ulong address)
}
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
static void pageflags_create(vaddr start, vaddr last, int flags)
{
PageFlagsNode *p = g_new(PageFlagsNode, 1);
@ -257,13 +265,13 @@ static void pageflags_create(target_ulong start, target_ulong last, int flags)
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
static bool pageflags_unset(vaddr start, vaddr last)
{
bool inval_tb = false;
while (true) {
PageFlagsNode *p = pageflags_find(start, last);
target_ulong p_last;
vaddr p_last;
if (!p) {
break;
@ -302,8 +310,7 @@ static bool pageflags_unset(target_ulong start, target_ulong last)
* A subroutine of page_set_flags: nothing overlaps [start,last],
* but check adjacent mappings and maybe merge into a single range.
*/
static void pageflags_create_merge(target_ulong start, target_ulong last,
int flags)
static void pageflags_create_merge(vaddr start, vaddr last, int flags)
{
PageFlagsNode *next = NULL, *prev = NULL;
@ -354,11 +361,11 @@ static void pageflags_create_merge(target_ulong start, target_ulong last,
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
static bool pageflags_set_clear(vaddr start, vaddr last,
int set_flags, int clear_flags)
{
PageFlagsNode *p;
target_ulong p_start, p_last;
vaddr p_start, p_last;
int p_flags, merge_flags;
bool inval_tb = false;
@ -493,7 +500,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
return inval_tb;
}
void page_set_flags(target_ulong start, target_ulong last, int flags)
void page_set_flags(vaddr start, vaddr last, int flags)
{
bool reset = false;
bool inval_tb = false;
@ -502,7 +509,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
assert(start <= last);
assert(last <= GUEST_ADDR_MAX);
assert(last <= guest_addr_max);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
@ -529,13 +536,13 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
tb_invalidate_phys_range(start, last);
tb_invalidate_phys_range(NULL, start, last);
}
}
bool page_check_range(target_ulong start, target_ulong len, int flags)
bool page_check_range(vaddr start, vaddr len, int flags)
{
target_ulong last;
vaddr last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
bool ret;
@ -584,7 +591,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
break;
}
/* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) {
if (!page_unprotect(NULL, start, 0)) {
ret = false;
break;
}
@ -611,20 +618,19 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
bool page_check_range_empty(target_ulong start, target_ulong last)
bool page_check_range_empty(vaddr start, vaddr last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
target_ulong len, target_ulong align)
vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align)
{
target_ulong len_m1, align_m1;
vaddr len_m1, align_m1;
assert(min <= max);
assert(max <= GUEST_ADDR_MAX);
assert(max <= guest_addr_max);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
@ -662,7 +668,7 @@ target_ulong page_find_range_empty(target_ulong min, target_ulong max,
void tb_lock_page0(tb_page_addr_t address)
{
PageFlagsNode *p;
target_ulong start, last;
vaddr start, last;
int host_page_size = qemu_real_host_page_size();
int prot;
@ -704,11 +710,13 @@ void tb_lock_page0(tb_page_addr_t address)
* immediately exited. (We can only return 2 if the 'pc' argument is
* non-zero.)
*/
int page_unprotect(tb_page_addr_t address, uintptr_t pc)
int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc)
{
PageFlagsNode *p;
bool current_tb_invalidated;
assert((cpu == NULL) == (pc == 0));
/*
* Technically this isn't safe inside a signal handler. However we
* know this only ever happens in a synchronous SEGV handler, so in
@ -731,15 +739,15 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
* this thread raced with another one which got here first and
* set the page to PAGE_WRITE and did the TB invalidate for us.
*/
#ifdef TARGET_HAS_PRECISE_SMC
if (pc && cpu->cc->tcg_ops->precise_smc) {
TranslationBlock *current_tb = tcg_tb_lookup(pc);
if (current_tb) {
current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
}
#endif
}
} else {
int host_page_size = qemu_real_host_page_size();
target_ulong start, len, i;
vaddr start, len, i;
int prot;
if (host_page_size <= TARGET_PAGE_SIZE) {
@ -747,14 +755,15 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
len = TARGET_PAGE_SIZE;
prot = p->flags | PAGE_WRITE;
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
current_tb_invalidated =
tb_invalidate_phys_page_unwind(cpu, start, pc);
} else {
start = address & -host_page_size;
len = host_page_size;
prot = 0;
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
target_ulong addr = start + i;
vaddr addr = start + i;
p = pageflags_find(addr, addr);
if (p) {
@ -770,7 +779,7 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
* the corresponding translated code.
*/
current_tb_invalidated |=
tb_invalidate_phys_page_unwind(addr, pc);
tb_invalidate_phys_page_unwind(cpu, addr, pc);
}
}
if (prot & PAGE_EXEC) {
@ -850,6 +859,12 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL;
}
void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
MMUAccessType access_type, int mmu_idx)
{
return g2h(env_cpu(env), addr);
}
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
@ -864,7 +879,6 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
return addr;
}
#ifdef TARGET_PAGE_DATA_SIZE
/*
* Allocate chunks of target data together. For the only current user,
* if we allocate one hunk per page, we have overhead of 40/128 or 40%.
@ -880,10 +894,16 @@ typedef struct TargetPageDataNode {
} TargetPageDataNode;
static IntervalTreeRoot targetdata_root;
static size_t target_page_data_size;
void page_reset_target_data(target_ulong start, target_ulong last)
void page_reset_target_data(vaddr start, vaddr last)
{
IntervalTreeNode *n, *next;
size_t size = target_page_data_size;
if (likely(size == 0)) {
return;
}
assert_memory_lock();
@ -895,7 +915,7 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n != NULL;
n = next,
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
target_ulong n_start, n_last, p_ofs, p_len;
vaddr n_start, n_last, p_ofs, p_len;
TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
if (n->start >= start && n->last <= last) {
@ -914,16 +934,21 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n_last = MIN(last, n->last);
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
p_len * TARGET_PAGE_DATA_SIZE);
memset(t->data + p_ofs * size, 0, p_len * size);
}
}
void *page_get_target_data(target_ulong address)
void *page_get_target_data(vaddr address, size_t size)
{
IntervalTreeNode *n;
TargetPageDataNode *t;
target_ulong page, region, p_ofs;
vaddr page, region, p_ofs;
/* Remember the size from the first call, and it should be constant. */
if (unlikely(target_page_data_size != size)) {
assert(target_page_data_size == 0);
target_page_data_size = size;
}
page = address & TARGET_PAGE_MASK;
region = address & TBD_MASK;
@ -939,8 +964,7 @@ void *page_get_target_data(target_ulong address)
mmap_lock();
n = interval_tree_iter_first(&targetdata_root, page, page);
if (!n) {
t = g_malloc0(sizeof(TargetPageDataNode)
+ TPD_PAGES * TARGET_PAGE_DATA_SIZE);
t = g_malloc0(sizeof(TargetPageDataNode) + TPD_PAGES * size);
n = &t->itree;
n->start = region;
n->last = region | ~TBD_MASK;
@ -951,11 +975,8 @@ void *page_get_target_data(target_ulong address)
t = container_of(n, TargetPageDataNode, itree);
p_ofs = (page - region) >> TARGET_PAGE_BITS;
return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
return t->data + p_ofs * size;
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
/* The system-mode versions of these helpers are in cputlb.c. */
@ -1017,7 +1038,7 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
* be under mmap_lock() in order to prevent the creation of
* another TranslationBlock in between.
*/
tb_invalidate_phys_range(addr, addr + l - 1);
tb_invalidate_phys_range(NULL, addr, addr + l - 1);
written = pwrite(fd, buf, l,
(off_t)(uintptr_t)g2h_untagged(addr));
if (written != l) {
@ -1123,7 +1144,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}
static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;

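Among the user-exec.c changes above, page_get_target_data() now receives the per-page data size at run time in place of the old compile-time TARGET_PAGE_DATA_SIZE; the first call pins that size, page_reset_target_data() reuses it, and an assertion rejects any later mismatch. A hypothetical caller, with the constant name invented for illustration:

/* Hypothetical per-page metadata user (e.g. memory tagging). The
 * size must be identical on every call; the first call records it. */
enum { TAG_BYTES_PER_PAGE = 32 };

static void *get_page_tags(vaddr addr)
{
    return page_get_target_data(addr, TAG_BYTES_PER_PAGE);
}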

@ -36,7 +36,6 @@
#include "qemu/help_option.h"
#include "qemu/module.h"
#include "qemu/plugin.h"
#include "exec/exec-all.h"
#include "user/guest-base.h"
#include "user/page-protection.h"
#include "tcg/startup.h"
@ -90,6 +89,7 @@ bool have_guest_base;
#endif
unsigned long reserved_va;
unsigned long guest_addr_max;
const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
@ -501,6 +501,13 @@ int main(int argc, char **argv)
/* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
reserved_va = max_reserved_va;
}
if (reserved_va != 0) {
guest_addr_max = reserved_va;
} else if (MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) {
guest_addr_max = UINT32_MAX;
} else {
guest_addr_max = ~0ul;
}
if (getenv("QEMU_STRACE")) {
do_strace = 1;


@ -23,7 +23,6 @@
#include "cpu.h"
#include "qemu/units.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/exec-all.h"
#include "user/abitypes.h"
#include "user/cpu_loop.h"


@ -1030,7 +1030,7 @@ void process_pending_signals(CPUArchState *env)
ts->in_sigsuspend = false;
}
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
MMUAccessType access_type, bool maperr, uintptr_t ra)
{
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
@ -1046,7 +1046,7 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
cpu_loop_exit_restore(cpu, ra);
}
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra)
{
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;


@ -1,6 +1,5 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/target_long.h"
#include "helper_regs.h"


@ -26,7 +26,6 @@
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qom/object.h"
#include "exec/exec-all.h"
#include "trace.h"
#include "riscv-iommu.h"


@ -36,7 +36,6 @@
#include "sh7750_regnames.h"
#include "hw/sh4/sh_intc.h"
#include "hw/timer/tmu012.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "trace.h"


@ -502,62 +502,4 @@ static inline uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
return cpu_ldq_code_mmu(env, addr, oi, 0);
}
/**
* tlb_vaddr_to_host:
* @env: CPUArchState
* @addr: guest virtual address to look up
* @access_type: 0 for read, 1 for write, 2 for execute
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
* If we can translate a host virtual address suitable for direct RAM
* access, without causing a guest exception, then return it.
* Otherwise (TLB entry is for an I/O access, guest software
* TLB fill required, etc) return NULL.
*/
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
MMUAccessType access_type, int mmu_idx);
#endif
/*
* For user-only, helpers that use guest to host address translation
* must protect the actual host memory access by recording 'retaddr'
* for the signal handler. This is required for a race condition in
* which another thread unmaps the page between a probe and the
* actual access.
*/
#ifdef CONFIG_USER_ONLY
extern __thread uintptr_t helper_retaddr;
static inline void set_helper_retaddr(uintptr_t ra)
{
helper_retaddr = ra;
/*
* Ensure that this write is visible to the SIGSEGV handler that
* may be invoked due to a subsequent invalid memory operation.
*/
signal_barrier();
}
static inline void clear_helper_retaddr(void)
{
/*
* Ensure that previous memory operations have succeeded before
* removing the data visible to the signal handler.
*/
signal_barrier();
helper_retaddr = 0;
}
#else
#define set_helper_retaddr(ra) do { } while (0)
#define clear_helper_retaddr() do { } while (0)
#endif
#endif /* ACCEL_TCG_CPU_LDST_H */

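The kernel-doc deleted above moves, along with the declaration, to the new accel/tcg/probe.h; the contract it documents is unchanged: a non-NULL result is a host pointer that can be dereferenced without raising a guest exception, while NULL means an I/O access, a needed TLB fill, or similar. A hedged sketch of the usual call pattern (the wrapper name is made up; the called APIs are existing QEMU ones):

static uint32_t ldl_fast_or_slow(CPUArchState *env, vaddr addr,
                                 int mmu_idx, uintptr_t ra)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host) {
        return ldl_p(host);     /* direct RAM access, cannot fault */
    }
    /* Slow path: may fill the TLB or raise a guest exception. */
    return cpu_ldl_mmuidx_ra(env, addr, mmu_idx, ra);
}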

@ -16,6 +16,7 @@
#include "exec/memop.h"
#include "exec/mmu-access-type.h"
#include "exec/vaddr.h"
#include "accel/tcg/tb-cpu-state.h"
#include "tcg/tcg-mo.h"
struct TCGCPUOps {
@ -28,6 +29,13 @@ struct TCGCPUOps {
*/
bool mttcg_supported;
/**
* @precise_smc: Stores which modify code within the current TB force
* the TB to exit; the next executed instruction will see
* the result of the store.
*/
bool precise_smc;
/**
* @guest_default_memory_order: default barrier that is required
* for the guest memory ordering.
@ -53,6 +61,12 @@ struct TCGCPUOps {
*/
void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
int *max_insns, vaddr pc, void *host_pc);
/**
* @get_tb_cpu_state: Extract CPU state for a TCG #TranslationBlock
*
* Fill in all data required to select or compile a TranslationBlock.
*/
TCGTBCPUState (*get_tb_cpu_state)(CPUState *cs);
/**
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
*
@ -143,11 +157,20 @@ struct TCGCPUOps {
*/
void (*record_sigbus)(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra);
/**
* untagged_addr: Remove an ignored tag from an address
* @cpu: cpu context
* @addr: tagged guest address
*/
vaddr (*untagged_addr)(CPUState *cs, vaddr addr);
#else
/** @do_interrupt: Callback for interrupt handling. */
void (*do_interrupt)(CPUState *cpu);
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/** @cpu_exec_reset: Callback for reset in cpu_exec. */
void (*cpu_exec_reset)(CPUState *cpu);
/**
* @cpu_exec_halt: Callback for handling halt in cpu_exec.
*


@ -8,10 +8,6 @@
#ifndef ACCEL_TCG_GETPC_H
#define ACCEL_TCG_GETPC_H
#ifndef CONFIG_TCG
#error Can only include this header with TCG
#endif
/* GETPC is the true target of the return instruction that we'll execute. */
#ifdef CONFIG_TCG_INTERPRETER
extern __thread uintptr_t tci_tb_ptr;


@ -0,0 +1,43 @@
/*
* Get user helper pc for memory unwinding.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_HELPER_RETADDR_H
#define ACCEL_TCG_HELPER_RETADDR_H
/*
* For user-only, helpers that use guest to host address translation
* must protect the actual host memory access by recording 'retaddr'
* for the signal handler. This is required for a race condition in
* which another thread unmaps the page between a probe and the
* actual access.
*/
#ifdef CONFIG_USER_ONLY
extern __thread uintptr_t helper_retaddr;
static inline void set_helper_retaddr(uintptr_t ra)
{
helper_retaddr = ra;
/*
* Ensure that this write is visible to the SIGSEGV handler that
* may be invoked due to a subsequent invalid memory operation.
*/
signal_barrier();
}
static inline void clear_helper_retaddr(void)
{
/*
* Ensure that previous memory operations have succeeded before
* removing the data visible to the signal handler.
*/
signal_barrier();
helper_retaddr = 0;
}
#else
#define set_helper_retaddr(ra) do { } while (0)
#define clear_helper_retaddr() do { } while (0)
#endif
#endif /* ACCEL_TCG_HELPER_RETADDR_H */
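The two barriers above make the sequence safe against a concurrent munmap; a sketch of the intended call pattern in a user-only helper (the helper name and load width are hypothetical, not part of this diff):

/* Sketch: bracket a direct host access so the SIGSEGV handler can
 * unwind to the guest instruction via helper_retaddr. */
static uint32_t helper_load_guarded(CPUArchState *env, vaddr addr, uintptr_t ra)
{
    uint32_t *host = g2h(env_cpu(env), addr);   /* guest -> host pointer */
    uint32_t val;

    set_helper_retaddr(ra);   /* publish the unwind point first */
    val = *host;              /* may fault if another thread unmapped it */
    clear_helper_retaddr();   /* barrier, then retract the record */
    return val;
}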

include/accel/tcg/iommu.h (new file, 41 lines)

@ -0,0 +1,41 @@
/*
* TCG IOMMU translations.
*
* Copyright (c) 2003 Fabrice Bellard
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef ACCEL_TCG_IOMMU_H
#define ACCEL_TCG_IOMMU_H
#ifdef CONFIG_USER_ONLY
#error Cannot include accel/tcg/iommu.h from user emulation
#endif
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
/**
* iotlb_to_section:
* @cpu: CPU performing the access
* @index: TCG CPU IOTLB entry
*
* Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
* it refers to. @index will have been initially created and returned
* by memory_region_section_get_iotlb().
*/
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
MemoryRegionSection *address_space_translate_for_iotlb(CPUState *cpu,
int asidx,
hwaddr addr,
hwaddr *xlat,
hwaddr *plen,
MemTxAttrs attrs,
int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section);
#endif


@ -1,30 +1,14 @@
/*
* internal execution defines for qemu
* Probe guest virtual addresses for access permissions.
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#ifndef EXEC_ALL_H
#define EXEC_ALL_H
#ifndef ACCEL_TCG_PROBE_H
#define ACCEL_TCG_PROBE_H
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"
#if defined(CONFIG_TCG)
#include "accel/tcg/getpc.h"
#include "exec/vaddr.h"
/**
* probe_access:
@ -118,36 +102,21 @@ int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
void **phost, CPUTLBEntryFull **pfull);
#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */
/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
#if !defined(CONFIG_USER_ONLY)
/**
* iotlb_to_section:
* @cpu: CPU performing the access
* @index: TCG CPU IOTLB entry
* tlb_vaddr_to_host:
* @env: CPUArchState
* @addr: guest virtual address to look up
* @access_type: 0 for read, 1 for write, 2 for execute
* @mmu_idx: MMU index to use for lookup
*
* Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
* it refers to. @index will have been initially created and returned
* by memory_region_section_get_iotlb().
* Look up the specified guest virtual address in the TCG softmmu TLB.
* If we can translate a host virtual address suitable for direct RAM
* access, without causing a guest exception, then return it.
* Otherwise (TLB entry is for an I/O access, guest software
* TLB fill required, etc) return NULL.
*/
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
#endif
#if !defined(CONFIG_USER_ONLY)
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section);
#endif
void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
MMUAccessType access_type, int mmu_idx);
#endif
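A typical caller (hypothetical; shown only to illustrate the relocated declaration) tries the direct host pointer first and falls back to a per-byte slow path when NULL comes back:

/* Sketch: fast path through tlb_vaddr_to_host. Assumes the range
 * addr .. addr+len-1 stays within one guest page. */
static void copy_from_guest(CPUArchState *env, void *dst, vaddr addr,
                            size_t len, int mmu_idx)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host) {
        memcpy(dst, host, len);        /* RAM-backed: cannot fault */
    } else {
        uint8_t *d = dst;
        for (size_t i = 0; i < len; i++) {
            /* I/O region or TLB fill needed: use the slow accessor. */
            d[i] = cpu_ldub_mmuidx_ra(env, addr + i, mmu_idx, 0);
        }
    }
}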


@ -0,0 +1,18 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Definition of TCGTBCPUState.
*/
#ifndef EXEC_TB_CPU_STATE_H
#define EXEC_TB_CPU_STATE_H
#include "exec/vaddr.h"
typedef struct TCGTBCPUState {
vaddr pc;
uint32_t flags;
uint32_t cflags;
uint64_t cs_base;
} TCGTBCPUState;
#endif
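For a hypothetical target whose TB selection depends only on the PC, the matching TCGCPUOps::get_tb_cpu_state hook reduces to the following sketch (the alpha, avr, hexagon and hppa conversions below follow the same shape; cflags is filled in by common cpu-exec code, not by the hook):

static TCGTBCPUState mytarget_get_tb_cpu_state(CPUState *cs)
{
    CPUMyTargetState *env = cpu_env(cs);   /* hypothetical target state */

    /* pc selects the TB; flags and cs_base stay zero for this target. */
    return (TCGTBCPUState){ .pc = env->pc };
}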


@ -13,4 +13,6 @@
#include "exec/helper-proto.h.inc"
#undef HELPER_H
#include "accel/tcg/getpc.h"
#endif /* HELPER_PROTO_COMMON_H */


@ -37,7 +37,6 @@
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_BIG_ENDIAN
#pragma GCC poison TCG_GUEST_DEFAULT_MO
#pragma GCC poison TARGET_HAS_PRECISE_SMC
#pragma GCC poison TARGET_LONG_BITS
#pragma GCC poison TARGET_FMT_lx


@ -207,4 +207,8 @@ static inline void tb_set_page_addr1(TranslationBlock *tb,
#endif
}
/* TranslationBlock invalidate API */
void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
tb_page_addr_t last);
#endif /* EXEC_TRANSLATION_BLOCK_H */


@ -14,6 +14,12 @@
typedef struct TargetInfo {
/* runtime equivalent of TARGET_NAME definition */
const char *target_name;
/* runtime equivalent of TARGET_LONG_BITS definition */
unsigned long_bits;
/* runtime equivalent of CPU_RESOLVING_TYPE definition */
const char *cpu_type;
/* QOM typename machines for this binary must implement */
const char *machine_typename;
} TargetInfo;
/**


@ -16,6 +16,21 @@
*/
const char *target_name(void);
/**
* target_long_bits:
*
* Returns: number of bits in a long type for this target (32 or 64).
*/
unsigned target_long_bits(void);
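As an illustration (not part of this diff), common code can now branch on the value at run time instead of compiling per-target:

/* Sketch: size a guest register dump without TARGET_LONG_BITS. */
static size_t guest_reg_bytes(unsigned nregs)
{
    return nregs * (target_long_bits() / 8);   /* 4 or 8 bytes each */
}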
/**
* target_machine_typename:
*
* Returns: Name of the QOM interface implemented by machines
* usable on this target binary.
*/
const char *target_machine_typename(void);
/**
* target_cpu_type:
*


@ -24,7 +24,6 @@
#include "exec/cputlb.h"
#include "exec/ramlist.h"
#include "system/ramblock.h"
#include "exec/exec-all.h"
#include "system/memory.h"
#include "exec/target_page.h"
#include "qemu/rcu.h"


@ -1,13 +1,12 @@
/* SPDX-License-Identifier: MIT */
/*
* Define TARGET_INSN_START_WORDS
* Define INSN_START_WORDS
* Copyright (c) 2008 Fabrice Bellard
*/
#ifndef TARGET_INSN_START_WORDS
#ifndef TCG_INSN_START_WORDS
#define TCG_INSN_START_WORDS
#include "cpu-param.h"
#define INSN_START_WORDS 3
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif /* TARGET_INSN_START_WORDS */
#endif /* TCG_INSN_START_WORDS */


@ -9,6 +9,7 @@
#define TCG_TCG_OP_H
#include "tcg/tcg-op-common.h"
#include "tcg/insn-start-words.h"
#include "exec/target_long.h"
#ifndef TARGET_LONG_BITS
@ -23,24 +24,34 @@
# error
#endif
#if INSN_START_WORDS != 3
# error Mismatch with insn-start-words.h
#endif
#if TARGET_INSN_START_EXTRA_WORDS == 0
static inline void tcg_gen_insn_start(target_ulong pc)
{
TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS);
TCGOp *op = tcg_emit_op(INDEX_op_insn_start,
INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS);
tcg_set_insn_start_param(op, 0, pc);
tcg_set_insn_start_param(op, 1, 0);
tcg_set_insn_start_param(op, 2, 0);
}
#elif TARGET_INSN_START_EXTRA_WORDS == 1
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{
TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS);
TCGOp *op = tcg_emit_op(INDEX_op_insn_start,
INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS);
tcg_set_insn_start_param(op, 0, pc);
tcg_set_insn_start_param(op, 1, a1);
tcg_set_insn_start_param(op, 2, 0);
}
#elif TARGET_INSN_START_EXTRA_WORDS == 2
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2)
{
TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS);
TCGOp *op = tcg_emit_op(INDEX_op_insn_start,
INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS);
tcg_set_insn_start_param(op, 0, pc);
tcg_set_insn_start_param(op, 1, a1);
tcg_set_insn_start_param(op, 2, a2);


@ -114,8 +114,7 @@ DEF(extrh_i64_i32, 1, 1, 0, 0)
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
/* There are tcg_ctx->insn_start_words here, not just one. */
DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)
DEF(insn_start, 0, 0, DATA64_ARGS * INSN_START_WORDS, TCG_OPF_NOT_PRESENT)
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)


@ -34,6 +34,7 @@
#include "tcg-target-reg-bits.h"
#include "tcg-target.h"
#include "tcg/tcg-cond.h"
#include "tcg/insn-start-words.h"
#include "tcg/debug-assert.h"
/* XXX: make safe guess about sizes */
@ -359,7 +360,6 @@ struct TCGContext {
int page_mask;
uint8_t page_bits;
uint8_t tlb_dyn_max_bits;
uint8_t insn_start_words;
TCGBar guest_mo;
TCGRegSet reserved_regs;
@ -582,18 +582,19 @@ static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
return (TCGv_vec)temp_tcgv_i32(t);
}
static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
static inline TCGArg tcg_get_insn_param(TCGOp *op, unsigned arg)
{
return op->args[arg];
}
static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
static inline void tcg_set_insn_param(TCGOp *op, unsigned arg, TCGArg v)
{
op->args[arg] = v;
}
static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg)
static inline uint64_t tcg_get_insn_start_param(TCGOp *op, unsigned arg)
{
tcg_debug_assert(arg < INSN_START_WORDS);
if (TCG_TARGET_REG_BITS == 64) {
return tcg_get_insn_param(op, arg);
} else {
@ -602,8 +603,9 @@ static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg)
}
}
static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v)
static inline void tcg_set_insn_start_param(TCGOp *op, unsigned arg, uint64_t v)
{
tcg_debug_assert(arg < INSN_START_WORDS);
if (TCG_TARGET_REG_BITS == 64) {
tcg_set_insn_param(op, arg, v);
} else {

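Reading the per-insn words back during unwind now goes through the bounds-checked accessor; a hypothetical consumer looks like:

/* Sketch: recover the state recorded by tcg_gen_insn_start. */
uint64_t data[INSN_START_WORDS];

for (unsigned i = 0; i < INSN_START_WORDS; i++) {
    data[i] = tcg_get_insn_start_param(op, i);
}
/* data[0] is the guest pc for most targets; the extra words are
 * target-defined (cs_base, flags, ...). */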

@ -20,11 +20,9 @@
#ifndef USER_CPU_LOOP_H
#define USER_CPU_LOOP_H
#include "exec/abi_ptr.h"
#include "exec/vaddr.h"
#include "exec/mmu-access-type.h"
#include "exec/log.h"
#include "exec/target_long.h"
#include "special-errno.h"
/**
* adjust_signal_pc:
@ -46,7 +44,7 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
* Return true if the write fault has been handled, and should be re-tried.
*/
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
uintptr_t host_pc, abi_ptr guest_addr);
uintptr_t host_pc, vaddr guest_addr);
/**
* cpu_loop_exit_sigsegv:
@ -59,7 +57,7 @@ bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
* Use the TCGCPUOps hook to record cpu state, do guest operating system
* specific things to raise SIGSEGV, and jump to the main cpu loop.
*/
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
bool maperr, uintptr_t ra);
@ -73,7 +71,7 @@ G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
* Use the TCGCPUOps hook to record cpu state, do guest operating system
* specific things to raise SIGBUS, and jump to the main cpu loop.
*/
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
uintptr_t ra);


@ -8,9 +8,9 @@
#ifndef USER_GUEST_HOST_H
#define USER_GUEST_HOST_H
#include "user/abitypes.h"
#include "exec/vaddr.h"
#include "user/guest-base.h"
#include "cpu.h"
#include "accel/tcg/cpu-ops.h"
/*
* If non-zero, the guest virtual address space is a contiguous subset
@ -23,59 +23,48 @@
extern unsigned long reserved_va;
/*
* Limit the guest addresses as best we can.
*
* When not using -R reserved_va, we cannot really limit the guest
* to less address space than the host. For 32-bit guests, this
* acts as a sanity check that we're not giving the guest an address
* that it cannot even represent. For 64-bit guests... the address
* might not be what the real kernel would give, but it is at least
* representable in the guest.
*
* TODO: Improve address allocation to avoid this problem, and to
* avoid setting bits at the top of guest addresses that might need
* to be used for tags.
* The last byte of the guest address space.
* If reserved_va is non-zero, guest_addr_max matches it.
* If reserved_va is zero, guest_addr_max equals the full guest space.
*/
#define GUEST_ADDR_MAX_ \
((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
extern unsigned long guest_addr_max;
#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
static inline vaddr cpu_untagged_addr(CPUState *cs, vaddr x)
{
const TCGCPUOps *tcg_ops = cs->cc->tcg_ops;
if (tcg_ops->untagged_addr) {
return tcg_ops->untagged_addr(cs, x);
}
return x;
}
#endif
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
static inline void *g2h_untagged(vaddr x)
{
return (void *)((uintptr_t)(x) + guest_base);
}
static inline void *g2h(CPUState *cs, abi_ptr x)
static inline void *g2h(CPUState *cs, vaddr x)
{
return g2h_untagged(cpu_untagged_addr(cs, x));
}
static inline bool guest_addr_valid_untagged(abi_ulong x)
static inline bool guest_addr_valid_untagged(vaddr x)
{
return x <= GUEST_ADDR_MAX;
return x <= guest_addr_max;
}
static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
static inline bool guest_range_valid_untagged(vaddr start, vaddr len)
{
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
return len - 1 <= guest_addr_max && start <= guest_addr_max - len + 1;
}
#define h2g_valid(x) \
(HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
(uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
((uintptr_t)(x) - guest_base <= guest_addr_max)
#define h2g_nocheck(x) ({ \
uintptr_t __ret = (uintptr_t)(x) - guest_base; \
(abi_ptr)__ret; \
(vaddr)__ret; \
})
#define h2g(x) ({ \

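With the GUEST_ADDR_MAX macro gone, run-time checks read the variable through the helpers above; a hypothetical caller validating a guest buffer before touching host memory:

/* Sketch: translate a guest range only after checking it is valid. */
static uint8_t *guest_ptr_checked(CPUState *cs, vaddr start, vaddr len)
{
    if (!guest_range_valid_untagged(start, len)) {
        return NULL;   /* wraps around or exceeds guest_addr_max */
    }
    return g2h(cs, start);
}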

@ -12,13 +12,12 @@
#error Cannot include this header from system emulation
#endif
#include "cpu-param.h"
#include "exec/target_long.h"
#include "exec/vaddr.h"
#include "exec/translation-block.h"
int page_unprotect(tb_page_addr_t address, uintptr_t pc);
int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc);
int page_get_flags(target_ulong address);
int page_get_flags(vaddr address);
/**
* page_set_flags:
@ -31,9 +30,9 @@ int page_get_flags(target_ulong address);
* The flag PAGE_WRITE_ORG is positioned automatically depending
* on PAGE_WRITE. The mmap_lock should already be held.
*/
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_set_flags(vaddr start, vaddr last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
void page_reset_target_data(vaddr start, vaddr last);
/**
* page_check_range
@ -45,7 +44,7 @@ void page_reset_target_data(target_ulong start, target_ulong last);
* Return false if any page is unmapped. Thus testing flags == 0 is
* equivalent to testing for flags == PAGE_VALID.
*/
bool page_check_range(target_ulong start, target_ulong last, int flags);
bool page_check_range(vaddr start, vaddr last, int flags);
/**
* page_check_range_empty:
@ -57,7 +56,7 @@ bool page_check_range(target_ulong start, target_ulong last, int flags);
* The memory lock must be held so that the caller can ensure
* the result stays true until a new mapping can be installed.
*/
bool page_check_range_empty(target_ulong start, target_ulong last);
bool page_check_range_empty(vaddr start, vaddr last);
/**
* page_find_range_empty
@ -71,26 +70,25 @@ bool page_check_range_empty(target_ulong start, target_ulong last);
* The memory lock must be held, as the caller will want to ensure
* the returned range stays empty until a new mapping can be installed.
*/
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
target_ulong len, target_ulong align);
vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align);
/**
* page_get_target_data(address)
* page_get_target_data
* @address: guest virtual address
* @size: per-page size
*
* Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
* Return @size bytes of out-of-band data to associate
* with the guest page at @address, allocating it if necessary. The
* caller should already have verified that the address is valid.
* The value of @size must be the same for every call.
*
* The memory will be freed when the guest page is deallocated,
* e.g. with the munmap system call.
*/
__attribute__((returns_nonnull))
void *page_get_target_data(target_ulong address);
typedef int (*walk_memory_regions_fn)(void *, target_ulong,
target_ulong, unsigned long);
void *page_get_target_data(vaddr address, size_t size);
typedef int (*walk_memory_regions_fn)(void *, vaddr, vaddr, int);
int walk_memory_regions(void *, walk_memory_regions_fn);
void page_dump(FILE *f);
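The new @size contract is exercised by the MTE conversion further down; a hypothetical caller that wants one out-of-band byte per guest page would do:

/* Sketch: @size must be identical on every call for this guest. */
uint8_t *data = page_get_target_data(address, 1);
*data = 1;   /* never NULL: the function is returns_nonnull */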


@ -4059,8 +4059,7 @@ static void bswap_note(struct elf_note *en)
/*
* Calculate file (dump) size of given memory region.
*/
static size_t vma_dump_size(target_ulong start, target_ulong end,
unsigned long flags)
static size_t vma_dump_size(vaddr start, vaddr end, int flags)
{
/* The area must be readable. */
if (!(flags & PAGE_READ)) {
@ -4253,14 +4252,14 @@ static int dump_write(int fd, const void *ptr, size_t size)
return (0);
}
static int wmr_page_unprotect_regions(void *opaque, target_ulong start,
target_ulong end, unsigned long flags)
static int wmr_page_unprotect_regions(void *opaque, vaddr start,
vaddr end, int flags)
{
if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) {
size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size());
while (1) {
page_unprotect(start, 0);
page_unprotect(NULL, start, 0);
if (end - start <= step) {
break;
}
@ -4275,8 +4274,8 @@ typedef struct {
size_t size;
} CountAndSizeRegions;
static int wmr_count_and_size_regions(void *opaque, target_ulong start,
target_ulong end, unsigned long flags)
static int wmr_count_and_size_regions(void *opaque, vaddr start,
vaddr end, int flags)
{
CountAndSizeRegions *css = opaque;
@ -4290,8 +4289,8 @@ typedef struct {
off_t offset;
} FillRegionPhdr;
static int wmr_fill_region_phdr(void *opaque, target_ulong start,
target_ulong end, unsigned long flags)
static int wmr_fill_region_phdr(void *opaque, vaddr start,
vaddr end, int flags)
{
FillRegionPhdr *d = opaque;
struct elf_phdr *phdr = d->phdr;
@ -4313,8 +4312,8 @@ static int wmr_fill_region_phdr(void *opaque, target_ulong start,
return 0;
}
static int wmr_write_region(void *opaque, target_ulong start,
target_ulong end, unsigned long flags)
static int wmr_write_region(void *opaque, vaddr start,
vaddr end, int flags)
{
int fd = *(int *)opaque;
size_t size = vma_dump_size(start, end, flags);


@ -40,7 +40,6 @@
#include "qemu/plugin.h"
#include "user/guest-base.h"
#include "user/page-protection.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "gdbstub/user.h"
#include "tcg/startup.h"
@ -123,6 +122,7 @@ static const char *last_log_filename;
#endif
unsigned long reserved_va;
unsigned long guest_addr_max;
static void usage(int exitcode);
@ -859,6 +859,13 @@ int main(int argc, char **argv, char **envp)
/* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
reserved_va = max_reserved_va;
}
if (reserved_va != 0) {
guest_addr_max = reserved_va;
} else if (MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) {
guest_addr_max = UINT32_MAX;
} else {
guest_addr_max = ~0ul;
}
/*
* Temporarily disable


@ -750,7 +750,7 @@ void force_sigsegv(int oldsig)
}
#endif
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
MMUAccessType access_type, bool maperr, uintptr_t ra)
{
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
@ -766,7 +766,7 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
cpu_loop_exit_restore(cpu, ra);
}
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
MMUAccessType access_type, uintptr_t ra)
{
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;


@ -8135,8 +8135,8 @@ static void open_self_maps_4(const struct open_self_maps_data *d,
* Callback for walk_memory_regions, when read_self_maps() fails.
* Proceed without the benefit of host /proc/self/maps cross-check.
*/
static int open_self_maps_3(void *opaque, target_ulong guest_start,
target_ulong guest_end, unsigned long flags)
static int open_self_maps_3(void *opaque, vaddr guest_start,
vaddr guest_end, int flags)
{
static const MapInfo mi = { .is_priv = true };
@ -8147,8 +8147,8 @@ static int open_self_maps_3(void *opaque, target_ulong guest_start,
/*
* Callback for walk_memory_regions, when read_self_maps() succeeds.
*/
static int open_self_maps_2(void *opaque, target_ulong guest_start,
target_ulong guest_end, unsigned long flags)
static int open_self_maps_2(void *opaque, vaddr guest_start,
vaddr guest_end, int flags)
{
const struct open_self_maps_data *d = opaque;
uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);


@ -19,7 +19,6 @@
#define LINUX_USER_USER_INTERNALS_H
#include "user/thunk.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
extern char *exec_path;


@ -9,7 +9,7 @@
#include "qemu/osdep.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/exec-all.h"
#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "semihosting/uaccess.h"


@ -29,9 +29,9 @@
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ops.h"
#include "accel/tcg/iommu.h"
#endif /* CONFIG_TCG */
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
@ -587,6 +587,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
return mr;
}
#ifdef CONFIG_TCG
typedef struct TCGIOMMUNotifier {
IOMMUNotifier n;
MemoryRegion *mr;
@ -746,6 +748,33 @@ translate_fail:
return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs)
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
AddressSpaceDispatch *d = cpuas->memory_dispatch;
int section_index = index & ~TARGET_PAGE_MASK;
MemoryRegionSection *ret;
assert(section_index < d->map.sections_nb);
ret = d->map.sections + section_index;
assert(ret->mr);
assert(ret->mr->ops);
return ret;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section)
{
AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
return section - d->map.sections;
}
#endif /* CONFIG_TCG */
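The two functions moved above are inverses over the dispatch map, assuming @attrs selects the same address space the section was mapped into; a hypothetical round trip from an RCU critical section:

hwaddr idx = memory_region_section_get_iotlb(cpu, section);
assert(iotlb_to_section(cpu, idx, attrs) == section);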
void cpu_address_space_init(CPUState *cpu, int asidx,
const char *prefix, MemoryRegion *mr)
{
@ -1002,14 +1031,6 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
return false;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section)
{
AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
return section - d->map.sections;
}
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);
@ -2669,23 +2690,6 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
return phys_section_add(map, &section);
}
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs)
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
AddressSpaceDispatch *d = cpuas->memory_dispatch;
int section_index = index & ~TARGET_PAGE_MASK;
MemoryRegionSection *ret;
assert(section_index < d->map.sections_nb);
ret = d->map.sections + section_index;
assert(ret->mr);
assert(ret->mr->ops);
return ret;
}
static void io_mem_init(void)
{
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
@ -2830,7 +2834,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
tb_invalidate_phys_range(addr, addr + length - 1);
tb_invalidate_phys_range(NULL, addr, addr + length - 1);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);


@ -27,6 +27,7 @@
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qemu/module.h"
#include "qemu/target-info.h"
#include "exec/cpu-common.h"
#include "exec/page-vary.h"
#include "hw/qdev-properties.h"
@ -1564,7 +1565,7 @@ static void machine_help_func(const QDict *qdict)
GSList *el;
const char *type = qdict_get_try_str(qdict, "type");
machines = object_class_get_list(TYPE_MACHINE, false);
machines = object_class_get_list(target_machine_typename(), false);
if (type) {
ObjectClass *machine_class = OBJECT_CLASS(find_machine(type, machines));
if (machine_class) {


@ -9,18 +9,17 @@
#include "qemu/osdep.h"
#include "qemu/target-info.h"
#include "qemu/target-info-impl.h"
#include "hw/boards.h"
#include "cpu.h"
static const TargetInfo target_info_stub = {
.target_name = TARGET_NAME,
.long_bits = TARGET_LONG_BITS,
.cpu_type = CPU_RESOLVING_TYPE,
.machine_typename = TYPE_MACHINE,
};
const TargetInfo *target_info(void)
{
return &target_info_stub;
}
const char *target_cpu_type(void)
{
return CPU_RESOLVING_TYPE;
}


@ -14,3 +14,18 @@ const char *target_name(void)
{
return target_info()->target_name;
}
unsigned target_long_bits(void)
{
return target_info()->long_bits;
}
const char *target_cpu_type(void)
{
return target_info()->cpu_type;
}
const char *target_machine_typename(void)
{
return target_info()->machine_typename;
}


@ -23,9 +23,9 @@
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ops.h"
#include "fpu/softfloat.h"
@ -41,6 +41,18 @@ static vaddr alpha_cpu_get_pc(CPUState *cs)
return env->pc;
}
static TCGTBCPUState alpha_get_tb_cpu_state(CPUState *cs)
{
CPUAlphaState *env = cpu_env(cs);
uint32_t flags = env->flags & ENV_FLAG_TB_MASK;
#ifdef CONFIG_USER_ONLY
flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus;
#endif
return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
}
static void alpha_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@ -232,8 +244,6 @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
};
#endif
#include "accel/tcg/cpu-ops.h"
static const TCGCPUOps alpha_tcg_ops = {
/* Alpha processors have a weak memory model */
.guest_default_memory_order = 0,
@ -241,6 +251,7 @@ static const TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
.translate_code = alpha_translate_code,
.get_tb_cpu_state = alpha_get_tb_cpu_state,
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
.restore_state_to_opc = alpha_restore_state_to_opc,
.mmu_index = alpha_cpu_mmu_index,
@ -252,6 +263,7 @@ static const TCGCPUOps alpha_tcg_ops = {
.tlb_fill = alpha_cpu_tlb_fill,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.cpu_exec_halt = alpha_cpu_has_work,
.cpu_exec_reset = cpu_reset,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,


@ -464,17 +464,6 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
MemTxResult response, uintptr_t retaddr);
#endif
static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags)
{
*pc = env->pc;
*cs_base = 0;
*pflags = env->flags & ENV_FLAG_TB_MASK;
#ifdef CONFIG_USER_ONLY
*pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#endif
}
#ifdef CONFIG_USER_ONLY
/* Copied from linux ieee_swcr_to_fpcr. */
static inline uint64_t alpha_ieee_swcr_to_fpcr(uint64_t swcr)


@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"


@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"


@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr)


@ -21,7 +21,6 @@
#include "cpu.h"
#include "system/cpus.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"


@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"


@ -17,14 +17,9 @@
#endif
#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
# define TARGET_TAGGED_ADDRESSES
# ifdef __FreeBSD__
# define TARGET_PAGE_BITS 12
# else
# if defined(TARGET_AARCH64) && defined(CONFIG_LINUX)
/* Allow user-only to vary page size from 4k */
# define TARGET_PAGE_BITS_VARY
# endif
# else
# define TARGET_PAGE_BITS 12
# endif


@ -33,7 +33,6 @@
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
@ -2672,7 +2671,31 @@ static const char *arm_gdb_get_core_xml_file(CPUState *cs)
return "arm-core.xml";
}
#ifndef CONFIG_USER_ONLY
#ifdef CONFIG_USER_ONLY
/**
* aarch64_untagged_addr:
*
* Remove any address tag from @x. This is explicitly related to the
* linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
*
* There should be a better place to put this, but we need this in
* include/exec/cpu_ldst.h, and not some place linux-user specific.
*
* Note that arm-*-user will never set tagged_addr_enable.
*/
static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x)
{
CPUARMState *env = cpu_env(cs);
if (env->tagged_addr_enable) {
/*
* TBI is enabled for userspace but not kernelspace addresses.
* Only clear the tag if bit 55 is clear.
*/
x &= sextract64(x, 0, 56);
}
return x;
}
#else
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps arm_sysemu_ops = {
@ -2694,6 +2717,7 @@ static const TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
.get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
@ -2702,10 +2726,12 @@ static const TCGCPUOps arm_tcg_ops = {
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
.untagged_addr = aarch64_untagged_addr,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.cpu_exec_reset = cpu_reset,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,


@ -783,12 +783,9 @@ typedef struct CPUArchState {
#else /* CONFIG_USER_ONLY */
/* For usermode syscall translation. */
bool eabi;
#endif /* CONFIG_USER_ONLY */
#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
#endif
#endif /* CONFIG_USER_ONLY */
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@ -3119,9 +3116,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags);
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
QEMU_PSCI_CONDUIT_SMC = 1,
@ -3219,35 +3213,4 @@ extern const uint64_t pred_esz_masks[5];
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
#ifdef TARGET_TAGGED_ADDRESSES
/**
* cpu_untagged_addr:
* @cs: CPU context
* @x: tagged address
*
* Remove any address tag from @x. This is explicitly related to the
* linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
*
* There should be a better place to put this, but we need this in
* include/exec/cpu_ldst.h, and not some place linux-user specific.
*/
static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
{
CPUARMState *env = cpu_env(cs);
if (env->tagged_addr_enable) {
/*
* TBI is enabled for userspace but not kernelspace addresses.
* Only clear the tag if bit 55 is clear.
*/
x &= sextract64(x, 0, 56);
}
return x;
}
#endif /* TARGET_TAGGED_ADDRESSES */
#endif /* CONFIG_USER_ONLY */
#endif


@ -11,7 +11,6 @@
#include "internals.h"
#include "cpu-features.h"
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/watchpoint.h"
#include "system/tcg.h"


@ -20,7 +20,6 @@
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
@ -30,6 +29,7 @@
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "accel/tcg/probe.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
@ -4987,7 +4987,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
mmap_lock();
tb_invalidate_phys_range(start_address, end_address);
tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
mmap_unlock();
}
@ -11423,115 +11423,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
static bool mve_no_pred(CPUARMState *env)
{
/*
* Return true if there is definitely no predication of MVE
* instructions by VPR or LTPSIZE. (Returning false even if there
* isn't any predication is OK; generated code will just be
* a little worse.)
* If the CPU does not implement MVE then this TB flag is always 0.
*
* NOTE: if you change this logic, the "recalculate s->mve_no_pred"
* logic in gen_update_fp_context() needs to be updated to match.
*
* We do not include the effect of the ECI bits here -- they are
* tracked in other TB flags. This simplifies the logic for
* "when did we emit code that changes the MVE_NO_PRED TB flag
* and thus need to end the TB?".
*/
if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
return false;
}
if (env->v7m.vpr) {
return false;
}
if (env->v7m.ltpsize < 4) {
return false;
}
return true;
}
void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags)
{
CPUARMTBFlags flags;
assert_hflags_rebuild_correctly(env);
flags = env->hflags;
if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
*pc = env->pc;
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
DP_TBFLAG_A64(flags, BTYPE, env->btype);
}
} else {
*pc = env->regs[15];
if (arm_feature(env, ARM_FEATURE_M)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
!= env->v7m.secure) {
DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
}
if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
(!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
(env->v7m.secure &&
!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
/*
* ASPEN is set, but FPCA/SFPA indicate that there is no
* active FP context; we must create a new FP context before
* executing any FP insn.
*/
DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
}
bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
DP_TBFLAG_M32(flags, LSPACT, 1);
}
if (mve_no_pred(env)) {
DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
}
} else {
/*
* Note that XSCALE_CPAR shares bits with VECSTRIDE.
* Note that VECLEN+VECSTRIDE are RES0 for M-profile.
*/
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
} else {
DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
}
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
DP_TBFLAG_A32(flags, VFPEN, 1);
}
}
DP_TBFLAG_AM32(flags, THUMB, env->thumb);
DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
}
/*
* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
* 0 x Inactive (the TB flag for SS is always 0)
* 1 0 Active-pending
* 1 1 Active-not-pending
* SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
*/
if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
}
*pflags = flags.flags;
*cs_base = flags.flags2;
}
#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the


@ -28,6 +28,7 @@
#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
@ -372,6 +373,7 @@ void arm_restore_state_to_opc(CPUState *cs,
const uint64_t *data);
#ifdef CONFIG_TCG
TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
/* Our implementation of TCGCPUOps::cpu_exec_halt */
@ -1906,8 +1908,6 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
void assert_hflags_rebuild_correctly(CPUARMState *env);
/*
* Although the ARM implementation of hardware assisted debugging
* allows for different breakpoints per-core, the current GDB


@ -10,10 +10,10 @@
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "accel/tcg/probe.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"


@ -21,10 +21,6 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
{
g_assert_not_reached();
}
/* Temporarily while cpu_get_tb_cpu_state() is still in common code */
void assert_hflags_rebuild_correctly(CPUARMState *env)
{
}
/* TLBI insns are only used by TCG, so we don't need to do anything for KVM */
void define_tlb_insn_regs(ARMCPU *cpu)


@ -238,6 +238,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
.get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
@ -250,6 +251,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
.tlb_fill_align = arm_cpu_tlb_fill_align,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.cpu_exec_reset = cpu_reset,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,


@ -29,8 +29,9 @@
#include "internals.h"
#include "qemu/crc32c.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/helper-retaddr.h"
#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "qemu/int128.h"


@ -10,6 +10,8 @@
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "exec/translation-block.h"
#include "accel/tcg/cpu-ops.h"
#include "cpregs.h"
static inline bool fgt_svc(CPUARMState *env, int el)
@ -498,7 +500,7 @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
void assert_hflags_rebuild_correctly(CPUARMState *env)
static void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
CPUARMTBFlags c = env->hflags;
@ -513,3 +515,116 @@ void assert_hflags_rebuild_correctly(CPUARMState *env)
}
#endif
}
static bool mve_no_pred(CPUARMState *env)
{
/*
* Return true if there is definitely no predication of MVE
* instructions by VPR or LTPSIZE. (Returning false even if there
* isn't any predication is OK; generated code will just be
* a little worse.)
* If the CPU does not implement MVE then this TB flag is always 0.
*
* NOTE: if you change this logic, the "recalculate s->mve_no_pred"
* logic in gen_update_fp_context() needs to be updated to match.
*
* We do not include the effect of the ECI bits here -- they are
* tracked in other TB flags. This simplifies the logic for
* "when did we emit code that changes the MVE_NO_PRED TB flag
* and thus need to end the TB?".
*/
if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
return false;
}
if (env->v7m.vpr) {
return false;
}
if (env->v7m.ltpsize < 4) {
return false;
}
return true;
}
TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs)
{
CPUARMState *env = cpu_env(cs);
CPUARMTBFlags flags;
vaddr pc;
assert_hflags_rebuild_correctly(env);
flags = env->hflags;
if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
pc = env->pc;
if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
DP_TBFLAG_A64(flags, BTYPE, env->btype);
}
} else {
pc = env->regs[15];
if (arm_feature(env, ARM_FEATURE_M)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
!= env->v7m.secure) {
DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
}
if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
(!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
(env->v7m.secure &&
!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
/*
* ASPEN is set, but FPCA/SFPA indicate that there is no
* active FP context; we must create a new FP context before
* executing any FP insn.
*/
DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
}
bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
DP_TBFLAG_M32(flags, LSPACT, 1);
}
if (mve_no_pred(env)) {
DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
}
} else {
/*
* Note that XSCALE_CPAR shares bits with VECSTRIDE.
* Note that VECLEN+VECSTRIDE are RES0 for M-profile.
*/
if (arm_feature(env, ARM_FEATURE_XSCALE)) {
DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
} else {
DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
}
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
DP_TBFLAG_A32(flags, VFPEN, 1);
}
}
DP_TBFLAG_AM32(flags, THUMB, env->thumb);
DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
}
/*
* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
* 0 x Inactive (the TB flag for SS is always 0)
* 1 0 Active-pending
* 1 1 Active-not-pending
* SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
*/
if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
}
return (TCGTBCPUState){
.pc = pc,
.flags = flags.flags,
.cs_base = flags.flags2,
};
}


@ -15,7 +15,6 @@
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ldst.h"


@ -21,7 +21,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_USER_ONLY
#include "user/cpu_loop.h"
@ -30,6 +29,7 @@
#include "system/ram_addr.h"
#endif
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "exec/tlb-flags.h"
#include "accel/tcg/cpu-ops.h"
@ -37,7 +37,6 @@
#include "qemu/guest-random.h"
#include "mte_helper.h"
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
if (exclude == 0xffff) {
@ -63,6 +62,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
const size_t page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
uint64_t clean_ptr = useronly_clean_ptr(ptr);
int flags = page_get_flags(clean_ptr);
uint8_t *tags;
@ -83,7 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
return NULL;
}
tags = page_get_target_data(clean_ptr);
tags = page_get_target_data(clean_ptr, page_data_size);
index = extract32(ptr, LOG2_TAG_GRANULE + 1,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);


@ -23,7 +23,6 @@
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"


@ -23,8 +23,8 @@
#include "exec/target_page.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "cpregs.h"
#define SIGNBIT (uint32_t)0x80000000


@ -21,7 +21,6 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"


@ -23,7 +23,7 @@
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/exec-all.h"
#include "accel/tcg/helper-retaddr.h"
#include "qemu/int128.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"


@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "exec/target_page.h"
@ -31,7 +30,9 @@
#include "vec_internal.h"
#include "sve_ldst_internal.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/helper-retaddr.h"
#include "accel/tcg/cpu-ops.h"
#include "accel/tcg/probe.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif


@ -9,7 +9,6 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


@ -17,7 +17,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "translate.h"
#include "translate-a64.h"


@ -4,7 +4,6 @@
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/helper-gen.h"


@ -21,13 +21,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "system/address-spaces.h"
#include "cpu.h"
#include "disas/dis-asm.h"
#include "tcg/debug-assert.h"
#include "hw/qdev-properties.h"
#include "accel/tcg/cpu-ops.h"
static void avr_cpu_set_pc(CPUState *cs, vaddr value)
{
@ -54,6 +54,21 @@ static int avr_cpu_mmu_index(CPUState *cs, bool ifetch)
return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
}
static TCGTBCPUState avr_get_tb_cpu_state(CPUState *cs)
{
CPUAVRState *env = cpu_env(cs);
uint32_t flags = 0;
if (env->fullacc) {
flags |= TB_FLAGS_FULL_ACCESS;
}
if (env->skip) {
flags |= TB_FLAGS_SKIP;
}
return (TCGTBCPUState){ .pc = env->pc_w * 2, .flags = flags };
}
static void avr_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@ -221,18 +236,18 @@ static const struct SysemuCPUOps avr_sysemu_ops = {
.get_phys_page_debug = avr_cpu_get_phys_page_debug,
};
#include "accel/tcg/cpu-ops.h"
static const TCGCPUOps avr_tcg_ops = {
.guest_default_memory_order = 0,
.mttcg_supported = false,
.initialize = avr_cpu_tcg_init,
.translate_code = avr_cpu_translate_code,
.get_tb_cpu_state = avr_get_tb_cpu_state,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.mmu_index = avr_cpu_mmu_index,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.cpu_exec_halt = avr_cpu_has_work,
.cpu_exec_reset = cpu_reset,
.tlb_fill = avr_cpu_tlb_fill,
.do_interrupt = avr_cpu_do_interrupt,
};


@ -205,24 +205,6 @@ enum {
TB_FLAGS_SKIP = 2,
};
static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags)
{
uint32_t flags = 0;
*pc = env->pc_w * 2;
*cs_base = 0;
if (env->fullacc) {
flags |= TB_FLAGS_FULL_ACCESS;
}
if (env->skip) {
flags |= TB_FLAGS_SKIP;
}
*pflags = flags;
}
static inline int cpu_interrupts_enabled(CPUAVRState *env)
{
return env->sregI != 0;


@ -23,7 +23,6 @@
#include "qemu/error-report.h"
#include "cpu.h"
#include "accel/tcg/cpu-ops.h"
#include "accel/tcg/getpc.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"


@ -22,7 +22,6 @@
#include "qemu/qemu-print.h"
#include "tcg/tcg.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"


@ -19,13 +19,13 @@
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
#include "exec/gdbstub.h"
#include "accel/tcg/cpu-ops.h"
static void hexagon_v66_cpu_init(Object *obj) { }
static void hexagon_v67_cpu_init(Object *obj) { }
@ -255,6 +255,22 @@ static vaddr hexagon_cpu_get_pc(CPUState *cs)
return cpu_env(cs)->gpr[HEX_REG_PC];
}
static TCGTBCPUState hexagon_get_tb_cpu_state(CPUState *cs)
{
CPUHexagonState *env = cpu_env(cs);
vaddr pc = env->gpr[HEX_REG_PC];
uint32_t hex_flags = 0;
if (pc == env->gpr[HEX_REG_SA0]) {
hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1);
}
if (pc & PCALIGN_MASK) {
hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0);
}
return (TCGTBCPUState){ .pc = pc, .flags = hex_flags };
}
static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@ -322,14 +338,13 @@ static void hexagon_cpu_init(Object *obj)
{
}
#include "accel/tcg/cpu-ops.h"
static const TCGCPUOps hexagon_tcg_ops = {
/* MTTCG not yet supported: require strict ordering */
.guest_default_memory_order = TCG_MO_ALL,
.mttcg_supported = false,
.initialize = hexagon_translate_init,
.translate_code = hexagon_translate_code,
.get_tb_cpu_state = hexagon_get_tb_cpu_state,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
.mmu_index = hexagon_cpu_mmu_index,


@ -137,21 +137,6 @@ G_NORETURN void hexagon_raise_exception_err(CPUHexagonState *env,
uint32_t exception,
uintptr_t pc);
static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags)
{
uint32_t hex_flags = 0;
*pc = env->gpr[HEX_REG_PC];
*cs_base = 0;
if (*pc == env->gpr[HEX_REG_SA0]) {
hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1);
}
*flags = hex_flags;
if (*pc & PCALIGN_MASK) {
hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0);
}
}
typedef HexagonCPU ArchCPU;
void hexagon_translate_init(void);


@ -22,6 +22,7 @@
#include "arch.h"
#include "mmvec/system_ext_mmvec.h"
#include "accel/tcg/getpc.h"
#include "accel/tcg/probe.h"
#ifndef QEMU_GENERATE
#define VdV (*(MMVector *restrict)(VdV_void))


@ -17,8 +17,8 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "cpu.h"


@ -24,12 +24,12 @@
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/module.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
#include "hw/hppa/hppa_hardware.h"
#include "accel/tcg/cpu-ops.h"
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
@ -51,11 +51,12 @@ static vaddr hppa_cpu_get_pc(CPUState *cs)
env->iaoq_f & -4);
}
void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
uint64_t *pcsbase, uint32_t *pflags)
static TCGTBCPUState hppa_get_tb_cpu_state(CPUState *cs)
{
CPUHPPAState *env = cpu_env(cs);
uint32_t flags = 0;
uint64_t cs_base = 0;
vaddr pc;
/*
* TB lookup assumes that PC contains the complete virtual address.
@ -63,7 +64,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
* incomplete virtual address. This also means that we must separate
* out current cpu privilege from the low bits of IAOQ_F.
*/
*pc = hppa_cpu_get_pc(env_cpu(env));
pc = hppa_cpu_get_pc(env_cpu(env));
flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
/*
@ -99,8 +100,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
}
#endif
*pcsbase = cs_base;
*pflags = flags;
return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
}
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
@ -250,8 +250,6 @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
};
#endif
#include "accel/tcg/cpu-ops.h"
static const TCGCPUOps hppa_tcg_ops = {
/* PA-RISC 1.x processors have a strong memory model. */
/*
@ -264,6 +262,7 @@ static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.translate_code = hppa_translate_code,
.get_tb_cpu_state = hppa_get_tb_cpu_state,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,
.mmu_index = hppa_cpu_mmu_index,
@ -272,6 +271,7 @@ static const TCGCPUOps hppa_tcg_ops = {
.tlb_fill_align = hppa_cpu_tlb_fill_align,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.cpu_exec_halt = hppa_cpu_has_work,
.cpu_exec_reset = cpu_reset,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
.do_transaction_failed = hppa_cpu_do_transaction_failed,


@ -351,9 +351,6 @@ hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
#define CS_BASE_DIFFPAGE (1 << 12)
#define CS_BASE_DIFFSPACE (1 << 13)
void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags);
target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
void update_gva_offset_mask(CPUHPPAState *env);


@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"


@ -21,7 +21,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "fpu/softfloat.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
#include "hw/hppa/hppa_hardware.h"


@ -20,9 +20,9 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/helper-proto.h"


@ -20,9 +20,9 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "qemu/timer.h"
#include "trace.h"
#ifdef CONFIG_USER_ONLY


@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "system/runstate.h"


@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"


@ -35,10 +35,6 @@
#define XEN_NR_VIRQS 24
/* support for self modifying code even if the modified instruction is
close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC
#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
@ -2603,20 +2599,6 @@ static inline bool is_mmu_index_32(int mmu_index)
#include "hw/i386/apic.h"
#endif
static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags)
{
*flags = env->hflags |
(env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
if (env->hflags & HF_CS64_MASK) {
*cs_base = 0;
*pc = env->eip;
} else {
*cs_base = env->segs[R_CS].base;
*pc = (uint32_t)(*cs_base + env->eip);
}
}
void do_cpu_init(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1


@ -526,7 +526,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
uint64_t data[TARGET_INSN_START_WORDS];
uint64_t data[INSN_START_WORDS];
CPUState *cs = env_cpu(env);
if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {


@ -4,7 +4,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/exec-all.h"
#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "access.h"


@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "system/runstate.h"
#include "exec/helper-proto.h"


@ -20,7 +20,6 @@
#ifndef I386_HELPER_TCG_H
#define I386_HELPER_TCG_H
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
/* Maximum instruction code size */

Some files were not shown because too many files have changed in this diff.