accel/tcg: Fix atomic_mmu_lookup vs TLB_FORCE_SLOW
linux-user: implement pgid field of /proc/self/stat
target/sh4: Use MO_ALIGN for system UNALIGN()
target/microblaze: Use TARGET_LONG_BITS == 32 for system mode
accel/tcg: Add TCGCPUOps.pointer_wrap
target/*: Populate TCGCPUOps.pointer_wrap

Merge tag 'pull-tcg-20250528' of https://gitlab.com/rth7680/qemu into staging

# gpg: Signature made Wed 28 May 2025 04:13:04 EDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20250528' of https://gitlab.com/rth7680/qemu: (28 commits)
  accel/tcg: Assert TCGCPUOps.pointer_wrap is set
  target/sparc: Fill in TCGCPUOps.pointer_wrap
  target/s390x: Fill in TCGCPUOps.pointer_wrap
  target/riscv: Fill in TCGCPUOps.pointer_wrap
  target/ppc: Fill in TCGCPUOps.pointer_wrap
  target/mips: Fill in TCGCPUOps.pointer_wrap
  target/loongarch: Fill in TCGCPUOps.pointer_wrap
  target/i386: Fill in TCGCPUOps.pointer_wrap
  target/arm: Fill in TCGCPUOps.pointer_wrap
  target: Use cpu_pointer_wrap_uint32 for 32-bit targets
  target: Use cpu_pointer_wrap_notreached for strict align targets
  accel/tcg: Add TCGCPUOps.pointer_wrap
  target/sh4: Use MO_ALIGN for system UNALIGN()
  tcg: Drop TCGContext.page_{mask,bits}
  tcg: Drop TCGContext.tlb_dyn_max_bits
  target/microblaze: Simplify compute_ldst_addr_type{a,b}
  target/microblaze: Drop DisasContext.r0
  target/microblaze: Use TARGET_LONG_BITS == 32 for system mode
  target/microblaze: Fix printf format in mmu_translate
  target/microblaze: Use TCGv_i64 for compute_ldst_addr_ea
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 3072961b6e
47 changed files with 427 additions and 186 deletions
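The unifying change in this pull is the new TCGCPUOps.pointer_wrap hook: when incrementing a guest address moves it across a page boundary, the incremented value may also need to wrap according to the current CPU mode. A standalone sketch of the semantics follows (plain C with an illustrative address, not QEMU code; only the final cast mirrors cpu_pointer_wrap_uint32() from the diff below):

/*
 * Standalone sketch: models what a pointer_wrap hook computes.
 * "base" was incremented past a page boundary to "result"; a 32-bit
 * target truncates the carry, while a flat 64-bit target keeps it.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t vaddr;   /* stand-in for QEMU's vaddr */

static vaddr wrap_uint32(vaddr result)
{
    return (uint32_t)result;   /* same cast as cpu_pointer_wrap_uint32() */
}

int main(void)
{
    vaddr base = 0xfffff000;       /* last page of a 32-bit space */
    vaddr result = base + 0x2000;  /* access crosses into the next page */

    /* Unwrapped, result is 0x100001000 and escapes the 32-bit space;
     * the hook folds it back to 0x1000. */
    assert(wrap_uint32(result) == 0x1000);
    return 0;
}

Strict-alignment targets instead install cpu_pointer_wrap_notreached(), which asserts: an access that can never be unaligned can never carry across a page within a single access either.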
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
@@ -1039,6 +1039,7 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
     assert(tcg_ops->cpu_exec_halt);
     assert(tcg_ops->cpu_exec_interrupt);
     assert(tcg_ops->cpu_exec_reset);
+    assert(tcg_ops->pointer_wrap);
 #endif /* !CONFIG_USER_ONLY */
     assert(tcg_ops->translate_code);
     assert(tcg_ops->get_tb_cpu_state);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
@@ -1773,6 +1773,9 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         l->page[1].size = l->page[0].size - size0;
         l->page[0].size = size0;
 
+        l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx,
+                                                         l->page[1].addr, addr);
+
         /*
          * Lookup both pages, recognizing exceptions from either.  If the
          * second lookup potentially resized, refresh first CPUTLBEntryFull.
@@ -1871,8 +1874,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         goto stop_the_world;
     }
 
-    /* Collect tlb flags for read. */
+    /* Finish collecting tlb flags for both read and write. */
+    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
     tlb_addr |= tlbe->addr_read;
+    tlb_addr &= TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+    tlb_addr |= full->slow_flags[MMU_DATA_STORE];
+    tlb_addr |= full->slow_flags[MMU_DATA_LOAD];
 
     /* Notice an IO access or a needs-MMU-lookup access */
     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
@@ -1882,13 +1889,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     }
 
     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
-    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         notdirty_write(cpu, addr, size, full, retaddr);
     }
 
-    if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
         int wp_flags = 0;
 
         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
@@ -1897,11 +1903,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
             wp_flags |= BP_MEM_READ;
         }
-        if (wp_flags) {
-            cpu_check_watchpoint(cpu, addr, size,
-                                 full->attrs, wp_flags, retaddr);
-        }
+        cpu_check_watchpoint(cpu, addr, size,
+                             full->attrs, wp_flags, retaddr);
     }
 
     return hostaddr;
@@ -2926,3 +2930,22 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
 {
     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
+
+/*
+ * Common pointer_wrap implementations.
+ */
+
+/*
+ * To be used for strict alignment targets.
+ * Because no accesses are unaligned, no accesses wrap either.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+    g_assert_not_reached();
+}
+
+/* To be used for strict 32-bit targets. */
+vaddr cpu_pointer_wrap_uint32(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+    return (uint32_t)res;
+}
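The atomic_mmu_lookup() fix above folds the slow-path flag bits for both access directions into tlb_addr before testing them; the old comment ("Collect tlb flags for read") reflected that only the read side was merged, so a store-side-only slow flag such as a watchpoint could go unnoticed. A standalone sketch of the merge (flag values illustrative; the real masks live in exec/tlb-flags.h):

/* Standalone sketch of the flag merge above, not QEMU code. */
#include <assert.h>
#include <stdint.h>

#define TLB_FORCE_SLOW   (1u << 0)   /* illustrative bit positions */
#define TLB_WATCHPOINT   (1u << 1)
#define TLB_FLAGS_MASK   0xffu

enum { MMU_DATA_LOAD, MMU_DATA_STORE };

int main(void)
{
    uint32_t tlb_addr = TLB_FORCE_SLOW;             /* from addr_write */
    uint32_t slow_flags[2] = { 0, TLB_WATCHPOINT }; /* store-only watchpoint */

    /* The old code kept TLB_FORCE_SLOW and folded in only the load
     * flags, missing the store-side watchpoint.  The fixed sequence: */
    tlb_addr &= TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
    tlb_addr |= slow_flags[MMU_DATA_STORE];
    tlb_addr |= slow_flags[MMU_DATA_LOAD];

    assert(tlb_addr & TLB_WATCHPOINT);  /* watchpoint is now noticed */
    return 0;
}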
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
@@ -24,7 +24,6 @@
 #include "tcg/tcg.h"
 #include "exec/mmap-lock.h"
 #include "tb-internal.h"
-#include "tlb-bounds.h"
 #include "exec/tb-flush.h"
 #include "qemu/cacheinfo.h"
 #include "qemu/target-info.h"
@@ -313,11 +312,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
 
     tcg_ctx->gen_tb = tb;
     tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
-#ifdef CONFIG_SOFTMMU
-    tcg_ctx->page_bits = TARGET_PAGE_BITS;
-    tcg_ctx->page_mask = TARGET_PAGE_MASK;
-    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
-#endif
     tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;
 
 restart_translate:
diff --git a/configs/targets/microblaze-softmmu.mak b/configs/targets/microblaze-softmmu.mak
@@ -3,6 +3,4 @@ TARGET_BIG_ENDIAN=y
 # needed by boot.c
 TARGET_NEED_FDT=y
 TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
-# System mode can address up to 64 bits via lea/sea instructions.
-# TODO: These bypass the mmu, so we could emulate these differently.
-TARGET_LONG_BITS=64
+TARGET_LONG_BITS=32
diff --git a/configs/targets/microblazeel-softmmu.mak b/configs/targets/microblazeel-softmmu.mak
@@ -2,6 +2,4 @@ TARGET_ARCH=microblaze
 # needed by boot.c
 TARGET_NEED_FDT=y
 TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
-# System mode can address up to 64 bits via lea/sea instructions.
-# TODO: These bypass the mmu, so we could emulate these differently.
-TARGET_LONG_BITS=64
+TARGET_LONG_BITS=32
diff --git a/include/accel/tcg/cpu-ops.h b/include/accel/tcg/cpu-ops.h
@@ -222,6 +222,13 @@ struct TCGCPUOps {
     bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
+    /**
+     * @pointer_wrap:
+     *
+     * We have incremented @base to @result, resulting in a page change.
+     * For the current cpu state, adjust @result for possible overflow.
+     */
+    vaddr (*pointer_wrap)(CPUState *cpu, int mmu_idx, vaddr result, vaddr base);
     /**
      * @do_transaction_failed: Callback for handling failed memory transactions
      * (ie bus faults or external aborts; not MMU faults)
@@ -315,6 +322,12 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
  */
 int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
 
+/*
+ * Common pointer_wrap implementations.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *, int, vaddr, vaddr);
+vaddr cpu_pointer_wrap_uint32(CPUState *, int, vaddr, vaddr);
+
 #endif
 
 #endif /* TCG_CPU_OPS_H */
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
@@ -365,10 +365,6 @@ struct TCGContext {
     int nb_indirects;
     int nb_ops;
     TCGType addr_type;            /* TCG_TYPE_I32 or TCG_TYPE_I64 */
-
-    int page_mask;
-    uint8_t page_bits;
-    uint8_t tlb_dyn_max_bits;
     TCGBar guest_mo;
 
     TCGRegSet reserved_regs;
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
@@ -8235,6 +8235,9 @@ static int open_self_stat(CPUArchState *cpu_env, int fd)
         } else if (i == 3) {
             /* ppid */
             g_string_printf(buf, FMT_pid " ", getppid());
+        } else if (i == 4) {
+            /* pgid */
+            g_string_printf(buf, FMT_pid " ", getpgrp());
         } else if (i == 19) {
             /* num_threads */
             int cpus = 0;
diff --git a/system/main.c b/system/main.c
@@ -69,8 +69,21 @@ int (*qemu_main)(void) = os_darwin_cfrunloop_main;
 int main(int argc, char **argv)
 {
     qemu_init(argc, argv);
+
+    /*
+     * qemu_init acquires the BQL and replay mutex lock. BQL is acquired when
+     * initializing cpus, to block associated threads until initialization is
+     * complete. Replay_mutex lock is acquired on initialization, because it
+     * must be held when configuring icount_mode.
+     *
+     * On MacOS, qemu main event loop runs in a background thread, as main
+     * thread must be reserved for UI. Thus, we need to transfer lock ownership,
+     * and the simplest way to do that is to release them, and reacquire them
+     * from qemu_default_main.
+     */
     bql_unlock();
     replay_mutex_unlock();
+
     if (qemu_main) {
         QemuThread main_loop_thread;
         qemu_thread_create(&main_loop_thread, "qemu_main",
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
@@ -261,6 +261,7 @@ static const TCGCPUOps alpha_tcg_ops = {
     .record_sigbus = alpha_cpu_record_sigbus,
 #else
     .tlb_fill = alpha_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_notreached,
    .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
     .cpu_exec_halt = alpha_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
@@ -2703,6 +2703,29 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
 #endif
 
 #ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+                                   vaddr result, vaddr base)
+{
+    /*
+     * The Stage2 and Phys indexes are only used for ptw on arm32,
+     * and all pte's are aligned, so we never produce a wrap for these.
+     * Double check that we're not truncating a 40-bit physical address.
+     */
+    assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+    if (!is_a64(cpu_env(cs))) {
+        return (uint32_t)result;
+    }
+
+    /*
+     * TODO: For FEAT_CPA2, decide how to we want to resolve
+     * Unpredictable_CPACHECK in AddressIncrement.
+     */
+    return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
 static const TCGCPUOps arm_tcg_ops = {
     .mttcg_supported = true,
     /* ARM processors have a weak memory model */
@@ -2722,6 +2745,7 @@ static const TCGCPUOps arm_tcg_ops = {
     .untagged_addr = aarch64_untagged_addr,
 #else
     .tlb_fill_align = arm_cpu_tlb_fill_align,
+    .pointer_wrap = aprofile_pointer_wrap,
     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
@@ -249,6 +249,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
     .record_sigbus = arm_cpu_record_sigbus,
 #else
     .tlb_fill_align = arm_cpu_tlb_fill_align,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
     .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
@@ -250,6 +250,12 @@ static const TCGCPUOps avr_tcg_ops = {
     .cpu_exec_reset = cpu_reset,
     .tlb_fill = avr_cpu_tlb_fill,
     .do_interrupt = avr_cpu_do_interrupt,
+    /*
+     * TODO: code and data wrapping are different, but for the most part
+     * AVR only references bytes or aligned code fetches.  But we use
+     * non-aligned MO_16 accesses for stack push/pop.
+     */
+    .pointer_wrap = cpu_pointer_wrap_uint32,
 };
 
 static void avr_cpu_class_init(ObjectClass *oc, const void *data)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
@@ -269,6 +269,7 @@ static const TCGCPUOps hppa_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill_align = hppa_cpu_tlb_fill_align,
+    .pointer_wrap = cpu_pointer_wrap_notreached,
     .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
     .cpu_exec_halt = hppa_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
@@ -149,6 +149,12 @@ static void x86_cpu_exec_reset(CPUState *cs)
     do_cpu_init(env_archcpu(env));
     cs->exception_index = EXCP_HALTED;
 }
+
+static vaddr x86_pointer_wrap(CPUState *cs, int mmu_idx,
+                              vaddr result, vaddr base)
+{
+    return cpu_env(cs)->hflags & HF_CS64_MASK ? result : (uint32_t)result;
+}
 #endif
 
 const TCGCPUOps x86_tcg_ops = {
@@ -172,6 +178,7 @@ const TCGCPUOps x86_tcg_ops = {
     .record_sigbus = x86_cpu_record_sigbus,
 #else
     .tlb_fill = x86_cpu_tlb_fill,
+    .pointer_wrap = x86_pointer_wrap,
     .do_interrupt = x86_cpu_do_interrupt,
     .cpu_exec_halt = x86_cpu_exec_halt,
     .cpu_exec_interrupt = x86_cpu_exec_interrupt,
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
@@ -334,6 +334,12 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     }
     return false;
 }
+
+static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
+                                    vaddr result, vaddr base)
+{
+    return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
+}
 #endif
 
 static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
@@ -889,6 +895,7 @@ static const TCGCPUOps loongarch_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = loongarch_cpu_tlb_fill,
+    .pointer_wrap = loongarch_pointer_wrap,
     .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
     .cpu_exec_halt = loongarch_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
@@ -619,6 +619,7 @@ static const TCGCPUOps m68k_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = m68k_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
     .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
     .cpu_exec_halt = m68k_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
@@ -447,6 +447,7 @@ static const TCGCPUOps mb_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = mb_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
     .cpu_exec_interrupt = mb_cpu_exec_interrupt,
     .cpu_exec_halt = mb_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
@@ -248,7 +248,7 @@ struct CPUArchState {
     uint32_t pc;
     uint32_t msr;    /* All bits of MSR except MSR[C] and MSR[CC] */
     uint32_t msr_c;  /* MSR[C], in low bit; other bits must be 0 */
-    target_ulong ear;
+    uint64_t ear;
     uint32_t esr;
     uint32_t fsr;
     uint32_t btr;
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
@@ -26,8 +26,51 @@
 #include "exec/target_page.h"
 #include "qemu/host-utils.h"
 #include "exec/log.h"
+#include "exec/helper-proto.h"
+
+G_NORETURN
+static void mb_unaligned_access_internal(CPUState *cs, uint64_t addr,
+                                         uintptr_t retaddr)
+{
+    CPUMBState *env = cpu_env(cs);
+    uint32_t esr, iflags;
+
+    /* Recover the pc and iflags from the corresponding insn_start. */
+    cpu_restore_state(cs, retaddr);
+    iflags = env->iflags;
+
+    qemu_log_mask(CPU_LOG_INT,
+                  "Unaligned access addr=0x%" PRIx64 " pc=%x iflags=%x\n",
+                  addr, env->pc, iflags);
+
+    esr = ESR_EC_UNALIGNED_DATA;
+    if (likely(iflags & ESR_ESS_FLAG)) {
+        esr |= iflags & ESR_ESS_MASK;
+    } else {
+        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
+    }
+
+    env->ear = addr;
+    env->esr = esr;
+    cs->exception_index = EXCP_HW_EXCP;
+    cpu_loop_exit(cs);
+}
+
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                MMUAccessType access_type,
+                                int mmu_idx, uintptr_t retaddr)
+{
+    mb_unaligned_access_internal(cs, addr, retaddr);
+}
+
 #ifndef CONFIG_USER_ONLY
+
+void HELPER(unaligned_access)(CPUMBState *env, uint64_t addr)
+{
+    mb_unaligned_access_internal(env_cpu(env), addr, GETPC());
+}
+
 static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                     MMUAccessType access_type)
 {
@@ -269,31 +312,3 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 }
 
 #endif /* !CONFIG_USER_ONLY */
-
-void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
-                                MMUAccessType access_type,
-                                int mmu_idx, uintptr_t retaddr)
-{
-    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
-    uint32_t esr, iflags;
-
-    /* Recover the pc and iflags from the corresponding insn_start. */
-    cpu_restore_state(cs, retaddr);
-    iflags = cpu->env.iflags;
-
-    qemu_log_mask(CPU_LOG_INT,
-                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
-                  (target_ulong)addr, cpu->env.pc, iflags);
-
-    esr = ESR_EC_UNALIGNED_DATA;
-    if (likely(iflags & ESR_ESS_FLAG)) {
-        esr |= iflags & ESR_ESS_MASK;
-    } else {
-        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
-    }
-
-    cpu->env.ear = addr;
-    cpu->env.esr = esr;
-    cs->exception_index = EXCP_HW_EXCP;
-    cpu_loop_exit(cs);
-}
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
@@ -20,12 +20,22 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
 DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
 
 DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
-#endif
 
 DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
 
 DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
 DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(unaligned_access, TCG_CALL_NO_WG, noreturn, env, i64)
+DEF_HELPER_FLAGS_2(lbuea, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_3(sbea, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+#endif
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
@@ -172,7 +172,8 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
     }
 done:
     qemu_log_mask(CPU_LOG_MMU,
-                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+                  "MMU vaddr=0x" TARGET_FMT_lx
+                  " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                   vaddr, rw, tlb_wr, tlb_ex, hit);
     return hit;
 }
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
@@ -382,6 +382,8 @@ void helper_stackprot(CPUMBState *env, target_ulong addr)
 }
 
 #if !defined(CONFIG_USER_ONLY)
+#include "system/memory.h"
+
 /* Writes/reads to the MMU's special regs end up here. */
 uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn)
 {
@@ -393,38 +395,90 @@ void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v)
     mmu_write(env, ext, rn, v);
 }
 
+static void mb_transaction_failed_internal(CPUState *cs, hwaddr physaddr,
+                                           uint64_t addr, unsigned size,
+                                           MMUAccessType access_type,
+                                           uintptr_t retaddr)
+{
+    CPUMBState *env = cpu_env(cs);
+    MicroBlazeCPU *cpu = env_archcpu(env);
+    const char *access_name = "INVALID";
+    bool take = env->msr & MSR_EE;
+    uint32_t esr = ESR_EC_DATA_BUS;
+
+    switch (access_type) {
+    case MMU_INST_FETCH:
+        access_name = "INST_FETCH";
+        esr = ESR_EC_INSN_BUS;
+        take &= cpu->cfg.iopb_bus_exception;
+        break;
+    case MMU_DATA_LOAD:
+        access_name = "DATA_LOAD";
+        take &= cpu->cfg.dopb_bus_exception;
+        break;
+    case MMU_DATA_STORE:
+        access_name = "DATA_STORE";
+        take &= cpu->cfg.dopb_bus_exception;
+        break;
+    }
+
+    qemu_log_mask(CPU_LOG_INT, "Transaction failed: addr 0x%" PRIx64
+                  "physaddr 0x" HWADDR_FMT_plx " size %d access-type %s (%s)\n",
+                  addr, physaddr, size, access_name,
+                  take ? "TAKEN" : "DROPPED");
+
+    if (take) {
+        env->esr = esr;
+        env->ear = addr;
+        cs->exception_index = EXCP_HW_EXCP;
+        cpu_loop_exit_restore(cs, retaddr);
+    }
+}
+
 void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                unsigned size, MMUAccessType access_type,
                                int mmu_idx, MemTxAttrs attrs,
                                MemTxResult response, uintptr_t retaddr)
 {
-    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
-    CPUMBState *env = &cpu->env;
-
-    qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx
-                  " physaddr 0x" HWADDR_FMT_plx " size %d access type %s\n",
-                  addr, physaddr, size,
-                  access_type == MMU_INST_FETCH ? "INST_FETCH" :
-                  (access_type == MMU_DATA_LOAD ? "DATA_LOAD" : "DATA_STORE"));
-
-    if (!(env->msr & MSR_EE)) {
-        return;
-    }
-
-    if (access_type == MMU_INST_FETCH) {
-        if (!cpu->cfg.iopb_bus_exception) {
-            return;
-        }
-        env->esr = ESR_EC_INSN_BUS;
-    } else {
-        if (!cpu->cfg.dopb_bus_exception) {
-            return;
-        }
-        env->esr = ESR_EC_DATA_BUS;
-    }
-
-    env->ear = addr;
-    cs->exception_index = EXCP_HW_EXCP;
-    cpu_loop_exit_restore(cs, retaddr);
+    mb_transaction_failed_internal(cs, physaddr, addr, size,
+                                   access_type, retaddr);
 }
 
+#define LD_EA(NAME, TYPE, FUNC) \
+uint32_t HELPER(NAME)(CPUMBState *env, uint64_t ea) \
+{ \
+    CPUState *cs = env_cpu(env); \
+    MemTxResult txres; \
+    TYPE ret = FUNC(cs->as, ea, MEMTXATTRS_UNSPECIFIED, &txres); \
+    if (unlikely(txres != MEMTX_OK)) { \
+        mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+                                       MMU_DATA_LOAD, GETPC()); \
+    } \
+    return ret; \
+}
+
+LD_EA(lbuea, uint8_t, address_space_ldub)
+LD_EA(lhuea_be, uint16_t, address_space_lduw_be)
+LD_EA(lhuea_le, uint16_t, address_space_lduw_le)
+LD_EA(lwea_be, uint32_t, address_space_ldl_be)
+LD_EA(lwea_le, uint32_t, address_space_ldl_le)
+
+#define ST_EA(NAME, TYPE, FUNC) \
+void HELPER(NAME)(CPUMBState *env, uint32_t data, uint64_t ea) \
+{ \
+    CPUState *cs = env_cpu(env); \
+    MemTxResult txres; \
+    FUNC(cs->as, ea, data, MEMTXATTRS_UNSPECIFIED, &txres); \
+    if (unlikely(txres != MEMTX_OK)) { \
+        mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+                                       MMU_DATA_STORE, GETPC()); \
+    } \
+}
+
+ST_EA(sbea, uint8_t, address_space_stb)
+ST_EA(shea_be, uint16_t, address_space_stw_be)
+ST_EA(shea_le, uint16_t, address_space_stw_le)
+ST_EA(swea_be, uint32_t, address_space_stl_be)
+ST_EA(swea_le, uint32_t, address_space_stl_le)
+
 #endif
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
@@ -63,9 +63,6 @@ typedef struct DisasContext {
     DisasContextBase base;
     const MicroBlazeCPUConfig *cfg;
 
-    TCGv_i32 r0;
-    bool r0_set;
-
     /* Decoder. */
     uint32_t ext_imm;
     unsigned int tb_flags;
@@ -179,14 +176,7 @@ static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
     if (likely(reg != 0)) {
         return cpu_R[reg];
     }
-    if (!dc->r0_set) {
-        if (dc->r0 == NULL) {
-            dc->r0 = tcg_temp_new_i32();
-        }
-        tcg_gen_movi_i32(dc->r0, 0);
-        dc->r0_set = true;
-    }
-    return dc->r0;
+    return tcg_constant_i32(0);
 }
 
 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
@@ -194,10 +184,7 @@ static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
     if (likely(reg != 0)) {
         return cpu_R[reg];
     }
-    if (dc->r0 == NULL) {
-        dc->r0 = tcg_temp_new_i32();
-    }
-    return dc->r0;
+    return tcg_temp_new_i32();
 }
 
 static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
@@ -619,19 +606,18 @@ DO_TYPEBI(xori, false, tcg_gen_xori_i32)
 
 static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
 {
-    TCGv ret = tcg_temp_new();
+    TCGv ret;
 
     /* If any of the regs is r0, set t to the value of the other reg. */
     if (ra && rb) {
-        TCGv_i32 tmp = tcg_temp_new_i32();
-        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
-        tcg_gen_extu_i32_tl(ret, tmp);
+        ret = tcg_temp_new_i32();
+        tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]);
     } else if (ra) {
-        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+        ret = cpu_R[ra];
     } else if (rb) {
-        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+        ret = cpu_R[rb];
     } else {
-        tcg_gen_movi_tl(ret, 0);
+        ret = tcg_constant_i32(0);
     }
 
     if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
@@ -642,15 +628,16 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
 
 static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
 {
-    TCGv ret = tcg_temp_new();
+    TCGv ret;
 
     /* If any of the regs is r0, set t to the value of the other reg. */
-    if (ra) {
-        TCGv_i32 tmp = tcg_temp_new_i32();
-        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
-        tcg_gen_extu_i32_tl(ret, tmp);
+    if (ra && imm) {
+        ret = tcg_temp_new_i32();
+        tcg_gen_addi_i32(ret, cpu_R[ra], imm);
+    } else if (ra) {
+        ret = cpu_R[ra];
     } else {
-        tcg_gen_movi_tl(ret, (uint32_t)imm);
+        ret = tcg_constant_i32(imm);
     }
 
     if (ra == 1 && dc->cfg->stackprot) {
@@ -660,23 +647,23 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
 }
 
 #ifndef CONFIG_USER_ONLY
-static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
+static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
 {
     int addr_size = dc->cfg->addr_size;
-    TCGv ret = tcg_temp_new();
+    TCGv_i64 ret = tcg_temp_new_i64();
 
     if (addr_size == 32 || ra == 0) {
         if (rb) {
-            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+            tcg_gen_extu_i32_i64(ret, cpu_R[rb]);
         } else {
-            tcg_gen_movi_tl(ret, 0);
+            return tcg_constant_i64(0);
         }
     } else {
         if (rb) {
             tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
         } else {
-            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
-            tcg_gen_shli_tl(ret, ret, 32);
+            tcg_gen_extu_i32_i64(ret, cpu_R[ra]);
+            tcg_gen_shli_i64(ret, ret, 32);
         }
         if (addr_size < 64) {
             /* Mask off out of range bits. */
@@ -700,6 +687,20 @@ static void record_unaligned_ess(DisasContext *dc, int rd,
 
     tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
 }
+
+static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
+                                   int rd, MemOp size, bool store)
+{
+    if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
+        TCGLabel *over = gen_new_label();
+
+        record_unaligned_ess(dc, rd, size, store);
+
+        tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
+        gen_helper_unaligned_access(tcg_env, ea);
+        gen_set_label(over);
+    }
+}
 #endif
 
 static inline MemOp mo_endian(DisasContext *dc)
@@ -765,10 +766,11 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
         return true;
     }
 #ifdef CONFIG_USER_ONLY
-    return true;
+    g_assert_not_reached();
 #else
-    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
-    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
+    return true;
 #endif
 }
 
@@ -796,10 +798,13 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
         return true;
     }
 #ifdef CONFIG_USER_ONLY
-    return true;
+    g_assert_not_reached();
 #else
-    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
-    return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
+    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
+    (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
+        (reg_for_write(dc, arg->rd), tcg_env, addr);
+    return true;
 #endif
 }
 
@@ -827,10 +832,13 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg)
         return true;
     }
 #ifdef CONFIG_USER_ONLY
-    return true;
+    g_assert_not_reached();
 #else
-    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
-    return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
+    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
+    (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
+        (reg_for_write(dc, arg->rd), tcg_env, addr);
+    return true;
 #endif
 }
 
@@ -918,10 +926,11 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg)
         return true;
     }
 #ifdef CONFIG_USER_ONLY
-    return true;
+    g_assert_not_reached();
 #else
-    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
-    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
+    return true;
 #endif
 }
 
@@ -949,10 +958,13 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg)
         return true;
     }
 #ifdef CONFIG_USER_ONLY
-    return true;
+    g_assert_not_reached();
 #else
-    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
-    return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
+    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
+    (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
+        (tcg_env, reg_for_read(dc, arg->rd), addr);
+    return true;
 #endif
 }
 
@@ -980,10 +992,13 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg)
         return true;
     }
 #ifdef CONFIG_USER_ONLY
-    return true;
+    g_assert_not_reached();
 #else
-    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
-    return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
+    TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
+    (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
+        (tcg_env, reg_for_read(dc, arg->rd), addr);
+    return true;
 #endif
 }
 
@@ -1607,8 +1622,6 @@ static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
     dc->cfg = &cpu->cfg;
     dc->tb_flags = dc->base.tb->flags;
     dc->ext_imm = dc->base.tb->cs_base;
-    dc->r0 = NULL;
-    dc->r0_set = false;
     dc->mem_index = cpu_mmu_index(cs, false);
     dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
     dc->jmp_dest = -1;
@@ -1647,11 +1660,6 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
         trap_illegal(dc, true);
     }
 
-    if (dc->r0) {
-        dc->r0 = NULL;
-        dc->r0_set = false;
-    }
-
     /* Discard the imm global when its contents cannot be used. */
     if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
         tcg_gen_discard_i32(cpu_imm);
@@ -1829,7 +1837,7 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     }
 
     qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
-                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
+                 "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
                  env->esr, env->fsr, env->btr, env->edr,
                  env->ear, env->slr, env->shr);
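The lea/sea helpers above operate on a 64-bit extended address assembled from two 32-bit registers: rb supplies the low word and ra the high word, via tcg_gen_concat_i32_i64. A standalone sketch of that composition (plain C, illustrative values, not QEMU code):

/* Standalone sketch of the extended-address composition. */
#include <assert.h>
#include <stdint.h>

static uint64_t compose_ea(uint32_t ra, uint32_t rb)
{
    /* Same result as tcg_gen_concat_i32_i64(ret, rb, ra). */
    return ((uint64_t)ra << 32) | rb;
}

int main(void)
{
    assert(compose_ea(0x00000001, 0x80000000) == 0x180000000ull);
    /* With ra == 0 (or addr_size == 32) only the low word survives. */
    assert(compose_ea(0, 0x80000000) == 0x80000000ull);
    return 0;
}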
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
@@ -560,6 +560,14 @@ static TCGTBCPUState mips_get_tb_cpu_state(CPUState *cs)
     };
 }
 
+#ifndef CONFIG_USER_ONLY
+static vaddr mips_pointer_wrap(CPUState *cs, int mmu_idx,
+                               vaddr result, vaddr base)
+{
+    return cpu_env(cs)->hflags & MIPS_HFLAG_AWRAP ? (int32_t)result : result;
+}
+#endif
+
 static const TCGCPUOps mips_tcg_ops = {
     .mttcg_supported = TARGET_LONG_BITS == 32,
     .guest_default_memory_order = 0,
@@ -573,6 +581,7 @@ static const TCGCPUOps mips_tcg_ops = {
 
 #if !defined(CONFIG_USER_ONLY)
     .tlb_fill = mips_cpu_tlb_fill,
+    .pointer_wrap = mips_pointer_wrap,
     .cpu_exec_interrupt = mips_cpu_exec_interrupt,
     .cpu_exec_halt = mips_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
@@ -265,6 +265,7 @@ static const TCGCPUOps openrisc_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = openrisc_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
     .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
     .cpu_exec_halt = openrisc_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
@@ -7386,6 +7386,12 @@ static void ppc_cpu_exec_exit(CPUState *cs)
         cpu->vhyp_class->cpu_exec_exit(cpu->vhyp, cpu);
     }
 }
+
+static vaddr ppc_pointer_wrap(CPUState *cs, int mmu_idx,
+                              vaddr result, vaddr base)
+{
+    return (cpu_env(cs)->hflags >> HFLAGS_64) & 1 ? result : (uint32_t)result;
+}
 #endif /* CONFIG_TCG */
 
 #endif /* !CONFIG_USER_ONLY */
@@ -7490,6 +7496,7 @@ static const TCGCPUOps ppc_tcg_ops = {
     .record_sigsegv = ppc_cpu_record_sigsegv,
 #else
     .tlb_fill = ppc_cpu_tlb_fill,
+    .pointer_wrap = ppc_pointer_wrap,
     .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
     .cpu_exec_halt = ppc_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
@@ -237,6 +237,31 @@ static void riscv_restore_state_to_opc(CPUState *cs,
     env->excp_uw2 = data[2];
 }
 
+#ifndef CONFIG_USER_ONLY
+static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
+                                vaddr result, vaddr base)
+{
+    CPURISCVState *env = cpu_env(cs);
+    uint32_t pm_len;
+    bool pm_signext;
+
+    if (cpu_address_xl(env) == MXL_RV32) {
+        return (uint32_t)result;
+    }
+
+    pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
+    if (pm_len == 0) {
+        return result;
+    }
+
+    pm_signext = riscv_cpu_virt_mem_enabled(env);
+    if (pm_signext) {
+        return sextract64(result, 0, 64 - pm_len);
+    }
+    return extract64(result, 0, 64 - pm_len);
+}
+#endif
+
 const TCGCPUOps riscv_tcg_ops = {
     .mttcg_supported = true,
     .guest_default_memory_order = 0,
@@ -250,6 +275,7 @@ const TCGCPUOps riscv_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = riscv_cpu_tlb_fill,
+    .pointer_wrap = riscv_pointer_wrap,
     .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
     .cpu_exec_halt = riscv_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
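riscv_pointer_wrap() above strips pointer-masking tag bits by keeping only the low 64 - pm_len bits, sign-extended when virtual memory is enabled and zero-extended otherwise. A standalone sketch with simplified reimplementations of QEMU's extract64/sextract64 (pm_len and the address are illustrative; the real pm_len derives from the PMM field):

/* Standalone sketch of the tag-stripping arithmetic, not QEMU code. */
#include <assert.h>
#include <stdint.h>

static uint64_t my_sextract64(uint64_t v, int pos, int len)
{
    /* Shift the field to the top, then arithmetic-shift back down. */
    return (uint64_t)((int64_t)(v << (64 - pos - len)) >> (64 - len));
}

static uint64_t my_extract64(uint64_t v, int pos, int len)
{
    return (v >> pos) & (~0ull >> (64 - len));
}

int main(void)
{
    uint64_t result = 0xa580000080001000ull;  /* top byte is a tag */
    int pm_len = 8;

    /* Zero-extension simply clears the tag byte... */
    assert(my_extract64(result, 0, 64 - pm_len) == 0x0080000080001000ull);
    /* ...sign-extension propagates bit 55 through the tag byte. */
    assert(my_sextract64(result, 0, 64 - pm_len) == 0xff80000080001000ull);
    return 0;
}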
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
@@ -225,6 +225,7 @@ static const TCGCPUOps rx_tcg_ops = {
     .restore_state_to_opc = rx_restore_state_to_opc,
     .mmu_index = rx_cpu_mmu_index,
     .tlb_fill = rx_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
 
     .cpu_exec_interrupt = rx_cpu_exec_interrupt,
     .cpu_exec_halt = rx_cpu_has_work,
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
@@ -347,6 +347,14 @@ static TCGTBCPUState s390x_get_tb_cpu_state(CPUState *cs)
     };
 }
 
+#ifndef CONFIG_USER_ONLY
+static vaddr s390_pointer_wrap(CPUState *cs, int mmu_idx,
+                               vaddr result, vaddr base)
+{
+    return wrap_address(cpu_env(cs), result);
+}
+#endif
+
 static const TCGCPUOps s390_tcg_ops = {
     .mttcg_supported = true,
     .precise_smc = true,
@@ -367,6 +375,7 @@ static const TCGCPUOps s390_tcg_ops = {
     .record_sigbus = s390_cpu_record_sigbus,
 #else
     .tlb_fill = s390_cpu_tlb_fill,
+    .pointer_wrap = s390_pointer_wrap,
     .cpu_exec_interrupt = s390_cpu_exec_interrupt,
     .cpu_exec_halt = s390_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
@@ -296,6 +296,7 @@ static const TCGCPUOps superh_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = superh_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_notreached,
     .cpu_exec_interrupt = superh_cpu_exec_interrupt,
     .cpu_exec_halt = superh_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
@@ -54,7 +54,7 @@ typedef struct DisasContext {
 #define UNALIGN(C)  (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
 #else
 #define IS_USER(ctx)  (!(ctx->tbflags & (1u << SR_MD)))
-#define UNALIGN(C)  0
+#define UNALIGN(C)  MO_ALIGN
 #endif
 
 /* Target-specific values for ctx->base.is_jmp. */
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
@@ -1002,6 +1002,18 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
 #ifdef CONFIG_TCG
 #include "accel/tcg/cpu-ops.h"
 
+#ifndef CONFIG_USER_ONLY
+static vaddr sparc_pointer_wrap(CPUState *cs, int mmu_idx,
+                                vaddr result, vaddr base)
+{
+#ifdef TARGET_SPARC64
+    return cpu_env(cs)->pstate & PS_AM ? (uint32_t)result : result;
+#else
+    return (uint32_t)result;
+#endif
+}
+#endif
+
 static const TCGCPUOps sparc_tcg_ops = {
     /*
      * From Oracle SPARC Architecture 2015:
@@ -1036,6 +1048,7 @@ static const TCGCPUOps sparc_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = sparc_cpu_tlb_fill,
+    .pointer_wrap = sparc_pointer_wrap,
     .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
     .cpu_exec_halt = sparc_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
@@ -190,6 +190,7 @@ static const TCGCPUOps tricore_tcg_ops = {
     .restore_state_to_opc = tricore_restore_state_to_opc,
     .mmu_index = tricore_cpu_mmu_index,
     .tlb_fill = tricore_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
     .cpu_exec_interrupt = tricore_cpu_exec_interrupt,
     .cpu_exec_halt = tricore_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
@@ -318,6 +318,7 @@ static const TCGCPUOps xtensa_tcg_ops = {
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = xtensa_cpu_tlb_fill,
+    .pointer_wrap = cpu_pointer_wrap_uint32,
     .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
     .cpu_exec_halt = xtensa_cpu_has_work,
     .cpu_exec_reset = cpu_reset,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
@@ -1661,7 +1661,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     unsigned s_mask = (1u << s_bits) - 1;
     unsigned mem_index = get_mmuidx(oi);
     TCGReg addr_adj;
-    TCGType mask_type;
     uint64_t compare_mask;
 
     ldst = new_ldst_label(s);
@@ -1669,9 +1668,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst->oi = oi;
     ldst->addr_reg = addr_reg;
 
-    mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
-                 ? TCG_TYPE_I64 : TCG_TYPE_I32);
-
     /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
@@ -1679,9 +1675,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                  tlb_mask_table_ofs(s, mem_index), 1, 0);
 
     /* Extract the TLB index from the address into X0. */
-    tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
+    tcg_out_insn(s, 3502S, AND_LSR, TCG_TYPE_I64,
                  TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
-                 s->page_bits - CPU_TLB_ENTRY_BITS);
+                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
     /* Add the tlb_table pointer, forming the CPUTLBEntry address. */
     tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
@@ -1707,7 +1703,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         tcg_out_insn(s, 3401, ADDI, addr_type,
                      addr_adj, addr_reg, s_mask - a_mask);
     }
-    compare_mask = (uint64_t)s->page_mask | a_mask;
+    compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
 
     /* Store the page mask part of the address into TMP2. */
     tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
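From here on, the TCG backends swap the per-context s->page_bits/s->page_mask for the compile-time TARGET_PAGE_BITS/TARGET_PAGE_MASK, but the comparator math is unchanged: ANDing an address with (TARGET_PAGE_MASK | a_mask) isolates the page number while leaving alignment bits visible, so a single compare against the TLB tag fails on either a page miss or a misaligned access. A standalone sketch (page size illustrative, not QEMU code):

/* Standalone sketch of the softmmu comparator math. */
#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)

int main(void)
{
    uint64_t a_mask = 3;   /* require 4-byte alignment */
    uint64_t compare_mask = TARGET_PAGE_MASK | a_mask;

    uint64_t tlb_tag = 0x12345000;   /* page-aligned TLB comparator */
    assert((0x12345678 & compare_mask) == tlb_tag);  /* hit: aligned */
    assert((0x12345679 & compare_mask) != tlb_tag);  /* miss: unaligned */
    assert((0x12346678 & compare_mask) != tlb_tag);  /* miss: other page */
    return 0;
}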
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
@@ -1427,7 +1427,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
 
     /* Extract the tlb index from the address into R0. */
     tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr,
-                    SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
+                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
 
     /*
      * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
@@ -1463,8 +1463,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
             tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                             addr, s_mask - a_mask);
         }
-        if (use_armv7_instructions && s->page_bits <= 16) {
-            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
+        if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
+            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
             tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                             t_addr, TCG_REG_TMP, 0);
             tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
@@ -1475,10 +1475,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
             }
             tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
-                            SHIFT_IMM_LSR(s->page_bits));
+                            SHIFT_IMM_LSR(TARGET_PAGE_BITS));
             tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                             0, TCG_REG_R2, TCG_REG_TMP,
-                            SHIFT_IMM_LSL(s->page_bits));
+                            SHIFT_IMM_LSL(TARGET_PAGE_BITS));
         }
     } else if (a_mask) {
         ldst = new_ldst_label(s);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
@@ -2199,16 +2199,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
         if (TCG_TYPE_PTR == TCG_TYPE_I64) {
             hrexw = P_REXW;
-            if (s->page_bits + s->tlb_dyn_max_bits > 32) {
-                tlbtype = TCG_TYPE_I64;
-                tlbrexw = P_REXW;
-            }
         }
     }
 
     tcg_out_mov(s, tlbtype, TCG_REG_L0, addr);
     tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
-                   s->page_bits - CPU_TLB_ENTRY_BITS);
+                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
     tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
                          fast_ofs + offsetof(CPUTLBDescFast, mask));
@@ -2227,7 +2225,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
                              addr, s_mask - a_mask);
     }
-    tlb_mask = s->page_mask | a_mask;
+    tlb_mask = TARGET_PAGE_MASK | a_mask;
     tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
 
     /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
@@ -1065,7 +1065,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
 
     tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
-                       s->page_bits - CPU_TLB_ENTRY_BITS);
+                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
     tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
 
@@ -1091,7 +1091,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
     }
     tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
-                          a_bits, s->page_bits - 1);
+                          a_bits, TARGET_PAGE_BITS - 1);
 
     /* Compare masked address with the TLB entry. */
     ldst->label_ptr[0] = s->code_ptr;
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
@@ -1199,9 +1199,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     /* Extract the TLB index from the address into TMP3. */
     if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
         tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addr,
-                       s->page_bits - CPU_TLB_ENTRY_BITS);
+                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     } else {
-        tcg_out_dsrl(s, TCG_TMP3, addr, s->page_bits - CPU_TLB_ENTRY_BITS);
+        tcg_out_dsrl(s, TCG_TMP3, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     }
     tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
 
@@ -1224,7 +1224,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * For unaligned accesses, compare against the end of the access to
      * verify that it does not cross a page boundary.
      */
-    tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
+    tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask);
     if (a_mask < s_mask) {
         tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32
                             || addr_type == TCG_TYPE_I32
diff --git a/tcg/perf.c b/tcg/perf.c
@@ -334,7 +334,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
         /* FIXME: This replicates the restore_state_to_opc() logic. */
         q[insn].address = gen_insn_data[insn * INSN_START_WORDS + 0];
         if (tb_cflags(tb) & CF_PCREL) {
-            q[insn].address |= (guest_pc & qemu_target_page_mask());
+            q[insn].address |= guest_pc & TARGET_PAGE_MASK;
         }
         q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? DEBUGINFO_LINE : 0);
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
@@ -2440,10 +2440,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     /* Extract the page index, shifted into place for tlb index. */
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_out_shri32(s, TCG_REG_R0, addr,
-                       s->page_bits - CPU_TLB_ENTRY_BITS);
+                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     } else {
         tcg_out_shri64(s, TCG_REG_R0, addr,
-                       s->page_bits - CPU_TLB_ENTRY_BITS);
+                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     }
     tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
 
@@ -2480,7 +2480,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
             a_bits = s_bits;
         }
         tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr, 0,
-                    (32 - a_bits) & 31, 31 - s->page_bits);
+                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
     } else {
         TCGReg t = addr;
 
@@ -2501,13 +2501,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         /* Mask the address for the requested alignment. */
         if (addr_type == TCG_TYPE_I32) {
             tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
-                        (32 - a_bits) & 31, 31 - s->page_bits);
+                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
         } else if (a_bits == 0) {
-            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
+            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
         } else {
             tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
-                        64 - s->page_bits, s->page_bits - a_bits);
-            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
+                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
+            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
         }
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
@@ -1706,7 +1706,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
 
     tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
-                    s->page_bits - CPU_TLB_ENTRY_BITS);
+                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
     tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
 
@@ -1722,7 +1722,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
         tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
                         addr_adj, addr_reg, s_mask - a_mask);
     }
-    compare_mask = s->page_mask | a_mask;
+    compare_mask = TARGET_PAGE_MASK | a_mask;
     if (compare_mask == sextreg(compare_mask, 0, 12)) {
         tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
     } else {
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
@@ -2004,7 +2004,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst->addr_reg = addr_reg;
 
     tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
-                 s->page_bits - CPU_TLB_ENTRY_BITS);
+                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
     tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
     tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
 
@@ -2016,7 +2016,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * byte of the access.
      */
     a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
-    tlb_mask = (uint64_t)s->page_mask | a_mask;
+    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
     if (a_off == 0) {
         tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
     } else {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
@@ -1120,7 +1120,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
 
     /* Extract the page index, shifted into place for tlb index. */
     tcg_out_arithi(s, TCG_REG_T1, addr_reg,
-                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
+                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
     tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
 
     /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
@@ -1136,7 +1136,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     h->base = TCG_REG_T1;
 
     /* Mask out the page offset, except for the required alignment. */
-    compare_mask = s->page_mask | a_mask;
+    compare_mask = TARGET_PAGE_MASK | a_mask;
     if (check_fit_tl(compare_mask, 13)) {
         tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
     } else {
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
@@ -27,6 +27,7 @@
 #include "tcg/tcg-temp-internal.h"
 #include "tcg/tcg-op-common.h"
 #include "tcg/tcg-mo.h"
+#include "exec/target_page.h"
 #include "exec/translation-block.h"
 #include "exec/plugin-gen.h"
 #include "tcg-internal.h"
@@ -40,7 +41,7 @@ static void check_max_alignment(unsigned a_bits)
      * FIXME: Must keep the count up-to-date with "exec/tlb-flags.h".
      */
     if (tcg_use_softmmu) {
-        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
+        tcg_debug_assert(a_bits + 5 <= TARGET_PAGE_BITS);
     }
 }
diff --git a/tcg/tcg.c b/tcg/tcg.c
@@ -34,6 +34,7 @@
 #include "qemu/cacheflush.h"
 #include "qemu/cacheinfo.h"
 #include "qemu/timer.h"
+#include "exec/target_page.h"
 #include "exec/translation-block.h"
 #include "exec/tlb-common.h"
 #include "tcg/startup.h"