Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-27 03:51:57 -06:00)
target-arm queue:
 * mark MPS2/MPS3 board-internal i2c buses as 'full' so that command line user-created devices are not plugged into them
 * Take an exception if PSTATE.IL is set
 * Support an emulated ITS in the virt board
 * Add support for kudo-bmc board
 * Probe for KVM_CAP_ARM_VM_IPA_SIZE when creating scratch VM
 * cadence_uart: Fix clock handling issues that prevented u-boot from running

-----BEGIN PGP SIGNATURE-----

iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmE/ruQZHHBldGVyLm1h
eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3krdD/sHLxbPua1IOA1+uxLJwRnr
N7BZa0GVNX8+dKi3w3jtYHOyFG1u9NeOp/VI93I7G9k0vRvYT8eMN4cMWwsaG5rr
PPjiLIFAIFwxV9QkafIONLxLYFfc6T48tstG6BYaJU2tLPwIlSZK4ZbKqrxWesAm
mMw75AtESjYI77yQcsEXDflmcvbvM++IrqQAa190i2D8rizbbv/gqZtzJJpU2OGy
My51t+g1SPPJvoih6edpURGmKH1vmB0UwadnOG3GFv76c9nYeVPXAtdXS+8Rs+vU
QJpvJ0MSRc5ZztsltvXQefH4aseSHrZybpZGI0tNpZ1G2oRwZHIXEMDcZwtRHKlZ
o5M6oeNOUZFRFrLM8FRv4ErIFhgMwWUghy+oVejCF791j1WeasDpFL+ZZTWUNYiP
qmNdh6z7Dt7F1fxBxMiCw9PTRNB2zudyz/ZtymPGYEDj7leIpQ/HudRmaDKZ+zMG
A8omXNEw1LFsVrTE5MjLT7tr2Eq+71V2m0OkDB+Tvmpl4AXVG9b7kCoOp6NiAXZd
Y4Vdi5I8NN3OHK0yO1vMxOlNk7qo4BTqT7FYaSb1qaTZ/6TQtrWb7ThU989JJaQE
28H1p8uezMDC8NsaEBa2eBsen6Uf45jYKxgUpG0jB9QuXtRY1xUdaU06fQlz4dpn
7SyfLZbzeB0v+Bqd7z3Y9A==
=7BH/
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20210913-3' into staging

target-arm queue:
 * mark MPS2/MPS3 board-internal i2c buses as 'full' so that command line user-created devices are not plugged into them
 * Take an exception if PSTATE.IL is set
 * Support an emulated ITS in the virt board
 * Add support for kudo-bmc board
 * Probe for KVM_CAP_ARM_VM_IPA_SIZE when creating scratch VM
 * cadence_uart: Fix clock handling issues that prevented u-boot from running

# gpg: Signature made Mon 13 Sep 2021 21:04:52 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20210913-3: (23 commits)
  hw/arm/mps2.c: Mark internal-only I2C buses as 'full'
  hw/arm/mps2-tz.c: Mark internal-only I2C buses as 'full'
  hw/arm/mps2-tz.c: Add extra data parameter to MakeDevFn
  qdev: Support marking individual buses as 'full'
  target/arm: Merge disas_a64_insn into aarch64_tr_translate_insn
  target/arm: Take an exception if PSTATE.IL is set
  tests/data/acpi/virt: Update IORT files for ITS
  hw/arm/virt: add ITS support in virt GIC
  tests/data/acpi/virt: Add IORT files for ITS
  hw/intc: GICv3 redistributor ITS processing
  hw/intc: GICv3 ITS Feature enablement
  hw/intc: GICv3 ITS Command processing
  hw/intc: GICv3 ITS command queue framework
  hw/intc: GICv3 ITS register definitions added
  hw/intc: GICv3 ITS initial framework
  hw/arm: Add support for kudo-bmc board.
  hw/arm/virt: KVM: Probe for KVM_CAP_ARM_VM_IPA_SIZE when creating scratch VM
  hw/char: cadence_uart: Log a guest error when device is unclocked or in reset
  hw/char: cadence_uart: Ignore access when unclocked or in reset for uart_{read, write}()
  hw/char: cadence_uart: Convert to memop_with_attrs() ops
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit c6f5e042d8
35 changed files with 2144 additions and 210 deletions
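One series above ("qdev: Support marking individual buses as 'full'") is only summarized here. The sketch below gives a rough idea of how a board model might use that facility; the helper name qbus_mark_full() and the bus name "i2c" are assumptions based on the commit summaries, not taken from this page.

/*
 * Hedged sketch, not part of this diff: marking a board-internal I2C bus
 * so that -device/device_add cannot plug user-created devices into it.
 * qbus_mark_full() and the "i2c" child-bus name are assumed names.
 */
#include "qemu/osdep.h"
#include "hw/qdev-core.h"

static void mark_internal_i2c_full(DeviceState *i2c_controller)
{
    BusState *bus = qdev_get_child_bus(i2c_controller, "i2c");

    /* A bus marked 'full' is skipped when looking for somewhere to plug
     * command-line-created devices. */
    qbus_mark_full(bus);
}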
@@ -3455,6 +3455,7 @@ FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
 FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
 /* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
 FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
+FIELD(TBFLAG_ANY, PSTATE__IL, 13, 1)
 
 /*
  * Bit usage when in AArch32 state, both A- and M-profile.

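The FIELD() macro above just records a (shift, length) pair for a named bit field; DP_TBFLAG_ANY()/EX_TBFLAG_ANY() then deposit and extract that field in the cached TB-flags word. A minimal standalone sketch of the same deposit/extract round trip for a 1-bit field at bit 13, in plain C rather than QEMU's registerfields machinery:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for FIELD(TBFLAG_ANY, PSTATE__IL, 13, 1): a 1-bit field at bit 13. */
#define PSTATE_IL_SHIFT  13
#define PSTATE_IL_LENGTH 1

static uint32_t deposit32(uint32_t val, int start, int len, uint32_t field)
{
    uint32_t mask = ((1u << len) - 1) << start;
    return (val & ~mask) | ((field << start) & mask);
}

static uint32_t extract32(uint32_t val, int start, int len)
{
    return (val >> start) & ((1u << len) - 1);
}

int main(void)
{
    uint32_t flags = 0;

    /* Roughly what DP_TBFLAG_ANY(flags, PSTATE__IL, 1) boils down to... */
    flags = deposit32(flags, PSTATE_IL_SHIFT, PSTATE_IL_LENGTH, 1);
    /* ...and what EX_TBFLAG_ANY(tb_flags, PSTATE__IL) reads back. */
    assert(extract32(flags, PSTATE_IL_SHIFT, PSTATE_IL_LENGTH) == 1);
    printf("flags = 0x%08x\n", flags);   /* prints 0x00002000 */
    return 0;
}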
@@ -1071,6 +1071,7 @@ illegal_return:
     if (!arm_singlestep_active(env)) {
         env->pstate &= ~PSTATE_SS;
     }
+    helper_rebuild_hflags_a64(env, cur_el);
     qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                   "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
 }

@@ -13462,6 +13462,10 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
         DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
     }
 
+    if (env->uncached_cpsr & CPSR_IL) {
+        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 

@@ -13556,6 +13560,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
         }
     }
 
+    if (env->pstate & PSTATE_IL) {
+        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+    }
+
     if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
         /*
          * Set MTE_ACTIVE if any access may be Checked, and leave clear

@@ -70,12 +70,17 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                       struct kvm_vcpu_init *init)
 {
     int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
+    int max_vm_pa_size;
 
     kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
     if (kvmfd < 0) {
         goto err;
     }
-    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
+    max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
+    if (max_vm_pa_size < 0) {
+        max_vm_pa_size = 0;
+    }
+    vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
     if (vmfd < 0) {
         goto err;
     }

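For reference, the same probe-then-create sequence as a standalone program: KVM_CHECK_EXTENSION(KVM_CAP_ARM_VM_IPA_SIZE) reports the largest IPA size the host supports, in bits, and on Arm that value can be passed as the machine-type argument of KVM_CREATE_VM (0 keeps the legacy 40-bit default). A minimal sketch, assuming a Linux host whose <linux/kvm.h> is recent enough to define KVM_CAP_ARM_VM_IPA_SIZE:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
    int kvmfd = open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        perror("open /dev/kvm");
        return 1;
    }

    /* Largest supported IPA size in bits, or <= 0 if the host can't say. */
    int ipa_bits = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
    if (ipa_bits < 0) {
        ipa_bits = 0;                   /* fall back to the implicit default */
    }

    /* On Arm the low bits of the VM "type" argument carry the requested IPA size. */
    int vmfd = ioctl(kvmfd, KVM_CREATE_VM, ipa_bits);
    if (vmfd < 0) {
        perror("KVM_CREATE_VM");
    } else {
        printf("created VM with IPA size %d bits\n", ipa_bits);
        close(vmfd);
    }
    close(kvmfd);
    return 0;
}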
@@ -525,8 +525,8 @@ static inline const char *its_class_name(void)
         /* KVM implementation requires this capability */
         return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
     } else {
-        /* Software emulation is not implemented yet */
-        return NULL;
+        /* Software emulation based model */
+        return "arm-gicv3-its";
     }
 }
 

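With this change its_class_name() returns "arm-gicv3-its" (the new emulated model) when KVM's in-kernel ITS is not in use, so a board can instantiate either flavour through the same call. Below is a rough sketch of how a board might create and map the device; the "parent-gicv3" link-property name, the header, and ITS_BASE are assumptions for illustration, not taken from this diff.

/*
 * Hedged sketch (not from this commit): instantiating the ITS by class
 * name and mapping its register frame.  ITS_BASE and the "parent-gicv3"
 * link property are assumed for illustration.
 */
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "qapi/error.h"

#define ITS_BASE 0x08080000             /* example address, board-specific */

static void create_its(DeviceState *gicv3)
{
    const char *itsclass = its_class_name();
    DeviceState *dev;

    if (!itsclass) {
        return;                         /* no ITS available in this configuration */
    }

    dev = qdev_new(itsclass);
    object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(gicv3),
                             &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ITS_BASE);
}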
@@ -277,4 +277,9 @@ static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
            (cv << 24) | (cond << 20) | ti;
 }
 
+static inline uint32_t syn_illegalstate(void)
+{
+    return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
 #endif /* TARGET_ARM_SYNDROME_H */

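The value this helper builds is a constant: the Illegal Execution state exception class is 0x0e, the EC field sits in ESR bits [31:26], and the IL bit (bit 25) is always set for this syndrome, so syn_illegalstate() should evaluate to 0x3a000000. A quick check in plain C, re-deriving the constants from the architecture rather than from QEMU's headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t EC_ILLEGALSTATE = 0x0e;  /* ESR_ELx.EC for Illegal Execution state */
    const uint32_t EC_SHIFT = 26;           /* EC occupies ESR_ELx[31:26] */
    const uint32_t IL = 1u << 25;           /* 32-bit instruction length bit */

    uint32_t syndrome = (EC_ILLEGALSTATE << EC_SHIFT) | IL;

    assert(syndrome == 0x3a000000u);
    printf("syn_illegalstate() == 0x%08x\n", syndrome);
    return 0;
}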
@@ -14649,11 +14649,128 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
     return false;
 }
 
-/* C3.1 A64 instruction index by encoding */
-static void disas_a64_insn(CPUARMState *env, DisasContext *s)
+static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
+                                          CPUState *cpu)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+    CPUARMState *env = cpu->env_ptr;
+    ARMCPU *arm_cpu = env_archcpu(env);
+    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
+    int bound, core_mmu_idx;
+
+    dc->isar = &arm_cpu->isar;
+    dc->condjmp = 0;
+
+    dc->aarch64 = 1;
+    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+     * there is no secure EL1, so we route exceptions to EL3.
+     */
+    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+                               !arm_el_is_aa64(env, 3);
+    dc->thumb = 0;
+    dc->sctlr_b = 0;
+    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
+    dc->condexec_mask = 0;
+    dc->condexec_cond = 0;
+    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
+    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
+    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
+    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
+    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
+    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+    dc->user = (dc->current_el == 0);
+#endif
+    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
+    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
+    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
+    dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
+    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
+    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
+    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
+    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
+    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
+    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
+    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
+    dc->vec_len = 0;
+    dc->vec_stride = 0;
+    dc->cp_regs = arm_cpu->cp_regs;
+    dc->features = env->features;
+    dc->dcz_blocksize = arm_cpu->dcz_blocksize;
+
+#ifdef CONFIG_USER_ONLY
+    /* In sve_probe_page, we assume TBI is enabled. */
+    tcg_debug_assert(dc->tbid & 1);
+#endif
+
+    /* Single step state. The code-generation logic here is:
+     *  SS_ACTIVE == 0:
+     *   generate code with no special handling for single-stepping (except
+     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+     *   this happens anyway because those changes are all system register or
+     *   PSTATE writes).
+     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+     *   emit code for one insn
+     *   emit code to clear PSTATE.SS
+     *   emit code to generate software step exception for completed step
+     *   end TB (as usual for having generated an exception)
+     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+     *   emit code to generate a software step exception
+     *   end the TB
+     */
+    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
+    dc->is_ldex = false;
+    dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
+
+    /* Bound the number of insns to execute to those left on the page. */
+    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+
+    /* If architectural single step active, limit to 1. */
+    if (dc->ss_active) {
+        bound = 1;
+    }
+    dc->base.max_insns = MIN(dc->base.max_insns, bound);
+
+    init_tmp_a64_array(dc);
+}
+
+static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
+    dc->insn_start = tcg_last_op();
+}
+
+static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
+    DisasContext *s = container_of(dcbase, DisasContext, base);
+    CPUARMState *env = cpu->env_ptr;
     uint32_t insn;
 
+    if (s->ss_active && !s->pstate_ss) {
+        /* Singlestep state is Active-pending.
+         * If we're in this state at the start of a TB then either
+         *  a) we just took an exception to an EL which is being debugged
+         *     and this is the first insn in the exception handler
+         *  b) debug exceptions were masked and we just unmasked them
+         *     without changing EL (eg by clearing PSTATE.D)
+         * In either case we're going to take a swstep exception in the
+         * "did not step an insn" case, and so the syndrome ISV and EX
+         * bits should be zero.
+         */
+        assert(s->base.num_insns == 1);
+        gen_swstep_exception(s, 0, 0);
+        s->base.is_jmp = DISAS_NORETURN;
+        return;
+    }
+
     s->pc_curr = s->base.pc_next;
     insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
     s->insn = insn;

@@ -14662,6 +14779,16 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
     s->fp_access_checked = false;
     s->sve_access_checked = false;
 
+    if (s->pstate_il) {
+        /*
+         * Illegal execution state. This has priority over BTI
+         * exceptions, but comes after instruction abort exceptions.
+         */
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_illegalstate(), default_exception_el(s));
+        return;
+    }
+
     if (dc_isar_feature(aa64_bti, s)) {
         if (s->base.num_insns == 1) {
             /*

@@ -14744,130 +14871,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
     if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
         reset_btype(s);
     }
-}
-
-static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
-                                          CPUState *cpu)
-{
-    DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUARMState *env = cpu->env_ptr;
-    ARMCPU *arm_cpu = env_archcpu(env);
-    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
-    int bound, core_mmu_idx;
-
-    dc->isar = &arm_cpu->isar;
-    dc->condjmp = 0;
-
-    dc->aarch64 = 1;
-    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
-     * there is no secure EL1, so we route exceptions to EL3.
-     */
-    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
-                               !arm_el_is_aa64(env, 3);
-    dc->thumb = 0;
-    dc->sctlr_b = 0;
-    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
-    dc->condexec_mask = 0;
-    dc->condexec_cond = 0;
-    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
-    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
-    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
-    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
-    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
-    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
-#if !defined(CONFIG_USER_ONLY)
-    dc->user = (dc->current_el == 0);
-#endif
-    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
-    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
-    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
-    dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
-    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
-    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
-    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
-    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
-    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
-    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
-    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
-    dc->vec_len = 0;
-    dc->vec_stride = 0;
-    dc->cp_regs = arm_cpu->cp_regs;
-    dc->features = env->features;
-    dc->dcz_blocksize = arm_cpu->dcz_blocksize;
-
-#ifdef CONFIG_USER_ONLY
-    /* In sve_probe_page, we assume TBI is enabled. */
-    tcg_debug_assert(dc->tbid & 1);
-#endif
-
-    /* Single step state. The code-generation logic here is:
-     *  SS_ACTIVE == 0:
-     *   generate code with no special handling for single-stepping (except
-     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
-     *   this happens anyway because those changes are all system register or
-     *   PSTATE writes).
-     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
-     *   emit code for one insn
-     *   emit code to clear PSTATE.SS
-     *   emit code to generate software step exception for completed step
-     *   end TB (as usual for having generated an exception)
-     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
-     *   emit code to generate a software step exception
-     *   end the TB
-     */
-    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
-    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
-    dc->is_ldex = false;
-    dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
-
-    /* Bound the number of insns to execute to those left on the page. */
-    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
-
-    /* If architectural single step active, limit to 1. */
-    if (dc->ss_active) {
-        bound = 1;
-    }
-    dc->base.max_insns = MIN(dc->base.max_insns, bound);
-
-    init_tmp_a64_array(dc);
-}
-
-static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
-{
-}
-
-static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
-{
-    DisasContext *dc = container_of(dcbase, DisasContext, base);
-
-    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
-    dc->insn_start = tcg_last_op();
-}
-
-static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
-{
-    DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUARMState *env = cpu->env_ptr;
-
-    if (dc->ss_active && !dc->pstate_ss) {
-        /* Singlestep state is Active-pending.
-         * If we're in this state at the start of a TB then either
-         *  a) we just took an exception to an EL which is being debugged
-         *     and this is the first insn in the exception handler
-         *  b) debug exceptions were masked and we just unmasked them
-         *     without changing EL (eg by clearing PSTATE.D)
-         * In either case we're going to take a swstep exception in the
-         * "did not step an insn" case, and so the syndrome ISV and EX
-         * bits should be zero.
-         */
-        assert(dc->base.num_insns == 1);
-        gen_swstep_exception(dc, 0, 0);
-        dc->base.is_jmp = DISAS_NORETURN;
-    } else {
-        disas_a64_insn(env, dc);
-    }
 
-    translator_loop_temp_check(&dc->base);
+    translator_loop_temp_check(&s->base);
 }
 
 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)

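After this refactor the former disas_a64_insn() body lives directly in aarch64_tr_translate_insn(), one of the per-TB callbacks that QEMU's generic translator loop invokes. The sketch below shows roughly how those callbacks are wired together; the exact TranslatorOps member names are believed correct for this era of the tree but should be treated as an assumption and checked against include/exec/translator.h.

/*
 * Hedged sketch: the TranslatorOps table that ties the aarch64_tr_*
 * callbacks above into QEMU's common translator loop.
 */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,   /* now does the decode itself */
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};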
@@ -9090,6 +9090,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
         return;
     }
 
+    if (s->pstate_il) {
+        /*
+         * Illegal execution state. This has priority over BTI
+         * exceptions, but comes after instruction abort exceptions.
+         */
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_illegalstate(), default_exception_el(s));
+        return;
+    }
+
     if (cond == 0xf) {
         /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
          * choose to UNDEF. In ARMv5 and above the space is used

@@ -9358,6 +9368,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 #endif
     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
     dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
+    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
 
     if (arm_feature(env, ARM_FEATURE_M)) {
         dc->vfp_enabled = 1;

@@ -9621,6 +9632,16 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
     dc->insn = insn;
 
+    if (dc->pstate_il) {
+        /*
+         * Illegal execution state. This has priority over BTI
+         * exceptions, but comes after instruction abort exceptions.
+         */
+        gen_exception_insn(dc, dc->pc_curr, EXCP_UDEF,
+                           syn_illegalstate(), default_exception_el(dc));
+        return;
+    }
+
     if (dc->eci) {
         /*
          * For M-profile continuable instructions, ECI/ICI handling

@@ -98,6 +98,8 @@ typedef struct DisasContext {
     bool hstr_active;
    /* True if memory operations require alignment */
     bool align_mem;
+    /* True if PSTATE.IL is set */
+    bool pstate_il;
     /*
      * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
      * < 0, set by the current instruction.