Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-07 17:53:56 -06:00
target-arm:
 * cleanups converting to DEFINE_PROP_LINK
 * allwinner-a10: mark as not user-creatable
 * initial patches working towards ARMv8M support
 * implement generating aborts on memory transaction failures
 * make BXJ behave correctly (ie not UNDEF) on ARMv6-and-later
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJZsUjvAAoJEDwlJe0UNgzey10P+wf1TRxRMGnoDftimLyPt9Pt
cXYSP1KKF4qn618ZSJHPHJasWEx2obAP8JrrA8qLz0quWpWlXZ40bhgxKX9iKb2l
4jrt/DjfTH7RWMRs94lOb0ZOtMokLfjHMSBhP31xR4Lgia0HdlmwqUPLr2T10ffE
B9BKvPbXcee9Ss7osDqQr3OMUtSMjuc3G3z3WaySwG80od9MB8mblnMU0h9gZEeT
6csGRHU8rfOkv9ZzrSJRWBuhmxC0Mrg3lB3iZffupFnI//q+PZfW2+ojAyn+pATu
3YgHjgfgw4P5N2iGlg8c4y6mrig0fQNHWIXWFk7zWp7kWCdXnq5doFpJmi+CfMlE
yQqMYzuy2Bd9n2fAB036nvb1LBHEKFYfKxqPoeJzuB9wEcXjmnbwuJ+iAKo/DP94
9wE/cPNKySFmZJFEz+byAZvnEp0ynpQtDoCnaIJPbx6ytkKfL9xXX78+mmlTn8hj
55NyH2aaEXpuxJKkld1pP2O+r/amFJ603rujSEaK0Or2YGcE1fit+YZSSh1glt25
b3vEKn1ydWV4udRjBIEd0l/PIhGenILXC3bDONiWqEIPaMVeOxjhl+lvEHmELOjd
t+o4ntQfU94Z6eDXPhx/bXqIZi9qtDbMZosojWL6wMAIMEiuXlB/a9vhcs9uBnRJ
M0PiR5jVpZgDfLipV/8A
=URgX
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170907' into staging

target-arm:
 * cleanups converting to DEFINE_PROP_LINK
 * allwinner-a10: mark as not user-creatable
 * initial patches working towards ARMv8M support
 * implement generating aborts on memory transaction failures
 * make BXJ behave correctly (ie not UNDEF) on ARMv6-and-later

# gpg: Signature made Thu 07 Sep 2017 14:26:07 BST
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20170907: (31 commits)
  target/arm: Add Jazelle feature
  target/arm: Implement new do_transaction_failed hook
  hw/arm: Set ignore_memory_transaction_failures for most ARM boards
  boards.h: Define new flag ignore_memory_transaction_failures
  target/arm: Implement BXNS, and banked stack pointers
  target/arm: Move regime_is_secure() to target/arm/internals.h
  target/arm: Make CFSR register banked for v8M
  target/arm: Make MMFAR banked for v8M
  target/arm: Make CCR register banked for v8M
  target/arm: Make MPU_CTRL register banked for v8M
  target/arm: Make MPU_RNR register banked for v8M
  target/arm: Make MPU_RBAR, MPU_RLAR banked for v8M
  target/arm: Make MPU_MAIR0, MPU_MAIR1 registers banked for v8M
  target/arm: Make VTOR register banked for v8M
  nvic: Add NS alias SCS region
  target/arm: Make CONTROL register banked for v8M
  target/arm: Make FAULTMASK register banked for v8M
  target/arm: Make PRIMASK register banked for v8M
  target/arm: Make BASEPRI register banked for v8M
  target/arm: Add MMU indexes for secure v8M
  ...

# Conflicts:
#    target/arm/translate.c
Commit ef475b5dd1
48 changed files with 978 additions and 213 deletions
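The recurring pattern in the target/arm hunks below is that each formerly scalar v7M register field becomes a two-element array indexed by security state. A minimal sketch of that convention, using the M_REG_NS/M_REG_S indices and the env->v7m.secure flag that this series introduces (the helper function name here is illustrative, not part of the patches):

    /* Banked M-profile register access: index 0 is Non-secure, 1 is Secure. */
    #define M_REG_NS 0
    #define M_REG_S  1

    static uint32_t read_current_basepri(CPUARMState *env)
    {
        /* env->v7m.secure selects the copy for the current security state;
         * on CPUs without the Security Extension it is always 0 (NS),
         * so the same expression works whether or not v8M security exists.
         */
        return env->v7m.basepri[env->v7m.secure];
    }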
target/arm/cpu.c

@@ -185,11 +185,21 @@ static void arm_cpu_reset(CPUState *s)
         uint32_t initial_pc; /* Loaded from 0x4 */
         uint8_t *rom;
 
-        /* The reset value of this bit is IMPDEF, but ARM recommends
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            env->v7m.secure = true;
+        }
+
+        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
          * that it resets to 1, so QEMU always does that rather than making
-         * it dependent on CPU model.
+         * it dependent on CPU model. In v8M it is RES1.
          */
-        env->v7m.ccr = R_V7M_CCR_STKALIGN_MASK;
+        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
+        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
+            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
+            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
+        }
 
         /* Unlike A/R profile, M profile defines the reset LR value */
         env->regs[14] = 0xffffffff;

@@ -228,17 +238,38 @@ static void arm_cpu_reset(CPUState *s)
     env->vfp.xregs[ARM_VFP_FPEXC] = 0;
 #endif
 
-    if (arm_feature(env, ARM_FEATURE_PMSA) &&
-        arm_feature(env, ARM_FEATURE_V7)) {
+    if (arm_feature(env, ARM_FEATURE_PMSA)) {
         if (cpu->pmsav7_dregion > 0) {
-            memset(env->pmsav7.drbar, 0,
-                   sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
-            memset(env->pmsav7.drsr, 0,
-                   sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
-            memset(env->pmsav7.dracr, 0,
-                   sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
+            if (arm_feature(env, ARM_FEATURE_V8)) {
+                memset(env->pmsav8.rbar[M_REG_NS], 0,
+                       sizeof(*env->pmsav8.rbar[M_REG_NS])
+                       * cpu->pmsav7_dregion);
+                memset(env->pmsav8.rlar[M_REG_NS], 0,
+                       sizeof(*env->pmsav8.rlar[M_REG_NS])
+                       * cpu->pmsav7_dregion);
+                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+                    memset(env->pmsav8.rbar[M_REG_S], 0,
+                           sizeof(*env->pmsav8.rbar[M_REG_S])
+                           * cpu->pmsav7_dregion);
+                    memset(env->pmsav8.rlar[M_REG_S], 0,
+                           sizeof(*env->pmsav8.rlar[M_REG_S])
+                           * cpu->pmsav7_dregion);
+                }
+            } else if (arm_feature(env, ARM_FEATURE_V7)) {
+                memset(env->pmsav7.drbar, 0,
+                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
+                memset(env->pmsav7.drsr, 0,
+                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
+                memset(env->pmsav7.dracr, 0,
+                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
+            }
         }
-        env->pmsav7.rnr = 0;
+        env->pmsav7.rnr[M_REG_NS] = 0;
+        env->pmsav7.rnr[M_REG_S] = 0;
+        env->pmsav8.mair0[M_REG_NS] = 0;
+        env->pmsav8.mair0[M_REG_S] = 0;
+        env->pmsav8.mair1[M_REG_NS] = 0;
+        env->pmsav8.mair1[M_REG_S] = 0;
     }
 
     set_flush_to_zero(1, &env->vfp.standard_fp_status);

@@ -681,6 +712,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     }
     if (arm_feature(env, ARM_FEATURE_V6)) {
         set_feature(env, ARM_FEATURE_V5);
+        set_feature(env, ARM_FEATURE_JAZELLE);
         if (!arm_feature(env, ARM_FEATURE_M)) {
             set_feature(env, ARM_FEATURE_AUXCR);
         }

@@ -809,9 +841,19 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         }
 
         if (nr) {
-            env->pmsav7.drbar = g_new0(uint32_t, nr);
-            env->pmsav7.drsr = g_new0(uint32_t, nr);
-            env->pmsav7.dracr = g_new0(uint32_t, nr);
+            if (arm_feature(env, ARM_FEATURE_V8)) {
+                /* PMSAv8 */
+                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
+                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
+                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
+                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
+                }
+            } else {
+                env->pmsav7.drbar = g_new0(uint32_t, nr);
+                env->pmsav7.drsr = g_new0(uint32_t, nr);
+                env->pmsav7.dracr = g_new0(uint32_t, nr);
+            }
         }
     }
 

@@ -825,22 +867,21 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     init_cpreg_list(cpu);
 
 #ifndef CONFIG_USER_ONLY
-    if (cpu->has_el3) {
-        cs->num_ases = 2;
-    } else {
-        cs->num_ases = 1;
-    }
-
-    if (cpu->has_el3) {
+    if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
         AddressSpace *as;
 
+        cs->num_ases = 2;
+
         if (!cpu->secure_memory) {
             cpu->secure_memory = cs->memory;
         }
         as = address_space_init_shareable(cpu->secure_memory,
                                           "cpu-secure-memory");
         cpu_address_space_init(cs, as, ARMASIdx_S);
+    } else {
+        cs->num_ases = 1;
     }
 
     cpu_address_space_init(cs,
                            address_space_init_shareable(cs->memory,
                                                         "cpu-memory"),

@@ -887,6 +928,7 @@ static void arm926_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_VFP);
     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
     set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
+    set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
     cpu->midr = 0x41069265;
     cpu->reset_fpsid = 0x41011090;
     cpu->ctr = 0x1dd20d2;

@@ -916,6 +958,7 @@ static void arm1026_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_AUXCR);
     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
     set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
+    set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
     cpu->midr = 0x4106a262;
     cpu->reset_fpsid = 0x410110a0;
     cpu->ctr = 0x1dd20d2;

@@ -1667,6 +1710,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
 #else
     cc->do_interrupt = arm_cpu_do_interrupt;
     cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
     cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
     cc->asidx_from_attrs = arm_asidx_from_attrs;
     cc->vmsd = &vmstate_arm_cpu;

target/arm/cpu.h

@@ -66,11 +66,24 @@
 #define ARMV7M_EXCP_MEM     4
 #define ARMV7M_EXCP_BUS     5
 #define ARMV7M_EXCP_USAGE   6
+#define ARMV7M_EXCP_SECURE  7
 #define ARMV7M_EXCP_SVC     11
 #define ARMV7M_EXCP_DEBUG   12
 #define ARMV7M_EXCP_PENDSV  14
 #define ARMV7M_EXCP_SYSTICK 15
 
+/* For M profile, some registers are banked secure vs non-secure;
+ * these are represented as a 2-element array where the first element
+ * is the non-secure copy and the second is the secure copy.
+ * When the CPU does not have implement the security extension then
+ * only the first element is used.
+ * This means that the copy for the current security state can be
+ * accessed via env->registerfield[env->v7m.secure] (whether the security
+ * extension is implemented or not).
+ */
+#define M_REG_NS 0
+#define M_REG_S 1
+
 /* ARM-specific interrupt pending bits. */
 #define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
 #define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2

@@ -406,20 +419,34 @@ typedef struct CPUARMState {
     } cp15;
 
     struct {
+        /* M profile has up to 4 stack pointers:
+         * a Main Stack Pointer and a Process Stack Pointer for each
+         * of the Secure and Non-Secure states. (If the CPU doesn't support
+         * the security extension then it has only two SPs.)
+         * In QEMU we always store the currently active SP in regs[13],
+         * and the non-active SP for the current security state in
+         * v7m.other_sp. The stack pointers for the inactive security state
+         * are stored in other_ss_msp and other_ss_psp.
+         * switch_v7m_security_state() is responsible for rearranging them
+         * when we change security state.
+         */
         uint32_t other_sp;
-        uint32_t vecbase;
-        uint32_t basepri;
-        uint32_t control;
-        uint32_t ccr; /* Configuration and Control */
-        uint32_t cfsr; /* Configurable Fault Status */
+        uint32_t other_ss_msp;
+        uint32_t other_ss_psp;
+        uint32_t vecbase[2];
+        uint32_t basepri[2];
+        uint32_t control[2];
+        uint32_t ccr[2]; /* Configuration and Control */
+        uint32_t cfsr[2]; /* Configurable Fault Status */
         uint32_t hfsr; /* HardFault Status */
         uint32_t dfsr; /* Debug Fault Status Register */
-        uint32_t mmfar; /* MemManage Fault Address */
+        uint32_t mmfar[2]; /* MemManage Fault Address */
         uint32_t bfar; /* BusFault Address */
-        unsigned mpu_ctrl; /* MPU_CTRL */
+        unsigned mpu_ctrl[2]; /* MPU_CTRL */
         int exception;
-        uint32_t primask;
-        uint32_t faultmask;
+        uint32_t primask[2];
+        uint32_t faultmask[2];
+        uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
     } v7m;
 
     /* Information associated with an exception about to be taken:

@@ -519,9 +546,22 @@ typedef struct CPUARMState {
         uint32_t *drbar;
         uint32_t *drsr;
         uint32_t *dracr;
-        uint32_t rnr;
+        uint32_t rnr[2];
     } pmsav7;
 
+    /* PMSAv8 MPU */
+    struct {
+        /* The PMSAv8 implementation also shares some PMSAv7 config
+         * and state:
+         *  pmsav7.rnr (region number register)
+         *  pmsav7_dregion (number of configured regions)
+         */
+        uint32_t *rbar[2];
+        uint32_t *rlar[2];
+        uint32_t mair0[2];
+        uint32_t mair1[2];
+    } pmsav8;
+
     void *nvic;
     const struct arm_boot_info *boot_info;
     /* Store GICv3CPUState to access from this struct */

@@ -1182,6 +1222,11 @@ FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
 FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
 FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)
 
+/* V7M CFSR bit masks covering all of the subregister bits */
+FIELD(V7M_CFSR, MMFSR, 0, 8)
+FIELD(V7M_CFSR, BFSR, 8, 8)
+FIELD(V7M_CFSR, UFSR, 16, 16)
+
 /* V7M HFSR bits */
 FIELD(V7M_HFSR, VECTTBL, 1, 1)
 FIELD(V7M_HFSR, FORCED, 30, 1)

@@ -1250,6 +1295,8 @@ enum arm_features {
     ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
     ARM_FEATURE_PMU, /* has PMU support */
     ARM_FEATURE_VBAR, /* has cp15 VBAR */
+    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
+    ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
 };
 
 static inline int arm_feature(CPUARMState *env, int feature)

@@ -1414,6 +1461,16 @@ void armv7m_nvic_acknowledge_irq(void *opaque);
  * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
  */
 int armv7m_nvic_complete_irq(void *opaque, int irq);
+/**
+ * armv7m_nvic_raw_execution_priority: return the raw execution priority
+ * @opaque: the NVIC
+ *
+ * Returns: the raw execution priority as defined by the v8M architecture.
+ * This is the execution priority minus the effects of AIRCR.PRIS,
+ * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
+ * (v8M ARM ARM I_PKLD.)
+ */
+int armv7m_nvic_raw_execution_priority(void *opaque);
 
 /* Interface for defining coprocessor registers.
  * Registers are defined in tables of arm_cp_reginfo structs

@@ -1643,7 +1700,8 @@ static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
 static inline int arm_current_el(CPUARMState *env)
 {
     if (arm_feature(env, ARM_FEATURE_M)) {
-        return arm_v7m_is_handler_mode(env) || !(env->v7m.control & 1);
+        return arm_v7m_is_handler_mode(env) ||
+            !(env->v7m.control[env->v7m.secure] & 1);
     }
 
     if (is_a64(env)) {

@@ -2087,6 +2145,10 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * Execution priority negative (this is like privileged, but the
  * MPU HFNMIENA bit means that it may have different access permission
  * check results to normal privileged code, so can't share a TLB).
+ * If the CPU supports the v8M Security Extension then there are also:
+ * Secure User
+ * Secure Privileged
+ * Secure, execution priority negative
  *
  * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
  * are not quite the same -- different CPU types (most notably M profile

@@ -2124,6 +2186,9 @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
     ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
     ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M,
+    ARMMMUIdx_MSUser = 3 | ARM_MMU_IDX_M,
+    ARMMMUIdx_MSPriv = 4 | ARM_MMU_IDX_M,
+    ARMMMUIdx_MSNegPri = 5 | ARM_MMU_IDX_M,
     /* Indexes below here don't have TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
      */

@@ -2145,6 +2210,9 @@ typedef enum ARMMMUIdxBit {
     ARMMMUIdxBit_MUser = 1 << 0,
     ARMMMUIdxBit_MPriv = 1 << 1,
     ARMMMUIdxBit_MNegPri = 1 << 2,
+    ARMMMUIdxBit_MSUser = 1 << 3,
+    ARMMMUIdxBit_MSPriv = 1 << 4,
+    ARMMMUIdxBit_MSNegPri = 1 << 5,
 } ARMMMUIdxBit;
 
 #define MMU_USER_IDX 0

@@ -2170,7 +2238,8 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
     case ARM_MMU_IDX_A:
         return mmu_idx & 3;
     case ARM_MMU_IDX_M:
-        return mmu_idx == ARMMMUIdx_MUser ? 0 : 1;
+        return (mmu_idx == ARMMMUIdx_MUser || mmu_idx == ARMMMUIdx_MSUser)
+            ? 0 : 1;
     default:
         g_assert_not_reached();
     }

@@ -2188,8 +2257,12 @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
          * we're in a HardFault or NMI handler.
          */
         if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
-            || env->v7m.faultmask) {
-            return arm_to_core_mmu_idx(ARMMMUIdx_MNegPri);
+            || env->v7m.faultmask[env->v7m.secure]) {
+            mmu_idx = ARMMMUIdx_MNegPri;
+        }
+
+        if (env->v7m.secure) {
+            mmu_idx += ARMMMUIdx_MSUser;
         }
 
         return arm_to_core_mmu_idx(mmu_idx);

target/arm/helper.c

@@ -2385,7 +2385,7 @@ static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
         return 0;
     }
 
-    u32p += env->pmsav7.rnr;
+    u32p += env->pmsav7.rnr[M_REG_NS];
     return *u32p;
 }
 

@@ -2399,7 +2399,7 @@ static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
         return;
     }
 
-    u32p += env->pmsav7.rnr;
+    u32p += env->pmsav7.rnr[M_REG_NS];
     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
     *u32p = value;
 }

@@ -2442,7 +2442,7 @@ static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
       .resetfn = arm_cp_reset_ignore },
     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
       .access = PL1_RW,
-      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr),
+      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
       .writefn = pmsav7_rgnr_write,
       .resetfn = arm_cp_reset_ignore },
     REGINFO_SENTINEL

@@ -5870,6 +5870,12 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
     return 0;
 }
 
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
 void switch_mode(CPUARMState *env, int mode)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);

@@ -6044,29 +6050,103 @@ static uint32_t v7m_pop(CPUARMState *env)
     return val;
 }
 
+/* Return true if we're using the process stack pointer (not the MSP) */
+static bool v7m_using_psp(CPUARMState *env)
+{
+    /* Handler mode always uses the main stack; for thread mode
+     * the CONTROL.SPSEL bit determines the answer.
+     * Note that in v7M it is not possible to be in Handler mode with
+     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
+     */
+    return !arm_v7m_is_handler_mode(env) &&
+        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
+}
+
 /* Switch to V7M main or process stack pointer. */
 static void switch_v7m_sp(CPUARMState *env, bool new_spsel)
 {
     uint32_t tmp;
-    bool old_spsel = env->v7m.control & R_V7M_CONTROL_SPSEL_MASK;
+    uint32_t old_control = env->v7m.control[env->v7m.secure];
+    bool old_spsel = old_control & R_V7M_CONTROL_SPSEL_MASK;
 
     if (old_spsel != new_spsel) {
         tmp = env->v7m.other_sp;
         env->v7m.other_sp = env->regs[13];
         env->regs[13] = tmp;
 
-        env->v7m.control = deposit32(env->v7m.control,
+        env->v7m.control[env->v7m.secure] = deposit32(old_control,
                                      R_V7M_CONTROL_SPSEL_SHIFT,
                                      R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
     }
 }
 
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+    uint32_t new_ss_msp, new_ss_psp;
+
+    if (env->v7m.secure == new_secstate) {
+        return;
+    }
+
+    /* All the banked state is accessed by looking at env->v7m.secure
+     * except for the stack pointer; rearrange the SP appropriately.
+     */
+    new_ss_msp = env->v7m.other_ss_msp;
+    new_ss_psp = env->v7m.other_ss_psp;
+
+    if (v7m_using_psp(env)) {
+        env->v7m.other_ss_psp = env->regs[13];
+        env->v7m.other_ss_msp = env->v7m.other_sp;
+    } else {
+        env->v7m.other_ss_msp = env->regs[13];
+        env->v7m.other_ss_psp = env->v7m.other_sp;
+    }
+
+    env->v7m.secure = new_secstate;
+
+    if (v7m_using_psp(env)) {
+        env->regs[13] = new_ss_psp;
+        env->v7m.other_sp = new_ss_msp;
+    } else {
+        env->regs[13] = new_ss_msp;
+        env->v7m.other_sp = new_ss_psp;
+    }
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+    /* Handle v7M BXNS:
+     *  - if the return value is a magic value, do exception return (like BX)
+     *  - otherwise bit 0 of the return value is the target security state
+     */
+    if (dest >= 0xff000000) {
+        /* This is an exception return magic value; put it where
+         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+         * Note that if we ever add gen_ss_advance() singlestep support to
+         * M profile this should count as an "instruction execution complete"
+         * event (compare gen_bx_excret_final_code()).
+         */
+        env->regs[15] = dest & ~1;
+        env->thumb = dest & 1;
+        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+        /* notreached */
+    }
+
+    /* translate.c should have made BXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    switch_v7m_security_state(env, dest & 1);
+    env->thumb = 1;
+    env->regs[15] = dest & ~1;
+}
+
 static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
     CPUARMState *env = &cpu->env;
     MemTxResult result;
-    hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
+    hwaddr vec = env->v7m.vecbase[env->v7m.secure] + env->v7m.exception * 4;
     uint32_t addr;
 
     addr = address_space_ldl(cs->as, vec,

@@ -6112,7 +6192,8 @@ static void v7m_push_stack(ARMCPU *cpu)
     uint32_t xpsr = xpsr_read(env);
 
     /* Align stack pointer if the guest wants that */
-    if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
+    if ((env->regs[13] & 4) &&
+        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
         env->regs[13] -= 4;
         xpsr |= XPSR_SPREALIGN;
     }

@@ -6166,8 +6247,20 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     }
 
     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
-        /* Auto-clear FAULTMASK on return from other than NMI */
-        env->v7m.faultmask = 0;
+        /* Auto-clear FAULTMASK on return from other than NMI.
+         * If the security extension is implemented then this only
+         * happens if the raw execution priority is >= 0; the
+         * value of the ES bit in the exception return value indicates
+         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
+         */
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            int es = type & 1;
+            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
+                env->v7m.faultmask[es] = 0;
+            }
+        } else {
+            env->v7m.faultmask[M_REG_NS] = 0;
+        }
     }
 
     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {

@@ -6198,7 +6291,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         /* fall through */
     case 9: /* Return to Thread using Main stack */
         if (!rettobase &&
-            !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) {
+            !(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_NONBASETHRDENA_MASK)) {
             ufault = true;
         }
         break;

@@ -6210,7 +6303,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         /* Bad exception return: instead of popping the exception
          * stack, directly take a usage fault on the current stack.
         */
-        env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        v7m_exception_taken(cpu, type | 0xf0000000);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "

@@ -6252,7 +6345,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
         /* Take an INVPC UsageFault by pushing the stack again. */
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
-        env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
         v7m_push_stack(cpu);
         v7m_exception_taken(cpu, type | 0xf0000000);
         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "

@@ -6311,15 +6404,15 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     switch (cs->exception_index) {
     case EXCP_UDEF:
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
-        env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK;
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
         break;
     case EXCP_NOCP:
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
-        env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK;
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
         break;
     case EXCP_INVSTATE:
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
-        env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK;
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
         break;
     case EXCP_SWI:
         /* The PC already points to the next instruction. */

@@ -6335,11 +6428,11 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
         case 0x8: /* External Abort */
             switch (cs->exception_index) {
             case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr |= R_V7M_CFSR_PRECISERR_MASK;
+                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_PRECISERR_MASK;
                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.PRECISERR\n");
                 break;
             case EXCP_DATA_ABORT:
-                env->v7m.cfsr |=
+                env->v7m.cfsr[M_REG_NS] |=
                     (R_V7M_CFSR_IBUSERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                 env->v7m.bfar = env->exception.vaddress;
                 qemu_log_mask(CPU_LOG_INT,

@@ -6355,16 +6448,16 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
              */
             switch (cs->exception_index) {
             case EXCP_PREFETCH_ABORT:
-                env->v7m.cfsr |= R_V7M_CFSR_IACCVIOL_MASK;
+                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                 break;
             case EXCP_DATA_ABORT:
-                env->v7m.cfsr |=
+                env->v7m.cfsr[env->v7m.secure] |=
                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
-                env->v7m.mmfar = env->exception.vaddress;
+                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                 qemu_log_mask(CPU_LOG_INT,
                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
-                              env->v7m.mmfar);
+                              env->v7m.mmfar[env->v7m.secure]);
                 break;
             }
             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);

@@ -6397,7 +6490,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     }
 
     lr = 0xfffffff1;
-    if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+    if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
         lr |= 4;
     }
     if (!arm_v7m_is_handler_mode(env)) {

@@ -7032,35 +7125,15 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_MPriv:
     case ARMMMUIdx_MNegPri:
     case ARMMMUIdx_MUser:
+    case ARMMMUIdx_MSPriv:
+    case ARMMMUIdx_MSNegPri:
+    case ARMMMUIdx_MSUser:
         return 1;
     default:
         g_assert_not_reached();
     }
 }
 
-/* Return true if this address translation regime is secure */
-static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx) {
-    case ARMMMUIdx_S12NSE0:
-    case ARMMMUIdx_S12NSE1:
-    case ARMMMUIdx_S1NSE0:
-    case ARMMMUIdx_S1NSE1:
-    case ARMMMUIdx_S1E2:
-    case ARMMMUIdx_S2NS:
-    case ARMMMUIdx_MPriv:
-    case ARMMMUIdx_MNegPri:
-    case ARMMMUIdx_MUser:
-        return false;
-    case ARMMMUIdx_S1E3:
-    case ARMMMUIdx_S1SE0:
-    case ARMMMUIdx_S1SE1:
-        return true;
-    default:
-        g_assert_not_reached();
-    }
-}
-
 /* Return the SCTLR value which controls this address translation regime */
 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
 {

@@ -7072,11 +7145,12 @@ static inline bool regime_translation_disabled(CPUARMState *env,
                                                ARMMMUIdx mmu_idx)
 {
     if (arm_feature(env, ARM_FEATURE_M)) {
-        switch (env->v7m.mpu_ctrl &
+        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
         case R_V7M_MPU_CTRL_ENABLE_MASK:
             /* Enabled, but not for HardFault and NMI */
-            return mmu_idx == ARMMMUIdx_MNegPri;
+            return mmu_idx == ARMMMUIdx_MNegPri ||
+                mmu_idx == ARMMMUIdx_MSNegPri;
         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
             /* Enabled for all cases */
             return false;

@@ -8231,7 +8305,8 @@ static bool pmsav7_use_background_region(ARMCPU *cpu,
     }
 
     if (arm_feature(env, ARM_FEATURE_M)) {
-        return env->v7m.mpu_ctrl & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
+            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
     } else {
         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
     }

@@ -8411,6 +8486,112 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
     return !(*prot & (1 << access_type));
 }
 
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+{
+    ARMCPU *cpu = arm_env_get_cpu(env);
+    bool is_user = regime_is_user(env, mmu_idx);
+    uint32_t secure = regime_is_secure(env, mmu_idx);
+    int n;
+    int matchregion = -1;
+    bool hit = false;
+
+    *phys_ptr = address;
+    *prot = 0;
+
+    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
+     * was an exception vector read from the vector table (which is always
+     * done using the default system address map), because those accesses
+     * are done in arm_v7m_load_vector(), which always does a direct
+     * read using address_space_ldl(), rather than going via this function.
+     */
+    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+        hit = true;
+    } else if (m_is_ppb_region(env, address)) {
+        hit = true;
+    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+        hit = true;
+    } else {
+        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+            /* region search */
+            /* Note that the base address is bits [31:5] from the register
+             * with bits [4:0] all zeroes, but the limit address is bits
+             * [31:5] from the register with bits [4:0] all ones.
+             */
+            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
+            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
+
+            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
+                /* Region disabled */
+                continue;
+            }
+
+            if (address < base || address > limit) {
+                continue;
+            }
+
+            if (hit) {
+                /* Multiple regions match -- always a failure (unlike
+                 * PMSAv7 where highest-numbered-region wins)
+                 */
+                *fsr = 0x00d; /* permission fault */
+                return true;
+            }
+
+            matchregion = n;
+            hit = true;
+
+            if (base & ~TARGET_PAGE_MASK) {
+                qemu_log_mask(LOG_UNIMP,
+                              "MPU_RBAR[%d]: No support for MPU region base"
+                              "address of 0x%" PRIx32 ". Minimum alignment is "
+                              "%d\n",
+                              n, base, TARGET_PAGE_BITS);
+                continue;
+            }
+            if ((limit + 1) & ~TARGET_PAGE_MASK) {
+                qemu_log_mask(LOG_UNIMP,
+                              "MPU_RBAR[%d]: No support for MPU region limit"
+                              "address of 0x%" PRIx32 ". Minimum alignment is "
+                              "%d\n",
+                              n, limit, TARGET_PAGE_BITS);
+                continue;
+            }
+        }
+    }
+
+    if (!hit) {
+        /* background fault */
+        *fsr = 0;
+        return true;
+    }
+
+    if (matchregion == -1) {
+        /* hit using the background region */
+        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+    } else {
+        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
+        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+
+        if (m_is_system_region(env, address)) {
+            /* System space is always execute never */
+            xn = 1;
+        }
+
+        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+        if (*prot && !xn) {
+            *prot |= PAGE_EXEC;
+        }
+        /* We don't need to look the attribute up in the MAIR0/MAIR1
+         * registers because that only tells us about cacheability.
+         */
+    }
+
+    *fsr = 0x00d; /* Permission fault */
+    return !(*prot & (1 << access_type));
+}
+
 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)

@@ -8580,7 +8761,11 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         bool ret;
         *page_size = TARGET_PAGE_SIZE;
 
-        if (arm_feature(env, ARM_FEATURE_V7)) {
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            /* PMSAv8 */
+            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, fsr);
+        } else if (arm_feature(env, ARM_FEATURE_V7)) {
             /* PMSAv7 */
             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                        phys_ptr, prot, fsr);

@@ -8699,7 +8884,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
         return xpsr_read(env) & mask;
         break;
     case 20: /* CONTROL */
-        return env->v7m.control;
+        return env->v7m.control[env->v7m.secure];
     }
 
     if (el == 0) {

@@ -8708,18 +8893,18 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
 
     switch (reg) {
     case 8: /* MSP */
-        return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
+        return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
             env->v7m.other_sp : env->regs[13];
     case 9: /* PSP */
-        return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
+        return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
             env->regs[13] : env->v7m.other_sp;
     case 16: /* PRIMASK */
-        return env->v7m.primask;
+        return env->v7m.primask[env->v7m.secure];
     case 17: /* BASEPRI */
     case 18: /* BASEPRI_MAX */
-        return env->v7m.basepri;
+        return env->v7m.basepri[env->v7m.secure];
     case 19: /* FAULTMASK */
-        return env->v7m.faultmask;
+        return env->v7m.faultmask[env->v7m.secure];
     default:
         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                       " register %d\n", reg);

@@ -8760,32 +8945,34 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
         }
         break;
     case 8: /* MSP */
-        if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+        if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
             env->v7m.other_sp = val;
         } else {
             env->regs[13] = val;
         }
         break;
     case 9: /* PSP */
-        if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+        if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
             env->regs[13] = val;
         } else {
             env->v7m.other_sp = val;
         }
         break;
     case 16: /* PRIMASK */
-        env->v7m.primask = val & 1;
+        env->v7m.primask[env->v7m.secure] = val & 1;
         break;
     case 17: /* BASEPRI */
-        env->v7m.basepri = val & 0xff;
+        env->v7m.basepri[env->v7m.secure] = val & 0xff;
         break;
     case 18: /* BASEPRI_MAX */
         val &= 0xff;
-        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
-            env->v7m.basepri = val;
+        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
+                         || env->v7m.basepri[env->v7m.secure] == 0)) {
+            env->v7m.basepri[env->v7m.secure] = val;
+        }
         break;
     case 19: /* FAULTMASK */
-        env->v7m.faultmask = val & 1;
+        env->v7m.faultmask[env->v7m.secure] = val & 1;
         break;
     case 20: /* CONTROL */
         /* Writing to the SPSEL bit only has an effect if we are in

@@ -8796,8 +8983,8 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
         if (!arm_v7m_is_handler_mode(env)) {
             switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
         }
-        env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
-        env->v7m.control |= val & R_V7M_CONTROL_NPRIV_MASK;
+        env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
+        env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
         break;
     default:
         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"

target/arm/helper.h

@@ -63,6 +63,8 @@ DEF_HELPER_1(cpsr_read, i32, env)
 DEF_HELPER_3(v7m_msr, void, env, i32, i32)
 DEF_HELPER_2(v7m_mrs, i32, env, i32)
 
+DEF_HELPER_2(v7m_bxns, void, env, i32)
+
 DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
 DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
 DEF_HELPER_2(get_cp_reg, i32, env, ptr)

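For context on how the new helper is wired up: a DEF_HELPER_2 line like the one added above both declares the C function (via the HELPER() name macro) and generates a gen_helper_* emitter that the translator can call. A rough sketch of that pattern, not a literal excerpt from the patches:

    /* helper.h: declares helper_v7m_bxns(env, i32) and generates
     * gen_helper_v7m_bxns() for use by translate.c.
     */
    DEF_HELPER_2(v7m_bxns, void, env, i32)

    /* helper.c: the runtime implementation; HELPER(x) expands to helper_x */
    void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
    {
        /* ... switch security state and branch, as in the helper.c hunk above ... */
    }

    /* translate.c: emit a call to the helper from the generated code */
    gen_helper_v7m_bxns(cpu_env, var);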
target/arm/internals.h

@@ -472,6 +472,16 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);
 
+/* arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+                                   vaddr addr, unsigned size,
+                                   MMUAccessType access_type,
+                                   int mmu_idx, MemTxAttrs attrs,
+                                   MemTxResult response, uintptr_t retaddr);
+
 /* Call the EL change hook if one has been registered */
 static inline void arm_call_el_change_hook(ARMCPU *cpu)
 {

@@ -480,4 +490,30 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
     }
 }
 
+/* Return true if this address translation regime is secure */
+static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    switch (mmu_idx) {
+    case ARMMMUIdx_S12NSE0:
+    case ARMMMUIdx_S12NSE1:
+    case ARMMMUIdx_S1NSE0:
+    case ARMMMUIdx_S1NSE1:
+    case ARMMMUIdx_S1E2:
+    case ARMMMUIdx_S2NS:
+    case ARMMMUIdx_MPriv:
+    case ARMMMUIdx_MNegPri:
+    case ARMMMUIdx_MUser:
+        return false;
+    case ARMMMUIdx_S1E3:
+    case ARMMMUIdx_S1SE0:
+    case ARMMMUIdx_S1SE1:
+    case ARMMMUIdx_MSPriv:
+    case ARMMMUIdx_MSNegPri:
+    case ARMMMUIdx_MSUser:
+        return true;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 #endif

target/arm/machine.c

@@ -102,8 +102,8 @@ static const VMStateDescription vmstate_m_faultmask_primask = {
     .version_id = 1,
     .minimum_version_id = 1,
     .fields = (VMStateField[]) {
-        VMSTATE_UINT32(env.v7m.faultmask, ARMCPU),
-        VMSTATE_UINT32(env.v7m.primask, ARMCPU),
+        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
         VMSTATE_END_OF_LIST()
     }
 };

@@ -114,16 +114,16 @@ static const VMStateDescription vmstate_m = {
     .minimum_version_id = 4,
     .needed = m_needed,
     .fields = (VMStateField[]) {
-        VMSTATE_UINT32(env.v7m.vecbase, ARMCPU),
-        VMSTATE_UINT32(env.v7m.basepri, ARMCPU),
-        VMSTATE_UINT32(env.v7m.control, ARMCPU),
-        VMSTATE_UINT32(env.v7m.ccr, ARMCPU),
-        VMSTATE_UINT32(env.v7m.cfsr, ARMCPU),
+        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
         VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
         VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
-        VMSTATE_UINT32(env.v7m.mmfar, ARMCPU),
+        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
         VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
-        VMSTATE_UINT32(env.v7m.mpu_ctrl, ARMCPU),
+        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
         VMSTATE_INT32(env.v7m.exception, ARMCPU),
         VMSTATE_END_OF_LIST()
     },

@@ -159,14 +159,15 @@ static bool pmsav7_needed(void *opaque)
     CPUARMState *env = &cpu->env;
 
     return arm_feature(env, ARM_FEATURE_PMSA) &&
-           arm_feature(env, ARM_FEATURE_V7);
+           arm_feature(env, ARM_FEATURE_V7) &&
+           !arm_feature(env, ARM_FEATURE_V8);
 }
 
 static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
 {
     ARMCPU *cpu = opaque;
 
-    return cpu->env.pmsav7.rnr < cpu->pmsav7_dregion;
+    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
 }
 
 static const VMStateDescription vmstate_pmsav7 = {

@@ -204,7 +205,77 @@ static const VMStateDescription vmstate_pmsav7_rnr = {
     .minimum_version_id = 1,
     .needed = pmsav7_rnr_needed,
     .fields = (VMStateField[]) {
-        VMSTATE_UINT32(env.pmsav7.rnr, ARMCPU),
+        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
         VMSTATE_END_OF_LIST()
     }
 };
 
+static bool pmsav8_needed(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+    CPUARMState *env = &cpu->env;
+
+    return arm_feature(env, ARM_FEATURE_PMSA) &&
+        arm_feature(env, ARM_FEATURE_V8);
+}
+
+static const VMStateDescription vmstate_pmsav8 = {
+    .name = "cpu/pmsav8",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = pmsav8_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
+                              0, vmstate_info_uint32, uint32_t),
+        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
+                              0, vmstate_info_uint32, uint32_t),
+        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool s_rnr_vmstate_validate(void *opaque, int version_id)
+{
+    ARMCPU *cpu = opaque;
+
+    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
+}
+
+static bool m_security_needed(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+    CPUARMState *env = &cpu->env;
+
+    return arm_feature(env, ARM_FEATURE_M_SECURITY);
+}
+
+static const VMStateDescription vmstate_m_security = {
+    .name = "cpu/m-security",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = m_security_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
+        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
+        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
+        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
+        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
+                              0, vmstate_info_uint32, uint32_t),
+        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
+                              0, vmstate_info_uint32, uint32_t),
+        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
+        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
+        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+

@@ -225,9 +296,13 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
          * differences are that the T bit is not in the same place, the
          * primask/faultmask info may be in the CPSR I and F bits, and
          * we do not want the mode bits.
+         * We know that this cleanup happened before v8M, so there
+         * is no complication with banked primask/faultmask.
          */
         uint32_t newval = val;
 
+        assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));
+
         newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
         if (val & CPSR_T) {
             newval |= XPSR_T;

@@ -238,10 +313,10 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
          * transferred using the vmstate_m_faultmask_primask subsection.
          */
         if (val & CPSR_F) {
-            env->v7m.faultmask = 1;
+            env->v7m.faultmask[M_REG_NS] = 1;
         }
         if (val & CPSR_I) {
-            env->v7m.primask = 1;
+            env->v7m.primask[M_REG_NS] = 1;
         }
         val = newval;
     }

@@ -458,6 +533,8 @@ const VMStateDescription vmstate_arm_cpu = {
         */
         &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
+        &vmstate_pmsav8,
+        &vmstate_m_security,
        NULL
    }
 };

target/arm/op_helper.c

@@ -229,6 +229,49 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
 }
 
+/* arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+                                   vaddr addr, unsigned size,
+                                   MMUAccessType access_type,
+                                   int mmu_idx, MemTxAttrs attrs,
+                                   MemTxResult response, uintptr_t retaddr)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    uint32_t fsr, fsc;
+    ARMMMUFaultInfo fi = {};
+    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
+
+    if (retaddr) {
+        /* now we have a real cpu fault */
+        cpu_restore_state(cs, retaddr);
+    }
+
+    /* The EA bit in syndromes and fault status registers is an
+     * IMPDEF classification of external aborts. ARM implementations
+     * usually use this to indicate AXI bus Decode error (0) or
+     * Slave error (1); in QEMU we follow that.
+     */
+    fi.ea = (response != MEMTX_DECODE_ERROR);
+
+    /* The fault status register format depends on whether we're using
+     * the LPAE long descriptor format, or the short descriptor format.
+     */
+    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+        /* long descriptor form, STATUS 0b010000: synchronous ext abort */
+        fsr = (fi.ea << 12) | (1 << 9) | 0x10;
+    } else {
+        /* short descriptor form, FSR 0b01000 : synchronous ext abort */
+        fsr = (fi.ea << 12) | 0x8;
+    }
+    fsc = 0x10;
+
+    deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
+}
+
 #endif /* !defined(CONFIG_USER_ONLY) */
 
 uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)

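The new do_transaction_failed hook only generates external aborts for boards that want the stricter behaviour; the pull request's hw/arm and boards.h patches (not shown on this page) keep existing boards on the old "ignore the error" path via a new MachineClass flag. A hedged sketch of what that opt-out looks like on the board side, with a hypothetical board name:

    /* hw/arm/someboard.c (hypothetical): preserve the pre-existing behaviour
     * of ignoring bus errors rather than raising external aborts.
     */
    static void someboard_machine_class_init(ObjectClass *oc, void *data)
    {
        MachineClass *mc = MACHINE_CLASS(oc);

        mc->desc = "Example ARM board (illustrative only)";
        mc->ignore_memory_transaction_failures = true;
    }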
target/arm/translate.c

@@ -41,7 +41,7 @@
 #define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
 /* currently all emulated v5 cores are also v5TE, so don't bother */
 #define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
-#define ENABLE_ARCH_5J    0
+#define ENABLE_ARCH_5J    arm_dc_feature(s, ARM_FEATURE_JAZELLE)
 #define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
 #define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
 #define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)

@@ -994,6 +994,25 @@ static inline void gen_bx_excret_final_code(DisasContext *s)
     gen_exception_internal(EXCP_EXCEPTION_EXIT);
 }
 
+static inline void gen_bxns(DisasContext *s, int rm)
+{
+    TCGv_i32 var = load_reg(s, rm);
+
+    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
+     * we need to sync state before calling it, but:
+     *  - we don't need to do gen_set_pc_im() because the bxns helper will
+     *    always set the PC itself
+     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
+     *    unless it's outside an IT block or the last insn in an IT block,
+     *    so we know that condexec == 0 (already set at the top of the TB)
+     *    is correct in the non-UNPREDICTABLE cases, and we can choose
+     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
+     */
+    gen_helper_v7m_bxns(cpu_env, var);
+    tcg_temp_free_i32(var);
+    s->base.is_jmp = DISAS_EXIT;
+}
+
 /* Variant of store_reg which uses branch&exchange logic when storing
    to r15 in ARM architecture v7 and above. The source must be a temporary
    and will be marked as dead. */

@@ -11185,12 +11204,31 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
              */
             bool link = insn & (1 << 7);
 
-            if (insn & 7) {
+            if (insn & 3) {
                 goto undef;
             }
             if (link) {
                 ARCH(5);
             }
+            if ((insn & 4)) {
+                /* BXNS/BLXNS: only exists for v8M with the
+                 * security extensions, and always UNDEF if NonSecure.
+                 * We don't implement these in the user-only mode
+                 * either (in theory you can use them from Secure User
+                 * mode but they are too tied in to system emulation.)
+                 */
+                if (!s->v8m_secure || IS_USER_ONLY) {
+                    goto undef;
+                }
+                if (link) {
+                    /* BLXNS: not yet implemented */
+                    goto undef;
+                } else {
+                    gen_bxns(s, rm);
+                }
+                break;
+            }
+            /* BLX/BX */
             tmp = load_reg(s, rm);
             if (link) {
                 val = (uint32_t)s->pc | 1;

@@ -11857,6 +11895,8 @@ static int arm_tr_init_disas_context(DisasContextBase *dcbase,
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
     dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
     dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
+    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+        regime_is_secure(env, dc->mmu_idx);
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
 

@@ -12288,24 +12328,30 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
     if (arm_feature(env, ARM_FEATURE_M)) {
         uint32_t xpsr = xpsr_read(env);
         const char *mode;
+        const char *ns_status = "";
+
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            ns_status = env->v7m.secure ? "S " : "NS ";
+        }
 
         if (xpsr & XPSR_EXCP) {
             mode = "handler";
         } else {
-            if (env->v7m.control & R_V7M_CONTROL_NPRIV_MASK) {
+            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                 mode = "unpriv-thread";
             } else {
                 mode = "priv-thread";
             }
         }
 
-        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s\n",
+        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
+                    ns_status,
                     mode);
     } else {
         uint32_t psr = cpsr_read(env);

target/arm/translate.h

@@ -35,6 +35,7 @@ typedef struct DisasContext {
     int vec_len;
     int vec_stride;
     bool v7m_handler_mode;
+    bool v8m_secure; /* true if v8M and we're in Secure mode */
     /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
      * so that top level loop can generate correct syndrome information.
      */