mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-02 15:23:53 -06:00
target/arm: Add support for Non-maskable Interrupt
This only implements the external delivery method via the GICv3.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20240407081733.3231820-7-ruanjinjie@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
parent
5c21697461
commit
b36a32ead1
5 changed files with 193 additions and 16 deletions
|
@@ -6046,15 +6046,19 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
|
|||
* and the state of the input lines from the GIC. (This requires
|
||||
* that we have the BQL, which is done by marking the
|
||||
* reginfo structs as ARM_CP_IO.)
|
||||
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
|
||||
* possible for it to be taken immediately, because VIRQ and
|
||||
* VFIQ are masked unless running at EL0 or EL1, and HCR
|
||||
* can only be written at EL2.
|
||||
* Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
|
||||
* VFNMI, it is never possible for it to be taken immediately
|
||||
* because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
|
||||
* at EL0 or EL1, and HCR can only be written at EL2.
|
||||
*/
|
||||
g_assert(bql_locked());
|
||||
arm_cpu_update_virq(cpu);
|
||||
arm_cpu_update_vfiq(cpu);
|
||||
arm_cpu_update_vserr(cpu);
|
||||
if (cpu_isar_feature(aa64_nmi, cpu)) {
|
||||
arm_cpu_update_vinmi(cpu);
|
||||
arm_cpu_update_vfnmi(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
|
||||
|
@@ -6202,6 +6206,23 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
|
||||
/* Clear RES0 bits. */
|
||||
env->cp15.hcrx_el2 = value & valid_mask;
|
||||
|
||||
/*
|
||||
* Updates to VINMI and VFNMI require us to update the status of
|
||||
* virtual NMI, which are the logical OR of these bits
|
||||
* and the state of the input lines from the GIC. (This requires
|
||||
* that we have the BQL, which is done by marking the
|
||||
* reginfo structs as ARM_CP_IO.)
|
||||
* Note that if a write to HCRX pends a VINMI or VFNMI it is never
|
||||
* possible for it to be taken immediately, because VINMI and
|
||||
* VFNMI are masked unless running at EL0 or EL1, and HCRX
|
||||
* can only be written at EL2.
|
||||
*/
|
||||
if (cpu_isar_feature(aa64_nmi, cpu)) {
|
||||
g_assert(bql_locked());
|
||||
arm_cpu_update_vinmi(cpu);
|
||||
arm_cpu_update_vfnmi(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
|
@@ -6217,6 +6238,7 @@ static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
|
|||
|
||||
static const ARMCPRegInfo hcrx_el2_reginfo = {
|
||||
.name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.type = ARM_CP_IO,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
|
||||
.access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
|
||||
.nv2_redirect_offset = 0xa0,
|
||||
|
@@ -10799,6 +10821,9 @@ void arm_log_exception(CPUState *cs)
|
|||
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
|
||||
[EXCP_VSERR] = "Virtual SERR",
|
||||
[EXCP_GPC] = "Granule Protection Check",
|
||||
[EXCP_NMI] = "NMI",
|
||||
[EXCP_VINMI] = "Virtual IRQ NMI",
|
||||
[EXCP_VFNMI] = "Virtual FIQ NMI",
|
||||
};
|
||||
|
||||
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue