mirror of https://github.com/Motorhead1991/qemu.git
synced 2025-08-07 17:53:56 -06:00
Merge remote-tracking branch 'remotes/stsquad/tags/pull-mttcg-240217-1' into staging

This is the MTTCG pull-request as posted yesterday.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJYsBZfAAoJEPvQ2wlanipElJ0H+QGoStSPeHrvKu7Q07v4F9zM
Pvf05gRsaxvXl7UbwmXC4oKhvZf9rVJ6ITk0x/y0WvmK0mHCmNBWkC0nn5UFL5IH
cdxetLz21Q+Ghpc36tZvqn2HYwRQFoEznge2LdtBDG0TyVA4jwquHU3HCG2D51zi
BaImI6lYW1e4ejjZHw8cEInSxsj/HJZE4pPas2Tkci+uAnrJroErwBVRRcE/y/Tn
aupl9TJFs2JdyJFNDibIm0kjB+i+jvCiLgYjbKZ/dR/+GZt73TtiBk/q9ZOFjdmT
7YFPI3F46QbGHoZahtzh0Xt7WMj94SlQgQ9OJ3zmNMfpXrze6Yc78xo/nbQ33U0=
=wR0/
-----END PGP SIGNATURE-----

# gpg: Signature made Fri 24 Feb 2017 11:17:51 GMT
# gpg: using RSA key 0xFBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>"
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8 DF35 FBD0 DB09 5A9E 2A44

* remotes/stsquad/tags/pull-mttcg-240217-1: (24 commits)
  tcg: enable MTTCG by default for ARM on x86 hosts
  hw/misc/imx6_src: defer clearing of SRC_SCR reset bits
  target-arm: ensure all cross vCPUs TLB flushes complete
  target-arm: don't generate WFE/YIELD calls for MTTCG
  target-arm/powerctl: defer cpu reset work to CPU context
  cputlb: introduce tlb_flush_*_all_cpus[_synced]
  cputlb: atomically update tlb fields used by tlb_reset_dirty
  cputlb: add tlb_flush_by_mmuidx async routines
  cputlb and arm/sparc targets: convert mmuidx flushes from varg to bitmap
  cputlb: introduce tlb_flush_* async work.
  cputlb: tweak qemu_ram_addr_from_host_nofail reporting
  cputlb: add assert_cpu_is_self checks
  tcg: handle EXCP_ATOMIC exception for system emulation
  tcg: enable thread-per-vCPU
  tcg: enable tb_lock() for SoftMMU
  tcg: remove global exit_request
  tcg: drop global lock during TCG code execution
  tcg: rename tcg_current_cpu to tcg_current_rr_cpu
  tcg: add kick timer for single-threaded vCPU emulation
  tcg: add options for enabling MTTCG
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

This commit is contained in:
commit 28f997a82c

40 changed files with 1881 additions and 479 deletions
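For reference, the user-visible switch this series adds is the "thread" suboption of "-accel tcg": "-accel tcg,thread=multi" requests multi-threaded TCG (and, per the first commit in the list, it becomes the default for ARM guests on x86 hosts), while "thread=single" keeps the old round-robin vCPU scheduler. The visible portion of the diff, reconstructed per file, follows.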
target/arm/arm-powerctl.c:

@@ -14,6 +14,7 @@
 #include "internals.h"
 #include "arm-powerctl.h"
 #include "qemu/log.h"
+#include "qemu/main-loop.h"
 #include "exec/exec-all.h"
 
 #ifndef DEBUG_ARM_POWERCTL
@@ -48,11 +49,93 @@ CPUState *arm_get_cpu_by_id(uint64_t id)
     return NULL;
 }
 
+struct CpuOnInfo {
+    uint64_t entry;
+    uint64_t context_id;
+    uint32_t target_el;
+    bool target_aa64;
+};
+
+
+static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
+                                      run_on_cpu_data data)
+{
+    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
+    struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr;
+
+    /* Initialize the cpu we are turning on */
+    cpu_reset(target_cpu_state);
+    target_cpu_state->halted = 0;
+
+    if (info->target_aa64) {
+        if ((info->target_el < 3) && arm_feature(&target_cpu->env,
+                                                 ARM_FEATURE_EL3)) {
+            /*
+             * As target mode is AArch64, we need to set lower
+             * exception level (the requested level 2) to AArch64
+             */
+            target_cpu->env.cp15.scr_el3 |= SCR_RW;
+        }
+
+        if ((info->target_el < 2) && arm_feature(&target_cpu->env,
+                                                 ARM_FEATURE_EL2)) {
+            /*
+             * As target mode is AArch64, we need to set lower
+             * exception level (the requested level 1) to AArch64
+             */
+            target_cpu->env.cp15.hcr_el2 |= HCR_RW;
+        }
+
+        target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
+    } else {
+        /* We are requested to boot in AArch32 mode */
+        static const uint32_t mode_for_el[] = { 0,
+                                                ARM_CPU_MODE_SVC,
+                                                ARM_CPU_MODE_HYP,
+                                                ARM_CPU_MODE_SVC };
+
+        cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
+                   CPSRWriteRaw);
+    }
+
+    if (info->target_el == 3) {
+        /* Processor is in secure mode */
+        target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
+    } else {
+        /* Processor is not in secure mode */
+        target_cpu->env.cp15.scr_el3 |= SCR_NS;
+    }
+
+    /* We check if the started CPU is now at the correct level */
+    assert(info->target_el == arm_current_el(&target_cpu->env));
+
+    if (info->target_aa64) {
+        target_cpu->env.xregs[0] = info->context_id;
+        target_cpu->env.thumb = false;
+    } else {
+        target_cpu->env.regs[0] = info->context_id;
+        target_cpu->env.thumb = info->entry & 1;
+        info->entry &= 0xfffffffe;
+    }
+
+    /* Start the new CPU at the requested address */
+    cpu_set_pc(target_cpu_state, info->entry);
+
+    g_free(info);
+
+    /* Finally set the power status */
+    assert(qemu_mutex_iothread_locked());
+    target_cpu->power_state = PSCI_ON;
+}
+
 int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
                    uint32_t target_el, bool target_aa64)
 {
     CPUState *target_cpu_state;
     ARMCPU *target_cpu;
+    struct CpuOnInfo *info;
+
+    assert(qemu_mutex_iothread_locked());
 
     DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
             "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
@@ -77,7 +160,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
     }
 
     target_cpu = ARM_CPU(target_cpu_state);
-    if (!target_cpu->powered_off) {
+    if (target_cpu->power_state == PSCI_ON) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "[ARM]%s: CPU %" PRId64 " is already on\n",
                       __func__, cpuid);
@@ -109,74 +192,54 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
         return QEMU_ARM_POWERCTL_INVALID_PARAM;
     }
 
-    /* Initialize the cpu we are turning on */
-    cpu_reset(target_cpu_state);
-    target_cpu->powered_off = false;
-    target_cpu_state->halted = 0;
-
-    if (target_aa64) {
-        if ((target_el < 3) && arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) {
-            /*
-             * As target mode is AArch64, we need to set lower
-             * exception level (the requested level 2) to AArch64
-             */
-            target_cpu->env.cp15.scr_el3 |= SCR_RW;
-        }
-
-        if ((target_el < 2) && arm_feature(&target_cpu->env, ARM_FEATURE_EL2)) {
-            /*
-             * As target mode is AArch64, we need to set lower
-             * exception level (the requested level 1) to AArch64
-             */
-            target_cpu->env.cp15.hcr_el2 |= HCR_RW;
-        }
-
-        target_cpu->env.pstate = aarch64_pstate_mode(target_el, true);
-    } else {
-        /* We are requested to boot in AArch32 mode */
-        static uint32_t mode_for_el[] = { 0,
-                                          ARM_CPU_MODE_SVC,
-                                          ARM_CPU_MODE_HYP,
-                                          ARM_CPU_MODE_SVC };
-
-        cpsr_write(&target_cpu->env, mode_for_el[target_el], CPSR_M,
-                   CPSRWriteRaw);
-    }
-
-    if (target_el == 3) {
-        /* Processor is in secure mode */
-        target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
-    } else {
-        /* Processor is not in secure mode */
-        target_cpu->env.cp15.scr_el3 |= SCR_NS;
-    }
-
-    /* We check if the started CPU is now at the correct level */
-    assert(target_el == arm_current_el(&target_cpu->env));
-
-    if (target_aa64) {
-        target_cpu->env.xregs[0] = context_id;
-        target_cpu->env.thumb = false;
-    } else {
-        target_cpu->env.regs[0] = context_id;
-        target_cpu->env.thumb = entry & 1;
-        entry &= 0xfffffffe;
-    }
-
-    /* Start the new CPU at the requested address */
-    cpu_set_pc(target_cpu_state, entry);
-
-    qemu_cpu_kick(target_cpu_state);
+    /*
+     * If another CPU has powered the target on we are in the state
+     * ON_PENDING and additional attempts to power on the CPU should
+     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
+     * spec)
+     */
+    if (target_cpu->power_state == PSCI_ON_PENDING) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
+                      __func__, cpuid);
+        return QEMU_ARM_POWERCTL_ON_PENDING;
+    }
+
+    /* To avoid racing with a CPU we are just kicking off we do the
+     * final bit of preparation for the work in the target CPUs
+     * context.
+     */
+    info = g_new(struct CpuOnInfo, 1);
+    info->entry = entry;
+    info->context_id = context_id;
+    info->target_el = target_el;
+    info->target_aa64 = target_aa64;
+
+    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work,
+                     RUN_ON_CPU_HOST_PTR(info));
 
     /* We are good to go */
     return QEMU_ARM_POWERCTL_RET_SUCCESS;
 }
 
+static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
+                                       run_on_cpu_data data)
+{
+    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
+
+    assert(qemu_mutex_iothread_locked());
+    target_cpu->power_state = PSCI_OFF;
+    target_cpu_state->halted = 1;
+    target_cpu_state->exception_index = EXCP_HLT;
+}
+
 int arm_set_cpu_off(uint64_t cpuid)
 {
     CPUState *target_cpu_state;
     ARMCPU *target_cpu;
 
+    assert(qemu_mutex_iothread_locked());
+
     DPRINTF("cpu %" PRId64 "\n", cpuid);
 
     /* change to the cpu we are powering up */
@@ -185,27 +248,34 @@ int arm_set_cpu_off(uint64_t cpuid)
         return QEMU_ARM_POWERCTL_INVALID_PARAM;
     }
     target_cpu = ARM_CPU(target_cpu_state);
-    if (target_cpu->powered_off) {
+    if (target_cpu->power_state == PSCI_OFF) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "[ARM]%s: CPU %" PRId64 " is already off\n",
                       __func__, cpuid);
         return QEMU_ARM_POWERCTL_IS_OFF;
     }
 
-    target_cpu->powered_off = true;
-    target_cpu_state->halted = 1;
-    target_cpu_state->exception_index = EXCP_HLT;
-    cpu_loop_exit(target_cpu_state);
-    /* notreached */
+    /* Queue work to run under the target vCPUs context */
+    async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
+                     RUN_ON_CPU_NULL);
 
     return QEMU_ARM_POWERCTL_RET_SUCCESS;
 }
 
+static void arm_reset_cpu_async_work(CPUState *target_cpu_state,
+                                     run_on_cpu_data data)
+{
+    /* Reset the cpu */
+    cpu_reset(target_cpu_state);
+}
+
 int arm_reset_cpu(uint64_t cpuid)
 {
     CPUState *target_cpu_state;
     ARMCPU *target_cpu;
 
+    assert(qemu_mutex_iothread_locked());
+
     DPRINTF("cpu %" PRId64 "\n", cpuid);
 
     /* change to the cpu we are resetting */
@@ -214,15 +284,17 @@ int arm_reset_cpu(uint64_t cpuid)
         return QEMU_ARM_POWERCTL_INVALID_PARAM;
     }
     target_cpu = ARM_CPU(target_cpu_state);
-    if (target_cpu->powered_off) {
+
+    if (target_cpu->power_state == PSCI_OFF) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "[ARM]%s: CPU %" PRId64 " is off\n",
                       __func__, cpuid);
         return QEMU_ARM_POWERCTL_IS_OFF;
     }
 
-    /* Reset the cpu */
-    cpu_reset(target_cpu_state);
+    /* Queue work to run under the target vCPUs context */
+    async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work,
+                     RUN_ON_CPU_NULL);
 
     return QEMU_ARM_POWERCTL_RET_SUCCESS;
 }
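The shape of this change is the core MTTCG pattern: instead of resetting and configuring another vCPU in place, arm_set_cpu_on() packages its parameters and queues the mutation to run in the target vCPU's own context. A minimal sketch of the pattern, assuming QEMU's run_on_cpu API as used above (ExampleInfo, example_work and queue_example are hypothetical names, not part of this series):

    #include "qemu/osdep.h"
    #include "qom/cpu.h"   /* CPUState, run_on_cpu_data, async_run_on_cpu() */

    struct ExampleInfo {
        uint64_t payload;
    };

    /* Runs later on the target vCPU's own thread, with the BQL held, so
     * it cannot race against that vCPU executing translated code. */
    static void example_work(CPUState *cpu, run_on_cpu_data data)
    {
        struct ExampleInfo *info = data.host_ptr;

        /* ... mutate per-vCPU state using info->payload ... */
        g_free(info);
    }

    /* Caller side: allocate, fill in, queue, and return immediately. */
    static void queue_example(CPUState *target, uint64_t payload)
    {
        struct ExampleInfo *info = g_new(struct ExampleInfo, 1);

        info->payload = payload;
        async_run_on_cpu(target, example_work, RUN_ON_CPU_HOST_PTR(info));
    }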
target/arm/arm-powerctl.h:

@@ -17,6 +17,7 @@
 #define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS
 #define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON
 #define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED
+#define QEMU_ARM_POWERCTL_ON_PENDING QEMU_PSCI_RET_ON_PENDING
 
 /*
  * arm_get_cpu_by_id:
@@ -43,6 +44,7 @@ CPUState *arm_get_cpu_by_id(uint64_t cpuid);
  * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
  * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
  * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started.
+ * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is still powering up
  */
 int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
                    uint32_t target_el, bool target_aa64);
target/arm/cpu.c:

@@ -45,7 +45,7 @@ static bool arm_cpu_has_work(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
 
-    return !cpu->powered_off
+    return (cpu->power_state != PSCI_OFF)
         && cs->interrupt_request &
         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
@@ -132,7 +132,7 @@ static void arm_cpu_reset(CPUState *s)
     env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
     env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
 
-    cpu->powered_off = cpu->start_powered_off;
+    cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
     s->halted = cpu->start_powered_off;
 
     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
target/arm/cpu.h:

@@ -30,6 +30,9 @@
 # define TARGET_LONG_BITS 32
 #endif
 
+/* ARM processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
 #define CPUArchState struct CPUARMState
 
 #include "qemu-common.h"
@@ -526,6 +529,15 @@ typedef struct CPUARMState {
  */
 typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque);
 
+
+/* These values map onto the return values for
+ * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
+typedef enum ARMPSCIState {
+    PSCI_OFF = 0,
+    PSCI_ON = 1,
+    PSCI_ON_PENDING = 2
+} ARMPSCIState;
+
 /**
  * ARMCPU:
  * @env: #CPUARMState
@@ -582,8 +594,10 @@ struct ARMCPU {
 
     /* Should CPU start in PSCI powered-off state? */
     bool start_powered_off;
-    /* CPU currently in PSCI powered-off state */
-    bool powered_off;
+
+    /* Current power state, access guarded by BQL */
+    ARMPSCIState power_state;
+
     /* CPU has virtualization extension */
     bool has_el2;
     /* CPU has security extension */
target/arm/helper.c:

@@ -536,41 +536,33 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush(other_cs);
-    }
+    tlb_flush_all_cpus_synced(cs);
 }
 
 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush(other_cs);
-    }
+    tlb_flush_all_cpus_synced(cs);
 }
 
 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
-    }
+    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 }
 
 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
-    }
+    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 }
 
 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -578,19 +570,21 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = ENV_GET_CPU(env);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
-                        ARMMMUIdx_S2NS, -1);
+    tlb_flush_by_mmuidx(cs,
+                        (1 << ARMMMUIdx_S12NSE1) |
+                        (1 << ARMMMUIdx_S12NSE0) |
+                        (1 << ARMMMUIdx_S2NS));
 }
 
 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
-                            ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs,
+                                        (1 << ARMMMUIdx_S12NSE1) |
+                                        (1 << ARMMMUIdx_S12NSE0) |
+                                        (1 << ARMMMUIdx_S2NS));
 }
 
 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -611,13 +605,13 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     pageaddr = sextract64(value << 12, 0, 40);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
 }
 
 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t pageaddr;
 
     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@@ -626,9 +620,8 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     pageaddr = sextract64(value << 12, 0, 40);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                             (1 << ARMMMUIdx_S2NS));
 }
 
 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -636,17 +629,15 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = ENV_GET_CPU(env);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+    tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -655,18 +646,17 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = ENV_GET_CPU(env);
     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                             (1 << ARMMMUIdx_S1E2));
 }
 
 static const ARMCPRegInfo cp_reginfo[] = {
@@ -2542,8 +2532,10 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
     if (raw_read(env, ri) != value) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
-                            ARMMMUIdx_S2NS, -1);
+        tlb_flush_by_mmuidx(cs,
+                            (1 << ARMMMUIdx_S12NSE1) |
+                            (1 << ARMMMUIdx_S12NSE0) |
+                            (1 << ARMMMUIdx_S2NS));
         raw_write(env, ri, value);
     }
 }
@@ -2898,29 +2890,33 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    ARMCPU *cpu = arm_env_get_cpu(env);
-    CPUState *cs = CPU(cpu);
+    CPUState *cs = ENV_GET_CPU(env);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+        tlb_flush_by_mmuidx(cs,
+                            (1 << ARMMMUIdx_S1SE1) |
+                            (1 << ARMMMUIdx_S1SE0));
     } else {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+        tlb_flush_by_mmuidx(cs,
+                            (1 << ARMMMUIdx_S12NSE1) |
+                            (1 << ARMMMUIdx_S12NSE0));
     }
 }
 
 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     bool sec = arm_is_secure_below_el3(env);
-    CPUState *other_cs;
 
-    CPU_FOREACH(other_cs) {
-        if (sec) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
-        } else {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
-                                ARMMMUIdx_S12NSE0, -1);
-        }
+    if (sec) {
+        tlb_flush_by_mmuidx_all_cpus_synced(cs,
+                                            (1 << ARMMMUIdx_S1SE1) |
+                                            (1 << ARMMMUIdx_S1SE0));
+    } else {
+        tlb_flush_by_mmuidx_all_cpus_synced(cs,
+                                            (1 << ARMMMUIdx_S12NSE1) |
+                                            (1 << ARMMMUIdx_S12NSE0));
     }
 }
 
@@ -2935,13 +2931,19 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+        tlb_flush_by_mmuidx(cs,
+                            (1 << ARMMMUIdx_S1SE1) |
+                            (1 << ARMMMUIdx_S1SE0));
     } else {
         if (arm_feature(env, ARM_FEATURE_EL2)) {
-            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
-                                ARMMMUIdx_S2NS, -1);
+            tlb_flush_by_mmuidx(cs,
+                                (1 << ARMMMUIdx_S12NSE1) |
+                                (1 << ARMMMUIdx_S12NSE0) |
+                                (1 << ARMMMUIdx_S2NS));
         } else {
-            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+            tlb_flush_by_mmuidx(cs,
+                                (1 << ARMMMUIdx_S12NSE1) |
+                                (1 << ARMMMUIdx_S12NSE0));
         }
     }
 }
@@ -2952,7 +2954,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+    tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2961,7 +2963,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
-    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
+    tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3));
 }
 
 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -2971,41 +2973,40 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * stage 2 translations, whereas most other scopes only invalidate
      * stage 1 translations.
      */
+    CPUState *cs = ENV_GET_CPU(env);
     bool sec = arm_is_secure_below_el3(env);
     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
-    CPUState *other_cs;
 
-    CPU_FOREACH(other_cs) {
-        if (sec) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
-        } else if (has_el2) {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
-                                ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
-        } else {
-            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
-                                ARMMMUIdx_S12NSE0, -1);
-        }
+    if (sec) {
+        tlb_flush_by_mmuidx_all_cpus_synced(cs,
+                                            (1 << ARMMMUIdx_S1SE1) |
+                                            (1 << ARMMMUIdx_S1SE0));
+    } else if (has_el2) {
+        tlb_flush_by_mmuidx_all_cpus_synced(cs,
+                                            (1 << ARMMMUIdx_S12NSE1) |
+                                            (1 << ARMMMUIdx_S12NSE0) |
+                                            (1 << ARMMMUIdx_S2NS));
+    } else {
+        tlb_flush_by_mmuidx_all_cpus_synced(cs,
+                                            (1 << ARMMMUIdx_S12NSE1) |
+                                            (1 << ARMMMUIdx_S12NSE0));
    }
 }
 
 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3));
 }
 
 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3021,11 +3022,13 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
-                                 ARMMMUIdx_S1SE0, -1);
+        tlb_flush_page_by_mmuidx(cs, pageaddr,
+                                 (1 << ARMMMUIdx_S1SE1) |
+                                 (1 << ARMMMUIdx_S1SE0));
     } else {
-        tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
-                                 ARMMMUIdx_S12NSE0, -1);
+        tlb_flush_page_by_mmuidx(cs, pageaddr,
+                                 (1 << ARMMMUIdx_S12NSE1) |
+                                 (1 << ARMMMUIdx_S12NSE0));
     }
 }
 
@@ -3040,7 +3043,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3054,47 +3057,46 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = CPU(cpu);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3));
 }
 
 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
+    ARMCPU *cpu = arm_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
     bool sec = arm_is_secure_below_el3(env);
-    CPUState *other_cs;
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    CPU_FOREACH(other_cs) {
-        if (sec) {
-            tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
-                                     ARMMMUIdx_S1SE0, -1);
-        } else {
-            tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
-                                     ARMMMUIdx_S12NSE0, -1);
-        }
+    if (sec) {
+        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                 (1 << ARMMMUIdx_S1SE1) |
+                                                 (1 << ARMMMUIdx_S1SE0));
+    } else {
+        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                 (1 << ARMMMUIdx_S12NSE1) |
+                                                 (1 << ARMMMUIdx_S12NSE0));
     }
 }
 
 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                             (1 << ARMMMUIdx_S1E2));
 }
 
 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                             (1 << ARMMMUIdx_S1E3));
 }
 
 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3116,13 +3118,13 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     pageaddr = sextract64(value << 12, 0, 48);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+    tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
 }
 
 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
-    CPUState *other_cs;
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t pageaddr;
 
     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@@ -3131,9 +3133,8 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     pageaddr = sextract64(value << 12, 0, 48);
 
-    CPU_FOREACH(other_cs) {
-        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                             (1 << ARMMMUIdx_S2NS));
 }
 
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -6769,6 +6770,12 @@ void arm_cpu_do_interrupt(CPUState *cs)
         arm_cpu_do_interrupt_aarch32(cs);
     }
 
+    /* Hooks may change global state so BQL should be held, also the
+     * BQL needs to be held for any modification of
+     * cs->interrupt_request.
+     */
+    g_assert(qemu_mutex_iothread_locked());
+
     arm_call_el_change_hook(cpu);
 
     if (!kvm_enabled()) {
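Two API changes run through all of the helper.c hunks: mmu-index arguments become a single OR-able bitmap, and the inner-shareable (*_is_*) TLB operations drop their open-coded CPU_FOREACH loops in favour of the new _all_cpus_synced variants, which queue the flush as async work on every vCPU and ensure it completes before the issuing CPU executes its next guest instruction. A hedged sketch of the calling convention (example_flush_ns_el01 is a hypothetical wrapper, assuming the ARM cpu.h and exec-all.h declarations above):

    /* Bitmap convention from "cputlb and arm/sparc targets: convert
     * mmuidx flushes from varg to bitmap"; each mmu index is one bit. */
    static void example_flush_ns_el01(CPUState *cs, bool broadcast)
    {
        uint16_t idxmap = (1 << ARMMMUIdx_S12NSE1) | (1 << ARMMMUIdx_S12NSE0);

        if (broadcast) {
            /* every vCPU; completed before cs runs another guest insn */
            tlb_flush_by_mmuidx_all_cpus_synced(cs, idxmap);
        } else {
            /* this vCPU only */
            tlb_flush_by_mmuidx(cs, idxmap);
        }
    }

The sparc hunk at the end of this commit applies the same bitmap conversion to its MMU_*_IDX flushes.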
target/arm/kvm.c:

@@ -488,8 +488,8 @@ int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
 {
     if (cap_has_mp_state) {
         struct kvm_mp_state mp_state = {
-            .mp_state =
-            cpu->powered_off ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
+            .mp_state = (cpu->power_state == PSCI_OFF) ?
+            KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
         };
         int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
         if (ret) {
@@ -515,7 +515,8 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
                       __func__, ret, strerror(-ret));
             abort();
         }
-        cpu->powered_off = (mp_state.mp_state == KVM_MP_STATE_STOPPED);
+        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
+            PSCI_OFF : PSCI_ON;
     }
 
     return 0;
target/arm/machine.c:

@@ -211,6 +211,38 @@ static const VMStateInfo vmstate_cpsr = {
     .put = put_cpsr,
 };
 
+static int get_power(QEMUFile *f, void *opaque, size_t size,
+                     VMStateField *field)
+{
+    ARMCPU *cpu = opaque;
+    bool powered_off = qemu_get_byte(f);
+    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
+    return 0;
+}
+
+static int put_power(QEMUFile *f, void *opaque, size_t size,
+                     VMStateField *field, QJSON *vmdesc)
+{
+    ARMCPU *cpu = opaque;
+
+    /* Migration should never happen while we transition power states */
+
+    if (cpu->power_state == PSCI_ON ||
+        cpu->power_state == PSCI_OFF) {
+        bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
+        qemu_put_byte(f, powered_off);
+        return 0;
+    } else {
+        return 1;
+    }
+}
+
+static const VMStateInfo vmstate_powered_off = {
+    .name = "powered_off",
+    .get = get_power,
+    .put = put_power,
+};
+
 static void cpu_pre_save(void *opaque)
 {
     ARMCPU *cpu = opaque;
@@ -329,7 +361,14 @@ const VMStateDescription vmstate_arm_cpu = {
         VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
         VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
         VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
-        VMSTATE_BOOL(powered_off, ARMCPU),
+        {
+            .name = "power_state",
+            .version_id = 0,
+            .size = sizeof(bool),
+            .info = &vmstate_powered_off,
+            .flags = VMS_SINGLE,
+            .offset = 0,
+        },
         VMSTATE_END_OF_LIST()
     },
     .subsections = (const VMStateDescription*[]) {
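Note the migration shim above keeps the wire format unchanged: the stream still carries a single bool under the old "powered_off" name, so streams stay compatible across versions, and put_power refuses to save (returns non-zero) if a CPU is caught mid-transition in PSCI_ON_PENDING, matching the comment that migration should never happen while power states are changing.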
target/arm/op_helper.c:

@@ -18,6 +18,7 @@
  */
 #include "qemu/osdep.h"
 #include "qemu/log.h"
+#include "qemu/main-loop.h"
 #include "cpu.h"
 #include "exec/helper-proto.h"
 #include "internals.h"
@@ -435,6 +436,13 @@ void HELPER(yield)(CPUARMState *env)
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
 
+    /* When running in MTTCG we don't generate jumps to the yield and
+     * WFE helpers as it won't affect the scheduling of other vCPUs.
+     * If we wanted to more completely model WFE/SEV so we don't busy
+     * spin unnecessarily we would need to do something more involved.
+     */
+    g_assert(!parallel_cpus);
+
     /* This is a non-trappable hint instruction that generally indicates
      * that the guest is currently busy-looping. Yield control back to the
      * top level loop so that a more deserving VCPU has a chance to run.
@@ -487,7 +495,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
      */
     env->regs[15] &= (env->thumb ? ~1 : ~3);
 
+    qemu_mutex_lock_iothread();
     arm_call_el_change_hook(arm_env_get_cpu(env));
+    qemu_mutex_unlock_iothread();
 }
 
 /* Access to user mode registers from privileged modes. */
@@ -735,28 +745,58 @@ void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
 {
     const ARMCPRegInfo *ri = rip;
 
-    ri->writefn(env, ri, value);
+    if (ri->type & ARM_CP_IO) {
+        qemu_mutex_lock_iothread();
+        ri->writefn(env, ri, value);
+        qemu_mutex_unlock_iothread();
+    } else {
+        ri->writefn(env, ri, value);
+    }
 }
 
 uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
 {
     const ARMCPRegInfo *ri = rip;
+    uint32_t res;
 
-    return ri->readfn(env, ri);
+    if (ri->type & ARM_CP_IO) {
+        qemu_mutex_lock_iothread();
+        res = ri->readfn(env, ri);
+        qemu_mutex_unlock_iothread();
+    } else {
+        res = ri->readfn(env, ri);
+    }
+
+    return res;
 }
 
 void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
 {
     const ARMCPRegInfo *ri = rip;
 
-    ri->writefn(env, ri, value);
+    if (ri->type & ARM_CP_IO) {
+        qemu_mutex_lock_iothread();
+        ri->writefn(env, ri, value);
+        qemu_mutex_unlock_iothread();
+    } else {
+        ri->writefn(env, ri, value);
+    }
 }
 
 uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
 {
     const ARMCPRegInfo *ri = rip;
+    uint64_t res;
 
-    return ri->readfn(env, ri);
+    if (ri->type & ARM_CP_IO) {
+        qemu_mutex_lock_iothread();
+        res = ri->readfn(env, ri);
+        qemu_mutex_unlock_iothread();
+    } else {
+        res = ri->readfn(env, ri);
+    }
+
+    return res;
 }
 
 void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
@@ -989,7 +1029,9 @@ void HELPER(exception_return)(CPUARMState *env)
                       cur_el, new_el, env->pc);
     }
 
+    qemu_mutex_lock_iothread();
     arm_call_el_change_hook(arm_env_get_cpu(env));
+    qemu_mutex_unlock_iothread();
 
     return;
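These hunks apply the locking rule that makes "tcg: drop global lock during TCG code execution" safe: helpers now run without the BQL by default, so any path that can reach device or other global state must bracket itself explicitly, and coprocessor accesses use the ARM_CP_IO flag to decide. A minimal sketch of that bracket (with_bql is a hypothetical helper, not part of the series):

    #include "qemu/main-loop.h"   /* qemu_mutex_lock_iothread() */

    /* Run fn(opaque) with the Big QEMU Lock held, releasing it after. */
    static inline void with_bql(void (*fn)(void *), void *opaque)
    {
        qemu_mutex_lock_iothread();
        fn(opaque);
        qemu_mutex_unlock_iothread();
    }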
target/arm/psci.c:

@@ -127,7 +127,9 @@ void arm_handle_psci_call(ARMCPU *cpu)
             break;
         }
         target_cpu = ARM_CPU(target_cpu_state);
-        ret = target_cpu->powered_off ? 1 : 0;
+
+        g_assert(qemu_mutex_iothread_locked());
+        ret = target_cpu->power_state;
         break;
     default:
         /* Everything above affinity level 0 is always on. */
target/arm/translate-a64.c:

@@ -1328,10 +1328,14 @@ static void handle_hint(DisasContext *s, uint32_t insn,
         s->is_jmp = DISAS_WFI;
         return;
     case 1: /* YIELD */
-        s->is_jmp = DISAS_YIELD;
+        if (!parallel_cpus) {
+            s->is_jmp = DISAS_YIELD;
+        }
         return;
     case 2: /* WFE */
-        s->is_jmp = DISAS_WFE;
+        if (!parallel_cpus) {
+            s->is_jmp = DISAS_WFE;
+        }
         return;
     case 4: /* SEV */
     case 5: /* SEVL */
target/arm/translate.c:

@@ -4404,20 +4404,32 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
     gen_rfe(s, pc, load_cpu_field(spsr));
 }
 
+/*
+ * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
+ * only call the helper when running single threaded TCG code to ensure
+ * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
+ * just skip this instruction. Currently the SEV/SEVL instructions
+ * which are *one* of many ways to wake the CPU from WFE are not
+ * implemented so we can't sleep like WFI does.
+ */
 static void gen_nop_hint(DisasContext *s, int val)
 {
     switch (val) {
     case 1: /* yield */
-        gen_set_pc_im(s, s->pc);
-        s->is_jmp = DISAS_YIELD;
+        if (!parallel_cpus) {
+            gen_set_pc_im(s, s->pc);
+            s->is_jmp = DISAS_YIELD;
+        }
         break;
     case 3: /* wfi */
         gen_set_pc_im(s, s->pc);
         s->is_jmp = DISAS_WFI;
         break;
     case 2: /* wfe */
-        gen_set_pc_im(s, s->pc);
-        s->is_jmp = DISAS_WFE;
+        if (!parallel_cpus) {
+            gen_set_pc_im(s, s->pc);
+            s->is_jmp = DISAS_WFE;
+        }
         break;
     case 4: /* sev */
     case 5: /* sevl */
target/i386/smm_helper.c:

@@ -18,6 +18,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/main-loop.h"
 #include "cpu.h"
 #include "exec/helper-proto.h"
 #include "exec/log.h"
@@ -42,11 +43,14 @@ void helper_rsm(CPUX86State *env)
 #define SMM_REVISION_ID 0x00020000
 #endif
 
+/* Called with iothread lock taken */
 void cpu_smm_update(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     bool smm_enabled = (env->hflags & HF_SMM_MASK);
 
+    g_assert(qemu_mutex_iothread_locked());
+
     if (cpu->smram) {
         memory_region_set_enabled(cpu->smram, smm_enabled);
     }
@@ -333,7 +337,10 @@ void helper_rsm(CPUX86State *env)
     }
     env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
     env->hflags &= ~HF_SMM_MASK;
+
+    qemu_mutex_lock_iothread();
     cpu_smm_update(cpu);
+    qemu_mutex_unlock_iothread();
 
     qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
     log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
target/s390x/misc_helper.c:

@@ -25,6 +25,7 @@
 #include "exec/helper-proto.h"
 #include "sysemu/kvm.h"
 #include "qemu/timer.h"
+#include "qemu/main-loop.h"
 #include "exec/address-spaces.h"
 #ifdef CONFIG_KVM
 #include <linux/kvm.h>
@@ -109,11 +110,13 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
 /* SCLP service call */
 uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
 {
+    qemu_mutex_lock_iothread();
     int r = sclp_service_call(env, r1, r2);
     if (r < 0) {
         program_interrupt(env, -r, 4);
-        return 0;
+        r = 0;
     }
+    qemu_mutex_unlock_iothread();
     return r;
 }
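The s390x hunk is also a small bug-fix in disguise: the early "return 0" in the error path becomes "r = 0" so the function always reaches qemu_mutex_unlock_iothread() and cannot return with the iothread lock still held.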
target/sparc/ldst_helper.c:

@@ -1768,13 +1768,15 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
         case 1:
             env->dmmu.mmu_primary_context = val;
             env->immu.mmu_primary_context = val;
-            tlb_flush_by_mmuidx(CPU(cpu), MMU_USER_IDX, MMU_KERNEL_IDX, -1);
+            tlb_flush_by_mmuidx(CPU(cpu),
+                                (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
             break;
         case 2:
             env->dmmu.mmu_secondary_context = val;
            env->immu.mmu_secondary_context = val;
-            tlb_flush_by_mmuidx(CPU(cpu), MMU_USER_SECONDARY_IDX,
-                                MMU_KERNEL_SECONDARY_IDX, -1);
+            tlb_flush_by_mmuidx(CPU(cpu),
+                                (1 << MMU_USER_SECONDARY_IDX) |
+                                (1 << MMU_KERNEL_SECONDARY_IDX));
             break;
         default:
             cpu_unassigned_access(cs, addr, true, false, 1, size);