target/arm: Reorganize PMCCNTR accesses

pmccntr_read and pmccntr_write contained duplicate code that was already
being handled by pmccntr_sync. Consolidate the duplicated code into two
functions: pmccntr_op_start and pmccntr_op_finish. Add a companion to
c15_ccnt in CPUARMState so that we can simultaneously save both the
architectural register value and the last underlying cycle count - this
ensures time isn't lost and will also allow us to access the 'old'
architectural register value in order to detect overflows in later
patches.

Signed-off-by: Aaron Lindsay <alindsay@codeaurora.org>
Signed-off-by: Aaron Lindsay <aclindsa@gmail.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20181211151945.29137-3-aaron@os.amperecomputing.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit authored by Aaron Lindsay on 2019-01-21 10:23:13 +00:00; committed by Peter Maydell.
parent 8c07559fc7
commit 5d05b9d462
2 changed files with 100 additions and 55 deletions

View file

@@ -1085,28 +1085,63 @@ static inline bool arm_ccnt_enabled(CPUARMState *env)
return true;
}
void pmccntr_sync(CPUARMState *env)
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
void pmccntr_op_start(CPUARMState *env)
{
    /* Raw cycle count derived from the virtual clock at ARM_CPU_FREQ */
    uint64_t cycles = 0;
    cycles = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                      ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (arm_ccnt_enabled(env)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        /* c15_ccnt now holds the architectural (guest-visible) value */
        env->cp15.c15_ccnt = eff_cycles - env->cp15.c15_ccnt_delta;
    }
    /*
     * Save the undivided cycle count so pmccntr_op_finish can recompute
     * the delta without losing elapsed time.
     */
    env->cp15.c15_ccnt_delta = cycles;
}
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    uint64_t internal_count;

    if (!arm_ccnt_enabled(env)) {
        /* Counter disabled: leave c15_ccnt frozen as the visible value */
        return;
    }

    internal_count = env->cp15.c15_ccnt_delta;
    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        internal_count /= 64;
    }
    env->cp15.c15_ccnt_delta = internal_count - env->cp15.c15_ccnt;
}
/*
 * Bring all PMU counters' guest-visible values up to date before a
 * PMU register operation. Currently this delegates only to the cycle
 * counter (PMCCNTR).
 */
void pmu_op_start(CPUARMState *env)
{
    pmccntr_op_start(env);
}
/*
 * Companion to pmu_op_start: re-establish the counter deltas after a PMU
 * register operation. Currently delegates only to the cycle counter.
 */
void pmu_op_finish(CPUARMState *env)
{
    pmccntr_op_finish(env);
}
/*
 * Write to PMCR. The update is bracketed by pmu_op_start/pmu_op_finish so
 * the counters are logically stopped while control bits change and no time
 * is lost or double-counted.
 * NOTE(review): the middle of this function was truncated in the diff view;
 * body reconstructed from the surrounding hunk context — confirm against
 * upstream.
 */
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
/*
 * Read PMCCNTR. pmccntr_op_start updates c15_ccnt to the current
 * guest-visible count (a no-op when the counter is disabled), so the
 * read itself is a plain load; pmccntr_op_finish then re-establishes
 * the delta so no time is lost.
 */
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;

    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1153,22 +1178,9 @@ static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/*
 * Write PMCCNTR. Bracketing the store with pmccntr_op_start/op_finish makes
 * the assignment logical: c15_ccnt holds the guest-visible value between the
 * two calls, and op_finish recomputes the delta from the new value. This
 * works whether or not the counter is currently enabled.
 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1181,7 +1193,19 @@ static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
#else /* CONFIG_USER_ONLY */
/*
 * User-mode emulation has no backing cycle counter, so the PMU
 * start/finish operations are no-ops.
 */
void pmccntr_op_start(CPUARMState *env)
{
}

void pmccntr_op_finish(CPUARMState *env)
{
}

void pmu_op_start(CPUARMState *env)
{
}

void pmu_op_finish(CPUARMState *env)
{
}
@@ -1190,9 +1214,9 @@
/*
 * Write PMCCFILTR_EL0. The filter update is bracketed by
 * pmccntr_op_start/op_finish so the change in counting conditions takes
 * effect without losing or double-counting cycles. Only bits [31:26]
 * (P, U, NSK, NSU, NSH, M) are writable.
 */
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_op_finish(env);
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,