target/riscv: Implement privilege mode filtering for cycle/instret

Privilege mode filtering can also be emulated for cycle/instret by
tracking host_ticks/icount at each privilege mode switch. This patch
implements that for both cycle/instret and the mhpmcounters. The
former requires the Smcntrpmf extension to be enabled; the latter
requires Sscofpmf.

cycle/instret are still computed using host ticks when icount is not
enabled. Otherwise, they are computed using the raw icount, which is
more accurate in icount mode.

Co-developed-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Message-ID: <20240711-smcntrpmf_v7-v8-7-b7c38ae7b263@rivosinc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Author: Atish Patra, 2024-07-11 15:31:10 -07:00; committed by Alistair Francis
parent 3b31b7baff
commit b2d7a7c7e4
5 changed files with 194 additions and 37 deletions
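
The excerpt below shows only the target/riscv/csr.c hunks. The per-privilege
accumulation they rely on lives in the PMU code and is not part of this
excerpt; the sketch that follows only illustrates the idea described in the
commit message (one bucket of ticks per privilege mode, updated at every mode
switch). The names mode_buckets and account_switch are illustrative, not the
patch's actual helpers.

    /* Sketch of the bookkeeping idea, not QEMU code: one tick bucket per
     * privilege mode plus the tick value seen at the last mode switch. */
    #include <stdint.h>

    enum { PRV_U = 0, PRV_S = 1, PRV_M = 3, NR_PRV = 4 };

    struct mode_buckets {
        uint64_t counter[NR_PRV];   /* ticks accumulated per privilege mode */
        uint64_t last;              /* tick count at the last mode switch   */
    };

    /* On every privilege switch, credit the ticks spent since the last
     * switch to the mode being left and restart the baseline. */
    static void account_switch(struct mode_buckets *b, int old_prv, uint64_t now)
    {
        b->counter[old_prv] += now - b->last;
        b->last = now;
    }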

target/riscv/csr.c

@@ -787,36 +787,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+#if defined(CONFIG_USER_ONLY)
 /* User Timers and Counters */
-static target_ulong get_ticks(bool shift, bool instructions)
+static target_ulong get_ticks(bool shift)
 {
-    int64_t val;
-    target_ulong result;
-
-#if !defined(CONFIG_USER_ONLY)
-    if (icount_enabled()) {
-        if (instructions) {
-            val = icount_get_raw();
-        } else {
-            val = icount_get();
-        }
-    } else {
-        val = cpu_get_host_ticks();
-    }
-#else
-    val = cpu_get_host_ticks();
-#endif
-
-    if (shift) {
-        result = val >> 32;
-    } else {
-        result = val;
-    }
+    int64_t val = cpu_get_host_ticks();
+    target_ulong result = shift ? val >> 32 : val;
 
     return result;
 }
 
-#if defined(CONFIG_USER_ONLY)
 static RISCVException read_time(CPURISCVState *env, int csrno,
                                 target_ulong *val)
 {
@@ -834,14 +814,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                       target_ulong *val)
 {
-    *val = get_ticks(false, (csrno == CSR_INSTRET));
+    *val = get_ticks(false);
     return RISCV_EXCP_NONE;
 }
 
 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                        target_ulong *val)
 {
-    *val = get_ticks(true, (csrno == CSR_INSTRETH));
+    *val = get_ticks(true);
     return RISCV_EXCP_NONE;
 }
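
In user-only emulation there is no privilege switching to account for, so
get_ticks keeps reading cpu_get_host_ticks() directly; the instructions
argument is gone because the icount-based path moves into
riscv_pmu_ctr_get_fixed_counters_val in the system-mode code below.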
@@ -1025,17 +1005,82 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
+                                                          int counter_idx,
+                                                          bool upper_half)
+{
+    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
+    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
+    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
+    target_ulong result = 0;
+    uint64_t curr_val = 0;
+    uint64_t cfg_val = 0;
+
+    if (counter_idx == 0) {
+        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
+                  env->mcyclecfg;
+    } else if (counter_idx == 2) {
+        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
+                  env->minstretcfg;
+    } else {
+        cfg_val = upper_half ?
+                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
+                  env->mhpmevent_val[counter_idx];
+        cfg_val &= MHPMEVENT_FILTER_MASK;
+    }
+
+    if (!cfg_val) {
+        if (icount_enabled()) {
+            curr_val = inst ? icount_get_raw() : icount_get();
+        } else {
+            curr_val = cpu_get_host_ticks();
+        }
+
+        goto done;
+    }
+
+    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
+        curr_val += counter_arr[PRV_M];
+    }
+
+    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
+        curr_val += counter_arr[PRV_S];
+    }
+
+    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
+        curr_val += counter_arr[PRV_U];
+    }
+
+    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
+        curr_val += counter_arr_virt[PRV_S];
+    }
+
+    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
+        curr_val += counter_arr_virt[PRV_U];
+    }
+
+done:
+    if (riscv_cpu_mxl(env) == MXL_RV32) {
+        result = upper_half ? curr_val >> 32 : curr_val;
+    } else {
+        result = curr_val;
+    }
+
+    return result;
+}
+
 static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
                                         target_ulong val)
 {
     int ctr_idx = csrno - CSR_MCYCLE;
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
     uint64_t mhpmctr_val = val;
-    bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
 
     counter->mhpmcounter_val = val;
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
-        counter->mhpmcounter_prev = get_ticks(false, instr);
+    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
+                                                                ctr_idx, false);
         if (ctr_idx > 2) {
             if (riscv_cpu_mxl(env) == MXL_RV32) {
                 mhpmctr_val = mhpmctr_val |
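
When a non-zero filter configuration is present (mcyclecfg/minstretcfg for
the fixed counters, the mhpmevent filter bits otherwise),
riscv_pmu_ctr_get_fixed_counters_val above sums only the per-mode buckets
whose inhibit bit is clear; with a zero configuration it falls back to raw
host ticks or icount. Reusing the mode_buckets sketch from earlier, a
filtered read reduces to something like the following (the F_* bit positions
follow the Sscofpmf/Smcntrpmf MINH/SINH/UINH layout but are shown here only
for illustration):

    #define F_MINH (1ULL << 62)   /* inhibit counting in M-mode */
    #define F_SINH (1ULL << 61)   /* inhibit counting in S-mode */
    #define F_UINH (1ULL << 60)   /* inhibit counting in U-mode */

    static uint64_t filtered_read(const struct mode_buckets *b, uint64_t cfg)
    {
        uint64_t val = 0;

        if (!(cfg & F_MINH)) {    /* count M-mode ticks unless inhibited */
            val += b->counter[PRV_M];
        }
        if (!(cfg & F_SINH)) {
            val += b->counter[PRV_S];
        }
        if (!(cfg & F_UINH)) {
            val += b->counter[PRV_U];
        }
        return val;
    }

For example, with counter[PRV_M] = 1000, counter[PRV_S] = 400 and
counter[PRV_U] = 250, a configuration with only MINH set reads back
400 + 250 = 650.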
@@ -1058,12 +1103,13 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
     uint64_t mhpmctr_val = counter->mhpmcounter_val;
     uint64_t mhpmctrh_val = val;
-    bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
 
     counter->mhpmcounterh_val = val;
     mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
-        counter->mhpmcounterh_prev = get_ticks(true, instr);
+    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
+                                                                 ctr_idx, true);
         if (ctr_idx > 2) {
             riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
         }
@@ -1082,7 +1128,6 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                              counter->mhpmcounter_prev;
     target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                         counter->mhpmcounter_val;
-    bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
 
     if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
         /*
@@ -1103,8 +1148,10 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
      * The kernel computes the perf delta by subtracting the current value from
      * the value it initialized previously (ctr_val).
      */
-    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
-        *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
+    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
+               ctr_prev + ctr_val;
     } else {
         *val = ctr_val;
     }
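
The read path keeps the existing delta scheme: the value the guest last wrote
(ctr_val) is added to the difference between the current filtered count and
the snapshot taken at write time (ctr_prev), both of which now come from
riscv_pmu_ctr_get_fixed_counters_val. With made-up numbers: if the guest
wrote 100 when the filtered count was 5000, and the filtered count is 5600 at
read time, the guest reads 5600 - 5000 + 100 = 700.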