qemu/hw/riscv/riscv-iommu-hpm.c
Tomasz Jeznach 2cf2a6c027 hw/riscv/riscv-iommu: add IOCOUNTINH mmio writes
Handling of RISCV_IOMMU_REG_IOCOUNTINH writes is done by
riscv_iommu_process_iocntinh_cy(), which is called during the
riscv_iommu_mmio_write() callback via a new
riscv_iommu_process_hpm_writes() helper.

Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20250224190826.1858473-7-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2025-03-04 15:42:54 +10:00

/*
* RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
*
* Copyright (C) 2022-2023 Rivos Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "cpu_bits.h"
#include "riscv-iommu-hpm.h"
#include "riscv-iommu.h"
#include "riscv-iommu-bits.h"
#include "trace.h"
/* For now we assume IOMMU HPM frequency to be 1GHz so 1-cycle is of 1-ns. */
static inline uint64_t get_cycles(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
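
/*
 * Read the live IOHPMCYCLES value. The counter is modelled as a snapshot
 * pair: hpmcycle_val holds the counter value at the last snapshot point
 * and hpmcycle_prev the QEMU_CLOCK_VIRTUAL time of that snapshot (see
 * riscv_iommu_process_iocntinh_cy() below), so the live value is
 * hpmcycle_val plus the virtual time elapsed since. The OVF bit is
 * passed through from the register file.
 */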
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
{
const uint64_t cycle = riscv_iommu_reg_get64(
s, RISCV_IOMMU_REG_IOHPMCYCLES);
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
const uint64_t ctr_prev = s->hpmcycle_prev;
const uint64_t ctr_val = s->hpmcycle_val;
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
/*
     * The counter must not increment if the inhibit bit is set. We can't
     * really stop QEMU_CLOCK_VIRTUAL, so we just return the last updated
     * counter value to indicate that the counter was not incremented.
*/
return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}
return (ctr_val + get_cycles() - ctr_prev) |
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}

static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
{
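    /* Counters are 64-bit registers laid out back to back, 8 bytes apart. */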
const uint32_t off = ctr_idx << 3;
uint64_t cntr_val;
cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
/* Handle the overflow scenario. */
if (cntr_val == UINT64_MAX) {
/*
* Generate interrupt only if OF bit is clear. +1 to offset the cycle
* register OF bit.
*/
const uint32_t ovf =
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
BIT(ctr_idx + 1), 0);
if (!get_field(ovf, BIT(ctr_idx + 1))) {
riscv_iommu_reg_mod64(s,
RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
RISCV_IOMMU_IOHPMEVT_OF,
0);
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
}
}
}

void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
unsigned event_id)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint32_t did_gscid;
uint32_t pid_pscid;
uint32_t ctr_idx;
gpointer value;
uint32_t ctrs;
uint64_t evt;
if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
return;
}
value = g_hash_table_lookup(s->hpm_event_ctr_map,
GUINT_TO_POINTER(event_id));
if (value == NULL) {
return;
}
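
    /* The mapped value is a bitmask of counters; walk it lowest bit first. */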
for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
ctr_idx = ctz32(ctrs);
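        /* IOCOUNTINH bit 0 is CY, so counter N maps to inhibit bit N + 1. */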
if (get_field(inhibit, BIT(ctr_idx + 1))) {
continue;
}
evt = riscv_iommu_reg_get64(s,
RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
/*
         * It's quite possible that the event ID in the counter has been
         * changed but the hash table hasn't been updated yet. We don't
         * want to increment the counter for the old event ID.
*/
if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
continue;
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
} else {
did_gscid = ctx->devid;
pid_pscid = ctx->process_id;
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
/*
* If the transaction does not have a valid process_id, counter
* increments if device_id matches DID_GSCID. If the transaction
* has a valid process_id, counter increments if device_id
* matches DID_GSCID and process_id matches PID_PSCID. See
* IOMMU Specification, Chapter 5.23. Performance-monitoring
* event selector.
*/
if (ctx->process_id &&
get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
continue;
}
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
uint32_t mask = ~0;
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
/*
* 1001 1011 mask = GSCID
* 0000 0111 mask = mask ^ (mask + 1)
* 1111 1000 mask = ~mask;
*/
mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
mask = mask ^ (mask + 1);
mask = ~mask;
}
if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
(did_gscid & mask)) {
continue;
}
}
hpm_incr_ctr(s, ctr_idx);
}
}
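
/*
 * A typical caller sits in the translation path in riscv-iommu.c, e.g.
 * (assuming the RISCV_IOMMU_HPMEVENT_* event ids from riscv-iommu-bits.h):
 *
 *     riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
 *
 * which counts an IOATC miss against every counter programmed for that
 * event.
 */
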
/* Timer callback for cycle counter overflow. */
void riscv_iommu_hpm_timer_cb(void *priv)
{
RISCVIOMMUState *s = priv;
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint32_t ovf;
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
return;
}
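
    /*
     * If the programmed expiry was too far out for the timer (see
     * hpm_setup_timer()), the remainder was parked in irq_overflow_left:
     * re-arm once more for the final stretch before raising the interrupt.
     */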
if (s->irq_overflow_left > 0) {
uint64_t irq_trigger_at =
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
s->irq_overflow_left = 0;
return;
}
ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
/*
         * We don't need to set hpmcycle_val to zero and update hpmcycle_prev
         * to the current clock value. The way we calculate iohpmcycles will
         * overflow and return the correct value. This avoids the need to
         * synchronize the timer callback and the write callback.
*/
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
RISCV_IOMMU_IOCOUNTOVF_CY, 0);
riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
}
}

static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint64_t overflow_at, overflow_ns;
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
return;
}
/*
     * We are using INT64_MAX here instead of UINT64_MAX because the cycle
     * counter has 63-bit precision and INT64_MAX is the maximum it can store.
*/
if (value) {
overflow_ns = INT64_MAX - value + 1;
} else {
overflow_ns = INT64_MAX;
}
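
    /*
     * E.g. restarting with value == INT64_MAX - 999 gives
     * overflow_ns == 1000: the timer fires 1000 ns (1000 cycles at the
     * assumed 1GHz) from now.
     */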
overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;
if (overflow_at > INT64_MAX) {
s->irq_overflow_left = overflow_at - INT64_MAX;
overflow_at = INT64_MAX;
}
timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
}

/* Updates the internal cycle counter state when iocntinh:CY is changed. */
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);

    /* We only need to process a CY bit toggle. */
    if (!!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY) == prev_cy_inh) {
return;
}
if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
/*
* Cycle counter is enabled. Just start the timer again and update
* the clock snapshot value to point to the current time to make
* sure iohpmcycles read is correct.
*/
s->hpmcycle_prev = get_cycles();
hpm_setup_timer(s, s->hpmcycle_val);
} else {
/*
* Cycle counter is disabled. Stop the timer and update the cycle
* counter to record the current value which is last programmed
* value + the cycles passed so far.
*/
s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
timer_del(s->hpm_timer);
}
}
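
/*
 * For reference: per the commit message, this function is reached from the
 * riscv_iommu_mmio_write() callback through a new
 * riscv_iommu_process_hpm_writes() helper in riscv-iommu.c. A minimal
 * sketch of that dispatch (shape assumed here, not part of this file):
 *
 *     static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
 *                                                uint32_t regb,
 *                                                bool prev_cy_inh)
 *     {
 *         switch (regb) {
 *         case RISCV_IOMMU_REG_IOCOUNTINH:
 *             riscv_iommu_process_iocntinh_cy(s, prev_cy_inh);
 *             break;
 *         }
 *     }
 *
 * where prev_cy_inh captures IOCOUNTINH.CY before the store so that only a
 * real toggle is processed.
 */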