hw/riscv/riscv-iommu: add riscv_iommu_hpm_incr_ctr()

This function increments a specific counter and generates an interrupt
when the counter overflows.

Some extra changes in riscv-iommu.c were required to add this new
helper in riscv-iommu-hpm.c:

- RISCVIOMMUContext was moved to riscv-iommu.h, making it visible in
  riscv-iommu-hpm.c;

- riscv_iommu_notify() is now public.

No behavior change is made since HPM support is not being advertised
yet.
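
As the riscv-iommu.c hunks below show, hot paths then record an event
with a single call, e.g.:

    riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);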

Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20250224190826.1858473-5-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>

diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c

@@ -52,3 +52,117 @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
return (ctr_val + get_cycles() - ctr_prev) |
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}
static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
{
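    /* Counter registers are 64 bits wide, so ctr_idx << 3 is the byte offset. */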
const uint32_t off = ctr_idx << 3;
uint64_t cntr_val;
cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
/* Handle the overflow scenario. */
if (cntr_val == UINT64_MAX) {
/*
* Generate interrupt only if OF bit is clear. +1 to offset the cycle
* register OF bit.
*/
const uint32_t ovf =
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
BIT(ctr_idx + 1), 0);
if (!get_field(ovf, BIT(ctr_idx + 1))) {
riscv_iommu_reg_mod64(s,
RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
RISCV_IOMMU_IOHPMEVT_OF,
0);
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
}
}
}
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
unsigned event_id)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint32_t did_gscid;
uint32_t pid_pscid;
uint32_t ctr_idx;
gpointer value;
uint32_t ctrs;
uint64_t evt;
if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
return;
}
value = g_hash_table_lookup(s->hpm_event_ctr_map,
GUINT_TO_POINTER(event_id));
if (value == NULL) {
return;
}
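    /*
     * The mapped value is a bitmask of counters programmed with this
     * event; visit each set bit (ctrs &= ctrs - 1 clears the lowest one).
     */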
for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
ctr_idx = ctz32(ctrs);
if (get_field(inhibit, BIT(ctr_idx + 1))) {
continue;
}
evt = riscv_iommu_reg_get64(s,
RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
/*
* It's quite possible that event ID has been changed in counter
* but hashtable hasn't been updated yet. We don't want to increment
* counter for the old event ID.
*/
if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
continue;
}
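        /*
         * IOHPMEVT.IDT selects whether to match on the GSCID/PSCID from
         * the translation context or on the raw device_id/process_id.
         */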
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
} else {
did_gscid = ctx->devid;
pid_pscid = ctx->process_id;
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
/*
* If the transaction does not have a valid process_id, counter
* increments if device_id matches DID_GSCID. If the transaction
* has a valid process_id, counter increments if device_id
* matches DID_GSCID and process_id matches PID_PSCID. See
* IOMMU Specification, Chapter 5.23. Performance-monitoring
* event selector.
*/
if (ctx->process_id &&
get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
continue;
}
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
uint32_t mask = ~0;
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
/*
* 1001 1011 mask = GSCID
* 0000 0111 mask = mask ^ (mask + 1)
* 1111 1000 mask = ~mask;
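         * i.e. the run of trailing ones in DID_GSCID (plus the zero that
         * terminates it) marks the low-order bits excluded from the match.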
*/
mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
mask = mask ^ (mask + 1);
mask = ~mask;
}
if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
(did_gscid & mask)) {
continue;
}
}
hpm_incr_ctr(s, ctr_idx);
}
}
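
riscv_iommu_hpm_incr_ctr() only consumes hpm_event_ctr_map; the map
itself is expected to be populated when the guest programs the iohpmevt
registers, which later patches wire up. A minimal sketch of such an
update, with hpm_event_map_insert() as a hypothetical helper name:

    /* Hypothetical illustration: record that counter ctr_idx counts event_id. */
    static void hpm_event_map_insert(RISCVIOMMUState *s, unsigned event_id,
                                     uint32_t ctr_idx)
    {
        gpointer key = GUINT_TO_POINTER(event_id);
        uint32_t ctrs = GPOINTER_TO_UINT(
            g_hash_table_lookup(s->hpm_event_ctr_map, key));

        /* Keys and values are integers packed into pointers. */
        g_hash_table_insert(s->hpm_event_ctr_map, key,
                            GUINT_TO_POINTER(ctrs | BIT(ctr_idx)));
    }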

diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h

@@ -23,5 +23,7 @@
#include "hw/riscv/riscv-iommu.h"
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
unsigned event_id);
#endif

diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c

@@ -39,7 +39,6 @@
#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS)
#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS)
-typedef struct RISCVIOMMUContext RISCVIOMMUContext;
typedef struct RISCVIOMMUEntry RISCVIOMMUEntry;
/* Device assigned I/O address space */
@@ -52,19 +51,6 @@ struct RISCVIOMMUSpace {
QLIST_ENTRY(RISCVIOMMUSpace) list;
};
-/* Device translation context state. */
-struct RISCVIOMMUContext {
-uint64_t devid:24; /* Requester Id, AKA device_id */
-uint64_t process_id:20; /* Process ID. PASID for PCIe */
-uint64_t tc; /* Translation Control */
-uint64_t ta; /* Translation Attributes */
-uint64_t satp; /* S-Stage address translation and protection */
-uint64_t gatp; /* G-Stage address translation and protection */
-uint64_t msi_addr_mask; /* MSI filtering - address mask */
-uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
-uint64_t msiptp; /* MSI redirection page table pointer */
-};
typedef enum RISCVIOMMUTransTag {
RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
@@ -101,7 +87,7 @@ static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type)
}
}
-static void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
{
uint32_t ipsr, icvec, vector;
@@ -423,6 +409,13 @@ static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
}
}
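    /*
     * Each iteration of the walk loop fetches one PTE; account it to
     * the S/VS-stage or G-stage walk counter accordingly.
     */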
if (pass == S_STAGE) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS);
} else {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS);
}
/* Read page table entry */
if (sc[pass].ptesize == 4) {
uint32_t pte32 = 0;
@@ -941,6 +934,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
/* Device directory tree walk */
for (; depth-- > 0; ) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
/*
* Select device id index bits based on device directory tree level
* and device context format.
@@ -968,6 +962,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN));
}
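    /* The device context fetch below also counts as a DD-walk access. */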
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
/* index into device context entry page */
addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK;
@@ -1033,6 +1029,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
}
for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
/*
* Select process id index bits based on process directory tree
* level. See IOMMU Specification, 2.2. Process-Directory-Table.
@@ -1050,6 +1048,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
}
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
/* Leaf entry in PDT */
addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
@@ -1419,6 +1419,8 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
GHashTable *iot_cache;
int fault;
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ);
iot_cache = g_hash_table_ref(s->iot_cache);
/*
* TC[32] is reserved for custom extensions, used here to temporarily
@@ -1429,6 +1431,7 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
/* Check for ATS request. */
if (iotlb->perm == IOMMU_NONE) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ);
/* Check if ATS is disabled. */
if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) {
enable_pri = false;
@@ -1447,6 +1450,8 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
goto done;
}
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
/* Translate using device directory / page table information. */
fault = riscv_iommu_spa_fetch(s, ctx, iotlb);
@@ -2375,6 +2380,10 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s,
"riscv-iommu-trap", ~0ULL);
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
if (s->cap & RISCV_IOMMU_CAP_HPM) {
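        /*
         * Keys and values are integers packed into pointers, so direct
         * hash/equal suffice and no destroy notifiers are needed.
         */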
s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
}
}
static void riscv_iommu_unrealize(DeviceState *dev)
@@ -2383,6 +2392,10 @@ static void riscv_iommu_unrealize(DeviceState *dev)
g_hash_table_unref(s->iot_cache);
g_hash_table_unref(s->ctx_cache);
if (s->cap & RISCV_IOMMU_CAP_HPM) {
g_hash_table_unref(s->hpm_event_ctr_map);
}
}
void riscv_iommu_reset(RISCVIOMMUState *s)

diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h

@@ -85,12 +85,30 @@ struct RISCVIOMMUState {
/* HPM cycle counter */
uint64_t hpmcycle_val; /* Current value of cycle register */
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
/* HPM event counters */
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
};
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
Error **errp);
void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode);
void riscv_iommu_reset(RISCVIOMMUState *s);
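/* Now public so riscv-iommu-hpm.c can raise RISCV_IOMMU_INTR_PM on overflow. */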
void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type);
typedef struct RISCVIOMMUContext RISCVIOMMUContext;
/* Device translation context state. */
struct RISCVIOMMUContext {
uint64_t devid:24; /* Requester Id, AKA device_id */
uint64_t process_id:20; /* Process ID. PASID for PCIe */
uint64_t tc; /* Translation Control */
uint64_t ta; /* Translation Attributes */
uint64_t satp; /* S-Stage address translation and protection */
uint64_t gatp; /* G-Stage address translation and protection */
uint64_t msi_addr_mask; /* MSI filtering - address mask */
uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
uint64_t msiptp; /* MSI redirection page table pointer */
};
/* private helpers */