mirror of
https://github.com/Motorhead1991/qemu.git
synced 2025-08-31 14:02:05 -06:00
target/riscv: Make PMP region count configurable
Previously, the number of PMP regions was hardcoded to 16 in QEMU. This patch replaces the fixed value with a new `pmp_regions` field, allowing platforms to configure the number of PMP regions. If no specific value is provided, the default number of PMP regions remains 16 to preserve the existing behavior. A new CPU parameter `num-pmp-regions` has been introduced to the QEMU command line. For example: -cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8 Signed-off-by: Jay Chang <jay.chang@sifive.com> Reviewed-by: Frank Chang <frank.chang@sifive.com> Reviewed-by: Alistair Francis <alistair.francis@wdc.com> Message-ID: <20250606072525.17313-3-jay.chang@sifive.com> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
This commit is contained in:
parent
b0175841fa
commit
cd633bea8b
6 changed files with 74 additions and 14 deletions
|
@ -1119,6 +1119,7 @@ static void riscv_cpu_init(Object *obj)
|
|||
cpu->cfg.cbom_blocksize = 64;
|
||||
cpu->cfg.cbop_blocksize = 64;
|
||||
cpu->cfg.cboz_blocksize = 64;
|
||||
cpu->cfg.pmp_regions = 16;
|
||||
cpu->env.vext_ver = VEXT_VERSION_1_00_0;
|
||||
cpu->cfg.max_satp_mode = -1;
|
||||
|
||||
|
@ -1563,6 +1564,46 @@ static const PropertyInfo prop_pmp = {
|
|||
.set = prop_pmp_set,
|
||||
};
|
||||
|
||||
/*
 * QOM property setter for "num-pmp-regions".
 *
 * Validates and stores the requested PMP region count in cpu->cfg.pmp_regions.
 * Rejects the write when:
 *   - the visitor fails to parse a uint8 (errp already set by the visitor),
 *   - the CPU is a vendor CPU whose fixed region count would change,
 *   - the value exceeds the maximum the active priv spec version supports.
 */
static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
                                     void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t value;
    uint8_t max_regions;

    /*
     * Bail out on parse failure: 'value' is indeterminate if the visitor
     * did not fill it in, so it must not be read below.
     */
    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    /* Vendor CPUs expose a fixed PMP configuration; refuse to alter it. */
    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    /*
     * Priv spec versions before 1.12 only define pmpcfg0..3 (16 regions);
     * 1.12 and later extend the CSR space up to 64 regions.
     */
    max_regions = (cpu->env.priv_ver < PRIV_VERSION_1_12_0) ?
                  OLD_MAX_RISCV_PMPS : MAX_RISCV_PMPS;
    if (value > max_regions) {
        error_setg(errp, "Number of PMP regions exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp_regions = value;
}
|
||||
|
||||
/*
 * QOM property getter for "num-pmp-regions": reports the currently
 * configured number of PMP regions through the visitor.
 */
static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
                                     void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t regions = cpu->cfg.pmp_regions;

    visit_type_uint8(v, name, &regions, errp);
}
|
||||
|
||||
/*
 * Property descriptor wiring the "num-pmp-regions" uint8 CPU option to the
 * getter/setter above.
 */
static const PropertyInfo prop_num_pmp_regions = {
    .type = "uint8",
    .description = "num-pmp-regions",
    .get = prop_num_pmp_regions_get,
    .set = prop_num_pmp_regions_set,
};
|
||||
|
||||
static int priv_spec_from_str(const char *priv_spec_str)
|
||||
{
|
||||
int priv_version = -1;
|
||||
|
@ -2562,6 +2603,7 @@ static const Property riscv_cpu_properties[] = {
|
|||
|
||||
{.name = "mmu", .info = &prop_mmu},
|
||||
{.name = "pmp", .info = &prop_pmp},
|
||||
{.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
|
||||
|
||||
{.name = "priv_spec", .info = &prop_priv_spec},
|
||||
{.name = "vext_spec", .info = &prop_vext_spec},
|
||||
|
@ -2932,7 +2974,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
|
|||
.cfg.max_satp_mode = VM_1_10_MBARE,
|
||||
.cfg.ext_zifencei = true,
|
||||
.cfg.ext_zicsr = true,
|
||||
.cfg.pmp = true
|
||||
.cfg.pmp = true,
|
||||
.cfg.pmp_regions = 8
|
||||
),
|
||||
|
||||
DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
|
||||
|
@ -2943,7 +2986,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
|
|||
.cfg.ext_zifencei = true,
|
||||
.cfg.ext_zicsr = true,
|
||||
.cfg.mmu = true,
|
||||
.cfg.pmp = true
|
||||
.cfg.pmp = true,
|
||||
.cfg.pmp_regions = 8
|
||||
),
|
||||
|
||||
#if defined(TARGET_RISCV32) || \
|
||||
|
|
|
@ -174,7 +174,8 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
|
|||
|
||||
#define MMU_USER_IDX 3
|
||||
|
||||
#define MAX_RISCV_PMPS (16)
|
||||
#define MAX_RISCV_PMPS (64)
|
||||
#define OLD_MAX_RISCV_PMPS (16)
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
#include "pmp.h"
|
||||
|
|
|
@ -163,6 +163,7 @@ TYPED_FIELD(uint16_t, elen, 0)
|
|||
TYPED_FIELD(uint16_t, cbom_blocksize, 0)
|
||||
TYPED_FIELD(uint16_t, cbop_blocksize, 0)
|
||||
TYPED_FIELD(uint16_t, cboz_blocksize, 0)
|
||||
TYPED_FIELD(uint8_t, pmp_regions, 0)
|
||||
|
||||
TYPED_FIELD(int8_t, max_satp_mode, -1)
|
||||
|
||||
|
|
|
@ -738,7 +738,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
|
|||
static RISCVException pmp(CPURISCVState *env, int csrno)
|
||||
{
|
||||
if (riscv_cpu_cfg(env)->pmp) {
|
||||
if (csrno <= CSR_PMPCFG3) {
|
||||
int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
|
||||
+ CSR_PMPCFG15 : CSR_PMPCFG3;
|
||||
|
||||
if (csrno <= max_pmpcfg) {
|
||||
uint32_t reg_index = csrno - CSR_PMPCFG0;
|
||||
|
||||
/* TODO: RV128 restriction check */
|
||||
|
|
|
@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
|
|||
RISCVCPU *cpu = opaque;
|
||||
CPURISCVState *env = &cpu->env;
|
||||
int i;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
for (i = 0; i < MAX_RISCV_PMPS; i++) {
|
||||
for (i = 0; i < pmp_regions; i++) {
|
||||
pmp_update_rule_addr(env, i);
|
||||
}
|
||||
pmp_update_rule_nums(env);
|
||||
|
|
|
@ -122,7 +122,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
|
|||
*/
|
||||
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
|
||||
{
|
||||
if (pmp_index < MAX_RISCV_PMPS) {
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
if (pmp_index < pmp_regions) {
|
||||
return env->pmp_state.pmp[pmp_index].cfg_reg;
|
||||
}
|
||||
|
||||
|
@ -136,7 +138,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
|
|||
*/
|
||||
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
|
||||
{
|
||||
if (pmp_index < MAX_RISCV_PMPS) {
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
if (pmp_index < pmp_regions) {
|
||||
if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
|
||||
/* no change */
|
||||
return false;
|
||||
|
@ -236,9 +240,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
|
|||
void pmp_update_rule_nums(CPURISCVState *env)
|
||||
{
|
||||
int i;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
env->pmp_state.num_rules = 0;
|
||||
for (i = 0; i < MAX_RISCV_PMPS; i++) {
|
||||
for (i = 0; i < pmp_regions; i++) {
|
||||
const uint8_t a_field =
|
||||
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
|
||||
if (PMP_AMATCH_OFF != a_field) {
|
||||
|
@ -332,6 +337,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
|
|||
int pmp_size = 0;
|
||||
hwaddr s = 0;
|
||||
hwaddr e = 0;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
/* Short cut if no rules */
|
||||
if (0 == pmp_get_num_rules(env)) {
|
||||
|
@ -356,7 +362,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
|
|||
* 1.10 draft priv spec states there is an implicit order
|
||||
* from low to high
|
||||
*/
|
||||
for (i = 0; i < MAX_RISCV_PMPS; i++) {
|
||||
for (i = 0; i < pmp_regions; i++) {
|
||||
s = pmp_is_in_range(env, i, addr);
|
||||
e = pmp_is_in_range(env, i, addr + pmp_size - 1);
|
||||
|
||||
|
@ -527,8 +533,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
|
|||
{
|
||||
trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
|
||||
bool is_next_cfg_tor = false;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
if (addr_index < MAX_RISCV_PMPS) {
|
||||
if (addr_index < pmp_regions) {
|
||||
if (env->pmp_state.pmp[addr_index].addr_reg == val) {
|
||||
/* no change */
|
||||
return;
|
||||
|
@ -538,7 +545,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
|
|||
* In TOR mode, need to check the lock bit of the next pmp
|
||||
* (if there is a next).
|
||||
*/
|
||||
if (addr_index + 1 < MAX_RISCV_PMPS) {
|
||||
if (addr_index + 1 < pmp_regions) {
|
||||
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
|
||||
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
|
||||
|
||||
|
@ -573,8 +580,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
|
|||
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
|
||||
{
|
||||
target_ulong val = 0;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
if (addr_index < MAX_RISCV_PMPS) {
|
||||
if (addr_index < pmp_regions) {
|
||||
val = env->pmp_state.pmp[addr_index].addr_reg;
|
||||
trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
|
||||
} else {
|
||||
|
@ -592,6 +600,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
|
|||
{
|
||||
int i;
|
||||
uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
/* Update PMM field only if the value is valid according to Zjpm v1.0 */
|
||||
if (riscv_cpu_cfg(env)->ext_smmpm &&
|
||||
riscv_cpu_mxl(env) == MXL_RV64 &&
|
||||
|
@ -603,7 +612,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
|
|||
|
||||
/* RLB cannot be enabled if it's already 0 and if any regions are locked */
|
||||
if (!MSECCFG_RLB_ISSET(env)) {
|
||||
for (i = 0; i < MAX_RISCV_PMPS; i++) {
|
||||
for (i = 0; i < pmp_regions; i++) {
|
||||
if (pmp_is_locked(env, i)) {
|
||||
val &= ~MSECCFG_RLB;
|
||||
break;
|
||||
|
@ -659,6 +668,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
|
|||
hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
|
||||
hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
|
||||
int i;
|
||||
uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
|
||||
|
||||
/*
|
||||
* If PMP is not supported or there are no PMP rules, the TLB page will not
|
||||
|
@ -669,7 +679,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
|
|||
return TARGET_PAGE_SIZE;
|
||||
}
|
||||
|
||||
for (i = 0; i < MAX_RISCV_PMPS; i++) {
|
||||
for (i = 0; i < pmp_regions; i++) {
|
||||
if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
|
||||
continue;
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue