Merge tag 'pull-riscv-to-apply-20250119-1' of https://github.com/alistair23/qemu into staging

Second RISC-V PR for 10.0

* Reduce the overhead for simple RISC-V vector unit-stride loads and stores
* Add V bit to GDB priv reg
* Add 'sha' support
* Add traces for exceptions in user mode
* Update Pointer Masking to Zjpm v1.0
* Add Smrnmi support
* Fix timebase-frequency when using KVM acceleration
* Add RISC-V Counter delegation ISA extension support
* Add support for Smdbltrp and Ssdbltrp extensions
* Introduce a translation tag for the IOMMU page table cache
* Support Supm and Sspm as part of Zjpm v1.0
* Convert htif debug prints to trace event

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmeMUUwACgkQr3yVEwxT
# gBNgDQ/+JeqcsbJRX+PZQJEV06tDIJpk+mfaBHUYSGdNkjI9fzowNaxFIEB2vaLt
# 4+xAGMnJ4vMcjJyBcPOn1FKAlowM7MsUNITOF9Rstnyriqnj2UsUZ9YBtkuG6gWH
# ZHoYEKu7mAZoZw5RRx4TatHDXw7TYfUsrDPrn+x6yeCZTq9ruRTlHkzp2LC725Vq
# KTnbWAP7WlqiJaSxB5eIFYT5tYP1Blp0yD358B037C57EU9j5zm2FQdFmVK1+xRF
# dFg/urBIzfAjjkCS/t9DmH+S6NgMEut6udUhllk/KUJAzWvsggc4wZZlWjFOJFJY
# fIxx3alhY3pcm1PYjFpf15Poz6Pqva/KGjwgZafirKQtPbRSzfRkUwcHOYRTQT9j
# abeiB44XPaeIl8Jvw7GLxcWtlJ5NmBrZho+2Z9mIhB/Ix5H3PDgs18Oc/s73P2qQ
# JFLRb7cpYy1HbRc0ugvwAmOTY1t6HX8HAtT+3rNhiXpXnj4RW2C/WU1cEqrg8QkM
# cTPiy2zHoBhAWt9aDK1Kvbhb1vur3JaF7rk9jeKlriFr87Ly+yPU+8mnEDw40NMR
# Tc9nivqmOqqXS5AM9O/W1uzTWzpxIUy7XBy3cuSk0uZCoge4IE2Or7P2Rb2uyaNZ
# RkAo/PL2N1cMjP7gB3kLRtYY7FA+nal66KhfbHPRHqj+ZwUAxzs=
# =F3IG
# -----END PGP SIGNATURE-----
# gpg: Signature made Sat 18 Jan 2025 20:11:40 EST
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20250119-1' of https://github.com/alistair23/qemu: (50 commits)
  hw/char/riscv_htif: Convert HTIF_DEBUG() to trace events
  target/riscv: Support Supm and Sspm as part of Zjpm v1.0
  hw/riscv/riscv-iommu.c: Introduce a translation tag for the page table cache
  target/riscv: Add Smdbltrp ISA extension enable switch
  target/riscv: Implement Smdbltrp behavior
  target/riscv: Implement Smdbltrp sret, mret and mnret behavior
  target/riscv: Add Smdbltrp CSRs handling
  target/riscv: Add Ssdbltrp ISA extension enable switch
  target/riscv: Implement Ssdbltrp exception handling
  target/riscv: Implement Ssdbltrp sret, mret and mnret behavior
  target/riscv: Add Ssdbltrp CSRs handling
  target/riscv: Fix henvcfg potentially containing stale bits
  target/riscv: Add configuration for S[m|s]csrind, Smcdeleg/Ssccfg
  target/riscv: Add implied rule for counter delegation extensions
  target/riscv: Invoke pmu init after feature enable
  target/riscv: Add counter delegation/configuration support
  target/riscv: Add select value range check for counter delegation
  target/riscv: Add counter delegation definitions
  target/riscv: Add properties for counter delegation ISA extensions
  target/riscv: Support generic CSR indirect access
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2025-01-19 08:55:46 -05:00
commit d6430c17d7
28 changed files with 1826 additions and 684 deletions

View file

@ -32,14 +32,7 @@
#include "exec/tswap.h"
#include "system/dma.h"
#include "system/runstate.h"
#define RISCV_DEBUG_HTIF 0
#define HTIF_DEBUG(fmt, ...) \
do { \
if (RISCV_DEBUG_HTIF) { \
qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
} \
} while (0)
#include "trace.h"
#define HTIF_DEV_SHIFT 56
#define HTIF_CMD_SHIFT 48
@ -159,8 +152,7 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
uint64_t payload = val_written & 0xFFFFFFFFFFFFULL;
int resp = 0;
HTIF_DEBUG("mtohost write: device: %d cmd: %d what: %02" PRIx64
" -payload: %016" PRIx64 "\n", device, cmd, payload & 0xFF, payload);
trace_htif_uart_write_to_host(device, cmd, payload);
/*
* Currently, there is a fixed mapping of devices:
@ -251,8 +243,7 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
}
} else {
qemu_log("HTIF unknown device or command\n");
HTIF_DEBUG("device: %d cmd: %d what: %02" PRIx64
" payload: %016" PRIx64, device, cmd, payload & 0xFF, payload);
trace_htif_uart_unknown_device_command(device, cmd, payload);
}
/*
* Latest bbl does not set fromhost to 0 if there is a value in tohost.

View file

@ -136,3 +136,7 @@ stm32f2xx_usart_read(char *id, unsigned size, uint64_t ofs, uint64_t val) " %s s
stm32f2xx_usart_write(char *id, unsigned size, uint64_t ofs, uint64_t val) "%s size %d ofs 0x%02" PRIx64 " <- 0x%02" PRIx64
stm32f2xx_usart_drop(char *id) " %s dropping the chars"
stm32f2xx_usart_receive(char *id, uint8_t chr) " %s receiving '%c'"
# riscv_htif.c
htif_uart_write_to_host(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64
htif_uart_unknown_device_command(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64

View file

@ -64,8 +64,16 @@ struct RISCVIOMMUContext {
uint64_t msiptp; /* MSI redirection page table pointer */
};
typedef enum RISCVIOMMUTransTag {
RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
RISCV_IOMMU_TRANS_TAG_VG, /* G-stage only */
RISCV_IOMMU_TRANS_TAG_VN, /* Nested translation */
} RISCVIOMMUTransTag;
/* Address translation cache entry */
struct RISCVIOMMUEntry {
RISCVIOMMUTransTag tag; /* Translation Tag */
uint64_t iova:44; /* IOVA Page Number */
uint64_t pscid:20; /* Process Soft-Context identifier */
uint64_t phys:44; /* Physical Page Number */
@ -1227,7 +1235,7 @@ static gboolean riscv_iommu_iot_equal(gconstpointer v1, gconstpointer v2)
RISCVIOMMUEntry *t1 = (RISCVIOMMUEntry *) v1;
RISCVIOMMUEntry *t2 = (RISCVIOMMUEntry *) v2;
return t1->gscid == t2->gscid && t1->pscid == t2->pscid &&
t1->iova == t2->iova;
t1->iova == t2->iova && t1->tag == t2->tag;
}
static guint riscv_iommu_iot_hash(gconstpointer v)
@ -1236,67 +1244,115 @@ static guint riscv_iommu_iot_hash(gconstpointer v)
return (guint)t->iova;
}
/* GV: 1 PSCV: 1 AV: 1 */
/* GV: 0 AV: 0 PSCV: 0 GVMA: 0 */
/* GV: 0 AV: 0 GVMA: 1 */
static
void riscv_iommu_iot_inval_all(gpointer key, gpointer value, gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->tag == arg->tag) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 0 AV: 0 PSCV: 1 GVMA: 0 */
static
void riscv_iommu_iot_inval_pscid(gpointer key, gpointer value, gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->tag == arg->tag &&
iot->pscid == arg->pscid) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 0 AV: 1 PSCV: 0 GVMA: 0 */
static
void riscv_iommu_iot_inval_iova(gpointer key, gpointer value, gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->tag == arg->tag &&
iot->iova == arg->iova) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 0 AV: 1 PSCV: 1 GVMA: 0 */
static void riscv_iommu_iot_inval_pscid_iova(gpointer key, gpointer value,
gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->gscid == arg->gscid &&
if (iot->tag == arg->tag &&
iot->pscid == arg->pscid &&
iot->iova == arg->iova) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 1 PSCV: 1 AV: 0 */
static void riscv_iommu_iot_inval_pscid(gpointer key, gpointer value,
gpointer data)
/* GV: 1 AV: 0 PSCV: 0 GVMA: 0 */
/* GV: 1 AV: 0 GVMA: 1 */
static
void riscv_iommu_iot_inval_gscid(gpointer key, gpointer value, gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->gscid == arg->gscid &&
if (iot->tag == arg->tag &&
iot->gscid == arg->gscid) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 1 AV: 0 PSCV: 1 GVMA: 0 */
static void riscv_iommu_iot_inval_gscid_pscid(gpointer key, gpointer value,
gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->tag == arg->tag &&
iot->gscid == arg->gscid &&
iot->pscid == arg->pscid) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 1 GVMA: 1 */
static void riscv_iommu_iot_inval_gscid_gpa(gpointer key, gpointer value,
gpointer data)
/* GV: 1 AV: 1 PSCV: 0 GVMA: 0 */
/* GV: 1 AV: 1 GVMA: 1 */
static void riscv_iommu_iot_inval_gscid_iova(gpointer key, gpointer value,
gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->gscid == arg->gscid) {
/* simplified cache, no GPA matching */
if (iot->tag == arg->tag &&
iot->gscid == arg->gscid &&
iot->iova == arg->iova) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 1 GVMA: 0 */
static void riscv_iommu_iot_inval_gscid(gpointer key, gpointer value,
gpointer data)
/* GV: 1 AV: 1 PSCV: 1 GVMA: 0 */
static void riscv_iommu_iot_inval_gscid_pscid_iova(gpointer key, gpointer value,
gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
if (iot->gscid == arg->gscid) {
if (iot->tag == arg->tag &&
iot->gscid == arg->gscid &&
iot->pscid == arg->pscid &&
iot->iova == arg->iova) {
iot->perm = IOMMU_NONE;
}
}
/* GV: 0 */
static void riscv_iommu_iot_inval_all(gpointer key, gpointer value,
gpointer data)
{
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
iot->perm = IOMMU_NONE;
}
/* caller should keep ref-count for iot_cache object */
static RISCVIOMMUEntry *riscv_iommu_iot_lookup(RISCVIOMMUContext *ctx,
GHashTable *iot_cache, hwaddr iova)
GHashTable *iot_cache, hwaddr iova, RISCVIOMMUTransTag transtag)
{
RISCVIOMMUEntry key = {
.tag = transtag,
.gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID),
.pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID),
.iova = PPN_DOWN(iova),
@ -1322,10 +1378,11 @@ static void riscv_iommu_iot_update(RISCVIOMMUState *s,
}
static void riscv_iommu_iot_inval(RISCVIOMMUState *s, GHFunc func,
uint32_t gscid, uint32_t pscid, hwaddr iova)
uint32_t gscid, uint32_t pscid, hwaddr iova, RISCVIOMMUTransTag transtag)
{
GHashTable *iot_cache;
RISCVIOMMUEntry key = {
.tag = transtag,
.gscid = gscid,
.pscid = pscid,
.iova = PPN_DOWN(iova),
@ -1336,9 +1393,24 @@ static void riscv_iommu_iot_inval(RISCVIOMMUState *s, GHFunc func,
g_hash_table_unref(iot_cache);
}
static RISCVIOMMUTransTag riscv_iommu_get_transtag(RISCVIOMMUContext *ctx)
{
uint64_t satp = get_field(ctx->satp, RISCV_IOMMU_ATP_MODE_FIELD);
uint64_t gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD);
if (satp == RISCV_IOMMU_DC_FSC_MODE_BARE) {
return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ?
RISCV_IOMMU_TRANS_TAG_BY : RISCV_IOMMU_TRANS_TAG_VG;
} else {
return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ?
RISCV_IOMMU_TRANS_TAG_SS : RISCV_IOMMU_TRANS_TAG_VN;
}
}
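
A standalone sketch (illustrative only, not part of the patch) of the four-way tag selection implemented by riscv_iommu_get_transtag() above: the tag records whether the S/VS-stage and G-stage page tables are Bare, so cached translations from different stage configurations can never alias in the IOTLB.

/* Sketch only: mirrors the 2x2 tag selection above. */
#include <stdio.h>
#include <stdbool.h>

typedef enum {
    TRANS_TAG_BY, /* Bypass: both stages Bare   */
    TRANS_TAG_SS, /* Single Stage: S-stage only */
    TRANS_TAG_VG, /* G-stage only               */
    TRANS_TAG_VN, /* Nested: both stages active */
} TransTag;

static TransTag get_transtag(bool s_stage_bare, bool g_stage_bare)
{
    if (s_stage_bare) {
        return g_stage_bare ? TRANS_TAG_BY : TRANS_TAG_VG;
    } else {
        return g_stage_bare ? TRANS_TAG_SS : TRANS_TAG_VN;
    }
}

int main(void)
{
    static const char *names[] = { "BY", "SS", "VG", "VN" };

    for (int s = 0; s < 2; s++) {
        for (int g = 0; g < 2; g++) {
            printf("s-stage %s, g-stage %s -> %s\n",
                   s ? "Bare" : "set", g ? "Bare" : "set",
                   names[get_transtag(s, g)]);
        }
    }
    return 0;
}

The IOTINVAL.GVMA handling further down then flushes both the VG and VN tags, since either kind of entry may hold G-stage mappings.
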
static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
IOMMUTLBEntry *iotlb, bool enable_cache)
{
RISCVIOMMUTransTag transtag = riscv_iommu_get_transtag(ctx);
RISCVIOMMUEntry *iot;
IOMMUAccessFlags perm;
bool enable_pid;
@ -1364,7 +1436,7 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
}
}
iot = riscv_iommu_iot_lookup(ctx, iot_cache, iotlb->iova);
iot = riscv_iommu_iot_lookup(ctx, iot_cache, iotlb->iova, transtag);
perm = iot ? iot->perm : IOMMU_NONE;
if (perm != IOMMU_NONE) {
iotlb->translated_addr = PPN_PHYS(iot->phys);
@ -1395,6 +1467,7 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
iot->gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
iot->pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
iot->perm = iotlb->perm;
iot->tag = transtag;
riscv_iommu_iot_update(s, iot_cache, iot);
}
@ -1602,44 +1675,72 @@ static void riscv_iommu_process_cq_tail(RISCVIOMMUState *s)
case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA,
RISCV_IOMMU_CMD_IOTINVAL_OPCODE):
if (cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV) {
{
bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV);
bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV);
bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV);
uint32_t gscid = get_field(cmd.dword0,
RISCV_IOMMU_CMD_IOTINVAL_GSCID);
uint32_t pscid = get_field(cmd.dword0,
RISCV_IOMMU_CMD_IOTINVAL_PSCID);
hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK;
if (pscv) {
/* illegal command arguments IOTINVAL.GVMA & PSCV == 1 */
goto cmd_ill;
} else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV)) {
/* invalidate all cache mappings */
func = riscv_iommu_iot_inval_all;
} else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV)) {
/* invalidate cache matching GSCID */
func = riscv_iommu_iot_inval_gscid;
} else {
/* invalidate cache matching GSCID and ADDR (GPA) */
func = riscv_iommu_iot_inval_gscid_gpa;
}
riscv_iommu_iot_inval(s, func,
get_field(cmd.dword0, RISCV_IOMMU_CMD_IOTINVAL_GSCID), 0,
cmd.dword1 << 2 & TARGET_PAGE_MASK);
func = riscv_iommu_iot_inval_all;
if (gv) {
func = (av) ? riscv_iommu_iot_inval_gscid_iova :
riscv_iommu_iot_inval_gscid;
}
riscv_iommu_iot_inval(
s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VG);
riscv_iommu_iot_inval(
s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VN);
break;
}
case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA,
RISCV_IOMMU_CMD_IOTINVAL_OPCODE):
if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV)) {
/* invalidate all cache mappings, simplified model */
func = riscv_iommu_iot_inval_all;
} else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV)) {
/* invalidate cache matching GSCID, simplified model */
func = riscv_iommu_iot_inval_gscid;
} else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV)) {
/* invalidate cache matching GSCID and PSCID */
func = riscv_iommu_iot_inval_pscid;
{
bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV);
bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV);
bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV);
uint32_t gscid = get_field(cmd.dword0,
RISCV_IOMMU_CMD_IOTINVAL_GSCID);
uint32_t pscid = get_field(cmd.dword0,
RISCV_IOMMU_CMD_IOTINVAL_PSCID);
hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK;
RISCVIOMMUTransTag transtag;
if (gv) {
transtag = RISCV_IOMMU_TRANS_TAG_VN;
if (pscv) {
func = (av) ? riscv_iommu_iot_inval_gscid_pscid_iova :
riscv_iommu_iot_inval_gscid_pscid;
} else {
func = (av) ? riscv_iommu_iot_inval_gscid_iova :
riscv_iommu_iot_inval_gscid;
}
} else {
/* invalidate cache matching GSCID and PSCID and ADDR (IOVA) */
func = riscv_iommu_iot_inval_pscid_iova;
transtag = RISCV_IOMMU_TRANS_TAG_SS;
if (pscv) {
func = (av) ? riscv_iommu_iot_inval_pscid_iova :
riscv_iommu_iot_inval_pscid;
} else {
func = (av) ? riscv_iommu_iot_inval_iova :
riscv_iommu_iot_inval_all;
}
}
riscv_iommu_iot_inval(s, func,
get_field(cmd.dword0, RISCV_IOMMU_CMD_IOTINVAL_GSCID),
get_field(cmd.dword0, RISCV_IOMMU_CMD_IOTINVAL_PSCID),
cmd.dword1 << 2 & TARGET_PAGE_MASK);
riscv_iommu_iot_inval(s, func, gscid, pscid, iova, transtag);
break;
}
case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT,
RISCV_IOMMU_CMD_IODIR_OPCODE):

View file

@ -28,6 +28,7 @@
#include "target/riscv/cpu.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/riscv_hart.h"
#include "qemu/error-report.h"
static const Property riscv_harts_props[] = {
DEFINE_PROP_UINT32("num-harts", RISCVHartArrayState, num_harts, 1),
@ -35,6 +36,23 @@ static const Property riscv_harts_props[] = {
DEFINE_PROP_STRING("cpu-type", RISCVHartArrayState, cpu_type),
DEFINE_PROP_UINT64("resetvec", RISCVHartArrayState, resetvec,
DEFAULT_RSTVEC),
/*
* Smrnmi implementation-defined interrupt and exception trap handlers.
*
* When an RNMI interrupt is detected, the hart then enters M-mode and
* jumps to the address defined by "rnmi-interrupt-vector".
*
* When the hart encounters an exception while executing in M-mode with
* the mnstatus.NMIE bit clear, the hart then jumps to the address
* defined by "rnmi-exception-vector".
*/
DEFINE_PROP_ARRAY("rnmi-interrupt-vector", RISCVHartArrayState,
num_rnmi_irqvec, rnmi_irqvec, qdev_prop_uint64,
uint64_t),
DEFINE_PROP_ARRAY("rnmi-exception-vector", RISCVHartArrayState,
num_rnmi_excpvec, rnmi_excpvec, qdev_prop_uint64,
uint64_t),
};
static void riscv_harts_cpu_reset(void *opaque)
@ -97,6 +115,29 @@ static bool riscv_hart_realize(RISCVHartArrayState *s, int idx,
{
object_initialize_child(OBJECT(s), "harts[*]", &s->harts[idx], cpu_type);
qdev_prop_set_uint64(DEVICE(&s->harts[idx]), "resetvec", s->resetvec);
if (s->harts[idx].cfg.ext_smrnmi) {
if (idx < s->num_rnmi_irqvec) {
qdev_prop_set_uint64(DEVICE(&s->harts[idx]),
"rnmi-interrupt-vector", s->rnmi_irqvec[idx]);
}
if (idx < s->num_rnmi_excpvec) {
qdev_prop_set_uint64(DEVICE(&s->harts[idx]),
"rnmi-exception-vector", s->rnmi_excpvec[idx]);
}
} else {
if (s->num_rnmi_irqvec > 0) {
warn_report_once("rnmi-interrupt-vector property is ignored "
"because Smrnmi extension is not enabled.");
}
if (s->num_rnmi_excpvec > 0) {
warn_report_once("rnmi-exception-vector property is ignored "
"because Smrnmi extension is not enabled.");
}
}
s->harts[idx].env.mhartid = s->hartid_base + idx;
qemu_register_reset(riscv_harts_cpu_reset, &s->harts[idx]);
return qdev_realize(DEVICE(&s->harts[idx]), NULL, errp);

View file

@ -750,7 +750,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_add_subnode(ms->fdt, "/cpus");
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency",
kvm_enabled() ?
kvm_riscv_get_timebase_frequency(first_cpu) :
kvm_riscv_get_timebase_frequency(&s->soc->harts[0]) :
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ);
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);

View file

@ -38,6 +38,10 @@ struct RISCVHartArrayState {
uint32_t hartid_base;
char *cpu_type;
uint64_t resetvec;
uint32_t num_rnmi_irqvec;
uint64_t *rnmi_irqvec;
uint32_t num_rnmi_excpvec;
uint64_t *rnmi_excpvec;
RISCVCPU *harts;
};

View file

@ -42,7 +42,7 @@
/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
RVC, RVS, RVU, RVH, RVG, RVB, 0};
/*
* From vector_helper.c
@ -183,18 +183,37 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
@ -288,7 +307,7 @@ static const char * const riscv_excp_names[] = {
"load_page_fault",
"reserved",
"store_page_fault",
"reserved",
"double_trap",
"reserved",
"reserved",
"reserved",
@ -889,13 +908,6 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
CSR_MSCRATCH,
CSR_SSCRATCH,
CSR_SATP,
CSR_MMTE,
CSR_UPMBASE,
CSR_UPMMASK,
CSR_SPMBASE,
CSR_SPMMASK,
CSR_MPMBASE,
CSR_MPMMASK,
};
for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
@ -1055,6 +1067,9 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
env->mstatus_hs = set_field(env->mstatus_hs,
MSTATUS64_UXL, env->misa_mxl);
}
if (riscv_cpu_cfg(env)->ext_smdbltrp) {
env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
}
}
env->mcause = 0;
env->miclaim = MIP_SGEIP;
@ -1081,8 +1096,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
}
i++;
}
/* mmte is supposed to have pm.current hardwired to 1 */
env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
/*
* Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
@ -1114,7 +1127,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
env->ssp = 0;
env->xl = riscv_cpu_mxl(env);
riscv_cpu_update_mask(env);
cs->exception_index = RISCV_EXCP_NONE;
env->load_res = -1;
set_default_nan_mode(1, &env->fp_status);
@ -1127,6 +1139,11 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
riscv_trigger_reset_hold(env);
}
if (cpu->cfg.ext_smrnmi) {
env->rnmip = 0;
env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
}
if (kvm_enabled()) {
kvm_riscv_reset_vcpu(cpu);
}
@ -1407,6 +1424,11 @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level)
g_assert_not_reached();
}
}
static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
@ -1430,6 +1452,8 @@ static void riscv_cpu_init(Object *obj)
#ifndef CONFIG_USER_ONLY
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
"riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */
general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
@ -1504,7 +1528,6 @@ static const MISAExtInfo misa_ext_info_arr[] = {
MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
MISA_EXT_INFO(RVU, "u", "User-level instructions"),
MISA_EXT_INFO(RVH, "h", "Hypervisor"),
MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
MISA_EXT_INFO(RVV, "v", "Vector operations"),
MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
@ -1571,6 +1594,10 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
/* Defaults for standard extensions */
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
@ -1599,11 +1626,19 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
@ -1708,6 +1743,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
{ },
};
@ -2739,6 +2775,34 @@ static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
},
};
static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
.ext = CPU_CFG_OFFSET(ext_ssccfg),
.implied_multi_exts = {
CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
CPU_CFG_OFFSET(ext_smcdeleg),
RISCV_IMPLIED_EXTS_RULE_END
},
};
static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
.ext = CPU_CFG_OFFSET(ext_supm),
.implied_multi_exts = {
CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
RISCV_IMPLIED_EXTS_RULE_END
},
};
static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
.ext = CPU_CFG_OFFSET(ext_sspm),
.implied_multi_exts = {
CPU_CFG_OFFSET(ext_smnpm),
RISCV_IMPLIED_EXTS_RULE_END
},
};
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
&RVM_IMPLIED, &RVV_IMPLIED, NULL
@ -2756,7 +2820,8 @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
&ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
&ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
&ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
&SUPM_IMPLIED, &SSPM_IMPLIED,
NULL
};
@ -2785,6 +2850,10 @@ static const Property riscv_cpu_properties[] = {
#ifndef CONFIG_USER_ONLY
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
DEFAULT_RNMI_IRQVEC),
DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
DEFAULT_RNMI_EXCPVEC),
#endif
DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

View file

@ -71,7 +71,6 @@ typedef struct CPUArchState CPURISCVState;
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')
@ -129,6 +128,14 @@ typedef enum {
EXT_STATUS_DIRTY,
} RISCVExtStatus;
/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
PMM_FIELD_DISABLED = 0,
PMM_FIELD_RESERVED = 1,
PMM_FIELD_PMLEN7 = 2,
PMM_FIELD_PMLEN16 = 3,
} RISCVPmPmm;
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
/*
@ -385,6 +392,7 @@ struct CPUArchState {
uint32_t scounteren;
uint32_t mcounteren;
uint32_t scountinhibit;
uint32_t mcountinhibit;
/* PMU cycle & instret privilege mode filtering */
@ -451,24 +459,11 @@ struct CPUArchState {
/* True if in debugger mode. */
bool debugger;
/*
* CSRs for PointerMasking extension
*/
target_ulong mmte;
target_ulong mpmmask;
target_ulong mpmbase;
target_ulong spmmask;
target_ulong spmbase;
target_ulong upmmask;
target_ulong upmbase;
uint64_t mstateen[SMSTATEEN_MAX_COUNT];
uint64_t hstateen[SMSTATEEN_MAX_COUNT];
uint64_t sstateen[SMSTATEEN_MAX_COUNT];
uint64_t henvcfg;
#endif
target_ulong cur_pmmask;
target_ulong cur_pmbase;
/* Fields from here on are preserved across CPU reset. */
QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
@ -486,6 +481,15 @@ struct CPUArchState {
uint64_t kvm_timer_state;
uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
/* RNMI */
target_ulong mnscratch;
target_ulong mnepc;
target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
target_ulong mnstatus;
target_ulong rnmip;
uint64_t rnmi_irqvec;
uint64_t rnmi_excpvec;
};
/*
@ -560,6 +564,7 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
@ -584,6 +589,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
uint64_t value);
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
@ -606,7 +612,8 @@ void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
int *max_insns, vaddr pc, void *host_pc);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc);
RISCVException exception,
uintptr_t pc);
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
@ -627,19 +634,22 @@ FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 20, 1)
FIELD(TB_FLAGS, VMA, 21, 1)
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 22, 1)
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1)
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 30, 1)
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and address sign extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
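
A quick standalone check (sketch only; the bit positions are copied from the FIELD() lines above) that the repacked TB_FLAGS layout stays collision-free now that PM_MASK_ENABLED and PM_BASE_ENABLED are gone and the later fields have shifted down:

/* Sketch: verify the renumbered TB_FLAGS bit assignments do not overlap. */
#include <stdio.h>
#include <stdint.h>

struct field { const char *name; unsigned shift, width; };

int main(void)
{
    /* Only the fields renumbered in this hunk. */
    static const struct field f[] = {
        { "VTA", 18, 1 }, { "VMA", 19, 1 }, { "ITRIGGER", 20, 1 },
        { "VIRT_ENABLED", 21, 1 }, { "PRIV", 22, 2 }, { "AXL", 24, 2 },
        { "FCFI_ENABLED", 26, 1 }, { "FCFI_LP_EXPECTED", 27, 1 },
        { "BCFI_ENABLED", 28, 1 }, { "PM_PMM", 29, 2 },
        { "PM_SIGNEXTEND", 31, 1 },
    };
    uint32_t used = 0;

    for (unsigned i = 0; i < sizeof(f) / sizeof(f[0]); i++) {
        uint32_t mask = ((1u << f[i].width) - 1) << f[i].shift;
        if (used & mask) {
            printf("overlap at %s\n", f[i].name);
            return 1;
        }
        used |= mask;
    }
    printf("no overlaps, bits 18-31 used: 0x%08x\n", used);
    return 0;
}
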
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
@ -775,11 +785,16 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags);
void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask);

View file

@ -173,6 +173,13 @@
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
/* Machine Indirect Register Alias */
#define CSR_MIREG2 0x352
#define CSR_MIREG3 0x353
#define CSR_MIREG4 0x355
#define CSR_MIREG5 0x356
#define CSR_MIREG6 0x357
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPEI 0x35c
#define CSR_MTOPI 0xfb0
@ -203,6 +210,9 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F
/* Supervisor Counter Delegation */
#define CSR_SCOUNTINHIBIT 0x120
/* Supervisor Trap Handling */
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
@ -222,6 +232,13 @@
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
/* Supervisor Indirect Register Alias */
#define CSR_SIREG2 0x152
#define CSR_SIREG3 0x153
#define CSR_SIREG4 0x155
#define CSR_SIREG5 0x156
#define CSR_SIREG6 0x157
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPEI 0x15c
#define CSR_STOPI 0xdb0
@ -288,6 +305,13 @@
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
/* Virtual Supervisor Indirect Alias */
#define CSR_VSIREG2 0x252
#define CSR_VSIREG3 0x253
#define CSR_VSIREG4 0x255
#define CSR_VSIREG5 0x256
#define CSR_VSIREG6 0x257
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPEI 0x25c
#define CSR_VSTOPI 0xeb0
@ -353,6 +377,12 @@
#define CSR_PMPADDR14 0x3be
#define CSR_PMPADDR15 0x3bf
/* RNMI */
#define CSR_MNSCRATCH 0x740
#define CSR_MNEPC 0x741
#define CSR_MNCAUSE 0x742
#define CSR_MNSTATUS 0x744
/* Debug/Trace Registers (shared with Debug Mode) */
#define CSR_TSELECT 0x7a0
#define CSR_TDATA1 0x7a1
@ -497,37 +527,6 @@
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
/*
* User PointerMasking registers
* NB: actual CSR numbers might be changed in future
*/
#define CSR_UMTE 0x4c0
#define CSR_UPMMASK 0x4c1
#define CSR_UPMBASE 0x4c2
/*
* Machine PointerMasking registers
* NB: actual CSR numbers might be changed in future
*/
#define CSR_MMTE 0x3c0
#define CSR_MPMMASK 0x3c1
#define CSR_MPMBASE 0x3c2
/*
* Supervisor PointerMaster registers
* NB: actual CSR numbers might be changed in future
*/
#define CSR_SMTE 0x1c0
#define CSR_SPMMASK 0x1c1
#define CSR_SPMBASE 0x1c2
/*
* Hypervisor PointerMaster registers
* NB: actual CSR numbers might be changed in future
*/
#define CSR_VSMTE 0x2c0
#define CSR_VSPMMASK 0x2c1
#define CSR_VSPMBASE 0x2c2
#define CSR_SCOUNTOVF 0xda0
/* Crypto Extension */
@ -556,9 +555,11 @@
#define MSTATUS_TW 0x00200000 /* since: priv-1.10 */
#define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */
#define MSTATUS_SPELP 0x00800000 /* zicfilp */
#define MSTATUS_SDT 0x01000000
#define MSTATUS_MPELP 0x020000000000 /* zicfilp */
#define MSTATUS_GVA 0x4000000000ULL
#define MSTATUS_MPV 0x8000000000ULL
#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */
#define MSTATUS64_UXL 0x0000000300000000ULL
#define MSTATUS64_SXL 0x0000000C00000000ULL
@ -588,6 +589,7 @@ typedef enum {
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */
#define SSTATUS_SDT MSTATUS_SDT
#define SSTATUS64_UXL 0x0000000300000000ULL
@ -606,6 +608,7 @@ typedef enum {
#define HSTATUS_VTSR 0x00400000
#define HSTATUS_HUKTE 0x01000000
#define HSTATUS_VSXL 0x300000000
#define HSTATUS_HUPMM 0x3000000000000
#define HSTATUS32_WPRI 0xFF8FF87E
#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL
@ -634,6 +637,12 @@ typedef enum {
#define SATP64_ASID 0x0FFFF00000000000ULL
#define SATP64_PPN 0x00000FFFFFFFFFFFULL
/* RNMI mnstatus CSR mask */
#define MNSTATUS_NMIE 0x00000008
#define MNSTATUS_MNPV 0x00000080
#define MNSTATUS_MNPELP 0x00000200
#define MNSTATUS_MNPP 0x00001800
/* VM modes (satp.mode) privileged ISA 1.10 */
#define VM_1_10_MBARE 0
#define VM_1_10_SV32 1
@ -669,6 +678,12 @@ typedef enum {
/* Default Reset Vector address */
#define DEFAULT_RSTVEC 0x1000
/* Default RNMI Interrupt Vector address */
#define DEFAULT_RNMI_IRQVEC 0x0
/* Default RNMI Exception Vector address */
#define DEFAULT_RNMI_EXCPVEC 0x0
/* Exception causes */
typedef enum RISCVException {
RISCV_EXCP_NONE = -1, /* sentinel value */
@ -687,6 +702,7 @@ typedef enum RISCVException {
RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
RISCV_EXCP_DOUBLE_TRAP = 0x10,
RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */
RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */
RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
@ -723,6 +739,9 @@ typedef enum RISCVException {
/* -1 is due to bit zero of hgeip and hgeie being ROZ. */
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
/* RNMI causes */
#define RNMI_MAX 16
/* mip masks */
#define MIP_USIP (1 << IRQ_U_SOFT)
#define MIP_SSIP (1 << IRQ_S_SOFT)
@ -759,11 +778,6 @@ typedef enum RISCVException {
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
/* General PointerMasking CSR bits */
#define PM_ENABLE 0x00000001ULL
#define PM_CURRENT 0x00000002ULL
#define PM_INSN 0x00000004ULL
/* Execution environment configuration bits */
#define MENVCFG_FIOM BIT(0)
#define MENVCFG_LPE BIT(2) /* zicfilp */
@ -771,11 +785,15 @@ typedef enum RISCVException {
#define MENVCFG_CBIE (3UL << 4)
#define MENVCFG_CBCFE BIT(6)
#define MENVCFG_CBZE BIT(7)
#define MENVCFG_PMM (3ULL << 32)
#define MENVCFG_DTE (1ULL << 59)
#define MENVCFG_CDE (1ULL << 60)
#define MENVCFG_ADUE (1ULL << 61)
#define MENVCFG_PBMTE (1ULL << 62)
#define MENVCFG_STCE (1ULL << 63)
/* For RV32 */
#define MENVCFGH_DTE BIT(27)
#define MENVCFGH_ADUE BIT(29)
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)
@ -787,6 +805,7 @@ typedef enum RISCVException {
#define SENVCFG_CBCFE MENVCFG_CBCFE
#define SENVCFG_CBZE MENVCFG_CBZE
#define SENVCFG_UKTE BIT(8)
#define SENVCFG_PMM MENVCFG_PMM
#define HENVCFG_FIOM MENVCFG_FIOM
#define HENVCFG_LPE MENVCFG_LPE
@ -794,66 +813,18 @@ typedef enum RISCVException {
#define HENVCFG_CBIE MENVCFG_CBIE
#define HENVCFG_CBCFE MENVCFG_CBCFE
#define HENVCFG_CBZE MENVCFG_CBZE
#define HENVCFG_PMM MENVCFG_PMM
#define HENVCFG_DTE MENVCFG_DTE
#define HENVCFG_ADUE MENVCFG_ADUE
#define HENVCFG_PBMTE MENVCFG_PBMTE
#define HENVCFG_STCE MENVCFG_STCE
/* For RV32 */
#define HENVCFGH_DTE MENVCFGH_DTE
#define HENVCFGH_ADUE MENVCFGH_ADUE
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE
/* Offsets for every pair of control bits per each priv level */
#define XS_OFFSET 0ULL
#define U_OFFSET 2ULL
#define S_OFFSET 5ULL
#define M_OFFSET 8ULL
#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET)
#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
#define U_PM_INSN (PM_INSN << U_OFFSET)
#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
#define S_PM_INSN (PM_INSN << S_OFFSET)
#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
#define M_PM_INSN (PM_INSN << M_OFFSET)
/* mmte CSR bits */
#define MMTE_PM_XS_BITS PM_XS_BITS
#define MMTE_U_PM_ENABLE U_PM_ENABLE
#define MMTE_U_PM_CURRENT U_PM_CURRENT
#define MMTE_U_PM_INSN U_PM_INSN
#define MMTE_S_PM_ENABLE S_PM_ENABLE
#define MMTE_S_PM_CURRENT S_PM_CURRENT
#define MMTE_S_PM_INSN S_PM_INSN
#define MMTE_M_PM_ENABLE M_PM_ENABLE
#define MMTE_M_PM_CURRENT M_PM_CURRENT
#define MMTE_M_PM_INSN M_PM_INSN
#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
MMTE_PM_XS_BITS)
/* (v)smte CSR bits */
#define SMTE_PM_XS_BITS PM_XS_BITS
#define SMTE_U_PM_ENABLE U_PM_ENABLE
#define SMTE_U_PM_CURRENT U_PM_CURRENT
#define SMTE_U_PM_INSN U_PM_INSN
#define SMTE_S_PM_ENABLE S_PM_ENABLE
#define SMTE_S_PM_CURRENT S_PM_CURRENT
#define SMTE_S_PM_INSN S_PM_INSN
#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
SMTE_PM_XS_BITS)
/* umte CSR bits */
#define UMTE_U_PM_ENABLE U_PM_ENABLE
#define UMTE_U_PM_CURRENT U_PM_CURRENT
#define UMTE_U_PM_INSN U_PM_INSN
#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
@ -865,10 +836,15 @@ typedef enum RISCVException {
#define ISELECT_IMSIC_EIE63 0xff
#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
#define ISELECT_MASK 0x1ff
#define ISELECT_MASK_AIA 0x1ff
/* [M|S|VS]SELCT value for Indirect CSR Access Extension */
#define ISELECT_CD_FIRST 0x40
#define ISELECT_CD_LAST 0x5f
#define ISELECT_MASK_SXCSRIND 0xfff
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1)
#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1)
/* IMSIC bits (AIA) */
#define IMSIC_TOPEI_IID_SHIFT 16
@ -961,6 +937,9 @@ typedef enum RISCVException {
#define MHPMEVENT_IDX_MASK 0xFFFFF
#define MHPMEVENT_SSCOF_RESVD 16
/* RISC-V-specific interrupt pending bits. */
#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
/* JVT CSR bits */
#define JVT_MODE 0x3F
#define JVT_BASE (~0x3F)

View file

@ -78,7 +78,13 @@ struct RISCVCPUConfig {
bool ext_ztso;
bool ext_smstateen;
bool ext_sstc;
bool ext_smcdeleg;
bool ext_ssccfg;
bool ext_smcntrpmf;
bool ext_smcsrind;
bool ext_sscsrind;
bool ext_ssdbltrp;
bool ext_smdbltrp;
bool ext_svadu;
bool ext_svinval;
bool ext_svnapot;
@ -129,6 +135,12 @@ struct RISCVCPUConfig {
bool ext_ssaia;
bool ext_sscofpmf;
bool ext_smepmp;
bool ext_smrnmi;
bool ext_ssnpm;
bool ext_smnpm;
bool ext_smmpm;
bool ext_sspm;
bool ext_supm;
bool rvv_ta_all_1s;
bool rvv_ma_all_1s;
bool rvv_vl_half_avl;
@ -141,6 +153,7 @@ struct RISCVCPUConfig {
bool ext_svade;
bool ext_zic64b;
bool ext_ssstateen;
bool ext_sha;
/*
* Always 'true' booleans for named features

View file

@ -120,12 +120,26 @@ bool cpu_get_bcfien(CPURISCVState *env)
}
}
bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
{
#ifdef CONFIG_USER_ONLY
return false;
#else
if (virt) {
return (env->henvcfg & HENVCFG_DTE) != 0;
} else {
return (env->menvcfg & MENVCFG_DTE) != 0;
}
#endif
}
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags)
{
RISCVCPU *cpu = env_archcpu(env);
RISCVExtStatus fs, vs;
uint32_t flags = 0;
bool pm_signext = riscv_cpu_virt_mem_enabled(env);
*pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
*cs_base = 0;
@ -210,58 +224,106 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
if (env->cur_pmmask != 0) {
flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
}
if (env->cur_pmbase != 0) {
flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
}
flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
*pflags = flags;
}
void riscv_cpu_update_mask(CPURISCVState *env)
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
{
target_ulong mask = 0, base = 0;
RISCVMXL xl = env->xl;
/*
* TODO: Current RVJ spec does not specify
* how the extension interacts with XLEN.
*/
#ifndef CONFIG_USER_ONLY
int mode = cpu_address_mode(env);
xl = cpu_get_xl(env, mode);
if (riscv_has_ext(env, RVJ)) {
switch (mode) {
case PRV_M:
if (env->mmte & M_PM_ENABLE) {
mask = env->mpmmask;
base = env->mpmbase;
int priv_mode = cpu_address_mode(env);
if (get_field(env->mstatus, MSTATUS_MPRV) &&
get_field(env->mstatus, MSTATUS_MXR)) {
return PMM_FIELD_DISABLED;
}
/* Get current PMM field */
switch (priv_mode) {
case PRV_M:
if (riscv_cpu_cfg(env)->ext_smmpm) {
return get_field(env->mseccfg, MSECCFG_PMM);
}
break;
case PRV_S:
if (riscv_cpu_cfg(env)->ext_smnpm) {
if (get_field(env->mstatus, MSTATUS_MPV)) {
return get_field(env->henvcfg, HENVCFG_PMM);
} else {
return get_field(env->menvcfg, MENVCFG_PMM);
}
break;
case PRV_S:
if (env->mmte & S_PM_ENABLE) {
mask = env->spmmask;
base = env->spmbase;
}
break;
case PRV_U:
if (riscv_has_ext(env, RVS)) {
if (riscv_cpu_cfg(env)->ext_ssnpm) {
return get_field(env->senvcfg, SENVCFG_PMM);
}
break;
case PRV_U:
if (env->mmte & U_PM_ENABLE) {
mask = env->upmmask;
base = env->upmbase;
} else {
if (riscv_cpu_cfg(env)->ext_smnpm) {
return get_field(env->menvcfg, MENVCFG_PMM);
}
break;
default:
g_assert_not_reached();
}
break;
default:
g_assert_not_reached();
}
return PMM_FIELD_DISABLED;
#else
return PMM_FIELD_DISABLED;
#endif
}
RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
{
#ifndef CONFIG_USER_ONLY
int priv_mode = cpu_address_mode(env);
if (priv_mode == PRV_U) {
return get_field(env->hstatus, HSTATUS_HUPMM);
} else {
if (get_field(env->hstatus, HSTATUS_SPVP)) {
return get_field(env->henvcfg, HENVCFG_PMM);
} else {
return get_field(env->senvcfg, SENVCFG_PMM);
}
}
#else
return PMM_FIELD_DISABLED;
#endif
if (xl == MXL_RV32) {
env->cur_pmmask = mask & UINT32_MAX;
env->cur_pmbase = base & UINT32_MAX;
}
bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
{
#ifndef CONFIG_USER_ONLY
int satp_mode = 0;
int priv_mode = cpu_address_mode(env);
if (riscv_cpu_mxl(env) == MXL_RV32) {
satp_mode = get_field(env->satp, SATP32_MODE);
} else {
env->cur_pmmask = mask;
env->cur_pmbase = base;
satp_mode = get_field(env->satp, SATP64_MODE);
}
return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
#else
return false;
#endif
}
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
{
switch (pmm) {
case PMM_FIELD_DISABLED:
return 0;
case PMM_FIELD_PMLEN7:
return 7;
case PMM_FIELD_PMLEN16:
return 16;
default:
g_assert_not_reached();
}
}
@ -505,6 +567,18 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
uint64_t vsbits, irq_delegated;
int virq;
/* Priority: RNMI > Other interrupt. */
if (riscv_cpu_cfg(env)->ext_smrnmi) {
/* If mnstatus.NMIE == 0, all interrupts are disabled. */
if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
return RISCV_EXCP_NONE;
}
if (env->rnmip) {
return ctz64(env->rnmip); /* since non-zero */
}
}
/* Determine interrupt enable state of all privilege modes */
if (env->virt_enabled) {
mie = 1;
@ -567,7 +641,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
if (interrupt_request & mask) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int interruptno = riscv_cpu_local_irq_pending(env);
@ -628,6 +704,10 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
g_assert(riscv_has_ext(env, RVH));
if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
mstatus_mask |= MSTATUS_SDT;
}
if (current_virt) {
/* Current V=1 and we are about to change to V=0 */
env->vsstatus = env->mstatus & mstatus_mask;
@ -699,6 +779,30 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
{
CPURISCVState *env = &cpu->env;
CPUState *cs = CPU(cpu);
bool release_lock = false;
if (!bql_locked()) {
release_lock = true;
bql_lock();
}
if (level) {
env->rnmip |= 1 << irq;
cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
} else {
env->rnmip &= ~(1 << irq);
cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
}
if (release_lock) {
bql_unlock();
}
}
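
A minimal sketch of the pending-RNMI bookkeeping used above (assumptions: a bare uint32_t bitmask and the GCC/Clang __builtin_ctz intrinsic standing in for ctz64): each RNMI line sets one bit in rnmip, mnstatus.NMIE gates delivery, and the lowest-numbered pending line wins, matching the ctz64(env->rnmip) check in riscv_cpu_local_irq_pending().

/* Sketch only: models env->rnmip handling outside of QEMU. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct {
    uint32_t rnmip; /* pending RNMI lines, one bit per line   */
    bool     nmie;  /* mnstatus.NMIE: false masks everything  */
} FakeHart;

static void set_rnmi(FakeHart *h, unsigned irq, bool level)
{
    if (level) {
        h->rnmip |= 1u << irq;
    } else {
        h->rnmip &= ~(1u << irq);
    }
}

/* Returns the RNMI line to take, or -1 if nothing can be delivered. */
static int pending_rnmi(const FakeHart *h)
{
    if (!h->nmie || !h->rnmip) {
        return -1;
    }
    return __builtin_ctz(h->rnmip); /* lowest pending line wins */
}

int main(void)
{
    FakeHart h = { .rnmip = 0, .nmie = true };

    set_rnmi(&h, 5, true);
    set_rnmi(&h, 2, true);
    printf("taken: %d\n", pending_rnmi(&h)); /* 2 */

    h.nmie = false;                          /* handler entered */
    printf("taken: %d\n", pending_rnmi(&h)); /* -1: masked */
    return 0;
}
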
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@ -786,7 +890,6 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
riscv_cpu_update_mask(env);
/*
* Clear the load reservation - otherwise a reservation placed in one
@ -1835,6 +1938,24 @@ static target_ulong promote_load_fault(target_ulong orig_cause)
/* if no promotion, return original cause */
return orig_cause;
}
static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
{
env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
env->mncause = cause;
env->mnepc = env->pc;
env->pc = env->rnmi_irqvec;
if (cpu_get_fcfien(env)) {
env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
}
/* Trapping to M mode, virt is disabled */
riscv_cpu_set_mode(env, PRV_M, false);
}
/*
* Handle Traps
*
@ -1848,7 +1969,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool virt = env->virt_enabled;
bool write_gva = false;
bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
bool vsmode_exc;
uint64_t s;
int mode;
/*
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
@ -1861,12 +1984,21 @@ void riscv_cpu_do_interrupt(CPUState *cs)
!(env->mip & (1ULL << cause));
bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
!(env->mip & (1ULL << cause));
bool smode_double_trap = false;
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
target_ulong tval = 0;
target_ulong tinst = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
int sxlen = 0;
int mxlen = 0;
int mxlen = 16 << riscv_cpu_mxl(env);
bool nnmi_excep = false;
if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
env->virt_enabled);
return;
}
if (!async) {
/* set tval to badaddr for traps with address information */
@ -1960,8 +2092,34 @@ void riscv_cpu_do_interrupt(CPUState *cs)
__func__, env->mhartid, async, cause, env->pc, tval,
riscv_cpu_get_trap_name(cause, async));
if (env->priv <= PRV_S && cause < 64 &&
(((deleg >> cause) & 1) || s_injected || vs_injected)) {
mode = env->priv <= PRV_S && cause < 64 &&
(((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
/*
* Check double trap condition only if already in S-mode and targeting
* S-mode
*/
if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
/* In VS or HS */
if (riscv_has_ext(env, RVH)) {
if (vsmode_exc) {
/* VS -> VS, use henvcfg instead of menvcfg*/
dte = (env->henvcfg & HENVCFG_DTE) != 0;
} else if (env->virt_enabled) {
/* VS -> HS, use mstatus_hs */
sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
}
}
smode_double_trap = dte && sdt;
if (smode_double_trap) {
mode = PRV_M;
}
}
if (mode == PRV_S) {
/* handle the trap in S-mode */
/* save elp status */
if (cpu_get_fcfien(env)) {
@ -1969,10 +2127,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
}
if (riscv_has_ext(env, RVH)) {
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
if (env->virt_enabled &&
(((hdeleg >> cause) & 1) || vs_injected)) {
if (vsmode_exc) {
/* Trap to VS mode */
/*
* See if we need to adjust cause. Yes if its VS mode interrupt
@ -2005,6 +2160,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
if (riscv_env_smode_dbltrp_enabled(env, virt)) {
s = set_field(s, MSTATUS_SDT, 1);
}
env->mstatus = s;
sxlen = 16 << riscv_cpu_sxl(env);
env->scause = cause | ((target_ulong)async << (sxlen - 1));
@ -2016,10 +2174,23 @@ void riscv_cpu_do_interrupt(CPUState *cs)
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S, virt);
} else {
/*
* If the hart encounters an exception while executing in M-mode
* with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
*/
nnmi_excep = cpu->cfg.ext_smrnmi &&
!get_field(env->mnstatus, MNSTATUS_NMIE) &&
!async;
/* handle the trap in M-mode */
/* save elp status */
if (cpu_get_fcfien(env)) {
env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
if (nnmi_excep) {
env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
env->elp);
} else {
env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
}
}
if (riscv_has_ext(env, RVH)) {
@ -2037,20 +2208,54 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trapping to M mode, virt is disabled */
virt = false;
}
/*
* If the hart encounters an exception while executing in M-mode,
* with the mnstatus.NMIE bit clear, the program counter is set to
* the RNMI exception trap handler address.
*/
nnmi_excep = cpu->cfg.ext_smrnmi &&
!get_field(env->mnstatus, MNSTATUS_NMIE) &&
!async;
s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
if (cpu->cfg.ext_smdbltrp) {
if (env->mstatus & MSTATUS_MDT) {
assert(env->priv == PRV_M);
if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
cpu_abort(CPU(cpu), "M-mode double trap\n");
} else {
riscv_do_nmi(env, cause, false);
return;
}
}
s = set_field(s, MSTATUS_MDT, 1);
}
env->mstatus = s;
mxlen = 16 << riscv_cpu_mxl(env);
env->mcause = cause | ((target_ulong)async << (mxlen - 1));
if (smode_double_trap) {
env->mtval2 = env->mcause;
env->mcause = RISCV_EXCP_DOUBLE_TRAP;
} else {
env->mtval2 = mtval2;
}
env->mepc = env->pc;
env->mtval = tval;
env->mtval2 = mtval2;
env->mtinst = tinst;
env->pc = (env->mtvec >> 2 << 2) +
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
/*
* For RNMI exception, program counter is set to the RNMI exception
* trap handler address.
*/
if (nnmi_excep) {
env->pc = env->rnmi_excpvec;
} else {
env->pc = (env->mtvec >> 2 << 2) +
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
}
riscv_cpu_set_mode(env, PRV_M, virt);
}
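
As a reading aid, a condensed sketch (not code from the patch; virtualization details such as henvcfg vs. menvcfg are folded into single flags) of the double-trap escalation added above: a trap that would land in S-mode while sstatus.SDT is already set, with DTE enabled, is redirected to M-mode with cause 16 (double trap), and a trap taken in M-mode with mstatus.MDT set either becomes an RNMI (when Smrnmi is present) or is fatal.

/* Sketch only: condenses the Ssdbltrp/Smdbltrp escalation rules. */
#include <stdio.h>
#include <stdbool.h>

typedef enum { TARGET_S, TARGET_M, TARGET_RNMI, TARGET_ABORT } TrapTarget;

typedef struct {
    bool to_smode; /* trap would normally be handled in S-mode    */
    bool in_smode; /* hart is currently executing in S-mode       */
    bool dte;      /* menvcfg/henvcfg.DTE set (Ssdbltrp enabled)  */
    bool sdt;      /* sstatus.SDT already set                     */
    bool in_mmode; /* trap is being handled in M-mode             */
    bool mdt;      /* mstatus.MDT already set (Smdbltrp)          */
    bool smrnmi;   /* Smrnmi extension available                  */
} TrapCtx;

static TrapTarget classify(const TrapCtx *c)
{
    /* S-mode trap while a previous S-mode trap is still "open". */
    if (c->to_smode && c->in_smode && c->dte && c->sdt) {
        return TARGET_M; /* escalated, mcause = 16 (double trap) */
    }
    /* M-mode trap while mstatus.MDT is set. */
    if (c->in_mmode && c->mdt) {
        return c->smrnmi ? TARGET_RNMI : TARGET_ABORT;
    }
    return c->to_smode ? TARGET_S : TARGET_M;
}

int main(void)
{
    TrapCtx normal = { .to_smode = true, .in_smode = true, .dte = true };
    TrapCtx dbl    = { .to_smode = true, .in_smode = true, .dte = true,
                       .sdt = true };

    printf("normal S trap -> %d, double S trap -> %d\n",
           classify(&normal), classify(&dbl)); /* 0 (S), 1 (M) */
    return 0;
}
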

File diff suppressed because it is too large

View file

@ -213,7 +213,10 @@ static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
return gdb_get_regl(buf, env->priv);
/* Per RiscV debug spec v1.0.0 rc4 */
target_ulong vbit = (env->virt_enabled) ? BIT(2) : 0;
return gdb_get_regl(buf, env->priv | vbit);
#endif
}
return 0;
@ -226,10 +229,22 @@ static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
env->priv = ldtul_p(mem_buf) & 0x3;
if (env->priv == PRV_RESERVED) {
env->priv = PRV_S;
target_ulong new_priv = ldtul_p(mem_buf) & 0x3;
bool new_virt = 0;
if (new_priv == PRV_RESERVED) {
new_priv = PRV_S;
}
if (new_priv != PRV_M) {
new_virt = (ldtul_p(mem_buf) & BIT(2)) >> 2;
}
if (riscv_has_ext(env, RVH) && new_virt != env->virt_enabled) {
riscv_cpu_swap_hypervisor_regs(env);
}
riscv_cpu_set_mode(env, new_priv, new_virt);
#endif
return sizeof(target_ulong);
}
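
For illustration only (the helper names here are hypothetical, not QEMU APIs), the encoding the GDB stub now uses for the virtual 'priv' register: privilege level in bits 1:0, virtualization bit V in bit 2, with V ignored when the written privilege is M, as in the write path above.

/* Sketch: encode/decode of the GDB "priv" virtual register value. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PRV_M 3

static uint64_t encode_priv(unsigned priv, bool virt)
{
    return (priv & 0x3) | ((uint64_t)virt << 2); /* V bit is bit 2 */
}

static void decode_priv(uint64_t val, unsigned *priv, bool *virt)
{
    *priv = val & 0x3;
    /* V only applies outside M-mode, matching the write path above. */
    *virt = (*priv != PRV_M) && ((val >> 2) & 1);
}

int main(void)
{
    unsigned p;
    bool v;

    decode_priv(encode_priv(1, true), &p, &v); /* VS-mode */
    printf("priv=%u virt=%d\n", p, v);         /* priv=1 virt=1 */
    return 0;
}
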

View file

@ -131,6 +131,7 @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(sret, tl, env)
DEF_HELPER_1(mret, tl, env)
DEF_HELPER_1(mnret, tl, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wrs_nto, void, env)
DEF_HELPER_1(tlb_flush, void, env)

View file

@ -121,6 +121,9 @@ wfi 0001000 00101 00000 000 00000 1110011
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
# *** NMI ***
mnret 0111000 00010 00000 000 00000 1110011
# *** RV32I Base Instruction Set ***
lui .................... ..... 0110111 @u
{

View file

@ -18,6 +18,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define REQUIRE_SMRNMI(ctx) do { \
if (!ctx->cfg_ptr->ext_smrnmi) { \
return false; \
} \
} while (0)
static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
{
/* always generates U-level ECALL, fixed in do_interrupt handler */
@ -106,6 +112,20 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
#endif
}
static bool trans_mnret(DisasContext *ctx, arg_mnret *a)
{
#ifndef CONFIG_USER_ONLY
REQUIRE_SMRNMI(ctx);
decode_save_opc(ctx, 0);
gen_helper_mnret(cpu_pc, tcg_env);
tcg_gen_exit_tb(NULL, 0); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
return true;
#else
return false;
#endif
}
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
{
#ifndef CONFIG_USER_ONLY

@@ -145,4 +145,58 @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
/* Our implementation of CPUClass::has_work */
bool riscv_cpu_has_work(CPUState *cs);
/* Zjpm addr masking routine */
static inline target_ulong adjust_addr_body(CPURISCVState *env,
target_ulong addr,
bool is_virt_addr)
{
RISCVPmPmm pmm = PMM_FIELD_DISABLED;
uint32_t pmlen = 0;
bool signext = false;
/* do nothing for rv32 mode */
if (riscv_cpu_mxl(env) == MXL_RV32) {
return addr;
}
/* get the pmm field depending on whether addr is a guest-virtual address */
if (is_virt_addr) {
pmm = riscv_pm_get_virt_pmm(env);
} else {
pmm = riscv_pm_get_pmm(env);
}
/* if pointer masking is disabled, return original addr */
if (pmm == PMM_FIELD_DISABLED) {
return addr;
}
if (!is_virt_addr) {
signext = riscv_cpu_virt_mem_enabled(env);
}
pmlen = riscv_pm_get_pmlen(pmm);
addr = addr << pmlen;
/* sign/zero extend masked address by N-1 bit */
if (signext) {
addr = (target_long)addr >> pmlen;
} else {
addr = addr >> pmlen;
}
return addr;
}
static inline target_ulong adjust_addr(CPURISCVState *env,
target_ulong addr)
{
return adjust_addr_body(env, addr, false);
}
static inline target_ulong adjust_addr_virt(CPURISCVState *env,
target_ulong addr)
{
return adjust_addr_body(env, addr, true);
}
#endif
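A quick numeric check of the masking helpers above, assuming a PMM setting that yields pmlen = 16 (i.e. the low 48 address bits are significant); the sketch below is standalone C, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t va = 0xabcd8000deadbeefULL;   /* 0xabcd tag in the top 16 bits */

        /* zero extension: the top pmlen bits are simply cleared */
        uint64_t zext = (va << 16) >> 16;
        assert(zext == 0x00008000deadbeefULL);

        /* sign extension: bit 47 (here 1) is replicated into the top bits */
        uint64_t sext = (uint64_t)((int64_t)(va << 16) >> 16);
        assert(sext == 0xffff8000deadbeefULL);

        return 0;
    }

Whether the sign- or zero-extending form is used follows riscv_cpu_virt_mem_enabled(), as in adjust_addr_body() above.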

@@ -758,11 +758,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
env->kvm_timer_dirty = false;
}
uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs)
uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
{
uint64_t reg;
KVM_RISCV_GET_TIMER(cs, frequency, reg);
KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
return reg;
}

@@ -19,6 +19,8 @@
#ifndef QEMU_KVM_RISCV_H
#define QEMU_KVM_RISCV_H
#include "target/riscv/cpu-qom.h"
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
@@ -28,6 +30,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
void riscv_kvm_aplic_request(void *opaque, int irq, int level);
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state);
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs);
uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu);
#endif

@@ -152,25 +152,15 @@ static const VMStateDescription vmstate_vector = {
static bool pointermasking_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
CPURISCVState *env = &cpu->env;
return riscv_has_ext(env, RVJ);
return false;
}
static const VMStateDescription vmstate_pointermasking = {
.name = "cpu/pointer_masking",
.version_id = 1,
.minimum_version_id = 1,
.version_id = 2,
.minimum_version_id = 2,
.needed = pointermasking_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINTTL(env.mmte, RISCVCPU),
VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
VMSTATE_UINTTL(env.spmmask, RISCVCPU),
VMSTATE_UINTTL(env.spmbase, RISCVCPU),
VMSTATE_UINTTL(env.upmmask, RISCVCPU),
VMSTATE_UINTTL(env.upmbase, RISCVCPU),
VMSTATE_END_OF_LIST()
}
@@ -266,7 +256,6 @@ static int riscv_cpu_post_load(void *opaque, int version_id)
CPURISCVState *env = &cpu->env;
env->xl = cpu_recompute_xl(env);
riscv_cpu_update_mask(env);
return 0;
}
@@ -434,6 +423,7 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.siselect, RISCVCPU),
VMSTATE_UINT32(env.scounteren, RISCVCPU),
VMSTATE_UINT32(env.mcounteren, RISCVCPU),
VMSTATE_UINT32(env.scountinhibit, RISCVCPU),
VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
vmstate_pmu_ctr_state, PMUCTRState),

@@ -24,12 +24,19 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "trace.h"
/* Exceptions processing helpers */
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc)
RISCVException exception,
uintptr_t pc)
{
CPUState *cs = env_cpu(env);
trace_riscv_exception(exception,
riscv_cpu_get_trap_name(exception, false),
env->pc);
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
@@ -287,6 +294,21 @@ target_ulong helper_sret(CPURISCVState *env)
get_field(mstatus, MSTATUS_SPIE));
mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
if (riscv_has_ext(env, RVH)) {
target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) &&
prev_priv == PRV_U;
/* Returning to VU from HS, vsstatus.sdt = 0 */
if (!env->virt_enabled && prev_vu) {
env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
}
}
mstatus = set_field(mstatus, MSTATUS_SDT, 0);
}
if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) {
mstatus = set_field(mstatus, MSTATUS_MDT, 0);
}
if (env->priv_ver >= PRIV_VERSION_1_12_0) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -297,7 +319,6 @@ target_ulong helper_sret(CPURISCVState *env)
target_ulong hstatus = env->hstatus;
prev_virt = get_field(hstatus, HSTATUS_SPV);
hstatus = set_field(hstatus, HSTATUS_SPV, 0);
env->hstatus = hstatus;
@@ -321,24 +342,46 @@ target_ulong helper_sret(CPURISCVState *env)
return retpc;
}
target_ulong helper_mret(CPURISCVState *env)
static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
target_ulong prev_priv)
{
if (!(env->priv >= PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
target_ulong retpc = env->mepc;
if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
uint64_t mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
if (riscv_cpu_cfg(env)->pmp &&
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
}
static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
target_ulong prev_priv,
target_ulong prev_virt)
{
/* If returning to U, VS or VU, sstatus.sdt = 0 */
if (prev_priv == PRV_U || (prev_virt &&
(prev_priv == PRV_S || prev_priv == PRV_U))) {
mstatus = set_field(mstatus, MSTATUS_SDT, 0);
/* If returning to VU, vsstatus.sdt = 0 */
if (prev_virt && prev_priv == PRV_U) {
env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
}
}
return mstatus;
}
target_ulong helper_mret(CPURISCVState *env)
{
target_ulong retpc = env->mepc;
uint64_t mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
check_ret_from_m_mode(env, retpc, prev_priv);
target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
(prev_priv != PRV_M);
@@ -348,6 +391,12 @@ target_ulong helper_mret(CPURISCVState *env)
mstatus = set_field(mstatus, MSTATUS_MPP,
riscv_has_ext(env, RVU) ? PRV_U : PRV_M);
mstatus = set_field(mstatus, MSTATUS_MPV, 0);
if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
}
if (riscv_cpu_cfg(env)->ext_smdbltrp) {
mstatus = set_field(mstatus, MSTATUS_MDT, 0);
}
if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -370,6 +419,53 @@ target_ulong helper_mret(CPURISCVState *env)
return retpc;
}
target_ulong helper_mnret(CPURISCVState *env)
{
target_ulong retpc = env->mnepc;
target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
target_ulong prev_virt;
check_ret_from_m_mode(env, retpc, prev_priv);
prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
(prev_priv != PRV_M);
env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true);
/*
* If MNRET changes the privilege mode to a mode
* less privileged than M, it also sets mstatus.MPRV to 0.
*/
if (prev_priv < PRV_M) {
env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
}
if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
}
if (riscv_cpu_cfg(env)->ext_smdbltrp) {
if (prev_priv < PRV_M) {
env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0);
}
}
if (riscv_has_ext(env, RVH) && prev_virt) {
riscv_cpu_swap_hypervisor_regs(env);
}
riscv_cpu_set_mode(env, prev_priv, prev_virt);
/*
* If forward cfi is enabled for the new privilege mode, restore the elp
* state and clear mnpelp in mnstatus
*/
if (cpu_get_fcfien(env)) {
env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP);
}
env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0);
return retpc;
}
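The MNRET path above, like the rest of the file, manipulates CSR fields through get_field()/set_field(). As a reminder of the intended semantics, here is a simplified stand-in (not QEMU's actual macros), with MNSTATUS_NMIE assumed to be bit 3 of mnstatus per Smrnmi:

    #include <stdint.h>

    #define MNSTATUS_NMIE_MASK (1ULL << 3)   /* assumed bit position */

    uint64_t field_get(uint64_t reg, uint64_t mask)
    {
        return (reg & mask) >> __builtin_ctzll(mask);
    }

    uint64_t field_set(uint64_t reg, uint64_t mask, uint64_t val)
    {
        return (reg & ~mask) | ((val << __builtin_ctzll(mask)) & mask);
    }

    /* e.g. re-enabling NMIs on MNRET: mnstatus = field_set(mnstatus, MNSTATUS_NMIE_MASK, 1); */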
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
@@ -472,7 +568,7 @@ target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
return cpu_ldb_mmu(env, addr, oi, ra);
return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
@@ -481,7 +577,7 @@ target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
return cpu_ldw_mmu(env, addr, oi, ra);
return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
@@ -490,7 +586,7 @@ target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
return cpu_ldl_mmu(env, addr, oi, ra);
return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
@@ -499,7 +595,7 @@ target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
return cpu_ldq_mmu(env, addr, oi, ra);
return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -508,7 +604,7 @@ void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
cpu_stb_mmu(env, addr, val, oi, ra);
cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -517,7 +613,7 @@ void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
cpu_stw_mmu(env, addr, val, oi, ra);
cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -526,7 +622,7 @@ void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
cpu_stl_mmu(env, addr, val, oi, ra);
cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -535,7 +631,7 @@ void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
cpu_stq_mmu(env, addr, val, oi, ra);
cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
/*

@@ -575,6 +575,13 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
int i;
uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
/* Update PMM field only if the value is valid according to Zjpm v1.0 */
if (riscv_cpu_cfg(env)->ext_smmpm &&
riscv_cpu_mxl(env) == MXL_RV64 &&
get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
mask |= MSECCFG_PMM;
}
trace_mseccfg_csr_write(env->mhartid, val);
@@ -590,12 +597,13 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
if (riscv_cpu_cfg(env)->ext_smepmp) {
/* Sticky bits */
val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
val |= (env->mseccfg & mask);
if ((val ^ env->mseccfg) & mask) {
tlb_flush(env_cpu(env));
}
} else {
val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
mask |= MSECCFG_RLB;
val &= ~(mask);
}
/* M-mode forward cfi to be enabled if cfi extension is implemented */

@@ -46,6 +46,7 @@ typedef enum {
MSECCFG_USEED = 1 << 8,
MSECCFG_SSEED = 1 << 9,
MSECCFG_MLPE = 1 << 10,
MSECCFG_PMM = 3ULL << 32,
} mseccfg_field_t;
typedef struct {
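MSECCFG_PMM above is the 2-bit M-mode pointer-masking field at mseccfg bits [33:32]. In mseccfg_csr_write() it is only added to the writable mask when Smmpm is present, the hart is RV64, and the incoming value is not the reserved encoding. A small illustrative check (the PMM encodings 0 = disabled, 2 = PMLEN 7, 3 = PMLEN 16, 1 = reserved follow the pointer-masking spec; the helper below is a sketch, not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSECCFG_PMM_MASK   (3ULL << 32)
    #define PMM_RESERVED       1u

    bool mseccfg_pmm_write_valid(uint64_t new_val, bool ext_smmpm, bool rv64)
    {
        unsigned pmm = (unsigned)((new_val & MSECCFG_PMM_MASK) >> 32);
        return ext_smmpm && rv64 && pmm != PMM_RESERVED;
    }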

@@ -212,6 +212,11 @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
break;
case CPU_CFG_OFFSET(ext_sha):
if (!cpu_misa_ext_is_user_set(RVH)) {
riscv_cpu_write_misa_bit(cpu, RVH, true);
}
/* fallthrough */
case CPU_CFG_OFFSET(ext_ssstateen):
cpu->cfg.ext_smstateen = true;
break;
@@ -352,6 +357,9 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
cpu->cfg.cboz_blocksize == 64;
cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
cpu->cfg.ext_ssstateen;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -955,6 +963,20 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
error_propagate(errp, local_err);
return;
}
#ifndef CONFIG_USER_ONLY
if (cpu->cfg.pmu_mask) {
riscv_pmu_init(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
if (cpu->cfg.ext_sscofpmf) {
cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
riscv_pmu_timer_cb, cpu);
}
}
#endif
}
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
@@ -1002,7 +1024,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
#ifndef CONFIG_USER_ONLY
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
tcg_cflags_set(CPU(cs), CF_PCREL);
@@ -1010,19 +1031,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
riscv_timer_init(cpu);
}
if (cpu->cfg.pmu_mask) {
riscv_pmu_init(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return false;
}
if (cpu->cfg.ext_sscofpmf) {
cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
riscv_pmu_timer_cb, cpu);
}
}
/* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
if (riscv_has_ext(env, RVH)) {
env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
@@ -1107,7 +1115,6 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
MISA_CFG(RVS, true),
MISA_CFG(RVU, true),
MISA_CFG(RVH, true),
MISA_CFG(RVJ, false),
MISA_CFG(RVV, false),
MISA_CFG(RVG, false),
MISA_CFG(RVB, false),
@@ -1394,8 +1401,8 @@ static void riscv_init_max_cpu_extensions(Object *obj)
CPURISCVState *env = &cpu->env;
const RISCVCPUMultiExtConfig *prop;
/* Enable RVG, RVJ and RVV that are disabled by default */
riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
/* Enable RVG and RVV that are disabled by default */
riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
isa_ext_update_enabled(cpu, prop->offset, true);
@@ -1423,6 +1430,25 @@ static void riscv_init_max_cpu_extensions(Object *obj)
if (env->misa_mxl != MXL_RV32) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
}
/*
* ext_smrnmi requires OpenSBI changes that our current
* image does not have. Disable it for now.
*/
if (cpu->cfg.ext_smrnmi) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
qemu_log("Smrnmi is disabled in the 'max' type CPU\n");
}
/*
* ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup to
* avoid generating a double trap. OpenSBI does not currently support it,
* so disable it for now.
*/
if (cpu->cfg.ext_smdbltrp) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
qemu_log("Smdbltrp is disabled in the 'max' type CPU\n");
}
}
static bool riscv_cpu_has_max_extensions(Object *cpu_obj)

@@ -9,3 +9,6 @@ pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %"
mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64
mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64
# op_helper.c
riscv_exception(uint32_t exception, const char *desc, uint64_t epc) "%u (%s) on epc 0x%"PRIx64""

@@ -42,9 +42,6 @@ static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
/* globals for PM CSRs */
static TCGv pm_mask;
static TCGv pm_base;
/*
* If an operation is being performed on less than TARGET_LONG_BITS,
@@ -106,9 +103,9 @@ typedef struct DisasContext {
bool vl_eq_vlmax;
CPUState *cs;
TCGv zero;
/* PointerMasking extension */
bool pm_mask_enabled;
bool pm_base_enabled;
/* actual address width */
uint8_t addr_xl;
bool addr_signed;
/* Ztso */
bool ztso;
/* Use icount trigger for native debug */
@@ -245,7 +242,7 @@ static void gen_update_pc(DisasContext *ctx, target_long diff)
ctx->pc_save = ctx->base.pc_next + diff;
}
static void generate_exception(DisasContext *ctx, int excp)
static void generate_exception(DisasContext *ctx, RISCVException excp)
{
gen_update_pc(ctx, 0);
gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
@@ -592,13 +589,10 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_addi_tl(addr, src1, imm);
if (ctx->pm_mask_enabled) {
tcg_gen_andc_tl(addr, addr, pm_mask);
} else if (get_address_xl(ctx) == MXL_RV32) {
tcg_gen_ext32u_tl(addr, addr);
}
if (ctx->pm_base_enabled) {
tcg_gen_or_tl(addr, addr, pm_base);
if (ctx->addr_signed) {
tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
} else {
tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
return addr;
@@ -611,14 +605,12 @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_add_tl(addr, src1, offs);
if (ctx->pm_mask_enabled) {
tcg_gen_andc_tl(addr, addr, pm_mask);
} else if (get_xl(ctx) == MXL_RV32) {
tcg_gen_ext32u_tl(addr, addr);
}
if (ctx->pm_base_enabled) {
tcg_gen_or_tl(addr, addr, pm_base);
if (ctx->addr_signed) {
tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
} else {
tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
return addr;
}
@@ -1246,8 +1238,14 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
if (get_xl(ctx) == MXL_RV32) {
ctx->addr_xl = 32;
ctx->addr_signed = false;
} else {
int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM);
ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm);
ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND);
}
ctx->ztso = cpu->cfg.ext_ztso;
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
@@ -1386,9 +1384,4 @@ void riscv_translate_init(void)
"load_res");
load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
"load_val");
/* Assign PM CSRs to tcg globals */
pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
"pmmask");
pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
"pmbase");
}
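With the pmmask/pmbase globals gone, the translator now masks addresses by (sign-)extracting the low addr_xl bits, where addr_xl = 64 - PMLEN on RV64 and 32 on RV32. This is the same operation adjust_addr_body() performs with shifts; a compact equivalence check (plain C mirroring what tcg_gen_extract_tl / tcg_gen_sextract_tl compute on a 64-bit value, illustrative only):

    #include <assert.h>
    #include <stdint.h>

    uint64_t extract_lo(uint64_t x, unsigned len)    /* zero-extend low len bits */
    {
        return (x << (64 - len)) >> (64 - len);
    }

    uint64_t sextract_lo(uint64_t x, unsigned len)   /* sign-extend low len bits */
    {
        return (uint64_t)(((int64_t)(x << (64 - len))) >> (64 - len));
    }

    int main(void)
    {
        unsigned addr_xl = 64 - 7;                    /* PMLEN 7 */
        uint64_t va = 0xff00123456789abcULL;          /* tag in the top 7 bits */
        assert(extract_lo(va, addr_xl)  == 0x0100123456789abcULL);
        assert(sextract_lo(va, addr_xl) == 0xff00123456789abcULL);
        return 0;
    }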

@@ -105,11 +105,6 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
return scale < 0 ? vlenb >> -scale : vlenb << scale;
}
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
{
return (addr & ~env->cur_pmmask) | env->cur_pmbase;
}
/*
* This function checks watchpoint before real load operation.
*
@@ -195,7 +190,7 @@ GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
static inline QEMU_ALWAYS_INLINE void
vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
void *vd, uint32_t evl, target_ulong addr,
uint32_t reg_start, uintptr_t ra, uint32_t esz,
bool is_load)
@@ -207,7 +202,7 @@ vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
}
static inline QEMU_ALWAYS_INLINE void
vext_continus_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
void *vd, uint32_t evl, uint32_t reg_start, void *host,
uint32_t esz, bool is_load)
{
@@ -342,8 +337,8 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
if (flags == 0) {
if (nf == 1) {
vext_continus_ldst_host(env, ldst_host, vd, evl, env->vstart, host,
esz, is_load);
vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart,
host, esz, is_load);
} else {
for (i = env->vstart; i < evl; ++i) {
k = 0;
@@ -357,7 +352,7 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
env->vstart += elems;
} else {
if (nf == 1) {
vext_continus_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
ra, esz, is_load);
} else {
/* load bytes from guest memory */
@@ -393,6 +388,22 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
return;
}
#if defined(CONFIG_USER_ONLY)
/*
* For data sizes <= 6 bytes, we get better performance by simply calling
* vext_continuous_ldst_tlb
*/
if (nf == 1 && (evl << log2_esz) <= 6) {
addr = base + (env->vstart << log2_esz);
vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra,
esz, is_load);
env->vstart = 0;
vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
return;
}
#endif
/* Calculate the page range of first page */
addr = base + ((env->vstart * nf) << log2_esz);
page_split = -(addr | TARGET_PAGE_MASK);
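Two arithmetic notes on the fast paths above: the CONFIG_USER_ONLY shortcut fires only when the whole access, evl elements of (1 << log2_esz) bytes each, fits in at most 6 bytes, and page_split = -(addr | TARGET_PAGE_MASK) is the number of bytes left before the next page boundary. A standalone check, assuming 4 KiB target pages (illustrative, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))          /* stands in for TARGET_PAGE_MASK */

    int main(void)
    {
        uint64_t addr = 0x80003f40ULL;
        uint64_t page_split = -(addr | PAGE_MASK);
        assert(page_split == PAGE_SIZE - (addr & (PAGE_SIZE - 1)));   /* 0xc0 bytes left */

        /* six 1-byte elements (6 bytes) take the shortcut; one 8-byte element does not */
        assert((6u << 0) <= 6);
        assert((1u << 3) > 6);
        return 0;
    }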
