RISC-V PR for 9.2
Merge tag 'pull-riscv-to-apply-20241031-1' of https://github.com/alistair23/qemu into staging

* Fix an access to VXSAT
* Expose RV32 cpu to RV64 QEMU
* Don't clear PLIC pending bits on IRQ lowering
* Make PLIC zeroth priority register read-only
* Set vtype.vill on CPU reset
* Check and update APLIC pending when write sourcecfg
* Avoid dropping characters with HTIF
* Apply FIFO backpressure to guests using SiFive UART
* Support for control flow integrity extensions
* Support for the IOMMU with the virt machine
* set 'aia_mode' to default in error path
* clarify how 'riscv-aia' default works

# gpg: Signature made Thu 31 Oct 2024 03:51:48 GMT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65 9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20241031-1' of https://github.com/alistair23/qemu: (50 commits)
  target/riscv: Fix vcompress with rvv_ta_all_1s
  target/riscv/kvm: clarify how 'riscv-aia' default works
  target/riscv/kvm: set 'aia_mode' to default in error path
  docs/specs: add riscv-iommu
  qtest/riscv-iommu-test: add init queues test
  hw/riscv/riscv-iommu: add DBG support
  hw/riscv/riscv-iommu: add ATS support
  hw/riscv/riscv-iommu: add Address Translation Cache (IOATC)
  test/qtest: add riscv-iommu-pci tests
  hw/riscv/virt.c: support for RISC-V IOMMU PCIDevice hotplug
  hw/riscv: add riscv-iommu-pci reference device
  pci-ids.rst: add Red Hat pci-id for RISC-V IOMMU device
  hw/riscv: add RISC-V IOMMU base emulation
  hw/riscv: add riscv-iommu-bits.h
  exec/memtxattr: add process identifier to the transaction attributes
  target/riscv: Expose zicfiss extension as a cpu property
  disas/riscv: enable disassembly for compressed sspush/sspopchk
  disas/riscv: enable disassembly for zicfiss instructions
  target/riscv: compressed encodings for sspush and sspopchk
  target/riscv: implement zicfiss instructions
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 92ec780519
65 changed files with 4790 additions and 139 deletions
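For orientation, the control-flow integrity and IOMMU features in this pull request are exposed through ordinary QOM properties and devices. The sketch below is illustrative only: it assumes the 'zicfilp' and 'zicfiss' CPU flags added by the MULTI_EXT_CFG_BOOL entries in this diff and the 'riscv-iommu-pci' reference device named in the commit list, so exact option spellings may differ on a given build.

    # Hypothetical invocation: enable the new CFI extensions and attach the reference IOMMU
    qemu-system-riscv64 -machine virt \
        -cpu rv64,zicfilp=on,zicfiss=on \
        -device riscv-iommu-pci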
@@ -30,6 +30,7 @@
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define TYPE_RISCV_CPU_MAX RISCV_CPU_TYPE_NAME("max")
#define TYPE_RISCV_CPU_MAX32 RISCV_CPU_TYPE_NAME("max32")
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
@@ -106,6 +106,8 @@ const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),

@@ -449,11 +451,9 @@ static void riscv_max_cpu_init(Object *obj)
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
    set_satp_mode_max_supported(RISCV_CPU(obj),
                                riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
                                VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

@@ -615,7 +615,10 @@ static void rv64e_bare_cpu_init(Object *obj)
    riscv_cpu_set_misa_ext(env, RVE);
}

#else /* !TARGET_RISCV64 */
#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{

@@ -1003,12 +1006,23 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {

@@ -1460,6 +1474,8 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),

@@ -2941,6 +2957,12 @@ static const TypeInfo riscv_cpu_type_infos[] = {
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),

@@ -2948,8 +2970,13 @@ static const TypeInfo riscv_cpu_type_infos[] = {
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
#endif

#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
@@ -46,8 +46,13 @@ typedef struct CPUArchState CPURISCVState;
/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 * 2: more information about instruction
 */
#define TARGET_INSN_START_EXTRA_WORDS 1
#define TARGET_INSN_START_EXTRA_WORDS 2
/*
 * b0: Whether a instruction always raise a store AMO or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

#define RV(x) ((target_ulong)1 << (x - 'A'))

@@ -230,12 +235,24 @@ struct CPUArchState {
    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env place holder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;

@@ -445,12 +462,9 @@ struct CPUArchState {
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;

@@ -544,6 +558,8 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);

@@ -616,6 +632,11 @@ FIELD(TB_FLAGS, ITRIGGER, 22, 1)
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 30, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)

@@ -709,8 +730,11 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif

@@ -34,6 +34,9 @@

/* Control and Status Registers */

/* zicfiss user ssp csr */
#define CSR_SSP             0x011

/* User Trap Setup */
#define CSR_USTATUS         0x000
#define CSR_UIE             0x004

@@ -552,6 +555,8 @@
#define MSTATUS_TVM         0x00100000 /* since: priv-1.10 */
#define MSTATUS_TW          0x00200000 /* since: priv-1.10 */
#define MSTATUS_TSR         0x00400000 /* since: priv-1.10 */
#define MSTATUS_SPELP       0x00800000 /* zicfilp */
#define MSTATUS_MPELP       0x020000000000 /* zicfilp */
#define MSTATUS_GVA         0x4000000000ULL
#define MSTATUS_MPV         0x8000000000ULL

@@ -582,6 +587,7 @@ typedef enum {
#define SSTATUS_XS          0x00018000
#define SSTATUS_SUM         0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR         0x00080000
#define SSTATUS_SPELP       MSTATUS_SPELP /* zicfilp */

#define SSTATUS64_UXL       0x0000000300000000ULL

@@ -689,6 +695,11 @@ typedef enum RISCVException {
    RISCV_EXCP_SEMIHOST = 0x3f,
} RISCVException;

/* zicfilp defines lp violation results in sw check with tval = 2 */
#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2
/* zicfiss defines ss violation results in sw check with tval = 3 */
#define RISCV_EXCP_SW_CHECK_BCFI_TVAL 3

#define RISCV_EXCP_INT_FLAG    0x80000000
#define RISCV_EXCP_INT_MASK    0x7fffffff

@@ -754,6 +765,8 @@ typedef enum RISCVException {

/* Execution environment configuration bits */
#define MENVCFG_FIOM    BIT(0)
#define MENVCFG_LPE     BIT(2) /* zicfilp */
#define MENVCFG_SSE     BIT(3) /* zicfiss */
#define MENVCFG_CBIE    (3UL << 4)
#define MENVCFG_CBCFE   BIT(6)
#define MENVCFG_CBZE    BIT(7)

@@ -767,11 +780,15 @@ typedef enum RISCVException {
#define MENVCFGH_STCE   BIT(31)

#define SENVCFG_FIOM    MENVCFG_FIOM
#define SENVCFG_LPE     MENVCFG_LPE
#define SENVCFG_SSE     MENVCFG_SSE
#define SENVCFG_CBIE    MENVCFG_CBIE
#define SENVCFG_CBCFE   MENVCFG_CBCFE
#define SENVCFG_CBZE    MENVCFG_CBZE

#define HENVCFG_FIOM    MENVCFG_FIOM
#define HENVCFG_LPE     MENVCFG_LPE
#define HENVCFG_SSE     MENVCFG_SSE
#define HENVCFG_CBIE    MENVCFG_CBIE
#define HENVCFG_CBCFE   MENVCFG_CBCFE
#define HENVCFG_CBZE    MENVCFG_CBZE
@@ -67,6 +67,8 @@ struct RISCVCPUConfig {
    bool ext_zicbom;
    bool ext_zicbop;
    bool ext_zicboz;
    bool ext_zicfilp;
    bool ext_zicfiss;
    bool ext_zicond;
    bool ext_zihintntl;
    bool ext_zihintpause;
@@ -33,6 +33,7 @@
#include "cpu_bits.h"
#include "debug.h"
#include "tcg/oversized-guest.h"
#include "pmp.h"

int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
{

@@ -63,6 +64,62 @@ int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
#endif
}

bool cpu_get_fcfien(CPURISCVState *env)
{
    /* no cfi extension, return false */
    if (!env_archcpu(env)->cfg.ext_zicfilp) {
        return false;
    }

    switch (env->priv) {
    case PRV_U:
        if (riscv_has_ext(env, RVS)) {
            return env->senvcfg & SENVCFG_LPE;
        }
        return env->menvcfg & MENVCFG_LPE;
#ifndef CONFIG_USER_ONLY
    case PRV_S:
        if (env->virt_enabled) {
            return env->henvcfg & HENVCFG_LPE;
        }
        return env->menvcfg & MENVCFG_LPE;
    case PRV_M:
        return env->mseccfg & MSECCFG_MLPE;
#endif
    default:
        g_assert_not_reached();
    }
}

bool cpu_get_bcfien(CPURISCVState *env)
{
    /* no cfi extension, return false */
    if (!env_archcpu(env)->cfg.ext_zicfiss) {
        return false;
    }

    switch (env->priv) {
    case PRV_U:
        /*
         * If S is not implemented then shadow stack for U can't be turned on
         * It is checked in `riscv_cpu_validate_set_extensions`, so no need to
         * check here or assert here
         */
        return env->senvcfg & SENVCFG_SSE;
#ifndef CONFIG_USER_ONLY
    case PRV_S:
        if (env->virt_enabled) {
            return env->henvcfg & HENVCFG_SSE;
        }
        return env->menvcfg & MENVCFG_SSE;
    case PRV_M: /* M-mode shadow stack is always off */
        return false;
#endif
    default:
        g_assert_not_reached();
    }
}

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags)
{
@ -104,6 +161,20 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
|
|||
flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
|
||||
}
|
||||
|
||||
if (cpu_get_fcfien(env)) {
|
||||
/*
|
||||
* For Forward CFI, only the expectation of a lpad at
|
||||
* the start of the block is tracked via env->elp. env->elp
|
||||
* is turned on during jalr translation.
|
||||
*/
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
|
||||
}
|
||||
|
||||
if (cpu_get_bcfien(env)) {
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
fs = EXT_STATUS_DIRTY;
|
||||
vs = EXT_STATUS_DIRTY;
|
||||
|
@ -546,6 +617,15 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
|
|||
}
|
||||
bool current_virt = env->virt_enabled;
|
||||
|
||||
/*
|
||||
* If zicfilp extension available and henvcfg.LPE = 1,
|
||||
* then apply SPELP mask on mstatus
|
||||
*/
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp &&
|
||||
get_field(env->henvcfg, HENVCFG_LPE)) {
|
||||
mstatus_mask |= SSTATUS_SPELP;
|
||||
}
|
||||
|
||||
g_assert(riscv_has_ext(env, RVH));
|
||||
|
||||
if (current_virt) {
|
||||
|
@ -804,7 +884,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
|
|||
target_ulong *fault_pte_addr,
|
||||
int access_type, int mmu_idx,
|
||||
bool first_stage, bool two_stage,
|
||||
bool is_debug)
|
||||
bool is_debug, bool is_probe)
|
||||
{
|
||||
/*
|
||||
* NOTE: the env->pc value visible here will not be
|
||||
|
@ -818,6 +898,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
|
|||
hwaddr ppn;
|
||||
int napot_bits = 0;
|
||||
target_ulong napot_mask;
|
||||
bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
|
||||
bool sstack_page = false;
|
||||
|
||||
/*
|
||||
* Check if we should use the background registers for the two
|
||||
|
@ -890,12 +972,14 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
|
|||
|
||||
CPUState *cs = env_cpu(env);
|
||||
int va_bits = PGSHIFT + levels * ptidxbits + widened;
|
||||
int sxlen = 16 << riscv_cpu_sxl(env);
|
||||
int sxlen_bytes = sxlen / 8;
|
||||
|
||||
if (first_stage == true) {
|
||||
target_ulong mask, masked_msbs;
|
||||
|
||||
if (TARGET_LONG_BITS > (va_bits - 1)) {
|
||||
mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
|
||||
if (sxlen > (va_bits - 1)) {
|
||||
mask = (1L << (sxlen - (va_bits - 1))) - 1;
|
||||
} else {
|
||||
mask = 0;
|
||||
}
|
||||
|
@ -948,7 +1032,7 @@ restart:
|
|||
int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
|
||||
base, NULL, MMU_DATA_LOAD,
|
||||
MMUIdx_U, false, true,
|
||||
is_debug);
|
||||
is_debug, false);
|
||||
|
||||
if (vbase_ret != TRANSLATE_SUCCESS) {
|
||||
if (fault_pte_addr) {
|
||||
|
@ -964,7 +1048,7 @@ restart:
|
|||
|
||||
int pmp_prot;
|
||||
int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
|
||||
sizeof(target_ulong),
|
||||
sxlen_bytes,
|
||||
MMU_DATA_LOAD, PRV_S);
|
||||
if (pmp_ret != TRANSLATE_SUCCESS) {
|
||||
return TRANSLATE_PMP_FAIL;
|
||||
|
@ -1026,21 +1110,43 @@ restart:
|
|||
return TRANSLATE_FAIL;
|
||||
}
|
||||
|
||||
target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
|
||||
/* Check for reserved combinations of RWX flags. */
|
||||
switch (pte & (PTE_R | PTE_W | PTE_X)) {
|
||||
case PTE_W:
|
||||
switch (rwx) {
|
||||
case PTE_W | PTE_X:
|
||||
return TRANSLATE_FAIL;
|
||||
case PTE_W:
|
||||
/* if bcfi enabled, PTE_W is not reserved and shadow stack page */
|
||||
if (cpu_get_bcfien(env) && first_stage) {
|
||||
sstack_page = true;
|
||||
/*
|
||||
* if ss index, read and write allowed. else if not a probe
|
||||
* then only read allowed
|
||||
*/
|
||||
rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
|
||||
break;
|
||||
}
|
||||
return TRANSLATE_FAIL;
|
||||
case PTE_R:
|
||||
/*
|
||||
* no matter what's the `access_type`, shadow stack access to readonly
|
||||
* memory are always store page faults. During unwind, loads will be
|
||||
* promoted as store fault.
|
||||
*/
|
||||
if (is_sstack_idx) {
|
||||
return TRANSLATE_FAIL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
int prot = 0;
|
||||
if (pte & PTE_R) {
|
||||
if (rwx & PTE_R) {
|
||||
prot |= PAGE_READ;
|
||||
}
|
||||
if (pte & PTE_W) {
|
||||
if (rwx & PTE_W) {
|
||||
prot |= PAGE_WRITE;
|
||||
}
|
||||
if (pte & PTE_X) {
|
||||
if (rwx & PTE_X) {
|
||||
bool mxr = false;
|
||||
|
||||
/*
|
||||
|
@ -1084,8 +1190,11 @@ restart:
|
|||
}
|
||||
|
||||
if (!((prot >> access_type) & 1)) {
|
||||
/* Access check failed */
|
||||
return TRANSLATE_FAIL;
|
||||
/*
|
||||
* Access check failed, access check failures for shadow stack are
|
||||
* access faults.
|
||||
*/
|
||||
return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
|
||||
}
|
||||
|
||||
target_ulong updated_pte = pte;
|
||||
|
@ -1116,7 +1225,7 @@ restart:
|
|||
* it is no longer valid and we must re-walk the page table.
|
||||
*/
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = sizeof(target_ulong), addr1;
|
||||
hwaddr l = sxlen_bytes, addr1;
|
||||
mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
|
||||
false, MEMTXATTRS_UNSPECIFIED);
|
||||
if (memory_region_is_ram(mr)) {
|
||||
|
@ -1128,7 +1237,12 @@ restart:
|
|||
*/
|
||||
*pte_pa = pte = updated_pte;
|
||||
#else
|
||||
target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
|
||||
target_ulong old_pte;
|
||||
if (riscv_cpu_sxl(env) == MXL_RV32) {
|
||||
old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
|
||||
} else {
|
||||
old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
|
||||
}
|
||||
if (old_pte != pte) {
|
||||
goto restart;
|
||||
}
|
||||
|
@ -1223,13 +1337,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
|
|||
int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
|
||||
|
||||
if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
|
||||
true, env->virt_enabled, true)) {
|
||||
true, env->virt_enabled, true, false)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (env->virt_enabled) {
|
||||
if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
|
||||
0, MMUIdx_U, false, true, true)) {
|
||||
0, MMUIdx_U, false, true, true, false)) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
@ -1272,9 +1386,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
|
|||
break;
|
||||
case MMU_DATA_LOAD:
|
||||
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
|
||||
/* shadow stack mis aligned accesses are access faults */
|
||||
if (mmu_idx & MMU_IDX_SS_WRITE) {
|
||||
cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
|
||||
}
|
||||
break;
|
||||
case MMU_DATA_STORE:
|
||||
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
|
||||
/* shadow stack mis aligned accesses are access faults */
|
||||
if (mmu_idx & MMU_IDX_SS_WRITE) {
|
||||
cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
|
@ -1335,7 +1457,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|||
/* Two stage lookup */
|
||||
ret = get_physical_address(env, &pa, &prot, address,
|
||||
&env->guest_phys_fault_addr, access_type,
|
||||
mmu_idx, true, true, false);
|
||||
mmu_idx, true, true, false, probe);
|
||||
|
||||
/*
|
||||
* A G-stage exception may be triggered during two state lookup.
|
||||
|
@ -1358,7 +1480,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|||
|
||||
ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
|
||||
access_type, MMUIdx_U, false, true,
|
||||
false);
|
||||
false, probe);
|
||||
|
||||
qemu_log_mask(CPU_LOG_MMU,
|
||||
"%s 2nd-stage address=%" VADDR_PRIx
|
||||
|
@ -1395,7 +1517,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|||
} else {
|
||||
/* Single stage lookup */
|
||||
ret = get_physical_address(env, &pa, &prot, address, NULL,
|
||||
access_type, mmu_idx, true, false, false);
|
||||
access_type, mmu_idx, true, false, false,
|
||||
probe);
|
||||
|
||||
qemu_log_mask(CPU_LOG_MMU,
|
||||
"%s address=%" VADDR_PRIx " ret %d physical "
|
||||
|
@ -1641,6 +1764,22 @@ static target_ulong riscv_transformed_insn(CPURISCVState *env,
|
|||
return xinsn;
|
||||
}
|
||||
|
||||
static target_ulong promote_load_fault(target_ulong orig_cause)
|
||||
{
|
||||
switch (orig_cause) {
|
||||
case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
|
||||
return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
|
||||
|
||||
case RISCV_EXCP_LOAD_ACCESS_FAULT:
|
||||
return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
|
||||
|
||||
case RISCV_EXCP_LOAD_PAGE_FAULT:
|
||||
return RISCV_EXCP_STORE_PAGE_FAULT;
|
||||
}
|
||||
|
||||
/* if no promotion, return original cause */
|
||||
return orig_cause;
|
||||
}
|
||||
/*
|
||||
* Handle Traps
|
||||
*
|
||||
|
@ -1653,6 +1792,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
CPURISCVState *env = &cpu->env;
|
||||
bool virt = env->virt_enabled;
|
||||
bool write_gva = false;
|
||||
bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
uint64_t s;
|
||||
|
||||
/*
|
||||
|
@ -1670,6 +1810,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
target_ulong tinst = 0;
|
||||
target_ulong htval = 0;
|
||||
target_ulong mtval2 = 0;
|
||||
int sxlen = 0;
|
||||
int mxlen = 0;
|
||||
|
||||
if (!async) {
|
||||
/* set tval to badaddr for traps with address information */
|
||||
|
@ -1688,6 +1830,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
|
||||
case RISCV_EXCP_LOAD_PAGE_FAULT:
|
||||
case RISCV_EXCP_STORE_PAGE_FAULT:
|
||||
if (always_storeamo) {
|
||||
cause = promote_load_fault(cause);
|
||||
}
|
||||
write_gva = env->two_stage_lookup;
|
||||
tval = env->badaddr;
|
||||
if (env->two_stage_indirect_lookup) {
|
||||
|
@ -1729,6 +1874,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
cs->watchpoint_hit = NULL;
|
||||
}
|
||||
break;
|
||||
case RISCV_EXCP_SW_CHECK:
|
||||
tval = env->sw_check_code;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1760,6 +1908,11 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
if (env->priv <= PRV_S && cause < 64 &&
|
||||
(((deleg >> cause) & 1) || s_injected || vs_injected)) {
|
||||
/* handle the trap in S-mode */
|
||||
/* save elp status */
|
||||
if (cpu_get_fcfien(env)) {
|
||||
env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
|
||||
}
|
||||
|
||||
if (riscv_has_ext(env, RVH)) {
|
||||
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
|
||||
|
||||
|
@ -1798,7 +1951,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
s = set_field(s, MSTATUS_SPP, env->priv);
|
||||
s = set_field(s, MSTATUS_SIE, 0);
|
||||
env->mstatus = s;
|
||||
env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
|
||||
sxlen = 16 << riscv_cpu_sxl(env);
|
||||
env->scause = cause | ((target_ulong)async << (sxlen - 1));
|
||||
env->sepc = env->pc;
|
||||
env->stval = tval;
|
||||
env->htval = htval;
|
||||
|
@ -1808,6 +1962,11 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
riscv_cpu_set_mode(env, PRV_S, virt);
|
||||
} else {
|
||||
/* handle the trap in M-mode */
|
||||
/* save elp status */
|
||||
if (cpu_get_fcfien(env)) {
|
||||
env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
|
||||
}
|
||||
|
||||
if (riscv_has_ext(env, RVH)) {
|
||||
if (env->virt_enabled) {
|
||||
riscv_cpu_swap_hypervisor_regs(env);
|
||||
|
@ -1829,7 +1988,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
s = set_field(s, MSTATUS_MPP, env->priv);
|
||||
s = set_field(s, MSTATUS_MIE, 0);
|
||||
env->mstatus = s;
|
||||
env->mcause = cause | ~(((target_ulong)-1) >> async);
|
||||
mxlen = 16 << riscv_cpu_mxl(env);
|
||||
env->mcause = cause | ((target_ulong)async << (mxlen - 1));
|
||||
env->mepc = env->pc;
|
||||
env->mtval = tval;
|
||||
env->mtval2 = mtval2;
|
||||
|
@ -1839,6 +1999,13 @@ void riscv_cpu_do_interrupt(CPUState *cs)
|
|||
riscv_cpu_set_mode(env, PRV_M, virt);
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt/exception/trap delivery is asynchronous event and as per
|
||||
* zicfilp spec CPU should clear up the ELP state. No harm in clearing
|
||||
* unconditionally.
|
||||
*/
|
||||
env->elp = false;
|
||||
|
||||
/*
|
||||
* NOTE: it is not necessary to yield load reservations here. It is only
|
||||
* necessary for an SC from "another hart" to cause a load reservation
|
||||
|
|
|
@@ -15,5 +15,6 @@
#define xA6 16
#define xA7 17  /* syscall number for RVI ABI */
#define xT0 5   /* syscall number for RVE ABI */
#define xT2 7

#endif
@ -184,6 +184,25 @@ static RISCVException zcmt(CPURISCVState *env, int csrno)
|
|||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
static RISCVException cfi_ss(CPURISCVState *env, int csrno)
|
||||
{
|
||||
if (!env_archcpu(env)->cfg.ext_zicfiss) {
|
||||
return RISCV_EXCP_ILLEGAL_INST;
|
||||
}
|
||||
|
||||
/* if bcfi not active for current env, access to csr is illegal */
|
||||
if (!cpu_get_bcfien(env)) {
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
if (env->debugger) {
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
#endif
|
||||
return RISCV_EXCP_ILLEGAL_INST;
|
||||
}
|
||||
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
static RISCVException mctr(CPURISCVState *env, int csrno)
|
||||
{
|
||||
|
@ -622,6 +641,19 @@ static RISCVException seed(CPURISCVState *env, int csrno)
|
|||
#endif
|
||||
}
|
||||
|
||||
/* zicfiss CSR_SSP read and write */
|
||||
static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val)
|
||||
{
|
||||
*val = env->ssp;
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
static int write_ssp(CPURISCVState *env, int csrno, target_ulong val)
|
||||
{
|
||||
env->ssp = val;
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
/* User Floating-Point CSRs */
|
||||
static RISCVException read_fflags(CPURISCVState *env, int csrno,
|
||||
target_ulong *val)
|
||||
|
@ -734,7 +766,7 @@ static RISCVException write_vxrm(CPURISCVState *env, int csrno,
|
|||
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
|
||||
target_ulong *val)
|
||||
{
|
||||
*val = env->vxsat;
|
||||
*val = env->vxsat & BIT(0);
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
|
@ -744,7 +776,7 @@ static RISCVException write_vxsat(CPURISCVState *env, int csrno,
|
|||
#if !defined(CONFIG_USER_ONLY)
|
||||
env->mstatus |= MSTATUS_VS;
|
||||
#endif
|
||||
env->vxsat = val;
|
||||
env->vxsat = val & BIT(0);
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
|
@ -1377,6 +1409,7 @@ static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
|
|||
(1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
|
||||
(1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
|
||||
(1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
|
||||
(1ULL << (RISCV_EXCP_SW_CHECK)) | \
|
||||
(1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
|
||||
(1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
|
||||
(1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
|
||||
|
@ -1598,6 +1631,11 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
|
|||
}
|
||||
}
|
||||
|
||||
/* If cfi lp extension is available, then apply cfi lp mask */
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
|
||||
}
|
||||
|
||||
mstatus = (mstatus & ~mask) | (val & mask);
|
||||
|
||||
env->mstatus = mstatus;
|
||||
|
@ -2344,6 +2382,14 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
|
|||
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
|
||||
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
|
||||
(cfg->ext_svadu ? MENVCFG_ADUE : 0);
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= MENVCFG_LPE;
|
||||
}
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfiss) {
|
||||
mask |= MENVCFG_SSE;
|
||||
}
|
||||
}
|
||||
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
|
||||
|
||||
|
@ -2396,6 +2442,17 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
|
|||
return ret;
|
||||
}
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= SENVCFG_LPE;
|
||||
}
|
||||
|
||||
/* Higher mode SSE must be ON for next-less mode SSE to be ON */
|
||||
if (env_archcpu(env)->cfg.ext_zicfiss &&
|
||||
get_field(env->menvcfg, MENVCFG_SSE) &&
|
||||
(env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
|
||||
mask |= SENVCFG_SSE;
|
||||
}
|
||||
|
||||
env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
@ -2433,6 +2490,16 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
|
|||
|
||||
if (riscv_cpu_mxl(env) == MXL_RV64) {
|
||||
mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= HENVCFG_LPE;
|
||||
}
|
||||
|
||||
/* H can light up SSE for VS only if HS had it from menvcfg */
|
||||
if (env_archcpu(env)->cfg.ext_zicfiss &&
|
||||
get_field(env->menvcfg, MENVCFG_SSE)) {
|
||||
mask |= HENVCFG_SSE;
|
||||
}
|
||||
}
|
||||
|
||||
env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
|
||||
|
@ -2897,6 +2964,10 @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
|
|||
mask |= SSTATUS64_UXL;
|
||||
}
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= SSTATUS_SPELP;
|
||||
}
|
||||
|
||||
*val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
@ -2908,6 +2979,11 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
|
|||
if (env->xl != MXL_RV32 || env->debugger) {
|
||||
mask |= SSTATUS64_UXL;
|
||||
}
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= SSTATUS_SPELP;
|
||||
}
|
||||
|
||||
/* TODO: Use SXL not MXL. */
|
||||
*val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
|
||||
return RISCV_EXCP_NONE;
|
||||
|
@ -2923,6 +2999,11 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
|
|||
mask |= SSTATUS64_UXL;
|
||||
}
|
||||
}
|
||||
|
||||
if (env_archcpu(env)->cfg.ext_zicfilp) {
|
||||
mask |= SSTATUS_SPELP;
|
||||
}
|
||||
|
||||
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
|
||||
return write_mstatus(env, CSR_MSTATUS, newval);
|
||||
}
|
||||
|
@ -4934,6 +5015,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
|
|||
/* Zcmt Extension */
|
||||
[CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
|
||||
|
||||
/* zicfiss Extension, shadow stack register */
|
||||
[CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
/* Machine Timers and Counters */
|
||||
[CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
|
||||
|
|
|
@@ -140,6 +140,10 @@ sw 110 ... ... .. ... 00 @cs_w
addi      000 . ..... ..... 01 @ci
addi      010 . ..... ..... 01 @c_li
{
  # c.sspush x1 carving out of zcmops
  sspush    011 0 00001 00000 01 &r2_s rs2=1 rs1=0
  # c.sspopchk x5 carving out of zcmops
  sspopchk  011 0 00101 00000 01 &r2 rs1=5 rd=0
  c_mop_n   011 0 0 n:3 1 00000 01
  illegal   011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0
  addi      011 . 00010 ..... 01 @c_addi16sp
@ -123,7 +123,10 @@ sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
|
|||
|
||||
# *** RV32I Base Instruction Set ***
|
||||
lui .................... ..... 0110111 @u
|
||||
auipc .................... ..... 0010111 @u
|
||||
{
|
||||
lpad label:20 00000 0010111
|
||||
auipc .................... ..... 0010111 @u
|
||||
}
|
||||
jal .................... ..... 1101111 @j
|
||||
jalr ............ ..... 000 ..... 1100111 @i
|
||||
beq ....... ..... ..... 000 ..... 1100011 @b
|
||||
|
@ -243,6 +246,7 @@ remud 0000001 ..... ..... 111 ..... 1111011 @r
|
|||
lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld
|
||||
sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st
|
||||
amoswap_w 00001 . . ..... ..... 010 ..... 0101111 @atom_st
|
||||
ssamoswap_w 01001 . . ..... ..... 010 ..... 0101111 @atom_st
|
||||
amoadd_w 00000 . . ..... ..... 010 ..... 0101111 @atom_st
|
||||
amoxor_w 00100 . . ..... ..... 010 ..... 0101111 @atom_st
|
||||
amoand_w 01100 . . ..... ..... 010 ..... 0101111 @atom_st
|
||||
|
@ -256,6 +260,7 @@ amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st
|
|||
lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld
|
||||
sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st
|
||||
amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st
|
||||
ssamoswap_d 01001 . . ..... ..... 011 ..... 0101111 @atom_st
|
||||
amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st
|
||||
amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st
|
||||
amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st
|
||||
|
@ -1019,8 +1024,23 @@ amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st
|
|||
amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st
|
||||
|
||||
# *** Zimop may-be-operation extension ***
|
||||
mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
|
||||
mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
|
||||
{
|
||||
# zicfiss instructions carved out of mop.r
|
||||
[
|
||||
ssrdp 1100110 11100 00000 100 rd:5 1110011
|
||||
sspopchk 1100110 11100 00001 100 00000 1110011 &r2 rs1=1 rd=0
|
||||
sspopchk 1100110 11100 00101 100 00000 1110011 &r2 rs1=5 rd=0
|
||||
]
|
||||
mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
|
||||
}
|
||||
{
|
||||
# zicfiss instruction carved out of mop.rr
|
||||
[
|
||||
sspush 1100111 00001 00000 100 00000 1110011 &r2_s rs2=1 rs1=0
|
||||
sspush 1100111 00101 00000 100 00000 1110011 &r2_s rs2=5 rs1=0
|
||||
]
|
||||
mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
|
||||
}
|
||||
|
||||
# *** Zabhb Standard Extension ***
|
||||
amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st
|
||||
|
|
|
@ -78,7 +78,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
|
|||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (has_ext(ctx, RVS)) {
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
translator_io_start(&ctx->base);
|
||||
gen_helper_sret(cpu_pc, tcg_env);
|
||||
exit_tb(ctx); /* no chaining */
|
||||
|
@ -95,7 +95,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
|
|||
static bool trans_mret(DisasContext *ctx, arg_mret *a)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
translator_io_start(&ctx->base);
|
||||
gen_helper_mret(cpu_pc, tcg_env);
|
||||
exit_tb(ctx); /* no chaining */
|
||||
|
@ -109,7 +109,7 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
|
|||
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_update_pc(ctx, ctx->cur_insn_len);
|
||||
gen_helper_wfi(tcg_env);
|
||||
return true;
|
||||
|
@ -121,7 +121,7 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
|
|||
static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_helper_tlb_flush(tcg_env);
|
||||
return true;
|
||||
#endif
|
||||
|
|
|
@ -34,7 +34,7 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
|
|||
{
|
||||
TCGv src1;
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
src1 = get_address(ctx, a->rs1, 0);
|
||||
if (a->rl) {
|
||||
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
|
||||
|
@ -61,7 +61,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
|
|||
TCGLabel *l1 = gen_new_label();
|
||||
TCGLabel *l2 = gen_new_label();
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
src1 = get_address(ctx, a->rs1, 0);
|
||||
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
|
|||
memop |= MO_ATOM_IFALIGN;
|
||||
}
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
addr = get_address(ctx, a->rs1, a->imm);
|
||||
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
|
||||
|
||||
|
@ -85,7 +85,7 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
|
|||
memop |= MO_ATOM_IFALIGN;
|
||||
}
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
addr = get_address(ctx, a->rs1, a->imm);
|
||||
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
|
||||
return true;
|
||||
|
|
|
@ -52,7 +52,7 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
|
|||
memop |= MO_ATOM_WITHIN16;
|
||||
}
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
addr = get_address(ctx, a->rs1, a->imm);
|
||||
dest = cpu_fpr[a->rd];
|
||||
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
|
||||
|
@ -74,7 +74,7 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
|
|||
memop |= MO_ATOM_WITHIN16;
|
||||
}
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
addr = get_address(ctx, a->rs1, a->imm);
|
||||
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
|
||||
return true;
|
||||
|
|
|
@ -44,7 +44,7 @@ static bool do_hlv(DisasContext *ctx, arg_r2 *a,
|
|||
TCGv dest = dest_gpr(ctx, a->rd);
|
||||
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
func(dest, tcg_env, addr);
|
||||
gen_set_gpr(ctx, a->rd, dest);
|
||||
return true;
|
||||
|
@ -56,7 +56,7 @@ static bool do_hsv(DisasContext *ctx, arg_r2_s *a,
|
|||
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
|
||||
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
func(tcg_env, addr, data);
|
||||
return true;
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a)
|
|||
{
|
||||
REQUIRE_EXT(ctx, RVH);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_helper_hyp_gvma_tlb_flush(tcg_env);
|
||||
return true;
|
||||
#endif
|
||||
|
@ -158,7 +158,7 @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a)
|
|||
{
|
||||
REQUIRE_EXT(ctx, RVH);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_helper_hyp_tlb_flush(tcg_env);
|
||||
return true;
|
||||
#endif
|
||||
|
|
|
@ -36,6 +36,49 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a)
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool trans_lpad(DisasContext *ctx, arg_lpad *a)
|
||||
{
|
||||
/*
|
||||
* fcfi_lp_expected can set only if fcfi was eanbled.
|
||||
* translate further only if fcfi_lp_expected set.
|
||||
* lpad comes from NOP space anyways, so return true if
|
||||
* fcfi_lp_expected is false.
|
||||
*/
|
||||
if (!ctx->fcfi_lp_expected) {
|
||||
return true;
|
||||
}
|
||||
|
||||
ctx->fcfi_lp_expected = false;
|
||||
if ((ctx->base.pc_next) & 0x3) {
|
||||
/*
|
||||
* misaligned, according to spec we should raise sw check exception
|
||||
*/
|
||||
tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
|
||||
tcg_env, offsetof(CPURISCVState, sw_check_code));
|
||||
gen_helper_raise_exception(tcg_env,
|
||||
tcg_constant_i32(RISCV_EXCP_SW_CHECK));
|
||||
return true;
|
||||
}
|
||||
|
||||
/* per spec, label check performed only when embedded label non-zero */
|
||||
if (a->label != 0) {
|
||||
TCGLabel *skip = gen_new_label();
|
||||
TCGv tmp = tcg_temp_new();
|
||||
tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20);
|
||||
tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip);
|
||||
tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
|
||||
tcg_env, offsetof(CPURISCVState, sw_check_code));
|
||||
gen_helper_raise_exception(tcg_env,
|
||||
tcg_constant_i32(RISCV_EXCP_SW_CHECK));
|
||||
gen_set_label(skip);
|
||||
}
|
||||
|
||||
tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env,
|
||||
offsetof(CPURISCVState, elp));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
|
||||
{
|
||||
TCGv target_pc = dest_gpr(ctx, a->rd);
|
||||
|
@ -75,6 +118,18 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
|
|||
gen_set_gpr(ctx, a->rd, succ_pc);
|
||||
|
||||
tcg_gen_mov_tl(cpu_pc, target_pc);
|
||||
if (ctx->fcfi_enabled) {
|
||||
/*
|
||||
* return from functions (i.e. rs1 == xRA || rs1 == xT0) are not
|
||||
* tracked. zicfilp introduces sw guarded branch as well. sw guarded
|
||||
* branch are not tracked. rs1 == xT2 is a sw guarded branch.
|
||||
*/
|
||||
if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) {
|
||||
tcg_gen_st8_tl(tcg_constant_tl(1),
|
||||
tcg_env, offsetof(CPURISCVState, elp));
|
||||
}
|
||||
}
|
||||
|
||||
lookup_and_goto_ptr(ctx);
|
||||
|
||||
if (misaligned) {
|
||||
|
@ -271,7 +326,7 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
|
|||
if (ctx->cfg_ptr->ext_zama16b) {
|
||||
memop |= MO_ATOM_WITHIN16;
|
||||
}
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
if (get_xl(ctx) == MXL_RV128) {
|
||||
out = gen_load_i128(ctx, a, memop);
|
||||
} else {
|
||||
|
@ -372,7 +427,7 @@ static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
|
|||
if (ctx->cfg_ptr->ext_zama16b) {
|
||||
memop |= MO_ATOM_WITHIN16;
|
||||
}
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
if (get_xl(ctx) == MXL_RV128) {
|
||||
return gen_store_i128(ctx, a, memop);
|
||||
} else {
|
||||
|
@ -834,7 +889,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
|
|||
static bool do_csr_post(DisasContext *ctx)
|
||||
{
|
||||
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
/* We may have changed important cpu state -- exit to main loop. */
|
||||
gen_update_pc(ctx, ctx->cur_insn_len);
|
||||
exit_tb(ctx);
|
||||
|
|
|
@ -249,7 +249,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
|
|||
\
|
||||
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
|
||||
/* save opcode for unwinding in case we throw an exception */ \
|
||||
decode_save_opc(s); \
|
||||
decode_save_opc(s, 0); \
|
||||
egs = tcg_constant_i32(EGS); \
|
||||
gen_helper_egs_check(egs, tcg_env); \
|
||||
} \
|
||||
|
@ -322,7 +322,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
|
|||
\
|
||||
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
|
||||
/* save opcode for unwinding in case we throw an exception */ \
|
||||
decode_save_opc(s); \
|
||||
decode_save_opc(s, 0); \
|
||||
egs = tcg_constant_i32(EGS); \
|
||||
gen_helper_egs_check(egs, tcg_env); \
|
||||
} \
|
||||
|
@ -389,7 +389,7 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
|
|||
\
|
||||
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
|
||||
/* save opcode for unwinding in case we throw an exception */ \
|
||||
decode_save_opc(s); \
|
||||
decode_save_opc(s, 0); \
|
||||
egs = tcg_constant_i32(EGS); \
|
||||
gen_helper_egs_check(egs, tcg_env); \
|
||||
} \
|
||||
|
@ -440,7 +440,7 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
|
|||
|
||||
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
|
||||
/* save opcode for unwinding in case we throw an exception */
|
||||
decode_save_opc(s);
|
||||
decode_save_opc(s, 0);
|
||||
egs = tcg_constant_i32(ZVKNH_EGS);
|
||||
gen_helper_egs_check(egs, tcg_env);
|
||||
}
|
||||
|
@ -471,7 +471,7 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
|
|||
|
||||
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
|
||||
/* save opcode for unwinding in case we throw an exception */
|
||||
decode_save_opc(s);
|
||||
decode_save_opc(s, 0);
|
||||
egs = tcg_constant_i32(ZVKNH_EGS);
|
||||
gen_helper_egs_check(egs, tcg_env);
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop)
|
|||
TCGv src1 = get_address(ctx, a->rs1, 0);
|
||||
TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2);
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop);
|
||||
|
||||
gen_set_gpr_pair(ctx, a->rd, dest);
|
||||
|
@ -121,7 +121,7 @@ static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a)
|
|||
|
||||
tcg_gen_concat_i64_i128(src2, src2l, src2h);
|
||||
tcg_gen_concat_i64_i128(dest, destl, desth);
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx,
|
||||
(MO_ALIGN | MO_TEUO));
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a)
|
|||
REQUIRE_FPU;
|
||||
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
|
||||
if (a->imm) {
|
||||
TCGv temp = tcg_temp_new();
|
||||
|
@ -71,7 +71,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
|
|||
REQUIRE_FPU;
|
||||
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
|
||||
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
|
||||
if (a->imm) {
|
||||
TCGv temp = tcg_temp_new();
|
||||
|
|
target/riscv/insn_trans/trans_rvzicfiss.c.inc (new file, 114 lines)
|
@ -0,0 +1,114 @@
|
|||
/*
|
||||
* RISC-V translation routines for the Control-Flow Integrity Extension
|
||||
*
|
||||
* Copyright (c) 2024 Rivos Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2 or later, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
|
||||
{
|
||||
if (!ctx->bcfi_enabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
TCGv addr = tcg_temp_new();
|
||||
TCGLabel *skip = gen_new_label();
|
||||
uint32_t tmp = (get_xl(ctx) == MXL_RV64) ? 8 : 4;
|
||||
TCGv data = tcg_temp_new();
|
||||
tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
|
||||
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
tcg_gen_qemu_ld_tl(data, addr, SS_MMU_INDEX(ctx),
|
||||
mxl_memop(ctx) | MO_ALIGN);
|
||||
TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);
|
||||
tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip);
|
||||
tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL),
|
||||
tcg_env, offsetof(CPURISCVState, sw_check_code));
|
||||
gen_helper_raise_exception(tcg_env,
|
||||
tcg_constant_i32(RISCV_EXCP_SW_CHECK));
|
||||
gen_set_label(skip);
|
||||
tcg_gen_addi_tl(addr, addr, tmp);
|
||||
tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_sspush(DisasContext *ctx, arg_sspush *a)
|
||||
{
|
||||
if (!ctx->bcfi_enabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
TCGv addr = tcg_temp_new();
|
||||
int tmp = (get_xl(ctx) == MXL_RV64) ? -8 : -4;
|
||||
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
|
||||
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
|
||||
tcg_gen_addi_tl(addr, addr, tmp);
|
||||
tcg_gen_qemu_st_tl(data, addr, SS_MMU_INDEX(ctx),
|
||||
mxl_memop(ctx) | MO_ALIGN);
|
||||
tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
|
||||
{
|
||||
if (!ctx->bcfi_enabled || a->rd == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
TCGv dest = dest_gpr(ctx, a->rd);
|
||||
tcg_gen_ld_tl(dest, tcg_env, offsetof(CPURISCVState, ssp));
|
||||
gen_set_gpr(ctx, a->rd, dest);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
|
||||
{
|
||||
REQUIRE_A_OR_ZAAMO(ctx);
|
||||
if (!ctx->bcfi_enabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
TCGv dest = dest_gpr(ctx, a->rd);
|
||||
TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
|
||||
|
||||
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
src1 = get_address(ctx, a->rs1, 0);
|
||||
|
||||
tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx),
|
||||
(MO_ALIGN | MO_TESL));
|
||||
gen_set_gpr(ctx, a->rd, dest);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
|
||||
{
|
||||
REQUIRE_64BIT(ctx);
|
||||
REQUIRE_A_OR_ZAAMO(ctx);
|
||||
if (!ctx->bcfi_enabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
TCGv dest = dest_gpr(ctx, a->rd);
|
||||
TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
|
||||
|
||||
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
|
||||
src1 = get_address(ctx, a->rs1, 0);
|
||||
|
||||
tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx),
|
||||
(MO_ALIGN | MO_TESQ));
|
||||
gen_set_gpr(ctx, a->rd, dest);
|
||||
return true;
|
||||
}
|
|
@ -28,7 +28,7 @@ static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a)
|
|||
/* Do the same as sfence.vma currently */
|
||||
REQUIRE_EXT(ctx, RVS);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_helper_tlb_flush(tcg_env);
|
||||
return true;
|
||||
#endif
|
||||
|
@ -57,7 +57,7 @@ static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a)
|
|||
/* Do the same as hfence.vvma currently */
|
||||
REQUIRE_EXT(ctx, RVH);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_helper_hyp_tlb_flush(tcg_env);
|
||||
return true;
|
||||
#endif
|
||||
|
@ -70,7 +70,7 @@ static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a)
|
|||
/* Do the same as hfence.gvma currently */
|
||||
REQUIRE_EXT(ctx, RVH);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
decode_save_opc(ctx);
|
||||
decode_save_opc(ctx, 0);
|
||||
gen_helper_hyp_gvma_tlb_flush(tcg_env);
|
||||
return true;
|
||||
#endif
|
||||
|
|
|
@@ -30,12 +30,15 @@
 * - U+2STAGE          0b100
 * - S+2STAGE          0b101
 * - S+SUM+2STAGE      0b110
 * - Shadow stack+U    0b1000
 * - Shadow stack+S    0b1001
 */
#define MMUIdx_U            0
#define MMUIdx_S            1
#define MMUIdx_S_SUM        2
#define MMUIdx_M            3
#define MMU_2STAGE_BIT      (1 << 2)
#define MMU_IDX_SS_WRITE    (1 << 3)

static inline int mmuidx_priv(int mmu_idx)
{
@@ -1676,9 +1676,9 @@ void kvm_arch_accel_class_init(ObjectClass *oc)
    object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
                                  riscv_set_kvm_aia);
    object_class_property_set_description(oc, "riscv-aia",
                                          "Set KVM AIA mode. Valid values are "
                                          "emul, hwaccel, and auto. Default "
                                          "is auto.");
                                          "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
                                          "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
                                          "if the host supports it");
    object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
                                    "auto");
}

@@ -1711,18 +1711,20 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
        error_report("KVM AIA: failed to get current KVM AIA mode");
        exit(1);
    }
    qemu_log("KVM AIA: default mode is %s\n",
             kvm_aia_mode_str(default_aia_mode));

    if (default_aia_mode != aia_mode) {
        ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                                KVM_DEV_RISCV_AIA_CONFIG_MODE,
                                &aia_mode, true, NULL);
        if (ret < 0)
            warn_report("KVM AIA: failed to set KVM AIA mode");
        else
            qemu_log("KVM AIA: set current mode to %s\n",
                     kvm_aia_mode_str(aia_mode));
        if (ret < 0) {
            warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
                        "default host mode '%s'",
                        kvm_aia_mode_str(aia_mode),
                        kvm_aia_mode_str(default_aia_mode));

            /* failed to change AIA mode, use default */
            aia_mode = default_aia_mode;
        }
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
@@ -350,6 +350,42 @@ static const VMStateDescription vmstate_jvt = {
     }
 };
 
+static bool elp_needed(void *opaque)
+{
+    RISCVCPU *cpu = opaque;
+
+    return cpu->cfg.ext_zicfilp;
+}
+
+static const VMStateDescription vmstate_elp = {
+    .name = "cpu/elp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = elp_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_BOOL(env.elp, RISCVCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool ssp_needed(void *opaque)
+{
+    RISCVCPU *cpu = opaque;
+
+    return cpu->cfg.ext_zicfiss;
+}
+
+static const VMStateDescription vmstate_ssp = {
+    .name = "cpu/ssp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = ssp_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINTTL(env.ssp, RISCVCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_riscv_cpu = {
     .name = "cpu",
     .version_id = 10,
@@ -422,6 +458,8 @@ const VMStateDescription vmstate_riscv_cpu = {
         &vmstate_debug,
         &vmstate_smstateen,
         &vmstate_jvt,
+        &vmstate_elp,
+        &vmstate_ssp,
         NULL
     }
 };
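[Note, not part of the patch: since each new subsection carries a .needed callback that checks ext_zicfilp/ext_zicfiss, 'cpu/elp' and 'cpu/ssp' are only placed in the migration stream when the corresponding extension is enabled, so the stream produced by CPUs without the CFI extensions is unchanged.]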
@@ -309,6 +309,15 @@ target_ulong helper_sret(CPURISCVState *env)
 
     riscv_cpu_set_mode(env, prev_priv, prev_virt);
 
+    /*
+     * If forward cfi enabled for new priv, restore elp status
+     * and clear spelp in mstatus
+     */
+    if (cpu_get_fcfien(env)) {
+        env->elp = get_field(env->mstatus, MSTATUS_SPELP);
+    }
+    env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
+
     return retpc;
 }
@@ -349,6 +358,14 @@ target_ulong helper_mret(CPURISCVState *env)
     }
 
     riscv_cpu_set_mode(env, prev_priv, prev_virt);
+    /*
+     * If forward cfi enabled for new priv, restore elp status
+     * and clear mpelp in mstatus
+     */
+    if (cpu_get_fcfien(env)) {
+        env->elp = get_field(env->mstatus, MSTATUS_MPELP);
+    }
+    env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
 
     return retpc;
 }
@@ -326,7 +326,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
              */
             pmp_size = -(addr | TARGET_PAGE_MASK);
         } else {
-            pmp_size = sizeof(target_ulong);
+            pmp_size = 2 << riscv_cpu_mxl(env);
         }
     } else {
         pmp_size = size;
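[Note, not part of the patch: riscv_cpu_mxl() returns the misa.MXL encoding, 1 for RV32 and 2 for RV64, so the new expression works out to 2 << 1 = 4 bytes or 2 << 2 = 8 bytes. Unlike sizeof(target_ulong), which is always 8 in the 64-bit binary, this gives the correct default access width when an RV32 CPU is run under qemu-system-riscv64.]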
@@ -598,6 +598,11 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
         val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
     }
 
+    /* M-mode forward cfi to be enabled if cfi extension is implemented */
+    if (env_archcpu(env)->cfg.ext_zicfilp) {
+        val |= (val & MSECCFG_MLPE);
+    }
+
     env->mseccfg = val;
 }
@@ -44,7 +44,8 @@ typedef enum {
     MSECCFG_MMWP  = 1 << 1,
     MSECCFG_RLB   = 1 << 2,
     MSECCFG_USEED = 1 << 8,
-    MSECCFG_SSEED = 1 << 9
+    MSECCFG_SSEED = 1 << 9,
+    MSECCFG_MLPE  = 1 << 10,
 } mseccfg_field_t;
 
 typedef struct {
@@ -129,6 +129,7 @@ static void riscv_restore_state_to_opc(CPUState *cs,
         env->pc = pc;
     }
     env->bins = data[1];
+    env->excp_uw2 = data[2];
 }
 
 static const TCGCPUOps riscv_tcg_ops = {
@@ -618,11 +619,39 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         cpu->cfg.ext_zihpm = false;
     }
 
+    if (cpu->cfg.ext_zicfiss) {
+        if (!cpu->cfg.ext_zicsr) {
+            error_setg(errp, "zicfiss extension requires zicsr extension");
+            return;
+        }
+        if (!riscv_has_ext(env, RVA)) {
+            error_setg(errp, "zicfiss extension requires A extension");
+            return;
+        }
+        if (!riscv_has_ext(env, RVS)) {
+            error_setg(errp, "zicfiss extension requires S");
+            return;
+        }
+        if (!cpu->cfg.ext_zimop) {
+            error_setg(errp, "zicfiss extension requires zimop extension");
+            return;
+        }
+        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
+            error_setg(errp, "zicfiss with zca requires zcmop extension");
+            return;
+        }
+    }
+
     if (!cpu->cfg.ext_zihpm) {
         cpu->cfg.pmu_mask = 0;
         cpu->pmu_avail_ctrs = 0;
     }
 
+    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
+        error_setg(errp, "zicfilp extension requires zicsr extension");
+        return;
+    }
+
     /*
      * Disable isa extensions based on priv spec after we
      * validated and set everything we need.
@@ -116,6 +116,11 @@ typedef struct DisasContext {
     bool frm_valid;
     bool insn_start_updated;
    const GPtrArray *decoders;
+    /* zicfilp extension. fcfi_enabled, lp expected or not */
+    bool fcfi_enabled;
+    bool fcfi_lp_expected;
+    /* zicfiss extension, if shadow stack was enabled during TB gen */
+    bool bcfi_enabled;
 } DisasContext;
 
 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -139,6 +144,8 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
 #define get_address_xl(ctx)    ((ctx)->address_xl)
 #endif
 
+#define mxl_memop(ctx) ((get_xl(ctx) + 1) | MO_TE)
+
 /* The word size for this machine mode. */
 static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
 {
@@ -204,11 +211,12 @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
 }
 
-static void decode_save_opc(DisasContext *ctx)
+static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2)
 {
     assert(!ctx->insn_start_updated);
     ctx->insn_start_updated = true;
     tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
+    tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2);
 }
 
 static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
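[Note, not part of the patch: the new argument lands in the third insn_start word (see the tcg_gen_insn_start(pc_next, 0, 0) change below) and is copied back into env->excp_uw2 by riscv_restore_state_to_opc() during exception unwind; the atomic-op translators pass RISCV_UW2_ALWAYS_STORE_AMO, presumably so that faults on those accesses can be reported as store/AMO faults. The exact semantics of that flag are defined outside this excerpt.]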
@@ -694,7 +702,7 @@ static void gen_set_rm(DisasContext *ctx, int rm)
     }
 
     /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, 0);
     gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
 }
 
@@ -707,7 +715,7 @@ static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
     ctx->frm_valid = true;
 
     /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, 0);
     gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
 }
 
@@ -1091,7 +1099,7 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
         mop |= MO_ALIGN;
     }
 
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
     src1 = get_address(ctx, a->rs1, 0);
     func(dest, src1, src2, ctx->mem_idx, mop);
 
@@ -1105,7 +1113,7 @@ static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
     TCGv src1 = get_address(ctx, a->rs1, 0);
     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
 
-    decode_save_opc(ctx);
+    decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
     tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
 
     gen_set_gpr(ctx, a->rd, dest);
@@ -1121,6 +1129,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
     return translator_ldl(env, &ctx->base, pc);
 }
 
+#define SS_MMU_INDEX(ctx)  (ctx->mem_idx | MMU_IDX_SS_WRITE)
+
 /* Include insn module translation function */
 #include "insn_trans/trans_rvi.c.inc"
 #include "insn_trans/trans_rvm.c.inc"
@@ -1151,6 +1161,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
 #include "decode-insn16.c.inc"
 #include "insn_trans/trans_rvzce.c.inc"
 #include "insn_trans/trans_rvzcmop.c.inc"
+#include "insn_trans/trans_rvzicfiss.c.inc"
 
 /* Include decoders for factored-out extensions */
 #include "decode-XVentanaCondOps.c.inc"
@@ -1238,6 +1249,9 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
     ctx->ztso = cpu->cfg.ext_ztso;
     ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
+    ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
+    ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED);
+    ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED);
     ctx->zero = tcg_constant_tl(0);
     ctx->virt_inst_excp = false;
     ctx->decoders = cpu->decoders;
@@ -1256,7 +1270,7 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
         pc_next &= ~TARGET_PAGE_MASK;
     }
 
-    tcg_gen_insn_start(pc_next, 0);
+    tcg_gen_insn_start(pc_next, 0, 0);
     ctx->insn_start_updated = false;
 }
 
@@ -1270,6 +1284,24 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     decode_opc(env, ctx, opcode16);
     ctx->base.pc_next += ctx->cur_insn_len;
 
+    /*
+     * If 'fcfi_lp_expected' is still true after processing the instruction,
+     * then we did not see an 'lpad' instruction, and must raise an exception.
+     * Insert code to raise the exception at the start of the insn; any other
+     * code the insn may have emitted will be deleted as dead code following
+     * the noreturn exception
+     */
+    if (ctx->fcfi_lp_expected) {
+        /* Emit after insn_start, i.e. before the op following insn_start. */
+        tcg_ctx->emit_before_op = QTAILQ_NEXT(ctx->base.insn_start, link);
+        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+                      tcg_env, offsetof(CPURISCVState, sw_check_code));
+        gen_helper_raise_exception(tcg_env,
+                                   tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+        tcg_ctx->emit_before_op = NULL;
+        ctx->base.is_jmp = DISAS_NORETURN;
+    }
+
     /* Only the first insn within a TB is allowed to cross a page boundary. */
     if (ctx->base.is_jmp == DISAS_NEXT) {
         if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) {
@@ -5132,7 +5132,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
     }                                                          \
     env->vstart = 0;                                           \
     /* set tail elements to 1s */                              \
-    vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);   \
+    vext_set_elems_1s(vd, vta, num * esz, total_elems * esz);  \
 }
 
 /* Compress into vd elements of vs2 where vs1 is enabled */