Merge tag 'pull-riscv-to-apply-20250305-1' of https://github.com/alistair23/qemu into staging

Third RISC-V PR for 10.0

* CSR Coverity fixes
* Fix unexpected behavior of vector reduction instructions when vl is 0
* Fix incorrect vlen comparison in prop_vlen_set
* Throw debug exception before page fault
* Remove redundant "hart_idx" masking from APLIC
* Add support for Control Transfer Records Ext
* Remove redundant struct members from the IOMMU
* Remove duplicate definitions from the IOMMU
* Fix tick_offset migration for Goldfish RTC
* Add serial alias in virt machine DTB
* Remove Bin Meng from RISC-V maintainers
* Log guest errors when reserved bits are set in PTEs
* Add missing Sdtrig disas CSRs
* Correct the hpmevent sscofpmf mask
* Mask upper sscofpmf bits during validation
* Remove warnings about Smdbltrp/Smrnmi being disabled
* Respect mseccfg.RLB bit for TOR mode PMP entry
* Update KVM support to Linux 6.14-rc3
* IOMMU HPM support
* Support Sscofpmf/Svade/Svadu/Smnpm/Ssnpm extensions in KVM
* Add --ignore-family option to binfmt
* Refinement for AIA with KVM acceleration
* Reset time changes for KVM

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmfHrkEACgkQr3yVEwxT
# gBNGTA/+N9nBPZt5cv0E/0EDZMQS8RQrQvz1yHRgAXOq8RnOdcL72v8wovGAfnVu
# l0BXDoVBvw4f2Xm9Q4ptlfH8HAefCeQ4E/K9j5Lwxr8OqZHFg6e+JQIyZOt6wBWI
# hJbz1/laJIbXq3cGgwcE/l0aGfb2UAAsA4dsZVt/MnjAV8GS7BF9RCkgCPxD4FZA
# 0PLiq9dF+4o4q7PxnxAbUVz/uhLzqmcnQemQFHbf9Wms3tZEDKmPSoKP/v+01Rkw
# tm+cgy7OocpgygbMc0nykYG50P+raUBSesk/jFGeKj8cU4IeMuzDsVPWcd4rG+0X
# Z+nENfOY7vOqMCXgaQCW2r4vEQx2Gj0yQG6xmVAemRWzFHJdz5W01/uUSHzJSB+L
# +VbAH55HYKr6sbgecqInQ/rsHKyw6D5QFcj/guz+kvhsH9rJ5q60uywrWL5OEuaK
# vKv7cSZghlf9bwy6soassXxk8z+j4psJ7WnnVpynNKMew9yFFDhayuIFbo9952gH
# 3+NCm2cQrkTYJOXAJwkxBD+I4AXxNSuxNjaVANk9q80uqbT9JiHM7pcvbJI00Fji
# OutJSPYtVXEin9Ev3sJ05YQHsIcZ/Noi3O5IdaRI0AMk/8gyGyhFCVgSpV52dH59
# HguPK05e5cW/xgElGUPHrU+UtzE05p18HnSoVPclF/B5rc8QXN0=
# =dobk
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 05 Mar 2025 09:52:01 HKT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20250305-1' of https://github.com/alistair23/qemu: (59 commits)
  target/riscv/kvm: add missing KVM CSRs
  target/riscv/kvm: add kvm_riscv_reset_regs_csr()
  target/riscv/cpu: remove unneeded !kvm_enabled() check
  hw/intc/aplic: refine kvm_msicfgaddr
  hw/intc/aplic: refine the APLIC realize
  hw/intc/imsic: refine the IMSIC realize
  binfmt: Add --ignore-family option
  binfmt: Normalize host CPU architecture
  binfmt: Shuffle things around
  target/riscv/kvm: Add some exts support
  docs/specs/riscv-iommu.rst: add HPM support info
  hw/riscv: add IOMMU HPM trace events
  hw/riscv/riscv-iommu.c: add RISCV_IOMMU_CAP_HPM cap
  hw/riscv/riscv-iommu: add hpm events mmio write
  hw/riscv/riscv-iommu: add IOHPMCYCLES mmio write
  hw/riscv/riscv-iommu: add IOCOUNTINH mmio writes
  hw/riscv/riscv-iommu: instantiate hpm_timer
  hw/riscv/riscv-iommu: add riscv_iommu_hpm_incr_ctr()
  hw/riscv/riscv-iommu: add riscv-iommu-hpm file
  hw/riscv/riscv-iommu-bits.h: HPM bits
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Commit 9ee7278020 by Stefan Hajnoczi, 2025-03-05 21:56:46 +08:00
50 changed files with 2106 additions and 271 deletions

MAINTAINERS

@ -319,7 +319,6 @@ F: tests/functional/test_ppc_74xx.py
RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <alistair.francis@wdc.com>
M: Bin Meng <bmeng.cn@gmail.com>
R: Weiwei Li <liwei1518@gmail.com>
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
@ -1618,7 +1617,6 @@ F: include/hw/riscv/opentitan.h
F: include/hw/*/ibex_*.h
Microchip PolarFire SoC Icicle Kit
M: Bin Meng <bmeng.cn@gmail.com>
L: qemu-riscv@nongnu.org
S: Supported
F: docs/system/riscv/microchip-icicle-kit.rst
@ -1645,7 +1643,6 @@ F: include/hw/char/shakti_uart.h
SiFive Machines
M: Alistair Francis <Alistair.Francis@wdc.com>
M: Bin Meng <bmeng.cn@gmail.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
L: qemu-riscv@nongnu.org
S: Supported
@ -3761,7 +3758,7 @@ S: Orphan
F: hw/i386/amd_iommu.?
OpenSBI Firmware
M: Bin Meng <bmeng.cn@gmail.com>
L: qemu-riscv@nongnu.org
S: Supported
F: pc-bios/opensbi-*
F: .gitlab-ci.d/opensbi.yml

disas/riscv.c

@ -1662,7 +1662,7 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "aes32esi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
{ "aes32dsmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
{ "aes32dsi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
{ "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
{ "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
{ "aes64ks2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "aes64im", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "aes64esm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
@ -2214,11 +2214,11 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "mop.rr.5", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.6", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.7", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.11", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.13", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.15", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
@ -2438,9 +2438,11 @@ static const char *csr_name(int csrno)
case 0x07a1: return "tdata1";
case 0x07a2: return "tdata2";
case 0x07a3: return "tdata3";
case 0x07a4: return "tinfo";
case 0x07b0: return "dcsr";
case 0x07b1: return "dpc";
case 0x07b2: return "dscratch";
case 0x07b2: return "dscratch0";
case 0x07b3: return "dscratch1";
case 0x0b00: return "mcycle";
case 0x0b01: return "mtime";
case 0x0b02: return "minstret";

docs/specs/riscv-iommu.rst

@ -82,6 +82,8 @@ Several options are available to control the capabilities of the device, namely:
- "off" (Out-of-reset translation mode: 'on' for DMA disabled, 'off' for 'BARE' (passthrough))
- "s-stage": enable s-stage support
- "g-stage": enable g-stage support
- "hpm-counters": number of hardware performance counters available. Maximum value is 31.
Default value is 31. Use 0 (zero) to disable HPM support
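
For example, a hypothetical command line attaching the PCIe flavour of the device with eight HPM counters (riscv-iommu-pci is the device built from the hw/riscv sources below; this invocation is illustrative, not taken from the patch):

    qemu-system-riscv64 -M virt -device riscv-iommu-pci,hpm-counters=8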
riscv-iommu-sys device
----------------------

hw/intc/riscv_aplic.c

@ -181,8 +181,10 @@ void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr)
{
#ifdef CONFIG_KVM
if (riscv_use_emulated_aplic(aplic->msimode)) {
addr >>= APLIC_xMSICFGADDR_PPN_SHIFT;
aplic->kvm_msicfgaddr = extract64(addr, 0, 32);
aplic->kvm_msicfgaddrH = extract64(addr, 32, 32);
aplic->kvm_msicfgaddrH = extract64(addr, 32, 32) &
APLIC_xMSICFGADDRH_VALID_MASK;
}
#endif
}
@ -403,12 +405,17 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
}
}
if (aplic->mmode) {
msicfgaddr = aplic_m->mmsicfgaddr;
msicfgaddrH = aplic_m->mmsicfgaddrH;
if (aplic->kvm_splitmode) {
msicfgaddr = aplic->kvm_msicfgaddr;
msicfgaddrH = ((uint64_t)aplic->kvm_msicfgaddrH << 32);
} else {
msicfgaddr = aplic_m->smsicfgaddr;
msicfgaddrH = aplic_m->smsicfgaddrH;
if (aplic->mmode) {
msicfgaddr = aplic_m->mmsicfgaddr;
msicfgaddrH = aplic_m->mmsicfgaddrH;
} else {
msicfgaddr = aplic_m->smsicfgaddr;
msicfgaddrH = aplic_m->smsicfgaddrH;
}
}
lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) &
@ -421,7 +428,6 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
APLIC_xMSICFGADDRH_HHXW_MASK;
group_idx = hart_idx >> lhxw;
hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw);
addr = msicfgaddr;
addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32;
@ -432,11 +438,6 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
addr |= (uint64_t)(guest_idx & APLIC_xMSICFGADDR_PPN_HART(lhxs));
addr <<= APLIC_xMSICFGADDR_PPN_SHIFT;
if (aplic->kvm_splitmode) {
addr |= aplic->kvm_msicfgaddr;
addr |= ((uint64_t)aplic->kvm_msicfgaddrH << 32);
}
address_space_stl_le(&address_space_memory, addr,
eiid, MEMTXATTRS_UNSPECIFIED, &result);
if (result != MEMTX_OK) {
@ -894,6 +895,26 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
RISCVAPLICState *aplic = RISCV_APLIC(dev);
if (riscv_use_emulated_aplic(aplic->msimode)) {
/* Create output IRQ lines for non-MSI mode */
if (!aplic->msimode) {
/* Claim the CPU interrupt to be triggered by this APLIC */
for (i = 0; i < aplic->num_harts; i++) {
RISCVCPU *cpu;
cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
if (riscv_cpu_claim_interrupts(cpu,
(aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
error_report("%s already claimed",
(aplic->mmode) ? "MEIP" : "SEIP");
exit(1);
}
}
aplic->external_irqs = g_malloc(sizeof(qemu_irq) *
aplic->num_harts);
qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
}
aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
aplic->state = g_new0(uint32_t, aplic->num_irqs);
@ -928,23 +949,6 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
}
}
/* Create output IRQ lines for non-MSI mode */
if (!aplic->msimode) {
aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts);
qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
/* Claim the CPU interrupt to be triggered by this APLIC */
for (i = 0; i < aplic->num_harts; i++) {
RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
if (riscv_cpu_claim_interrupts(cpu,
(aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
error_report("%s already claimed",
(aplic->mmode) ? "MEIP" : "SEIP");
exit(1);
}
}
}
msi_nonbroken = true;
}
@ -1068,15 +1072,15 @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
if (riscv_use_emulated_aplic(msimode)) {
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
}
if (!msimode) {
for (i = 0; i < num_harts; i++) {
CPUState *cpu = cpu_by_arch_id(hartid_base + i);
if (!msimode) {
for (i = 0; i < num_harts; i++) {
CPUState *cpu = cpu_by_arch_id(hartid_base + i);
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
}
}
}

hw/intc/riscv_imsic.c

@ -349,7 +349,19 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
/* Claim the CPU interrupt to be triggered by this IMSIC */
if (riscv_cpu_claim_interrupts(rcpu,
(imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
error_setg(errp, "%s already claimed",
(imsic->mmode) ? "MEIP" : "SEIP");
return;
}
if (!kvm_irqchip_in_kernel()) {
/* Create output IRQ lines */
imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
@ -361,18 +373,6 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
IMSIC_MMIO_SIZE(imsic->num_pages));
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
/* Claim the CPU interrupt to be triggered by this IMSIC */
if (riscv_cpu_claim_interrupts(rcpu,
(imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
error_setg(errp, "%s already claimed",
(imsic->mmode) ? "MEIP" : "SEIP");
return;
}
/* Create output IRQ lines */
imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
/* Force select AIA feature and setup CSR read-modify-write callback */
if (env) {
if (!imsic->mmode) {
@ -381,8 +381,11 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
} else {
rcpu->cfg.ext_smaia = true;
}
riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
riscv_imsic_rmw, imsic);
if (!kvm_irqchip_in_kernel()) {
riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
riscv_imsic_rmw, imsic);
}
}
msi_nonbroken = true;
@ -464,15 +467,17 @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
for (i = 0; i < num_pages; i++) {
if (!i) {
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
if (!kvm_irqchip_in_kernel()) {
for (i = 0; i < num_pages; i++) {
if (!i) {
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
} else {
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
} else {
qdev_connect_gpio_out_named(dev, NULL, i,
qdev_get_gpio_in(DEVICE(cpu),
IRQ_LOCAL_MAX + i - 1));
}
}
}

hw/riscv/meson.build

@ -10,7 +10,8 @@ riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c'))
riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c'))
riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c'))
riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c'))
riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files('riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c'))
riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files(
'riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c', 'riscv-iommu-hpm.c'))
riscv_ss.add(when: 'CONFIG_MICROBLAZE_V', if_true: files('microblaze-v-generic.c'))
hw_arch += {'riscv': riscv_ss}

hw/riscv/riscv-iommu-bits.h

@ -50,8 +50,14 @@ struct riscv_iommu_pq_record {
#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
/* Payload fields */
#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
/* Common field positions */
#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10)
@ -82,6 +88,7 @@ struct riscv_iommu_pq_record {
#define RISCV_IOMMU_CAP_ATS BIT_ULL(25)
#define RISCV_IOMMU_CAP_T2GPA BIT_ULL(26)
#define RISCV_IOMMU_CAP_IGS GENMASK_ULL(29, 28)
#define RISCV_IOMMU_CAP_HPM BIT_ULL(30)
#define RISCV_IOMMU_CAP_DBG BIT_ULL(31)
#define RISCV_IOMMU_CAP_PAS GENMASK_ULL(37, 32)
#define RISCV_IOMMU_CAP_PD8 BIT_ULL(38)
@ -191,6 +198,52 @@ enum {
RISCV_IOMMU_INTR_COUNT
};
#define RISCV_IOMMU_IOCOUNT_NUM 31
/* 5.19 Performance monitoring counter overflow status (32bits) */
#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058
#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0)
/* 5.20 Performance monitoring counter inhibits (32bits) */
#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C
#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0)
/* 5.21 Performance monitoring cycles counter (64bits) */
#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060
#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0)
#define RISCV_IOMMU_IOHPMCYCLES_OVF BIT_ULL(63)
/* 5.22 Performance monitoring event counters (31 * 64bits) */
#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068
#define RISCV_IOMMU_REG_IOHPMCTR(_n) \
(RISCV_IOMMU_REG_IOHPMCTR_BASE + (_n * 0x8))
/* 5.23 Performance monitoring event selectors (31 * 64bits) */
#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160
#define RISCV_IOMMU_REG_IOHPMEVT(_n) \
(RISCV_IOMMU_REG_IOHPMEVT_BASE + (_n * 0x8))
#define RISCV_IOMMU_IOHPMEVT_EVENT_ID GENMASK_ULL(14, 0)
#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15)
#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16)
#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36)
#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60)
#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61)
#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62)
#define RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63)
enum RISCV_IOMMU_HPMEVENT_id {
RISCV_IOMMU_HPMEVENT_INVALID = 0,
RISCV_IOMMU_HPMEVENT_URQ = 1,
RISCV_IOMMU_HPMEVENT_TRQ = 2,
RISCV_IOMMU_HPMEVENT_ATS_RQ = 3,
RISCV_IOMMU_HPMEVENT_TLB_MISS = 4,
RISCV_IOMMU_HPMEVENT_DD_WALK = 5,
RISCV_IOMMU_HPMEVENT_PD_WALK = 6,
RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7,
RISCV_IOMMU_HPMEVENT_G_WALKS = 8,
RISCV_IOMMU_HPMEVENT_MAX = 9
};
/* 5.24 Translation request IOVA (64bits) */
#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258
@ -382,22 +435,6 @@ enum riscv_iommu_fq_ttypes {
RISCV_IOMMU_FW_TTYPE_PCIE_MSG_REQ = 9,
};
/* Header fields */
#define RISCV_IOMMU_PREQ_HDR_PID GENMASK_ULL(31, 12)
#define RISCV_IOMMU_PREQ_HDR_PV BIT_ULL(32)
#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
/* Payload fields */
#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
/*
* struct riscv_iommu_msi_pte - MSI Page Table Entry
*/

hw/riscv/riscv-iommu-hpm.c Normal file (381 lines)

@ -0,0 +1,381 @@
/*
* RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
*
* Copyright (C) 2022-2023 Rivos Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "cpu_bits.h"
#include "riscv-iommu-hpm.h"
#include "riscv-iommu.h"
#include "riscv-iommu-bits.h"
#include "trace.h"
/* For now we assume the IOMMU HPM frequency to be 1GHz, so one cycle is 1ns. */
static inline uint64_t get_cycles(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
{
const uint64_t cycle = riscv_iommu_reg_get64(
s, RISCV_IOMMU_REG_IOHPMCYCLES);
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
const uint64_t ctr_prev = s->hpmcycle_prev;
const uint64_t ctr_val = s->hpmcycle_val;
trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
/*
* Counter should not increment if inhibit bit is set. We can't really
* stop the QEMU_CLOCK_VIRTUAL, so we just return the last updated
* counter value to indicate that counter was not incremented.
*/
return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}
return (ctr_val + get_cycles() - ctr_prev) |
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}
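
The read path above never stops the clock: hpmcycle_val snapshots the counter at the last start/stop transition and hpmcycle_prev records the clock at that moment, so a live read is val + (now - prev). A minimal standalone sketch of the same snapshot-counter technique, assuming only POSIX clock_gettime(); all names below are illustrative, not from the patch:

#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

typedef struct {
    uint64_t val;   /* counter value at the last start/stop transition */
    uint64_t prev;  /* clock snapshot taken when counting last resumed */
    int running;
} SnapCounter;

static void snap_counter_start(SnapCounter *c)
{
    c->prev = now_ns();            /* re-anchor: time spent stopped is skipped */
    c->running = 1;
}

static void snap_counter_stop(SnapCounter *c)
{
    c->val += now_ns() - c->prev;  /* fold the elapsed time into the value */
    c->running = 0;
}

static uint64_t snap_counter_read(const SnapCounter *c)
{
    /* While stopped, the clock keeps ticking but the reported value is frozen. */
    return c->running ? c->val + (now_ns() - c->prev) : c->val;
}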
static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
{
const uint32_t off = ctr_idx << 3;
uint64_t cntr_val;
cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
trace_riscv_iommu_hpm_incr_ctr(cntr_val);
/* Handle the overflow scenario. */
if (cntr_val == UINT64_MAX) {
/*
* Generate interrupt only if OF bit is clear. +1 to offset the cycle
* register OF bit.
*/
const uint32_t ovf =
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
BIT(ctr_idx + 1), 0);
if (!get_field(ovf, BIT(ctr_idx + 1))) {
riscv_iommu_reg_mod64(s,
RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
RISCV_IOMMU_IOHPMEVT_OF,
0);
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
}
}
}
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
unsigned event_id)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint32_t did_gscid;
uint32_t pid_pscid;
uint32_t ctr_idx;
gpointer value;
uint32_t ctrs;
uint64_t evt;
if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
return;
}
value = g_hash_table_lookup(s->hpm_event_ctr_map,
GUINT_TO_POINTER(event_id));
if (value == NULL) {
return;
}
for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
ctr_idx = ctz32(ctrs);
if (get_field(inhibit, BIT(ctr_idx + 1))) {
continue;
}
evt = riscv_iommu_reg_get64(s,
RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
/*
* It's quite possible that the event ID in the counter has been changed
* but the hash table hasn't been updated yet. We don't want to
* increment the counter for the old event ID.
*/
if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
continue;
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
} else {
did_gscid = ctx->devid;
pid_pscid = ctx->process_id;
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
/*
* If the transaction does not have a valid process_id, counter
* increments if device_id matches DID_GSCID. If the transaction
* has a valid process_id, counter increments if device_id
* matches DID_GSCID and process_id matches PID_PSCID. See
* IOMMU Specification, Chapter 5.23. Performance-monitoring
* event selector.
*/
if (ctx->process_id &&
get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
continue;
}
}
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
uint32_t mask = ~0;
if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
/*
* 1001 1011 mask = GSCID
* 0000 0111 mask = mask ^ (mask + 1)
* 1111 1000 mask = ~mask;
*/
mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
mask = mask ^ (mask + 1);
mask = ~mask;
}
if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
(did_gscid & mask)) {
continue;
}
}
hpm_incr_ctr(s, ctr_idx);
}
}
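
The dispatch loop above walks the counter mask with the classic clear-lowest-set-bit step (ctrs &= ctrs - 1) plus ctz32(), visiting each eligible counter index exactly once. The idiom in isolation, with a hypothetical callback; __builtin_ctz assumes GCC/Clang, which is what QEMU's ctz32() wraps:

#include <stdint.h>
#include <stdio.h>

/* Call fn(idx) for the index of every set bit in mask, lowest bit first. */
static void for_each_set_bit(uint32_t mask, void (*fn)(unsigned idx))
{
    for (uint32_t m = mask; m != 0; m &= m - 1) {
        fn((unsigned)__builtin_ctz(m));  /* ctz: index of the lowest set bit */
    }
}

static void bump(unsigned idx)
{
    printf("increment counter %u\n", idx);
}

int main(void)
{
    for_each_set_bit(0x15, bump);  /* bits 0, 2, 4 -> three callbacks */
    return 0;
}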
/* Timer callback for cycle counter overflow. */
void riscv_iommu_hpm_timer_cb(void *priv)
{
RISCVIOMMUState *s = priv;
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint32_t ovf;
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
return;
}
if (s->irq_overflow_left > 0) {
uint64_t irq_trigger_at =
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
s->irq_overflow_left = 0;
return;
}
ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
/*
* We don't need to set hpmcycle_val to zero and update hpmcycle_prev to
* the current clock value. The way we calculate iohpmcycles will overflow
* and return the correct value. This avoids the need to synchronize the
* timer callback and the write callback.
*/
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
RISCV_IOMMU_IOCOUNTOVF_CY, 0);
riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
}
}
static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
uint64_t overflow_at, overflow_ns;
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
return;
}
/*
* We are using INT64_MAX here instead of UINT64_MAX because the cycle
* counter has 63-bit precision and INT64_MAX is the maximum it can store.
*/
if (value) {
overflow_ns = INT64_MAX - value + 1;
} else {
overflow_ns = INT64_MAX;
}
overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;
if (overflow_at > INT64_MAX) {
s->irq_overflow_left = overflow_at - INT64_MAX;
overflow_at = INT64_MAX;
}
timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
}
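
With a 63-bit counter ticking once per assumed nanosecond, the timer above must fire INT64_MAX - value + 1 ns from now, and anything past the INT64_MAX deadline a QEMUTimer can express is parked in irq_overflow_left for a second hop. The arithmetic alone, as a self-contained check with made-up values:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t now = 5;                          /* pretend current clock, ns */
    uint64_t value = (uint64_t)INT64_MAX - 2;  /* counter nearly full */

    /* ns until the 63-bit counter wraps: one increment per ns. */
    uint64_t overflow_ns = value ? (uint64_t)INT64_MAX - value + 1
                                 : (uint64_t)INT64_MAX;
    uint64_t overflow_at = now + overflow_ns;
    uint64_t residue = 0;

    if (overflow_at > (uint64_t)INT64_MAX) {   /* beyond the timer's range */
        residue = overflow_at - (uint64_t)INT64_MAX;
        overflow_at = (uint64_t)INT64_MAX;
    }
    printf("arm timer at %" PRIu64 " ns, residue %" PRIu64 " ns\n",
           overflow_at, residue);              /* -> arm at 8 ns, residue 0 */
    return 0;
}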
/* Updates the internal cycle counter state when iocntinh:CY is changed. */
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
{
const uint32_t inhibit = riscv_iommu_reg_get32(
s, RISCV_IOMMU_REG_IOCOUNTINH);
/* We only need to process CY bit toggle. */
if (!(inhibit ^ prev_cy_inh)) {
return;
}
trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);
if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
/*
* Cycle counter is enabled. Just start the timer again and update
* the clock snapshot value to point to the current time to make
* sure iohpmcycles read is correct.
*/
s->hpmcycle_prev = get_cycles();
hpm_setup_timer(s, s->hpmcycle_val);
} else {
/*
* Cycle counter is disabled. Stop the timer and update the cycle
* counter to record the current value which is last programmed
* value + the cycles passed so far.
*/
s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
timer_del(s->hpm_timer);
}
}
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
{
const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
trace_riscv_iommu_hpm_cycle_write(ovf, val);
/*
* Clear OF bit in IOCOUNTOVF if it's being cleared in IOHPMCYCLES register.
*/
if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
!get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
RISCV_IOMMU_IOCOUNTOVF_CY);
}
s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
s->hpmcycle_prev = get_cycles();
hpm_setup_timer(s, s->hpmcycle_val);
}
static inline bool check_valid_event_id(unsigned event_id)
{
return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
event_id < RISCV_IOMMU_HPMEVENT_MAX;
}
static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
{
uint32_t *pair = udata;
if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
pair[1] = GPOINTER_TO_UINT(key);
return true;
}
return false;
}
/* Caller must check ctr_idx against hpm_cntrs to see if it's supported or not. */
static void update_event_map(RISCVIOMMUState *s, uint64_t value,
uint32_t ctr_idx)
{
unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
uint32_t new_value = 1 << ctr_idx;
gpointer data;
/*
* If EventID field is RISCV_IOMMU_HPMEVENT_INVALID
* remove the current mapping.
*/
if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);
new_value = GPOINTER_TO_UINT(data) & ~(new_value);
if (new_value != 0) {
g_hash_table_replace(s->hpm_event_ctr_map,
GUINT_TO_POINTER(pair[1]),
GUINT_TO_POINTER(new_value));
} else {
g_hash_table_remove(s->hpm_event_ctr_map,
GUINT_TO_POINTER(pair[1]));
}
return;
}
/* Update the counter mask if the event is already enabled. */
if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
GUINT_TO_POINTER(event_id),
NULL,
&data)) {
new_value |= GPOINTER_TO_UINT(data);
}
g_hash_table_insert(s->hpm_event_ctr_map,
GUINT_TO_POINTER(event_id),
GUINT_TO_POINTER(new_value));
}
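
Note the container choice here: hpm_event_ctr_map keeps plain integers on both sides of a GHashTable (g_direct_hash/g_direct_equal), boxing the event ID key and the counter bitmask value with GUINT_TO_POINTER. A minimal sketch of that pattern outside the IOMMU, with invented event and counter numbers:

#include <glib.h>
#include <stdio.h>

int main(void)
{
    /* Direct hashing: the pointer value itself is the key, no allocation. */
    GHashTable *map = g_hash_table_new(g_direct_hash, g_direct_equal);

    guint event_id = 4;                  /* say, a TLB-miss-style event */
    guint mask = (1u << 2) | (1u << 5);  /* counters 2 and 5 track it */
    g_hash_table_insert(map, GUINT_TO_POINTER(event_id),
                        GUINT_TO_POINTER(mask));

    gpointer value = g_hash_table_lookup(map, GUINT_TO_POINTER(event_id));
    printf("event %u -> counter mask 0x%x\n",
           event_id, GPOINTER_TO_UINT(value));

    /* Caveat mirrored by update_event_map(): a mask of 0 boxes to NULL,
     * which is indistinguishable from "absent", hence the explicit remove
     * in the code above rather than storing a zero. */
    g_hash_table_unref(map);
    return 0;
}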
void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
{
const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
uint64_t val = riscv_iommu_reg_get64(s, evt_reg);
if (ctr_idx >= s->hpm_cntrs) {
return;
}
trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);
/* Clear OF bit in IOCOUNTOVF if it's being cleared in IOHPMEVT register. */
if (get_field(ovf, BIT(ctr_idx + 1)) &&
!get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
/* +1 to offset CYCLE register OF bit. */
riscv_iommu_reg_mod32(
s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
}
if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
/* Reset EventID (WARL) field to invalid. */
val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
RISCV_IOMMU_HPMEVENT_INVALID);
riscv_iommu_reg_set64(s, evt_reg, val);
}
update_event_map(s, val, ctr_idx);
}

hw/riscv/riscv-iommu-hpm.h Normal file (33 lines)

@ -0,0 +1,33 @@
/*
* RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
*
* Copyright (C) 2022-2023 Rivos Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HW_RISCV_IOMMU_HPM_H
#define HW_RISCV_IOMMU_HPM_H
#include "qom/object.h"
#include "hw/riscv/riscv-iommu.h"
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
unsigned event_id);
void riscv_iommu_hpm_timer_cb(void *priv);
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg);
#endif

hw/riscv/riscv-iommu.c

@ -29,6 +29,7 @@
#include "cpu_bits.h"
#include "riscv-iommu.h"
#include "riscv-iommu-bits.h"
#include "riscv-iommu-hpm.h"
#include "trace.h"
#define LIMIT_CACHE_CTX (1U << 7)
@ -38,7 +39,6 @@
#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS)
#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS)
typedef struct RISCVIOMMUContext RISCVIOMMUContext;
typedef struct RISCVIOMMUEntry RISCVIOMMUEntry;
/* Device assigned I/O address space */
@ -51,19 +51,6 @@ struct RISCVIOMMUSpace {
QLIST_ENTRY(RISCVIOMMUSpace) list;
};
/* Device translation context state. */
struct RISCVIOMMUContext {
uint64_t devid:24; /* Requester Id, AKA device_id */
uint64_t process_id:20; /* Process ID. PASID for PCIe */
uint64_t tc; /* Translation Control */
uint64_t ta; /* Translation Attributes */
uint64_t satp; /* S-Stage address translation and protection */
uint64_t gatp; /* G-Stage address translation and protection */
uint64_t msi_addr_mask; /* MSI filtering - address mask */
uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
uint64_t msiptp; /* MSI redirection page table pointer */
};
typedef enum RISCVIOMMUTransTag {
RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
@ -100,7 +87,7 @@ static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type)
}
}
static void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
{
uint32_t ipsr, icvec, vector;
@ -422,6 +409,13 @@ static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
}
}
if (pass == S_STAGE) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS);
} else {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS);
}
/* Read page table entry */
if (sc[pass].ptesize == 4) {
uint32_t pte32 = 0;
@ -940,6 +934,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
/* Device directory tree walk */
for (; depth-- > 0; ) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
/*
* Select device id index bits based on device directory tree level
* and device context format.
@ -967,6 +962,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN));
}
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
/* index into device context entry page */
addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK;
@ -1032,6 +1029,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
}
for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
/*
* Select process id index bits based on process directory tree
* level. See IOMMU Specification, 2.2. Process-Directory-Table.
@ -1049,6 +1048,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
}
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
/* Leaf entry in PDT */
addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
@ -1418,6 +1419,8 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
GHashTable *iot_cache;
int fault;
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ);
iot_cache = g_hash_table_ref(s->iot_cache);
/*
* TC[32] is reserved for custom extensions, used here to temporarily
@ -1428,6 +1431,7 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
/* Check for ATS request. */
if (iotlb->perm == IOMMU_NONE) {
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ);
/* Check if ATS is disabled. */
if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) {
enable_pri = false;
@ -1446,6 +1450,8 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
goto done;
}
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
/* Translate using device directory / page table information. */
fault = riscv_iommu_spa_fetch(s, ctx, iotlb);
@ -2018,6 +2024,27 @@ static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data)
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, ipsr_set, ipsr_clr);
}
static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
uint32_t regb,
bool prev_cy_inh)
{
switch (regb) {
case RISCV_IOMMU_REG_IOCOUNTINH:
riscv_iommu_process_iocntinh_cy(s, prev_cy_inh);
break;
case RISCV_IOMMU_REG_IOHPMCYCLES:
case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
riscv_iommu_process_hpmcycle_write(s);
break;
case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
riscv_iommu_process_hpmevt_write(s, regb & ~7);
break;
}
}
/*
* Write the resulting value of 'data' for the reg specified
* by 'reg_addr', after considering read-only/read-write/write-clear
@ -2045,6 +2072,7 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
uint32_t regb = addr & ~3;
uint32_t busy = 0;
uint64_t val = 0;
bool cy_inh = false;
if ((addr & (size - 1)) != 0) {
/* Unsupported MMIO alignment or access size */
@ -2112,6 +2140,16 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY;
break;
case RISCV_IOMMU_REG_IOCOUNTINH:
if (addr != RISCV_IOMMU_REG_IOCOUNTINH) {
break;
}
/* Store previous value of CY bit. */
cy_inh = !!(riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTINH) &
RISCV_IOMMU_IOCOUNTINH_CY);
break;
default:
break;
}
@ -2130,6 +2168,12 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
stl_le_p(&s->regs_rw[regb], rw | busy);
}
/* Process HPM writes and update any internal state if needed. */
if (regb >= RISCV_IOMMU_REG_IOCOUNTOVF &&
regb <= (RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4)) {
riscv_iommu_process_hpm_writes(s, regb, cy_inh);
}
if (process_fn) {
process_fn(s);
}
@ -2153,7 +2197,28 @@ static MemTxResult riscv_iommu_mmio_read(void *opaque, hwaddr addr,
return MEMTX_ACCESS_ERROR;
}
ptr = &s->regs_rw[addr];
/* Compute cycle register value. */
if ((addr & ~7) == RISCV_IOMMU_REG_IOHPMCYCLES) {
val = riscv_iommu_hpmcycle_read(s);
ptr = (uint8_t *)&val + (addr & 7);
} else if ((addr & ~3) == RISCV_IOMMU_REG_IOCOUNTOVF) {
/*
* Software can read RISCV_IOMMU_REG_IOCOUNTOVF before the timer
* callback completes, in which case the CY_OF bit in
* RISCV_IOMMU_IOHPMCYCLES_OVF would be 0. Here we take the
* CY_OF bit state from the RISCV_IOMMU_REG_IOHPMCYCLES register,
* as it does not depend on the timer callback and is computed
* from the cycle overflow.
*/
val = ldq_le_p(&s->regs_rw[addr]);
val |= (riscv_iommu_hpmcycle_read(s) & RISCV_IOMMU_IOHPMCYCLES_OVF)
? RISCV_IOMMU_IOCOUNTOVF_CY
: 0;
ptr = (uint8_t *)&val + (addr & 3);
} else {
ptr = &s->regs_rw[addr];
}
val = ldn_le_p(ptr, size);
*data = val;
@ -2292,6 +2357,15 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
RISCV_IOMMU_CAP_SV48X4 | RISCV_IOMMU_CAP_SV57X4;
}
if (s->hpm_cntrs > 0) {
/* Clip number of HPM counters to maximum supported (31). */
if (s->hpm_cntrs > RISCV_IOMMU_IOCOUNT_NUM) {
s->hpm_cntrs = RISCV_IOMMU_IOCOUNT_NUM;
}
/* Enable hardware performance monitor interface */
s->cap |= RISCV_IOMMU_CAP_HPM;
}
/* Out-of-reset translation mode: OFF (DMA disabled) BARE (passthrough) */
s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, s->enable_off ?
RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE);
@ -2339,6 +2413,18 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
}
/* If HPM registers are enabled. */
if (s->cap & RISCV_IOMMU_CAP_HPM) {
/* +1 for cycle counter bit. */
stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOCOUNTINH],
~((2 << s->hpm_cntrs) - 1));
stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCYCLES], 0);
memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCTR_BASE],
0x00, s->hpm_cntrs * 8);
memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMEVT_BASE],
0x00, s->hpm_cntrs * 8);
}
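
The IOCOUNTINH line above is the subtle one: bit 0 is the cycle-counter (CY) inhibit and bits 1..hpm_cntrs cover the event counters, so ~((2 << n) - 1) leaves exactly n+1 low bits writable. A standalone check of that mask, with the counter count chosen arbitrarily:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    unsigned hpm_cntrs = 8;  /* example; the device default is 31 */

    /* Low bit = CY inhibit, next hpm_cntrs bits = event-counter inhibits;
     * everything above stays read-only. */
    uint32_t ro_mask = ~(uint32_t)((2u << hpm_cntrs) - 1);

    printf("read-only mask 0x%08" PRIx32 ", writable bits 0..%u\n",
           ro_mask, hpm_cntrs);  /* -> 0xfffffe00, bits 0..8 writable */
    return 0;
}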
/* Memory region for downstream access, if specified. */
if (s->target_mr) {
s->target_as = g_new0(AddressSpace, 1);
@ -2353,6 +2439,12 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s,
"riscv-iommu-trap", ~0ULL);
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
if (s->cap & RISCV_IOMMU_CAP_HPM) {
s->hpm_timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, riscv_iommu_hpm_timer_cb, s);
s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
}
}
static void riscv_iommu_unrealize(DeviceState *dev)
@ -2361,6 +2453,11 @@ static void riscv_iommu_unrealize(DeviceState *dev)
g_hash_table_unref(s->iot_cache);
g_hash_table_unref(s->ctx_cache);
if (s->cap & RISCV_IOMMU_CAP_HPM) {
g_hash_table_unref(s->hpm_event_ctr_map);
timer_free(s->hpm_timer);
}
}
void riscv_iommu_reset(RISCVIOMMUState *s)
@ -2411,6 +2508,8 @@ static const Property riscv_iommu_properties[] = {
DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE),
DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs,
RISCV_IOMMU_IOCOUNT_NUM),
};
static void riscv_iommu_class_init(ObjectClass *klass, void* data)

hw/riscv/riscv-iommu.h

@ -20,6 +20,8 @@
#define HW_RISCV_IOMMU_STATE_H
#include "qom/object.h"
#include "hw/qdev-properties.h"
#include "system/dma.h"
#include "hw/riscv/iommu.h"
#include "hw/riscv/riscv-iommu-bits.h"
@ -58,11 +60,6 @@ struct RISCVIOMMUState {
/* interrupt notifier */
void (*notify)(RISCVIOMMUState *iommu, unsigned vector);
/* IOMMU State Machine */
QemuThread core_proc; /* Background processing thread */
QemuCond core_cond; /* Background processing wake up signal */
unsigned core_exec; /* Processing thread execution actions */
/* IOMMU target address space */
AddressSpace *target_as;
MemoryRegion *target_mr;
@ -84,12 +81,37 @@ struct RISCVIOMMUState {
QLIST_ENTRY(RISCVIOMMUState) iommus;
QLIST_HEAD(, RISCVIOMMUSpace) spaces;
/* HPM cycle counter */
QEMUTimer *hpm_timer;
uint64_t hpmcycle_val; /* Current value of cycle register */
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
uint64_t irq_overflow_left; /* Value beyond INT64_MAX after overflow */
/* HPM event counters */
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
uint8_t hpm_cntrs;
};
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
Error **errp);
void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode);
void riscv_iommu_reset(RISCVIOMMUState *s);
void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type);
typedef struct RISCVIOMMUContext RISCVIOMMUContext;
/* Device translation context state. */
struct RISCVIOMMUContext {
uint64_t devid:24; /* Requester Id, AKA device_id */
uint64_t process_id:20; /* Process ID. PASID for PCIe */
uint64_t tc; /* Translation Control */
uint64_t ta; /* Translation Attributes */
uint64_t satp; /* S-Stage address translation and protection */
uint64_t gatp; /* G-Stage address translation and protection */
uint64_t msi_addr_mask; /* MSI filtering - address mask */
uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
uint64_t msiptp; /* MSI redirection page table pointer */
};
/* private helpers */

hw/riscv/trace-events

@ -19,3 +19,8 @@ riscv_iommu_sys_irq_sent(uint32_t vector) "IRQ sent to vector %u"
riscv_iommu_sys_msi_sent(uint32_t vector, uint64_t msi_addr, uint32_t msi_data, uint32_t result) "MSI sent to vector %u msi_addr 0x%"PRIx64" msi_data 0x%x result %u"
riscv_iommu_sys_reset_hold(int reset_type) "reset type %d"
riscv_iommu_pci_reset_hold(int reset_type) "reset type %d"
riscv_iommu_hpm_read(uint64_t cycle, uint32_t inhibit, uint64_t ctr_prev, uint64_t ctr_val) "cycle 0x%"PRIx64" inhibit 0x%x ctr_prev 0x%"PRIx64" ctr_val 0x%"PRIx64
riscv_iommu_hpm_incr_ctr(uint64_t cntr_val) "cntr_val 0x%"PRIx64
riscv_iommu_hpm_iocntinh_cy(bool prev_cy_inh) "prev_cy_inh %d"
riscv_iommu_hpm_cycle_write(uint32_t ovf, uint64_t val) "ovf 0x%x val 0x%"PRIx64
riscv_iommu_hpm_evt_write(uint32_t ctr_idx, uint32_t ovf, uint64_t val) "ctr_idx 0x%x ovf 0x%x val 0x%"PRIx64

hw/riscv/virt.c

@ -971,6 +971,7 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
}
qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name);
qemu_fdt_setprop_string(ms->fdt, "/aliases", "serial0", name);
}
static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
@ -1180,6 +1181,8 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed",
rng_seed, sizeof(rng_seed));
qemu_fdt_add_subnode(ms->fdt, "/aliases");
create_fdt_flash(s, memmap);
create_fdt_fw_cfg(s, memmap);
create_fdt_pmu(s);

hw/rtc/goldfish_rtc.c

@ -178,38 +178,21 @@ static void goldfish_rtc_write(void *opaque, hwaddr offset,
trace_goldfish_rtc_write(offset, value);
}
static int goldfish_rtc_pre_save(void *opaque)
{
uint64_t delta;
GoldfishRTCState *s = opaque;
/*
* We want to migrate this offset, which sounds straightforward.
* Unfortunately, we cannot directly pass tick_offset because
* rtc_clock on destination Host might not be same source Host.
*
* To tackle, this we pass tick_offset relative to vm_clock from
* source Host and make it relative to rtc_clock at destination Host.
*/
delta = qemu_clock_get_ns(rtc_clock) -
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset_vmstate = s->tick_offset + delta;
return 0;
}
static int goldfish_rtc_post_load(void *opaque, int version_id)
{
uint64_t delta;
GoldfishRTCState *s = opaque;
/*
* We extract tick_offset from tick_offset_vmstate by doing
* reverse math compared to pre_save() function.
*/
delta = qemu_clock_get_ns(rtc_clock) -
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset = s->tick_offset_vmstate - delta;
if (version_id < 3) {
/*
* Previous versions didn't migrate tick_offset directly. Instead, they
* migrated tick_offset_vmstate, which is a recalculation based on
* QEMU_CLOCK_VIRTUAL. We use tick_offset_vmstate when migrating from
* older versions.
*/
uint64_t delta = qemu_clock_get_ns(rtc_clock) -
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->tick_offset = s->tick_offset_vmstate - delta;
}
goldfish_rtc_set_alarm(s);
@ -239,8 +222,7 @@ static const MemoryRegionOps goldfish_rtc_ops[2] = {
static const VMStateDescription goldfish_rtc_vmstate = {
.name = TYPE_GOLDFISH_RTC,
.version_id = 2,
.pre_save = goldfish_rtc_pre_save,
.version_id = 3,
.post_load = goldfish_rtc_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState),
@ -249,6 +231,7 @@ static const VMStateDescription goldfish_rtc_vmstate = {
VMSTATE_UINT32(irq_pending, GoldfishRTCState),
VMSTATE_UINT32(irq_enabled, GoldfishRTCState),
VMSTATE_UINT32(time_high, GoldfishRTCState),
VMSTATE_UINT64_V(tick_offset, GoldfishRTCState, 3),
VMSTATE_END_OF_LIST()
}
};
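
The fix follows QEMU's stock migration-compatibility recipe: bump version_id, carry the new field only in new-enough streams via a versioned field macro, and let post_load reconstruct it when fed an older stream. A schematic sketch of that recipe with invented device and field names (assumes the QEMU tree's migration headers; not a drop-in implementation):

#include "qemu/osdep.h"          /* QEMU build environment assumed */
#include "migration/vmstate.h"

typedef struct FooState {
    uint64_t legacy_field;       /* what pre-v3 streams carried */
    uint64_t new_field;          /* migrated directly since v3 */
} FooState;

static int foo_post_load(void *opaque, int version_id)
{
    FooState *s = opaque;
    if (version_id < 3) {
        /* Old-stream fixup: reconstruct the new field from the legacy one. */
        s->new_field = s->legacy_field;
    }
    return 0;
}

static const VMStateDescription foo_vmstate = {
    .name = "foo",
    .version_id = 3,             /* bumped when new_field was added */
    .minimum_version_id = 1,
    .post_load = foo_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(legacy_field, FooState),
        VMSTATE_UINT64_V(new_field, FooState, 3),  /* v3+ streams only */
        VMSTATE_END_OF_LIST()
    }
};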

include/standard-headers/linux/ethtool.h

@ -681,6 +681,8 @@ enum ethtool_link_ext_substate_module {
* @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
* @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
* @ETH_SS_STATS_RMON: names of RMON statistics
* @ETH_SS_STATS_PHY: names of PHY(dev) statistics
* @ETH_SS_TS_FLAGS: hardware timestamping flags
*
* @ETH_SS_COUNT: number of defined string sets
*/
@ -706,6 +708,8 @@ enum ethtool_stringset {
ETH_SS_STATS_ETH_MAC,
ETH_SS_STATS_ETH_CTRL,
ETH_SS_STATS_RMON,
ETH_SS_STATS_PHY,
ETH_SS_TS_FLAGS,
/* add new constants above here */
ETH_SS_COUNT

include/standard-headers/linux/fuse.h

@ -220,6 +220,15 @@
*
* 7.41
* - add FUSE_ALLOW_IDMAP
* 7.42
* - Add FUSE_OVER_IO_URING and all other io-uring related flags and data
* structures:
* - struct fuse_uring_ent_in_out
* - struct fuse_uring_req_header
* - struct fuse_uring_cmd_req
* - FUSE_URING_IN_OUT_HEADER_SZ
* - FUSE_URING_OP_IN_OUT_SZ
* - enum fuse_uring_cmd
*/
#ifndef _LINUX_FUSE_H
@ -251,7 +260,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
#define FUSE_KERNEL_MINOR_VERSION 41
#define FUSE_KERNEL_MINOR_VERSION 42
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@ -421,6 +430,7 @@ struct fuse_file_lock {
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
* FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
* FUSE_OVER_IO_URING: Indicate that client supports io-uring
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@ -467,6 +477,7 @@ struct fuse_file_lock {
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
#define FUSE_ALLOW_IDMAP (1ULL << 40)
#define FUSE_OVER_IO_URING (1ULL << 41)
/**
* CUSE INIT request/reply flags
@ -1202,4 +1213,67 @@ struct fuse_supp_groups {
uint32_t groups[];
};
/**
* Size of the ring buffer header
*/
#define FUSE_URING_IN_OUT_HEADER_SZ 128
#define FUSE_URING_OP_IN_OUT_SZ 128
/* Used as part of the fuse_uring_req_header */
struct fuse_uring_ent_in_out {
uint64_t flags;
/*
* commit ID to be used in a reply to a ring request (see also
* struct fuse_uring_cmd_req)
*/
uint64_t commit_id;
/* size of user payload buffer */
uint32_t payload_sz;
uint32_t padding;
uint64_t reserved;
};
/**
* Header for all fuse-io-uring requests
*/
struct fuse_uring_req_header {
/* struct fuse_in_header / struct fuse_out_header */
char in_out[FUSE_URING_IN_OUT_HEADER_SZ];
/* per op code header */
char op_in[FUSE_URING_OP_IN_OUT_SZ];
struct fuse_uring_ent_in_out ring_ent_in_out;
};
/**
* sqe commands to the kernel
*/
enum fuse_uring_cmd {
FUSE_IO_URING_CMD_INVALID = 0,
/* register the request buffer and fetch a fuse request */
FUSE_IO_URING_CMD_REGISTER = 1,
/* commit fuse request result and fetch next request */
FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2,
};
/**
* In the 80B command area of the SQE.
*/
struct fuse_uring_cmd_req {
uint64_t flags;
/* entry identifier for commits */
uint64_t commit_id;
/* queue the command is for (queue index) */
uint16_t qid;
uint8_t padding[6];
};
#endif /* _LINUX_FUSE_H */

include/standard-headers/linux/input-event-codes.h

@ -519,6 +519,7 @@
#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
#define KEY_LINK_PHONE 0x1bf /* AL Phone Syncing */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1

include/standard-headers/linux/pci_regs.h

@ -533,7 +533,7 @@
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
@ -665,6 +665,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX 0x00c00000 /* Max End-End TLP Prefixes */
#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
@ -789,10 +790,11 @@
/* Same bits as above */
#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_CAP_PREFIX_LOG_PRESENT 0x00000800 /* TLP Prefix Log Present */
#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
@ -808,6 +810,7 @@
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
#define PCI_ERR_PREFIX_LOG 0x38 /* TLP Prefix LOG Register (up to 16 bytes) */
/* Virtual Channel */
#define PCI_VC_PORT_CAP1 0x04
@ -1001,9 +1004,6 @@
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */
#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
/* SATA capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */

include/standard-headers/linux/virtio_pci.h

@ -116,6 +116,8 @@
#define VIRTIO_PCI_CAP_PCI_CFG 5
/* Additional shared memory capability */
#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
/* PCI vendor data configuration */
#define VIRTIO_PCI_CAP_VENDOR_CFG 9
/* This is the PCI capability header: */
struct virtio_pci_cap {
@ -130,6 +132,18 @@ struct virtio_pci_cap {
uint32_t length; /* Length of the structure, in bytes. */
};
/* This is the PCI vendor data capability header: */
struct virtio_pci_vndr_data {
uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
uint8_t cap_next; /* Generic PCI field: next ptr. */
uint8_t cap_len; /* Generic PCI field: capability length */
uint8_t cfg_type; /* Identifies the structure. */
uint16_t vendor_id; /* Identifies the vendor-specific format. */
/* For Vendor Definition */
/* Pads structure to a multiple of 4 bytes */
/* Reads must not have side effects */
};
struct virtio_pci_cap64 {
struct virtio_pci_cap cap;
uint32_t offset_hi; /* Most sig 32 bits of offset */

linux-headers/asm-arm64/kvm.h

@ -43,9 +43,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */

linux-headers/asm-loongarch/kvm_para.h

@ -17,5 +17,6 @@
#define KVM_FEATURE_STEAL_TIME 2
/* BIT 24 - 31 are features configurable by user space vmm */
#define KVM_FEATURE_VIRT_EXTIOI 24
#define KVM_FEATURE_USER_HCALL 25
#endif /* _ASM_KVM_PARA_H */

linux-headers/asm-riscv/kvm.h

@ -179,6 +179,9 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_SSNPM,
KVM_RISCV_ISA_EXT_SVADE,
KVM_RISCV_ISA_EXT_SVADU,
KVM_RISCV_ISA_EXT_SVVPTC,
KVM_RISCV_ISA_EXT_ZABHA,
KVM_RISCV_ISA_EXT_ZICCRSE,
KVM_RISCV_ISA_EXT_MAX,
};
@ -198,6 +201,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
KVM_RISCV_SBI_EXT_SUSP,
KVM_RISCV_SBI_EXT_MAX,
};
@ -211,9 +215,6 @@ struct kvm_riscv_sbi_sta {
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24

linux-headers/asm-x86/kvm.h

@ -923,5 +923,6 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SEV_VM 2
#define KVM_X86_SEV_ES_VM 3
#define KVM_X86_SNP_VM 4
#define KVM_X86_TDX_VM 5
#endif /* _ASM_X86_KVM_H */

linux-headers/linux/iommufd.h

@ -297,7 +297,7 @@ struct iommu_ioas_unmap {
* ioctl(IOMMU_OPTION_HUGE_PAGES)
* @IOMMU_OPTION_RLIMIT_MODE:
* Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
* to invoke this. Value 0 (default) is user based accouting, 1 uses process
* to invoke this. Value 0 (default) is user based accounting, 1 uses process
* based accounting. Global option, object_id must be 0
* @IOMMU_OPTION_HUGE_PAGES:
* Value 1 (default) allows contiguous pages to be combined when generating
@ -390,7 +390,7 @@ struct iommu_vfio_ioas {
* @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
* domain can be attached to any PASID on the device.
* Any domain attached to the non-PASID part of the
* device must also be flaged, otherwise attaching a
* device must also be flagged, otherwise attaching a
* PASID will be blocked.
* If IOMMU does not support PASID it will return
* error (-EOPNOTSUPP).
@ -558,16 +558,25 @@ struct iommu_hw_info_vtd {
* For the details of @idr, @iidr and @aidr, please refer to the chapters
* from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
*
* User space should read the underlying ARM SMMUv3 hardware information for
* the list of supported features.
* This reports the raw HW capability, and not all bits are meaningful to be
* read by userspace. Only the following fields should be used:
*
* Note that these values reflect the raw HW capability, without any insight if
* any required kernel driver support is present. Bits may be set indicating the
* HW has functionality that is lacking kernel software support, such as BTM. If
* a VMM is using this information to construct emulated copies of these
* registers it should only forward bits that it knows it can support.
* idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN, CD2L, ASID16, TTF
* idr[1]: SIDSIZE, SSIDSIZE
* idr[3]: BBML, RIL
* idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
*
* In future, presence of required kernel support will be indicated in flags.
* - S1P should be assumed to be true if a NESTED HWPT can be created
* - VFIO/iommufd only support platforms with COHACC, it should be assumed to be
* true.
* - ATS is a per-device property. If the VMM describes any devices as ATS
* capable in ACPI/DT it should set the corresponding idr.
*
* This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
* important that VMMs do not read bits outside the list to allow for
* compatibility with future kernels. Several features in the SMMUv3
* architecture are not currently supported by the kernel for nesting: HTTU,
* BTM, MPAM and others.
*/
struct iommu_hw_info_arm_smmuv3 {
__u32 flags;
@ -766,7 +775,7 @@ struct iommu_hwpt_vtd_s1_invalidate {
};
/**
-* struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cahce invalidation
+* struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
* (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
* @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
* Must be little-endian.
@ -859,6 +868,7 @@ enum iommu_hwpt_pgfault_perm {
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: Combination of enum iommu_hwpt_pgfault_perm
* @__reserved: Must be 0.
* @addr: Fault address
* @length: a hint of how much data the requestor is expecting to fetch. For
* example, if the PRI initiator knows it is going to do a 10MB
@ -874,7 +884,8 @@ struct iommu_hwpt_pgfault {
__u32 pasid;
__u32 grpid;
__u32 perm;
-__u64 addr;
+__u32 __reserved;
+__aligned_u64 addr;
__u32 length;
__u32 cookie;
};


@ -609,10 +609,6 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
-#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
-KVM_X86_DISABLE_EXITS_HLT | \
-KVM_X86_DISABLE_EXITS_PAUSE | \
-KVM_X86_DISABLE_EXITS_CSTATE)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@ -1062,6 +1058,10 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
#define KVM_REG_SIZE_U8 0x0000000000000000ULL
#define KVM_REG_SIZE_U16 0x0010000000000000ULL
#define KVM_REG_SIZE_U32 0x0020000000000000ULL
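/*
 * Worked example (illustrative, not part of this patch): an id tagged with
 * KVM_REG_SIZE_U32 has ((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT == 2,
 * so KVM_REG_SIZE(id) == (1U << 2) == 4 bytes; a U64-tagged id yields 8.
 */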


@ -8,6 +8,13 @@
#define __always_inline __inline__
#endif
/* Not all C++ standards support type declarations inside an anonymous union */
#ifndef __cplusplus
#define __struct_group_tag(TAG) TAG
#else
#define __struct_group_tag(TAG)
#endif
/**
* __struct_group() - Create a mirrored named and anonymous struct
*
@ -20,13 +27,13 @@
* and size: one anonymous and one named. The former's members can be used
* normally without sub-struct naming, and the latter can be used to
* reason about the start, end, and size of the group of struct members.
-* The named struct can also be explicitly tagged for layer reuse, as well
-* as both having struct attributes appended.
+* The named struct can also be explicitly tagged for layer reuse (C only),
+* as well as both having struct attributes appended.
*/
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
union { \
struct { MEMBERS } ATTRS; \
-struct TAG { MEMBERS } ATTRS NAME; \
+struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
} ATTRS
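/*
 * Usage sketch (illustrative, not from this patch; 'pkt' and 'pkt_hdr' are
 * hypothetical names): the macro emits the members twice, once anonymously
 * and once under NAME, optionally tagged:
 *
 *     struct pkt {
 *         __struct_group(pkt_hdr, hdr, ,
 *             __u8 type;
 *             __u8 len;
 *         );
 *         __u8 payload[8];
 *     };
 *
 * p.type and p.hdr.type then alias the same storage, and the C-only tag
 * (per __struct_group_tag() above) lets 'struct pkt_hdr' be reused.
 */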
#ifdef __cplusplus


@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
#ifndef _VDUSE_H_
#define _VDUSE_H_


@ -144,35 +144,35 @@ loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x
loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
loongarch64_family=loongarch
-qemu_get_family() {
-cpu=${HOST_ARCH:-$(uname -m)}
+# Converts the name of a host CPU architecture to the corresponding QEMU
+# target.
+#
+# FIXME: This can probably be simplified a lot by dropping most entries.
+# Remember that the script is only used on Linux, so we only need to
+# handle the strings Linux uses to report the host CPU architecture.
+qemu_normalize() {
+cpu="$1"
case "$cpu" in
-amd64|i386|i486|i586|i686|i86pc|BePC|x86_64)
+i[3-6]86)
echo "i386"
;;
-mips*)
-echo "mips"
+amd64)
+echo "x86_64"
;;
-"Power Macintosh"|ppc64|powerpc|ppc)
+powerpc)
echo "ppc"
;;
-ppc64el|ppc64le)
-echo "ppcle"
+ppc64el)
+echo "ppc64le"
;;
-arm|armel|armhf|arm64|armv[4-9]*l|aarch64)
+armel|armhf|armv[4-9]*l)
echo "arm"
;;
-armeb|armv[4-9]*b|aarch64_be)
+armv[4-9]*b)
echo "armeb"
;;
-sparc*)
-echo "sparc"
-;;
-riscv*)
-echo "riscv"
-;;
-loongarch*)
-echo "loongarch"
+arm64)
+echo "aarch64"
;;
*)
echo "$cpu"
@ -205,6 +205,9 @@ Usage: qemu-binfmt-conf.sh [--qemu-path PATH][--debian][--systemd CPU]
--persistent: if yes, the interpreter is loaded when binfmt is
configured and remains in memory. All future uses
are cloned from the open file.
--ignore-family: if yes, it is assumed that the host CPU (e.g. riscv64)
can't natively run programs targeting a CPU that is
part of the same family (e.g. riscv32).
--preserve-argv0 preserve argv[0]
To import templates with update-binfmts, use :
@ -309,7 +312,13 @@ EOF
qemu_set_binfmts() {
# probe cpu type
-host_family=$(qemu_get_family)
+host_cpu=$(qemu_normalize ${HOST_ARCH:-$(uname -m)})
+host_family=$(eval echo \$${host_cpu}_family)
+if [ "$host_family" = "" ] ; then
+echo "INTERNAL ERROR: unknown host cpu $host_cpu" 1>&2
+exit 1
+fi
# register the interpreter for each cpu except for the native one
@ -318,20 +327,28 @@ qemu_set_binfmts() {
mask=$(eval echo \$${cpu}_mask)
family=$(eval echo \$${cpu}_family)
target="$cpu"
if [ "$cpu" = "i486" ] ; then
target="i386"
fi
qemu="$QEMU_PATH/qemu-$target$QEMU_SUFFIX"
if [ "$magic" = "" ] || [ "$mask" = "" ] || [ "$family" = "" ] ; then
echo "INTERNAL ERROR: unknown cpu $cpu" 1>&2
continue
fi
qemu="$QEMU_PATH/qemu-$cpu"
if [ "$cpu" = "i486" ] ; then
qemu="$QEMU_PATH/qemu-i386"
if [ "$host_family" = "$family" ] ; then
# When --ignore-family is used, we have to generate rules even
# for targets that are in the same family as the host CPU. The
# only exception is of course when the CPU types exactly match
if [ "$target" = "$host_cpu" ] || [ "$IGNORE_FAMILY" = "no" ] ; then
continue
fi
fi
qemu="$qemu$QEMU_SUFFIX"
if [ "$host_family" != "$family" ] ; then
$BINFMT_SET
fi
$BINFMT_SET
done
}
@ -346,10 +363,11 @@ CREDENTIAL=no
PERSISTENT=no
PRESERVE_ARG0=no
QEMU_SUFFIX=""
IGNORE_FAMILY=no
_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\
persistent:,preserve-argv0:"
options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@")
persistent:,preserve-argv0:,ignore-family:"
options=$(getopt -o ds:Q:S:e:hc:p:g:F:i: -l ${_longopts} -- "$@")
eval set -- "$options"
while true ; do
@ -409,6 +427,10 @@ while true ; do
shift
PRESERVE_ARG0="$1"
;;
-i|--ignore-family)
shift
IGNORE_FAMILY="$1"
;;
*)
break
;;


@ -40,6 +40,8 @@
#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e")
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")


@ -105,7 +105,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
-ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
+ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
@ -213,8 +213,11 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
@ -1591,6 +1594,8 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
@ -1737,6 +1742,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
{ },
};
@ -2027,6 +2033,7 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
RISCVCPU *cpu = RISCV_CPU(obj);
uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
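/* e.g. the default vlenb of 16 bytes corresponds to vlen = 128 bits */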
uint16_t value;
if (!visit_type_uint16(v, name, &value, errp)) {
@ -2038,10 +2045,10 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
return;
}
-if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
+if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
cpu_set_prop_err(cpu, name, errp);
error_append_hint(errp, "Current '%s' val: %u\n",
-name, cpu->cfg.vlenb << 3);
+name, cpu_vlen);
return;
}
@ -2341,9 +2348,10 @@ static const PropertyInfo prop_marchid = {
* doesn't need to be manually enabled by the profile.
*/
static RISCVCPUProfile RVA22U64 = {
-.parent = NULL,
+.u_parent = NULL,
+.s_parent = NULL,
.name = "rva22u64",
-.misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
+.misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
.ext_offsets = {
@ -2373,7 +2381,8 @@ static RISCVCPUProfile RVA22U64 = {
* The remaining features/extensions come from RVA22U64.
*/
static RISCVCPUProfile RVA22S64 = {
-.parent = &RVA22U64,
+.u_parent = &RVA22U64,
+.s_parent = NULL,
.name = "rva22s64",
.misa_ext = RVS,
.priv_spec = PRIV_VERSION_1_12_0,
@ -2387,9 +2396,65 @@ static RISCVCPUProfile RVA22S64 = {
}
};
/*
* All mandatory extensions from RVA22U64 are present
* in RVA23U64 so set RVA22 as a parent. We need to
* declare just the newly added mandatory extensions.
*/
static RISCVCPUProfile RVA23U64 = {
.u_parent = &RVA22U64,
.s_parent = NULL,
.name = "rva23u64",
.misa_ext = RVV,
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
.ext_offsets = {
CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
CPU_CFG_OFFSET(ext_supm),
RISCV_PROFILE_EXT_LIST_END
}
};
/*
* As with RVA23U64, RVA23S64 also defines 'named features'.
*
* Cache related features that we consider enabled since we don't
* implement cache: Ssccptr
*
* Other named features that we already implement: Sstvecd, Sstvala,
* Sscounterenw, Ssu64xl
*
* The remaining features/extensions come from RVA23U64.
*/
static RISCVCPUProfile RVA23S64 = {
.u_parent = &RVA23U64,
.s_parent = &RVA22S64,
.name = "rva23s64",
.misa_ext = RVS,
.priv_spec = PRIV_VERSION_1_13_0,
.satp_mode = VM_1_10_SV39,
.ext_offsets = {
/* New in RVA23S64 */
CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
/* Named features: Sha */
CPU_CFG_OFFSET(ext_sha),
RISCV_PROFILE_EXT_LIST_END
}
};
RISCVCPUProfile *riscv_profiles[] = {
&RVA22U64,
&RVA22S64,
&RVA23U64,
&RVA23S64,
NULL,
};
@ -2796,6 +2861,26 @@ static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
},
};
static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
.ext = CPU_CFG_OFFSET(ext_smctr),
.implied_misa_exts = RVS,
.implied_multi_exts = {
CPU_CFG_OFFSET(ext_sscsrind),
RISCV_IMPLIED_EXTS_RULE_END
},
};
static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
.ext = CPU_CFG_OFFSET(ext_ssctr),
.implied_misa_exts = RVS,
.implied_multi_exts = {
CPU_CFG_OFFSET(ext_sscsrind),
RISCV_IMPLIED_EXTS_RULE_END
},
};
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
&RVM_IMPLIED, &RVV_IMPLIED, NULL
@ -2814,7 +2899,7 @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
&ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
-&SUPM_IMPLIED, &SSPM_IMPLIED,
+&SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
NULL
};
@ -2876,6 +2961,20 @@ static void rva22s64_profile_cpu_init(Object *obj)
RVA22S64.enabled = true;
}
static void rva23u64_profile_cpu_init(Object *obj)
{
rv64i_bare_cpu_init(obj);
RVA23U64.enabled = true;
}
static void rva23s64_profile_cpu_init(Object *obj)
{
rv64i_bare_cpu_init(obj);
RVA23S64.enabled = true;
}
#endif
static const gchar *riscv_gdb_arch_name(CPUState *cs)
@ -3146,6 +3245,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};


@ -81,7 +81,8 @@ const char *riscv_get_misa_ext_description(uint32_t bit);
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
typedef struct riscv_cpu_profile {
-struct riscv_cpu_profile *parent;
+struct riscv_cpu_profile *u_parent;
+struct riscv_cpu_profile *s_parent;
const char *name;
uint32_t misa_ext;
bool enabled;
@ -312,6 +313,15 @@ struct CPUArchState {
target_ulong mcause;
target_ulong mtval; /* since: priv-1.10.0 */
uint64_t mctrctl;
uint32_t sctrdepth;
uint32_t sctrstatus;
uint64_t vsctrctl;
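/* 16 << SCTRDEPTH_MAX = 256: arrays are sized for the deepest configurable buffer */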
uint64_t ctr_src[16 << SCTRDEPTH_MAX];
uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
uint64_t ctr_data[16 << SCTRDEPTH_MAX];
/* Machine and Supervisor interrupt priorities */
uint8_t miprio[64];
uint8_t siprio[64];
@ -607,6 +617,10 @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
enum CTRType type, target_ulong prev_priv, bool prev_virt);
void riscv_ctr_clear(CPURISCVState *env);
void riscv_translate_init(void);
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
int *max_insns, vaddr pc, void *host_pc);


@ -247,6 +247,17 @@
#define CSR_SIEH 0x114
#define CSR_SIPH 0x154
/* Machine-Level Control transfer records CSRs */
#define CSR_MCTRCTL 0x34e
/* Supervisor-Level Control transfer records CSRs */
#define CSR_SCTRCTL 0x14e
#define CSR_SCTRSTATUS 0x14f
#define CSR_SCTRDEPTH 0x15f
/* VS-Level Control transfer records CSRs */
#define CSR_VSCTRCTL 0x24e
/* Hypervisor CSRs */
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
@ -344,6 +355,7 @@
#define SMSTATEEN0_CS (1ULL << 0)
#define SMSTATEEN0_FCSR (1ULL << 1)
#define SMSTATEEN0_JVT (1ULL << 2)
#define SMSTATEEN0_CTR (1ULL << 54)
#define SMSTATEEN0_P1P13 (1ULL << 56)
#define SMSTATEEN0_HSCONTXT (1ULL << 57)
#define SMSTATEEN0_IMSIC (1ULL << 58)
@ -825,6 +837,139 @@ typedef enum RISCVException {
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE
/* Offsets for every pair of control bits per each priv level */
#define XS_OFFSET 0ULL
#define U_OFFSET 2ULL
#define S_OFFSET 5ULL
#define M_OFFSET 8ULL
#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET)
#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
#define U_PM_INSN (PM_INSN << U_OFFSET)
#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
#define S_PM_INSN (PM_INSN << S_OFFSET)
#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
#define M_PM_INSN (PM_INSN << M_OFFSET)
/* mmte CSR bits */
#define MMTE_PM_XS_BITS PM_XS_BITS
#define MMTE_U_PM_ENABLE U_PM_ENABLE
#define MMTE_U_PM_CURRENT U_PM_CURRENT
#define MMTE_U_PM_INSN U_PM_INSN
#define MMTE_S_PM_ENABLE S_PM_ENABLE
#define MMTE_S_PM_CURRENT S_PM_CURRENT
#define MMTE_S_PM_INSN S_PM_INSN
#define MMTE_M_PM_ENABLE M_PM_ENABLE
#define MMTE_M_PM_CURRENT M_PM_CURRENT
#define MMTE_M_PM_INSN M_PM_INSN
#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
MMTE_PM_XS_BITS)
/* (v)smte CSR bits */
#define SMTE_PM_XS_BITS PM_XS_BITS
#define SMTE_U_PM_ENABLE U_PM_ENABLE
#define SMTE_U_PM_CURRENT U_PM_CURRENT
#define SMTE_U_PM_INSN U_PM_INSN
#define SMTE_S_PM_ENABLE S_PM_ENABLE
#define SMTE_S_PM_CURRENT S_PM_CURRENT
#define SMTE_S_PM_INSN S_PM_INSN
#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
SMTE_PM_XS_BITS)
/* umte CSR bits */
#define UMTE_U_PM_ENABLE U_PM_ENABLE
#define UMTE_U_PM_CURRENT U_PM_CURRENT
#define UMTE_U_PM_INSN U_PM_INSN
#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
/* CTR control register common fields */
#define XCTRCTL_U BIT_ULL(0)
#define XCTRCTL_S BIT_ULL(1)
#define XCTRCTL_RASEMU BIT_ULL(7)
#define XCTRCTL_STE BIT_ULL(8)
#define XCTRCTL_BPFRZ BIT_ULL(11)
#define XCTRCTL_LCOFIFRZ BIT_ULL(12)
#define XCTRCTL_EXCINH BIT_ULL(33)
#define XCTRCTL_INTRINH BIT_ULL(34)
#define XCTRCTL_TRETINH BIT_ULL(35)
#define XCTRCTL_NTBREN BIT_ULL(36)
#define XCTRCTL_TKBRINH BIT_ULL(37)
#define XCTRCTL_INDCALLINH BIT_ULL(40)
#define XCTRCTL_DIRCALLINH BIT_ULL(41)
#define XCTRCTL_INDJMPINH BIT_ULL(42)
#define XCTRCTL_DIRJMPINH BIT_ULL(43)
#define XCTRCTL_CORSWAPINH BIT_ULL(44)
#define XCTRCTL_RETINH BIT_ULL(45)
#define XCTRCTL_INDLJMPINH BIT_ULL(46)
#define XCTRCTL_DIRLJMPINH BIT_ULL(47)
#define XCTRCTL_MASK (XCTRCTL_U | XCTRCTL_S | XCTRCTL_RASEMU | \
XCTRCTL_STE | XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ | \
XCTRCTL_EXCINH | XCTRCTL_INTRINH | XCTRCTL_TRETINH | \
XCTRCTL_NTBREN | XCTRCTL_TKBRINH | XCTRCTL_INDCALLINH | \
XCTRCTL_DIRCALLINH | XCTRCTL_INDJMPINH | \
XCTRCTL_DIRJMPINH | XCTRCTL_CORSWAPINH | \
XCTRCTL_RETINH | XCTRCTL_INDLJMPINH | XCTRCTL_DIRLJMPINH)
#define XCTRCTL_INH_START 32U
/* CTR mctrctl bits */
#define MCTRCTL_M BIT_ULL(2)
#define MCTRCTL_MTE BIT_ULL(9)
#define MCTRCTL_MASK (XCTRCTL_MASK | MCTRCTL_M | MCTRCTL_MTE)
#define SCTRCTL_MASK XCTRCTL_MASK
#define VSCTRCTL_MASK XCTRCTL_MASK
/* sctrstatus CSR bits. */
#define SCTRSTATUS_WRPTR_MASK 0xFF
#define SCTRSTATUS_FROZEN BIT(31)
#define SCTRSTATUS_MASK (SCTRSTATUS_WRPTR_MASK | SCTRSTATUS_FROZEN)
/* sctrdepth CSR bits. */
#define SCTRDEPTH_MASK 0x7
#define SCTRDEPTH_MIN 0U /* 16 Entries. */
#define SCTRDEPTH_MAX 4U /* 256 Entries. */
#define CTR_ENTRIES_FIRST 0x200
#define CTR_ENTRIES_LAST 0x2ff
#define CTRSOURCE_VALID BIT(0)
#define CTRTARGET_MISP BIT(0)
#define CTRDATA_TYPE_MASK 0xF
#define CTRDATA_CCV BIT(15)
#define CTRDATA_CCM_MASK 0xFFF0000
#define CTRDATA_CCE_MASK 0xF0000000
#define CTRDATA_MASK (CTRDATA_TYPE_MASK | CTRDATA_CCV | \
CTRDATA_CCM_MASK | CTRDATA_CCE_MASK)
typedef enum CTRType {
CTRDATA_TYPE_NONE = 0,
CTRDATA_TYPE_EXCEPTION = 1,
CTRDATA_TYPE_INTERRUPT = 2,
CTRDATA_TYPE_EXCEP_INT_RET = 3,
CTRDATA_TYPE_NONTAKEN_BRANCH = 4,
CTRDATA_TYPE_TAKEN_BRANCH = 5,
CTRDATA_TYPE_RESERVED_0 = 6,
CTRDATA_TYPE_RESERVED_1 = 7,
CTRDATA_TYPE_INDIRECT_CALL = 8,
CTRDATA_TYPE_DIRECT_CALL = 9,
CTRDATA_TYPE_INDIRECT_JUMP = 10,
CTRDATA_TYPE_DIRECT_JUMP = 11,
CTRDATA_TYPE_CO_ROUTINE_SWAP = 12,
CTRDATA_TYPE_RETURN = 13,
CTRDATA_TYPE_OTHER_INDIRECT_JUMP = 14,
CTRDATA_TYPE_OTHER_DIRECT_JUMP = 15,
} CTRType;
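/*
 * Decoding sketch (illustrative, not part of the patch; 'data' stands for a
 * raw ctrdata word read back through the CSR interface):
 *
 *     CTRType type = (CTRType)(data & CTRDATA_TYPE_MASK);
 *     bool ccv = data & CTRDATA_CCV;
 *
 * An entry recorded for a taken branch then reads back as
 * CTRDATA_TYPE_TAKEN_BRANCH (5).
 */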
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
@ -933,9 +1078,8 @@ typedef enum RISCVException {
MHPMEVENTH_BIT_VSINH | \
MHPMEVENTH_BIT_VUINH)
-#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
-#define MHPMEVENT_IDX_MASK 0xFFFFF
-#define MHPMEVENT_SSCOF_RESVD 16
+#define MHPMEVENT_SSCOF_MASK MAKE_64BIT_MASK(63, 56)
+#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK)
/* RISC-V-specific interrupt pending bits. */
#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0


@ -133,6 +133,8 @@ struct RISCVCPUConfig {
bool ext_zvfhmin;
bool ext_smaia;
bool ext_ssaia;
bool ext_smctr;
bool ext_ssctr;
bool ext_sscofpmf;
bool ext_smepmp;
bool ext_smrnmi;
@ -164,6 +166,9 @@ struct RISCVCPUConfig {
bool has_priv_1_12;
bool has_priv_1_11;
/* Always enabled for TCG if has_priv_1_11 */
bool ext_ziccrse;
/* Vendor-specific custom extensions */
bool ext_xtheadba;
bool ext_xtheadbb;


@ -27,6 +27,7 @@
#include "exec/page-protection.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "system/cpu-timers.h"
@ -874,6 +875,254 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
}
}
static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
bool virt)
{
uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
if (ctl & freeze_mask) {
env->sctrstatus |= SCTRSTATUS_FROZEN;
}
}
void riscv_ctr_clear(CPURISCVState *env)
{
memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
}
static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
{
switch (priv) {
case PRV_M:
return MCTRCTL_M;
case PRV_S:
if (virt) {
return XCTRCTL_S;
}
return XCTRCTL_S;
case PRV_U:
if (virt) {
return XCTRCTL_U;
}
return XCTRCTL_U;
}
g_assert_not_reached();
}
static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
bool virt)
{
switch (priv) {
case PRV_M:
return env->mctrctl;
case PRV_S:
case PRV_U:
if (virt) {
return env->vsctrctl;
}
return env->mctrctl;
}
g_assert_not_reached();
}
/*
* This function assumes that src privilege and target privilege are not the
* same and that src privilege is lower than target privilege. This includes
* the virtual state as well.
*/
static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
bool src_virt)
{
target_long tgt_prv = env->priv;
bool res = true;
/*
* VS and U mode are the same in terms of xTE bits required to record an
* external trap. See 6.1.2. External Traps, table 8 External Trap Enable
* Requirements. This changes VS to U to simplify the logic a bit.
*/
if (src_virt && src_prv == PRV_S) {
src_prv = PRV_U;
} else if (env->virt_enabled && tgt_prv == PRV_S) {
tgt_prv = PRV_U;
}
/* VU mode is an outlier here. */
if (src_virt && src_prv == PRV_U) {
res &= !!(env->vsctrctl & XCTRCTL_STE);
}
switch (src_prv) {
case PRV_U:
if (tgt_prv == PRV_U) {
break;
}
res &= !!(env->mctrctl & XCTRCTL_STE);
/* fall-through */
case PRV_S:
if (tgt_prv == PRV_S) {
break;
}
res &= !!(env->mctrctl & MCTRCTL_MTE);
/* fall-through */
case PRV_M:
break;
}
return res;
}
/*
* Special cases for traps and trap returns:
*
* 1- Traps, and trap returns, between enabled modes are recorded as normal.
* 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
* enabled mode back to an inhibited mode, are partially recorded. In such
* cases, the PC from the inhibited mode (source PC for traps, and target PC
* for trap returns) is 0.
*
* 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
* Traps from an enabled mode to an inhibited mode, known as external traps,
* receive special handling.
* By default external traps are not recorded, but a handshake mechanism exists
* to allow partial recording. Software running in the target mode of the trap
* can opt-in to allowing CTR to record traps into that mode even when the mode
* is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
* respectively, to opt-in. When an external trap occurs and xTE=1, where x is
* the target privilege mode of the trap, CTR will record the trap. In such
* cases, the target PC is 0.
*/
/*
* CTR arrays are implemented as circular buffers and a new entry is stored at
* sctrstatus.WRPTR, but they are presented to software as moving circular
* buffers. That is, software gets the illusion that whenever a new entry is
* added, the whole buffer moves by one place, the new entry lands at the
* start, and so the newest entry is always at idx 0 with older ones following.
*
* Depth = 16.
*
* buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
* WRPTR W
* entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
*
* When a new entry is added:
* buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
* WRPTR W
* entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
*
* entry here denotes the logical entry number that software can access
* using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
* will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
* buffer[7]. Here is how we convert an entry to a buffer idx:
*
* entry = isel - CTR_ENTRIES_FIRST;
* idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
*/
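/*
 * Worked example of the conversion above (illustrative): in the second table
 * WRPTR is 9 and depth is 16, so xiselect 0x200 (entry 0) maps to
 * idx = (9 - 0 - 1) & 15 = 8, i.e. buffer[8], and xiselect 0x201 (entry 1)
 * maps to idx = (9 - 1 - 1) & 15 = 7, i.e. buffer[7].
 */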
void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
enum CTRType type, target_ulong src_priv, bool src_virt)
{
bool tgt_virt = env->virt_enabled;
uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
uint64_t depth, head;
bool ext_trap = false;
/*
* Return immediately if both target and src recording are disabled or if
* CTR is in the frozen state.
*/
if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
env->sctrstatus & SCTRSTATUS_FROZEN) {
return;
}
/*
* With RAS emulation enabled, only indirect calls, direct calls, function
* returns and co-routine swap types are allowed.
*/
if (tgt_ctrl & XCTRCTL_RASEMU &&
type != CTRDATA_TYPE_INDIRECT_CALL &&
type != CTRDATA_TYPE_DIRECT_CALL &&
type != CTRDATA_TYPE_RETURN &&
type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
return;
}
if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
/* Case 2 for traps. */
if (!(src_ctrl & src_mask)) {
src = 0;
} else if (!(tgt_ctrl & tgt_mask)) {
/* Check if target priv-mode has allowed external trap recording. */
if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
return;
}
ext_trap = true;
dst = 0;
}
} else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
/*
* Case 3 for trap returns. Trap returns from inhibited mode are not
* recorded.
*/
if (!(src_ctrl & src_mask)) {
return;
}
/* Case 2 for trap returns. */
if (!(tgt_ctrl & tgt_mask)) {
dst = 0;
}
}
/* Ignore filters in case of RASEMU mode or External trap. */
if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
/*
* Check if the specific type is inhibited. Not taken branch filter is
* an enable bit and needs to be checked separately.
*/
bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
(type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
return;
}
}
head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
head = (head - 1) & (depth - 1);
env->ctr_src[head] &= ~CTRSOURCE_VALID;
env->sctrstatus =
set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
return;
}
/* In case of Co-routine SWAP we overwrite latest entry. */
if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
head = (head - 1) & (depth - 1);
}
env->ctr_src[head] = src | CTRSOURCE_VALID;
env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
head = (head + 1) & (depth - 1);
env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
@ -1223,14 +1472,27 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
ppn = pte >> PTE_PPN_SHIFT;
} else {
if (pte & PTE_RESERVED) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
"addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
__func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
/* Reserved without Svpbmt. */
qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
"and Svpbmt extension is disabled: "
"addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
__func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
/* Reserved without Svnapot extension */
qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
"and Svnapot extension is disabled: "
"addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
__func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
@ -1241,14 +1503,19 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
/* Invalid PTE */
return TRANSLATE_FAIL;
}
if (pte & (PTE_R | PTE_W | PTE_X)) {
goto leaf;
}
-/* Inner PTE, continue walking */
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
/* D, A, and U bits are reserved in non-leaf/inner PTEs */
qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
"addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
__func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+/* Inner PTE, continue walking */
base = ppn << PGSHIFT;
}
@ -1258,10 +1525,17 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
leaf:
if (ppn & ((1ULL << ptshift) - 1)) {
/* Misaligned PPN */
qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
"addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
__func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
/* Reserved without Svpbmt. */
qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
"and Svpbmt extension is disabled: "
"addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
__func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
@ -1697,6 +1971,23 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else if (probe) {
return false;
} else {
int wp_access = 0;
if (access_type == MMU_DATA_LOAD) {
wp_access |= BP_MEM_READ;
} else if (access_type == MMU_DATA_STORE) {
wp_access |= BP_MEM_WRITE;
}
/*
* If a watchpoint isn't found for 'addr' this will
* be a no-op and we'll resume the mmu_exception path.
* Otherwise we'll throw a debug exception and execution
* will continue elsewhere.
*/
cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
wp_access, retaddr);
raise_mmu_exception(env, address, access_type, pmp_violation,
first_stage_error, two_stage_lookup,
two_stage_indirect_error);
@ -1975,10 +2266,13 @@ void riscv_cpu_do_interrupt(CPUState *cs)
!(env->mip & (1ULL << cause));
bool smode_double_trap = false;
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
const bool prev_virt = env->virt_enabled;
const target_ulong prev_priv = env->priv;
target_ulong tval = 0;
target_ulong tinst = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
target_ulong src;
int sxlen = 0;
int mxlen = 16 << riscv_cpu_mxl(env);
bool nnmi_excep = false;
@ -2084,7 +2378,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
mode = env->priv <= PRV_S && cause < 64 &&
(((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
-vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
+vsmode_exc = env->virt_enabled && cause < 64 &&
+(((hdeleg >> cause) & 1) || vs_injected);
/*
* Check double trap condition only if already in S-mode and targeting
* S-mode
@ -2162,6 +2458,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S, virt);
src = env->sepc;
} else {
/*
* If the hart encounters an exception while executing in M-mode
@ -2246,6 +2544,19 @@ void riscv_cpu_do_interrupt(CPUState *cs)
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
}
riscv_cpu_set_mode(env, PRV_M, virt);
src = env->mepc;
}
if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
if (async && cause == IRQ_PMU_OVF) {
riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
} else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
}
riscv_ctr_add_entry(env, src, env->pc,
async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
prev_priv, prev_virt);
}
/*


@ -376,10 +376,6 @@ static RISCVException aia_smode32(CPURISCVState *env, int csrno)
return ret;
}
-if (ret != RISCV_EXCP_NONE) {
-return ret;
-}
return smode32(env, csrno);
}
@ -639,6 +635,48 @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
return hmode(env, csrno);
}
/*
* M-mode:
* Without ext_smctr raise illegal inst excep.
* Otherwise everything is accessible to m-mode.
*
* S-mode:
* Without ext_ssctr or mstateen.ctr raise illegal inst excep.
* Otherwise everything other than mctrctl is accessible.
*
* VS-mode:
* Without ext_ssctr or mstateen.ctr raise illegal inst excep.
* Without hstateen.ctr raise virtual illegal inst excep.
* Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
* Always raise illegal instruction exception for sctrdepth.
*/
static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
{
/* Check if smctr-ext is present */
if (riscv_cpu_cfg(env)->ext_smctr) {
return RISCV_EXCP_NONE;
}
return RISCV_EXCP_ILLEGAL_INST;
}
static RISCVException ctr_smode(CPURISCVState *env, int csrno)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
if (!cfg->ext_smctr && !cfg->ext_ssctr) {
return RISCV_EXCP_ILLEGAL_INST;
}
RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
env->virt_enabled) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
return ret;
}
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
{
int ret;
@ -2389,6 +2427,13 @@ static bool xiselect_cd_range(target_ulong isel)
return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
}
static bool xiselect_ctr_range(int csrno, target_ulong isel)
{
/* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
csrno < CSR_MIREG;
}
static int rmw_iprio(target_ulong xlen,
target_ulong iselect, uint8_t *iprio,
target_ulong *val, target_ulong new_val,
@ -2434,6 +2479,124 @@ static int rmw_iprio(target_ulong xlen,
return 0;
}
static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
/*
* CTR arrays are treated as circular buffers and TOS always points to the next
* empty slot, keeping TOS - 1 always pointing to the latest entry. Given that
* entry 0 is always the latest one, traversal is a bit different here. See the
* below example.
*
* Depth = 16.
*
* idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
* TOS H
* entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
*/
const uint64_t entry = isel - CTR_ENTRIES_FIRST;
const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
uint64_t idx;
/* Entry greater than depth-1 is read-only zero */
if (entry >= depth) {
if (val) {
*val = 0;
}
return 0;
}
idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
idx = (idx - entry - 1) & (depth - 1);
if (val) {
*val = env->ctr_src[idx];
}
env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);
return 0;
}
static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
/*
* CTR arrays are treated as circular buffers and TOS always points to the next
* empty slot, keeping TOS - 1 always pointing to the latest entry. Given that
* entry 0 is always the latest one, traversal is a bit different here. See the
* below example.
*
* Depth = 16.
*
* idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
* head H
* entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
*/
const uint64_t entry = isel - CTR_ENTRIES_FIRST;
const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
uint64_t idx;
/* Entry greater than depth-1 is read-only zero */
if (entry >= depth) {
if (val) {
*val = 0;
}
return 0;
}
idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
idx = (idx - entry - 1) & (depth - 1);
if (val) {
*val = env->ctr_dst[idx];
}
env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);
return 0;
}
static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
/*
* CTR arrays are treated as circular buffers and TOS always points to the next
* empty slot, keeping TOS - 1 always pointing to the latest entry. Given that
* entry 0 is always the latest one, traversal is a bit different here. See the
* below example.
*
* Depth = 16.
*
* idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
* head H
* entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
*/
const uint64_t entry = isel - CTR_ENTRIES_FIRST;
const uint64_t mask = wr_mask & CTRDATA_MASK;
const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
uint64_t idx;
/* Entry greater than depth-1 is read-only zero */
if (entry >= depth) {
if (val) {
*val = 0;
}
return 0;
}
idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
idx = (idx - entry - 1) & (depth - 1);
if (val) {
*val = env->ctr_data[idx];
}
env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);
return 0;
}
static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
target_ulong isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
@ -2586,6 +2749,27 @@ done:
return ret;
}
static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
target_ulong isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
return -EINVAL;
}
if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
return rmw_ctrsource(env, isel, val, new_val, wr_mask);
} else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
} else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
return rmw_ctrdata(env, isel, val, new_val, wr_mask);
} else if (val) {
*val = 0;
}
return 0;
}
/*
* rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
*
@ -2597,11 +2781,13 @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
target_ulong isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
-int ret = -EINVAL;
bool virt = csrno == CSR_VSIREG ? true : false;
+int ret = -EINVAL;
if (xiselect_cd_range(isel)) {
ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
} else if (xiselect_ctr_range(csrno, isel)) {
ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
} else {
/*
* As per the specification, access to an unimplemented region is undefined
@ -2621,7 +2807,6 @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
-bool virt = false;
int ret = -EINVAL;
target_ulong isel;
@ -2642,23 +2827,17 @@ static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
} else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
csrno != CSR_VSIREG4 - 1) {
isel = env->vsiselect;
-virt = true;
} else {
-goto done;
+return RISCV_EXCP_ILLEGAL_INST;
}
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
-done:
-return (env->virt_enabled && virt) ?
-RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
}
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
-bool virt = false;
int ret = -EINVAL;
target_ulong isel;
@ -2680,10 +2859,9 @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
break;
case CSR_VSIREG:
isel = env->vsiselect;
-virt = true;
break;
default:
goto done;
};
/*
@ -2699,16 +2877,10 @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
} else if (riscv_cpu_cfg(env)->ext_smcsrind ||
riscv_cpu_cfg(env)->ext_sscsrind) {
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
} else {
return RISCV_EXCP_ILLEGAL_INST;
}
done:
-if (ret) {
-return (env->virt_enabled && virt) ?
-RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
-}
-return RISCV_EXCP_NONE;
+return RISCV_EXCP_ILLEGAL_INST;
}
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
@ -3234,6 +3406,10 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
}
if (riscv_cpu_cfg(env)->ext_ssctr) {
wr_mask |= SMSTATEEN0_CTR;
}
return write_mstateen(env, csrno, wr_mask, new_val);
}
@ -3273,6 +3449,10 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_P1P13;
}
if (riscv_cpu_cfg(env)->ext_ssctr) {
wr_mask |= SMSTATEEN0_CTR;
}
return write_mstateenh(env, csrno, wr_mask, new_val);
}
@ -3327,6 +3507,10 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
}
if (riscv_cpu_cfg(env)->ext_ssctr) {
wr_mask |= SMSTATEEN0_CTR;
}
return write_hstateen(env, csrno, wr_mask, new_val);
}
@ -3366,6 +3550,10 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
if (riscv_cpu_cfg(env)->ext_ssctr) {
wr_mask |= SMSTATEEN0_CTR;
}
return write_hstateenh(env, csrno, wr_mask, new_val);
}
@ -4086,6 +4274,86 @@ static RISCVException write_satp(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
target_ulong *ret_val,
target_ulong new_val, target_ulong wr_mask)
{
uint64_t mask = wr_mask & SCTRDEPTH_MASK;
if (ret_val) {
*ret_val = env->sctrdepth;
}
env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);
/* Correct depth. */
if (mask) {
uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);
if (depth > SCTRDEPTH_MAX) {
depth = SCTRDEPTH_MAX;
env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
}
/* Update sctrstatus.WRPTR with a legal value */
depth = 16 << depth;
env->sctrstatus =
env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
}
return RISCV_EXCP_NONE;
}
static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
target_ulong *ret_val,
target_ulong new_val, target_ulong wr_mask)
{
uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
uint32_t mask = wr_mask & SCTRSTATUS_MASK;
if (ret_val) {
*ret_val = env->sctrstatus;
}
env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);
/* Update sctrstatus.WRPTR with a legal value */
env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
return RISCV_EXCP_NONE;
}
static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
target_ulong *ret_val,
target_ulong new_val, target_ulong wr_mask)
{
uint64_t csr_mask, mask = wr_mask;
uint64_t *ctl_ptr = &env->mctrctl;
if (csrno == CSR_MCTRCTL) {
csr_mask = MCTRCTL_MASK;
} else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
csr_mask = SCTRCTL_MASK;
} else {
/*
* This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
* or csrno == CSR_VSCTRCTL.
*/
csr_mask = VSCTRCTL_MASK;
ctl_ptr = &env->vsctrctl;
}
mask &= csr_mask;
if (ret_val) {
*ret_val = *ctl_ptr & csr_mask;
}
*ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);
return RISCV_EXCP_NONE;
}
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
target_ulong *val)
{
@ -5839,6 +6107,12 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
[CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
[CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
[CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
[CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
[CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
/* Performance Counters */
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
[CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },


@ -478,7 +478,7 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
bool enabled = type2_breakpoint_enabled(ctrl);
CPUState *cs = env_cpu(env);
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
-uint32_t size;
+uint32_t size, def_size;
if (!enabled) {
return;
@ -501,7 +501,9 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
cpu_watchpoint_insert(cs, addr, size, flags,
&env->cpu_watchpoint[index]);
} else {
-cpu_watchpoint_insert(cs, addr, 8, flags,
+def_size = riscv_cpu_mxl(env) == MXL_RV64 ? 8 : 4;
+cpu_watchpoint_insert(cs, addr, def_size, flags,
&env->cpu_watchpoint[index]);
}
}


@ -132,10 +132,12 @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
DEF_HELPER_1(sret, tl, env)
DEF_HELPER_1(mret, tl, env)
DEF_HELPER_1(mnret, tl, env)
DEF_HELPER_1(ctr_clear, void, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wrs_nto, void, env)
DEF_HELPER_1(tlb_flush, void, env)
DEF_HELPER_1(tlb_flush_all, void, env)
DEF_HELPER_4(ctr_add_entry, void, env, tl, tl, tl)
/* Native Debug */
DEF_HELPER_1(itrigger_match, void, env)
#endif


@ -114,12 +114,12 @@
# *** Privileged Instructions ***
ecall 000000000000 00000 000 00000 1110011
ebreak 000000000001 00000 000 00000 1110011
+sctrclr 000100000100 00000 000 00000 1110011
uret 0000000 00010 00000 000 00000 1110011
sret 0001000 00010 00000 000 00000 1110011
mret 0011000 00010 00000 000 00000 1110011
wfi 0001000 00101 00000 000 00000 1110011
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
-sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
# *** NMI ***
mnret 0111000 00010 00000 000 00000 1110011


@ -75,6 +75,17 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
return true;
}
static bool trans_sctrclr(DisasContext *ctx, arg_sctrclr *a)
{
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
gen_helper_ctr_clear(tcg_env);
return true;
}
#endif
return false;
}
static bool trans_uret(DisasContext *ctx, arg_uret *a)
{
return false;
@ -86,6 +97,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
if (has_ext(ctx, RVS)) {
decode_save_opc(ctx, 0);
translator_io_start(&ctx->base);
gen_update_pc(ctx, 0);
gen_helper_sret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@ -103,6 +115,7 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx, 0);
translator_io_start(&ctx->base);
gen_update_pc(ctx, 0);
gen_helper_mret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@ -147,8 +160,3 @@ static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
#endif
return false;
}
-static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
-{
-return false;
-}


@ -93,6 +93,51 @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
return true;
}
#ifndef CONFIG_USER_ONLY
/*
* Indirect calls
* - jalr x1, rs where rs != x5;
* - jalr x5, rs where rs != x1;
* - c.jalr rs1 where rs1 != x5;
*
* Indirect jumps
* - jalr x0, rs where rs != x1 and rs != x5;
* - c.jr rs1 where rs1 != x1 and rs1 != x5.
*
* Returns
* - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
* - c.jr rs1 where rs1 == x1 or rs1 == x5.
*
* Co-routine swap
* - jalr x1, x5;
* - jalr x5, x1;
* - c.jalr x5.
*
* Other indirect jumps
* - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
*/
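/*
 * Concrete encodings (illustrative): "jalr ra, 0(a0)" records an indirect
 * call, "jr a0" an indirect jump, "ret" a return, and "jalr t0, 0(ra)" a
 * co-routine swap per the rules above.
 */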
static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
{
TCGv src = tcg_temp_new();
TCGv type;
if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
} else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
} else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
} else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
} else {
type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
}
gen_pc_plus_diff(src, ctx, 0);
gen_helper_ctr_add_entry(tcg_env, src, dest, type);
}
#endif
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
TCGLabel *misaligned = NULL;
@ -117,6 +162,12 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, a->rd, succ_pc);
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
gen_ctr_jalr(ctx, a, target_pc);
}
#endif
tcg_gen_mov_tl(cpu_pc, target_pc);
if (ctx->fcfi_enabled) {
/*
@ -231,6 +282,19 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
} else {
tcg_gen_brcond_tl(cond, src1, src2, l);
}
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
TCGv dest = tcg_temp_new();
TCGv src = tcg_temp_new();
gen_pc_plus_diff(src, ctx, 0);
gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
gen_helper_ctr_add_entry(tcg_env, src, dest, type);
}
#endif
gen_goto_tb(ctx, 1, ctx->cur_insn_len);
ctx->pc_save = orig_pc_save;
@ -243,6 +307,17 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
gen_pc_plus_diff(target_pc, ctx, a->imm);
gen_exception_inst_addr_mis(ctx, target_pc);
} else {
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
TCGv dest = tcg_temp_new();
TCGv src = tcg_temp_new();
gen_pc_plus_diff(src, ctx, 0);
gen_pc_plus_diff(dest, ctx, a->imm);
gen_helper_ctr_add_entry(tcg_env, src, dest, type);
}
#endif
gen_goto_tb(ctx, 0, a->imm);
}
ctx->pc_save = -1;


@ -203,6 +203,14 @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val)
if (ret) {
TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN);
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
TCGv type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
TCGv src = tcg_temp_new();
gen_pc_plus_diff(src, ctx, 0);
gen_helper_ctr_add_entry(tcg_env, src, ret_addr, type);
}
#endif
tcg_gen_mov_tl(cpu_pc, ret_addr);
tcg_gen_lookup_and_goto_ptr();
ctx->base.is_jmp = DISAS_NORETURN;
@ -309,6 +317,19 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
gen_set_gpr(ctx, xRA, succ_pc);
}
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
if (a->index >= 32) {
TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
} else {
TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
}
}
#endif
tcg_gen_mov_tl(cpu_pc, addr);
tcg_gen_lookup_and_goto_ptr();


@ -274,6 +274,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
@ -283,6 +284,7 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
@ -319,12 +321,18 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
};
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
@ -605,6 +613,21 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
return ret;
}
static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
{
env->mstatus = 0;
env->mie = 0;
env->stvec = 0;
env->sscratch = 0;
env->sepc = 0;
env->scause = 0;
env->stval = 0;
env->mip = 0;
env->satp = 0;
env->scounteren = 0;
env->senvcfg = 0;
}
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
CPURISCVState *env = &RISCV_CPU(cs)->env;
@ -618,6 +641,8 @@ static int kvm_riscv_get_regs_csr(CPUState *cs)
KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
KVM_RISCV_GET_CSR(cs, env, scounteren, env->scounteren);
KVM_RISCV_GET_CSR(cs, env, senvcfg, env->senvcfg);
return 0;
}
@ -635,6 +660,8 @@ static int kvm_riscv_put_regs_csr(CPUState *cs)
KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
KVM_RISCV_SET_CSR(cs, env, scounteren, env->scounteren);
KVM_RISCV_SET_CSR(cs, env, senvcfg, env->senvcfg);
return 0;
}
@ -1603,23 +1630,14 @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
CPURISCVState *env = &cpu->env;
int i;
-if (!kvm_enabled()) {
-return;
-}
for (i = 0; i < 32; i++) {
env->gpr[i] = 0;
}
env->pc = cpu->env.kernel_addr;
env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
env->gpr[11] = cpu->env.fdt_addr; /* a1 */
-env->satp = 0;
-env->mie = 0;
-env->stvec = 0;
-env->sscratch = 0;
-env->sepc = 0;
-env->scause = 0;
-env->stval = 0;
-env->mip = 0;
+kvm_riscv_reset_regs_csr(env);
}
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)


@ -300,6 +300,30 @@ static const VMStateDescription vmstate_envcfg = {
}
};
static bool ctr_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr;
}
static const VMStateDescription vmstate_ctr = {
.name = "cpu/ctr",
.version_id = 1,
.minimum_version_id = 1,
.needed = ctr_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(env.mctrctl, RISCVCPU),
VMSTATE_UINT32(env.sctrdepth, RISCVCPU),
VMSTATE_UINT32(env.sctrstatus, RISCVCPU),
VMSTATE_UINT64(env.vsctrctl, RISCVCPU),
VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX),
VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX),
VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX),
VMSTATE_END_OF_LIST()
}
};
static bool pmu_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
@ -450,6 +474,7 @@ const VMStateDescription vmstate_riscv_cpu = {
&vmstate_jvt,
&vmstate_elp,
&vmstate_ssp,
&vmstate_ctr,
NULL
}
};


@ -270,6 +270,8 @@ target_ulong helper_sret(CPURISCVState *env)
{
uint64_t mstatus;
target_ulong prev_priv, prev_virt = env->virt_enabled;
const target_ulong src_priv = env->priv;
const bool src_virt = env->virt_enabled;
if (!(env->priv >= PRV_S)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
@ -339,6 +341,11 @@ target_ulong helper_sret(CPURISCVState *env)
}
env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
src_priv, src_virt);
}
return retpc;
}
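The src_priv/src_virt locals exist because helper_sret() rewrites env->priv and env->virt_enabled on its way out; the CTR entry must record the mode the return was executed from, not the mode being returned to. A minimal model of the ordering hazard, with hypothetical names:

    #include <stdbool.h>

    typedef struct { int priv; bool virt; } CpuMode;

    /* Hypothetical sketch: the trap-return record must carry the source mode. */
    static CpuMode xret_and_record(CpuMode *cpu, CpuMode target)
    {
        CpuMode src = *cpu;   /* snapshot before the privilege switch */
        *cpu = target;        /* sret side effect: mode changes here  */
        return src;           /* what the CTR entry should describe   */
    }

mret needs no such snapshot: it can only execute in M-mode, so the call below passes PRV_M and false directly.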
@@ -416,6 +423,11 @@ target_ulong helper_mret(CPURISCVState *env)
}
env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
PRV_M, false);
}
return retpc;
}
@@ -466,6 +478,42 @@ target_ulong helper_mnret(CPURISCVState *env)
return retpc;
}
void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
target_ulong dest, target_ulong type)
{
riscv_ctr_add_entry(env, src, dest, (enum CTRType)type,
env->priv, env->virt_enabled);
}
void helper_ctr_clear(CPURISCVState *env)
{
/*
* It's safe to call smstateen_acc_ok() for a U-mode access regardless of the
* state of bit 54 (the CTR bit of m/hstateen) in sstateen. If the bit is
* zero, smstateen_acc_ok() returns the correct exception code; if it's one,
* it returns RISCV_EXCP_NONE, and the U-mode check below handles that case.
*/
RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
}
if (env->priv == PRV_U) {
/*
* One corner case is when sctrclr is executed from VU-mode and
* mstateen.CTR = 0, in which case we are supposed to raise
* RISCV_EXCP_ILLEGAL_INST. This case is already handled in
* smstateen_acc_ok().
*/
uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT :
RISCV_EXCP_ILLEGAL_INST;
riscv_raise_exception(env, excep, GETPC());
}
riscv_ctr_clear(env);
}
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);


@@ -524,7 +524,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
-if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
+if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - pmpcfg + 1 locked\n");
return;
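Routing the check through pmp_is_locked() lets the mseccfg.RLB (Rule Locking Bypass) bit from Smepmp take effect: with RLB set, a locked pmpcfg no longer blocks the write to the preceding pmpaddr of a TOR entry. A sketch of the intended semantics, with illustrative constants rather than QEMU's actual helper:

    #include <stdbool.h>
    #include <stdint.h>

    #define PMP_LOCK_EXAMPLE    0x80u        /* L bit in pmpcfg */
    #define MSECCFG_RLB_EXAMPLE (1u << 2)    /* Rule Locking Bypass (Smepmp) */

    /* Illustrative: an entry is effectively locked only when its L bit is
     * set and mseccfg.RLB does not bypass rule locking. */
    static bool entry_locked(uint8_t pmpcfg, uint32_t mseccfg)
    {
        if (mseccfg & MSECCFG_RLB_EXAMPLE) {
            return false;
        }
        return (pmpcfg & PMP_LOCK_EXAMPLE) != 0;
    }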


@@ -390,7 +390,7 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
* The expected mhpmevent value is zero for the reset case. Remove the
* current mapping.
*/
-if (!value) {
+if (!(value & MHPMEVENT_IDX_MASK)) {
g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
pmu_remove_event_map,
GUINT_TO_POINTER(ctr_idx));
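With Sscofpmf, mhpmevent is no longer just an event index: the upper bits hold control flags such as OF and the inhibit bits. Masking with MHPMEVENT_IDX_MASK makes the "reset" test depend only on the event selector, so a write whose index field is zero unmaps the counter even when control bits are set. An illustrative model (the mask and bit positions here are placeholders, not QEMU's definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define IDX_MASK_EXAMPLE 0xFFFFFULL   /* placeholder for MHPMEVENT_IDX_MASK */
    #define OF_BIT_EXAMPLE   (1ULL << 63) /* sscofpmf overflow flag, upper bits */

    static bool event_selects_nothing(uint64_t mhpmevent)
    {
        return (mhpmevent & IDX_MASK_EXAMPLE) == 0;
    }

    /* e.g. event_selects_nothing(OF_BIT_EXAMPLE) is now true: control bits
     * alone no longer keep a stale counter mapping alive. */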


@@ -360,6 +360,8 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
cpu->cfg.ext_ssstateen;
cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -681,6 +683,17 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
(!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
return;
}
cpu->cfg.ext_smctr = false;
cpu->cfg.ext_ssctr = false;
}
/*
* Disable isa extensions based on priv spec after we
* validated and set everything we need.
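The Smctr/Ssctr block above follows QEMU's usual dependency-validation pattern: if the user explicitly enabled the extension, a missing dependency is a hard error; if it was only enabled by default, it is silently switched off. A compact model of that pattern (hypothetical helper, not QEMU API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical sketch of the user-set vs. default-enabled pattern. */
    static bool check_dep(bool *ext, bool user_set, bool deps_ok,
                          const char *err)
    {
        if (*ext && !deps_ok) {
            if (user_set) {
                fprintf(stderr, "%s\n", err);  /* error_setg() in the real code */
                return false;
            }
            *ext = false;  /* default-enabled: drop quietly */
        }
        return true;
    }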
@@ -713,13 +726,29 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
}
#endif
static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
RISCVCPUProfile *profile,
RISCVCPUProfile *parent)
{
const char *parent_name;
bool parent_enabled;
if (!profile->enabled || !parent) {
return;
}
parent_name = parent->name;
parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
profile->enabled = parent_enabled;
}
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
RISCVCPUProfile *profile)
{
CPURISCVState *env = &cpu->env;
const char *warn_msg = "Profile %s mandates disabled extension %s";
bool send_warn = profile->user_set && profile->enabled;
-bool parent_enabled, profile_impl = true;
+bool profile_impl = true;
int i;
#ifndef CONFIG_USER_ONLY
@@ -730,7 +759,7 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
#endif
if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
-profile->priv_spec != env->priv_ver) {
+profile->priv_spec > env->priv_ver) {
profile_impl = false;
if (send_warn) {
@@ -773,12 +802,8 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
profile->enabled = profile_impl;
-if (profile->parent != NULL) {
-parent_enabled = object_property_get_bool(OBJECT(cpu),
-profile->parent->name,
-NULL);
-profile->enabled = profile->enabled && parent_enabled;
-}
+riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
+riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
@@ -1190,8 +1215,13 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
profile->user_set = true;
profile->enabled = value;
-if (profile->parent != NULL) {
-object_property_set_bool(obj, profile->parent->name,
+if (profile->u_parent != NULL) {
+object_property_set_bool(obj, profile->u_parent->name,
profile->enabled, NULL);
}
if (profile->s_parent != NULL) {
object_property_set_bool(obj, profile->s_parent->name,
profile->enabled, NULL);
}
@@ -1441,22 +1471,20 @@ static void riscv_init_max_cpu_extensions(Object *obj)
}
/*
-* ext_smrnmi requires OpenSBI changes that our current
+* TODO: ext_smrnmi requires OpenSBI changes that our current
* image does not have. Disable it for now.
*/
if (cpu->cfg.ext_smrnmi) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
qemu_log("Smrnmi is disabled in the 'max' type CPU\n");
}
/*
-* ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup to
-* avoid generating a double trap. OpenSBI does not currently support it,
+* TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
+* to avoid generating a double trap. OpenSBI does not currently support it,
* disable it for now.
*/
if (cpu->cfg.ext_smdbltrp) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
qemu_log("Smdbltrp is disabled in the 'max' type CPU\n");
}
}


@@ -561,6 +561,46 @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
}
}
#ifndef CONFIG_USER_ONLY
/*
* Direct calls:
* - jal x1
* - jal x5
* - c.jal
* - cm.jalt
*
* Direct jumps:
* - jal x0
* - c.j
* - cm.jt
*
* Other direct jumps:
* - jal rd, where rd != x0, x1 or x5
*/
static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm)
{
TCGv dest = tcg_temp_new();
TCGv src = tcg_temp_new();
TCGv type;
/*
* If rd is one of the link registers x1 or x5, treat this as a direct
* call; otherwise it's a direct jump.
*/
if (rd == 1 || rd == 5) {
type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
} else if (rd == 0) {
type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
} else {
type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP);
}
gen_pc_plus_diff(dest, ctx, imm);
gen_pc_plus_diff(src, ctx, 0);
gen_helper_ctr_add_entry(tcg_env, src, dest, type);
}
#endif
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
TCGv succ_pc = dest_gpr(ctx, rd);
@@ -575,6 +615,12 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
}
}
#ifndef CONFIG_USER_ONLY
if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
gen_ctr_jal(ctx, rd, imm);
}
#endif
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, rd, succ_pc);
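The rd-based classification in gen_ctr_jal() mirrors the RISC-V convention that x1 (ra) and x5 (t0) are link registers. A standalone check of the mapping, using illustrative enum values rather than the CTRDATA_TYPE_* definitions:

    #include <assert.h>

    enum { CALL, JUMP, OTHER };  /* stand-ins for CTRDATA_TYPE_* */

    static int classify_jal(int rd)
    {
        if (rd == 1 || rd == 5) {  /* x1/ra, x5/t0: link registers */
            return CALL;
        }
        return rd == 0 ? JUMP : OTHER;
    }

    int main(void)
    {
        assert(classify_jal(1) == CALL);   /* jal ra, f */
        assert(classify_jal(0) == JUMP);   /* j label   */
        assert(classify_jal(6) == OTHER);  /* jal t1, f */
        return 0;
    }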


@@ -4659,7 +4659,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2); \
} \
-*((TD *)vd + HD(0)) = s1; \
+if (vl > 0) { \
+*((TD *)vd + HD(0)) = s1; \
+} \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
@@ -4745,7 +4747,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2, &env->fp_status); \
} \
-*((TD *)vd + HD(0)) = s1; \
+if (vl > 0) { \
+*((TD *)vd + HD(0)) = s1; \
+} \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
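Both guards implement the same spec requirement: when vl == 0 a reduction performs no operation, so element 0 of vd must be left untouched rather than overwritten with s1, which was initialized from vs1[0]. A scalar model of the fixed behavior:

    #include <stdint.h>

    /* Minimal model of a vredsum-style reduction after the fix: with
     * vl == 0 the destination element is preserved. */
    static void vredsum_model(uint32_t *vd, const uint32_t *vs2,
                              uint32_t vs1_0, uint32_t vl)
    {
        uint32_t s1 = vs1_0;
        for (uint32_t i = 0; i < vl; i++) {
            s1 += vs2[i];
        }
        if (vl > 0) {   /* the guard added above */
            vd[0] = s1;
        }
    }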
