Merge tag 'pull-riscv-to-apply-20250519' of https://github.com/alistair23/qemu into staging

First RISC-V PR for 10.1

* Add support for RIMT to virt machine ACPI
* Don't allow PMP RLB to bypass rule privileges
* Fix checks on writes to pmpcfg in Smepmp MML mode
* Generate strided vector loads/stores with tcg nodes
* Improve Microchip Polarfire SoC customization
* Use tcg ops generation to emulate whole reg rvv loads/stores
* Expand the probe_pages helper function to handle probe flags
* Fix type conflict of GLib function pointers
* Fix endless translation loop on big endian systems
* Use tail pseudoinstruction for calling tail
* Fix some RISC-V vector instruction corner cases
* MAINTAINERS: Add common-user/host/riscv to RISC-V section
* Fix write_misa vs aligned next_pc
* KVM CSR fixes
* Virt machine memmap usage cleanup

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmgqreUACgkQr3yVEwxT
# gBMurA//WAX4X+aNi6keCr20Ffv3gUnkPPeVewLLKOrZZ7h5oyWtRcSQvwDBSr8C
# vzs4Rf492iNljnbRj09KYkLB8Qq/QH2Ixl+IgG8ueJAHJ8WdG6HYrrDxcONwgL1K
# lQrRaDrMCFjU35uDlejfuvWWEmb4GKDMfj/wxs5wgNvnHgNhThmVaWQysx8KHYU+
# tevPQj5s4mvVGztGRSHIZQ8bth7oVUQR5CsKngGEmVLS2L6yVYTUN5ZhB5g6Ikrq
# jKn44cROLfvv+0zPB9Lgu7eKauA/h8SckjCV+dGoMDB5J5M/TvwS118VHOjNVEQZ
# yLx6/BdLp3SRPwbvu9jn6vjms95iZSJXXdnY+Cg4MqAbuIRQK66n6Zoa3LjbEk2U
# zsf03uYUla9CBs6VQJmF7yjks3AbWokpZ9WSsGIcHJgXe8pEUeEsWOgiBhifJ5bD
# Hljlkiw1gHCjhtc2aWEH0aBwM7896lmdct70JmkjQ4uZKkFSqJ8y5l0Gm3lvgvdO
# OEPyMbZ6hSO4UZovcpeFtOoXteCUVthvqz0mADoQAtG+2qoX0k+9h8v305fhlti+
# bXJfu1QPsQqCYMrv4ZSTbRg0uC8m7CHnA+coC0XnCcMwEk9ZBl373/cnQYWsmqo8
# lxZmXLvpaN9CEzjOmZhXUsOvIqAJdM1sYPWnoXJ71t7mn4QEOPA=
# =dFjk
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 19 May 2025 00:04:53 EDT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20250519' of https://github.com/alistair23/qemu: (56 commits)
  hw/riscv/virt.c: remove 'long' casts in fmt strings
  hw/riscv/virt.c: use s->memmap in finalize_fdt() functions
  hw/riscv/virt.c: use s->memmap in create_fdt_virtio()
  hw/riscv/virt.c: use s->memmap in create_fdt_sockets() path
  hw/riscv/virt.c: use s->memmap in create_fdt() path
  hw/riscv/virt.c: add 'base' arg in create_fw_cfg()
  hw/riscv/virt.c: use s->memmap in virt_machine_done()
  hw/riscv/virt.c: remove trivial virt_memmap references
  hw/riscv/virt.c: enforce s->memmap use in machine_init()
  target/riscv/kvm: add scounteren CSR
  target/riscv/kvm: read/write KVM regs via env size
  target/riscv/kvm: add senvcfg CSR
  target/riscv/kvm: do not read unavailable CSRs
  target/riscv/kvm: add kvm_csr_cfgs[]
  target/riscv/kvm: turn kvm_riscv_reg_id_ulong() into a macro
  target/riscv/kvm: turn u32/u64 reg functions into macros
  target/riscv/kvm: fix leak in kvm_riscv_init_multiext_cfg()
  target/riscv/kvm: minor fixes/tweaks
  target/riscv: Fix write_misa vs aligned next_pc
  target/riscv: Move insn_len to internals.h
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Commit 2af4a82ab2 by Stefan Hajnoczi, 2025-05-19 14:00:54 -04:00
22 changed files with 1573 additions and 758 deletions


@ -328,6 +328,7 @@ F: include/hw/char/riscv_htif.h
F: include/hw/riscv/
F: linux-user/host/riscv32/
F: linux-user/host/riscv64/
F: common-user/host/riscv*
F: tests/functional/test_riscv*
F: tests/tcg/riscv64/


@ -69,11 +69,11 @@ safe_syscall_end:
/* code path setting errno */
0: neg a0, a0
j safe_syscall_set_errno_tail
tail safe_syscall_set_errno_tail
/* code path when we didn't execute the syscall */
2: li a0, QEMU_ERESTARTSYS
j safe_syscall_set_errno_tail
tail safe_syscall_set_errno_tail
.cfi_endproc
.size safe_syscall_base, .-safe_syscall_base
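
A note on the j -> tail change above: j assembles to a single jal x0 instruction whose immediate reaches only +/- 1 MiB, while the tail pseudoinstruction expands to an auipc/jalr pair with +/- 2 GiB of reach, so the call keeps linking even if the linker places safe_syscall_set_errno_tail far from the caller. The standard expansions (per the RISC-V assembly manual) are:

    j    sym    ->  jal   x0, sym                    # +/- 1 MiB
    tail sym    ->  auipc x6, %pcrel_hi(sym)
                    jalr  x0, %pcrel_lo(sym)(x6)     # +/- 2 GiB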


@ -5,10 +5,10 @@ Microchip PolarFire SoC Icicle Kit integrates a PolarFire SoC, with one
SiFive's E51 plus four U54 cores and many on-chip peripherals and an FPGA.
For more details about Microchip PolarFire SoC, please see:
https://www.microsemi.com/product-directory/soc-fpgas/5498-polarfire-soc-fpga
https://www.microchip.com/en-us/products/fpgas-and-plds/system-on-chip-fpgas/polarfire-soc-fpgas
The Icicle Kit board information can be found here:
https://www.microsemi.com/existing-parts/parts/152514
https://www.microchip.com/en-us/development-tool/mpfs-icicle-kit-es
Supported devices
-----------------
@ -26,95 +26,48 @@ The ``microchip-icicle-kit`` machine supports the following devices:
* 2 GEM Ethernet controllers
* 1 SDHC storage controller
The memory is set to 1537 MiB by default. A sanity check on RAM size is
performed in the machine init routine to prompt the user to increase the RAM
size to > 1537 MiB when less than 1537 MiB of RAM is detected.
Boot options
------------
The ``microchip-icicle-kit`` machine can start using the standard -bios
functionality for loading its BIOS image, aka Hart Software Services (HSS_).
HSS loads the second stage bootloader U-Boot from an SD card. Then a kernel
can be loaded from U-Boot. It also supports direct kernel booting via the
-kernel option along with the device tree blob via -dtb. When direct kernel
boot is used, the OpenSBI fw_dynamic BIOS image is used to boot a payload
like U-Boot or OS kernel directly.
The ``microchip-icicle-kit`` machine provides some options to run a firmware
(BIOS) or a kernel image. QEMU follows the truth table below to select the
firmware:
The user provided DTB should have the following requirements:
* The /cpus node should contain at least one subnode for E51 and the number
of subnodes should match QEMU's ``-smp`` option
* The /memory reg size should match QEMUs selected ram_size via ``-m``
* Should contain a node for the CLINT device with a compatible string
"riscv,clint0"
QEMU follows below truth table to select which payload to execute:
===== ========== ========== =======
-bios -kernel    -dtb       payload
===== ========== ========== =======
N     N          don't care HSS
Y     don't care don't care HSS
N     Y          Y          kernel
===== ========== ========== =======
The memory is set to 1537 MiB by default which is the minimum required high
memory size by HSS. A sanity check on ram size is performed in the machine
init routine to prompt user to increase the RAM size to > 1537 MiB when less
than 1537 MiB ram is detected.
Running HSS
-----------
HSS 2020.12 release is tested at the time of writing. To build an HSS image
that can be booted by the ``microchip-icicle-kit`` machine, type the following
in the HSS source tree:
.. code-block:: bash
$ export CROSS_COMPILE=riscv64-linux-
$ cp boards/mpfs-icicle-kit-es/def_config .config
$ make BOARD=mpfs-icicle-kit-es
Download the official SD card image released by Microchip and prepare it for
QEMU usage:
.. code-block:: bash
$ wget ftp://ftpsoc.microsemi.com/outgoing/core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz
$ gunzip core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz
$ qemu-img resize core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic 4G
Then we can boot the machine by:
.. code-block:: bash
$ qemu-system-riscv64 -M microchip-icicle-kit -smp 5 \
-bios path/to/hss.bin -sd path/to/sdcard.img \
-nic user,model=cadence_gem \
-nic tap,ifname=tap,model=cadence_gem,script=no \
-display none -serial stdio \
-chardev socket,id=serial1,path=serial1.sock,server=on,wait=on \
-serial chardev:serial1
With above command line, current terminal session will be used for the first
serial port. Open another terminal window, and use ``minicom`` to connect the
second serial port.
.. code-block:: bash
$ minicom -D unix\#serial1.sock
HSS output is on the first serial port (stdio) and U-Boot outputs on the
second serial port. U-Boot will automatically load the Linux kernel from
the SD card image.
============= =========== ======================================
-bios         -kernel     firmware
============= =========== ======================================
none          N           this is an error
none          Y           the kernel image
NULL, default N           hss.bin
NULL, default Y           opensbi-riscv64-generic-fw_dynamic.bin
============= =========== ======================================
Direct Kernel Boot
------------------
Sometimes we just want to test booting a new kernel, and transforming the
kernel image to the format required by the HSS bootflow is tedious. We can
use '-kernel' for direct kernel booting just like other RISC-V machines do.
Use the ``-kernel`` option to directly run a kernel image. When a direct
kernel boot is requested, a device tree blob may be specified via the ``-dtb``
option. Unlike other QEMU machines, this machine does not generate a device
tree for the kernel. It shall be provided by the user. The user provided DTB
should meet the following requirements:
In this mode, the OpenSBI fw_dynamic BIOS image for 'generic' platform is
used to boot an S-mode payload like U-Boot or OS kernel directly.
* The ``/cpus`` node should contain at least one subnode for E51 and the number
of subnodes should match QEMU's ``-smp`` option.
* The ``/memory`` reg size should match QEMU's selected RAM size via the ``-m``
option.
* It should contain a node for the CLINT device with a compatible string
"riscv,clint0".
When ``-bios`` is not specified or set to ``default``, the OpenSBI
``fw_dynamic`` BIOS image for the ``generic`` platform is used to boot an
S-mode payload like U-Boot or OS kernel directly.
For example, the following commands show building a U-Boot image from U-Boot
mainline v2021.07 for the Microchip Icicle Kit board:
@ -146,4 +99,13 @@ CAVEATS:
``u-boot.bin`` has to be used which does contain one. To use the ELF image,
we need to change to CONFIG_OF_EMBED or CONFIG_OF_PRIOR_STAGE.
Running HSS
-----------
The machine ``microchip-icicle-kit`` used to run the Hart Software Services
(HSS_); however, HSS development has progressed and the QEMU machine
implementation lags behind. Currently, running the HSS no longer works.
There is missing support in the clock and memory controller devices. In
particular, reading from the SD card does not work.
.. _HSS: https://github.com/polarfire-soc/hart-software-services
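
Following the new truth table, a direct kernel boot without any firmware would look roughly like this (image paths are placeholders; remember that this machine does not generate a device tree, so -dtb is needed):

    $ qemu-system-riscv64 -M microchip-icicle-kit -smp 5 -m 3G \
          -display none -serial stdio \
          -bios none -kernel path/to/Image -dtb path/to/board.dtb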


@ -27,7 +27,9 @@
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/misc/mchp_pfsoc_sysreg.h"
#include "system/runstate.h"
#define MSS_RESET_CR 0x18
#define ENVM_CR 0xb8
#define MESSAGE_INT 0x118c
@ -56,6 +58,11 @@ static void mchp_pfsoc_sysreg_write(void *opaque, hwaddr offset,
{
MchpPfSoCSysregState *s = opaque;
switch (offset) {
case MSS_RESET_CR:
if (value == 0xdead) {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}
break;
case MESSAGE_INT:
qemu_irq_lower(s->irq);
break;
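
Reassembled from the hunk above, the write handler now looks roughly like this; the 0xdead magic value comes straight from the diff, while the assumption that other offsets stay write-ignored is ours:

    static void mchp_pfsoc_sysreg_write(void *opaque, hwaddr offset,
                                        uint64_t value, unsigned size)
    {
        MchpPfSoCSysregState *s = opaque;

        switch (offset) {
        case MSS_RESET_CR:
            /* a guest write of the magic value requests a system reset */
            if (value == 0xdead) {
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
            break;
        case MESSAGE_INT:
            qemu_irq_lower(s->irq);
            break;
        default:
            /* other registers: write-ignored in this model (assumption) */
            break;
        }
    }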


@ -39,6 +39,7 @@
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
@ -61,9 +62,6 @@
#define BIOS_FILENAME "hss.bin"
#define RESET_VECTOR 0x20220000
/* CLINT timebase frequency */
#define CLINT_TIMEBASE_FREQ 1000000
/* GEM version */
#define GEM_REVISION 0x0107010c
@ -193,6 +191,7 @@ static void microchip_pfsoc_soc_instance_init(Object *obj)
static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
MicrochipIcicleKitState *iks = MICROCHIP_ICICLE_KIT_MACHINE(ms);
MicrochipPFSoCState *s = MICROCHIP_PFSOC(dev);
const MemMapEntry *memmap = microchip_pfsoc_memmap;
MemoryRegion *system_memory = get_system_memory();
@ -253,7 +252,7 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp)
memmap[MICROCHIP_PFSOC_CLINT].base + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
CLINT_TIMEBASE_FREQ, false);
iks->clint_timebase_freq, false);
/* L2 cache controller */
create_unimplemented_device("microchip.pfsoc.l2cc",
@ -516,7 +515,6 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
uint64_t mem_low_size, mem_high_size;
hwaddr firmware_load_addr;
const char *firmware_name;
bool kernel_as_payload = false;
target_ulong firmware_end_addr, kernel_start_addr;
uint64_t kernel_entry;
uint64_t fdt_load_addr;
@ -579,45 +577,50 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
}
/*
* We follow the following table to select which payload we execute.
* We follow the following table to select which firmware we use.
*
* -bios | -kernel    | payload
* ------+------------+--------
* N     | N          | HSS
* Y     | don't care | HSS
* N     | Y          | kernel
*
* This ensures backwards compatibility with how we used to expose -bios
* to users but allows them to run through direct kernel booting as well.
*
* When -kernel is used for direct boot, -dtb must be present to provide
* a valid device tree for the board, as we don't generate device tree.
* -bios         | -kernel    | firmware
* --------------+------------+-----------------
* none          | N          | error
* none          | Y          | kernel
* NULL, default | N          | BIOS_FILENAME
* NULL, default | Y          | RISCV64_BIOS_BIN
* other         | don't care | other
*/
if (machine->kernel_filename && machine->dtb) {
int fdt_size;
machine->fdt = load_device_tree(machine->dtb, &fdt_size);
if (!machine->fdt) {
error_report("load_device_tree() failed");
if (machine->firmware && !strcmp(machine->firmware, "none")) {
if (!machine->kernel_filename) {
error_report("for -bios none, a kernel is required");
exit(1);
}
firmware_name = RISCV64_BIOS_BIN;
firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base;
kernel_as_payload = true;
}
if (!kernel_as_payload) {
firmware_name = BIOS_FILENAME;
firmware_name = NULL;
firmware_load_addr = RESET_VECTOR;
} else if (!machine->firmware || !strcmp(machine->firmware, "default")) {
if (machine->kernel_filename) {
firmware_name = RISCV64_BIOS_BIN;
firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base;
} else {
firmware_name = BIOS_FILENAME;
firmware_load_addr = RESET_VECTOR;
}
} else {
firmware_name = machine->firmware;
firmware_load_addr = RESET_VECTOR;
}
/* Load the firmware */
firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
&firmware_load_addr, NULL);
/* Load the firmware if necessary */
firmware_end_addr = firmware_load_addr;
if (firmware_name) {
char *filename = riscv_find_firmware(firmware_name, NULL);
if (filename) {
firmware_end_addr = riscv_load_firmware(filename,
&firmware_load_addr, NULL);
g_free(filename);
}
}
riscv_boot_info_init(&boot_info, &s->soc.u_cpus);
if (kernel_as_payload) {
if (machine->kernel_filename) {
kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info,
firmware_end_addr);
@ -625,20 +628,82 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
true, NULL);
kernel_entry = boot_info.image_low_addr;
/* Compute the fdt load address in dram */
fdt_load_addr = riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base,
memmap[MICROCHIP_PFSOC_DRAM_LO].size,
machine, &boot_info);
riscv_load_fdt(fdt_load_addr, machine->fdt);
if (machine->dtb) {
int fdt_size;
machine->fdt = load_device_tree(machine->dtb, &fdt_size);
if (!machine->fdt) {
error_report("load_device_tree() failed");
exit(1);
}
/* Compute the FDT load address in DRAM */
hwaddr kernel_ram_base = memmap[MICROCHIP_PFSOC_DRAM_LO].base;
hwaddr kernel_ram_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size;
if (kernel_entry - kernel_ram_base >= kernel_ram_size) {
kernel_ram_base = memmap[MICROCHIP_PFSOC_DRAM_HI].base;
kernel_ram_size = mem_high_size;
}
fdt_load_addr = riscv_compute_fdt_addr(kernel_ram_base, kernel_ram_size,
machine, &boot_info);
riscv_load_fdt(fdt_load_addr, machine->fdt);
} else {
warn_report_once("The QEMU microchip-icicle-kit machine does not "
"generate a device tree, so no device tree is "
"being provided to the guest.");
fdt_load_addr = 0;
}
hwaddr start_addr;
if (firmware_name) {
start_addr = firmware_load_addr;
} else {
start_addr = kernel_entry;
}
/* Load the reset vector */
riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, firmware_load_addr,
riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, start_addr,
memmap[MICROCHIP_PFSOC_ENVM_DATA].base,
memmap[MICROCHIP_PFSOC_ENVM_DATA].size,
kernel_entry, fdt_load_addr);
}
}
static void microchip_icicle_kit_set_clint_timebase_freq(Object *obj,
Visitor *v,
const char *name,
void *opaque,
Error **errp)
{
MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(obj);
uint32_t value;
if (!visit_type_uint32(v, name, &value, errp)) {
return;
}
s->clint_timebase_freq = value;
}
static void microchip_icicle_kit_get_clint_timebase_freq(Object *obj,
Visitor *v,
const char *name,
void *opaque,
Error **errp)
{
MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(obj);
uint32_t value = s->clint_timebase_freq;
visit_type_uint32(v, name, &value, errp);
}
static void microchip_icicle_kit_machine_instance_init(Object *obj)
{
MicrochipIcicleKitState *m = MICROCHIP_ICICLE_KIT_MACHINE(obj);
m->clint_timebase_freq = 1000000;
}
static void microchip_icicle_kit_machine_class_init(ObjectClass *oc,
const void *data)
{
@ -661,12 +726,20 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc,
* See memory_tests() in mss_ddr.c in the HSS source code.
*/
mc->default_ram_size = 1537 * MiB;
object_class_property_add(oc, "clint-timebase-frequency", "uint32_t",
microchip_icicle_kit_get_clint_timebase_freq,
microchip_icicle_kit_set_clint_timebase_freq,
NULL, NULL);
object_class_property_set_description(oc, "clint-timebase-frequency",
"Set CLINT timebase frequency in Hz.");
}
static const TypeInfo microchip_icicle_kit_machine_typeinfo = {
.name = MACHINE_TYPE_NAME("microchip-icicle-kit"),
.parent = TYPE_MACHINE,
.class_init = microchip_icicle_kit_machine_class_init,
.instance_init = microchip_icicle_kit_machine_instance_init,
.instance_size = sizeof(MicrochipIcicleKitState),
};
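
The new property can be set from the command line like any other machine option; the frequency below is purely illustrative:

    $ qemu-system-riscv64 -M microchip-icicle-kit,clint-timebase-frequency=600000000 ...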


@ -72,7 +72,7 @@ static void csr_call(char *cmd, uint64_t cpu_num, int csrno, uint64_t *val)
ret = riscv_csrr(env, csrno, (target_ulong *)val);
} else if (strcmp(cmd, "set_csr") == 0) {
ret = riscv_csrrw(env, csrno, NULL, *(target_ulong *)val,
MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}
g_assert(ret == RISCV_EXCP_NONE);
@ -104,8 +104,11 @@ static bool csr_qtest_callback(CharBackend *chr, gchar **words)
static void riscv_cpu_register_csr_qtest_callback(void)
{
static GOnce once;
g_once(&once, (GThreadFunc)qtest_set_command_cb, csr_qtest_callback);
static bool first = true;
if (first) {
first = false;
qtest_set_command_cb(csr_qtest_callback);
}
}
#endif
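
For context on the fix above: g_once() invokes its callback through a GThreadFunc, i.e. gpointer (*)(gpointer), so casting qtest_set_command_cb to that type and letting g_once() call it is undefined behavior in C, even when the ABI makes it appear to work. A standalone sketch of the problem (not QEMU code):

    typedef void *(*generic_fn)(void *);      /* the shape of GThreadFunc */

    static void set_cb(int (*cb)(void))       /* an incompatible signature */
    {
        (void)cb;
    }

    int main(void)
    {
        /* The conversion itself is permitted (C11 6.3.2.3p8)... */
        generic_fn f = (generic_fn)set_cb;
        /* ...but calling through the mismatched type, f(0), is UB. */
        (void)f;
        return 0;
    }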


@ -198,6 +198,32 @@ acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
aml_append(scope, dev);
}
/*
* Add DSDT entry for the IOMMU platform device.
* ACPI ID for IOMMU is defined in the section 6.2 of RISC-V BRS spec.
* https://github.com/riscv-non-isa/riscv-brs/releases/download/v0.8/riscv-brs-spec.pdf
*/
static void acpi_dsdt_add_iommu_sys(Aml *scope, const MemMapEntry *iommu_memmap,
uint32_t iommu_irq)
{
uint32_t i;
Aml *dev = aml_device("IMU0");
aml_append(dev, aml_name_decl("_HID", aml_string("RSCV0004")));
aml_append(dev, aml_name_decl("_UID", aml_int(0)));
Aml *crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(iommu_memmap->base,
iommu_memmap->size, AML_READ_WRITE));
for (i = iommu_irq; i < iommu_irq + 4; i++) {
aml_append(crs, aml_interrupt(AML_CONSUMER, AML_EDGE, AML_ACTIVE_LOW,
AML_EXCLUSIVE, &i, 1));
}
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
}
/*
* Serial Port Console Redirection Table (SPCR)
* Rev: 1.10
@ -450,6 +476,9 @@ static void build_dsdt(GArray *table_data,
}
acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ);
if (virt_is_iommu_sys_enabled(s)) {
acpi_dsdt_add_iommu_sys(scope, &memmap[VIRT_IOMMU_SYS], IOMMU_SYS_IRQ);
}
if (socket_count == 1) {
virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
@ -602,6 +631,187 @@ static void build_madt(GArray *table_data,
acpi_table_end(linker, &table);
}
#define ID_MAPPING_ENTRY_SIZE 20
#define IOMMU_ENTRY_SIZE 40
#define RISCV_INTERRUPT_WIRE_OFFSSET 40
#define ROOT_COMPLEX_ENTRY_SIZE 20
#define RIMT_NODE_OFFSET 48
/*
* ID Mapping Structure
*/
static void build_rimt_id_mapping(GArray *table_data, uint32_t source_id_base,
uint32_t num_ids, uint32_t dest_id_base)
{
/* Source ID Base */
build_append_int_noprefix(table_data, source_id_base, 4);
/* Number of IDs */
build_append_int_noprefix(table_data, num_ids, 4);
/* Destination Device ID Base */
build_append_int_noprefix(table_data, source_id_base, 4);
/* Destination IOMMU Offset */
build_append_int_noprefix(table_data, dest_id_base, 4);
/* Flags */
build_append_int_noprefix(table_data, 0, 4);
}
struct AcpiRimtIdMapping {
uint32_t source_id_base;
uint32_t num_ids;
};
typedef struct AcpiRimtIdMapping AcpiRimtIdMapping;
/* Build the rimt ID mapping to IOMMU for a given PCI host bridge */
static int rimt_host_bridges(Object *obj, void *opaque)
{
GArray *idmap_blob = opaque;
if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
if (bus && !pci_bus_bypass_iommu(bus)) {
int min_bus, max_bus;
pci_bus_range(bus, &min_bus, &max_bus);
AcpiRimtIdMapping idmap = {
.source_id_base = min_bus << 8,
.num_ids = (max_bus - min_bus + 1) << 8,
};
g_array_append_val(idmap_blob, idmap);
}
}
return 0;
}
static int rimt_idmap_compare(gconstpointer a, gconstpointer b)
{
AcpiRimtIdMapping *idmap_a = (AcpiRimtIdMapping *)a;
AcpiRimtIdMapping *idmap_b = (AcpiRimtIdMapping *)b;
return idmap_a->source_id_base - idmap_b->source_id_base;
}
/*
* RISC-V IO Mapping Table (RIMT)
* https://github.com/riscv-non-isa/riscv-acpi-rimt/releases/download/v0.99/rimt-spec.pdf
*/
static void build_rimt(GArray *table_data, BIOSLinker *linker,
RISCVVirtState *s)
{
int i, nb_nodes, rc_mapping_count;
size_t node_size, iommu_offset = 0;
uint32_t id = 0;
g_autoptr(GArray) iommu_idmaps = g_array_new(false, true,
sizeof(AcpiRimtIdMapping));
AcpiTable table = { .sig = "RIMT", .rev = 1, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };
acpi_table_begin(&table, table_data);
object_child_foreach_recursive(object_get_root(),
rimt_host_bridges, iommu_idmaps);
/* Sort the ID mapping by Source ID Base*/
g_array_sort(iommu_idmaps, rimt_idmap_compare);
nb_nodes = 2; /* RC, IOMMU */
rc_mapping_count = iommu_idmaps->len;
/* Number of RIMT Nodes */
build_append_int_noprefix(table_data, nb_nodes, 4);
/* Offset to Array of RIMT Nodes */
build_append_int_noprefix(table_data, RIMT_NODE_OFFSET, 4);
build_append_int_noprefix(table_data, 0, 4); /* Reserved */
iommu_offset = table_data->len - table.table_offset;
/* IOMMU Device Structure */
build_append_int_noprefix(table_data, 0, 1); /* Type - IOMMU*/
build_append_int_noprefix(table_data, 1, 1); /* Revision */
node_size = IOMMU_ENTRY_SIZE;
build_append_int_noprefix(table_data, node_size, 2); /* Length */
build_append_int_noprefix(table_data, 0, 2); /* Reserved */
build_append_int_noprefix(table_data, id++, 2); /* ID */
if (virt_is_iommu_sys_enabled(s)) {
/* Hardware ID */
build_append_int_noprefix(table_data, 'R', 1);
build_append_int_noprefix(table_data, 'S', 1);
build_append_int_noprefix(table_data, 'C', 1);
build_append_int_noprefix(table_data, 'V', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '4', 1);
/* Base Address */
build_append_int_noprefix(table_data,
s->memmap[VIRT_IOMMU_SYS].base, 8);
build_append_int_noprefix(table_data, 0, 4); /* Flags */
} else {
/* Hardware ID */
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '1', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '0', 1);
build_append_int_noprefix(table_data, '1', 1);
build_append_int_noprefix(table_data, '4', 1);
build_append_int_noprefix(table_data, 0, 8); /* Base Address */
build_append_int_noprefix(table_data, 1, 4); /* Flags */
}
build_append_int_noprefix(table_data, 0, 4); /* Proximity Domain */
build_append_int_noprefix(table_data, 0, 2); /* PCI Segment number */
/* PCIe B/D/F */
if (virt_is_iommu_sys_enabled(s)) {
build_append_int_noprefix(table_data, 0, 2);
} else {
build_append_int_noprefix(table_data, s->pci_iommu_bdf, 2);
}
/* Number of interrupt wires */
build_append_int_noprefix(table_data, 0, 2);
/* Interrupt wire array offset */
build_append_int_noprefix(table_data, RISCV_INTERRUPT_WIRE_OFFSSET, 2);
/* PCIe Root Complex Node */
build_append_int_noprefix(table_data, 1, 1); /* Type */
build_append_int_noprefix(table_data, 1, 1); /* Revision */
node_size = ROOT_COMPLEX_ENTRY_SIZE +
ID_MAPPING_ENTRY_SIZE * rc_mapping_count;
build_append_int_noprefix(table_data, node_size, 2); /* Length */
build_append_int_noprefix(table_data, 0, 2); /* Reserved */
build_append_int_noprefix(table_data, id++, 2); /* ID */
build_append_int_noprefix(table_data, 0, 4); /* Flags */
build_append_int_noprefix(table_data, 0, 2); /* Reserved */
/* PCI Segment number */
build_append_int_noprefix(table_data, 0, 2);
/* ID mapping array offset */
build_append_int_noprefix(table_data, ROOT_COMPLEX_ENTRY_SIZE, 2);
/* Number of ID mappings */
build_append_int_noprefix(table_data, rc_mapping_count, 2);
/* Output Reference */
AcpiRimtIdMapping *range;
/* ID mapping array */
for (i = 0; i < iommu_idmaps->len; i++) {
range = &g_array_index(iommu_idmaps, AcpiRimtIdMapping, i);
if (virt_is_iommu_sys_enabled(s)) {
range->source_id_base = 0;
} else {
range->source_id_base = s->pci_iommu_bdf + 1;
}
range->num_ids = 0xffff - s->pci_iommu_bdf;
build_rimt_id_mapping(table_data, range->source_id_base,
range->num_ids, iommu_offset);
}
acpi_table_end(linker, &table);
}
/*
* ACPI spec, Revision 6.5+
* 5.2.16 System Resource Affinity Table (SRAT)
@ -679,6 +889,11 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_rhct(tables_blob, tables->linker, s);
if (virt_is_iommu_sys_enabled(s) || s->pci_iommu_bdf) {
acpi_add_table(table_offsets, tables_blob);
build_rimt(tables_blob, tables->linker, s);
}
acpi_add_table(table_offsets, tables_blob);
spcr_setup(tables_blob, tables->linker, s);
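
Once a guest is booted on the virt machine with an IOMMU configured, the generated table can be inspected with the ACPICA tools, provided the installed iasl is new enough to know the RIMT layout (package names vary by distro):

    $ acpidump -b -n RIMT     # writes rimt.dat
    $ iasl -d rimt.dat        # disassembles it to rimt.dsl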


@ -166,8 +166,8 @@ static void virt_flash_map1(PFlashCFI01 *flash,
static void virt_flash_map(RISCVVirtState *s,
MemoryRegion *sysmem)
{
hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
hwaddr flashbase = virt_memmap[VIRT_FLASH].base;
hwaddr flashsize = s->memmap[VIRT_FLASH].size / 2;
hwaddr flashbase = s->memmap[VIRT_FLASH].base;
virt_flash_map1(s->flash[0], flashbase, flashsize,
sysmem);
@ -301,16 +301,16 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
}
}
static void create_fdt_socket_memory(RISCVVirtState *s,
const MemMapEntry *memmap, int socket)
static void create_fdt_socket_memory(RISCVVirtState *s, int socket)
{
g_autofree char *mem_name = NULL;
uint64_t addr, size;
hwaddr addr;
uint64_t size;
MachineState *ms = MACHINE(s);
addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket);
addr = s->memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket);
size = riscv_socket_mem_size(ms, socket);
mem_name = g_strdup_printf("/memory@%lx", (long)addr);
mem_name = g_strdup_printf("/memory@%"HWADDR_PRIx, addr);
qemu_fdt_add_subnode(ms->fdt, mem_name);
qemu_fdt_setprop_cells(ms->fdt, mem_name, "reg",
addr >> 32, addr, size >> 32, size);
@ -319,7 +319,7 @@ static void create_fdt_socket_memory(RISCVVirtState *s,
}
static void create_fdt_socket_clint(RISCVVirtState *s,
const MemMapEntry *memmap, int socket,
int socket,
uint32_t *intc_phandles)
{
int cpu;
@ -340,21 +340,22 @@ static void create_fdt_socket_clint(RISCVVirtState *s,
clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
}
clint_addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
clint_addr = s->memmap[VIRT_CLINT].base +
(s->memmap[VIRT_CLINT].size * socket);
clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
qemu_fdt_add_subnode(ms->fdt, clint_name);
qemu_fdt_setprop_string_array(ms->fdt, clint_name, "compatible",
(char **)&clint_compat,
ARRAY_SIZE(clint_compat));
qemu_fdt_setprop_cells(ms->fdt, clint_name, "reg",
0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size);
0x0, clint_addr, 0x0, s->memmap[VIRT_CLINT].size);
qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended",
clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
riscv_socket_fdt_write_id(ms, clint_name, socket);
}
static void create_fdt_socket_aclint(RISCVVirtState *s,
const MemMapEntry *memmap, int socket,
int socket,
uint32_t *intc_phandles)
{
int cpu;
@ -381,8 +382,10 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2;
if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
addr = s->memmap[VIRT_CLINT].base +
(s->memmap[VIRT_CLINT].size * socket);
name = g_strdup_printf("/soc/mswi@%lx", addr);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible",
"riscv,aclint-mswi");
@ -397,13 +400,13 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
}
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
addr = memmap[VIRT_CLINT].base +
addr = s->memmap[VIRT_CLINT].base +
(RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket);
size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE;
} else {
addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
(memmap[VIRT_CLINT].size * socket);
size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
addr = s->memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
(s->memmap[VIRT_CLINT].size * socket);
size = s->memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
}
name = g_strdup_printf("/soc/mtimer@%lx", addr);
qemu_fdt_add_subnode(ms->fdt, name);
@ -420,14 +423,15 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
g_free(name);
if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
addr = memmap[VIRT_ACLINT_SSWI].base +
(memmap[VIRT_ACLINT_SSWI].size * socket);
addr = s->memmap[VIRT_ACLINT_SSWI].base +
(s->memmap[VIRT_ACLINT_SSWI].size * socket);
name = g_strdup_printf("/soc/sswi@%lx", addr);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible",
"riscv,aclint-sswi");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
0x0, addr, 0x0, s->memmap[VIRT_ACLINT_SSWI].size);
qemu_fdt_setprop(ms->fdt, name, "interrupts-extended",
aclint_sswi_cells, aclint_cells_size);
qemu_fdt_setprop(ms->fdt, name, "interrupt-controller", NULL, 0);
@ -438,7 +442,7 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
}
static void create_fdt_socket_plic(RISCVVirtState *s,
const MemMapEntry *memmap, int socket,
int socket,
uint32_t *phandle, uint32_t *intc_phandles,
uint32_t *plic_phandles)
{
@ -452,7 +456,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
};
plic_phandles[socket] = (*phandle)++;
plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
plic_addr = s->memmap[VIRT_PLIC].base +
(s->memmap[VIRT_PLIC].size * socket);
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
qemu_fdt_add_subnode(ms->fdt, plic_name);
qemu_fdt_setprop_cell(ms->fdt, plic_name,
@ -491,7 +496,7 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
}
qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg",
0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size);
0x0, plic_addr, 0x0, s->memmap[VIRT_PLIC].size);
qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev",
VIRT_IRQCHIP_NUM_SOURCES - 1);
riscv_socket_fdt_write_id(ms, plic_name, socket);
@ -500,8 +505,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
if (!socket) {
platform_bus_add_all_fdt_nodes(ms->fdt, plic_name,
memmap[VIRT_PLATFORM_BUS].base,
memmap[VIRT_PLATFORM_BUS].size,
s->memmap[VIRT_PLATFORM_BUS].base,
s->memmap[VIRT_PLATFORM_BUS].size,
VIRT_PLATFORM_BUS_IRQ);
}
}
@ -588,7 +593,7 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
}
static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
static void create_fdt_imsic(RISCVVirtState *s,
uint32_t *phandle, uint32_t *intc_phandles,
uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
{
@ -597,12 +602,12 @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
if (!kvm_enabled()) {
/* M-level IMSIC node */
create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles,
create_fdt_one_imsic(s, s->memmap[VIRT_IMSIC_M].base, intc_phandles,
*msi_m_phandle, true, 0);
}
/* S-level IMSIC node */
create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles,
create_fdt_one_imsic(s, s->memmap[VIRT_IMSIC_S].base, intc_phandles,
*msi_s_phandle, false,
imsic_num_bits(s->aia_guests + 1));
@ -679,7 +684,7 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
}
static void create_fdt_socket_aplic(RISCVVirtState *s,
const MemMapEntry *memmap, int socket,
int socket,
uint32_t msi_m_phandle,
uint32_t msi_s_phandle,
uint32_t *phandle,
@ -696,18 +701,19 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
if (!kvm_enabled()) {
/* M-level APLIC node */
aplic_addr = memmap[VIRT_APLIC_M].base +
(memmap[VIRT_APLIC_M].size * socket);
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
aplic_addr = s->memmap[VIRT_APLIC_M].base +
(s->memmap[VIRT_APLIC_M].size * socket);
create_fdt_one_aplic(s, socket, aplic_addr,
s->memmap[VIRT_APLIC_M].size,
msi_m_phandle, intc_phandles,
aplic_m_phandle, aplic_s_phandle,
true, num_harts);
}
/* S-level APLIC node */
aplic_addr = memmap[VIRT_APLIC_S].base +
(memmap[VIRT_APLIC_S].size * socket);
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
aplic_addr = s->memmap[VIRT_APLIC_S].base +
(s->memmap[VIRT_APLIC_S].size * socket);
create_fdt_one_aplic(s, socket, aplic_addr, s->memmap[VIRT_APLIC_S].size,
msi_s_phandle, intc_phandles,
aplic_s_phandle, 0,
false, num_harts);
@ -715,8 +721,8 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
if (!socket) {
g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr);
platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
memmap[VIRT_PLATFORM_BUS].base,
memmap[VIRT_PLATFORM_BUS].size,
s->memmap[VIRT_PLATFORM_BUS].base,
s->memmap[VIRT_PLATFORM_BUS].size,
VIRT_PLATFORM_BUS_IRQ);
}
@ -734,7 +740,7 @@ static void create_fdt_pmu(RISCVVirtState *s)
riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name);
}
static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
static void create_fdt_sockets(RISCVVirtState *s,
uint32_t *phandle,
uint32_t *irq_mmio_phandle,
uint32_t *irq_pcie_phandle,
@ -770,20 +776,20 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
create_fdt_socket_cpus(s, socket, clust_name, phandle,
&intc_phandles[phandle_pos]);
create_fdt_socket_memory(s, memmap, socket);
create_fdt_socket_memory(s, socket);
if (virt_aclint_allowed() && s->have_aclint) {
create_fdt_socket_aclint(s, memmap, socket,
create_fdt_socket_aclint(s, socket,
&intc_phandles[phandle_pos]);
} else if (tcg_enabled()) {
create_fdt_socket_clint(s, memmap, socket,
create_fdt_socket_clint(s, socket,
&intc_phandles[phandle_pos]);
}
}
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
create_fdt_imsic(s, memmap, phandle, intc_phandles,
&msi_m_phandle, &msi_s_phandle);
create_fdt_imsic(s, phandle, intc_phandles,
&msi_m_phandle, &msi_s_phandle);
*msi_pcie_phandle = msi_s_phandle;
}
@ -792,7 +798,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
* mode, we'll use only one APLIC instance.
*/
if (!virt_use_emulated_aplic(s->aia_type)) {
create_fdt_socket_aplic(s, memmap, 0,
create_fdt_socket_aplic(s, 0,
msi_m_phandle, msi_s_phandle, phandle,
&intc_phandles[0], xplic_phandles,
ms->smp.cpus);
@ -806,11 +812,11 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
phandle_pos -= s->soc[socket].num_harts;
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
create_fdt_socket_plic(s, memmap, socket, phandle,
create_fdt_socket_plic(s, socket, phandle,
&intc_phandles[phandle_pos],
xplic_phandles);
} else {
create_fdt_socket_aplic(s, memmap, socket,
create_fdt_socket_aplic(s, socket,
msi_m_phandle, msi_s_phandle, phandle,
&intc_phandles[phandle_pos],
xplic_phandles,
@ -837,21 +843,24 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
riscv_socket_fdt_write_distance_matrix(ms);
}
static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t irq_virtio_phandle)
static void create_fdt_virtio(RISCVVirtState *s, uint32_t irq_virtio_phandle)
{
int i;
MachineState *ms = MACHINE(s);
hwaddr virtio_base = s->memmap[VIRT_VIRTIO].base;
for (i = 0; i < VIRTIO_COUNT; i++) {
g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx",
(long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size));
g_autofree char *name = NULL;
uint64_t size = s->memmap[VIRT_VIRTIO].size;
hwaddr addr = virtio_base + i * size;
name = g_strdup_printf("/soc/virtio_mmio@%"HWADDR_PRIx, addr);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
0x0, memmap[VIRT_VIRTIO].size);
0x0, addr,
0x0, size);
qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent",
irq_virtio_phandle);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
@ -864,7 +873,7 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
}
}
static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
static void create_fdt_pcie(RISCVVirtState *s,
uint32_t irq_pcie_phandle,
uint32_t msi_pcie_phandle,
uint32_t iommu_sys_phandle)
@ -872,8 +881,8 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
name = g_strdup_printf("/soc/pci@%lx",
(long) memmap[VIRT_PCIE_ECAM].base);
name = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
s->memmap[VIRT_PCIE_ECAM].base);
qemu_fdt_setprop_cell(ms->fdt, name, "#address-cells",
FDT_PCI_ADDR_CELLS);
qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells",
@ -884,19 +893,19 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_string(ms->fdt, name, "device_type", "pci");
qemu_fdt_setprop_cell(ms->fdt, name, "linux,pci-domain", 0);
qemu_fdt_setprop_cells(ms->fdt, name, "bus-range", 0,
memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
s->memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
qemu_fdt_setprop(ms->fdt, name, "dma-coherent", NULL, 0);
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
qemu_fdt_setprop_cell(ms->fdt, name, "msi-parent", msi_pcie_phandle);
}
qemu_fdt_setprop_cells(ms->fdt, name, "reg", 0,
memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
s->memmap[VIRT_PCIE_ECAM].base, 0, s->memmap[VIRT_PCIE_ECAM].size);
qemu_fdt_setprop_sized_cells(ms->fdt, name, "ranges",
1, FDT_PCI_RANGE_IOPORT, 2, 0,
2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size,
2, s->memmap[VIRT_PCIE_PIO].base, 2, s->memmap[VIRT_PCIE_PIO].size,
1, FDT_PCI_RANGE_MMIO,
2, memmap[VIRT_PCIE_MMIO].base,
2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size,
2, s->memmap[VIRT_PCIE_MMIO].base,
2, s->memmap[VIRT_PCIE_MMIO].base, 2, s->memmap[VIRT_PCIE_MMIO].size,
1, FDT_PCI_RANGE_MMIO_64BIT,
2, virt_high_pcie_memmap.base,
2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);
@ -910,16 +919,15 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle);
}
static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t *phandle)
static void create_fdt_reset(RISCVVirtState *s, uint32_t *phandle)
{
char *name;
uint32_t test_phandle;
MachineState *ms = MACHINE(s);
test_phandle = (*phandle)++;
name = g_strdup_printf("/soc/test@%lx",
(long)memmap[VIRT_TEST].base);
name = g_strdup_printf("/soc/test@%"HWADDR_PRIx,
s->memmap[VIRT_TEST].base);
qemu_fdt_add_subnode(ms->fdt, name);
{
static const char * const compat[3] = {
@ -929,7 +937,7 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
(char **)&compat, ARRAY_SIZE(compat));
}
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
0x0, memmap[VIRT_TEST].base, 0x0, memmap[VIRT_TEST].size);
0x0, s->memmap[VIRT_TEST].base, 0x0, s->memmap[VIRT_TEST].size);
qemu_fdt_setprop_cell(ms->fdt, name, "phandle", test_phandle);
test_phandle = qemu_fdt_get_phandle(ms->fdt, name);
g_free(name);
@ -951,18 +959,19 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
g_free(name);
}
static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
static void create_fdt_uart(RISCVVirtState *s,
uint32_t irq_mmio_phandle)
{
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base);
name = g_strdup_printf("/soc/serial@%"HWADDR_PRIx,
s->memmap[VIRT_UART0].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible", "ns16550a");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
0x0, memmap[VIRT_UART0].base,
0x0, memmap[VIRT_UART0].size);
0x0, s->memmap[VIRT_UART0].base,
0x0, s->memmap[VIRT_UART0].size);
qemu_fdt_setprop_cell(ms->fdt, name, "clock-frequency", 3686400);
qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent", irq_mmio_phandle);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
@ -975,18 +984,19 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_string(ms->fdt, "/aliases", "serial0", name);
}
static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
static void create_fdt_rtc(RISCVVirtState *s,
uint32_t irq_mmio_phandle)
{
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base);
name = g_strdup_printf("/soc/rtc@%"HWADDR_PRIx,
s->memmap[VIRT_RTC].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible",
"google,goldfish-rtc");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size);
0x0, s->memmap[VIRT_RTC].base, 0x0, s->memmap[VIRT_RTC].size);
qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent",
irq_mmio_phandle);
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
@ -996,11 +1006,11 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
}
}
static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap)
static void create_fdt_flash(RISCVVirtState *s)
{
MachineState *ms = MACHINE(s);
hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
hwaddr flashbase = virt_memmap[VIRT_FLASH].base;
hwaddr flashsize = s->memmap[VIRT_FLASH].size / 2;
hwaddr flashbase = s->memmap[VIRT_FLASH].base;
g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase);
qemu_fdt_add_subnode(ms->fdt, name);
@ -1011,11 +1021,11 @@ static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4);
}
static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap)
static void create_fdt_fw_cfg(RISCVVirtState *s)
{
MachineState *ms = MACHINE(s);
hwaddr base = memmap[VIRT_FW_CFG].base;
hwaddr size = memmap[VIRT_FW_CFG].size;
hwaddr base = s->memmap[VIRT_FW_CFG].base;
hwaddr size = s->memmap[VIRT_FW_CFG].size;
g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
qemu_fdt_add_subnode(ms->fdt, nodename);
@ -1034,8 +1044,8 @@ static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf)
g_autofree char *iommu_node = NULL;
g_autofree char *pci_node = NULL;
pci_node = g_strdup_printf("/soc/pci@%lx",
(long) virt_memmap[VIRT_PCIE_ECAM].base);
pci_node = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
s->memmap[VIRT_PCIE_ECAM].base);
iommu_node = g_strdup_printf("%s/virtio_iommu@%x,%x", pci_node,
PCI_SLOT(bdf), PCI_FUNC(bdf));
iommu_phandle = qemu_fdt_alloc_phandle(fdt);
@ -1103,8 +1113,8 @@ static void create_fdt_iommu(RISCVVirtState *s, uint16_t bdf)
g_autofree char *iommu_node = NULL;
g_autofree char *pci_node = NULL;
pci_node = g_strdup_printf("/soc/pci@%lx",
(long) virt_memmap[VIRT_PCIE_ECAM].base);
pci_node = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
s->memmap[VIRT_PCIE_ECAM].base);
iommu_node = g_strdup_printf("%s/iommu@%x", pci_node, bdf);
iommu_phandle = qemu_fdt_alloc_phandle(fdt);
qemu_fdt_add_subnode(fdt, iommu_node);
@ -1117,6 +1127,7 @@ static void create_fdt_iommu(RISCVVirtState *s, uint16_t bdf)
qemu_fdt_setprop_cells(fdt, pci_node, "iommu-map",
0, iommu_phandle, 0, bdf,
bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf);
s->pci_iommu_bdf = bdf;
}
static void finalize_fdt(RISCVVirtState *s)
@ -1125,27 +1136,27 @@ static void finalize_fdt(RISCVVirtState *s)
uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;
uint32_t iommu_sys_phandle = 1;
create_fdt_sockets(s, virt_memmap, &phandle, &irq_mmio_phandle,
create_fdt_sockets(s, &phandle, &irq_mmio_phandle,
&irq_pcie_phandle, &irq_virtio_phandle,
&msi_pcie_phandle);
create_fdt_virtio(s, virt_memmap, irq_virtio_phandle);
create_fdt_virtio(s, irq_virtio_phandle);
if (virt_is_iommu_sys_enabled(s)) {
create_fdt_iommu_sys(s, irq_mmio_phandle, msi_pcie_phandle,
&iommu_sys_phandle);
}
create_fdt_pcie(s, virt_memmap, irq_pcie_phandle, msi_pcie_phandle,
create_fdt_pcie(s, irq_pcie_phandle, msi_pcie_phandle,
iommu_sys_phandle);
create_fdt_reset(s, virt_memmap, &phandle);
create_fdt_reset(s, &phandle);
create_fdt_uart(s, virt_memmap, irq_mmio_phandle);
create_fdt_uart(s, irq_mmio_phandle);
create_fdt_rtc(s, virt_memmap, irq_mmio_phandle);
create_fdt_rtc(s, irq_mmio_phandle);
}
static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
static void create_fdt(RISCVVirtState *s)
{
MachineState *ms = MACHINE(s);
uint8_t rng_seed[32];
@ -1172,7 +1183,8 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
* The "/soc/pci@..." node is needed for PCIE hotplugs
* that might happen before finalize_fdt().
*/
name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base);
name = g_strdup_printf("/soc/pci@%"HWADDR_PRIx,
s->memmap[VIRT_PCIE_ECAM].base);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_add_subnode(ms->fdt, "/chosen");
@ -1184,8 +1196,8 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
qemu_fdt_add_subnode(ms->fdt, "/aliases");
create_fdt_flash(s, memmap);
create_fdt_fw_cfg(s, memmap);
create_fdt_flash(s);
create_fdt_fw_cfg(s);
create_fdt_pmu(s);
}
@ -1261,9 +1273,8 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
return dev;
}
static FWCfgState *create_fw_cfg(const MachineState *ms)
static FWCfgState *create_fw_cfg(const MachineState *ms, hwaddr base)
{
hwaddr base = virt_memmap[VIRT_FW_CFG].base;
FWCfgState *fw_cfg;
fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16,
@ -1360,14 +1371,13 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
{
DeviceState *dev;
SysBusDevice *sysbus;
const MemMapEntry *memmap = virt_memmap;
int i;
MemoryRegion *sysmem = get_system_memory();
dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
qdev_prop_set_uint32(dev, "mmio_size", memmap[VIRT_PLATFORM_BUS].size);
qdev_prop_set_uint32(dev, "mmio_size", s->memmap[VIRT_PLATFORM_BUS].size);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
s->platform_bus_dev = dev;
@ -1378,7 +1388,7 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
}
memory_region_add_subregion(sysmem,
memmap[VIRT_PLATFORM_BUS].base,
s->memmap[VIRT_PLATFORM_BUS].base,
sysbus_mmio_get_region(sysbus, 0));
}
@ -1425,9 +1435,8 @@ static void virt_machine_done(Notifier *notifier, void *data)
{
RISCVVirtState *s = container_of(notifier, RISCVVirtState,
machine_done);
const MemMapEntry *memmap = virt_memmap;
MachineState *machine = MACHINE(s);
hwaddr start_addr = memmap[VIRT_DRAM].base;
hwaddr start_addr = s->memmap[VIRT_DRAM].base;
target_ulong firmware_end_addr, kernel_start_addr;
const char *firmware_name = riscv_default_firmware_name(&s->soc[0]);
uint64_t fdt_load_addr;
@ -1471,14 +1480,14 @@ static void virt_machine_done(Notifier *notifier, void *data)
* let's overwrite the address we jump to after reset to
* the base of the flash.
*/
start_addr = virt_memmap[VIRT_FLASH].base;
start_addr = s->memmap[VIRT_FLASH].base;
} else {
/*
* Pflash was supplied but either KVM guest or bios is not none.
* In this case, base of the flash would contain S-mode payload.
*/
riscv_setup_firmware_boot(machine);
kernel_entry = virt_memmap[VIRT_FLASH].base;
kernel_entry = s->memmap[VIRT_FLASH].base;
}
}
@ -1492,15 +1501,15 @@ static void virt_machine_done(Notifier *notifier, void *data)
kernel_entry = boot_info.image_low_addr;
}
fdt_load_addr = riscv_compute_fdt_addr(memmap[VIRT_DRAM].base,
memmap[VIRT_DRAM].size,
fdt_load_addr = riscv_compute_fdt_addr(s->memmap[VIRT_DRAM].base,
s->memmap[VIRT_DRAM].size,
machine, &boot_info);
riscv_load_fdt(fdt_load_addr, machine->fdt);
/* load the reset vector */
riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr,
virt_memmap[VIRT_MROM].base,
virt_memmap[VIRT_MROM].size, kernel_entry,
s->memmap[VIRT_MROM].base,
s->memmap[VIRT_MROM].size, kernel_entry,
fdt_load_addr);
/*
@ -1521,7 +1530,6 @@ static void virt_machine_done(Notifier *notifier, void *data)
static void virt_machine_init(MachineState *machine)
{
const MemMapEntry *memmap = virt_memmap;
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
@ -1529,6 +1537,8 @@ static void virt_machine_init(MachineState *machine)
int i, base_hartid, hart_count;
int socket_count = riscv_socket_count(machine);
s->memmap = virt_memmap;
/* Check socket count limit */
if (VIRT_SOCKETS_MAX < socket_count) {
error_report("number of sockets/nodes should be less than %d",
@ -1576,7 +1586,7 @@ static void virt_machine_init(MachineState *machine)
if (virt_aclint_allowed() && s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
@ -1585,28 +1595,28 @@ static void virt_machine_init(MachineState *machine)
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size,
riscv_aclint_swi_create(s->memmap[VIRT_CLINT].base +
i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size +
riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * s->memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
i * memmap[VIRT_ACLINT_SSWI].size,
riscv_aclint_swi_create(s->memmap[VIRT_ACLINT_SSWI].base +
i * s->memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
}
} else if (tcg_enabled()) {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
s->memmap[VIRT_CLINT].base + i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
@ -1614,11 +1624,11 @@ static void virt_machine_init(MachineState *machine)
/* Per-socket interrupt controller */
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
s->irqchip[i] = virt_create_plic(memmap, i,
s->irqchip[i] = virt_create_plic(s->memmap, i,
base_hartid, hart_count);
} else {
s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
memmap, i, base_hartid,
s->memmap, i, base_hartid,
hart_count);
}
@ -1640,8 +1650,8 @@ static void virt_machine_init(MachineState *machine)
if (kvm_enabled() && virt_use_kvm_aia_aplic_imsic(s->aia_type)) {
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
memmap[VIRT_APLIC_S].base,
memmap[VIRT_IMSIC_S].base,
s->memmap[VIRT_APLIC_S].base,
s->memmap[VIRT_IMSIC_S].base,
s->aia_guests);
}
@ -1657,37 +1667,36 @@ static void virt_machine_init(MachineState *machine)
virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE;
} else {
virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE;
virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size;
virt_high_pcie_memmap.base = s->memmap[VIRT_DRAM].base +
machine->ram_size;
virt_high_pcie_memmap.base =
ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size);
}
s->memmap = virt_memmap;
/* register system main memory (actual RAM) */
memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
machine->ram);
memory_region_add_subregion(system_memory, s->memmap[VIRT_DRAM].base,
machine->ram);
/* boot rom */
memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
memmap[VIRT_MROM].size, &error_fatal);
memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
s->memmap[VIRT_MROM].size, &error_fatal);
memory_region_add_subregion(system_memory, s->memmap[VIRT_MROM].base,
mask_rom);
/*
* Init fw_cfg. Must be done before riscv_load_fdt, otherwise the
* device tree cannot be altered and we get FDT_ERR_NOSPACE.
*/
s->fw_cfg = create_fw_cfg(machine);
s->fw_cfg = create_fw_cfg(machine, s->memmap[VIRT_FW_CFG].base);
rom_set_fw(s->fw_cfg);
/* SiFive Test MMIO device */
sifive_test_create(memmap[VIRT_TEST].base);
sifive_test_create(s->memmap[VIRT_TEST].base);
/* VirtIO MMIO devices */
for (i = 0; i < VIRTIO_COUNT; i++) {
sysbus_create_simple("virtio-mmio",
memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
s->memmap[VIRT_VIRTIO].base + i * s->memmap[VIRT_VIRTIO].size,
qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
}
@ -1695,11 +1704,11 @@ static void virt_machine_init(MachineState *machine)
create_platform_bus(s, mmio_irqchip);
serial_mm_init(system_memory, memmap[VIRT_UART0].base,
serial_mm_init(system_memory, s->memmap[VIRT_UART0].base,
0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
sysbus_create_simple("goldfish_rtc", s->memmap[VIRT_RTC].base,
qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));
for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
@ -1717,7 +1726,7 @@ static void virt_machine_init(MachineState *machine)
exit(1);
}
} else {
create_fdt(s, memmap);
create_fdt(s);
}
if (virt_is_iommu_sys_enabled(s)) {


@ -67,6 +67,7 @@ typedef struct MicrochipIcicleKitState {
MachineState parent_obj;
/*< public >*/
uint32_t clint_timebase_freq;
MicrochipPFSoCState soc;
} MicrochipIcicleKitState;


@ -63,6 +63,7 @@ struct RISCVVirtState {
const MemMapEntry *memmap;
struct GPEXHost *gpex_host;
OnOffAuto iommu_sys;
uint16_t pci_iommu_bdf;
};
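
The repeated memmap[...] -> s->memmap[...] conversions above all follow from this field: the board caches a pointer to its const memory map once, and every device is then created from the machine state rather than from a file-scope array. A minimal sketch of the pattern, with illustrative addresses:

    static const MemMapEntry virt_memmap[] = {
        [VIRT_TEST] = { 0x100000, 0x1000 },   /* illustrative values */
        /* ... */
    };

    static void virt_machine_init(MachineState *machine)
    {
        RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);

        s->memmap = virt_memmap;   /* assigned once, read everywhere after */
        /* ... */
        sifive_test_create(s->memmap[VIRT_TEST].base);
    }
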
enum {


@ -75,6 +75,7 @@ const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
typedef struct riscv_cpu_profile {
struct riscv_cpu_profile *u_parent;
@ -813,8 +814,8 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask);
target_ulong *ret_value, target_ulong new_value,
target_ulong write_mask, uintptr_t ra);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@ -823,13 +824,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
target_ulong val)
{
riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}
static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
target_ulong val = 0;
riscv_csrrw(env, csrno, &val, 0, 0);
riscv_csrrw(env, csrno, &val, 0, 0, 0);
return val;
}
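
The new trailing uintptr_t argument threads the host return address from the calling TCG helper down to the individual CSR write handlers; 0, as in the inline wrappers above, means there is no translated-code caller to unwind. This is what lets write_misa() below recover the guest PC. A sketch of the calling side, assuming the usual GETPC() pattern; the helper name and exact signature are illustrative:

    void helper_csrrw(CPURISCVState *env, target_ulong csr,
                      target_ulong src, target_ulong write_mask)
    {
        target_ulong val = 0;
        /* GETPC() captures the host return address inside the helper. */
        RISCVException ret = riscv_csrrw(env, csr, &val, src,
                                         write_mask, GETPC());

        if (ret != RISCV_EXCP_NONE) {
            riscv_raise_exception(env, ret, GETPC());
        }
    }
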
@ -838,7 +839,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
target_ulong new_value);
target_ulong new_value,
uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@ -847,8 +849,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value, Int128 write_mask);
Int128 *ret_value, Int128 new_value,
Int128 write_mask, uintptr_t ra);
typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
Int128 *ret_value);


@ -1566,9 +1566,11 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
target_ulong old_pte;
if (riscv_cpu_sxl(env) == MXL_RV32) {
old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte));
old_pte = le32_to_cpu(old_pte);
} else {
old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte));
old_pte = le64_to_cpu(old_pte);
}
if (old_pte != pte) {
goto restart;
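
This hunk is the big-endian fix: the PTE was loaded from guest RAM in little-endian layout, so the comparand and the replacement handed to the cmpxchg must be converted to that layout as well. Otherwise the compare never succeeds on a big-endian host, old_pte != pte stays true, and the walker loops on restart forever. A stand-alone sketch of the corrected pattern, with __builtin_bswap32 standing in for cpu_to_le32()/le32_to_cpu() on a big-endian host:

    #include <stdint.h>
    #include <stdatomic.h>

    static uint32_t pte_cmpxchg_le(_Atomic uint32_t *pte_pa, uint32_t pte,
                                   uint32_t updated_pte)
    {
        /* Convert both values to the in-memory (little-endian) layout. */
        uint32_t expected = __builtin_bswap32(pte);
        uint32_t desired  = __builtin_bswap32(updated_pte);

        atomic_compare_exchange_strong(pte_pa, &expected, desired);

        /* Convert the old value back for the caller's retry check. */
        return __builtin_bswap32(expected);
    }
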


@ -30,6 +30,8 @@
#include "accel/tcg/getpc.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "tcg/insn-start-words.h"
#include "internals.h"
#include <stdbool.h>
/* CSR function table public API */
@ -830,13 +832,15 @@ static RISCVException seed(CPURISCVState *env, int csrno)
}
/* zicfiss CSR_SSP read and write */
static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_ssp(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->ssp;
return RISCV_EXCP_NONE;
}
static int write_ssp(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_ssp(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
env->ssp = val;
return RISCV_EXCP_NONE;
@ -851,7 +855,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno,
}
static RISCVException write_fflags(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@ -870,7 +874,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno,
}
static RISCVException write_frm(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@ -890,7 +894,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@ -942,7 +946,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno,
}
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@ -959,7 +963,7 @@ static RISCVException read_vxsat(CPURISCVState *env, int csrno,
}
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@ -976,7 +980,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno,
}
static RISCVException write_vstart(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@ -997,7 +1001,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@ -1055,7 +1059,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@ -1084,7 +1088,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MCYCLECFGH_BIT_MINH);
@ -1109,7 +1113,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@ -1136,7 +1140,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MINSTRETCFGH_BIT_MINH);
@ -1163,7 +1167,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
uint64_t mhpmevt_val = val;
@ -1201,7 +1205,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
uint64_t mhpmevth_val;
@ -1343,14 +1347,16 @@ static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
return RISCV_EXCP_NONE;
}
static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
int ctr_idx = csrno - CSR_MCYCLE;
return riscv_pmu_write_ctr(env, val, ctr_idx);
}
static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
int ctr_idx = csrno - CSR_MCYCLEH;
@ -1661,7 +1667,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (riscv_cpu_mxl(env) == MXL_RV32) {
env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
@ -1676,7 +1682,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
@ -1710,13 +1716,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
return write_vstimecmp(env, csrno, val);
return write_vstimecmp(env, csrno, val, ra);
}
if (riscv_cpu_mxl(env) == MXL_RV32) {
@ -1731,13 +1737,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
return write_vstimecmph(env, csrno, val);
return write_vstimecmph(env, csrno, val, ra);
}
env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
@ -1842,7 +1848,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno,
}
static RISCVException write_ignore(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@ -1963,7 +1969,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
}
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t mstatus = env->mstatus;
uint64_t mask = 0;
@ -2042,7 +2048,7 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno,
}
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t valh = (uint64_t)val << 32;
uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
@ -2095,8 +2101,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
{
uint64_t data[INSN_START_WORDS];
/* Outside of a running cpu, env contains the next pc. */
if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
return env->pc;
}
/* Within unwind data, [0] is pc and [1] is the opcode. */
return data[0] + insn_len(data[1]);
}
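
get_next_pc() recovers the PC of the executing CSR instruction from the TCG unwind data and adds that instruction's length. For reference, the insn_len() helper it relies on (declared in target/riscv/internals.h) decodes the length from the two low opcode bits: 0b11 marks a full 32-bit instruction, anything else a 16-bit compressed one.

    static inline int insn_len(uint16_t first_word)
    {
        return (first_word & 3) == 3 ? 4 : 2;
    }
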
static RISCVException write_misa(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
uint32_t orig_misa_ext = env->misa_ext;
@ -2110,11 +2129,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
/* Mask extensions that are not supported by this hart */
val &= env->misa_ext_mask;
/*
* Suppress 'C' if next instruction is not aligned
* TODO: this should check next_pc
*/
if ((val & RVC) && (GETPC() & ~3) != 0) {
/* Suppress 'C' if next instruction is not aligned. */
if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
val &= ~RVC;
}
@ -2160,7 +2176,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
return RISCV_EXCP_NONE;
@ -2955,7 +2971,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno,
}
static RISCVException write_mtvec(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@ -2974,7 +2990,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
}
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
int cidx;
PMUCTRState *counter;
@ -3049,10 +3065,9 @@ static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
}
static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
write_mcountinhibit(env, csrno, val & env->mcounteren);
return RISCV_EXCP_NONE;
return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
}
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
@ -3063,7 +3078,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@ -3097,7 +3112,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_mscratch(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->mscratch = val;
return RISCV_EXCP_NONE;
@ -3111,7 +3126,7 @@ static RISCVException read_mepc(CPURISCVState *env, int csrno,
}
static RISCVException write_mepc(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->mepc = val;
return RISCV_EXCP_NONE;
@ -3125,7 +3140,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno,
}
static RISCVException write_mcause(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->mcause = val;
return RISCV_EXCP_NONE;
@ -3139,7 +3154,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->mtval = val;
return RISCV_EXCP_NONE;
@ -3154,9 +3169,9 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
target_ulong val);
target_ulong val, uintptr_t ra);
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
@ -3188,9 +3203,7 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
}
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
write_henvcfg(env, CSR_HENVCFG, env->henvcfg);
return RISCV_EXCP_NONE;
return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
}
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
@ -3201,9 +3214,9 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
target_ulong val);
target_ulong val, uintptr_t ra);
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
@ -3218,9 +3231,7 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
}
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32);
return RISCV_EXCP_NONE;
return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
@ -3238,7 +3249,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
RISCVException ret;
@ -3295,7 +3306,7 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
RISCVException ret;
@ -3350,7 +3361,7 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
HENVCFG_ADUE | HENVCFG_DTE);
@ -3388,7 +3399,7 @@ static RISCVException write_mstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
if (!riscv_has_ext(env, RVF)) {
@ -3420,7 +3431,7 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@ -3447,7 +3458,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@ -3463,7 +3474,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@ -3492,7 +3503,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@ -3521,7 +3532,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@ -3552,7 +3563,7 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@ -3564,7 +3575,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@ -3603,7 +3614,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@ -3615,7 +3626,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
target_ulong new_val)
target_ulong new_val, uintptr_t ra)
{
return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@ -3866,7 +3877,7 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_sstatus(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
target_ulong mask = (sstatus_v1_10_mask);
@ -3883,7 +3894,7 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
mask |= SSTATUS_SDT;
}
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
return write_mstatus(env, CSR_MSTATUS, newval);
return write_mstatus(env, CSR_MSTATUS, newval, ra);
}
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
@ -4035,7 +4046,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno,
}
static RISCVException write_stvec(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@ -4054,7 +4065,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@ -4088,7 +4099,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_sscratch(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->sscratch = val;
return RISCV_EXCP_NONE;
@ -4102,7 +4113,7 @@ static RISCVException read_sepc(CPURISCVState *env, int csrno,
}
static RISCVException write_sepc(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->sepc = val;
return RISCV_EXCP_NONE;
@ -4116,7 +4127,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno,
}
static RISCVException write_scause(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->scause = val;
return RISCV_EXCP_NONE;
@ -4130,7 +4141,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno,
}
static RISCVException write_stval(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->stval = val;
return RISCV_EXCP_NONE;
@ -4270,7 +4281,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
}
static RISCVException write_satp(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (!riscv_cpu_cfg(env)->mmu) {
return RISCV_EXCP_NONE;
@ -4492,7 +4503,7 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_hstatus(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t mask = (target_ulong)-1;
if (!env_archcpu(env)->cfg.ext_svukte) {
@ -4524,7 +4535,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->hedeleg = val & vs_delegable_excps;
return RISCV_EXCP_NONE;
@ -4545,7 +4556,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
}
static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
RISCVException ret;
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
@ -4808,7 +4819,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@ -4828,7 +4839,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno,
}
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
/* Only GEILEN:1 bits implemented and BIT0 is never implemented */
val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
@ -4847,7 +4858,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno,
}
static RISCVException write_htval(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->htval = val;
return RISCV_EXCP_NONE;
@ -4861,7 +4872,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno,
}
static RISCVException write_htinst(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@ -4883,7 +4894,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
}
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->hgatp = legalize_xatp(env, env->hgatp, val);
return RISCV_EXCP_NONE;
@ -4901,7 +4912,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@ -4933,7 +4944,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@ -4957,7 +4968,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno,
}
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->hvictl = val & HVICTL_VALID_MASK;
return RISCV_EXCP_NONE;
@ -5022,7 +5033,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 0, env->hviprio, val);
}
@ -5034,7 +5045,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 4, env->hviprio, val);
}
@ -5046,7 +5057,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 8, env->hviprio, val);
}
@ -5058,7 +5069,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 12, env->hviprio, val);
}
@ -5072,7 +5083,7 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint64_t mask = (target_ulong)-1;
if ((val & VSSTATUS64_UXL) == 0) {
@ -5097,7 +5108,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno,
}
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@ -5116,7 +5127,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->vsscratch = val;
return RISCV_EXCP_NONE;
@ -5130,7 +5141,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno,
}
static RISCVException write_vsepc(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->vsepc = val;
return RISCV_EXCP_NONE;
@ -5144,7 +5155,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno,
}
static RISCVException write_vscause(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->vscause = val;
return RISCV_EXCP_NONE;
@ -5158,7 +5169,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno,
}
static RISCVException write_vstval(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->vstval = val;
return RISCV_EXCP_NONE;
@ -5172,7 +5183,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
}
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->vsatp = legalize_xatp(env, env->vsatp, val);
return RISCV_EXCP_NONE;
@ -5186,7 +5197,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval2(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->mtval2 = val;
return RISCV_EXCP_NONE;
@ -5200,7 +5211,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno,
}
static RISCVException write_mtinst(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->mtinst = val;
return RISCV_EXCP_NONE;
@ -5215,7 +5226,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
mseccfg_csr_write(env, val);
return RISCV_EXCP_NONE;
@ -5231,7 +5242,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
uint32_t reg_index = csrno - CSR_PMPCFG0;
@ -5247,7 +5258,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
return RISCV_EXCP_NONE;
@ -5261,7 +5272,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno,
}
static RISCVException write_tselect(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
tselect_csr_write(env, val);
return RISCV_EXCP_NONE;
@ -5285,7 +5296,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno,
}
static RISCVException write_tdata(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
if (!tdata_available(env, csrno - CSR_TDATA1)) {
return RISCV_EXCP_ILLEGAL_INST;
@ -5310,7 +5321,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno,
}
static RISCVException write_mcontext(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
int32_t mask;
@ -5334,43 +5345,50 @@ static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
env->mnscratch = val;
return RISCV_EXCP_NONE;
}
static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_mnepc(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->mnepc;
return RISCV_EXCP_NONE;
}
static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mnepc(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
env->mnepc = val;
return RISCV_EXCP_NONE;
}
static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_mncause(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->mncause;
return RISCV_EXCP_NONE;
}
static int write_mncause(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mncause(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
env->mncause = val;
return RISCV_EXCP_NONE;
}
static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->mnstatus;
return RISCV_EXCP_NONE;
}
static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
target_ulong val, uintptr_t ra)
{
target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
@ -5510,7 +5528,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
target_ulong write_mask)
target_ulong write_mask,
uintptr_t ra)
{
RISCVException ret;
target_ulong old_value = 0;
@ -5540,7 +5559,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
if (write_mask) {
new_value = (old_value & ~write_mask) | (new_value & write_mask);
if (csr_ops[csrno].write) {
ret = csr_ops[csrno].write(env, csrno, new_value);
ret = csr_ops[csrno].write(env, csrno, new_value, ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@ -5563,25 +5582,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
return ret;
}
return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask)
target_ulong *ret_value, target_ulong new_value,
target_ulong write_mask, uintptr_t ra)
{
RISCVException ret = riscv_csrrw_check(env, csrno, true);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value,
Int128 write_mask)
Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
Int128 old_value;
@ -5603,7 +5622,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
}
} else if (csr_ops[csrno].write) {
/* avoids having to write wrappers for all registers */
ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@ -5630,7 +5649,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
if (csr_ops[csrno].read128) {
return riscv_csrrw_do128(env, csrno, ret_value,
int128_zero(), int128_zero());
int128_zero(), int128_zero(), 0);
}
/*
@ -5641,9 +5660,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
* accesses
*/
target_ulong old_value;
ret = riscv_csrrw_do64(env, csrno, &old_value,
(target_ulong)0,
(target_ulong)0);
ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@ -5651,8 +5668,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
}
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value, Int128 write_mask)
Int128 *ret_value, Int128 new_value,
Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
@ -5662,7 +5679,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
}
if (csr_ops[csrno].read128) {
return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
return riscv_csrrw_do128(env, csrno, ret_value,
new_value, write_mask, ra);
}
/*
@ -5675,7 +5693,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
target_ulong old_value;
ret = riscv_csrrw_do64(env, csrno, &old_value,
int128_getlo(new_value),
int128_getlo(write_mask));
int128_getlo(write_mask), ra);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@ -5698,7 +5716,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
if (!write_mask) {
ret = riscv_csrr(env, csrno, ret_value);
} else {
ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
}
#if !defined(CONFIG_USER_ONLY)
env->debugger = false;
@ -5714,7 +5732,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno,
}
static RISCVException write_jvt(CPURISCVState *env, int csrno,
target_ulong val)
target_ulong val, uintptr_t ra)
{
env->jvt = val;
return RISCV_EXCP_NONE;


@ -703,14 +703,14 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r
vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
vmand_mm 011001 1 ..... ..... 010 ..... 1010111 @r
vmnand_mm 011101 1 ..... ..... 010 ..... 1010111 @r
vmandn_mm 011000 1 ..... ..... 010 ..... 1010111 @r
vmxor_mm 011011 1 ..... ..... 010 ..... 1010111 @r
vmor_mm 011010 1 ..... ..... 010 ..... 1010111 @r
vmnor_mm 011110 1 ..... ..... 010 ..... 1010111 @r
vmorn_mm 011100 1 ..... ..... 010 ..... 1010111 @r
vmxnor_mm 011111 1 ..... ..... 010 ..... 1010111 @r
vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm
vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm
vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm
@ -732,7 +732,7 @@ vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
vcompress_vm 010111 1 ..... ..... 010 ..... 1010111 @r
vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd
vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd


@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
uint8_t sew = ctx->sew;
if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) &&
vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) &&
vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
uint8_t sew = ctx->sew;
if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
vext_check_ds(ctx, a->rd, a->rs2, a->vm) &&
vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm(ctx, RISCV_FRM_DYN);


@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s)
}
}
/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
/*
* Source and destination vector register groups cannot overlap the source
* mask register:
*
* A vector register cannot be used to provide source operands with more than
* one EEW for a single instruction. A mask register source is considered to
* have EEW=1 for this constraint. An encoding that would result in the same
* vector register being read with two or more different EEWs, including when
* the vector register appears at different positions within two or more vector
* register groups, is reserved.
* (Section 5.2)
*
* A destination vector register group can overlap a source vector
* register group only if one of the following holds:
* 1. The destination EEW equals the source EEW.
* 2. The destination EEW is smaller than the source EEW and the overlap
* is in the lowest-numbered part of the source register group.
* 3. The destination EEW is greater than the source EEW, the source EMUL
* is at least 1, and the overlap is in the highest-numbered part of
* the destination register group.
* For the purpose of determining register group overlap constraints, mask
* elements have EEW=1.
* (Section 5.2)
*/
static bool require_vm(int vm, int v)
{
return (vm != 0 || vd != 0);
return (vm != 0 || v != 0);
}
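
A worked example of rule 3 above, with illustrative register numbers: at SEW = 16 and LMUL = 1, a widening add writes vd with EEW = 32 and EMUL = 2.

    /*
     * vwadd.vv v2, v2, v6: the destination group {v2,v3} overlaps source v2
     * in its lowest-numbered part while the destination EEW is the larger
     * one, so the encoding is reserved.
     * vwadd.vv v2, v3, v6: the overlap (v3) is the highest-numbered part of
     * the destination group and the source EMUL is 1, so rule 3 allows it.
     */
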
static bool require_nf(int vd, int nf, int lmul)
@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
return ret;
}
/*
* Check whether a vector register is used to provide source operands with
* more than one EEW for the vector instruction.
* Returns true if the instruction has a valid encoding.
* Returns false if the encoding violates the mismatched-input-EEW constraint.
*/
static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1,
int vs2, uint8_t eew_vs2, int vm)
{
bool is_valid = true;
int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul;
int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul;
/* When vm is 0, vs1 & vs2(EEW!=1) group can't overlap v0 (EEW=1) */
if ((vs1 != -1 && !require_vm(vm, vs1)) ||
(vs2 != -1 && !require_vm(vm, vs2))) {
is_valid = false;
}
/* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */
if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) &&
is_overlapped(vs1, 1 << MAX(emul_vs1, 0),
vs2, 1 << MAX(emul_vs2, 0))) {
is_valid = false;
}
return is_valid;
}
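
The emul_* values above are log2-scaled, like s->sew and s->lmul. A worked example with illustrative parameters:

    /*
     * SEW = 32 (s->sew = 2), LMUL = 1 (s->lmul = 0), index EEW = 8
     * (eew_vs2 = 0):
     *     emul_vs2 = eew_vs2 - s->sew + s->lmul = 0 - 2 + 0 = -2  (EMUL = 1/4)
     * so the overlap test treats vs2 as a single register:
     *     1 << MAX(emul_vs2, 0) == 1
     */
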
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
return require_vm(vm, vd) &&
require_align(vd, s->lmul) &&
require_align(vs, s->lmul);
require_align(vs, s->lmul) &&
vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm);
}
/*
@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ss(s, vd, vs2, vm) &&
vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul);
}
@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
vext_check_input_eew(s, vs, s->sew, -1, 0, vm) &&
require_align(vs, s->lmul) &&
require_noover(vd, s->lmul + 1, vs, s->lmul);
}
@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) &&
require_align(vs, s->lmul + 1);
}
@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs2, vm) &&
vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul) &&
require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs1, vm) &&
vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs2, s->lmul + 1);
}
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
bool ret = vext_narrow_check_common(s, vd, vs, vm);
bool ret = vext_narrow_check_common(s, vd, vs, vm) &&
vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm);
if (vd != vs) {
ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
}
@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_sd(s, vd, vs2, vm) &&
vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs1, s->lmul);
}
@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
{
bool ret = require_align(vs2, s->lmul) &&
require_align(vd, s->lmul) &&
require_vm(vm, vd);
require_vm(vm, vd) &&
vext_check_input_eew(s, -1, 0, vs2, s->sew, vm);
if (is_over) {
ret &= (vd != vs2);
}
@ -802,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
/*
*** stride load and store
* MAXSZ returns the maximum vector size that can be operated on, in bytes,
* which is used in GVEC IR when the vl_eq_vlmax flag is set to true
* to accelerate vector operations.
*/
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
TCGv, TCGv_env, TCGv_i32);
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
uint32_t data, gen_helper_ldst_stride *fn,
DisasContext *s)
static inline uint32_t MAXSZ(DisasContext *s)
{
TCGv_ptr dest, mask;
TCGv base, stride;
TCGv_i32 desc;
int max_sz = s->cfg_ptr->vlenb << 3;
return max_sz >> (3 - s->lmul);
}
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
stride = get_gpr(s, rs2, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
static inline uint32_t get_log2(uint32_t a)
{
uint32_t i = 0;
for (; a > 0;) {
a >>= 1;
i++;
}
/* callers pass powers of two, so this floor(log2) is exact */
return i - 1;
}
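
Assuming the floor-log form above, the callers only ever pass powers of two (element counts, and element sizes shifted by sew), for which the result is exact:

    /* e.g. get_log2(1) == 0, get_log2(8) == 3, get_log2(64) == 6 */
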
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
/*
* Simulate the strided load/store main loop:
*
* for (i = env->vstart; i < env->vl; env->vstart = ++i) {
* k = 0;
* while (k < nf) {
* if (!vm && !vext_elem_mask(v0, i)) {
* vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
* (i + k * max_elems + 1) * esz);
* k++;
* continue;
* }
* target_ulong addr = base + stride * i + (k << log2_esz);
* ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
* k++;
* }
* }
*/
static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
uint32_t rs2, uint32_t vm, uint32_t nf,
gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
bool is_load)
{
TCGv addr = tcg_temp_new();
TCGv base = get_gpr(s, rs1, EXT_NONE);
TCGv stride = get_gpr(s, rs2, EXT_NONE);
TCGv i = tcg_temp_new();
TCGv i_esz = tcg_temp_new();
TCGv k = tcg_temp_new();
TCGv k_esz = tcg_temp_new();
TCGv k_max = tcg_temp_new();
TCGv mask = tcg_temp_new();
TCGv mask_offs = tcg_temp_new();
TCGv mask_offs_64 = tcg_temp_new();
TCGv mask_elem = tcg_temp_new();
TCGv mask_offs_rem = tcg_temp_new();
TCGv vreg = tcg_temp_new();
TCGv dest_offs = tcg_temp_new();
TCGv stride_offs = tcg_temp_new();
uint32_t max_elems = MAXSZ(s) >> s->sew;
TCGLabel *start = gen_new_label();
TCGLabel *end = gen_new_label();
TCGLabel *start_k = gen_new_label();
TCGLabel *inc_k = gen_new_label();
TCGLabel *end_k = gen_new_label();
MemOp atomicity = MO_ATOM_NONE;
if (s->sew == 0) {
atomicity = MO_ATOM_NONE;
} else {
atomicity = MO_ATOM_IFALIGN_PAIR;
}
mark_vs_dirty(s);
fn(dest, mask, base, stride, tcg_env, desc);
tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
/* Start of outer loop. */
tcg_gen_mov_tl(i, cpu_vstart);
gen_set_label(start);
tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
tcg_gen_shli_tl(i_esz, i, s->sew);
/* Start of inner loop. */
tcg_gen_movi_tl(k, 0);
gen_set_label(start_k);
tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
/*
* If we are in mask agnostic regime and the operation is not unmasked we
* set the inactive elements to 1.
*/
if (!vm && s->vma) {
TCGLabel *active_element = gen_new_label();
/* (i + k * max_elems) * esz */
tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
/*
* Check whether the i bit of the mask is 0 or 1.
*
* static inline int vext_elem_mask(void *v0, int index)
* {
* int idx = index / 64;
* int pos = index % 64;
* return (((uint64_t *)v0)[idx] >> pos) & 1;
* }
*/
tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
tcg_gen_andi_tl(mask_elem, mask_elem, 1);
tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
active_element);
/*
* Set masked-off elements in the destination vector register to 1s.
* Store instructions simply skip this bit as memory ops access memory
* only for active elements.
*/
if (is_load) {
tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
tcg_gen_add_tl(mask_offs, mask_offs, dest);
st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
}
tcg_gen_br(inc_k);
gen_set_label(active_element);
}
/*
* The element is active, calculate the address with stride:
* target_ulong addr = base + stride * i + (k << log2_esz);
*/
tcg_gen_mul_tl(stride_offs, stride, i);
tcg_gen_shli_tl(k_esz, k, s->sew);
tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
tcg_gen_add_tl(addr, base, stride_offs);
/* Calculate the offset in the dst/src vector register. */
tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
tcg_gen_add_tl(dest_offs, i, k_max);
tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
tcg_gen_add_tl(dest_offs, dest_offs, dest);
if (is_load) {
tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
} else {
ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
}
/*
* We don't execute the load/store above if the element was inactive.
* We jump instead directly to incrementing k and continuing the loop.
*/
if (!vm && s->vma) {
gen_set_label(inc_k);
}
tcg_gen_addi_tl(k, k, 1);
tcg_gen_br(start_k);
/* End of the inner loop. */
gen_set_label(end_k);
tcg_gen_addi_tl(i, i, 1);
tcg_gen_mov_tl(cpu_vstart, i);
tcg_gen_br(start);
/* End of the outer loop. */
gen_set_label(end);
return;
}
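
The generated mask test mirrors vext_elem_mask() at byte granularity: it loads the 64-bit word at byte offset i / 8 into the mask register and extracts bit i % 8, which selects bit i overall on a little-endian host. An equivalent host-side expression, assuming that layout:

    bit = (*(uint64_t *)((uint8_t *)v0 + (i >> 3)) >> (i % 8)) & 1;
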
/*
* Set the tail bytes of the strided loads/stores to 1:
*
* for (k = 0; k < nf; ++k) {
* cnt = (k * max_elems + vl) * esz;
* tot = (k * max_elems + max_elems) * esz;
* for (i = cnt; i < tot; i += esz) {
* store_1s(-1, vd[vl+i]);
* }
* }
*/
static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
gen_tl_ldst *st_fn)
{
TCGv i = tcg_temp_new();
TCGv k = tcg_temp_new();
TCGv tail_cnt = tcg_temp_new();
TCGv tail_tot = tcg_temp_new();
TCGv tail_addr = tcg_temp_new();
TCGLabel *start = gen_new_label();
TCGLabel *end = gen_new_label();
TCGLabel *start_i = gen_new_label();
TCGLabel *end_i = gen_new_label();
uint32_t max_elems_b = MAXSZ(s);
uint32_t esz = 1 << s->sew;
/* Start of the outer loop. */
tcg_gen_movi_tl(k, 0);
tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
tcg_gen_movi_tl(tail_tot, max_elems_b);
tcg_gen_add_tl(tail_addr, dest, tail_cnt);
gen_set_label(start);
tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
/* Start of the inner loop. */
tcg_gen_mov_tl(i, tail_cnt);
gen_set_label(start_i);
tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
/* store_1s(-1, vd[vl+i]); */
st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
tcg_gen_addi_tl(tail_addr, tail_addr, esz);
tcg_gen_addi_tl(i, i, esz);
tcg_gen_br(start_i);
/* End of the inner loop. */
gen_set_label(end_i);
/* Update the counts */
tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
tcg_gen_addi_tl(k, k, 1);
tcg_gen_br(start);
/* End of the outer loop. */
gen_set_label(end);
return;
}
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
uint32_t data, DisasContext *s, bool is_load)
{
if (!s->vstart_eq_zero) {
return false;
}
TCGv dest = tcg_temp_new();
uint32_t nf = FIELD_EX32(data, VDATA, NF);
uint32_t vm = FIELD_EX32(data, VDATA, VM);
/* Destination register and mask register */
tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
/*
* Select the appropriate load/store to retrieve data from the vector
* register given a specific sew.
*/
static gen_tl_ldst * const ld_fns[4] = {
tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
tcg_gen_ld32u_tl, tcg_gen_ld_tl
};
static gen_tl_ldst * const st_fns[4] = {
tcg_gen_st8_tl, tcg_gen_st16_tl,
tcg_gen_st32_tl, tcg_gen_st_tl
};
gen_tl_ldst *ld_fn = ld_fns[s->sew];
gen_tl_ldst *st_fn = st_fns[s->sew];
if (ld_fn == NULL || st_fn == NULL) {
return false;
}
mark_vs_dirty(s);
gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
tcg_gen_movi_tl(cpu_vstart, 0);
/*
* Set the tail bytes to 1 if tail agnostic.
*/
if (s->vta != 0 && is_load) {
gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
}
finalize_rvv_inst(s);
return true;
@ -836,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
static gen_helper_ldst_stride * const fns[4] = {
gen_helper_vlse8_v, gen_helper_vlse16_v,
gen_helper_vlse32_v, gen_helper_vlse64_v
};
fn = fns[eew];
if (fn == NULL) {
return false;
}
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
@ -853,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@ -871,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
static gen_helper_ldst_stride * const fns[4] = {
/* masked stride store */
gen_helper_vsse8_v, gen_helper_vsse16_v,
gen_helper_vsse32_v, gen_helper_vsse64_v
};
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
fn = fns[eew];
if (fn == NULL) {
return false;
}
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@ -981,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) &&
vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
@ -1033,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) &&
vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
@ -1100,25 +1398,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
gen_helper_ldst_whole *fn,
DisasContext *s)
uint32_t log2_esz, gen_helper_ldst_whole *fn,
DisasContext *s, bool is_load)
{
TCGv_ptr dest;
TCGv base;
TCGv_i32 desc;
uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
data = FIELD_DP32(data, VDATA, VM, 1);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
mark_vs_dirty(s);
fn(dest, base, tcg_env, desc);
/*
* Load/store multiple bytes per iteration.
* When possible do this atomically.
* Update vstart with the number of processed elements.
* Use the helper function if either:
* - vstart is not 0.
* - the target has 32 bit registers and we are loading/storing 64 bit long
* elements. This is to ensure that we process every element with a single
* memory instruction.
*/
bool use_helper_fn = !(s->vstart_eq_zero) ||
(TCG_TARGET_REG_BITS == 32 && log2_esz == 3);
if (!use_helper_fn) {
TCGv addr = tcg_temp_new();
uint32_t size = s->cfg_ptr->vlenb * nf;
TCGv_i64 t8 = tcg_temp_new_i64();
TCGv_i32 t4 = tcg_temp_new_i32();
MemOp atomicity = MO_ATOM_NONE;
if (log2_esz == 0) {
atomicity = MO_ATOM_NONE;
} else {
atomicity = MO_ATOM_IFALIGN_PAIR;
}
if (TCG_TARGET_REG_BITS == 64) {
for (int i = 0; i < size; i += 8) {
addr = get_address(s, rs1, i);
if (is_load) {
tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx,
MO_LE | MO_64 | atomicity);
tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
} else {
tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
tcg_gen_qemu_st_i64(t8, addr, s->mem_idx,
MO_LE | MO_64 | atomicity);
}
if (i == size - 8) {
tcg_gen_movi_tl(cpu_vstart, 0);
} else {
tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz);
}
}
} else {
for (int i = 0; i < size; i += 4) {
addr = get_address(s, rs1, i);
if (is_load) {
tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx,
MO_LE | MO_32 | atomicity);
tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
} else {
tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
tcg_gen_qemu_st_i32(t4, addr, s->mem_idx,
MO_LE | MO_32 | atomicity);
}
if (i == size - 4) {
tcg_gen_movi_tl(cpu_vstart, 0);
} else {
tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz);
}
}
}
} else {
TCGv_ptr dest;
TCGv base;
TCGv_i32 desc;
uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
data = FIELD_DP32(data, VDATA, VM, 1);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
fn(dest, base, tcg_env, desc);
}
finalize_rvv_inst(s);
return true;
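
A concrete sizing of the inline path under assumed parameters: with VLEN = 128 (vlenb = 16), vl2re64.v gives size = vlenb * nf = 32 bytes, i.e. four 8-byte chunks on a 64-bit host, and log2_esz = 3 makes each chunk one element wide:

    /*
     * chunk at i = 0:  vstart 0 -> 1
     * chunk at i = 8:  vstart 1 -> 2
     * chunk at i = 16: vstart 2 -> 3
     * chunk at i = 24: last chunk, vstart reset to 0
     * If a chunk faults, vstart names the first unprocessed element.
     */
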
@ -1128,58 +1487,47 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
 * Load and store whole register instructions ignore the vtype and vl
 * settings, so we don't need to check the vill bit. (Section 7.9)
*/
#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD)                  \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                   \
{                                                                           \
    if (require_rvv(s) &&                                                   \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                   \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \
                                gen_helper_##NAME, s, IS_LOAD);             \
    }                                                                       \
    return false;                                                           \
}
GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true)
GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true)
GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true)
GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true)
/*
 * The vector whole register store instructions are encoded similarly to
 * unmasked unit-stride stores of elements with EEW=8.
*/
GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8)
GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false)
GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false)
GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false)
GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false)
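
For reference, expanding the new macro by hand for one of the loads (whitespace normalized; this is just the expansion of the code above, not extra patch content) gives:

static bool trans_vl1re8_v(DisasContext *s, arg_vl1re8_v *a)
{
    if (require_rvv(s) &&
        QEMU_IS_ALIGNED(a->rd, 1)) {
        /* ctzl(sizeof(int8_t)) == 0, the log2 element size for EEW=8 */
        return ldst_whole_trans(a->rd, a->rs1, 1, ctzl(sizeof(int8_t)),
                                gen_helper_vl1re8_v, s, true);
    }
    return false;
}
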
/*
*** Vector Integer Arithmetic Instructions
*/
/*
 * MAXSZ returns the maximum vector size, in bytes, that can be operated on;
 * it is used in GVEC IR when the vl_eq_vlmax flag is set to true
 * to accelerate vector operations.
*/
static inline uint32_t MAXSZ(DisasContext *s)
{
int max_sz = s->cfg_ptr->vlenb * 8;
return max_sz >> (3 - s->lmul);
}
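
A quick sanity check of the shift (illustration only; vlenb = 16, i.e. VLEN = 128 bits, is an assumed value): for signed lmul in [-3, 3] the result matches VLEN * LMUL / 8 bytes.

#include <stdio.h>

int main(void)
{
    int vlenb = 16;                          /* assumed VLEN = 128 bits */
    for (int lmul = -3; lmul <= 3; lmul++) { /* LMUL = 1/8 .. 8 */
        printf("lmul=%+d: MAXSZ = %3d bytes\n",
               lmul, (vlenb * 8) >> (3 - lmul));
    }
    return 0;
}
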
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
@ -1475,6 +1823,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* OPIVV with overwrite and WIDEN */
static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
gen_helper_gvec_4_ptr *fn,
bool (*checkfn)(DisasContext *, arg_rmrr *))
@ -1522,6 +1880,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_ds(s, a->rd, a->rs2, a->vm) &&
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
@ -1993,13 +2359,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
/* Vector Widening Integer Multiply-Add Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check)
/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
@ -2340,6 +2706,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
require_rvf(s) &&
require_scale_rvf(s) &&
vext_check_isa_ill(s) &&
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
@ -2379,11 +2756,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
require_rvf(s) &&
require_scale_rvf(s) &&
vext_check_isa_ill(s) &&
vext_check_ds(s, a->rd, a->rs2, a->vm) &&
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
/* OPFVF with WIDEN */
#define GEN_OPFVF_WIDEN_TRANS(NAME, CHECK)                       \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (CHECK(s, a)) {                                           \
uint32_t data = 0; \
static gen_helper_opfvf *const fns[2] = { \
gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
@ -2399,8 +2786,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check)
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
@ -2482,7 +2869,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
@ -2503,14 +2890,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check)
/* Vector Floating-Point Square-Root Instruction */
@ -3426,6 +3813,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, s->lmul) &&
require_align(a->rs2, s->lmul) &&
@ -3438,6 +3826,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
int8_t emul = MO_16 - s->sew + s->lmul;
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) &&
(emul >= -3 && emul <= 3) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, emul) &&
@ -3457,6 +3846,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul) &&
(a->rd != a->rs2) &&
@ -3600,7 +3990,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul - div) &&
require_vm(a->vm, a->rd) &&
               require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) &&
               vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm);
return ret;
}


@ -201,4 +201,9 @@ static inline target_ulong adjust_addr_virt(CPURISCVState *env,
return adjust_addr_body(env, addr, true);
}
static inline int insn_len(uint16_t first_word)
{
return (first_word & 3) == 3 ? 4 : 2;
}
#endif
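
The length test relies on the RISC-V encoding rule that the two low opcode bits of a 32-bit instruction are both set, while compressed 16-bit encodings use 00, 01 or 10 there. A small self-contained check (sample encodings taken from the ISA manual):

#include <stdio.h>
#include <stdint.h>

static int insn_len(uint16_t first_word)
{
    return (first_word & 3) == 3 ? 4 : 2;
}

int main(void)
{
    printf("%d\n", insn_len(0x4501));   /* c.li a0, 0: low bits 01 -> 2 bytes */
    printf("%d\n", insn_len(0x0013));   /* addi low halfword: 11   -> 4 bytes */
    return 0;
}
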


@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level)
static bool cap_has_mp_state;
#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
                                         type | idx)
#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
                                         type | idx)
#if defined(TARGET_RISCV64)
#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
#else
#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
#endif
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
return kvm_encode_reg_size_id(id, size_b);
}
#define RISCV_CORE_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, KVM_REG_RISCV_CORE_REG(name))
#define RISCV_CSR_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, KVM_REG_RISCV_CSR_REG(name))
#define RISCV_CONFIG_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, KVM_REG_RISCV_CONFIG_REG(name))
#define RISCV_TIMER_REG(name) \
    KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, KVM_REG_RISCV_TIMER_REG(name))
#define RISCV_FP_F_REG(idx)  KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)
#define RISCV_FP_D_REG(idx)  KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)
#define RISCV_VECTOR_CSR_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, KVM_REG_RISCV_VECTOR_CSR_REG(name))
#define KVM_RISCV_GET_TIMER(cs, name, reg) \
do { \
int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
@ -167,6 +135,7 @@ typedef struct KVMCPUConfig {
const char *description;
target_ulong offset;
uint64_t kvm_reg_id;
uint32_t prop_size;
bool user_set;
bool supported;
} KVMCPUConfig;
@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
/* If we're here we're going to disable the MISA bit */
reg = 0;
    id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
misa_cfg->kvm_reg_id);
ret = kvm_set_one_reg(cs, id, &reg);
if (ret != 0) {
@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
}
}
#define KVM_CSR_CFG(_name, _env_prop, reg_id) \
{.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
.prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
.kvm_reg_id = reg_id}
static KVMCPUConfig kvm_csr_cfgs[] = {
KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
};
static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
return (void *)&cpu->env + csr_cfg->offset;
}
static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
return *val32;
}
static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
return *val64;
}
static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
uint32_t val)
{
uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
*val32 = val;
}
static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
uint64_t val)
{
uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
*val64 = val;
}
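
These accessors implement a small reflection pattern: each kvm_csr_cfgs entry records the byte offset and size of a CPURISCVState field, and reads and writes dispatch on prop_size. A standalone sketch of the same idea (the struct and names here are hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_env { uint64_t sepc; uint32_t narrow_csr; };
struct demo_cfg { size_t offset; size_t prop_size; };

static uint64_t demo_get(struct demo_env *env, const struct demo_cfg *cfg)
{
    void *p = (char *)env + cfg->offset;
    if (cfg->prop_size == sizeof(uint32_t)) {
        return *(uint32_t *)p;
    }
    assert(cfg->prop_size == sizeof(uint64_t));
    return *(uint64_t *)p;
}

int main(void)
{
    struct demo_env env = { .sepc = 0x80200000, .narrow_csr = 7 };
    struct demo_cfg sepc_cfg = { offsetof(struct demo_env, sepc),
                                 sizeof(env.sepc) };
    printf("sepc = 0x%llx\n", (unsigned long long)demo_get(&env, &sepc_cfg));
    return 0;
}
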
#define KVM_EXT_CFG(_name, _prop, _reg_id) \
{.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
.kvm_reg_id = _reg_id}
@ -434,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = {
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
uint64_t id, reg;
int i, ret;
@ -445,7 +463,7 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
continue;
}
        id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
ret = kvm_set_one_reg(cs, id, &reg);
@ -570,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs)
target_ulong reg;
CPURISCVState *env = &RISCV_CPU(cs)->env;
    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
env->pc = reg;
for (i = 1; i < 32; i++) {
        uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
ret = kvm_get_one_reg(cs, id, &reg);
if (ret) {
return ret;
@ -596,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
CPURISCVState *env = &RISCV_CPU(cs)->env;
reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
for (i = 1; i < 32; i++) {
        uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
reg = env->gpr[i];
ret = kvm_set_one_reg(cs, id, &reg);
if (ret) {
@ -613,6 +631,66 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
return ret;
}
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
uint64_t reg;
int i, ret;
for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
if (!csr_cfg->supported) {
continue;
}
ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
if (ret) {
return ret;
}
if (csr_cfg->prop_size == sizeof(uint32_t)) {
kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
} else if (csr_cfg->prop_size == sizeof(uint64_t)) {
kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
} else {
g_assert_not_reached();
}
}
return 0;
}
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
uint64_t reg;
int i, ret;
for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
if (!csr_cfg->supported) {
continue;
}
if (csr_cfg->prop_size == sizeof(uint32_t)) {
reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
} else if (csr_cfg->prop_size == sizeof(uint64_t)) {
reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
} else {
g_assert_not_reached();
}
ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
if (ret) {
return ret;
}
}
return 0;
}
static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
{
env->mstatus = 0;
@ -624,40 +702,8 @@ static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
env->stval = 0;
env->mip = 0;
env->satp = 0;
env->scounteren = 0;
env->senvcfg = 0;
}
static int kvm_riscv_get_regs_fp(CPUState *cs)
@ -800,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs)
return 0;
}
    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
env->vstart = reg;
    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
env->vl = reg;
    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
env->vtype = reg;
if (kvm_v_vlenb.supported) {
        ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
if (ret) {
return ret;
}
@ -857,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs)
}
reg = env->vstart;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
reg = env->vl;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
reg = env->vtype;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
if (kvm_v_vlenb.supported) {
reg = cpu->cfg.vlenb;
        ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
for (int i = 0; i < 32; i++) {
/*
@ -955,25 +1001,24 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
struct kvm_one_reg reg;
int ret;
    reg.id = RISCV_CONFIG_REG(mvendorid);
reg.addr = (uint64_t)&cpu->cfg.mvendorid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve mvendorid from host, error %d", ret);
}
    reg.id = RISCV_CONFIG_REG(marchid);
reg.addr = (uint64_t)&cpu->cfg.marchid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve marchid from host, error %d", ret);
}
    reg.id = RISCV_CONFIG_REG(mimpid);
reg.addr = (uint64_t)&cpu->cfg.mimpid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@ -988,7 +1033,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
struct kvm_one_reg reg;
int ret;
    reg.id = RISCV_CONFIG_REG(isa);
reg.addr = (uint64_t)&env->misa_ext_mask;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@ -1005,11 +1050,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
KVMCPUConfig *cbomz_cfg)
{
struct kvm_one_reg reg;
int ret;
    reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
cbomz_cfg->kvm_reg_id);
reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@ -1023,7 +1067,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMScratchCPU *kvmcpu)
{
uint64_t val;
int i, ret;
@ -1031,7 +1074,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
struct kvm_one_reg reg;
        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@ -1061,6 +1104,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
}
}
static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
{
uint64_t val;
int i, ret;
for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
struct kvm_one_reg reg;
reg.id = csr_cfg->kvm_reg_id;
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
if (errno == EINVAL) {
csr_cfg->supported = false;
} else {
error_report("Unable to read KVM CSR %s: %s",
csr_cfg->name, strerror(errno));
exit(EXIT_FAILURE);
}
} else {
csr_cfg->supported = true;
}
}
}
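
The legacy path probes support one register at a time and treats EINVAL as "not supported" and any other error as fatal. The pattern in isolation (fake_get_one_reg is a stand-in for the KVM_GET_ONE_REG ioctl):

#include <errno.h>
#include <stdio.h>

static int fake_get_one_reg(unsigned id)
{
    if (id == 1 || id == 3) {
        return 0;       /* kernel knows this register */
    }
    errno = EINVAL;     /* kernel: unknown register */
    return -1;
}

int main(void)
{
    for (unsigned id = 1; id <= 4; id++) {
        int ret = fake_get_one_reg(id);
        if (ret != 0 && errno != EINVAL) {
            perror("read");          /* the real code exits here */
            return 1;
        }
        printf("reg %u supported: %s\n", id, ret == 0 ? "yes" : "no");
    }
    return 0;
}
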
static int uint64_cmp(const void *a, const void *b)
{
uint64_t val1 = *(const uint64_t *)a;
@ -1078,7 +1147,6 @@ static int uint64_cmp(const void *a, const void *b)
}
static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
struct kvm_reg_list *reglist)
{
struct kvm_reg_list *reg_search;
@ -1118,12 +1186,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
}
}
static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
{
struct kvm_reg_list *reg_search;
uint64_t reg_id;
for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
reg_id = csr_cfg->kvm_reg_id;
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
if (!reg_search) {
continue;
}
csr_cfg->supported = true;
}
}
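
This variant avoids per-register ioctls by consulting the register list once; bsearch() requires reglist->reg to be sorted, which the caller is expected to ensure before searching. The lookup in isolation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int u64_cmp(const void *a, const void *b)
{
    uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    uint64_t regs[] = { 3, 9, 17, 42 };   /* must already be sorted */
    uint64_t probe = 17;
    void *hit = bsearch(&probe, regs, 4, sizeof(regs[0]), u64_cmp);
    printf("id %llu %s\n", (unsigned long long)probe,
           hit ? "present" : "absent");
    return 0;
}
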
static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
g_autofree struct kvm_reg_list *reglist = NULL;
KVMCPUConfig *multi_ext_cfg;
struct kvm_one_reg reg;
struct kvm_reg_list rl_struct;
uint64_t val, reg_id, *reg_search;
int i, ret;
@ -1135,7 +1222,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
* (EINVAL). Use read_legacy() in this case.
*/
if (errno == EINVAL) {
        kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
        kvm_riscv_read_csr_cfg_legacy(kvmcpu);
        return;
} else if (errno != E2BIG) {
/*
* E2BIG is an expected error message for the API since we
@ -1164,7 +1253,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
@ -1197,7 +1286,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
}
    kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
kvm_riscv_read_csr_cfg(reglist);
}
static void riscv_init_kvm_registers(Object *cpu_obj)
@ -1211,7 +1301,7 @@ static void riscv_init_kvm_registers(Object *cpu_obj)
kvm_riscv_init_machine_ids(cpu, &kvmcpu);
kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
    kvm_riscv_init_cfg(cpu, &kvmcpu);
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
@ -1343,12 +1433,11 @@ void kvm_arch_init_irq_routing(KVMState *s)
static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
target_ulong reg;
uint64_t id;
int ret;
    id = RISCV_CONFIG_REG(mvendorid);
/*
* cfg.mvendorid is an uint32 but a target_ulong will
* be written. Assign it to a target_ulong var to avoid
@ -1360,13 +1449,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
return ret;
}
    id = RISCV_CONFIG_REG(marchid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
if (ret != 0) {
return ret;
}
    id = RISCV_CONFIG_REG(mimpid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
return ret;
@ -1916,7 +2005,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicbom &&
riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cbom_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@ -1935,7 +2024,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicboz &&
riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cboz_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);


@ -71,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
{
target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@ -82,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
target_ulong src, target_ulong write_mask)
{
target_ulong val = 0;
RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@ -108,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
{
RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
int128_make128(srcl, srch),
UINT128_MAX, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@ -116,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
}
target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
                               target_ulong srcl, target_ulong srch,
                               target_ulong maskl, target_ulong maskh)
{
Int128 rv = int128_zero();
RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
int128_make128(srcl, srch),
int128_make128(maskl, maskh),
GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());


@ -32,6 +32,15 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
/*
* Convert the PMP permissions to match the truth table in the Smepmp spec.
*/
static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
{
return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
(cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}
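
The shifts pack the four relevant pmpcfg bits into a 4-bit index ordered L R W X, so e.g. a locked read+execute rule maps to 0b1101 = 13, one of the encodings the MML truth table below treats as invalid. A standalone check (PMP_* bit values as defined in QEMU's pmp.h):

#include <stdio.h>
#include <stdint.h>

#define PMP_READ  0x01
#define PMP_WRITE 0x02
#define PMP_EXEC  0x04
#define PMP_LOCK  0x80

static uint8_t pmp_get_smepmp_operation(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}

int main(void)
{
    /* locked read+execute: L=1 R=1 W=0 X=1 -> 0b1101 = 13 */
    uint8_t cfg = PMP_LOCK | PMP_READ | PMP_EXEC;
    printf("operation = %u\n", pmp_get_smepmp_operation(cfg));
    return 0;
}
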
/*
* Accessor method to extract address matching type 'a field' from cfg reg
*/
@ -46,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }
    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }
    return 0;
}
/*
 * Check whether a PMP is locked for writing or not.
 * (i.e. has LOCK flag and mseccfg.RLB is unset)
 */
static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
{
    return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
}
/*
 * Check whether `val` is an invalid Smepmp config value
 */
static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
{
    /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
    if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
        return 0;
    }
/*
* Adding a rule with executable privileges that either is M-mode-only
* or a locked Shared-Region is not possible
*/
switch (pmp_get_smepmp_operation(val)) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 12:
case 14:
case 15:
return 0;
case 9:
case 10:
case 11:
case 13:
return 1;
default:
g_assert_not_reached();
}
}
/*
@ -91,45 +137,18 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
        if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
            /* no change */
            return false;
        }
/* If !mseccfg.MML then ignore writes with encoding RW=01 */
if ((val & PMP_WRITE) && !(val & PMP_READ) &&
!MSECCFG_MML_ISSET(env)) {
return false;
}
if (pmp_is_readonly(env, pmp_index)) {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpcfg write - read only\n");
} else if (pmp_is_invalid_smepmp_cfg(env, val)) {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpcfg write - invalid\n");
} else {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
return true;
@ -353,16 +372,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
/*
* If the PMP entry is not off and the address is in range,
@ -381,6 +390,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
/*
* If mseccfg.MML Bit set, do the enhanced pmp priv check
*/
const uint8_t smepmp_operation =
pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);
if (mode == PRV_M) {
switch (smepmp_operation) {
case 0:
@ -517,6 +529,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
bool is_next_cfg_tor = false;
if (addr_index < MAX_RISCV_PMPS) {
if (env->pmp_state.pmp[addr_index].addr_reg == val) {
/* no change */
return;
}
/*
* In TOR mode, need to check the lock bit of the next pmp
* (if there is a next).
@ -525,25 +542,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
            if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg+1 read only\n");
return;
}
}
if (!pmp_is_readonly(env, addr_index)) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule_addr(env, addr_index);
if (is_next_cfg_tor) {
pmp_update_rule_addr(env, addr_index + 1);
}
tlb_flush(env_cpu(env));
} else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - read only\n");
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,


@ -1209,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* The specification allows for longer insns, but not supported by qemu. */
#define MAX_INSN_LEN 4
const RISCVDecoder decoder_table[] = {
{ always_true_p, decode_insn32 },
{ has_xthead_p, decode_xthead},


@ -117,25 +117,42 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
 * It will trigger an exception if there is no mapping in the TLB
 * and the page table walk can't fill the TLB entry. The guest
 * software can then return here after processing the exception,
 * or never return.
 *
 * This function can also be used when direct access to probe_access_flags is
 * needed in order to access the flags. If a pointer to a flags operand is
 * provided, the function calls probe_access_flags instead, using nonfault
 * and updating host and flags.
 */
static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len,
uintptr_t ra, MMUAccessType access_type, int mmu_index,
void **host, int *flags, bool nonfault)
{
target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
target_ulong curlen = MIN(pagelen, len);
if (flags != NULL) {
*flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
access_type, mmu_index, nonfault, host, ra);
} else {
probe_access(env, adjust_addr(env, addr), curlen, access_type,
mmu_index, ra);
}
if (len > curlen) {
addr += curlen;
curlen = len - curlen;
if (flags != NULL) {
*flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
access_type, mmu_index, nonfault,
host, ra);
} else {
probe_access(env, adjust_addr(env, addr), curlen, access_type,
mmu_index, ra);
}
}
}
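
The page-split arithmetic above comes from pagelen = -(addr | TARGET_PAGE_MASK), which yields the number of bytes from addr to the end of its page. A standalone check (4 KiB pages assumed for the illustration):

#include <stdio.h>
#include <stdint.h>

#define TARGET_PAGE_SIZE 4096u
#define TARGET_PAGE_MASK ((uint32_t)-TARGET_PAGE_SIZE)

int main(void)
{
    uint32_t addrs[] = { 0x1000, 0x1ff0, 0x1234 };
    for (int i = 0; i < 3; i++) {
        uint32_t pagelen = -(addrs[i] | TARGET_PAGE_MASK);
        printf("addr=0x%04x: %u bytes left in page\n", addrs[i], pagelen);
    }
    return 0;
}
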
static inline void vext_set_elem_mask(void *v0, int index,
uint8_t value)
{
@ -335,8 +352,8 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
/* Check page permission/pmp/watchpoint/etc. */
probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags,
true);
if (flags == 0) {
if (nf == 1) {
@ -635,7 +652,7 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
uint32_t vma = vext_vma(desc);
target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
int mmu_index = riscv_env_mmu_index(env, false);
int flags, probe_flags;
void *host;
VSTART_CHECK_EARLY_EXIT(env, env->vl);
@ -649,15 +666,15 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
}
/* Check page permission/pmp/watchpoint/etc. */
probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host,
&flags, true);
/* If we are crossing a page check also the second page. */
if (env->vl > elems) {
addr_probe = addr + (elems << log2_esz);
probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD,
mmu_index, &host, &probe_flags, true);
flags |= probe_flags;
}
if (flags & ~TLB_WATCHPOINT) {
@ -669,16 +686,16 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
addr_i = adjust_addr(env, base + i * (nf << log2_esz));
if (i == 0) {
/* Allow fault on first element. */
probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD,
mmu_index, &host, NULL, false);
} else {
remain = nf << log2_esz;
while (remain > 0) {
offset = -(addr_i | TARGET_PAGE_MASK);
/* Probe nonfault on subsequent elements. */
probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
mmu_index, &host, &flags, true);
/*
* Stop if invalid (unmapped) or mmio (transaction may
@ -5116,9 +5133,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
for (i = i_max; i < vl; ++i) { \
if (!vm && !vext_elem_mask(v0, i)) { \
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = 0; \
} \
\
env->vstart = 0; \