Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190701' into staging

target-arm queue:
 * hw/arm/boot: fix direct kernel boot with initrd
 * hw/arm/msf2-som: Exit when the cpu is not the expected one
 * i.mx7: fix bugs in PCI controller needed to boot recent kernels
 * aspeed: add RTC device
 * aspeed: fix some timer device bugs
 * aspeed: add swift-bmc board
 * aspeed: vic: Add support for legacy register interface
 * aspeed: add aspeed-xdma device
 * Add new sbsa-ref board for aarch64
 * target/arm: code refactoring in preparation for support of
   compilation with TCG disabled

# gpg: Signature made Mon 01 Jul 2019 17:38:10 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190701: (46 commits)
  target/arm: Declare some M-profile functions publicly
  target/arm: Declare arm_log_exception() function publicly
  target/arm: Restrict PSCI to TCG
  target/arm/vfp_helper: Restrict the SoftFloat use to TCG
  target/arm/vfp_helper: Extract vfp_set_fpscr_from_host()
  target/arm/vfp_helper: Extract vfp_set_fpscr_to_host()
  target/arm/vfp_helper: Move code around
  target/arm: Move TLB related routines to tlb_helper.c
  target/arm: Declare get_phys_addr() function publicly
  target/arm: Move CPU state dumping routines to cpu.c
  target/arm: Move the DC ZVA helper into op_helper
  target/arm: Fix coding style issues
  target/arm: Fix multiline comment syntax
  target/arm/helper: Remove unused include
  target/arm: Add copyright boilerplate
  target/arm: Makefile cleanup (softmmu)
  target/arm: Makefile cleanup (KVM)
  target/arm: Makefile cleanup (ARM)
  target/arm: Makefile cleanup (Aarch64)
  hw/arm: Add arm SBSA reference machine, devices part
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit c4e42a9c2b
Peter Maydell <peter.maydell@linaro.org>, 2019-07-02 12:58:32 +01:00
39 changed files with 2675 additions and 926 deletions
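
The queue adds two new machine types, "sbsa-ref" and "swift-bmc" (both visible in the diffs below). As a rough smoke test, assuming a QEMU built from this tree — the firmware and flash image file names here are placeholders, not part of this series:

  # sbsa-ref: AArch64 reference platform; expects firmware in two pflash
  # devices and has no fw_cfg, so the OS is loaded by firmware, not -kernel
  qemu-system-aarch64 -machine sbsa-ref -cpu cortex-a57 -m 1G \
      -drive if=pflash,format=raw,file=SBSA_FLASH0.fd \
      -drive if=pflash,format=raw,file=SBSA_FLASH1.fd \
      -serial stdio

  # swift-bmc: ASPEED AST2500 BMC board; boots from an MTD flash image
  qemu-system-arm -machine swift-bmc -nographic \
      -drive if=mtd,format=raw,file=swift-bmc-flash.img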

MAINTAINERS

@@ -730,6 +730,14 @@ F: include/hw/arm/fsl-imx6.h
 F: include/hw/misc/imx6_*.h
 F: include/hw/ssi/imx_spi.h
 
+SBSA-REF
+M: Radoslaw Biernacki <radoslaw.biernacki@linaro.org>
+M: Peter Maydell <peter.maydell@linaro.org>
+R: Leif Lindholm <leif.lindholm@linaro.org>
+L: qemu-arm@nongnu.org
+S: Maintained
+F: hw/arm/sbsa-ref.c
+
 Sharp SL-5500 (Collie) PDA
 M: Peter Maydell <peter.maydell@linaro.org>
 L: qemu-arm@nongnu.org

default-configs/aarch64-softmmu.mak

@@ -5,3 +5,4 @@ include arm-softmmu.mak
 CONFIG_XLNX_ZYNQMP_ARM=y
 CONFIG_XLNX_VERSAL=y
+CONFIG_SBSA_REF=y

hw/arm/Kconfig

@@ -184,6 +184,20 @@ config REALVIEW
     select DS1338 # I2C RTC+NVRAM
     select USB_OHCI
 
+config SBSA_REF
+    bool
+    imply PCI_DEVICES
+    select AHCI
+    select ARM_SMMUV3
+    select GPIO_KEY
+    select PCI_EXPRESS
+    select PCI_EXPRESS_GENERIC_BRIDGE
+    select PFLASH_CFI01
+    select PL011 # UART
+    select PL031 # RTC
+    select PL061 # GPIO
+    select USB_EHCI_SYSBUS
+
 config SABRELITE
     bool
     select FSL_IMX6

hw/arm/Makefile.objs

@@ -19,6 +19,7 @@ obj-$(CONFIG_SPITZ) += spitz.o
 obj-$(CONFIG_TOSA) += tosa.o
 obj-$(CONFIG_Z2) += z2.o
 obj-$(CONFIG_REALVIEW) += realview.o
+obj-$(CONFIG_SBSA_REF) += sbsa-ref.o
 obj-$(CONFIG_STELLARIS) += stellaris.o
 obj-$(CONFIG_COLLIE) += collie.o
 obj-$(CONFIG_VERSATILE) += versatilepb.o

hw/arm/aspeed.c

@@ -22,17 +22,18 @@
 #include "hw/misc/tmp105.h"
 #include "qemu/log.h"
 #include "sysemu/block-backend.h"
+#include "sysemu/sysemu.h"
 #include "hw/loader.h"
 #include "qemu/error-report.h"
 #include "qemu/units.h"
 
 static struct arm_boot_info aspeed_board_binfo = {
     .board_id = -1, /* device-tree-only board */
-    .nb_cpus = 1,
 };
 
 struct AspeedBoardState {
     AspeedSoCState soc;
+    MemoryRegion ram_container;
     MemoryRegion ram;
     MemoryRegion max_ram;
 };
@@ -72,6 +73,17 @@ struct AspeedBoardState {
     SCU_AST2500_HW_STRAP_ACPI_ENABLE |                              \
     SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER))
 
+/* Swift hardware value: 0xF11AD206 */
+#define SWIFT_BMC_HW_STRAP1 (                                       \
+    AST2500_HW_STRAP1_DEFAULTS |                                    \
+    SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE |                     \
+    SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE |                        \
+    SCU_AST2500_HW_STRAP_UART_DEBUG |                               \
+    SCU_AST2500_HW_STRAP_DDR4_ENABLE |                              \
+    SCU_H_PLL_BYPASS_EN |                                           \
+    SCU_AST2500_HW_STRAP_ACPI_ENABLE |                              \
+    SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER))
+
 /* Witherspoon hardware value: 0xF10AD216 (but use romulus definition) */
 #define WITHERSPOON_BMC_HW_STRAP1 ROMULUS_BMC_HW_STRAP1
@@ -159,6 +171,10 @@ static void aspeed_board_init(MachineState *machine,
     ram_addr_t max_ram_size;
 
     bmc = g_new0(AspeedBoardState, 1);
+
+    memory_region_init(&bmc->ram_container, NULL, "aspeed-ram-container",
+                       UINT32_MAX);
+
     object_initialize_child(OBJECT(machine), "soc", &bmc->soc,
                             (sizeof(bmc->soc)), cfg->soc_name, &error_abort,
                             NULL);
@@ -171,6 +187,8 @@ static void aspeed_board_init(MachineState *machine,
                             &error_abort);
     object_property_set_int(OBJECT(&bmc->soc), cfg->num_cs, "num-cs",
                             &error_abort);
+    object_property_set_int(OBJECT(&bmc->soc), smp_cpus, "num-cpus",
+                            &error_abort);
     if (machine->kernel_filename) {
         /*
          * When booting with a -kernel command line there is no u-boot
@@ -191,18 +209,16 @@ static void aspeed_board_init(MachineState *machine,
                             &error_abort);
 
     memory_region_allocate_system_memory(&bmc->ram, NULL, "ram", ram_size);
-    memory_region_add_subregion(get_system_memory(), sc->info->sdram_base,
-                                &bmc->ram);
-    object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
-                                   &error_abort);
+    memory_region_add_subregion(&bmc->ram_container, 0, &bmc->ram);
+    memory_region_add_subregion(get_system_memory(),
+                                sc->info->memmap[ASPEED_SDRAM],
+                                &bmc->ram_container);
 
     max_ram_size = object_property_get_uint(OBJECT(&bmc->soc), "max-ram-size",
                                             &error_abort);
     memory_region_init_io(&bmc->max_ram, NULL, &max_ram_ops, NULL,
                           "max_ram", max_ram_size - ram_size);
-    memory_region_add_subregion(get_system_memory(),
-                                sc->info->sdram_base + ram_size,
-                                &bmc->max_ram);
+    memory_region_add_subregion(&bmc->ram_container, ram_size, &bmc->max_ram);
 
     aspeed_board_init_flashes(&bmc->soc.fmc, cfg->fmc_model, &error_abort);
     aspeed_board_init_flashes(&bmc->soc.spi[0], cfg->spi_model, &error_abort);
@@ -229,7 +245,8 @@ static void aspeed_board_init(MachineState *machine,
     aspeed_board_binfo.initrd_filename = machine->initrd_filename;
     aspeed_board_binfo.kernel_cmdline = machine->kernel_cmdline;
     aspeed_board_binfo.ram_size = ram_size;
-    aspeed_board_binfo.loader_start = sc->info->sdram_base;
+    aspeed_board_binfo.loader_start = sc->info->memmap[ASPEED_SDRAM];
+    aspeed_board_binfo.nb_cpus = bmc->soc.num_cpus;
 
     if (cfg->i2c_init) {
         cfg->i2c_init(bmc);
@@ -286,6 +303,35 @@ static void romulus_bmc_i2c_init(AspeedBoardState *bmc)
     i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 11), "ds1338", 0x32);
 }
 
+static void swift_bmc_i2c_init(AspeedBoardState *bmc)
+{
+    AspeedSoCState *soc = &bmc->soc;
+
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 3), "pca9552", 0x60);
+
+    /* The swift board expects a TMP275 but a TMP105 is compatible */
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), "tmp105", 0x48);
+    /* The swift board expects a pca9551 but a pca9552 is compatible */
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 7), "pca9552", 0x60);
+
+    /* The swift board expects an Epson RX8900 RTC but a ds1338 is compatible */
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 8), "ds1338", 0x32);
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 8), "pca9552", 0x60);
+
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 9), "tmp423", 0x4c);
+    /* The swift board expects a pca9539 but a pca9552 is compatible */
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 9), "pca9552", 0x74);
+
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 10), "tmp423", 0x4c);
+    /* The swift board expects a pca9539 but a pca9552 is compatible */
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 10), "pca9552",
+                     0x74);
+
+    /* The swift board expects a TMP275 but a TMP105 is compatible */
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 12), "tmp105", 0x48);
+    i2c_create_slave(aspeed_i2c_get_bus(DEVICE(&soc->i2c), 12), "tmp105", 0x4a);
+}
+
 static void witherspoon_bmc_i2c_init(AspeedBoardState *bmc)
 {
     AspeedSoCState *soc = &bmc->soc;
@@ -326,7 +372,7 @@ static void aspeed_machine_class_init(ObjectClass *oc, void *data)
     mc->desc = board->desc;
     mc->init = aspeed_machine_init;
-    mc->max_cpus = 1;
+    mc->max_cpus = ASPEED_CPUS_NUM;
     mc->no_sdcard = 1;
     mc->no_floppy = 1;
     mc->no_cdrom = 1;
@@ -376,6 +422,16 @@ static const AspeedBoardConfig aspeed_boards[] = {
         .num_cs = 2,
         .i2c_init = romulus_bmc_i2c_init,
         .ram = 512 * MiB,
+    }, {
+        .name = MACHINE_TYPE_NAME("swift-bmc"),
+        .desc = "OpenPOWER Swift BMC (ARM1176)",
+        .soc_name = "ast2500-a1",
+        .hw_strap1 = SWIFT_BMC_HW_STRAP1,
+        .fmc_model = "mx66l1g45g",
+        .spi_model = "mx66l1g45g",
+        .num_cs = 2,
+        .i2c_init = swift_bmc_i2c_init,
+        .ram = 512 * MiB,
     }, {
         .name = MACHINE_TYPE_NAME("witherspoon-bmc"),
         .desc = "OpenPOWER Witherspoon BMC (ARM1176)",

hw/arm/aspeed_soc.c

@@ -19,36 +19,99 @@
 #include "hw/char/serial.h"
 #include "qemu/log.h"
 #include "qemu/module.h"
+#include "qemu/error-report.h"
 #include "hw/i2c/aspeed_i2c.h"
 #include "net/net.h"
 
-#define ASPEED_SOC_UART_5_BASE      0x00184000
-
 #define ASPEED_SOC_IOMEM_SIZE       0x00200000
-#define ASPEED_SOC_IOMEM_BASE       0x1E600000
-#define ASPEED_SOC_FMC_BASE         0x1E620000
-#define ASPEED_SOC_SPI_BASE         0x1E630000
-#define ASPEED_SOC_SPI2_BASE        0x1E631000
-#define ASPEED_SOC_VIC_BASE         0x1E6C0000
-#define ASPEED_SOC_SDMC_BASE        0x1E6E0000
-#define ASPEED_SOC_SCU_BASE         0x1E6E2000
-#define ASPEED_SOC_SRAM_BASE        0x1E720000
-#define ASPEED_SOC_TIMER_BASE       0x1E782000
-#define ASPEED_SOC_WDT_BASE         0x1E785000
-#define ASPEED_SOC_I2C_BASE         0x1E78A000
-#define ASPEED_SOC_ETH1_BASE        0x1E660000
-#define ASPEED_SOC_ETH2_BASE        0x1E680000
 
-static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
-static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, };
+static const hwaddr aspeed_soc_ast2400_memmap[] = {
+    [ASPEED_IOMEM]  = 0x1E600000,
+    [ASPEED_FMC]    = 0x1E620000,
+    [ASPEED_SPI1]   = 0x1E630000,
+    [ASPEED_VIC]    = 0x1E6C0000,
+    [ASPEED_SDMC]   = 0x1E6E0000,
+    [ASPEED_SCU]    = 0x1E6E2000,
+    [ASPEED_XDMA]   = 0x1E6E7000,
+    [ASPEED_ADC]    = 0x1E6E9000,
+    [ASPEED_SRAM]   = 0x1E720000,
+    [ASPEED_GPIO]   = 0x1E780000,
+    [ASPEED_RTC]    = 0x1E781000,
+    [ASPEED_TIMER1] = 0x1E782000,
+    [ASPEED_WDT]    = 0x1E785000,
+    [ASPEED_PWM]    = 0x1E786000,
+    [ASPEED_LPC]    = 0x1E789000,
+    [ASPEED_IBT]    = 0x1E789140,
+    [ASPEED_I2C]    = 0x1E78A000,
+    [ASPEED_ETH1]   = 0x1E660000,
+    [ASPEED_ETH2]   = 0x1E680000,
+    [ASPEED_UART1]  = 0x1E783000,
+    [ASPEED_UART5]  = 0x1E784000,
+    [ASPEED_VUART]  = 0x1E787000,
+    [ASPEED_SDRAM]  = 0x40000000,
+};
 
-#define AST2400_SDRAM_BASE 0x40000000
-#define AST2500_SDRAM_BASE 0x80000000
+static const hwaddr aspeed_soc_ast2500_memmap[] = {
+    [ASPEED_IOMEM]  = 0x1E600000,
+    [ASPEED_FMC]    = 0x1E620000,
+    [ASPEED_SPI1]   = 0x1E630000,
+    [ASPEED_SPI2]   = 0x1E631000,
+    [ASPEED_VIC]    = 0x1E6C0000,
+    [ASPEED_SDMC]   = 0x1E6E0000,
+    [ASPEED_SCU]    = 0x1E6E2000,
+    [ASPEED_XDMA]   = 0x1E6E7000,
+    [ASPEED_ADC]    = 0x1E6E9000,
+    [ASPEED_SRAM]   = 0x1E720000,
+    [ASPEED_GPIO]   = 0x1E780000,
+    [ASPEED_RTC]    = 0x1E781000,
+    [ASPEED_TIMER1] = 0x1E782000,
+    [ASPEED_WDT]    = 0x1E785000,
+    [ASPEED_PWM]    = 0x1E786000,
+    [ASPEED_LPC]    = 0x1E789000,
+    [ASPEED_IBT]    = 0x1E789140,
+    [ASPEED_I2C]    = 0x1E78A000,
+    [ASPEED_ETH1]   = 0x1E660000,
+    [ASPEED_ETH2]   = 0x1E680000,
+    [ASPEED_UART1]  = 0x1E783000,
+    [ASPEED_UART5]  = 0x1E784000,
+    [ASPEED_VUART]  = 0x1E787000,
+    [ASPEED_SDRAM]  = 0x80000000,
+};
+
+static const int aspeed_soc_ast2400_irqmap[] = {
+    [ASPEED_UART1]  = 9,
+    [ASPEED_UART2]  = 32,
+    [ASPEED_UART3]  = 33,
+    [ASPEED_UART4]  = 34,
+    [ASPEED_UART5]  = 10,
+    [ASPEED_VUART]  = 8,
+    [ASPEED_FMC]    = 19,
+    [ASPEED_SDMC]   = 0,
+    [ASPEED_SCU]    = 21,
+    [ASPEED_ADC]    = 31,
+    [ASPEED_GPIO]   = 20,
+    [ASPEED_RTC]    = 22,
+    [ASPEED_TIMER1] = 16,
+    [ASPEED_TIMER2] = 17,
+    [ASPEED_TIMER3] = 18,
+    [ASPEED_TIMER4] = 35,
+    [ASPEED_TIMER5] = 36,
+    [ASPEED_TIMER6] = 37,
+    [ASPEED_TIMER7] = 38,
+    [ASPEED_TIMER8] = 39,
+    [ASPEED_WDT]    = 27,
+    [ASPEED_PWM]    = 28,
+    [ASPEED_LPC]    = 8,
+    [ASPEED_IBT]    = 8, /* LPC */
+    [ASPEED_I2C]    = 12,
+    [ASPEED_ETH1]   = 2,
+    [ASPEED_ETH2]   = 3,
+    [ASPEED_XDMA]   = 6,
+};
+
+#define aspeed_soc_ast2500_irqmap aspeed_soc_ast2400_irqmap
 
-static const hwaddr aspeed_soc_ast2400_spi_bases[] = { ASPEED_SOC_SPI_BASE };
 static const char *aspeed_soc_ast2400_typenames[] = { "aspeed.smc.spi" };
 
-static const hwaddr aspeed_soc_ast2500_spi_bases[] = { ASPEED_SOC_SPI_BASE,
-                                                       ASPEED_SOC_SPI2_BASE};
 static const char *aspeed_soc_ast2500_typenames[] = {
     "aspeed.smc.ast2500-spi1", "aspeed.smc.ast2500-spi2" };
@@ -57,57 +120,71 @@ static const AspeedSoCInfo aspeed_socs[] = {
         .name = "ast2400-a0",
         .cpu_type = ARM_CPU_TYPE_NAME("arm926"),
         .silicon_rev = AST2400_A0_SILICON_REV,
-        .sdram_base = AST2400_SDRAM_BASE,
         .sram_size = 0x8000,
         .spis_num = 1,
-        .spi_bases = aspeed_soc_ast2400_spi_bases,
         .fmc_typename = "aspeed.smc.fmc",
         .spi_typename = aspeed_soc_ast2400_typenames,
         .wdts_num = 2,
+        .irqmap = aspeed_soc_ast2400_irqmap,
+        .memmap = aspeed_soc_ast2400_memmap,
+        .num_cpus = 1,
     }, {
         .name = "ast2400-a1",
         .cpu_type = ARM_CPU_TYPE_NAME("arm926"),
         .silicon_rev = AST2400_A1_SILICON_REV,
-        .sdram_base = AST2400_SDRAM_BASE,
         .sram_size = 0x8000,
         .spis_num = 1,
-        .spi_bases = aspeed_soc_ast2400_spi_bases,
         .fmc_typename = "aspeed.smc.fmc",
         .spi_typename = aspeed_soc_ast2400_typenames,
         .wdts_num = 2,
+        .irqmap = aspeed_soc_ast2400_irqmap,
+        .memmap = aspeed_soc_ast2400_memmap,
+        .num_cpus = 1,
     }, {
         .name = "ast2400",
         .cpu_type = ARM_CPU_TYPE_NAME("arm926"),
         .silicon_rev = AST2400_A0_SILICON_REV,
-        .sdram_base = AST2400_SDRAM_BASE,
         .sram_size = 0x8000,
         .spis_num = 1,
-        .spi_bases = aspeed_soc_ast2400_spi_bases,
         .fmc_typename = "aspeed.smc.fmc",
         .spi_typename = aspeed_soc_ast2400_typenames,
         .wdts_num = 2,
+        .irqmap = aspeed_soc_ast2400_irqmap,
+        .memmap = aspeed_soc_ast2400_memmap,
+        .num_cpus = 1,
     }, {
         .name = "ast2500-a1",
        .cpu_type = ARM_CPU_TYPE_NAME("arm1176"),
         .silicon_rev = AST2500_A1_SILICON_REV,
-        .sdram_base = AST2500_SDRAM_BASE,
         .sram_size = 0x9000,
         .spis_num = 2,
-        .spi_bases = aspeed_soc_ast2500_spi_bases,
         .fmc_typename = "aspeed.smc.ast2500-fmc",
         .spi_typename = aspeed_soc_ast2500_typenames,
         .wdts_num = 3,
+        .irqmap = aspeed_soc_ast2500_irqmap,
+        .memmap = aspeed_soc_ast2500_memmap,
+        .num_cpus = 1,
     },
 };
 
+static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl)
+{
+    AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+
+    return qdev_get_gpio_in(DEVICE(&s->vic), sc->info->irqmap[ctrl]);
+}
+
 static void aspeed_soc_init(Object *obj)
 {
     AspeedSoCState *s = ASPEED_SOC(obj);
     AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
     int i;
 
-    object_initialize_child(obj, "cpu", OBJECT(&s->cpu), sizeof(s->cpu),
-                            sc->info->cpu_type, &error_abort, NULL);
+    for (i = 0; i < sc->info->num_cpus; i++) {
+        object_initialize_child(obj, "cpu[*]", OBJECT(&s->cpu[i]),
+                                sizeof(s->cpu[i]), sc->info->cpu_type,
+                                &error_abort, NULL);
+    }
 
     sysbus_init_child_obj(obj, "scu", OBJECT(&s->scu), sizeof(s->scu),
                           TYPE_ASPEED_SCU);
@@ -123,6 +200,9 @@ static void aspeed_soc_init(Object *obj)
     sysbus_init_child_obj(obj, "vic", OBJECT(&s->vic), sizeof(s->vic),
                           TYPE_ASPEED_VIC);
 
+    sysbus_init_child_obj(obj, "rtc", OBJECT(&s->rtc), sizeof(s->rtc),
+                          TYPE_ASPEED_RTC);
+
     sysbus_init_child_obj(obj, "timerctrl", OBJECT(&s->timerctrl),
                           sizeof(s->timerctrl), TYPE_ASPEED_TIMER);
     object_property_add_const_link(OBJECT(&s->timerctrl), "scu",
@@ -155,10 +235,17 @@ static void aspeed_soc_init(Object *obj)
                               sizeof(s->wdt[i]), TYPE_ASPEED_WDT);
         qdev_prop_set_uint32(DEVICE(&s->wdt[i]), "silicon-rev",
                              sc->info->silicon_rev);
+        object_property_add_const_link(OBJECT(&s->wdt[i]), "scu",
+                                       OBJECT(&s->scu), &error_abort);
     }
 
-    sysbus_init_child_obj(obj, "ftgmac100", OBJECT(&s->ftgmac100),
-                          sizeof(s->ftgmac100), TYPE_FTGMAC100);
+    for (i = 0; i < ASPEED_MACS_NUM; i++) {
+        sysbus_init_child_obj(obj, "ftgmac100[*]", OBJECT(&s->ftgmac100[i]),
+                              sizeof(s->ftgmac100[i]), TYPE_FTGMAC100);
+    }
+
+    sysbus_init_child_obj(obj, "xdma", OBJECT(&s->xdma), sizeof(s->xdma),
+                          TYPE_ASPEED_XDMA);
 }
 
 static void aspeed_soc_realize(DeviceState *dev, Error **errp)
@@ -169,15 +256,23 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
     Error *err = NULL, *local_err = NULL;
 
     /* IO space */
-    create_unimplemented_device("aspeed_soc.io",
-                                ASPEED_SOC_IOMEM_BASE, ASPEED_SOC_IOMEM_SIZE);
+    create_unimplemented_device("aspeed_soc.io", sc->info->memmap[ASPEED_IOMEM],
+                                ASPEED_SOC_IOMEM_SIZE);
+
+    if (s->num_cpus > sc->info->num_cpus) {
+        warn_report("%s: invalid number of CPUs %d, using default %d",
+                    sc->info->name, s->num_cpus, sc->info->num_cpus);
+        s->num_cpus = sc->info->num_cpus;
+    }
 
     /* CPU */
-    object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
-    if (err) {
-        error_propagate(errp, err);
-        return;
+    for (i = 0; i < s->num_cpus; i++) {
+        object_property_set_bool(OBJECT(&s->cpu[i]), true, "realized", &err);
+        if (err) {
+            error_propagate(errp, err);
+            return;
+        }
     }
 
     /* SRAM */
     memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram",
@@ -186,8 +281,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
-    memory_region_add_subregion(get_system_memory(), ASPEED_SOC_SRAM_BASE,
-                                &s->sram);
+    memory_region_add_subregion(get_system_memory(),
+                                sc->info->memmap[ASPEED_SRAM], &s->sram);
 
     /* SCU */
     object_property_set_bool(OBJECT(&s->scu), true, "realized", &err);
@@ -195,7 +290,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, ASPEED_SOC_SCU_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->info->memmap[ASPEED_SCU]);
 
     /* VIC */
     object_property_set_bool(OBJECT(&s->vic), true, "realized", &err);
@@ -203,29 +298,39 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, ASPEED_SOC_VIC_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, sc->info->memmap[ASPEED_VIC]);
     sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0,
                        qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ));
     sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1,
                        qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ));
 
+    /* RTC */
+    object_property_set_bool(OBJECT(&s->rtc), true, "realized", &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->info->memmap[ASPEED_RTC]);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0,
+                       aspeed_soc_get_irq(s, ASPEED_RTC));
+
     /* Timer */
     object_property_set_bool(OBJECT(&s->timerctrl), true, "realized", &err);
     if (err) {
         error_propagate(errp, err);
         return;
     }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, ASPEED_SOC_TIMER_BASE);
-    for (i = 0; i < ARRAY_SIZE(timer_irqs); i++) {
-        qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->vic), timer_irqs[i]);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0,
+                    sc->info->memmap[ASPEED_TIMER1]);
+    for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
+        qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_TIMER1 + i);
         sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
     }
 
     /* UART - attach an 8250 to the IO space as our UART5 */
     if (serial_hd(0)) {
-        qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]);
-        serial_mm_init(get_system_memory(),
-                       ASPEED_SOC_IOMEM_BASE + ASPEED_SOC_UART_5_BASE, 2,
+        qemu_irq uart5 = aspeed_soc_get_irq(s, ASPEED_UART5);
+        serial_mm_init(get_system_memory(), sc->info->memmap[ASPEED_UART5], 2,
                        uart5, 38400, serial_hd(0), DEVICE_LITTLE_ENDIAN);
     }
@@ -235,21 +340,27 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, ASPEED_SOC_I2C_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, sc->info->memmap[ASPEED_I2C]);
     sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
-                       qdev_get_gpio_in(DEVICE(&s->vic), 12));
+                       aspeed_soc_get_irq(s, ASPEED_I2C));
 
     /* FMC, The number of CS is set at the board level */
+    object_property_set_int(OBJECT(&s->fmc), sc->info->memmap[ASPEED_SDRAM],
+                            "sdram-base", &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
     object_property_set_bool(OBJECT(&s->fmc), true, "realized", &err);
     if (err) {
         error_propagate(errp, err);
         return;
     }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, ASPEED_SOC_FMC_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->info->memmap[ASPEED_FMC]);
     sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1,
                     s->fmc.ctrl->flash_window_base);
     sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0,
-                       qdev_get_gpio_in(DEVICE(&s->vic), 19));
+                       aspeed_soc_get_irq(s, ASPEED_FMC));
 
     /* SPI */
     for (i = 0; i < sc->info->spis_num; i++) {
@@ -261,7 +372,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
             error_propagate(errp, err);
             return;
         }
-        sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, sc->info->spi_bases[i]);
+        sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0,
+                        sc->info->memmap[ASPEED_SPI1 + i]);
         sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1,
                         s->spi[i].ctrl->flash_window_base);
     }
@@ -272,7 +384,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, ASPEED_SOC_SDMC_BASE);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->info->memmap[ASPEED_SDMC]);
 
     /* Watch dog */
     for (i = 0; i < sc->info->wdts_num; i++) {
@@ -282,24 +394,43 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
             return;
         }
         sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0,
-                        ASPEED_SOC_WDT_BASE + i * 0x20);
+                        sc->info->memmap[ASPEED_WDT] + i * 0x20);
     }
 
     /* Net */
-    qdev_set_nic_properties(DEVICE(&s->ftgmac100), &nd_table[0]);
-    object_property_set_bool(OBJECT(&s->ftgmac100), true, "aspeed", &err);
-    object_property_set_bool(OBJECT(&s->ftgmac100), true, "realized",
-                             &local_err);
-    error_propagate(&err, local_err);
-    if (err) {
-        error_propagate(errp, err);
-        return;
-    }
-    sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100), 0, ASPEED_SOC_ETH1_BASE);
-    sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100), 0,
-                       qdev_get_gpio_in(DEVICE(&s->vic), 2));
+    for (i = 0; i < nb_nics; i++) {
+        qdev_set_nic_properties(DEVICE(&s->ftgmac100[i]), &nd_table[i]);
+        object_property_set_bool(OBJECT(&s->ftgmac100[i]), true, "aspeed",
+                                 &err);
+        object_property_set_bool(OBJECT(&s->ftgmac100[i]), true, "realized",
+                                 &local_err);
+        error_propagate(&err, local_err);
+        if (err) {
+            error_propagate(errp, err);
+            return;
+        }
+        sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0,
+                        sc->info->memmap[ASPEED_ETH1 + i]);
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0,
+                           aspeed_soc_get_irq(s, ASPEED_ETH1 + i));
+    }
+
+    /* XDMA */
+    object_property_set_bool(OBJECT(&s->xdma), true, "realized", &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0,
+                    sc->info->memmap[ASPEED_XDMA]);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0,
+                       aspeed_soc_get_irq(s, ASPEED_XDMA));
 }
 
+static Property aspeed_soc_properties[] = {
+    DEFINE_PROP_UINT32("num-cpus", AspeedSoCState, num_cpus, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void aspeed_soc_class_init(ObjectClass *oc, void *data)
 {
@@ -309,6 +440,7 @@ static void aspeed_soc_class_init(ObjectClass *oc, void *data)
     dc->realize = aspeed_soc_realize;
     /* Reason: Uses serial_hds and nd_table in realize() directly */
     dc->user_creatable = false;
+    dc->props = aspeed_soc_properties;
 }
 
 static const TypeInfo aspeed_soc_type_info = {

hw/arm/boot.c

@@ -1109,10 +1109,11 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
                              info->initrd_filename);
                 exit(1);
             }
-            if (info->initrd_start + initrd_size > info->ram_size) {
+            if (info->initrd_start + initrd_size > ram_end) {
                 error_report("could not load initrd '%s': "
                              "too big to fit into RAM after the kernel",
                              info->initrd_filename);
+                exit(1);
             }
         } else {
             initrd_size = 0;

hw/arm/fsl-imx7.c

@@ -526,6 +526,17 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
      */
     create_unimplemented_device("lcdif", FSL_IMX7_LCDIF_ADDR,
                                 FSL_IMX7_LCDIF_SIZE);
+
+    /*
+     * DMA APBH
+     */
+    create_unimplemented_device("dma-apbh", FSL_IMX7_DMA_APBH_ADDR,
+                                FSL_IMX7_DMA_APBH_SIZE);
+    /*
+     * PCIe PHY
+     */
+    create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR,
+                                FSL_IMX7_PCIE_PHY_SIZE);
 }
 
 static void fsl_imx7_class_init(ObjectClass *oc, void *data)

hw/arm/msf2-som.c

@@ -53,6 +53,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine)
     if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) {
         error_report("This board can only be used with CPU %s",
                      mc->default_cpu_type);
+        exit(1);
     }
 
     memory_region_init_ram(ddr, NULL, "ddr-ram", DDR_SIZE,

hw/arm/sbsa-ref.c (new file, 806 lines)

@@ -0,0 +1,806 @@
/*
* ARM SBSA Reference Platform emulation
*
* Copyright (c) 2018 Linaro Limited
* Written by Hongbo Zhang <hongbo.zhang@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "sysemu/device_tree.h"
#include "sysemu/numa.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "exec/hwaddr.h"
#include "kvm_arm.h"
#include "hw/arm/boot.h"
#include "hw/block/flash.h"
#include "hw/boards.h"
#include "hw/ide/internal.h"
#include "hw/ide/ahci_internal.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/loader.h"
#include "hw/pci-host/gpex.h"
#include "hw/usb.h"
#include "net/net.h"
#define RAMLIMIT_GB 8192
#define RAMLIMIT_BYTES (RAMLIMIT_GB * GiB)
#define NUM_IRQS 256
#define NUM_SMMU_IRQS 4
#define NUM_SATA_PORTS 6
#define VIRTUAL_PMU_IRQ 7
#define ARCH_GIC_MAINT_IRQ 9
#define ARCH_TIMER_VIRT_IRQ 11
#define ARCH_TIMER_S_EL1_IRQ 13
#define ARCH_TIMER_NS_EL1_IRQ 14
#define ARCH_TIMER_NS_EL2_IRQ 10
enum {
SBSA_FLASH,
SBSA_MEM,
SBSA_CPUPERIPHS,
SBSA_GIC_DIST,
SBSA_GIC_REDIST,
SBSA_SMMU,
SBSA_UART,
SBSA_RTC,
SBSA_PCIE,
SBSA_PCIE_MMIO,
SBSA_PCIE_MMIO_HIGH,
SBSA_PCIE_PIO,
SBSA_PCIE_ECAM,
SBSA_GPIO,
SBSA_SECURE_UART,
SBSA_SECURE_UART_MM,
SBSA_SECURE_MEM,
SBSA_AHCI,
SBSA_EHCI,
};
typedef struct MemMapEntry {
hwaddr base;
hwaddr size;
} MemMapEntry;
typedef struct {
MachineState parent;
struct arm_boot_info bootinfo;
int smp_cpus;
void *fdt;
int fdt_size;
int psci_conduit;
PFlashCFI01 *flash[2];
} SBSAMachineState;
#define TYPE_SBSA_MACHINE MACHINE_TYPE_NAME("sbsa-ref")
#define SBSA_MACHINE(obj) \
OBJECT_CHECK(SBSAMachineState, (obj), TYPE_SBSA_MACHINE)
static const MemMapEntry sbsa_ref_memmap[] = {
/* 512M boot ROM */
[SBSA_FLASH] = { 0, 0x20000000 },
/* 512M secure memory */
[SBSA_SECURE_MEM] = { 0x20000000, 0x20000000 },
/* Space reserved for CPU peripheral devices */
[SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 },
[SBSA_GIC_DIST] = { 0x40060000, 0x00010000 },
[SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 },
[SBSA_UART] = { 0x60000000, 0x00001000 },
[SBSA_RTC] = { 0x60010000, 0x00001000 },
[SBSA_GPIO] = { 0x60020000, 0x00001000 },
[SBSA_SECURE_UART] = { 0x60030000, 0x00001000 },
[SBSA_SECURE_UART_MM] = { 0x60040000, 0x00001000 },
[SBSA_SMMU] = { 0x60050000, 0x00020000 },
/* Space here reserved for more SMMUs */
[SBSA_AHCI] = { 0x60100000, 0x00010000 },
[SBSA_EHCI] = { 0x60110000, 0x00010000 },
/* Space here reserved for other devices */
[SBSA_PCIE_PIO] = { 0x7fff0000, 0x00010000 },
/* 32-bit address PCIE MMIO space */
[SBSA_PCIE_MMIO] = { 0x80000000, 0x70000000 },
/* 256M PCIE ECAM space */
[SBSA_PCIE_ECAM] = { 0xf0000000, 0x10000000 },
/* ~1TB PCIE MMIO space (4GB to 1024GB boundary) */
[SBSA_PCIE_MMIO_HIGH] = { 0x100000000ULL, 0xFF00000000ULL },
[SBSA_MEM] = { 0x10000000000ULL, RAMLIMIT_BYTES },
};
static const int sbsa_ref_irqmap[] = {
[SBSA_UART] = 1,
[SBSA_RTC] = 2,
[SBSA_PCIE] = 3, /* ... to 6 */
[SBSA_GPIO] = 7,
[SBSA_SECURE_UART] = 8,
[SBSA_SECURE_UART_MM] = 9,
[SBSA_AHCI] = 10,
[SBSA_EHCI] = 11,
};
/*
* Firmware on this machine only uses ACPI table to load OS, these limited
* device tree nodes are just to let firmware know the info which varies from
* command line parameters, so it is not necessary to be fully compatible
* with the kernel CPU and NUMA binding rules.
*/
static void create_fdt(SBSAMachineState *sms)
{
void *fdt = create_device_tree(&sms->fdt_size);
const MachineState *ms = MACHINE(sms);
int cpu;
if (!fdt) {
error_report("create_device_tree() failed");
exit(1);
}
sms->fdt = fdt;
qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,sbsa-ref");
qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
if (have_numa_distance) {
int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
uint32_t *matrix = g_malloc0(size);
int idx, i, j;
for (i = 0; i < nb_numa_nodes; i++) {
for (j = 0; j < nb_numa_nodes; j++) {
idx = (i * nb_numa_nodes + j) * 3;
matrix[idx + 0] = cpu_to_be32(i);
matrix[idx + 1] = cpu_to_be32(j);
matrix[idx + 2] = cpu_to_be32(numa_info[i].distance[j]);
}
}
qemu_fdt_add_subnode(fdt, "/distance-map");
qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix",
matrix, size);
g_free(matrix);
}
qemu_fdt_add_subnode(sms->fdt, "/cpus");
for (cpu = sms->smp_cpus - 1; cpu >= 0; cpu--) {
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
CPUState *cs = CPU(armcpu);
qemu_fdt_add_subnode(sms->fdt, nodename);
if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
qemu_fdt_setprop_cell(sms->fdt, nodename, "numa-node-id",
ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
}
g_free(nodename);
}
}
#define SBSA_FLASH_SECTOR_SIZE (256 * KiB)
static PFlashCFI01 *sbsa_flash_create1(SBSAMachineState *sms,
const char *name,
const char *alias_prop_name)
{
/*
* Create a single flash device. We use the same parameters as
* the flash devices on the Versatile Express board.
*/
DeviceState *dev = qdev_create(NULL, TYPE_PFLASH_CFI01);
qdev_prop_set_uint64(dev, "sector-length", SBSA_FLASH_SECTOR_SIZE);
qdev_prop_set_uint8(dev, "width", 4);
qdev_prop_set_uint8(dev, "device-width", 2);
qdev_prop_set_bit(dev, "big-endian", false);
qdev_prop_set_uint16(dev, "id0", 0x89);
qdev_prop_set_uint16(dev, "id1", 0x18);
qdev_prop_set_uint16(dev, "id2", 0x00);
qdev_prop_set_uint16(dev, "id3", 0x00);
qdev_prop_set_string(dev, "name", name);
object_property_add_child(OBJECT(sms), name, OBJECT(dev),
&error_abort);
object_property_add_alias(OBJECT(sms), alias_prop_name,
OBJECT(dev), "drive", &error_abort);
return PFLASH_CFI01(dev);
}
static void sbsa_flash_create(SBSAMachineState *sms)
{
sms->flash[0] = sbsa_flash_create1(sms, "sbsa.flash0", "pflash0");
sms->flash[1] = sbsa_flash_create1(sms, "sbsa.flash1", "pflash1");
}
static void sbsa_flash_map1(PFlashCFI01 *flash,
hwaddr base, hwaddr size,
MemoryRegion *sysmem)
{
DeviceState *dev = DEVICE(flash);
assert(size % SBSA_FLASH_SECTOR_SIZE == 0);
assert(size / SBSA_FLASH_SECTOR_SIZE <= UINT32_MAX);
qdev_prop_set_uint32(dev, "num-blocks", size / SBSA_FLASH_SECTOR_SIZE);
qdev_init_nofail(dev);
memory_region_add_subregion(sysmem, base,
sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
0));
}
static void sbsa_flash_map(SBSAMachineState *sms,
MemoryRegion *sysmem,
MemoryRegion *secure_sysmem)
{
/*
* Map two flash devices to fill the SBSA_FLASH space in the memmap.
* sysmem is the system memory space. secure_sysmem is the secure view
* of the system, and the first flash device should be made visible only
* there. The second flash device is visible to both secure and nonsecure.
* If sysmem == secure_sysmem this means there is no separate Secure
* address space and both flash devices are generally visible.
*/
hwaddr flashsize = sbsa_ref_memmap[SBSA_FLASH].size / 2;
hwaddr flashbase = sbsa_ref_memmap[SBSA_FLASH].base;
sbsa_flash_map1(sms->flash[0], flashbase, flashsize,
secure_sysmem);
sbsa_flash_map1(sms->flash[1], flashbase + flashsize, flashsize,
sysmem);
}
static bool sbsa_firmware_init(SBSAMachineState *sms,
MemoryRegion *sysmem,
MemoryRegion *secure_sysmem)
{
int i;
BlockBackend *pflash_blk0;
/* Map legacy -drive if=pflash to machine properties */
for (i = 0; i < ARRAY_SIZE(sms->flash); i++) {
pflash_cfi01_legacy_drive(sms->flash[i],
drive_get(IF_PFLASH, 0, i));
}
sbsa_flash_map(sms, sysmem, secure_sysmem);
pflash_blk0 = pflash_cfi01_get_blk(sms->flash[0]);
if (bios_name) {
char *fname;
MemoryRegion *mr;
int image_size;
if (pflash_blk0) {
error_report("The contents of the first flash device may be "
"specified with -bios or with -drive if=pflash... "
"but you cannot use both options at once");
exit(1);
}
/* Fall back to -bios */
fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (!fname) {
error_report("Could not find ROM image '%s'", bios_name);
exit(1);
}
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(sms->flash[0]), 0);
image_size = load_image_mr(fname, mr);
g_free(fname);
if (image_size < 0) {
error_report("Could not load ROM image '%s'", bios_name);
exit(1);
}
}
return pflash_blk0 || bios_name;
}
static void create_secure_ram(SBSAMachineState *sms,
MemoryRegion *secure_sysmem)
{
MemoryRegion *secram = g_new(MemoryRegion, 1);
hwaddr base = sbsa_ref_memmap[SBSA_SECURE_MEM].base;
hwaddr size = sbsa_ref_memmap[SBSA_SECURE_MEM].size;
memory_region_init_ram(secram, NULL, "sbsa-ref.secure-ram", size,
&error_fatal);
memory_region_add_subregion(secure_sysmem, base, secram);
}
static void create_gic(SBSAMachineState *sms, qemu_irq *pic)
{
DeviceState *gicdev;
SysBusDevice *gicbusdev;
const char *gictype;
uint32_t redist0_capacity, redist0_count;
int i;
gictype = gicv3_class_name();
gicdev = qdev_create(NULL, gictype);
qdev_prop_set_uint32(gicdev, "revision", 3);
qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
/*
* Note that the num-irq property counts both internal and external
* interrupts; there are always 32 of the former (mandated by GIC spec).
*/
qdev_prop_set_uint32(gicdev, "num-irq", NUM_IRQS + 32);
qdev_prop_set_bit(gicdev, "has-security-extensions", true);
redist0_capacity =
sbsa_ref_memmap[SBSA_GIC_REDIST].size / GICV3_REDIST_SIZE;
redist0_count = MIN(smp_cpus, redist0_capacity);
qdev_prop_set_uint32(gicdev, "len-redist-region-count", 1);
qdev_prop_set_uint32(gicdev, "redist-region-count[0]", redist0_count);
qdev_init_nofail(gicdev);
gicbusdev = SYS_BUS_DEVICE(gicdev);
sysbus_mmio_map(gicbusdev, 0, sbsa_ref_memmap[SBSA_GIC_DIST].base);
sysbus_mmio_map(gicbusdev, 1, sbsa_ref_memmap[SBSA_GIC_REDIST].base);
/*
* Wire the outputs from each CPU's generic timer and the GICv3
* maintenance interrupt signal to the appropriate GIC PPI inputs,
* and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
*/
for (i = 0; i < smp_cpus; i++) {
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
int irq;
/*
* Mapping from the output timer irq lines from the CPU to the
* GIC PPI inputs used for this board.
*/
const int timer_irq[] = {
[GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ,
[GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
[GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
[GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
};
for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
qdev_connect_gpio_out(cpudev, irq,
qdev_get_gpio_in(gicdev,
ppibase + timer_irq[irq]));
}
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
qdev_get_gpio_in(gicdev, ppibase
+ ARCH_GIC_MAINT_IRQ));
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
qdev_get_gpio_in(gicdev, ppibase
+ VIRTUAL_PMU_IRQ));
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
sysbus_connect_irq(gicbusdev, i + smp_cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
}
for (i = 0; i < NUM_IRQS; i++) {
pic[i] = qdev_get_gpio_in(gicdev, i);
}
}
static void create_uart(const SBSAMachineState *sms, qemu_irq *pic, int uart,
MemoryRegion *mem, Chardev *chr)
{
hwaddr base = sbsa_ref_memmap[uart].base;
int irq = sbsa_ref_irqmap[uart];
DeviceState *dev = qdev_create(NULL, "pl011");
SysBusDevice *s = SYS_BUS_DEVICE(dev);
qdev_prop_set_chr(dev, "chardev", chr);
qdev_init_nofail(dev);
memory_region_add_subregion(mem, base,
sysbus_mmio_get_region(s, 0));
sysbus_connect_irq(s, 0, pic[irq]);
}
static void create_rtc(const SBSAMachineState *sms, qemu_irq *pic)
{
hwaddr base = sbsa_ref_memmap[SBSA_RTC].base;
int irq = sbsa_ref_irqmap[SBSA_RTC];
sysbus_create_simple("pl031", base, pic[irq]);
}
static DeviceState *gpio_key_dev;
static void sbsa_ref_powerdown_req(Notifier *n, void *opaque)
{
/* use gpio Pin 3 for power button event */
qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1);
}
static Notifier sbsa_ref_powerdown_notifier = {
.notify = sbsa_ref_powerdown_req
};
static void create_gpio(const SBSAMachineState *sms, qemu_irq *pic)
{
DeviceState *pl061_dev;
hwaddr base = sbsa_ref_memmap[SBSA_GPIO].base;
int irq = sbsa_ref_irqmap[SBSA_GPIO];
pl061_dev = sysbus_create_simple("pl061", base, pic[irq]);
gpio_key_dev = sysbus_create_simple("gpio-key", -1,
qdev_get_gpio_in(pl061_dev, 3));
/* connect powerdown request */
qemu_register_powerdown_notifier(&sbsa_ref_powerdown_notifier);
}
static void create_ahci(const SBSAMachineState *sms, qemu_irq *pic)
{
hwaddr base = sbsa_ref_memmap[SBSA_AHCI].base;
int irq = sbsa_ref_irqmap[SBSA_AHCI];
DeviceState *dev;
DriveInfo *hd[NUM_SATA_PORTS];
SysbusAHCIState *sysahci;
AHCIState *ahci;
int i;
dev = qdev_create(NULL, "sysbus-ahci");
qdev_prop_set_uint32(dev, "num-ports", NUM_SATA_PORTS);
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irq]);
sysahci = SYSBUS_AHCI(dev);
ahci = &sysahci->ahci;
ide_drive_get(hd, ARRAY_SIZE(hd));
for (i = 0; i < ahci->ports; i++) {
if (hd[i] == NULL) {
continue;
}
ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
}
}
static void create_ehci(const SBSAMachineState *sms, qemu_irq *pic)
{
hwaddr base = sbsa_ref_memmap[SBSA_EHCI].base;
int irq = sbsa_ref_irqmap[SBSA_EHCI];
sysbus_create_simple("platform-ehci-usb", base, pic[irq]);
}
static void create_smmu(const SBSAMachineState *sms, qemu_irq *pic,
PCIBus *bus)
{
hwaddr base = sbsa_ref_memmap[SBSA_SMMU].base;
int irq = sbsa_ref_irqmap[SBSA_SMMU];
DeviceState *dev;
int i;
dev = qdev_create(NULL, "arm-smmuv3");
object_property_set_link(OBJECT(dev), OBJECT(bus), "primary-bus",
&error_abort);
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
for (i = 0; i < NUM_SMMU_IRQS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
}
}
static void create_pcie(SBSAMachineState *sms, qemu_irq *pic)
{
hwaddr base_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].base;
hwaddr size_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].size;
hwaddr base_mmio = sbsa_ref_memmap[SBSA_PCIE_MMIO].base;
hwaddr size_mmio = sbsa_ref_memmap[SBSA_PCIE_MMIO].size;
hwaddr base_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].base;
hwaddr size_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].size;
hwaddr base_pio = sbsa_ref_memmap[SBSA_PCIE_PIO].base;
int irq = sbsa_ref_irqmap[SBSA_PCIE];
MemoryRegion *mmio_alias, *mmio_alias_high, *mmio_reg;
MemoryRegion *ecam_alias, *ecam_reg;
DeviceState *dev;
PCIHostState *pci;
int i;
dev = qdev_create(NULL, TYPE_GPEX_HOST);
qdev_init_nofail(dev);
/* Map ECAM space */
ecam_alias = g_new0(MemoryRegion, 1);
ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
ecam_reg, 0, size_ecam);
memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias);
/* Map the MMIO space */
mmio_alias = g_new0(MemoryRegion, 1);
mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
mmio_reg, base_mmio, size_mmio);
memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
/* Map the MMIO_HIGH space */
mmio_alias_high = g_new0(MemoryRegion, 1);
memory_region_init_alias(mmio_alias_high, OBJECT(dev), "pcie-mmio-high",
mmio_reg, base_mmio_high, size_mmio_high);
memory_region_add_subregion(get_system_memory(), base_mmio_high,
mmio_alias_high);
/* Map IO port space */
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);
for (i = 0; i < GPEX_NUM_IRQS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
gpex_set_irq_num(GPEX_HOST(dev), i, irq + i);
}
pci = PCI_HOST_BRIDGE(dev);
if (pci->bus) {
for (i = 0; i < nb_nics; i++) {
NICInfo *nd = &nd_table[i];
if (!nd->model) {
nd->model = g_strdup("e1000e");
}
pci_nic_init_nofail(nd, pci->bus, nd->model, NULL);
}
}
pci_create_simple(pci->bus, -1, "VGA");
create_smmu(sms, pic, pci->bus);
}
static void *sbsa_ref_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
const SBSAMachineState *board = container_of(binfo, SBSAMachineState,
bootinfo);
*fdt_size = board->fdt_size;
return board->fdt;
}
static void sbsa_ref_init(MachineState *machine)
{
SBSAMachineState *sms = SBSA_MACHINE(machine);
MachineClass *mc = MACHINE_GET_CLASS(machine);
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *secure_sysmem = NULL;
MemoryRegion *ram = g_new(MemoryRegion, 1);
bool firmware_loaded;
const CPUArchIdList *possible_cpus;
int n, sbsa_max_cpus;
qemu_irq pic[NUM_IRQS];
if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a57"))) {
error_report("sbsa-ref: CPU type other than the built-in "
"cortex-a57 not supported");
exit(1);
}
if (kvm_enabled()) {
error_report("sbsa-ref: KVM is not supported for this machine");
exit(1);
}
/*
* The Secure view of the world is the same as the NonSecure,
* but with a few extra devices. Create it as a container region
* containing the system memory at low priority; any secure-only
* devices go in at higher priority and take precedence.
*/
secure_sysmem = g_new(MemoryRegion, 1);
memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory",
UINT64_MAX);
memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1);
firmware_loaded = sbsa_firmware_init(sms, sysmem,
secure_sysmem ?: sysmem);
if (machine->kernel_filename && firmware_loaded) {
error_report("sbsa-ref: No fw_cfg device on this machine, "
"so -kernel option is not supported when firmware loaded, "
"please load OS from hard disk instead");
exit(1);
}
/*
* This machine has EL3 enabled, external firmware should supply PSCI
* implementation, so the QEMU's internal PSCI is disabled.
*/
sms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
sbsa_max_cpus = sbsa_ref_memmap[SBSA_GIC_REDIST].size / GICV3_REDIST_SIZE;
if (max_cpus > sbsa_max_cpus) {
error_report("Number of SMP CPUs requested (%d) exceeds max CPUs "
"supported by machine 'sbsa-ref' (%d)",
max_cpus, sbsa_max_cpus);
exit(1);
}
sms->smp_cpus = smp_cpus;
if (machine->ram_size > sbsa_ref_memmap[SBSA_MEM].size) {
error_report("sbsa-ref: cannot model more than %dGB RAM", RAMLIMIT_GB);
exit(1);
}
possible_cpus = mc->possible_cpu_arch_ids(machine);
for (n = 0; n < possible_cpus->len; n++) {
Object *cpuobj;
CPUState *cs;
if (n >= smp_cpus) {
break;
}
cpuobj = object_new(possible_cpus->cpus[n].type);
object_property_set_int(cpuobj, possible_cpus->cpus[n].arch_id,
"mp-affinity", NULL);
cs = CPU(cpuobj);
cs->cpu_index = n;
numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
&error_fatal);
if (object_property_find(cpuobj, "reset-cbar", NULL)) {
object_property_set_int(cpuobj,
sbsa_ref_memmap[SBSA_CPUPERIPHS].base,
"reset-cbar", &error_abort);
}
object_property_set_link(cpuobj, OBJECT(sysmem), "memory",
&error_abort);
object_property_set_link(cpuobj, OBJECT(secure_sysmem),
"secure-memory", &error_abort);
object_property_set_bool(cpuobj, true, "realized", &error_fatal);
object_unref(cpuobj);
}
memory_region_allocate_system_memory(ram, NULL, "sbsa-ref.ram",
machine->ram_size);
memory_region_add_subregion(sysmem, sbsa_ref_memmap[SBSA_MEM].base, ram);
create_fdt(sms);
create_secure_ram(sms, secure_sysmem);
create_gic(sms, pic);
create_uart(sms, pic, SBSA_UART, sysmem, serial_hd(0));
create_uart(sms, pic, SBSA_SECURE_UART, secure_sysmem, serial_hd(1));
/* Second secure UART for RAS and MM from EL0 */
create_uart(sms, pic, SBSA_SECURE_UART_MM, secure_sysmem, serial_hd(2));
create_rtc(sms, pic);
create_gpio(sms, pic);
create_ahci(sms, pic);
create_ehci(sms, pic);
create_pcie(sms, pic);
sms->bootinfo.ram_size = machine->ram_size;
sms->bootinfo.kernel_filename = machine->kernel_filename;
sms->bootinfo.nb_cpus = smp_cpus;
sms->bootinfo.board_id = -1;
sms->bootinfo.loader_start = sbsa_ref_memmap[SBSA_MEM].base;
sms->bootinfo.get_dtb = sbsa_ref_dtb;
sms->bootinfo.firmware_loaded = firmware_loaded;
arm_load_kernel(ARM_CPU(first_cpu), &sms->bootinfo);
}
static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
{
uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
return arm_cpu_mp_affinity(idx, clustersz);
}
static const CPUArchIdList *sbsa_ref_possible_cpu_arch_ids(MachineState *ms)
{
SBSAMachineState *sms = SBSA_MACHINE(ms);
int n;
if (ms->possible_cpus) {
assert(ms->possible_cpus->len == max_cpus);
return ms->possible_cpus;
}
ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
sizeof(CPUArchId) * max_cpus);
ms->possible_cpus->len = max_cpus;
for (n = 0; n < ms->possible_cpus->len; n++) {
ms->possible_cpus->cpus[n].type = ms->cpu_type;
ms->possible_cpus->cpus[n].arch_id =
sbsa_ref_cpu_mp_affinity(sms, n);
ms->possible_cpus->cpus[n].props.has_thread_id = true;
ms->possible_cpus->cpus[n].props.thread_id = n;
}
return ms->possible_cpus;
}
static CpuInstanceProperties
sbsa_ref_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
assert(cpu_index < possible_cpus->len);
return possible_cpus->cpus[cpu_index].props;
}
static int64_t
sbsa_ref_get_default_cpu_node_id(const MachineState *ms, int idx)
{
return idx % nb_numa_nodes;
}
static void sbsa_ref_instance_init(Object *obj)
{
SBSAMachineState *sms = SBSA_MACHINE(obj);
sbsa_flash_create(sms);
}
static void sbsa_ref_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->init = sbsa_ref_init;
mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine";
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a57");
mc->max_cpus = 512;
mc->pci_allow_0_address = true;
mc->minimum_page_bits = 12;
mc->block_default_type = IF_IDE;
mc->no_cdrom = 1;
mc->default_ram_size = 1 * GiB;
mc->default_cpus = 4;
mc->possible_cpu_arch_ids = sbsa_ref_possible_cpu_arch_ids;
mc->cpu_index_to_instance_props = sbsa_ref_cpu_index_to_props;
mc->get_default_cpu_node_id = sbsa_ref_get_default_cpu_node_id;
}
static const TypeInfo sbsa_ref_info = {
.name = TYPE_SBSA_MACHINE,
.parent = TYPE_MACHINE,
.instance_init = sbsa_ref_instance_init,
.class_init = sbsa_ref_class_init,
.instance_size = sizeof(SBSAMachineState),
};
static void sbsa_ref_machine_init(void)
{
type_register_static(&sbsa_ref_info);
}
type_init(sbsa_ref_machine_init);

hw/arm/virt.c

@@ -176,6 +176,7 @@ static const int a15irqmap[] = {
 };
 
 static const char *valid_cpus[] = {
+    ARM_CPU_TYPE_NAME("cortex-a7"),
     ARM_CPU_TYPE_NAME("cortex-a15"),
     ARM_CPU_TYPE_NAME("cortex-a53"),
     ARM_CPU_TYPE_NAME("cortex-a57"),

hw/intc/aspeed_vic.c

@@ -104,54 +104,63 @@ static void aspeed_vic_set_irq(void *opaque, int irq, int level)
 
 static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size)
 {
-    uint64_t val;
-    const bool high = !!(offset & 0x4);
-    hwaddr n_offset = (offset & ~0x4);
     AspeedVICState *s = (AspeedVICState *)opaque;
+    hwaddr n_offset;
+    uint64_t val;
+    bool high;
 
     if (offset < AVIC_NEW_BASE_OFFSET) {
-        qemu_log_mask(LOG_UNIMP, "%s: Ignoring read from legacy registers "
-                      "at 0x%" HWADDR_PRIx "[%u]\n", __func__, offset, size);
-        return 0;
+        high = false;
+        n_offset = offset;
+    } else {
+        high = !!(offset & 0x4);
+        n_offset = (offset & ~0x4);
     }
 
-    n_offset -= AVIC_NEW_BASE_OFFSET;
-
     switch (n_offset) {
-    case 0x0: /* IRQ Status */
+    case 0x80: /* IRQ Status */
+    case 0x00:
         val = s->raw & ~s->select & s->enable;
         break;
-    case 0x08: /* FIQ Status */
+    case 0x88: /* FIQ Status */
+    case 0x04:
         val = s->raw & s->select & s->enable;
         break;
-    case 0x10: /* Raw Interrupt Status */
+    case 0x90: /* Raw Interrupt Status */
+    case 0x08:
         val = s->raw;
         break;
-    case 0x18: /* Interrupt Selection */
+    case 0x98: /* Interrupt Selection */
+    case 0x0c:
         val = s->select;
         break;
-    case 0x20: /* Interrupt Enable */
+    case 0xa0: /* Interrupt Enable */
+    case 0x10:
         val = s->enable;
         break;
-    case 0x30: /* Software Interrupt */
+    case 0xb0: /* Software Interrupt */
+    case 0x18:
         val = s->trigger;
         break;
-    case 0x40: /* Interrupt Sensitivity */
+    case 0xc0: /* Interrupt Sensitivity */
+    case 0x24:
         val = s->sense;
         break;
-    case 0x48: /* Interrupt Both Edge Trigger Control */
+    case 0xc8: /* Interrupt Both Edge Trigger Control */
+    case 0x28:
         val = s->dual_edge;
         break;
-    case 0x50: /* Interrupt Event */
+    case 0xd0: /* Interrupt Event */
+    case 0x2c:
         val = s->event;
         break;
-    case 0x60: /* Edge Triggered Interrupt Status */
+    case 0xe0: /* Edge Triggered Interrupt Status */
         val = s->raw & ~s->sense;
         break;
         /* Illegal */
-    case 0x28: /* Interrupt Enable Clear */
-    case 0x38: /* Software Interrupt Clear */
-    case 0x58: /* Edge Triggered Interrupt Clear */
+    case 0xa8: /* Interrupt Enable Clear */
+    case 0xb8: /* Software Interrupt Clear */
+    case 0xd8: /* Edge Triggered Interrupt Clear */
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s: Read of write-only register with offset 0x%"
                       HWADDR_PRIx "\n", __func__, offset);
@ -166,6 +175,8 @@ static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size)
} }
if (high) { if (high) {
val = extract64(val, 32, 19); val = extract64(val, 32, 19);
} else {
val = extract64(val, 0, 32);
} }
trace_aspeed_vic_read(offset, size, val); trace_aspeed_vic_read(offset, size, val);
return val; return val;
@ -174,19 +185,18 @@ static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size)
static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data, static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data,
unsigned size) unsigned size)
{ {
const bool high = !!(offset & 0x4);
hwaddr n_offset = (offset & ~0x4);
AspeedVICState *s = (AspeedVICState *)opaque; AspeedVICState *s = (AspeedVICState *)opaque;
hwaddr n_offset;
bool high;
if (offset < AVIC_NEW_BASE_OFFSET) { if (offset < AVIC_NEW_BASE_OFFSET) {
qemu_log_mask(LOG_UNIMP, high = false;
"%s: Ignoring write to legacy registers at 0x%" n_offset = offset;
HWADDR_PRIx "[%u] <- 0x%" PRIx64 "\n", __func__, offset, } else {
size, data); high = !!(offset & 0x4);
return; n_offset = (offset & ~0x4);
} }
n_offset -= AVIC_NEW_BASE_OFFSET;
trace_aspeed_vic_write(offset, size, data); trace_aspeed_vic_write(offset, size, data);
/* Given we have members using separate enable/clear registers, deposit64() /* Given we have members using separate enable/clear registers, deposit64()
@ -201,7 +211,8 @@ static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data,
} }
switch (n_offset) { switch (n_offset) {
case 0x18: /* Interrupt Selection */ case 0x98: /* Interrupt Selection */
case 0x0c:
/* Register has deposit64() semantics - overwrite requested 32 bits */ /* Register has deposit64() semantics - overwrite requested 32 bits */
if (high) { if (high) {
s->select &= AVIC_L_MASK; s->select &= AVIC_L_MASK;
@ -210,21 +221,25 @@ static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data,
} }
s->select |= data; s->select |= data;
break; break;
case 0x20: /* Interrupt Enable */ case 0xa0: /* Interrupt Enable */
case 0x10:
s->enable |= data; s->enable |= data;
break; break;
case 0x28: /* Interrupt Enable Clear */ case 0xa8: /* Interrupt Enable Clear */
case 0x14:
s->enable &= ~data; s->enable &= ~data;
break; break;
case 0x30: /* Software Interrupt */ case 0xb0: /* Software Interrupt */
case 0x18:
qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. " qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. "
"IRQs requested: 0x%016" PRIx64 "\n", __func__, data); "IRQs requested: 0x%016" PRIx64 "\n", __func__, data);
break; break;
case 0x38: /* Software Interrupt Clear */ case 0xb8: /* Software Interrupt Clear */
case 0x1c:
qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. " qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. "
"IRQs to be cleared: 0x%016" PRIx64 "\n", __func__, data); "IRQs to be cleared: 0x%016" PRIx64 "\n", __func__, data);
break; break;
case 0x50: /* Interrupt Event */ case 0xd0: /* Interrupt Event */
/* Register has deposit64() semantics - overwrite the top four valid /* Register has deposit64() semantics - overwrite the top four valid
* IRQ bits, as only the top four IRQs (GPIOs) can change their event * IRQ bits, as only the top four IRQs (GPIOs) can change their event
* type */ * type */
@ -236,15 +251,21 @@ static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data,
"Ignoring invalid write to interrupt event register"); "Ignoring invalid write to interrupt event register");
} }
break; break;
case 0x58: /* Edge Triggered Interrupt Clear */ case 0xd8: /* Edge Triggered Interrupt Clear */
case 0x38:
s->raw &= ~(data & ~s->sense); s->raw &= ~(data & ~s->sense);
break; break;
case 0x00: /* IRQ Status */ case 0x80: /* IRQ Status */
case 0x08: /* FIQ Status */ case 0x00:
case 0x10: /* Raw Interrupt Status */ case 0x88: /* FIQ Status */
case 0x40: /* Interrupt Sensitivity */ case 0x04:
case 0x48: /* Interrupt Both Edge Trigger Control */ case 0x90: /* Raw Interrupt Status */
case 0x60: /* Edge Triggered Interrupt Status */ case 0x08:
case 0xc0: /* Interrupt Sensitivity */
case 0x24:
case 0xc8: /* Interrupt Both Edge Trigger Control */
case 0x28:
case 0xe0: /* Edge Triggered Interrupt Status */
qemu_log_mask(LOG_GUEST_ERROR, qemu_log_mask(LOG_GUEST_ERROR,
"%s: Write of read-only register with offset 0x%" "%s: Write of read-only register with offset 0x%"
HWADDR_PRIx "\n", __func__, offset); HWADDR_PRIx "\n", __func__, offset);
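[Editor's note: the upshot of this patch is that every VIC register is now reachable both at its legacy offset (below AVIC_NEW_BASE_OFFSET, 0x80) and at its new offset, where bit 2 selects the high word covering IRQs 32-50, hence the extract64(val, 32, 19) above. A standalone sketch of the decode, restating the patch's own logic rather than adding behaviour:]

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define AVIC_NEW_BASE_OFFSET 0x80 /* value taken from aspeed_vic.c */

/* Mirrors the decode added above: legacy registers live below 0x80 and
 * are 32-bit only; new registers pair a low word with a high word
 * selected by bit 2 of the offset. */
static void decode(uint64_t offset, bool *high, uint64_t *n_offset)
{
    if (offset < AVIC_NEW_BASE_OFFSET) {
        *high = false;
        *n_offset = offset;
    } else {
        *high = !!(offset & 0x4);
        *n_offset = offset & ~(uint64_t)0x4;
    }
}

int main(void)
{
    bool high;
    uint64_t n;

    decode(0x00, &high, &n); /* legacy IRQ Status: n=0x00, high=false */
    decode(0x80, &high, &n); /* new IRQ Status, IRQs 0-31: n=0x80 */
    decode(0x84, &high, &n); /* new IRQ Status, IRQs 32-50: high=true */
    printf("0x84 -> n_offset=0x%" PRIx64 " high=%d\n", n, (int)high);
    return 0;
}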


@@ -74,6 +74,7 @@ obj-$(CONFIG_ARMSSE_MHU) += armsse-mhu.o
 obj-$(CONFIG_PVPANIC) += pvpanic.o
 obj-$(CONFIG_AUX) += auxbus.o
+obj-$(CONFIG_ASPEED_SOC) += aspeed_xdma.o
 obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o
 obj-$(CONFIG_MSF2) += msf2-sysreg.o
 obj-$(CONFIG_NRF51_SOC) += nrf51_rng.o

hw/misc/aspeed_xdma.c (new file)

@@ -0,0 +1,165 @@
/*
 * ASPEED XDMA Controller
 * Eddie James <eajames@linux.ibm.com>
 *
 * Copyright (C) 2019 IBM Corp
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_xdma.h"
#include "qapi/error.h"

#include "trace.h"

#define XDMA_BMC_CMDQ_ADDR         0x10
#define XDMA_BMC_CMDQ_ENDP         0x14
#define XDMA_BMC_CMDQ_WRP          0x18
#define XDMA_BMC_CMDQ_W_MASK       0x0003FFFF
#define XDMA_BMC_CMDQ_RDP          0x1C
#define XDMA_BMC_CMDQ_RDP_MAGIC    0xEE882266
#define XDMA_IRQ_ENG_CTRL          0x20
#define XDMA_IRQ_ENG_CTRL_US_COMP  BIT(4)
#define XDMA_IRQ_ENG_CTRL_DS_COMP  BIT(5)
#define XDMA_IRQ_ENG_CTRL_W_MASK   0xBFEFF07F
#define XDMA_IRQ_ENG_STAT          0x24
#define XDMA_IRQ_ENG_STAT_US_COMP  BIT(4)
#define XDMA_IRQ_ENG_STAT_DS_COMP  BIT(5)
#define XDMA_IRQ_ENG_STAT_RESET    0xF8000000
#define XDMA_MEM_SIZE              0x1000

#define TO_REG(addr) ((addr) / sizeof(uint32_t))

static uint64_t aspeed_xdma_read(void *opaque, hwaddr addr, unsigned int size)
{
    uint32_t val = 0;
    AspeedXDMAState *xdma = opaque;

    if (addr < ASPEED_XDMA_REG_SIZE) {
        val = xdma->regs[TO_REG(addr)];
    }

    return (uint64_t)val;
}

static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned int size)
{
    unsigned int idx;
    uint32_t val32 = (uint32_t)val;
    AspeedXDMAState *xdma = opaque;

    if (addr >= ASPEED_XDMA_REG_SIZE) {
        return;
    }

    switch (addr) {
    case XDMA_BMC_CMDQ_ENDP:
        xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK;
        break;
    case XDMA_BMC_CMDQ_WRP:
        idx = TO_REG(addr);
        xdma->regs[idx] = val32 & XDMA_BMC_CMDQ_W_MASK;
        xdma->regs[TO_REG(XDMA_BMC_CMDQ_RDP)] = xdma->regs[idx];

        trace_aspeed_xdma_write(addr, val);

        if (xdma->bmc_cmdq_readp_set) {
            xdma->bmc_cmdq_readp_set = 0;
        } else {
            xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] |=
                XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;

            if (xdma->regs[TO_REG(XDMA_IRQ_ENG_CTRL)] &
                (XDMA_IRQ_ENG_CTRL_US_COMP | XDMA_IRQ_ENG_CTRL_DS_COMP))
                qemu_irq_raise(xdma->irq);
        }
        break;
    case XDMA_BMC_CMDQ_RDP:
        trace_aspeed_xdma_write(addr, val);

        if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) {
            xdma->bmc_cmdq_readp_set = 1;
        }
        break;
    case XDMA_IRQ_ENG_CTRL:
        xdma->regs[TO_REG(addr)] = val32 & XDMA_IRQ_ENG_CTRL_W_MASK;
        break;
    case XDMA_IRQ_ENG_STAT:
        trace_aspeed_xdma_write(addr, val);

        idx = TO_REG(addr);
        if (val32 & (XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP)) {
            xdma->regs[idx] &=
                ~(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP);
            qemu_irq_lower(xdma->irq);
        }
        break;
    default:
        xdma->regs[TO_REG(addr)] = val32;
        break;
    }
}

static const MemoryRegionOps aspeed_xdma_ops = {
    .read = aspeed_xdma_read,
    .write = aspeed_xdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
};

static void aspeed_xdma_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedXDMAState *xdma = ASPEED_XDMA(dev);

    sysbus_init_irq(sbd, &xdma->irq);
    memory_region_init_io(&xdma->iomem, OBJECT(xdma), &aspeed_xdma_ops, xdma,
                          TYPE_ASPEED_XDMA, XDMA_MEM_SIZE);
    sysbus_init_mmio(sbd, &xdma->iomem);
}

static void aspeed_xdma_reset(DeviceState *dev)
{
    AspeedXDMAState *xdma = ASPEED_XDMA(dev);

    xdma->bmc_cmdq_readp_set = 0;
    memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE);
    xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] = XDMA_IRQ_ENG_STAT_RESET;

    qemu_irq_lower(xdma->irq);
}

static const VMStateDescription aspeed_xdma_vmstate = {
    .name = TYPE_ASPEED_XDMA,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS),
        VMSTATE_END_OF_LIST(),
    },
};

static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(classp);

    dc->realize = aspeed_xdma_realize;
    dc->reset = aspeed_xdma_reset;
    dc->vmsd = &aspeed_xdma_vmstate;
}

static const TypeInfo aspeed_xdma_info = {
    .name          = TYPE_ASPEED_XDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedXDMAState),
    .class_init    = aspeed_xdma_class_init,
};

static void aspeed_xdma_register_type(void)
{
    type_register_static(&aspeed_xdma_info);
}
type_init(aspeed_xdma_register_type);
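[Editor's note: as modeled above, the BMC command-queue handshake is: firmware writes the magic value to the read pointer once to arm it, then each write to the write pointer mirrors into the read pointer and, unless the magic arm was pending, latches the completion bits and raises the IRQ when enabled. A toy, runnable sketch of that poke sequence against a fake register file (mmio_write32() is a stand-in, not a QEMU API):]

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the model's MMIO window. */
static uint32_t regs[0x7C / 4];

static void mmio_write32(uint32_t offset, uint32_t value)
{
    regs[offset / 4] = value;
    printf("write 0x%02x <- 0x%08x\n", offset, value);
}

int main(void)
{
    /* 1. Arm the read pointer with XDMA_BMC_CMDQ_RDP_MAGIC; the model
     *    then swallows the completion IRQ for the next WRP update. */
    mmio_write32(0x1C, 0xEE882266);

    /* 2. Publish a new write pointer; the model mirrors it into RDP. */
    mmio_write32(0x18, 0x40 & 0x0003FFFF);

    /* 3. A later WRP write with no magic pending completes the transfer:
     *    the model sets the US/DS bits in XDMA_IRQ_ENG_STAT and raises
     *    its IRQ if the corresponding XDMA_IRQ_ENG_CTRL bits are set. */
    mmio_write32(0x18, 0x80 & 0x0003FFFF);
    return 0;
}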


@@ -140,3 +140,6 @@ armsse_cpuid_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_I
 # armsse-mhu.c
 armsse_mhu_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
 armsse_mhu_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+
+# aspeed_xdma.c
+aspeed_xdma_write(uint64_t offset, uint64_t data) "XDMA write: offset 0x%" PRIx64 " data 0x%" PRIx64


@@ -51,6 +51,8 @@
 #define DESIGNWARE_PCIE_ATU_DEVFN(x)     (((x) >> 16) & 0xff)
 #define DESIGNWARE_PCIE_ATU_UPPER_TARGET 0x91C

+#define DESIGNWARE_PCIE_IRQ_MSI          3
+
 static DesignwarePCIEHost *
 designware_pcie_root_to_host(DesignwarePCIERoot *root)
 {

@@ -67,7 +69,7 @@ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
     root->msi.intr[0].status |= BIT(val) & root->msi.intr[0].enable;

     if (root->msi.intr[0].status & ~root->msi.intr[0].mask) {
-        qemu_set_irq(host->pci.irqs[0], 1);
+        qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 1);
     }
 }

@@ -290,23 +292,19 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address,
     case DESIGNWARE_PCIE_MSI_ADDR_LO:
         root->msi.base &= 0xFFFFFFFF00000000ULL;
         root->msi.base |= val;
+        designware_pcie_root_update_msi_mapping(root);
         break;

     case DESIGNWARE_PCIE_MSI_ADDR_HI:
         root->msi.base &= 0x00000000FFFFFFFFULL;
         root->msi.base |= (uint64_t)val << 32;
-        break;
-
-    case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: {
-        const bool update_msi_mapping = !root->msi.intr[0].enable ^ !!val;
-
-        root->msi.intr[0].enable = val;
-
-        if (update_msi_mapping) {
-            designware_pcie_root_update_msi_mapping(root);
-        }
+        designware_pcie_root_update_msi_mapping(root);
         break;
-    }
+
+    case DESIGNWARE_PCIE_MSI_INTR0_ENABLE:
+        root->msi.intr[0].enable = val;
+        designware_pcie_root_update_msi_mapping(root);
+        break;

     case DESIGNWARE_PCIE_MSI_INTR0_MASK:
         root->msi.intr[0].mask = val;

@@ -315,7 +313,7 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address,
     case DESIGNWARE_PCIE_MSI_INTR0_STATUS:
         root->msi.intr[0].status ^= val;
         if (!root->msi.intr[0].status) {
-            qemu_set_irq(host->pci.irqs[0], 0);
+            qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 0);
         }
         break;
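[Editor's note: for orientation, this model treats an MSI as a 32-bit write of the vector number to the doorbell address. designware_pcie_root_msi_write() latches BIT(val) into the status register, gated by the enable mask, and asserts the host line now named by DESIGNWARE_PCIE_IRQ_MSI while any unmasked status bit is set. A toy restatement of that latch, not the QEMU code itself:]

#include <stdint.h>
#include <stdio.h>

/* Toy MSI latch mirroring the handler above: 'vector' is the value a
 * device writes to the MSI doorbell. Returns 1 when the summary IRQ
 * should be asserted. */
static int msi_doorbell(uint32_t *status, uint32_t enable, uint32_t mask,
                        uint32_t vector)
{
    *status |= (1u << vector) & enable;
    return (*status & ~mask) != 0;
}

int main(void)
{
    uint32_t status = 0;

    /* Vector 3 enabled and unmasked: the IRQ line goes up. */
    printf("%d\n", msi_doorbell(&status, 1u << 3, 0, 3));
    return 0;
}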


@@ -913,6 +913,7 @@ static const VMStateDescription vmstate_aspeed_smc = {

 static Property aspeed_smc_properties[] = {
     DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1),
+    DEFINE_PROP_UINT64("sdram-base", AspeedSMCState, sdram_base, 0),
     DEFINE_PROP_END_OF_LIST(),
 };


@@ -41,7 +41,7 @@ obj-$(CONFIG_MC146818RTC) += mc146818rtc.o
 obj-$(CONFIG_ALLWINNER_A10_PIT) += allwinner-a10-pit.o
 common-obj-$(CONFIG_STM32F2XX_TIMER) += stm32f2xx_timer.o
-common-obj-$(CONFIG_ASPEED_SOC) += aspeed_timer.o
+common-obj-$(CONFIG_ASPEED_SOC) += aspeed_timer.o aspeed_rtc.o
 common-obj-$(CONFIG_SUN4V_RTC) += sun4v-rtc.o
 common-obj-$(CONFIG_CMSDK_APB_TIMER) += cmsdk-apb-timer.o

hw/timer/aspeed_rtc.c (new file)

@@ -0,0 +1,180 @@
/*
 * ASPEED Real Time Clock
 * Joel Stanley <joel@jms.id.au>
 *
 * Copyright 2019 IBM Corp
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/timer/aspeed_rtc.h"
#include "qemu/log.h"
#include "qemu/timer.h"

#include "trace.h"

#define COUNTER1        (0x00 / 4)
#define COUNTER2        (0x04 / 4)
#define ALARM           (0x08 / 4)
#define CONTROL         (0x10 / 4)
#define ALARM_STATUS    (0x14 / 4)

#define RTC_UNLOCKED    BIT(1)
#define RTC_ENABLED     BIT(0)

static void aspeed_rtc_calc_offset(AspeedRtcState *rtc)
{
    struct tm tm;
    uint32_t year, cent;
    uint32_t reg1 = rtc->reg[COUNTER1];
    uint32_t reg2 = rtc->reg[COUNTER2];

    tm.tm_mday = (reg1 >> 24) & 0x1f;
    tm.tm_hour = (reg1 >> 16) & 0x1f;
    tm.tm_min = (reg1 >> 8) & 0x3f;
    tm.tm_sec = (reg1 >> 0) & 0x3f;

    cent = (reg2 >> 16) & 0x1f;
    year = (reg2 >> 8) & 0x7f;
    tm.tm_mon = ((reg2 >> 0) & 0x0f) - 1;
    tm.tm_year = year + (cent * 100) - 1900;

    rtc->offset = qemu_timedate_diff(&tm);
}

static uint32_t aspeed_rtc_get_counter(AspeedRtcState *rtc, int r)
{
    uint32_t year, cent;
    struct tm now;

    qemu_get_timedate(&now, rtc->offset);

    switch (r) {
    case COUNTER1:
        return (now.tm_mday << 24) | (now.tm_hour << 16) |
            (now.tm_min << 8) | now.tm_sec;
    case COUNTER2:
        cent = (now.tm_year + 1900) / 100;
        year = now.tm_year % 100;
        return ((cent & 0x1f) << 16) | ((year & 0x7f) << 8) |
            ((now.tm_mon + 1) & 0xf);
    default:
        g_assert_not_reached();
    }
}

static uint64_t aspeed_rtc_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    AspeedRtcState *rtc = opaque;
    uint64_t val;
    uint32_t r = addr >> 2;

    switch (r) {
    case COUNTER1:
    case COUNTER2:
        if (rtc->reg[CONTROL] & RTC_ENABLED) {
            rtc->reg[r] = aspeed_rtc_get_counter(rtc, r);
        }
        /* fall through */
    case CONTROL:
        val = rtc->reg[r];
        break;
    case ALARM:
    case ALARM_STATUS:
    default:
        qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx "\n", __func__, addr);
        return 0;
    }

    trace_aspeed_rtc_read(addr, val);

    return val;
}

static void aspeed_rtc_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    AspeedRtcState *rtc = opaque;
    uint32_t r = addr >> 2;

    switch (r) {
    case COUNTER1:
    case COUNTER2:
        if (!(rtc->reg[CONTROL] & RTC_UNLOCKED)) {
            break;
        }
        /* fall through */
    case CONTROL:
        rtc->reg[r] = val;
        aspeed_rtc_calc_offset(rtc);
        break;
    case ALARM:
    case ALARM_STATUS:
    default:
        qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx "\n", __func__, addr);
        break;
    }
    trace_aspeed_rtc_write(addr, val);
}

static void aspeed_rtc_reset(DeviceState *d)
{
    AspeedRtcState *rtc = ASPEED_RTC(d);

    rtc->offset = 0;
    memset(rtc->reg, 0, sizeof(rtc->reg));
}

static const MemoryRegionOps aspeed_rtc_ops = {
    .read = aspeed_rtc_read,
    .write = aspeed_rtc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const VMStateDescription vmstate_aspeed_rtc = {
    .name = TYPE_ASPEED_RTC,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18),
        VMSTATE_INT32(offset, AspeedRtcState),
        VMSTATE_INT32(offset, AspeedRtcState),
        VMSTATE_END_OF_LIST()
    }
};

static void aspeed_rtc_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedRtcState *s = ASPEED_RTC(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_rtc_ops, s,
                          "aspeed-rtc", 0x18ULL);
    sysbus_init_mmio(sbd, &s->iomem);
}

static void aspeed_rtc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_rtc_realize;
    dc->vmsd = &vmstate_aspeed_rtc;
    dc->reset = aspeed_rtc_reset;
}

static const TypeInfo aspeed_rtc_info = {
    .name          = TYPE_ASPEED_RTC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedRtcState),
    .class_init    = aspeed_rtc_class_init,
};

static void aspeed_rtc_register_types(void)
{
    type_register_static(&aspeed_rtc_info);
}

type_init(aspeed_rtc_register_types)
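[Editor's note: the counter registers hold a packed calendar value, not a seconds count: COUNTER1 carries day/hour/minute/second and COUNTER2 carries century/year-of-century/month, with the same shifts and masks as aspeed_rtc_get_counter() above. A standalone sketch packing 2019-07-01 17:38:10:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pack 2019-07-01 17:38:10 the way the model does. */
    uint32_t counter1 = (1u << 24) | (17u << 16) | (38u << 8) | 10u;
    uint32_t counter2 = ((20u & 0x1f) << 16) | ((19u & 0x7f) << 8) | (7u & 0xf);

    printf("COUNTER1 = 0x%08x\n", counter1); /* 0x0111260a */
    printf("COUNTER2 = 0x%08x\n", counter2); /* 0x00141307 */
    return 0;
}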


@@ -107,41 +107,51 @@ static inline uint64_t calculate_time(struct AspeedTimer *t, uint32_t ticks)
     return t->start + delta_ns;
 }

+static inline uint32_t calculate_match(struct AspeedTimer *t, int i)
+{
+    return t->match[i] < t->reload ? t->match[i] : 0;
+}
+
 static uint64_t calculate_next(struct AspeedTimer *t)
 {
-    uint64_t next = 0;
-    uint32_t rate = calculate_rate(t);
+    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    uint64_t next;

-    while (!next) {
-        /* We don't know the relationship between the values in the match
-         * registers, so sort using MAX/MIN/zero. We sort in that order as the
-         * timer counts down to zero. */
-        uint64_t seq[] = {
-            calculate_time(t, MAX(t->match[0], t->match[1])),
-            calculate_time(t, MIN(t->match[0], t->match[1])),
-            calculate_time(t, 0),
-        };
-        uint64_t reload_ns;
-        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    /*
+     * We don't know the relationship between the values in the match
+     * registers, so sort using MAX/MIN/zero. We sort in that order as
+     * the timer counts down to zero.
+     */

-        if (now < seq[0]) {
-            next = seq[0];
-        } else if (now < seq[1]) {
-            next = seq[1];
-        } else if (now < seq[2]) {
-            next = seq[2];
-        } else if (t->reload) {
-            reload_ns = muldiv64(t->reload, NANOSECONDS_PER_SECOND, rate);
-            t->start = now - ((now - t->start) % reload_ns);
-        } else {
-            /* no reload value, return 0 */
-            break;
-        }
-    }
+    next = calculate_time(t, MAX(calculate_match(t, 0), calculate_match(t, 1)));
+    if (now < next) {
+        return next;
+    }

-    return next;
+    next = calculate_time(t, MIN(calculate_match(t, 0), calculate_match(t, 1)));
+    if (now < next) {
+        return next;
+    }
+
+    next = calculate_time(t, 0);
+    if (now < next) {
+        return next;
+    }
+
+    /* We've missed all deadlines, fire interrupt and try again */
+    timer_del(&t->timer);
+
+    if (timer_overflow_interrupt(t)) {
+        t->level = !t->level;
+        qemu_set_irq(t->irq, t->level);
+    }
+
+    next = MAX(MAX(calculate_match(t, 0), calculate_match(t, 1)), 0);
+    t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+    return calculate_time(t, next);
 }

 static void aspeed_timer_mod(AspeedTimer *t)
 {
     uint64_t next = calculate_next(t);

@@ -184,7 +194,11 @@ static uint64_t aspeed_timer_get_value(AspeedTimer *t, int reg)
     switch (reg) {
     case TIMER_REG_STATUS:
-        value = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+        if (timer_enabled(t)) {
+            value = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+        } else {
+            value = t->reload;
+        }
         break;
     case TIMER_REG_RELOAD:
         value = t->reload;

@@ -261,7 +275,11 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg,
         int64_t delta = (int64_t) value - (int64_t) calculate_ticks(t, now);
         uint32_t rate = calculate_rate(t);

-        t->start += muldiv64(delta, NANOSECONDS_PER_SECOND, rate);
+        if (delta >= 0) {
+            t->start += muldiv64(delta, NANOSECONDS_PER_SECOND, rate);
+        } else {
+            t->start -= muldiv64(-delta, NANOSECONDS_PER_SECOND, rate);
+        }
         aspeed_timer_mod(t);
     }
     break;
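[Editor's note: the rewritten calculate_next() replaces the retry loop with a straight-line check of the three candidate expiry times in the order they occur: the counter counts down, so the larger match value is reached first, then the smaller one, then the zero crossing. The first candidate still in the future wins; if all have passed, the timer fires and is rearmed from a fresh start time. A standalone sketch of that selection with plain numbers:]

#include <stdint.h>
#include <stdio.h>

/* Candidate expiry times in ns, soonest first (time of the larger match
 * value, time of the smaller one, time of the zero crossing). Returns
 * the first still in the future, or 0 when every deadline was missed
 * and the caller must fire the interrupt and rearm. */
static uint64_t next_deadline(uint64_t now, const uint64_t when[3])
{
    for (int i = 0; i < 3; i++) {
        if (now < when[i]) {
            return when[i];
        }
    }
    return 0;
}

int main(void)
{
    const uint64_t when[3] = { 160, 220, 300 };

    printf("%u\n", (unsigned)next_deadline(150, when)); /* 160 */
    printf("%u\n", (unsigned)next_deadline(200, when)); /* 220 */
    printf("%u\n", (unsigned)next_deadline(500, when)); /* 0: missed all */
    return 0;
}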


@@ -66,6 +66,10 @@ cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK A
 cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
 cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset"
+
+# hw/timer/aspeed-rtc.c
+aspeed_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
+aspeed_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64

 # sun4v-rtc.c
 sun4v_rtc_read(uint64_t addr, uint64_t value) "read: addr 0x%" PRIx64 " value 0x%" PRIx64
 sun4v_rtc_write(uint64_t addr, uint64_t value) "write: addr 0x%" PRIx64 " value 0x%" PRIx64


@@ -44,6 +44,9 @@
 #define WDT_RESTART_MAGIC               0x4755

+#define SCU_RESET_CONTROL1              (0x04 / 4)
+#define SCU_RESET_SDRAM                 BIT(0)
+
 static bool aspeed_wdt_is_enabled(const AspeedWDTState *s)
 {
     return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE;

@@ -222,6 +225,13 @@ static void aspeed_wdt_timer_expired(void *dev)
 {
     AspeedWDTState *s = ASPEED_WDT(dev);

+    /* Do not reset on SDRAM controller reset */
+    if (s->scu->regs[SCU_RESET_CONTROL1] & SCU_RESET_SDRAM) {
+        timer_del(s->timer);
+        s->regs[WDT_CTRL] = 0;
+        return;
+    }
+
     qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n");
     watchdog_perform_action();
     timer_del(s->timer);

@@ -233,6 +243,16 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp)
 {
     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
     AspeedWDTState *s = ASPEED_WDT(dev);
+    Error *err = NULL;
+    Object *obj;
+
+    obj = object_property_get_link(OBJECT(dev), "scu", &err);
+    if (!obj) {
+        error_propagate(errp, err);
+        error_prepend(errp, "required link 'scu' not found: ");
+        return;
+    }
+    s->scu = ASPEED_SCU(obj);

     if (!is_supported_silicon_rev(s->silicon_rev)) {
         error_setg(errp, "Unknown silicon revision: 0x%" PRIx32,
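[Editor's note: the watchdog now requires a QOM link property named "scu" so it can inspect the SCU reset-control register at expiry. Whoever instantiates the watchdog must publish that link before realize; a minimal sketch of the wiring, assuming the device instances are laid out as in hw/arm/aspeed_soc.c (error handling elided):]

/* Sketch: give each watchdog its "scu" link before realizing it;
 * object_property_add_const_link() is the usual QOM call for this. */
static void sketch_wire_wdt_scu(AspeedSoCState *s, int i)
{
    object_property_add_const_link(OBJECT(&s->wdt[i]), "scu",
                                   OBJECT(&s->scu), &error_abort);
}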


@@ -15,7 +15,9 @@
 #include "hw/intc/aspeed_vic.h"
 #include "hw/misc/aspeed_scu.h"
 #include "hw/misc/aspeed_sdmc.h"
+#include "hw/misc/aspeed_xdma.h"
 #include "hw/timer/aspeed_timer.h"
+#include "hw/timer/aspeed_rtc.h"
 #include "hw/i2c/aspeed_i2c.h"
 #include "hw/ssi/aspeed_smc.h"
 #include "hw/watchdog/wdt_aspeed.h"

@@ -23,23 +25,28 @@

 #define ASPEED_SPIS_NUM  2
 #define ASPEED_WDTS_NUM  3
+#define ASPEED_CPUS_NUM  2
+#define ASPEED_MACS_NUM  2

 typedef struct AspeedSoCState {
     /*< private >*/
     DeviceState parent;

     /*< public >*/
-    ARMCPU cpu;
+    ARMCPU cpu[ASPEED_CPUS_NUM];
+    uint32_t num_cpus;
     MemoryRegion sram;
     AspeedVICState vic;
+    AspeedRtcState rtc;
     AspeedTimerCtrlState timerctrl;
     AspeedI2CState i2c;
     AspeedSCUState scu;
+    AspeedXDMAState xdma;
     AspeedSMCState fmc;
     AspeedSMCState spi[ASPEED_SPIS_NUM];
     AspeedSDMCState sdmc;
     AspeedWDTState wdt[ASPEED_WDTS_NUM];
-    FTGMAC100State ftgmac100;
+    FTGMAC100State ftgmac100[ASPEED_MACS_NUM];
 } AspeedSoCState;

 #define TYPE_ASPEED_SOC "aspeed-soc"

@@ -49,13 +56,14 @@ typedef struct AspeedSoCInfo {
     const char *name;
     const char *cpu_type;
     uint32_t silicon_rev;
-    hwaddr sdram_base;
     uint64_t sram_size;
     int spis_num;
-    const hwaddr *spi_bases;
     const char *fmc_typename;
     const char **spi_typename;
     int wdts_num;
+    const int *irqmap;
+    const hwaddr *memmap;
+    uint32_t num_cpus;
 } AspeedSoCInfo;

 typedef struct AspeedSoCClass {

@@ -68,4 +76,41 @@ typedef struct AspeedSoCClass {
 #define ASPEED_SOC_GET_CLASS(obj) \
     OBJECT_GET_CLASS(AspeedSoCClass, (obj), TYPE_ASPEED_SOC)

+enum {
+    ASPEED_IOMEM,
+    ASPEED_UART1,
+    ASPEED_UART2,
+    ASPEED_UART3,
+    ASPEED_UART4,
+    ASPEED_UART5,
+    ASPEED_VUART,
+    ASPEED_FMC,
+    ASPEED_SPI1,
+    ASPEED_SPI2,
+    ASPEED_VIC,
+    ASPEED_SDMC,
+    ASPEED_SCU,
+    ASPEED_ADC,
+    ASPEED_SRAM,
+    ASPEED_GPIO,
+    ASPEED_RTC,
+    ASPEED_TIMER1,
+    ASPEED_TIMER2,
+    ASPEED_TIMER3,
+    ASPEED_TIMER4,
+    ASPEED_TIMER5,
+    ASPEED_TIMER6,
+    ASPEED_TIMER7,
+    ASPEED_TIMER8,
+    ASPEED_WDT,
+    ASPEED_PWM,
+    ASPEED_LPC,
+    ASPEED_IBT,
+    ASPEED_I2C,
+    ASPEED_ETH1,
+    ASPEED_ETH2,
+    ASPEED_SDRAM,
+    ASPEED_XDMA,
+};
+
 #endif /* ASPEED_SOC_H */
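[Editor's note: with the enum above, SoC code can index the per-SoC irqmap[]/memmap[] tables symbolically instead of hard-coding addresses. A hedged sketch of the intended lookup pattern; sketch_map_rtc() is hypothetical, the real wiring lives in hw/arm/aspeed_soc.c:]

/* Hypothetical helper showing how the tables are meant to be used:
 * map the RTC at its SoC-specific address and wire its VIC input.
 * Error handling is elided. */
static void sketch_map_rtc(AspeedSoCState *s)
{
    AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);

    sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->info->memmap[ASPEED_RTC]);
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0,
                       qdev_get_gpio_in(DEVICE(&s->vic),
                                        sc->info->irqmap[ASPEED_RTC]));
}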


@@ -125,6 +125,9 @@ enum FslIMX7MemoryMap {
     FSL_IMX7_ADC2_ADDR            = 0x30620000,
     FSL_IMX7_ADCn_SIZE            = 0x1000,

+    FSL_IMX7_PCIE_PHY_ADDR        = 0x306D0000,
+    FSL_IMX7_PCIE_PHY_SIZE        = 0x10000,
+
     FSL_IMX7_GPC_ADDR             = 0x303A0000,

     FSL_IMX7_I2C1_ADDR            = 0x30A20000,

@@ -179,6 +182,9 @@ enum FslIMX7MemoryMap {
     FSL_IMX7_PCIE_REG_SIZE        = 16 * 1024,

     FSL_IMX7_GPR_ADDR             = 0x30340000,
+
+    FSL_IMX7_DMA_APBH_ADDR        = 0x33000000,
+    FSL_IMX7_DMA_APBH_SIZE        = 0x2000,
 };

 enum FslIMX7IRQs {

@@ -207,10 +213,10 @@ enum FslIMX7IRQs {
     FSL_IMX7_USB2_IRQ     = 42,
     FSL_IMX7_USB3_IRQ     = 40,

-    FSL_IMX7_PCI_INTA_IRQ = 122,
-    FSL_IMX7_PCI_INTB_IRQ = 123,
-    FSL_IMX7_PCI_INTC_IRQ = 124,
-    FSL_IMX7_PCI_INTD_IRQ = 125,
+    FSL_IMX7_PCI_INTA_IRQ = 125,
+    FSL_IMX7_PCI_INTB_IRQ = 124,
+    FSL_IMX7_PCI_INTC_IRQ = 123,
+    FSL_IMX7_PCI_INTD_IRQ = 122,

     FSL_IMX7_UART7_IRQ    = 126,


@@ -0,0 +1,30 @@
/*
 * ASPEED XDMA Controller
 * Eddie James <eajames@linux.ibm.com>
 *
 * Copyright (C) 2019 IBM Corp.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ASPEED_XDMA_H
#define ASPEED_XDMA_H

#include "hw/sysbus.h"

#define TYPE_ASPEED_XDMA "aspeed.xdma"
#define ASPEED_XDMA(obj) OBJECT_CHECK(AspeedXDMAState, (obj), TYPE_ASPEED_XDMA)

#define ASPEED_XDMA_NUM_REGS (ASPEED_XDMA_REG_SIZE / sizeof(uint32_t))
#define ASPEED_XDMA_REG_SIZE 0x7C

typedef struct AspeedXDMAState {
    SysBusDevice parent;

    MemoryRegion iomem;
    qemu_irq irq;

    char bmc_cmdq_readp_set;
    uint32_t regs[ASPEED_XDMA_NUM_REGS];
} AspeedXDMAState;

#endif /* ASPEED_XDMA_H */


@@ -97,6 +97,9 @@ typedef struct AspeedSMCState {
     uint8_t r_timings;
     uint8_t conf_enable_w0;

+    /* for DMA support */
+    uint64_t sdram_base;
+
     AspeedSMCFlash *flashes;

     uint8_t snoop_index;


@@ -0,0 +1,31 @@
/*
 * ASPEED Real Time Clock
 * Joel Stanley <joel@jms.id.au>
 *
 * Copyright 2019 IBM Corp
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef ASPEED_RTC_H
#define ASPEED_RTC_H

#include <stdint.h>

#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/sysbus.h"

typedef struct AspeedRtcState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t reg[0x18];
    int offset;
} AspeedRtcState;

#define TYPE_ASPEED_RTC "aspeed.rtc"
#define ASPEED_RTC(obj) OBJECT_CHECK(AspeedRtcState, (obj), TYPE_ASPEED_RTC)

#endif /* ASPEED_RTC_H */


@@ -27,6 +27,7 @@ typedef struct AspeedWDTState {
     MemoryRegion iomem;
     uint32_t regs[ASPEED_WDT_REGS_MAX];

+    AspeedSCUState *scu;
     uint32_t pclk_freq;
     uint32_t silicon_rev;
     uint32_t ext_pulse_width_mask;


@@ -1,16 +1,15 @@
 obj-y += arm-semi.o
-obj-$(CONFIG_SOFTMMU) += machine.o psci.o arch_dump.o monitor.o
+obj-y += helper.o vfp_helper.o
+obj-y += cpu.o gdbstub.o
+obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
+
+obj-$(CONFIG_SOFTMMU) += machine.o arch_dump.o monitor.o
+obj-$(CONFIG_SOFTMMU) += arm-powerctl.o
+
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(call land,$(CONFIG_KVM),$(call lnot,$(TARGET_AARCH64))) += kvm32.o
 obj-$(call land,$(CONFIG_KVM),$(TARGET_AARCH64)) += kvm64.o
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
-obj-y += translate.o op_helper.o helper.o cpu.o
-obj-y += neon_helper.o iwmmxt_helper.o vec_helper.o vfp_helper.o
-obj-y += gdbstub.o
-obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
-obj-$(TARGET_AARCH64) += pauth_helper.o
-obj-y += crypto_helper.o
-obj-$(CONFIG_SOFTMMU) += arm-powerctl.o

 DECODETREE = $(SRC_PATH)/scripts/decodetree.py

@@ -33,4 +32,13 @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
 target/arm/translate.o: target/arm/decode-vfp.inc.c
 target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c

+obj-y += tlb_helper.o
+obj-y += translate.o op_helper.o
+obj-y += crypto_helper.o
+obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
+
+obj-$(CONFIG_SOFTMMU) += psci.o
+
+obj-$(TARGET_AARCH64) += translate-a64.o helper-a64.o
 obj-$(TARGET_AARCH64) += translate-sve.o sve_helper.o
+obj-$(TARGET_AARCH64) += pauth_helper.o


@@ -19,6 +19,7 @@
  */

 #include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
 #include "qemu-common.h"
 #include "target/arm/idau.h"
 #include "qemu/module.h"

@@ -676,6 +677,231 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
 #endif
 }

+#ifdef TARGET_AARCH64
+
+static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    uint32_t psr = pstate_read(env);
+    int i;
+    int el = arm_current_el(env);
+    const char *ns_status;
+
+    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
+    for (i = 0; i < 32; i++) {
+        if (i == 31) {
+            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
+        } else {
+            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
+                         (i + 2) % 3 ? " " : "\n");
+        }
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
+        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+    } else {
+        ns_status = "";
+    }
+    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
+                 psr,
+                 psr & PSTATE_N ? 'N' : '-',
+                 psr & PSTATE_Z ? 'Z' : '-',
+                 psr & PSTATE_C ? 'C' : '-',
+                 psr & PSTATE_V ? 'V' : '-',
+                 ns_status,
+                 el,
+                 psr & PSTATE_SP ? 'h' : 't');
+
+    if (cpu_isar_feature(aa64_bti, cpu)) {
+        qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
+    }
+    if (!(flags & CPU_DUMP_FPU)) {
+        qemu_fprintf(f, "\n");
+        return;
+    }
+    if (fp_exception_el(env, el) != 0) {
+        qemu_fprintf(f, " FPU disabled\n");
+        return;
+    }
+    qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
+                 vfp_get_fpcr(env), vfp_get_fpsr(env));
+
+    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+        int j, zcr_len = sve_zcr_len_for_el(env, el);
+
+        for (i = 0; i <= FFR_PRED_NUM; i++) {
+            bool eol;
+            if (i == FFR_PRED_NUM) {
+                qemu_fprintf(f, "FFR=");
+                /* It's last, so end the line. */
+                eol = true;
+            } else {
+                qemu_fprintf(f, "P%02d=", i);
+                switch (zcr_len) {
+                case 0:
+                    eol = i % 8 == 7;
+                    break;
+                case 1:
+                    eol = i % 6 == 5;
+                    break;
+                case 2:
+                case 3:
+                    eol = i % 3 == 2;
+                    break;
+                default:
+                    /* More than one quadword per predicate. */
+                    eol = true;
+                    break;
+                }
+            }
+            for (j = zcr_len / 4; j >= 0; j--) {
+                int digits;
+                if (j * 4 + 4 <= zcr_len + 1) {
+                    digits = 16;
+                } else {
+                    digits = (zcr_len % 4 + 1) * 4;
+                }
+                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
+                             env->vfp.pregs[i].p[j],
+                             j ? ":" : eol ? "\n" : " ");
+            }
+        }
+
+        for (i = 0; i < 32; i++) {
+            if (zcr_len == 0) {
+                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+                             i, env->vfp.zregs[i].d[1],
+                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
+            } else if (zcr_len == 1) {
+                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
+                             ":%016" PRIx64 ":%016" PRIx64 "\n",
+                             i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
+                             env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
+            } else {
+                for (j = zcr_len; j >= 0; j--) {
+                    bool odd = (zcr_len - j) % 2 != 0;
+                    if (j == zcr_len) {
+                        qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
+                    } else if (!odd) {
+                        if (j > 0) {
+                            qemu_fprintf(f, " [%x-%x]=", j, j - 1);
+                        } else {
+                            qemu_fprintf(f, " [%x]=", j);
+                        }
+                    }
+                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
+                                 env->vfp.zregs[i].d[j * 2 + 1],
+                                 env->vfp.zregs[i].d[j * 2],
+                                 odd || j == 0 ? "\n" : ":");
+                }
+            }
+        }
+    } else {
+        for (i = 0; i < 32; i++) {
+            uint64_t *q = aa64_vfp_qreg(env, i);
+            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+                         i, q[1], q[0], (i & 1 ? "\n" : " "));
+        }
+    }
+}
+
+#else
+
+static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+    g_assert_not_reached();
+}
+
+#endif
+
+static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    int i;
+
+    if (is_a64(env)) {
+        aarch64_cpu_dump_state(cs, f, flags);
+        return;
+    }
+
+    for (i = 0; i < 16; i++) {
+        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+        if ((i % 4) == 3) {
+            qemu_fprintf(f, "\n");
+        } else {
+            qemu_fprintf(f, " ");
+        }
+    }
+
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        uint32_t xpsr = xpsr_read(env);
+        const char *mode;
+        const char *ns_status = "";
+
+        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+            ns_status = env->v7m.secure ? "S " : "NS ";
+        }
+
+        if (xpsr & XPSR_EXCP) {
+            mode = "handler";
+        } else {
+            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
+                mode = "unpriv-thread";
+            } else {
+                mode = "priv-thread";
+            }
+        }
+
+        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
+                     xpsr,
+                     xpsr & XPSR_N ? 'N' : '-',
+                     xpsr & XPSR_Z ? 'Z' : '-',
+                     xpsr & XPSR_C ? 'C' : '-',
+                     xpsr & XPSR_V ? 'V' : '-',
+                     xpsr & XPSR_T ? 'T' : 'A',
+                     ns_status,
+                     mode);
+    } else {
+        uint32_t psr = cpsr_read(env);
+        const char *ns_status = "";
+
+        if (arm_feature(env, ARM_FEATURE_EL3) &&
+            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
+            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+        }
+
+        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
+                     psr,
+                     psr & CPSR_N ? 'N' : '-',
+                     psr & CPSR_Z ? 'Z' : '-',
+                     psr & CPSR_C ? 'C' : '-',
+                     psr & CPSR_V ? 'V' : '-',
+                     psr & CPSR_T ? 'T' : 'A',
+                     ns_status,
+                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
+    }
+
+    if (flags & CPU_DUMP_FPU) {
+        int numvfpregs = 0;
+        if (arm_feature(env, ARM_FEATURE_VFP)) {
+            numvfpregs += 16;
+        }
+        if (arm_feature(env, ARM_FEATURE_VFP3)) {
+            numvfpregs += 16;
+        }
+        for (i = 0; i < numvfpregs; i++) {
+            uint64_t v = *aa32_vfp_dreg(env, i);
+            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
+                         i * 2, (uint32_t)v,
+                         i * 2 + 1, (uint32_t)(v >> 32),
+                         i, v);
+        }
+        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
+    }
+}
+
 uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
 {
     uint32_t Aff1 = idx / clustersz;

@@ -2340,8 +2566,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_write_register = arm_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
     cc->do_interrupt = arm_cpu_do_interrupt;
-    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
-    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
     cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
     cc->asidx_from_attrs = arm_asidx_from_attrs;
     cc->vmsd = &vmstate_arm_cpu;

@@ -2364,6 +2588,10 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
 #ifdef CONFIG_TCG
     cc->tcg_initialize = arm_translate_init;
     cc->tlb_fill = arm_cpu_tlb_fill;
+#if !defined(CONFIG_USER_ONLY)
+    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
 #endif
 }


@@ -929,8 +929,6 @@ void arm_cpu_do_interrupt(CPUState *cpu);
 void arm_v7m_cpu_do_interrupt(CPUState *cpu);
 bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags);
-
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                          MemTxAttrs *attrs);


@ -1,3 +1,10 @@
/*
* ARM generic helpers.
*
* This code is licensed under the GNU GPL v2 or later.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/units.h" #include "qemu/units.h"
#include "target/arm/idau.h" #include "target/arm/idau.h"
@ -7,7 +14,6 @@
#include "exec/gdbstub.h" #include "exec/gdbstub.h"
#include "exec/helper-proto.h" #include "exec/helper-proto.h"
#include "qemu/host-utils.h" #include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h" #include "sysemu/sysemu.h"
#include "qemu/bitops.h" #include "qemu/bitops.h"
#include "qemu/crc32c.h" #include "qemu/crc32c.h"
@ -19,7 +25,6 @@
#include "hw/semihosting/semihost.h" #include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h" #include "sysemu/cpus.h"
#include "sysemu/kvm.h" #include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h" #include "qemu/range.h"
#include "qapi/qapi-commands-target.h" #include "qapi/qapi-commands-target.h"
#include "qapi/error.h" #include "qapi/error.h"
@ -28,38 +33,12 @@
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
unsigned int attrs:8; /* as in the MAIR register encoding */
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx, MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
bool ns;
bool nsc;
uint8_t sregion;
bool srvalid;
uint8_t iregion;
bool irvalid;
} V8M_SAttributes;
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
V8M_SAttributes *sattrs);
#endif #endif
static void switch_mode(CPUARMState *env, int mode); static void switch_mode(CPUARMState *env, int mode);
@ -7524,7 +7503,8 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{ {
/* The TT instructions can be used by unprivileged code, but in /*
* The TT instructions can be used by unprivileged code, but in
* user-only emulation we don't have the MPU. * user-only emulation we don't have the MPU.
* Luckily since we know we are NonSecure unprivileged (and that in * Luckily since we know we are NonSecure unprivileged (and that in
* turn means that the A flag wasn't specified), all the bits in the * turn means that the A flag wasn't specified), all the bits in the
@ -7700,22 +7680,41 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
return target_el; return target_el;
} }
/* void arm_log_exception(int idx)
* Return true if the v7M CPACR permits access to the FPU for the specified
* security state and privilege level.
*/
static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
{ {
switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) { if (qemu_loglevel_mask(CPU_LOG_INT)) {
case 0: const char *exc = NULL;
case 2: /* UNPREDICTABLE: we treat like 0 */ static const char * const excnames[] = {
return false; [EXCP_UDEF] = "Undefined Instruction",
case 1: [EXCP_SWI] = "SVC",
return is_priv; [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
case 3: [EXCP_DATA_ABORT] = "Data Abort",
return true; [EXCP_IRQ] = "IRQ",
default: [EXCP_FIQ] = "FIQ",
g_assert_not_reached(); [EXCP_BKPT] = "Breakpoint",
[EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
[EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
[EXCP_HVC] = "Hypervisor Call",
[EXCP_HYP_TRAP] = "Hypervisor Trap",
[EXCP_SMC] = "Secure Monitor Call",
[EXCP_VIRQ] = "Virtual IRQ",
[EXCP_VFIQ] = "Virtual FIQ",
[EXCP_SEMIHOST] = "Semihosting call",
[EXCP_NOCP] = "v7M NOCP UsageFault",
[EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
[EXCP_STKOF] = "v8M STKOF UsageFault",
[EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
[EXCP_LSERR] = "v8M LSERR UsageFault",
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
exc = excnames[idx];
}
if (!exc) {
exc = "unknown";
}
qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
} }
} }
@ -7796,7 +7795,8 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
return true; return true;
pend_fault: pend_fault:
/* By pending the exception at this point we are making /*
* By pending the exception at this point we are making
* the IMPDEF choice "overridden exceptions pended" (see the * the IMPDEF choice "overridden exceptions pended" (see the
* MergeExcInfo() pseudocode). The other choice would be to not * MergeExcInfo() pseudocode). The other choice would be to not
* pend them now and then make a choice about which to throw away * pend them now and then make a choice about which to throw away
@ -7871,7 +7871,8 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
return true; return true;
pend_fault: pend_fault:
/* By pending the exception at this point we are making /*
* By pending the exception at this point we are making
* the IMPDEF choice "overridden exceptions pended" (see the * the IMPDEF choice "overridden exceptions pended" (see the
* MergeExcInfo() pseudocode). The other choice would be to not * MergeExcInfo() pseudocode). The other choice would be to not
* pend them now and then make a choice about which to throw away * pend them now and then make a choice about which to throw away
@ -7972,7 +7973,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
*/ */
} }
/* Write to v7M CONTROL.SPSEL bit for the specified security bank. /*
* Write to v7M CONTROL.SPSEL bit for the specified security bank.
* This may change the current stack pointer between Main and Process * This may change the current stack pointer between Main and Process
* stack pointers if it is done for the CONTROL register for the current * stack pointers if it is done for the CONTROL register for the current
* security state. * security state.
@ -8000,7 +8002,8 @@ static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
} }
} }
/* Write to v7M CONTROL.SPSEL bit. This may change the current /*
* Write to v7M CONTROL.SPSEL bit. This may change the current
* stack pointer between Main and Process stack pointers. * stack pointer between Main and Process stack pointers.
*/ */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
@ -8010,7 +8013,8 @@ static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
void write_v7m_exception(CPUARMState *env, uint32_t new_exc) void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{ {
/* Write a new value to v7m.exception, thus transitioning into or out /*
* Write a new value to v7m.exception, thus transitioning into or out
* of Handler mode; this may result in a change of active stack pointer. * of Handler mode; this may result in a change of active stack pointer.
*/ */
bool new_is_psp, old_is_psp = v7m_using_psp(env); bool new_is_psp, old_is_psp = v7m_using_psp(env);
@ -8036,7 +8040,8 @@ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
return; return;
} }
/* All the banked state is accessed by looking at env->v7m.secure /*
* All the banked state is accessed by looking at env->v7m.secure
* except for the stack pointer; rearrange the SP appropriately. * except for the stack pointer; rearrange the SP appropriately.
*/ */
new_ss_msp = env->v7m.other_ss_msp; new_ss_msp = env->v7m.other_ss_msp;
@ -8063,7 +8068,8 @@ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{ {
/* Handle v7M BXNS: /*
* Handle v7M BXNS:
* - if the return value is a magic value, do exception return (like BX) * - if the return value is a magic value, do exception return (like BX)
* - otherwise bit 0 of the return value is the target security state * - otherwise bit 0 of the return value is the target security state
*/ */
@ -8078,7 +8084,8 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
} }
if (dest >= min_magic) { if (dest >= min_magic) {
/* This is an exception return magic value; put it where /*
* This is an exception return magic value; put it where
* do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
* Note that if we ever add gen_ss_advance() singlestep support to * Note that if we ever add gen_ss_advance() singlestep support to
* M profile this should count as an "instruction execution complete" * M profile this should count as an "instruction execution complete"
@ -8103,7 +8110,8 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{ {
/* Handle v7M BLXNS: /*
* Handle v7M BLXNS:
* - bit 0 of the destination address is the target security state * - bit 0 of the destination address is the target security state
*/ */
@ -8116,7 +8124,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
assert(env->v7m.secure); assert(env->v7m.secure);
if (dest & 1) { if (dest & 1) {
/* target is Secure, so this is just a normal BLX, /*
* Target is Secure, so this is just a normal BLX,
* except that the low bit doesn't indicate Thumb/not. * except that the low bit doesn't indicate Thumb/not.
*/ */
env->regs[14] = nextinst; env->regs[14] = nextinst;
@ -8147,7 +8156,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
env->regs[13] = sp; env->regs[13] = sp;
env->regs[14] = 0xfeffffff; env->regs[14] = 0xfeffffff;
if (arm_v7m_is_handler_mode(env)) { if (arm_v7m_is_handler_mode(env)) {
/* Write a dummy value to IPSR, to avoid leaking the current secure /*
* Write a dummy value to IPSR, to avoid leaking the current secure
* exception number to non-secure code. This is guaranteed not * exception number to non-secure code. This is guaranteed not
* to cause write_v7m_exception() to actually change stacks. * to cause write_v7m_exception() to actually change stacks.
*/ */
@ -8162,7 +8172,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
bool spsel) bool spsel)
{ {
/* Return a pointer to the location where we currently store the /*
* Return a pointer to the location where we currently store the
* stack pointer for the requested security state and thread mode. * stack pointer for the requested security state and thread mode.
* This pointer will become invalid if the CPU state is updated * This pointer will become invalid if the CPU state is updated
* such that the stack pointers are switched around (eg changing * such that the stack pointers are switched around (eg changing
@ -8208,7 +8219,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
/* We don't do a get_phys_addr() here because the rules for vector /*
* We don't do a get_phys_addr() here because the rules for vector
* loads are special: they always use the default memory map, and * loads are special: they always use the default memory map, and
* the default memory map permits reads from all addresses. * the default memory map permits reads from all addresses.
* Since there's no easy way to pass through to pmsav8_mpu_lookup() * Since there's no easy way to pass through to pmsav8_mpu_lookup()
@ -8239,7 +8251,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
return true; return true;
load_fail: load_fail:
/* All vector table fetch fails are reported as HardFault, with /*
* All vector table fetch fails are reported as HardFault, with
* HFSR.VECTTBL and .FORCED set. (FORCED is set because * HFSR.VECTTBL and .FORCED set. (FORCED is set because
* technically the underlying exception is a MemManage or BusFault * technically the underlying exception is a MemManage or BusFault
* that is escalated to HardFault.) This is a terminal exception, * that is escalated to HardFault.) This is a terminal exception,
@ -8271,7 +8284,8 @@ static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool ignore_faults) bool ignore_faults)
{ {
/* For v8M, push the callee-saves register part of the stack frame. /*
* For v8M, push the callee-saves register part of the stack frame.
* Compare the v8M pseudocode PushCalleeStack(). * Compare the v8M pseudocode PushCalleeStack().
* In the tailchaining case this may not be the current stack. * In the tailchaining case this may not be the current stack.
*/ */
@@ -8322,7 +8336,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
         return true;
     }
 
-    /* Write as much of the stack frame as we can. A write failure may
+    /*
+     * Write as much of the stack frame as we can. A write failure may
      * cause us to pend a derived exception.
      */
     sig = v7m_integrity_sig(env, lr);
@@ -8346,7 +8361,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                 bool ignore_stackfaults)
 {
-    /* Do the "take the exception" parts of exception entry,
+    /*
+     * Do the "take the exception" parts of exception entry,
      * but not the pushing of state to the stack. This is
      * similar to the pseudocode ExceptionTaken() function.
      */
@@ -8371,13 +8387,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
     if (arm_feature(env, ARM_FEATURE_V8)) {
         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
             (lr & R_V7M_EXCRET_S_MASK)) {
-            /* The background code (the owner of the registers in the
+            /*
+             * The background code (the owner of the registers in the
              * exception frame) is Secure. This means it may either already
              * have or now needs to push callee-saves registers.
              */
             if (targets_secure) {
                 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
-                    /* We took an exception from Secure to NonSecure
+                    /*
+                     * We took an exception from Secure to NonSecure
                      * (which means the callee-saved registers got stacked)
                      * and are now tailchaining to a Secure exception.
                      * Clear DCRS so eventual return from this Secure
@@ -8386,7 +8404,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                     lr &= ~R_V7M_EXCRET_DCRS_MASK;
                 }
             } else {
-                /* We're going to a non-secure exception; push the
+                /*
+                 * We're going to a non-secure exception; push the
                  * callee-saves registers to the stack now, if they're
                  * not already saved.
                  */
@@ -8408,14 +8427,16 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
             lr |= R_V7M_EXCRET_SPSEL_MASK;
         }
 
-    /* Clear registers if necessary to prevent non-secure exception
+    /*
+     * Clear registers if necessary to prevent non-secure exception
      * code being able to see register values from secure code.
      * Where register values become architecturally UNKNOWN we leave
      * them with their previous values.
      */
     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
         if (!targets_secure) {
-            /* Always clear the caller-saved registers (they have been
+            /*
+             * Always clear the caller-saved registers (they have been
              * pushed to the stack earlier in v7m_push_stack()).
              * Clear callee-saved registers if the background code is
              * Secure (in which case these regs were saved in
@@ -8436,7 +8457,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
     }
 
     if (push_failed && !ignore_stackfaults) {
-        /* Derived exception on callee-saves register stacking:
+        /*
+         * Derived exception on callee-saves register stacking:
          * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
          */
@@ -8453,7 +8475,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
         return;
     }
 
-    /* Now we've done everything that might cause a derived exception
+    /*
+     * Now we've done everything that might cause a derived exception
      * we can go ahead and activate whichever exception we're going to
      * take (which might now be the derived exception).
      */
@@ -8656,7 +8679,8 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
 
 static bool v7m_push_stack(ARMCPU *cpu)
 {
-    /* Do the "set up stack frame" part of exception entry,
+    /*
+     * Do the "set up stack frame" part of exception entry,
      * similar to pseudocode PushStack().
      * Return true if we generate a derived exception (and so
      * should ignore further stack faults trying to process
@@ -8724,7 +8748,8 @@ static bool v7m_push_stack(ARMCPU *cpu)
         }
     }
 
-    /* Write as much of the stack frame as we can. If we fail a stack
+    /*
+     * Write as much of the stack frame as we can. If we fail a stack
      * write this will result in a derived exception being pended
      * (which may be taken in preference to the one we started with
      * if it has higher priority).
@@ -8841,7 +8866,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     bool ftype;
     bool restore_s16_s31;
 
-    /* If we're not in Handler mode then jumps to magic exception-exit
+    /*
+     * If we're not in Handler mode then jumps to magic exception-exit
      * addresses don't have magic behaviour. However for the v8M
      * security extensions the magic secure-function-return has to
      * work in thread mode too, so to avoid doing an extra check in
@@ -8855,7 +8881,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         return;
     }
 
-    /* In the spec pseudocode ExceptionReturn() is called directly
+    /*
+     * In the spec pseudocode ExceptionReturn() is called directly
      * from BXWritePC() and gets the full target PC value including
      * bit zero. In QEMU's implementation we treat it as a normal
      * jump-to-register (which is then caught later on), and so split
@@ -8888,7 +8915,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     }
 
     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
+        /*
+         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
          * we pick which FAULTMASK to clear.
          */
         if (!env->v7m.secure &&
@@ -8902,7 +8930,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     }
 
     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
-        /* Auto-clear FAULTMASK on return from other than NMI.
+        /*
+         * Auto-clear FAULTMASK on return from other than NMI.
          * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
          * value of the ES bit in the exception return value indicates
@@ -8927,7 +8956,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         /* still an irq active now */
         break;
     case 1:
-        /* we returned to base exception level, no nesting.
+        /*
+         * We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
          * where we have 'rettobase == false'.)
          */
@@ -8944,7 +8974,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
 
     if (arm_feature(env, ARM_FEATURE_V8)) {
         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+            /*
+             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
              * we choose to take the UsageFault.
              */
             if ((excret & R_V7M_EXCRET_S_MASK) ||
@@ -8963,7 +8994,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         break;
     case 13: /* Return to Thread using Process stack */
     case 9: /* Return to Thread using Main stack */
-        /* We only need to check NONBASETHRDENA for v7M, because in
+        /*
+         * We only need to check NONBASETHRDENA for v7M, because in
         * v8M this bit does not exist (it is RES1).
         */
        if (!rettobase &&
@@ -9021,7 +9053,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     }
 
     if (ufault) {
-        /* Bad exception return: instead of popping the exception
+        /*
+         * Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
@@ -9051,7 +9084,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     switch_v7m_security_state(env, return_to_secure);
 
     {
-        /* The stack pointer we should be reading the exception frame from
+        /*
+         * The stack pointer we should be reading the exception frame from
          * depends on bits in the magic exception return type value (and
          * for v8M isn't necessarily the stack pointer we will eventually
          * end up resuming execution with). Get a pointer to the location
@@ -9124,7 +9158,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
 
         if (!pop_ok) {
-            /* v7m_stack_read() pended a fault, so take it (as a tail
+            /*
+             * v7m_stack_read() pended a fault, so take it (as a tail
              * chained exception on the same stack frame)
              */
             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
@@ -9132,7 +9167,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
             return;
         }
 
-        /* Returning from an exception with a PC with bit 0 set is defined
+        /*
+         * Returning from an exception with a PC with bit 0 set is defined
          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
@@ -9150,13 +9186,15 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         }
 
         if (arm_feature(env, ARM_FEATURE_V8)) {
-            /* For v8M we have to check whether the xPSR exception field
+            /*
+             * For v8M we have to check whether the xPSR exception field
              * matches the EXCRET value for return to handler/thread
              * before we commit to changing the SP and xPSR.
              */
             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
             if (return_to_handler != will_be_handler) {
-                /* Take an INVPC UsageFault on the current stack.
+                /*
+                 * Take an INVPC UsageFault on the current stack.
                  * By this point we will have switched to the security state
                  * for the background state, so this UsageFault will target
                  * that state.
@@ -9271,7 +9309,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
                 frameptr += 0x40;
             }
         }
-        /* Undo stack alignment (the SPREALIGN bit indicates that the original
+        /*
+         * Undo stack alignment (the SPREALIGN bit indicates that the original
          * pre-exception SP was not 8-aligned and we added a padding word to
          * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
@@ -9297,13 +9336,15 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
                               V7M_CONTROL, SFPA, sfpa);
     }
 
-    /* The restored xPSR exception field will be zero if we're
+    /*
+     * The restored xPSR exception field will be zero if we're
      * resuming in Thread mode. If that doesn't match what the
      * exception return excret specified then this is a UsageFault.
      * v7M requires we make this check here; v8M did it earlier.
      */
     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
-        /* Take an INVPC UsageFault by pushing the stack again;
+        /*
+         * Take an INVPC UsageFault by pushing the stack again;
          * we know we're v7M so this is never a Secure UsageFault.
          */
         bool ignore_stackfaults;
@@ -9325,7 +9366,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
 
 static bool do_v7m_function_return(ARMCPU *cpu)
 {
-    /* v8M security extensions magic function return.
+    /*
+     * v8M security extensions magic function return.
      * We may either:
      * (1) throw an exception (longjump)
      * (2) return true if we successfully handled the function return
@@ -9355,7 +9397,8 @@ static bool do_v7m_function_return(ARMCPU *cpu)
         frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
         frameptr = *frame_sp_p;
 
-        /* These loads may throw an exception (for MPU faults). We want to
+        /*
+         * These loads may throw an exception (for MPU faults). We want to
          * do them as secure, so work out what MMU index that is.
          */
         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
@@ -9395,48 +9438,11 @@ static bool do_v7m_function_return(ARMCPU *cpu)
     return true;
 }
 
-static void arm_log_exception(int idx)
-{
-    if (qemu_loglevel_mask(CPU_LOG_INT)) {
-        const char *exc = NULL;
-        static const char * const excnames[] = {
-            [EXCP_UDEF] = "Undefined Instruction",
-            [EXCP_SWI] = "SVC",
-            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
-            [EXCP_DATA_ABORT] = "Data Abort",
-            [EXCP_IRQ] = "IRQ",
-            [EXCP_FIQ] = "FIQ",
-            [EXCP_BKPT] = "Breakpoint",
-            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
-            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
-            [EXCP_HVC] = "Hypervisor Call",
-            [EXCP_HYP_TRAP] = "Hypervisor Trap",
-            [EXCP_SMC] = "Secure Monitor Call",
-            [EXCP_VIRQ] = "Virtual IRQ",
-            [EXCP_VFIQ] = "Virtual FIQ",
-            [EXCP_SEMIHOST] = "Semihosting call",
-            [EXCP_NOCP] = "v7M NOCP UsageFault",
-            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
-            [EXCP_STKOF] = "v8M STKOF UsageFault",
-            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
-            [EXCP_LSERR] = "v8M LSERR UsageFault",
-            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
-        };
-
-        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
-            exc = excnames[idx];
-        }
-        if (!exc) {
-            exc = "unknown";
-        }
-        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
-    }
-}
-
 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                uint32_t addr, uint16_t *insn)
 {
-    /* Load a 16-bit portion of a v7M instruction, returning true on success,
+    /*
+     * Load a 16-bit portion of a v7M instruction, returning true on success,
      * or false on failure (in which case we will have pended the appropriate
      * exception).
      * We need to do the instruction fetch's MPU and SAU checks
@@ -9459,7 +9465,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
 
     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
     if (!sattrs.nsc || sattrs.ns) {
-        /* This must be the second half of the insn, and it straddles a
+        /*
+         * This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
@@ -9489,7 +9496,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
 
 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
 {
-    /* Check whether this attempt to execute code in a Secure & NS-Callable
+    /*
+     * Check whether this attempt to execute code in a Secure & NS-Callable
      * memory region is for an SG instruction; if so, then emulate the
      * effect of the SG instruction and return true. Otherwise pend
      * the correct kind of exception and return false.
@@ -9498,7 +9506,8 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
     ARMMMUIdx mmu_idx;
     uint16_t insn;
 
-    /* We should never get here unless get_phys_addr_pmsav8() caused
+    /*
+     * We should never get here unless get_phys_addr_pmsav8() caused
      * an exception for NS executing in S&NSC memory.
      */
     assert(!env->v7m.secure);
@@ -9516,7 +9525,8 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
     }
 
     if (insn != 0xe97f) {
-        /* Not an SG instruction first half (we choose the IMPDEF
+        /*
+         * Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
@@ -9527,13 +9537,15 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
     }
 
     if (insn != 0xe97f) {
-        /* Not an SG instruction second half (yes, both halves of the SG
+        /*
+         * Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
     }
 
-    /* OK, we have confirmed that we really have an SG instruction.
+    /*
+     * OK, we have confirmed that we really have an SG instruction.
      * We know we're NS in S memory so don't need to repeat those checks.
      */
     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
@@ -9562,8 +9574,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
 
     arm_log_exception(cs->exception_index);
 
-    /* For exceptions we just mark as pending on the NVIC, and let that
-       handle it. */
+    /*
+     * For exceptions we just mark as pending on the NVIC, and let that
+     * handle it.
+     */
     switch (cs->exception_index) {
     case EXCP_UDEF:
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
@@ -9609,13 +9623,15 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
         break;
     case EXCP_PREFETCH_ABORT:
     case EXCP_DATA_ABORT:
-        /* Note that for M profile we don't have a guest facing FSR, but
+        /*
+         * Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
-            /* Exception generated when we try to execute code at an address
+            /*
+             * Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
@@ -9628,7 +9644,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
             }
             break;
         case M_FAKE_FSR_SFAULT:
-            /* Various flavours of SecureFault for attempts to execute or
+            /*
+             * Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
@@ -9670,7 +9687,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
             break;
         default:
-            /* All other FSR values are either MPU faults or "can't happen
+            /*
+             * All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
@@ -9736,7 +9754,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     if (arm_feature(env, ARM_FEATURE_V8)) {
         lr = R_V7M_EXCRET_RES1_MASK |
             R_V7M_EXCRET_DCRS_MASK;
-        /* The S bit indicates whether we should return to Secure
+        /*
+         * The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
@@ -9771,7 +9790,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
 }
 
-/* Function used to synchronize QEMU's AArch64 register set with AArch32
+/*
+ * Function used to synchronize QEMU's AArch64 register set with AArch32
  * register set. This is necessary when switching between AArch32 and AArch64
  * execution state.
  */
@@ -9785,7 +9805,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
         env->xregs[i] = env->regs[i];
     }
 
-    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+    /*
+     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
      * Otherwise, they come from the banked user regs.
      */
     if (mode == ARM_CPU_MODE_FIQ) {
@@ -9798,7 +9819,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
         }
     }
 
-    /* Registers x13-x23 are the various mode SP and FP registers. Registers
+    /*
+     * Registers x13-x23 are the various mode SP and FP registers. Registers
      * r13 and r14 are only copied if we are in that mode, otherwise we copy
      * from the mode banked register.
      */
@@ -9853,7 +9875,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
     }
 
-    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+    /*
+     * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
      * mode, then we can copy from r8-r14. Otherwise, we copy from the
      * FIQ bank for r8-r14.
      */
@@ -9872,7 +9895,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
     env->pc = env->regs[15];
 }
 
-/* Function used to synchronize QEMU's AArch32 register set with AArch64
+/*
+ * Function used to synchronize QEMU's AArch32 register set with AArch64
  * register set. This is necessary when switching between AArch32 and AArch64
  * execution state.
  */
@@ -9886,7 +9910,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
         env->regs[i] = env->xregs[i];
     }
 
-    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
+    /*
+     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
      * Otherwise, we copy x8-x12 into the banked user regs.
      */
     if (mode == ARM_CPU_MODE_FIQ) {
@@ -9899,7 +9924,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
         }
     }
 
-    /* Registers r13 & r14 depend on the current mode.
+    /*
+     * Registers r13 & r14 depend on the current mode.
      * If we are in a given mode, we copy the corresponding x registers to r13
      * and r14. Otherwise, we copy the x register to the banked r13 and r14
      * for the mode.
@@ -9910,7 +9936,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
     } else {
         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
 
-        /* HYP is an exception in that it does not have its own banked r14 but
+        /*
+         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
@@ -12056,7 +12083,7 @@ static bool v8m_is_sau_exempt(CPUARMState *env,
         (address >= 0xe00ff000 && address <= 0xe00fffff);
 }
 
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
-                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                V8M_SAttributes *sattrs)
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                         V8M_SAttributes *sattrs)
 {
@@ -12163,7 +12190,7 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
     }
 }
 
-static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
-                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                              int *prot, bool *is_subpage,
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                       int *prot, bool *is_subpage,
@@ -12567,7 +12594,7 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
  * @fi: set to fault info if the translation fails
  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
-static bool get_phys_addr(CPUARMState *env, target_ulong address,
-                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                          target_ulong *page_size,
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                   target_ulong *page_size,
@@ -12753,7 +12780,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
             return value;
         }
     case 0x94: /* CONTROL_NS */
-        /* We have to handle this here because unprivileged Secure code
+        /*
+         * We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
@@ -12806,7 +12834,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
         return env->v7m.faultmask[M_REG_NS];
     case 0x98: /* SP_NS */
     {
-        /* This gives the non-secure SP selected based on whether we're
+        /*
+         * This gives the non-secure SP selected based on whether we're
         * currently in handler mode or not, using the NS CONTROL.SPSEL.
         */
        bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
@@ -12857,7 +12886,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
 
 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
 {
-    /* We're passed bits [11..0] of the instruction; extract
+    /*
+     * We're passed bits [11..0] of the instruction; extract
      * SYSm and the mask bits.
      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
      * we choose to treat them as if the mask bits were valid.
@@ -12943,7 +12973,8 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
         return;
     case 0x98: /* SP_NS */
     {
-        /* This gives the non-secure SP selected based on whether we're
+        /*
+         * This gives the non-secure SP selected based on whether we're
         * currently in handler mode or not, using the NS CONTROL.SPSEL.
         */
        bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
@@ -13104,7 +13135,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
     bool targetsec = env->v7m.secure;
     bool is_subpage;
 
-    /* Work out what the security state and privilege level we're
+    /*
+     * Work out what the security state and privilege level we're
      * interested in is...
      */
     if (alt) {
@@ -13121,12 +13153,14 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
     /* ...and then figure out which MMU index this is */
     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
 
-    /* We know that the MPU and SAU don't care about the access type
+    /*
+     * We know that the MPU and SAU don't care about the access type
      * for our purposes beyond that we don't want to claim to be
      * an insn fetch, so we arbitrarily call this a read.
      */
 
-    /* MPU region info only available for privileged or if
+    /*
+     * MPU region info only available for privileged or if
      * inspecting the other MPU state.
      */
     if (arm_current_el(env) != 0 || alt) {
@@ -13176,146 +13210,6 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
 
 #endif
 
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-
-#ifdef CONFIG_USER_ONLY
-    cpu->env.exception.vaddress = address;
-    if (access_type == MMU_INST_FETCH) {
-        cs->exception_index = EXCP_PREFETCH_ABORT;
-    } else {
-        cs->exception_index = EXCP_DATA_ABORT;
-    }
-    cpu_loop_exit_restore(cs, retaddr);
-#else
-    hwaddr phys_addr;
-    target_ulong page_size;
-    int prot, ret;
-    MemTxAttrs attrs = {};
-    ARMMMUFaultInfo fi = {};
-
-    /*
-     * Walk the page table and (if the mapping exists) add the page
-     * to the TLB. On success, return true. Otherwise, if probing,
-     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
-     * register format, and signal the fault.
-     */
-    ret = get_phys_addr(&cpu->env, address, access_type,
-                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
-    if (likely(!ret)) {
-        /*
-         * Map a single [sub]page. Regions smaller than our declared
-         * target page size are handled specially, so for those we
-         * pass in the exact addresses.
-         */
-        if (page_size >= TARGET_PAGE_SIZE) {
-            phys_addr &= TARGET_PAGE_MASK;
-            address &= TARGET_PAGE_MASK;
-        }
-        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
-                                prot, mmu_idx, page_size);
-        return true;
-    } else if (probe) {
-        return false;
-    } else {
-        /* now we have a real cpu fault */
-        cpu_restore_state(cs, retaddr, true);
-        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
-    }
-#endif
-}
-
-void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
-{
-    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
-     * Note that we do not implement the (architecturally mandated)
-     * alignment fault for attempts to use this on Device memory
-     * (which matches the usual QEMU behaviour of not implementing either
-     * alignment faults or any memory attribute handling).
-     */
-    ARMCPU *cpu = env_archcpu(env);
-    uint64_t blocklen = 4 << cpu->dcz_blocksize;
-    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
-
-#ifndef CONFIG_USER_ONLY
-    {
-        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
-         * the block size so we might have to do more than one TLB lookup.
-         * We know that in fact for any v8 CPU the page size is at least 4K
-         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
-         * 1K as an artefact of legacy v5 subpage support being present in the
-         * same QEMU executable. So in practice the hostaddr[] array has
-         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
-         */
-        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
-        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
-        int try, i;
-        unsigned mmu_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
-
-        assert(maxidx <= ARRAY_SIZE(hostaddr));
-
-        for (try = 0; try < 2; try++) {
-            for (i = 0; i < maxidx; i++) {
-                hostaddr[i] = tlb_vaddr_to_host(env,
-                                                vaddr + TARGET_PAGE_SIZE * i,
-                                                1, mmu_idx);
-                if (!hostaddr[i]) {
-                    break;
-                }
-            }
-            if (i == maxidx) {
-                /* If it's all in the TLB it's fair game for just writing to;
-                 * we know we don't need to update dirty status, etc.
-                 */
-                for (i = 0; i < maxidx - 1; i++) {
-                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
-                }
-                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
-                return;
-            }
-            /* OK, try a store and see if we can populate the tlb. This
-             * might cause an exception if the memory isn't writable,
-             * in which case we will longjmp out of here. We must for
-             * this purpose use the actual register value passed to us
-             * so that we get the fault address right.
-             */
-            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
-
-            /* Now we can populate the other TLB entries, if any */
-            for (i = 0; i < maxidx; i++) {
-                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
-                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
-                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
-                }
-            }
-        }
-
-        /* Slow path (probably attempt to do this to an I/O device or
-         * similar, or clearing of a block of code we have translations
-         * cached for). Just do a series of byte writes as the architecture
-         * demands. It's not worth trying to use a cpu_physical_memory_map(),
-         * memset(), unmap() sequence here because:
-         *  + we'd need to account for the blocksize being larger than a page
-         *  + the direct-RAM access case is almost always going to be dealt
-         *    with in the fastpath code above, so there's no speed benefit
-         *  + we would have to deal with the map returning NULL because the
-         *    bounce buffer was in use
-         */
-        for (i = 0; i < blocklen; i++) {
-            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
-        }
-    }
-#else
-    memset(g2h(vaddr), 0, blocklen);
-#endif
-}
-
 /* Note that signed overflow is undefined in C. The following routines are
    careful to use unsigned types where modulo arithmetic is required.
    Failure to do so _will_ break on newer gcc. */

diff --git a/target/arm/internals.h b/target/arm/internals.h

@@ -529,11 +529,15 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
 /* Callback function for when a watchpoint or breakpoint triggers. */
 void arm_debug_excp_handler(CPUState *cs);
 
-#ifdef CONFIG_USER_ONLY
+#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
 static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
 {
     return false;
 }
+static inline void arm_handle_psci_call(ARMCPU *cpu)
+{
+    g_assert_not_reached();
+}
 #else
 /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
 bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
@@ -765,9 +769,6 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
 
-void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
-                       int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
-
 /* Return true if the stage 1 translation regime is using LPAE format page
  * tables */
 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
@@ -891,6 +892,27 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
     }
 }
 
+/**
+ * v7m_cpacr_pass:
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static inline bool v7m_cpacr_pass(CPUARMState *env,
+                                  bool is_secure, bool is_priv)
+{
+    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+    case 0:
+    case 2: /* UNPREDICTABLE: we treat like 0 */
+        return false;
+    case 1:
+        return is_priv;
+    case 3:
+        return true;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 /**
  * aarch32_mode_name(): Return name of the AArch32 CPU mode
  * @psr: Program Status Register indicating CPU mode
@@ -985,4 +1007,43 @@ static inline int exception_target_el(CPUARMState *env)
     return target_el;
 }
 
+#ifndef CONFIG_USER_ONLY
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
+    bool ns;
+    bool nsc;
+    uint8_t sregion;
+    bool srvalid;
+    uint8_t iregion;
+    bool irvalid;
+} V8M_SAttributes;
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                         V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                       int *prot, bool *is_subpage,
+                       ARMMMUFaultInfo *fi, uint32_t *mregion);
+
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+    unsigned int attrs:8; /* as in the MAIR register encoding */
+    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                   target_ulong *page_size,
+                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+
+void arm_log_exception(int idx);
+
+#endif /* !CONFIG_USER_ONLY */
+
 #endif

diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c

@@ -17,6 +17,7 @@
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
 #include "qemu/osdep.h"
+#include "qemu/units.h"
 #include "qemu/log.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
@@ -87,136 +88,6 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
     return val;
 }
 
-#if !defined(CONFIG_USER_ONLY)
-static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
-                                            unsigned int target_el,
-                                            bool same_el, bool ea,
-                                            bool s1ptw, bool is_write,
-                                            int fsc)
-{
-    uint32_t syn;
-
-    /* ISV is only set for data aborts routed to EL2 and
-     * never for stage-1 page table walks faulting on stage 2.
-     *
-     * Furthermore, ISV is only set for certain kinds of load/stores.
-     * If the template syndrome does not have ISV set, we should leave
-     * it cleared.
-     *
-     * See ARMv8 specs, D7-1974:
-     * ISS encoding for an exception from a Data Abort, the
-     * ISV field.
-     */
-    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
-        syn = syn_data_abort_no_iss(same_el,
-                                    ea, 0, s1ptw, is_write, fsc);
-    } else {
-        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
-         * syndrome created at translation time.
-         * Now we create the runtime syndrome with the remaining fields.
-         */
-        syn = syn_data_abort_with_iss(same_el,
-                                      0, 0, 0, 0, 0,
-                                      ea, 0, s1ptw, is_write, fsc,
-                                      false);
-        /* Merge the runtime syndrome with the template syndrome. */
-        syn |= template_syn;
-    }
-    return syn;
-}
-
-void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
-                       int mmu_idx, ARMMMUFaultInfo *fi)
-{
-    CPUARMState *env = &cpu->env;
-    int target_el;
-    bool same_el;
-    uint32_t syn, exc, fsr, fsc;
-    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
-
-    target_el = exception_target_el(env);
-    if (fi->stage2) {
-        target_el = 2;
-        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
-    }
-    same_el = (arm_current_el(env) == target_el);
-
-    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
-        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
-        /* LPAE format fault status register : bottom 6 bits are
-         * status code in the same form as needed for syndrome
-         */
-        fsr = arm_fi_to_lfsc(fi);
-        fsc = extract32(fsr, 0, 6);
-    } else {
-        fsr = arm_fi_to_sfsc(fi);
-        /* Short format FSR : this fault will never actually be reported
-         * to an EL that uses a syndrome register. Use a (currently)
-         * reserved FSR code in case the constructed syndrome does leak
-         * into the guest somehow.
-         */
-        fsc = 0x3f;
-    }
-
-    if (access_type == MMU_INST_FETCH) {
-        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
-        exc = EXCP_PREFETCH_ABORT;
-    } else {
-        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
-                                   same_el, fi->ea, fi->s1ptw,
-                                   access_type == MMU_DATA_STORE,
-                                   fsc);
-        if (access_type == MMU_DATA_STORE
-            && arm_feature(env, ARM_FEATURE_V6)) {
-            fsr |= (1 << 11);
-        }
-        exc = EXCP_DATA_ABORT;
-    }
-
-    env->exception.vaddress = addr;
-    env->exception.fsr = fsr;
-    raise_exception(env, exc, syn, target_el);
-}
-
-/* Raise a data fault alignment exception for the specified virtual address */
-void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
-                                 MMUAccessType access_type,
-                                 int mmu_idx, uintptr_t retaddr)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    ARMMMUFaultInfo fi = {};
-
-    /* now we have a real cpu fault */
-    cpu_restore_state(cs, retaddr, true);
-
-    fi.type = ARMFault_Alignment;
-    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
-}
-
-/* arm_cpu_do_transaction_failed: handle a memory system error response
- * (eg "no device/memory present at address") by raising an external abort
- * exception
- */
-void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
-                                   vaddr addr, unsigned size,
-                                   MMUAccessType access_type,
-                                   int mmu_idx, MemTxAttrs attrs,
-                                   MemTxResult response, uintptr_t retaddr)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    ARMMMUFaultInfo fi = {};
-
-    /* now we have a real cpu fault */
-    cpu_restore_state(cs, retaddr, true);
-
-    fi.ea = arm_extabort_type(response);
-    fi.type = ARMFault_SyncExternal;
-    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
-}
-
-#endif /* !defined(CONFIG_USER_ONLY) */
-
 void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
 {
     /*
@@ -970,7 +841,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     int bt;
     uint32_t contextidr;
 
-    /* Links to unimplemented or non-context aware breakpoints are
+    /*
+     * Links to unimplemented or non-context aware breakpoints are
      * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
      * as if linked to an UNKNOWN context-aware breakpoint (in which
      * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
@@ -989,7 +861,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
 
     bt = extract64(bcr, 20, 4);
 
-    /* We match the whole register even if this is AArch32 using the
+    /*
+     * We match the whole register even if this is AArch32 using the
      * short descriptor format (in which case it holds both PROCID and ASID),
      * since we don't implement the optional v7 context ID masking.
      */
@@ -1006,7 +879,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     case 9: /* linked VMID match (reserved if no EL2) */
     case 11: /* linked context ID and VMID match (reserved if no EL2) */
     default:
-        /* Links to Unlinked context breakpoints must generate no
+        /*
+         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
@@ -1020,7 +894,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
     CPUARMState *env = &cpu->env;
     uint64_t cr;
     int pac, hmc, ssc, wt, lbn;
-    /* Note that for watchpoints the check is against the CPU security
+    /*
+     * Note that for watchpoints the check is against the CPU security
      * state, not the S/NS attribute on the offending data access.
      */
     bool is_secure = arm_is_secure(env);
@@ -1034,7 +909,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
         }
         cr = env->cp15.dbgwcr[n];
         if (wp->hitattrs.user) {
-            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+            /*
+             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
@@ -1048,7 +924,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
         }
         cr = env->cp15.dbgbcr[n];
     }
-    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+    /*
+     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
      * enabled and that the address and access type match; for breakpoints
      * we know the address matched; check the remaining fields, including
      * linked breakpoints. We rely on WCR and BCR having the same layout
@@ -1116,7 +993,8 @@ static bool check_watchpoints(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     int n;
 
-    /* If watchpoints are disabled globally or we can't take debug
+    /*
+     * If watchpoints are disabled globally or we can't take debug
      * exceptions here then watchpoint firings are ignored.
      */
     if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1137,7 +1015,8 @@ static bool check_breakpoints(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     int n;
 
-    /* If breakpoints are disabled globally or we can't take debug
+    /*
+     * If breakpoints are disabled globally or we can't take debug
      * exceptions here then breakpoint firings are ignored.
      */
     if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1164,7 +1043,8 @@ void HELPER(check_breakpoints)(CPUARMState *env)
 
 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
 {
-    /* Called by core code when a CPU watchpoint fires; need to check if this
+    /*
+     * Called by core code when a CPU watchpoint fires; need to check if this
      * is also an architectural watchpoint match.
      */
     ARMCPU *cpu = ARM_CPU(cs);
@@ -1177,7 +1057,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
 
-    /* In BE32 system mode, target memory is stored byteswapped (on a
+    /*
+     * In BE32 system mode, target memory is stored byteswapped (on a
      * little-endian host system), and by the time we reach here (via an
      * opcode helper) the addresses of subword accesses have been adjusted
      * to account for that, which means that watchpoints will not match.
@@ -1196,7 +1077,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
 
 void arm_debug_excp_handler(CPUState *cs)
 {
-    /* Called by core code when a watchpoint or breakpoint fires;
+    /*
+     * Called by core code when a watchpoint or breakpoint fires;
      * need to check which one and raise the appropriate exception.
      */
     ARMCPU *cpu = ARM_CPU(cs);
@@ -1220,7 +1102,8 @@ void arm_debug_excp_handler(CPUState *cs)
             uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
             bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
 
-            /* (1) GDB breakpoints should be handled first.
+            /*
+             * (1) GDB breakpoints should be handled first.
             * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
             * since singlestep is also done by generating a debug internal
             * exception.
@@ -1231,7 +1114,8 @@ void arm_debug_excp_handler(CPUState *cs)
             }
 
             env->exception.fsr = arm_debug_exception_fsr(env);
-            /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+            /*
+             * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
             * values to the guest that it shouldn't be able to see at its
             * exception/security level.
             */
@@ -1307,3 +1191,95 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
         return ((uint32_t)x >> shift) | (x << (32 - shift));
     }
 }
+
+void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
+{
+    /*
+     * Implement DC ZVA, which zeroes a fixed-length block of memory.
+     * Note that we do not implement the (architecturally mandated)
+     * alignment fault for attempts to use this on Device memory
+     * (which matches the usual QEMU behaviour of not implementing either
+     * alignment faults or any memory attribute handling).
+     */
+    ARMCPU *cpu = env_archcpu(env);
+    uint64_t blocklen = 4 << cpu->dcz_blocksize;
+    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
+
+#ifndef CONFIG_USER_ONLY
+    {
+        /*
+         * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
+         * the block size so we might have to do more than one TLB lookup.
+         * We know that in fact for any v8 CPU the page size is at least 4K
+         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
+         * 1K as an artefact of legacy v5 subpage support being present in the
+         * same QEMU executable. So in practice the hostaddr[] array has
+         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
+         */
+        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
+        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
+        int try, i;
+        unsigned mmu_idx = cpu_mmu_index(env, false);
+        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+
+        assert(maxidx <= ARRAY_SIZE(hostaddr));
+
+        for (try = 0; try < 2; try++) {
+            for (i = 0; i < maxidx; i++) {
+                hostaddr[i] = tlb_vaddr_to_host(env,
+                                                vaddr + TARGET_PAGE_SIZE * i,
+                                                1, mmu_idx);
+                if (!hostaddr[i]) {
+                    break;
+                }
+            }
+            if (i == maxidx) {
+                /*
+                 * If it's all in the TLB it's fair game for just writing to;
+                 * we know we don't need to update dirty status, etc.
+                 */
+                for (i = 0; i < maxidx - 1; i++) {
+                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
+                }
+                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
+                return;
+            }
+            /*
+             * OK, try a store and see if we can populate the tlb. This
+             * might cause an exception if the memory isn't writable,
+             * in which case we will longjmp out of here. We must for
+             * this purpose use the actual register value passed to us
+             * so that we get the fault address right.
+             */
+            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
+
+            /* Now we can populate the other TLB entries, if any */
+            for (i = 0; i < maxidx; i++) {
+                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
+                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
+                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
+                }
+            }
+        }
+
+        /*
+         * Slow path (probably attempt to do this to an I/O device or
+         * similar, or clearing of a block of code we have translations
+         * cached for). Just do a series of byte writes as the architecture
+         * demands. It's not worth trying to use a cpu_physical_memory_map(),
+         * memset(), unmap() sequence here because:
+         *  + we'd need to account for the blocksize being larger than a page
+         *  + the direct-RAM access case is almost always going to be dealt
+         *    with in the fastpath code above, so there's no speed benefit
+         *  + we would have to deal with the map returning NULL because the
+         *    bounce buffer was in use
+         */
+        for (i = 0; i < blocklen; i++) {
+            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
+        }
+    }
+#else
+    memset(g2h(vaddr), 0, blocklen);
+#endif
+}

diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
new file mode 100644

@ -0,0 +1,200 @@
/*
* ARM TLB (Translation lookaside buffer) helpers.
*
* This code is licensed under the GNU GPL v2 or later.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#if !defined(CONFIG_USER_ONLY)
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
unsigned int target_el,
bool same_el, bool ea,
bool s1ptw, bool is_write,
int fsc)
{
uint32_t syn;
/*
* ISV is only set for data aborts routed to EL2 and
* never for stage-1 page table walks faulting on stage 2.
*
* Furthermore, ISV is only set for certain kinds of load/stores.
* If the template syndrome does not have ISV set, we should leave
* it cleared.
*
* See ARMv8 specs, D7-1974:
* ISS encoding for an exception from a Data Abort, the
* ISV field.
*/
if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
syn = syn_data_abort_no_iss(same_el,
ea, 0, s1ptw, is_write, fsc);
} else {
/*
* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
* syndrome created at translation time.
* Now we create the runtime syndrome with the remaining fields.
*/
syn = syn_data_abort_with_iss(same_el,
0, 0, 0, 0, 0,
ea, 0, s1ptw, is_write, fsc,
false);
/* Merge the runtime syndrome with the template syndrome. */
syn |= template_syn;
}
return syn;
}
static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
int target_el;
bool same_el;
uint32_t syn, exc, fsr, fsc;
ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
target_el = exception_target_el(env);
if (fi->stage2) {
target_el = 2;
env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
}
same_el = (arm_current_el(env) == target_el);
if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
/*
* LPAE format fault status register : bottom 6 bits are
* status code in the same form as needed for syndrome
*/
fsr = arm_fi_to_lfsc(fi);
fsc = extract32(fsr, 0, 6);
} else {
fsr = arm_fi_to_sfsc(fi);
/*
* Short format FSR : this fault will never actually be reported
* to an EL that uses a syndrome register. Use a (currently)
* reserved FSR code in case the constructed syndrome does leak
* into the guest somehow.
*/
fsc = 0x3f;
}
if (access_type == MMU_INST_FETCH) {
syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
exc = EXCP_PREFETCH_ABORT;
} else {
syn = merge_syn_data_abort(env->exception.syndrome, target_el,
same_el, fi->ea, fi->s1ptw,
access_type == MMU_DATA_STORE,
fsc);
if (access_type == MMU_DATA_STORE
&& arm_feature(env, ARM_FEATURE_V6)) {
fsr |= (1 << 11);
}
exc = EXCP_DATA_ABORT;
}
env->exception.vaddress = addr;
env->exception.fsr = fsr;
raise_exception(env, exc, syn, target_el);
}
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
ARMMMUFaultInfo fi = {};
/* now we have a real cpu fault */
cpu_restore_state(cs, retaddr, true);
fi.type = ARMFault_Alignment;
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
/*
* arm_cpu_do_transaction_failed: handle a memory system error response
* (eg "no device/memory present at address") by raising an external abort
* exception
*/
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
ARMMMUFaultInfo fi = {};
/* now we have a real cpu fault */
cpu_restore_state(cs, retaddr, true);
fi.ea = arm_extabort_type(response);
fi.type = ARMFault_SyncExternal;
arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);

#ifdef CONFIG_USER_ONLY
    cpu->env.exception.vaddress = address;
    if (access_type == MMU_INST_FETCH) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
    }
    cpu_loop_exit_restore(cs, retaddr);
#else
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
#endif
}
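/*
 * Example (illustration only, not part of the patch): for mappings of at
 * least one target page, arm_cpu_tlb_fill() aligns both addresses down
 * with TARGET_PAGE_MASK before installing the TLB entry. A minimal
 * standalone sketch of that alignment, assuming a 4 KiB target page;
 * all values here are hypothetical:
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TARGET_PAGE_SIZE 4096u                     /* assumed */
#define EXAMPLE_TARGET_PAGE_MASK (~(uint64_t)(EXAMPLE_TARGET_PAGE_SIZE - 1))

int main(void)
{
    uint64_t address = 0x40001234, phys_addr = 0x80001234;
    uint64_t page_size = 0x10000;   /* a 64 KiB mapping, hypothetical */

    /* Sub-page regions keep their exact addresses; larger ones are aligned. */
    if (page_size >= EXAMPLE_TARGET_PAGE_SIZE) {
        address &= EXAMPLE_TARGET_PAGE_MASK;
        phys_addr &= EXAMPLE_TARGET_PAGE_MASK;
    }
    printf("virt 0x%llx -> phys 0x%llx\n",
           (unsigned long long)address, (unsigned long long)phys_addr);
    return 0;
}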

--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c

@@ -27,7 +27,6 @@
 #include "translate.h"
 #include "internals.h"
 #include "qemu/host-utils.h"
-#include "qemu/qemu-print.h"
 #include "hw/semihosting/semihost.h"
 #include "exec/gen-icount.h"
@@ -152,133 +151,6 @@ static void set_btype(DisasContext *s, int val)
     s->btype = -1;
 }
 
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    uint32_t psr = pstate_read(env);
-    int i;
-    int el = arm_current_el(env);
-    const char *ns_status;
-
-    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
-    for (i = 0; i < 32; i++) {
-        if (i == 31) {
-            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
-        } else {
-            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
-                         (i + 2) % 3 ? " " : "\n");
-        }
-    }
-
-    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
-        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
-    } else {
-        ns_status = "";
-    }
-    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
-                 psr,
-                 psr & PSTATE_N ? 'N' : '-',
-                 psr & PSTATE_Z ? 'Z' : '-',
-                 psr & PSTATE_C ? 'C' : '-',
-                 psr & PSTATE_V ? 'V' : '-',
-                 ns_status,
-                 el,
-                 psr & PSTATE_SP ? 'h' : 't');
-
-    if (cpu_isar_feature(aa64_bti, cpu)) {
-        qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
-    }
-    if (!(flags & CPU_DUMP_FPU)) {
-        qemu_fprintf(f, "\n");
-        return;
-    }
-    if (fp_exception_el(env, el) != 0) {
-        qemu_fprintf(f, " FPU disabled\n");
-        return;
-    }
-    qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
-                 vfp_get_fpcr(env), vfp_get_fpsr(env));
-
-    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
-        int j, zcr_len = sve_zcr_len_for_el(env, el);
-
-        for (i = 0; i <= FFR_PRED_NUM; i++) {
-            bool eol;
-            if (i == FFR_PRED_NUM) {
-                qemu_fprintf(f, "FFR=");
-                /* It's last, so end the line. */
-                eol = true;
-            } else {
-                qemu_fprintf(f, "P%02d=", i);
-                switch (zcr_len) {
-                case 0:
-                    eol = i % 8 == 7;
-                    break;
-                case 1:
-                    eol = i % 6 == 5;
-                    break;
-                case 2:
-                case 3:
-                    eol = i % 3 == 2;
-                    break;
-                default:
-                    /* More than one quadword per predicate. */
-                    eol = true;
-                    break;
-                }
-            }
-            for (j = zcr_len / 4; j >= 0; j--) {
-                int digits;
-                if (j * 4 + 4 <= zcr_len + 1) {
-                    digits = 16;
-                } else {
-                    digits = (zcr_len % 4 + 1) * 4;
-                }
-                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
-                             env->vfp.pregs[i].p[j],
-                             j ? ":" : eol ? "\n" : " ");
-            }
-        }
-
-        for (i = 0; i < 32; i++) {
-            if (zcr_len == 0) {
-                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
-                             i, env->vfp.zregs[i].d[1],
-                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
-            } else if (zcr_len == 1) {
-                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
-                             ":%016" PRIx64 ":%016" PRIx64 "\n",
-                             i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
-                             env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
-            } else {
-                for (j = zcr_len; j >= 0; j--) {
-                    bool odd = (zcr_len - j) % 2 != 0;
-                    if (j == zcr_len) {
-                        qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
-                    } else if (!odd) {
-                        if (j > 0) {
-                            qemu_fprintf(f, " [%x-%x]=", j, j - 1);
-                        } else {
-                            qemu_fprintf(f, " [%x]=", j);
-                        }
-                    }
-                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
-                                 env->vfp.zregs[i].d[j * 2 + 1],
-                                 env->vfp.zregs[i].d[j * 2],
-                                 odd || j == 0 ? "\n" : ":");
-                }
-            }
-        }
-    } else {
-        for (i = 0; i < 32; i++) {
-            uint64_t *q = aa64_vfp_qreg(env, i);
-            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
-                         i, q[1], q[0], (i & 1 ? "\n" : " "));
-        }
-    }
-}
-
 void gen_a64_set_pc_im(uint64_t val)
 {
     tcg_gen_movi_i64(cpu_pc, val);
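/*
 * Example (illustration only, not part of the patch): the dump routine
 * removed above renders PSTATE.NZCV as single characters. A minimal
 * standalone sketch of that formatting, with assumed bit positions
 * (N=31, Z=30, C=29, V=28 as in the ARM ARM) and a hypothetical value:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t psr = 0x60000000;  /* hypothetical PSTATE: Z and C set */

    printf("PSTATE=%08x %c%c%c%c\n", psr,
           psr & (1u << 31) ? 'N' : '-',
           psr & (1u << 30) ? 'Z' : '-',
           psr & (1u << 29) ? 'C' : '-',
           psr & (1u << 28) ? 'V' : '-');
    return 0;
}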

--- a/target/arm/translate.c
+++ b/target/arm/translate.c

@@ -28,7 +28,6 @@
 #include "tcg-op-gvec.h"
 #include "qemu/log.h"
 #include "qemu/bitops.h"
-#include "qemu/qemu-print.h"
 #include "arm_ldst.h"
 #include "hw/semihosting/semihost.h"
@@ -12342,92 +12341,6 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
     translator_loop(ops, &dc.base, cpu, tb, max_insns);
 }
 
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    int i;
-
-    if (is_a64(env)) {
-        aarch64_cpu_dump_state(cs, f, flags);
-        return;
-    }
-
-    for(i=0;i<16;i++) {
-        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
-        if ((i % 4) == 3)
-            qemu_fprintf(f, "\n");
-        else
-            qemu_fprintf(f, " ");
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        uint32_t xpsr = xpsr_read(env);
-        const char *mode;
-        const char *ns_status = "";
-
-        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-            ns_status = env->v7m.secure ? "S " : "NS ";
-        }
-
-        if (xpsr & XPSR_EXCP) {
-            mode = "handler";
-        } else {
-            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
-                mode = "unpriv-thread";
-            } else {
-                mode = "priv-thread";
-            }
-        }
-
-        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
-                     xpsr,
-                     xpsr & XPSR_N ? 'N' : '-',
-                     xpsr & XPSR_Z ? 'Z' : '-',
-                     xpsr & XPSR_C ? 'C' : '-',
-                     xpsr & XPSR_V ? 'V' : '-',
-                     xpsr & XPSR_T ? 'T' : 'A',
-                     ns_status,
-                     mode);
-    } else {
-        uint32_t psr = cpsr_read(env);
-        const char *ns_status = "";
-
-        if (arm_feature(env, ARM_FEATURE_EL3) &&
-            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
-            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
-        }
-
-        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
-                     psr,
-                     psr & CPSR_N ? 'N' : '-',
-                     psr & CPSR_Z ? 'Z' : '-',
-                     psr & CPSR_C ? 'C' : '-',
-                     psr & CPSR_V ? 'V' : '-',
-                     psr & CPSR_T ? 'T' : 'A',
-                     ns_status,
-                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
-    }
-
-    if (flags & CPU_DUMP_FPU) {
-        int numvfpregs = 0;
-        if (arm_feature(env, ARM_FEATURE_VFP)) {
-            numvfpregs += 16;
-        }
-        if (arm_feature(env, ARM_FEATURE_VFP3)) {
-            numvfpregs += 16;
-        }
-        for (i = 0; i < numvfpregs; i++) {
-            uint64_t v = *aa32_vfp_dreg(env, i);
-            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
-                         i * 2, (uint32_t)v,
-                         i * 2 + 1, (uint32_t)(v >> 32),
-                         i, v);
-        }
-        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
-    }
-}
-
 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                           target_ulong *data)
 {
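/*
 * Example (illustration only, not part of the patch): the removed
 * arm_cpu_dump_state() picks an M-profile mode label from XPSR.EXCP and
 * CONTROL.nPRIV. A minimal standalone sketch of that selection, with
 * hypothetical inputs:
 */
#include <stdbool.h>
#include <stdio.h>

static const char *example_m_mode(bool in_exception, bool npriv)
{
    if (in_exception) {
        return "handler";
    }
    return npriv ? "unpriv-thread" : "priv-thread";
}

int main(void)
{
    printf("%s\n", example_m_mode(false, true));  /* -> unpriv-thread */
    return 0;
}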

--- a/target/arm/translate.h
+++ b/target/arm/translate.h

@@ -169,7 +169,6 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
 #ifdef TARGET_AARCH64
 void a64_translate_init(void);
 void gen_a64_set_pc_im(uint64_t val);
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags);
 extern const TranslatorOps aarch64_translator_ops;
 #else
 static inline void a64_translate_init(void)
@@ -179,10 +178,6 @@ static inline void a64_translate_init(void)
 static inline void gen_a64_set_pc_im(uint64_t val)
 {
 }
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
-}
 #endif
 
 void arm_test_cc(DisasCompare *cmp, int cc);

--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c

@@ -18,122 +18,89 @@
  */
 
 #include "qemu/osdep.h"
-#include "qemu/log.h"
 #include "cpu.h"
 #include "exec/helper-proto.h"
-#include "fpu/softfloat.h"
 #include "internals.h"
+#ifdef CONFIG_TCG
+#include "qemu/log.h"
+#include "fpu/softfloat.h"
+#endif
 
 /* VFP support. We follow the convention used for VFP instructions:
    Single precision routines have a "s" suffix, double precision a
    "d" suffix. */
 
+#ifdef CONFIG_TCG
+
 /* Convert host exception flags to vfp form. */
 static inline int vfp_exceptbits_from_host(int host_bits)
 {
     int target_bits = 0;
 
-    if (host_bits & float_flag_invalid)
+    if (host_bits & float_flag_invalid) {
         target_bits |= 1;
-    if (host_bits & float_flag_divbyzero)
+    }
+    if (host_bits & float_flag_divbyzero) {
         target_bits |= 2;
-    if (host_bits & float_flag_overflow)
+    }
+    if (host_bits & float_flag_overflow) {
         target_bits |= 4;
-    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
+    }
+    if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
         target_bits |= 8;
-    if (host_bits & float_flag_inexact)
+    }
+    if (host_bits & float_flag_inexact) {
         target_bits |= 0x10;
-    if (host_bits & float_flag_input_denormal)
+    }
+    if (host_bits & float_flag_input_denormal) {
         target_bits |= 0x80;
+    }
     return target_bits;
 }
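/*
 * Example (illustration only, not part of the patch): the mapping above
 * packs softfloat flags into the FPSCR cumulative-exception bits
 * (IOC=1, DZC=2, OFC=4, UFC=8, IXC=0x10, IDC=0x80). A minimal standalone
 * sketch with hypothetical host flag encodings:
 */
#include <stdio.h>

#define EX_FLAG_INVALID   0x01   /* hypothetical host flag encoding */
#define EX_FLAG_DIVBYZERO 0x02
#define EX_FLAG_INEXACT   0x04

static int example_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & EX_FLAG_INVALID) {
        target_bits |= 1;       /* IOC */
    }
    if (host_bits & EX_FLAG_DIVBYZERO) {
        target_bits |= 2;       /* DZC */
    }
    if (host_bits & EX_FLAG_INEXACT) {
        target_bits |= 0x10;    /* IXC */
    }
    return target_bits;
}

int main(void)
{
    printf("0x%x\n", example_exceptbits_from_host(EX_FLAG_INVALID |
                                                  EX_FLAG_INEXACT));
    return 0;   /* prints 0x11 */
}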
-uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
-{
-    uint32_t i, fpscr;
-
-    fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
-            | (env->vfp.vec_len << 16)
-            | (env->vfp.vec_stride << 20);
-
-    i = get_float_exception_flags(&env->vfp.fp_status);
-    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
-    /* FZ16 does not generate an input denormal exception. */
-    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
-          & ~float_flag_input_denormal);
-    fpscr |= vfp_exceptbits_from_host(i);
-
-    i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
-    fpscr |= i ? FPCR_QC : 0;
-
-    return fpscr;
-}
-
-uint32_t vfp_get_fpscr(CPUARMState *env)
-{
-    return HELPER(vfp_get_fpscr)(env);
-}
-
 /* Convert vfp exception flags to target form. */
 static inline int vfp_exceptbits_to_host(int target_bits)
 {
     int host_bits = 0;
 
-    if (target_bits & 1)
+    if (target_bits & 1) {
         host_bits |= float_flag_invalid;
-    if (target_bits & 2)
+    }
+    if (target_bits & 2) {
         host_bits |= float_flag_divbyzero;
-    if (target_bits & 4)
+    }
+    if (target_bits & 4) {
         host_bits |= float_flag_overflow;
-    if (target_bits & 8)
+    }
+    if (target_bits & 8) {
         host_bits |= float_flag_underflow;
-    if (target_bits & 0x10)
+    }
+    if (target_bits & 0x10) {
         host_bits |= float_flag_inexact;
-    if (target_bits & 0x80)
+    }
+    if (target_bits & 0x80) {
         host_bits |= float_flag_input_denormal;
+    }
     return host_bits;
 }
-void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
+{
+    uint32_t i;
+
+    i = get_float_exception_flags(&env->vfp.fp_status);
+    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
+    /* FZ16 does not generate an input denormal exception. */
+    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
+          & ~float_flag_input_denormal);
+    return vfp_exceptbits_from_host(i);
+}
+
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
 {
     int i;
     uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
 
-    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
-    if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
-        val &= ~FPCR_FZ16;
-    }
-
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        /*
-         * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
-         * and also for the trapped-exception-handling bits IxE.
-         */
-        val &= 0xf7c0009f;
-    }
-
-    /*
-     * We don't implement trapped exception handling, so the
-     * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
-     *
-     * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
-     * (which are stored in fp_status), and the other RES0 bits
-     * in between, then we clear all of the low 16 bits.
-     */
-    env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
-    env->vfp.vec_len = (val >> 16) & 7;
-    env->vfp.vec_stride = (val >> 20) & 3;
-
-    /*
-     * The bit we set within fpscr_q is arbitrary; the register as a
-     * whole being zero/non-zero is what counts.
-     */
-    env->vfp.qc[0] = val & FPCR_QC;
-    env->vfp.qc[1] = 0;
-    env->vfp.qc[2] = 0;
-    env->vfp.qc[3] = 0;
-
     changed ^= val;
     if (changed & (3 << 22)) {
         i = (val >> 22) & 3;
@@ -170,7 +137,8 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
         set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
     }
 
-    /* The exception flags are ORed together when we read fpscr so we
+    /*
+     * The exception flags are ORed together when we read fpscr so we
      * only need to preserve the current state in one of our
      * float_status values.
      */
@@ -180,11 +148,86 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
     set_float_exception_flags(0, &env->vfp.standard_fp_status);
 }
 
+#else
+
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
+{
+    return 0;
+}
+
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+}
+
+#endif
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
+{
+    uint32_t i, fpscr;
+
+    fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
+            | (env->vfp.vec_len << 16)
+            | (env->vfp.vec_stride << 20);
+
+    fpscr |= vfp_get_fpscr_from_host(env);
+
+    i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
+    fpscr |= i ? FPCR_QC : 0;
+
+    return fpscr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env)
+{
+    return HELPER(vfp_get_fpscr)(env);
+}
+
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+{
+    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
+    if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
+        val &= ~FPCR_FZ16;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        /*
+         * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
+         * and also for the trapped-exception-handling bits IxE.
+         */
+        val &= 0xf7c0009f;
+    }
+
+    /*
+     * We don't implement trapped exception handling, so the
+     * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
+     *
+     * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
+     * (which are stored in fp_status), and the other RES0 bits
+     * in between, then we clear all of the low 16 bits.
+     */
+    env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
+    env->vfp.vec_len = (val >> 16) & 7;
+    env->vfp.vec_stride = (val >> 20) & 3;
+
+    /*
+     * The bit we set within fpscr_q is arbitrary; the register as a
+     * whole being zero/non-zero is what counts.
+     */
+    env->vfp.qc[0] = val & FPCR_QC;
+    env->vfp.qc[1] = 0;
+    env->vfp.qc[2] = 0;
+    env->vfp.qc[3] = 0;
+
+    vfp_set_fpscr_to_host(env, val);
+}
+
 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
 {
     HELPER(vfp_set_fpscr)(env, val);
 }
+#ifdef CONFIG_TCG
+
 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
 
 #define VFP_BINOP(name) \
@@ -1278,3 +1321,5 @@ float64 HELPER(frint64_d)(float64 f, void *fpst)
 {
     return frint_d(f, fpst, 64);
 }
+
+#endif
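/*
 * Example (illustration only, not part of the patch): HELPER(vfp_set_fpscr)
 * above splits an FPSCR value into the architectural fields QEMU keeps in
 * CPUARMState. A minimal standalone sketch of the same field extraction
 * (LEN at bits [18:16], STRIDE at bits [21:20], QC at bit 27); the input
 * value is hypothetical:
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FPCR_QC (1u << 27)   /* QC bit position, as in FPSCR */

int main(void)
{
    uint32_t val = (5u << 16) | (2u << 20) | EXAMPLE_FPCR_QC; /* hypothetical */
    unsigned vec_len = (val >> 16) & 7;
    unsigned vec_stride = (val >> 20) & 3;

    printf("LEN=%u STRIDE=%u QC=%u\n",
           vec_len, vec_stride, (val & EXAMPLE_FPCR_QC) ? 1 : 0);
    return 0;
}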