Mirror of https://github.com/Motorhead1991/qemu.git, synced 2025-08-09 10:34:58 -06:00
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190702' into staging

ppc patch queue 2019-07-02

Here's my next pull request for qemu-4.1. I'm not sure if this will squeak in
just before the soft freeze, or just after. I don't think it really matters -
most of this is bugfixes anyway. There are some cleanups which aren't strictly
bugfixes, but which I think are safe enough improvements to go in during the
soft freeze. There's no true feature work.

Unfortunately, I wasn't able to complete a few of my standard battery of
pre-pull tests, due to some failures that appear to also be in master. I'm
hoping that hasn't missed anything important in here.

Highlights are:
 * A number of fixes and cleanups for the XIVE implementation
 * Cleanups to the XICS interrupt controller to fit better with the new XIVE code
 * Numerous fixes and improvements to TCG handling of ppc vector instructions
 * Remove a number of unnecessary #ifdef CONFIG_KVM guards
 * Fix some errors in the PCI hotplug paths
 * Assorted other fixes

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAl0a9JMACgkQbDjKyiDZ
s5ItkQ//bpkDkztJfRbOB7cgFVQCbXIJ5mpG7PBnBJDohXRtEsjCunNwL+GelRMl
FizPJO3sGpR2f+MgH+7MJ+Y6ESSwDhI6u8TbH4MjGTc9kWsqV1YUy6nB3grxwqG7
k9AXN0z6e1MZLaZuseGBrZmPzZcvNwnPKFqEU06ZXqIWscNgXWXteyO5JTZW4O9M
+Ttiser/f6dRCHKrKnlJp3D1blBaJVUXzZTJVqmH6AiJy/xfHq7Ak6LQKrVrt8Vc
I2hGMEqyDE+ppr8cuGku4KR8GWUen9m0F0bTVGjPsG1io+spAznxNZL/Z+KJPzrI
cCFaKoyNknIicx/0/iil5TEuu4rz985erNZBcglarK/w9w0RyW2LlcDbvzV+gO6c
Ln/1WLZZh4WufR4s4195zUJwZPwGp0E4xFdfk20ulzVzV4wVCMbNJHZpchHYFMi3
fW4Yzhpq5zaOTIaew5+tWST+8RuduacZ/Rm+f9LNui42uA52/EMoD8Vo34n8CIro
9DPOS64Jk9BjIr9bMstFOBCyTVt64IFzskDOMCSCznUl51Hm0ytfAJH3Gty7YazQ
ZxncazzlC9E6OzCTYRDNSPnTKGFvccGmuir/SXPWf3bn8oBC9p3P1mPK3cgk//as
CvWW8Y/QAJOrxEls5QZzpIBjxqAcMoMVjir6l1OT2/gvBTJto1Q=
=QAyU
-----END PGP SIGNATURE-----

# gpg: Signature made Tue 02 Jul 2019 07:07:15 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190702: (49 commits)
  spapr/xive: Add proper rollback to kvmppc_xive_connect()
  ppc/xive: Fix TM_PULL_POOL_CTX special operation
  ppc/pnv: Rework cache watch model of PnvXIVE
  ppc/xive: Make the PIPR register readonly
  ppc/xive: Force the Physical CAM line value to group mode
  spapr/xive: simplify spapr_irq_init_device() to remove the emulated init
  spapr/xive: rework the mapping the KVM memory regions
  spapr_pci: Unregister listeners before destroying the IOMMU address space
  target/ppc: improve VSX_FMADD with new GEN_VSX_HELPER_VSX_MADD macro
  target/ppc: decode target register in VSX_EXTRACT_INSERT at translation time
  target/ppc: decode target register in VSX_VECTOR_LOAD_STORE_LENGTH at translation time
  target/ppc: introduce GEN_VSX_HELPER_R2_AB macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_R2 macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_R3 macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_X1 macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_X2_AB macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_X2 macro to fpu_helper.c
  target/ppc: introduce separate generator and helper for xscvqpdp
  target/ppc: introduce GEN_VSX_HELPER_X3 macro to fpu_helper.c
  target/ppc: introduce separate VSX_CMP macro for xvcmp* instructions
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 506179e421
38 changed files with 1513 additions and 1191 deletions
@@ -169,7 +169,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
     vsd = ldq_be_dma(&address_space_memory, vsd_addr);

     if (!(vsd & VSD_ADDRESS_MASK)) {
-        xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0);
+        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
         return 0;
     }
@@ -190,7 +190,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
     vsd = ldq_be_dma(&address_space_memory, vsd_addr);

     if (!(vsd & VSD_ADDRESS_MASK)) {
-        xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0);
+        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
         return 0;
     }
@@ -294,8 +294,12 @@ static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              word_number);
 }

-static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx)
+static int pnv_xive_end_update(PnvXive *xive)
 {
+    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
+                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
+                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
     int i;
     uint64_t eqc_watch[4];
@@ -307,6 +311,24 @@ static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx)
                             XIVE_VST_WORD_ALL);
 }

+static void pnv_xive_end_cache_load(PnvXive *xive)
+{
+    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
+                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
+                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
+    uint64_t eqc_watch[4] = { 0 };
+    int i;
+
+    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
+        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
+        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
+    }
+}
+
 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             XiveNVT *nvt)
 {
@@ -320,8 +342,12 @@ static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              word_number);
 }

-static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx)
+static int pnv_xive_nvt_update(PnvXive *xive)
 {
+    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
+                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
+                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
     int i;
     uint64_t vpc_watch[8];
@@ -333,6 +359,24 @@ static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx)
                             XIVE_VST_WORD_ALL);
 }

+static void pnv_xive_nvt_cache_load(PnvXive *xive)
+{
+    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
+                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
+                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
+    uint64_t vpc_watch[8] = { 0 };
+    int i;
+
+    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
+        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
+        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
+    }
+}
+
 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             XiveEAS *eas)
 {
@@ -346,12 +390,6 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
     return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
 }

-static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx)
-{
-    /* All done. */
-    return 0;
-}
-
 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -781,8 +819,7 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
          * support recently though)
          */
         if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
-            object_property_set_int(OBJECT(&xive->ipi_source),
-                                    XIVE_SRC_STORE_EOI, "flags", &error_fatal);
+            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
         }
         break;
@@ -951,28 +988,43 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
      * XIVE PC & VC cache updates for EAS, NVT and END
      */
     case VC_IVC_SCRUB_MASK:
-        break;
     case VC_IVC_SCRUB_TRIG:
-        pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
-                            GETFIELD(VC_SCRUB_OFFSET, val));
         break;

-    case VC_EQC_SCRUB_MASK:
     case VC_EQC_CWATCH_SPEC:
-    case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3:
+        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
+        break;
+    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
         break;
+    case VC_EQC_CWATCH_DAT0:
+        /* writing to DATA0 triggers the cache write */
+        xive->regs[reg] = val;
+        pnv_xive_end_update(xive);
+        break;
+    case VC_EQC_SCRUB_MASK:
     case VC_EQC_SCRUB_TRIG:
-        pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val),
-                            GETFIELD(VC_SCRUB_OFFSET, val));
+        /*
+         * The scrubbing registers flush the cache in RAM and can also
+         * invalidate.
+         */
         break;

-    case PC_VPC_SCRUB_MASK:
     case PC_VPC_CWATCH_SPEC:
-    case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7:
+        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
+        break;
+    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
         break;
+    case PC_VPC_CWATCH_DAT0:
+        /* writing to DATA0 triggers the cache write */
+        xive->regs[reg] = val;
+        pnv_xive_nvt_update(xive);
+        break;
+    case PC_VPC_SCRUB_MASK:
     case PC_VPC_SCRUB_TRIG:
-        pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
-                            GETFIELD(PC_SCRUB_OFFSET, val));
+        /*
+         * The scrubbing registers flush the cache in RAM and can also
+         * invalidate.
+         */
         break;
@@ -1023,15 +1075,6 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
     case PC_GLOBAL_CONFIG:

     case PC_VPC_SCRUB_MASK:
-    case PC_VPC_CWATCH_SPEC:
-    case PC_VPC_CWATCH_DAT0:
-    case PC_VPC_CWATCH_DAT1:
-    case PC_VPC_CWATCH_DAT2:
-    case PC_VPC_CWATCH_DAT3:
-    case PC_VPC_CWATCH_DAT4:
-    case PC_VPC_CWATCH_DAT5:
-    case PC_VPC_CWATCH_DAT6:
-    case PC_VPC_CWATCH_DAT7:

     case VC_GLOBAL_CONFIG:
     case VC_AIB_TX_ORDER_TAG2:
@@ -1044,12 +1087,6 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
     case VC_IRQ_CONFIG_IPI_CASC:

     case VC_EQC_SCRUB_MASK:
-    case VC_EQC_CWATCH_DAT0:
-    case VC_EQC_CWATCH_DAT1:
-    case VC_EQC_CWATCH_DAT2:
-    case VC_EQC_CWATCH_DAT3:
-
-    case VC_EQC_CWATCH_SPEC:
     case VC_IVC_SCRUB_MASK:
     case VC_SBC_CONFIG:
     case VC_AT_MACRO_KILL_MASK:
@@ -1081,6 +1118,38 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
     /*
      * XIVE PC & VC cache updates for EAS, NVT and END
      */
+    case VC_EQC_CWATCH_SPEC:
+        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
+        val = xive->regs[reg];
+        break;
+    case VC_EQC_CWATCH_DAT0:
+        /*
+         * Load DATA registers from cache with data requested by the
+         * SPEC register
+         */
+        pnv_xive_end_cache_load(xive);
+        val = xive->regs[reg];
+        break;
+    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
+        val = xive->regs[reg];
+        break;
+
+    case PC_VPC_CWATCH_SPEC:
+        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
+        val = xive->regs[reg];
+        break;
+    case PC_VPC_CWATCH_DAT0:
+        /*
+         * Load DATA registers from cache with data requested by the
+         * SPEC register
+         */
+        pnv_xive_nvt_cache_load(xive);
+        val = xive->regs[reg];
+        break;
+    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
+        val = xive->regs[reg];
+        break;
+
     case PC_VPC_SCRUB_TRIG:
     case VC_IVC_SCRUB_TRIG:
     case VC_EQC_SCRUB_TRIG:
@ -194,13 +194,6 @@ void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
|
|||
}
|
||||
}
|
||||
|
||||
void spapr_xive_map_mmio(SpaprXive *xive)
|
||||
{
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
|
||||
}
|
||||
|
||||
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
|
||||
{
|
||||
memory_region_set_enabled(&xive->source.esb_mmio, enable);
|
||||
|
@ -305,6 +298,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
|
|||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
|
||||
|
||||
/*
|
||||
* Initialize the END ESB source
|
||||
|
@ -318,6 +312,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
|
|||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
|
||||
|
||||
/* Set the mapping address of the END ESB pages after the source ESBs */
|
||||
xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
|
||||
|
@ -333,31 +328,18 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
|
|||
|
||||
qemu_register_reset(spapr_xive_reset, dev);
|
||||
|
||||
/* Define all XIVE MMIO regions on SysBus */
|
||||
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
|
||||
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
|
||||
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
|
||||
}
|
||||
|
||||
void spapr_xive_init(SpaprXive *xive, Error **errp)
|
||||
{
|
||||
XiveSource *xsrc = &xive->source;
|
||||
|
||||
/*
|
||||
* The emulated XIVE device can only be initialized once. If the
|
||||
* ESB memory region has been already mapped, it means we have been
|
||||
* through there.
|
||||
*/
|
||||
if (memory_region_is_mapped(&xsrc->esb_mmio)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* TIMA initialization */
|
||||
memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
|
||||
"xive.tima", 4ull << TM_SHIFT);
|
||||
sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
|
||||
|
||||
/* Map all regions */
|
||||
spapr_xive_map_mmio(xive);
|
||||
/*
|
||||
* Map all regions. These will be enabled or disabled at reset and
|
||||
* can also be overridden by KVM memory regions if active
|
||||
*/
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
|
||||
}
|
||||
|
||||
static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
|
||||
|
|
|
@ -724,12 +724,13 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
|
|||
xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
memory_region_init_ram_device_ptr(&xsrc->esb_mmio, OBJECT(xsrc),
|
||||
memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
|
||||
"xive.esb", esb_len, xsrc->esb_mmap);
|
||||
memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
|
||||
&xsrc->esb_mmio_kvm, 1);
|
||||
|
||||
/*
|
||||
* 2. END ESB pages (No KVM support yet)
|
||||
|
@ -741,11 +742,12 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
|
|||
xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
goto fail;
|
||||
}
|
||||
memory_region_init_ram_device_ptr(&xive->tm_mmio, OBJECT(xive),
|
||||
memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
|
||||
"xive.tima", tima_len, xive->tm_mmap);
|
||||
memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
|
||||
&xive->tm_mmio_kvm, 1);
|
||||
|
||||
xive->change = qemu_add_vm_change_state_handler(
|
||||
kvmppc_xive_change_state_handler, xive);
|
||||
|
@ -756,24 +758,24 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
|
|||
|
||||
kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
/* Update the KVM sources */
|
||||
kvmppc_xive_source_reset(xsrc, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
kvm_kernel_irqchip = true;
|
||||
kvm_msi_via_irqfd_allowed = true;
|
||||
kvm_gsi_direct_mapping = true;
|
||||
return;
|
||||
|
||||
/* Map all regions */
|
||||
spapr_xive_map_mmio(xive);
|
||||
fail:
|
||||
error_propagate(errp, local_err);
|
||||
kvmppc_xive_disconnect(xive, NULL);
|
||||
}
|
||||
|
||||
void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
|
||||
|
@ -795,21 +797,29 @@ void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
|
|||
xsrc = &xive->source;
|
||||
esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
|
||||
|
||||
sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 0);
|
||||
munmap(xsrc->esb_mmap, esb_len);
|
||||
if (xsrc->esb_mmap) {
|
||||
memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
|
||||
object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
|
||||
munmap(xsrc->esb_mmap, esb_len);
|
||||
xsrc->esb_mmap = NULL;
|
||||
}
|
||||
|
||||
sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 1);
|
||||
|
||||
sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 2);
|
||||
munmap(xive->tm_mmap, 4ull << TM_SHIFT);
|
||||
if (xive->tm_mmap) {
|
||||
memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
|
||||
object_unparent(OBJECT(&xive->tm_mmio_kvm));
|
||||
munmap(xive->tm_mmap, 4ull << TM_SHIFT);
|
||||
xive->tm_mmap = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* When the KVM device fd is closed, the KVM device is destroyed
|
||||
* and removed from the list of devices of the VM. The VCPU
|
||||
* presenters are also detached from the device.
|
||||
*/
|
||||
close(xive->fd);
|
||||
xive->fd = -1;
|
||||
if (xive->fd != -1) {
|
||||
close(xive->fd);
|
||||
xive->fd = -1;
|
||||
}
|
||||
|
||||
kvm_kernel_irqchip = false;
|
||||
kvm_msi_via_irqfd_allowed = false;
|
||||
|
@ -819,5 +829,8 @@ void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
|
|||
kvm_cpu_disable_all();
|
||||
|
||||
/* VM Change state handler is not needed anymore */
|
||||
qemu_del_vm_change_state_handler(xive->change);
|
||||
if (xive->change) {
|
||||
qemu_del_vm_change_state_handler(xive->change);
|
||||
xive->change = NULL;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -267,7 +267,14 @@ static int icp_post_load(void *opaque, int version_id)
|
|||
ICPState *icp = opaque;
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
return icp_set_kvm_state(icp);
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
ret = icp_set_kvm_state(icp, &local_err);
|
||||
if (ret < 0) {
|
||||
error_report_err(local_err);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -300,7 +307,12 @@ static void icp_reset_handler(void *dev)
|
|||
qemu_set_irq(icp->output, 0);
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
icp_set_kvm_state(ICP(dev));
|
||||
Error *local_err = NULL;
|
||||
|
||||
icp_set_kvm_state(ICP(dev), &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -351,6 +363,7 @@ static void icp_realize(DeviceState *dev, Error **errp)
|
|||
return;
|
||||
}
|
||||
|
||||
/* Connect the presenter to the VCPU (required for CPU hotplug) */
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
icp_kvm_realize(dev, &err);
|
||||
if (err) {
|
||||
|
@ -563,7 +576,12 @@ static void ics_simple_reset(DeviceState *dev)
|
|||
icsc->parent_reset(dev);
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
ics_set_kvm_state(ICS_BASE(dev));
|
||||
Error *local_err = NULL;
|
||||
|
||||
ics_set_kvm_state(ICS_BASE(dev), &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -679,7 +697,14 @@ static int ics_base_post_load(void *opaque, int version_id)
|
|||
ICSState *ics = opaque;
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
return ics_set_kvm_state(ics);
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
ret = ics_set_kvm_state(ics, &local_err);
|
||||
if (ret < 0) {
|
||||
error_report_err(local_err);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -765,8 +790,13 @@ void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
|
|||
lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
Error *local_err = NULL;
|
||||
|
||||
ics_reset_irq(ics->irqs + srcno);
|
||||
ics_set_kvm_state_one(ics, srcno);
|
||||
ics_set_kvm_state_one(ics, srcno, &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ void icp_synchronize_state(ICPState *icp)
|
|||
}
|
||||
}
|
||||
|
||||
int icp_set_kvm_state(ICPState *icp)
|
||||
int icp_set_kvm_state(ICPState *icp, Error **errp)
|
||||
{
|
||||
uint64_t state;
|
||||
int ret;
|
||||
|
@ -126,10 +126,11 @@ int icp_set_kvm_state(ICPState *icp)
|
|||
| ((uint64_t)icp->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT);
|
||||
|
||||
ret = kvm_set_one_reg(icp->cs, KVM_REG_PPC_ICP_STATE, &state);
|
||||
if (ret != 0) {
|
||||
error_report("Unable to restore KVM interrupt controller state (0x%"
|
||||
PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(icp->cs),
|
||||
strerror(errno));
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret,
|
||||
"Unable to restore KVM interrupt controller state (0x%"
|
||||
PRIx64 ") for CPU %ld", state,
|
||||
kvm_arch_vcpu_id(icp->cs));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -240,10 +241,9 @@ void ics_synchronize_state(ICSState *ics)
|
|||
ics_get_kvm_state(ics);
|
||||
}
|
||||
|
||||
int ics_set_kvm_state_one(ICSState *ics, int srcno)
|
||||
int ics_set_kvm_state_one(ICSState *ics, int srcno, Error **errp)
|
||||
{
|
||||
uint64_t state;
|
||||
Error *local_err = NULL;
|
||||
ICSIRQState *irq = &ics->irqs[srcno];
|
||||
int ret;
|
||||
|
||||
|
@ -278,16 +278,15 @@ int ics_set_kvm_state_one(ICSState *ics, int srcno)
|
|||
}
|
||||
|
||||
ret = kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES,
|
||||
srcno + ics->offset, &state, true, &local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
srcno + ics->offset, &state, true, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ics_set_kvm_state(ICSState *ics)
|
||||
int ics_set_kvm_state(ICSState *ics, Error **errp)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -297,10 +296,12 @@ int ics_set_kvm_state(ICSState *ics)
|
|||
}
|
||||
|
||||
for (i = 0; i < ics->nr_irqs; i++) {
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
ret = ics_set_kvm_state_one(ics, i);
|
||||
if (ret) {
|
||||
ret = ics_set_kvm_state_one(ics, i, &local_err);
|
||||
if (ret < 0) {
|
||||
error_propagate(errp, local_err);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -331,16 +332,7 @@ void ics_kvm_set_irq(ICSState *ics, int srcno, int val)
|
|||
}
|
||||
}
|
||||
|
||||
static void rtas_dummy(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
uint32_t token,
|
||||
uint32_t nargs, target_ulong args,
|
||||
uint32_t nret, target_ulong rets)
|
||||
{
|
||||
error_report("pseries: %s must never be called for in-kernel XICS",
|
||||
__func__);
|
||||
}
|
||||
|
||||
int xics_kvm_init(SpaprMachineState *spapr, Error **errp)
|
||||
int xics_kvm_connect(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
int rc;
|
||||
CPUState *cs;
|
||||
|
@ -357,42 +349,41 @@ int xics_kvm_init(SpaprMachineState *spapr, Error **errp)
|
|||
if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) {
|
||||
error_setg(errp,
|
||||
"KVM and IRQ_XICS capability must be present for in-kernel XICS");
|
||||
goto fail;
|
||||
return -1;
|
||||
}
|
||||
|
||||
spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_dummy);
|
||||
spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_dummy);
|
||||
spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_dummy);
|
||||
spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_dummy);
|
||||
|
||||
rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_SET_XIVE, "ibm,set-xive");
|
||||
if (rc < 0) {
|
||||
error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,set-xive");
|
||||
error_setg_errno(&local_err, -rc,
|
||||
"kvmppc_define_rtas_kernel_token: ibm,set-xive");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_GET_XIVE, "ibm,get-xive");
|
||||
if (rc < 0) {
|
||||
error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,get-xive");
|
||||
error_setg_errno(&local_err, -rc,
|
||||
"kvmppc_define_rtas_kernel_token: ibm,get-xive");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_ON, "ibm,int-on");
|
||||
if (rc < 0) {
|
||||
error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-on");
|
||||
error_setg_errno(&local_err, -rc,
|
||||
"kvmppc_define_rtas_kernel_token: ibm,int-on");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_OFF, "ibm,int-off");
|
||||
if (rc < 0) {
|
||||
error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-off");
|
||||
error_setg_errno(&local_err, -rc,
|
||||
"kvmppc_define_rtas_kernel_token: ibm,int-off");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Create the KVM XICS device */
|
||||
rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
|
||||
if (rc < 0) {
|
||||
error_setg_errno(errp, -rc, "Error on KVM_CREATE_DEVICE for XICS");
|
||||
error_setg_errno(&local_err, -rc, "Error on KVM_CREATE_DEVICE for XICS");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -407,27 +398,30 @@ int xics_kvm_init(SpaprMachineState *spapr, Error **errp)
|
|||
|
||||
icp_kvm_realize(DEVICE(spapr_cpu_state(cpu)->icp), &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
/* Update the KVM sources */
|
||||
ics_set_kvm_state(spapr->ics);
|
||||
ics_set_kvm_state(spapr->ics, &local_err);
|
||||
if (local_err) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Connect the presenters to the initial VCPUs of the machine */
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
icp_set_kvm_state(spapr_cpu_state(cpu)->icp);
|
||||
icp_set_kvm_state(spapr_cpu_state(cpu)->icp, &local_err);
|
||||
if (local_err) {
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
kvmppc_define_rtas_kernel_token(0, "ibm,set-xive");
|
||||
kvmppc_define_rtas_kernel_token(0, "ibm,get-xive");
|
||||
kvmppc_define_rtas_kernel_token(0, "ibm,int-on");
|
||||
kvmppc_define_rtas_kernel_token(0, "ibm,int-off");
|
||||
error_propagate(errp, local_err);
|
||||
xics_kvm_disconnect(spapr, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -451,13 +445,10 @@ void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp)
|
|||
* removed from the list of devices of the VM. The VCPU presenters
|
||||
* are also detached from the device.
|
||||
*/
|
||||
close(kernel_xics_fd);
|
||||
kernel_xics_fd = -1;
|
||||
|
||||
spapr_rtas_unregister(RTAS_IBM_SET_XIVE);
|
||||
spapr_rtas_unregister(RTAS_IBM_GET_XIVE);
|
||||
spapr_rtas_unregister(RTAS_IBM_INT_OFF);
|
||||
spapr_rtas_unregister(RTAS_IBM_INT_ON);
|
||||
if (kernel_xics_fd != -1) {
|
||||
close(kernel_xics_fd);
|
||||
kernel_xics_fd = -1;
|
||||
}
|
||||
|
||||
kvmppc_define_rtas_kernel_token(0, "ibm,set-xive");
|
||||
kvmppc_define_rtas_kernel_token(0, "ibm,get-xive");
|
||||
|
@ -471,3 +462,33 @@ void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp)
|
|||
/* Clear the presenter from the VCPUs */
|
||||
kvm_disable_icps();
|
||||
}
|
||||
|
||||
/*
|
||||
* This is a heuristic to detect older KVMs on POWER9 hosts that don't
|
||||
* support destruction of a KVM XICS device while the VM is running.
|
||||
* Required to start a spapr machine with ic-mode=dual,kernel-irqchip=on.
|
||||
*/
|
||||
bool xics_kvm_has_broken_disconnect(SpaprMachineState *spapr)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
|
||||
if (rc < 0) {
|
||||
/*
|
||||
* The error is ignored on purpose. The KVM XICS setup code
|
||||
* will catch it again anyway. The goal here is to see if
|
||||
* close() actually destroys the device or not.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
close(rc);
|
||||
|
||||
rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
|
||||
if (rc >= 0) {
|
||||
close(rc);
|
||||
return false;
|
||||
}
|
||||
|
||||
return errno == EEXIST;
|
||||
}
|
||||
|
|
|
@ -41,11 +41,32 @@
|
|||
* Guest interfaces
|
||||
*/
|
||||
|
||||
static bool check_emulated_xics(SpaprMachineState *spapr, const char *func)
|
||||
{
|
||||
if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ||
|
||||
kvm_irqchip_in_kernel()) {
|
||||
error_report("pseries: %s must only be called for emulated XICS",
|
||||
func);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#define CHECK_EMULATED_XICS_HCALL(spapr) \
|
||||
do { \
|
||||
if (!check_emulated_xics((spapr), __func__)) { \
|
||||
return H_HARDWARE; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static target_ulong h_cppr(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
target_ulong cppr = args[0];
|
||||
|
||||
CHECK_EMULATED_XICS_HCALL(spapr);
|
||||
|
||||
icp_set_cppr(spapr_cpu_state(cpu)->icp, cppr);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
@ -56,6 +77,8 @@ static target_ulong h_ipi(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
target_ulong mfrr = args[1];
|
||||
ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]);
|
||||
|
||||
CHECK_EMULATED_XICS_HCALL(spapr);
|
||||
|
||||
if (!icp) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
@ -69,6 +92,8 @@ static target_ulong h_xirr(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
{
|
||||
uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp);
|
||||
|
||||
CHECK_EMULATED_XICS_HCALL(spapr);
|
||||
|
||||
args[0] = xirr;
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
@ -78,6 +103,8 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
{
|
||||
uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp);
|
||||
|
||||
CHECK_EMULATED_XICS_HCALL(spapr);
|
||||
|
||||
args[0] = xirr;
|
||||
args[1] = cpu_get_host_ticks();
|
||||
return H_SUCCESS;
|
||||
|
@ -88,6 +115,8 @@ static target_ulong h_eoi(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
{
|
||||
target_ulong xirr = args[0];
|
||||
|
||||
CHECK_EMULATED_XICS_HCALL(spapr);
|
||||
|
||||
icp_eoi(spapr_cpu_state(cpu)->icp, xirr);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
@ -99,6 +128,8 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
uint32_t mfrr;
|
||||
uint32_t xirr;
|
||||
|
||||
CHECK_EMULATED_XICS_HCALL(spapr);
|
||||
|
||||
if (!icp) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
@ -111,6 +142,14 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
#define CHECK_EMULATED_XICS_RTAS(spapr, rets) \
|
||||
do { \
|
||||
if (!check_emulated_xics((spapr), __func__)) { \
|
||||
rtas_st((rets), 0, RTAS_OUT_HW_ERROR); \
|
||||
return; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
uint32_t token,
|
||||
uint32_t nargs, target_ulong args,
|
||||
|
@ -119,6 +158,8 @@ static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
ICSState *ics = spapr->ics;
|
||||
uint32_t nr, srcno, server, priority;
|
||||
|
||||
CHECK_EMULATED_XICS_RTAS(spapr, rets);
|
||||
|
||||
if ((nargs != 3) || (nret != 1)) {
|
||||
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
|
||||
return;
|
||||
|
@ -152,6 +193,8 @@ static void rtas_get_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
ICSState *ics = spapr->ics;
|
||||
uint32_t nr, srcno;
|
||||
|
||||
CHECK_EMULATED_XICS_RTAS(spapr, rets);
|
||||
|
||||
if ((nargs != 1) || (nret != 3)) {
|
||||
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
|
||||
return;
|
||||
|
@ -182,6 +225,8 @@ static void rtas_int_off(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
ICSState *ics = spapr->ics;
|
||||
uint32_t nr, srcno;
|
||||
|
||||
CHECK_EMULATED_XICS_RTAS(spapr, rets);
|
||||
|
||||
if ((nargs != 1) || (nret != 1)) {
|
||||
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
|
||||
return;
|
||||
|
@ -213,6 +258,8 @@ static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
ICSState *ics = spapr->ics;
|
||||
uint32_t nr, srcno;
|
||||
|
||||
CHECK_EMULATED_XICS_RTAS(spapr, rets);
|
||||
|
||||
if ((nargs != 1) || (nret != 1)) {
|
||||
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
|
||||
return;
|
||||
|
@ -239,14 +286,6 @@ static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
|
||||
void xics_spapr_init(SpaprMachineState *spapr)
|
||||
{
|
||||
/* Emulated mode can only be initialized once. */
|
||||
if (spapr->ics->init) {
|
||||
return;
|
||||
}
|
||||
|
||||
spapr->ics->init = true;
|
||||
|
||||
/* Registration of global state belongs into realize */
|
||||
spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
|
||||
spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
|
||||
spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
|
||||
|
|
|
@ -132,6 +132,11 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
|
|||
xive_tctx_notify(tctx, ring);
|
||||
}
|
||||
|
||||
static inline uint32_t xive_tctx_word2(uint8_t *ring)
|
||||
{
|
||||
return *((uint32_t *) &ring[TM_WORD2]);
|
||||
}
|
||||
|
||||
/*
|
||||
* XIVE Thread Interrupt Management Area (TIMA)
|
||||
*/
|
||||
|
@ -150,11 +155,12 @@ static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
|
|||
static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
|
||||
unsigned size)
|
||||
{
|
||||
uint64_t ret;
|
||||
uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
|
||||
uint32_t qw2w2;
|
||||
|
||||
ret = tctx->regs[TM_QW2_HV_POOL + TM_WORD2] & TM_QW2W2_POOL_CAM;
|
||||
tctx->regs[TM_QW2_HV_POOL + TM_WORD2] &= ~TM_QW2W2_POOL_CAM;
|
||||
return ret;
|
||||
qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
|
||||
memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
|
||||
return qw2w2;
|
||||
}
|
||||
|
||||
static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
|
||||
|
@ -182,31 +188,31 @@ static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
|
|||
*/
|
||||
|
||||
static const uint8_t xive_tm_hw_view[] = {
|
||||
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0,
|
||||
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
|
||||
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
|
||||
0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
|
||||
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
|
||||
};
|
||||
|
||||
static const uint8_t xive_tm_hv_view[] = {
|
||||
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0,
|
||||
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
|
||||
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
|
||||
0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
|
||||
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
|
||||
};
|
||||
|
||||
static const uint8_t xive_tm_os_view[] = {
|
||||
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
|
||||
/* QW-1 OS */ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
/* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
/* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
|
||||
2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
|
||||
};
|
||||
|
||||
static const uint8_t xive_tm_user_view[] = {
|
||||
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
/* QW-1 OS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
/* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
/* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -484,11 +490,6 @@ const MemoryRegionOps xive_tm_ops = {
|
|||
},
|
||||
};
|
||||
|
||||
static inline uint32_t xive_tctx_word2(uint8_t *ring)
|
||||
{
|
||||
return *((uint32_t *) &ring[TM_WORD2]);
|
||||
}
|
||||
|
||||
static char *xive_tctx_ring_print(uint8_t *ring)
|
||||
{
|
||||
uint32_t w2 = xive_tctx_word2(ring);
|
||||
|
@ -1229,27 +1230,16 @@ XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
|
|||
}
|
||||
|
||||
/*
|
||||
* By default on P9, the HW CAM line (23bits) is hardwired to :
|
||||
* Encode the HW CAM line in the block group mode format :
|
||||
*
|
||||
* 0x000||0b1||4Bit chip number||7Bit Thread number.
|
||||
*
|
||||
* When the block grouping is enabled, the CAM line is changed to :
|
||||
*
|
||||
* 4Bit chip number||0x001||7Bit Thread number.
|
||||
* chip << 19 | 0000000 0 0001 thread (7Bit)
|
||||
*/
|
||||
static uint32_t hw_cam_line(uint8_t chip_id, uint8_t tid)
|
||||
{
|
||||
return 1 << 11 | (chip_id & 0xf) << 7 | (tid & 0x7f);
|
||||
}
|
||||
|
||||
static bool xive_presenter_tctx_match_hw(XiveTCTX *tctx,
|
||||
uint8_t nvt_blk, uint32_t nvt_idx)
|
||||
static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
|
||||
{
|
||||
CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
|
||||
uint32_t pir = env->spr_cb[SPR_PIR].default_value;
|
||||
|
||||
return hw_cam_line((pir >> 8) & 0xf, pir & 0x7f) ==
|
||||
hw_cam_line(nvt_blk, nvt_idx);
|
||||
return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1285,7 +1275,7 @@ static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
|
|||
|
||||
/* PHYS ring */
|
||||
if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
|
||||
xive_presenter_tctx_match_hw(tctx, nvt_blk, nvt_idx)) {
|
||||
cam == xive_tctx_hw_cam_line(tctx)) {
|
||||
return TM_QW3_HV_PHYS;
|
||||
}
|
||||
|
||||
|
|
|
@@ -437,13 +437,11 @@ static void ppc_core99_init(MachineState *machine)
     }

     /* The NewWorld NVRAM is not located in the MacIO device */
-#ifdef CONFIG_KVM
     if (kvm_enabled() && getpagesize() > 4096) {
         /* We can't combine read-write and read-only in a single page, so
            move the NVRAM out of ROM again for KVM */
         nvram_addr = 0xFFE00000;
     }
-#endif
     dev = qdev_create(NULL, TYPE_MACIO_NVRAM);
     qdev_prop_set_uint32(dev, "size", 0x2000);
     qdev_prop_set_uint32(dev, "it_shift", 1);
@@ -488,14 +486,12 @@ static void ppc_core99_init(MachineState *machine)

     fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled());
     if (kvm_enabled()) {
-#ifdef CONFIG_KVM
         uint8_t *hypercall;

         hypercall = g_malloc(16);
         kvmppc_get_hypercall(env, hypercall, 16);
         fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
         fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
-#endif
     }
     fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq);
     /* Mac OS X requires a "known good" clock-frequency value; pass it one. */
@@ -345,14 +345,12 @@ static void ppc_heathrow_init(MachineState *machine)

     fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled());
     if (kvm_enabled()) {
-#ifdef CONFIG_KVM
         uint8_t *hypercall;

         hypercall = g_malloc(16);
         kvmppc_get_hypercall(env, hypercall, 16);
         fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
         fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
-#endif
     }
     fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq);
     /* Mac OS X requires a "known good" clock-frequency value; pass it one. */
34  hw/ppc/pnv.c
|
@ -860,6 +860,14 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp)
|
|||
Pnv8Psi *psi8 = &chip8->psi;
|
||||
Error *local_err = NULL;
|
||||
|
||||
/* XSCOM bridge is first */
|
||||
pnv_xscom_realize(chip, PNV_XSCOM_SIZE, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip));
|
||||
|
||||
pcc->parent_realize(dev, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
|
@ -916,7 +924,6 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
|
|||
k->isa_create = pnv_chip_power8_isa_create;
|
||||
k->dt_populate = pnv_chip_power8_dt_populate;
|
||||
k->pic_print_info = pnv_chip_power8_pic_print_info;
|
||||
k->xscom_base = 0x003fc0000000000ull;
|
||||
dc->desc = "PowerNV Chip POWER8E";
|
||||
|
||||
device_class_set_parent_realize(dc, pnv_chip_power8_realize,
|
||||
|
@ -936,7 +943,6 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
|
|||
k->isa_create = pnv_chip_power8_isa_create;
|
||||
k->dt_populate = pnv_chip_power8_dt_populate;
|
||||
k->pic_print_info = pnv_chip_power8_pic_print_info;
|
||||
k->xscom_base = 0x003fc0000000000ull;
|
||||
dc->desc = "PowerNV Chip POWER8";
|
||||
|
||||
device_class_set_parent_realize(dc, pnv_chip_power8_realize,
|
||||
|
@ -956,7 +962,6 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data)
|
|||
k->isa_create = pnv_chip_power8nvl_isa_create;
|
||||
k->dt_populate = pnv_chip_power8_dt_populate;
|
||||
k->pic_print_info = pnv_chip_power8_pic_print_info;
|
||||
k->xscom_base = 0x003fc0000000000ull;
|
||||
dc->desc = "PowerNV Chip POWER8NVL";
|
||||
|
||||
device_class_set_parent_realize(dc, pnv_chip_power8_realize,
|
||||
|
@ -1024,6 +1029,14 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
|
|||
Pnv9Psi *psi9 = &chip9->psi;
|
||||
Error *local_err = NULL;
|
||||
|
||||
/* XSCOM bridge is first */
|
||||
pnv_xscom_realize(chip, PNV9_XSCOM_SIZE, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV9_XSCOM_BASE(chip));
|
||||
|
||||
pcc->parent_realize(dev, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
|
@ -1099,7 +1112,6 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
|
|||
k->isa_create = pnv_chip_power9_isa_create;
|
||||
k->dt_populate = pnv_chip_power9_dt_populate;
|
||||
k->pic_print_info = pnv_chip_power9_pic_print_info;
|
||||
k->xscom_base = 0x00603fc00000000ull;
|
||||
dc->desc = "PowerNV Chip POWER9";
|
||||
|
||||
device_class_set_parent_realize(dc, pnv_chip_power9_realize,
|
||||
|
@ -1136,11 +1148,6 @@ static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
|
|||
}
|
||||
}
|
||||
|
||||
static void pnv_chip_instance_init(Object *obj)
|
||||
{
|
||||
PNV_CHIP(obj)->xscom_base = PNV_CHIP_GET_CLASS(obj)->xscom_base;
|
||||
}
|
||||
|
||||
static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
|
||||
{
|
||||
Error *error = NULL;
|
||||
|
@ -1206,14 +1213,6 @@ static void pnv_chip_realize(DeviceState *dev, Error **errp)
|
|||
PnvChip *chip = PNV_CHIP(dev);
|
||||
Error *error = NULL;
|
||||
|
||||
/* XSCOM bridge */
|
||||
pnv_xscom_realize(chip, &error);
|
||||
if (error) {
|
||||
error_propagate(errp, error);
|
||||
return;
|
||||
}
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip));
|
||||
|
||||
/* Cores */
|
||||
pnv_chip_core_realize(chip, &error);
|
||||
if (error) {
|
||||
|
@ -1398,7 +1397,6 @@ static const TypeInfo types[] = {
|
|||
.name = TYPE_PNV_CHIP,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.class_init = pnv_chip_class_init,
|
||||
.instance_init = pnv_chip_instance_init,
|
||||
.instance_size = sizeof(PnvChip),
|
||||
.class_size = sizeof(PnvChipClass),
|
||||
.abstract = true,
|
||||
|
|
|
@@ -213,17 +213,17 @@ const MemoryRegionOps pnv_xscom_ops = {
     .endianness = DEVICE_BIG_ENDIAN,
 };

-void pnv_xscom_realize(PnvChip *chip, Error **errp)
+void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp)
 {
     SysBusDevice *sbd = SYS_BUS_DEVICE(chip);
     char *name;

     name = g_strdup_printf("xscom-%x", chip->chip_id);
     memory_region_init_io(&chip->xscom_mmio, OBJECT(chip), &pnv_xscom_ops,
-                          chip, name, PNV_XSCOM_SIZE);
+                          chip, name, size);
     sysbus_init_mmio(sbd, &chip->xscom_mmio);

-    memory_region_init(&chip->xscom, OBJECT(chip), name, PNV_XSCOM_SIZE);
+    memory_region_init(&chip->xscom, OBJECT(chip), name, size);
     address_space_init(&chip->xscom_as, &chip->xscom, name);
     g_free(name);
 }
@@ -265,12 +265,19 @@ static const char compat_p9[] = "ibm,power9-xscom\0ibm,xscom";

 int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset)
 {
-    uint64_t reg[] = { cpu_to_be64(PNV_XSCOM_BASE(chip)),
-                       cpu_to_be64(PNV_XSCOM_SIZE) };
+    uint64_t reg[2];
     int xscom_offset;
     ForeachPopulateArgs args;
     char *name;

+    if (pnv_chip_is_power9(chip)) {
+        reg[0] = cpu_to_be64(PNV9_XSCOM_BASE(chip));
+        reg[1] = cpu_to_be64(PNV9_XSCOM_SIZE);
+    } else {
+        reg[0] = cpu_to_be64(PNV_XSCOM_BASE(chip));
+        reg[1] = cpu_to_be64(PNV_XSCOM_SIZE);
+    }
+
     name = g_strdup_printf("xscom@%" PRIx64, be64_to_cpu(reg[0]));
     xscom_offset = fdt_add_subnode(fdt, root_offset, name);
     _FDT(xscom_offset);
|
|
|
@@ -80,9 +80,7 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
     }

     if (old_pending != env->pending_interrupts) {
-#ifdef CONFIG_KVM
         kvmppc_set_interrupt(cpu, n_IRQ, level);
-#endif
     }

@@ -1036,10 +1034,7 @@ static void timebase_load(PPCTimebase *tb)
     CPU_FOREACH(cpu) {
         PowerPCCPU *pcpu = POWERPC_CPU(cpu);
         pcpu->env.tb_env->tb_offset = tb_off_adj;
-#if defined(CONFIG_KVM)
-        kvm_set_one_reg(cpu, KVM_REG_PPC_TB_OFFSET,
-                        &pcpu->env.tb_env->tb_offset);
-#endif
+        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
     }
 }
|
||||
|
|
|
@@ -780,7 +780,6 @@ static void ibm_40p_init(MachineState *machine)

     fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled());
     if (kvm_enabled()) {
-#ifdef CONFIG_KVM
         uint8_t *hypercall;

         fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq());
|
@@ -788,7 +787,6 @@ static void ibm_40p_init(MachineState *machine)
         kvmppc_get_hypercall(env, hypercall, 16);
         fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
         fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
-#endif
     } else {
         fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, NANOSECONDS_PER_SECOND);
     }
||||
|
|
|
@ -62,7 +62,7 @@ void spapr_irq_msi_reset(SpaprMachineState *spapr)
|
|||
bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
|
||||
}
|
||||
|
||||
static void spapr_irq_init_device(SpaprMachineState *spapr,
|
||||
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
|
||||
SpaprIrq *irq, Error **errp)
|
||||
{
|
||||
MachineState *machine = MACHINE(spapr);
|
||||
|
@ -88,8 +88,6 @@ static void spapr_irq_init_device(SpaprMachineState *spapr,
|
|||
error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
|
||||
warn_report_err(local_err);
|
||||
}
|
||||
|
||||
irq->init_emu(spapr, errp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -114,6 +112,8 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
|
|||
}
|
||||
|
||||
spapr->ics = ICS_BASE(obj);
|
||||
|
||||
xics_spapr_init(spapr);
|
||||
}
|
||||
|
||||
#define ICS_IRQ_FREE(ics, srcno) \
|
||||
|
@ -222,7 +222,7 @@ static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
|
|||
{
|
||||
Error *local_err = NULL;
|
||||
|
||||
spapr_irq_init_device(spapr, &spapr_irq_xics, &local_err);
|
||||
spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
|
@ -234,15 +234,10 @@ static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr)
|
|||
return XICS_NODENAME;
|
||||
}
|
||||
|
||||
static void spapr_irq_init_emu_xics(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
xics_spapr_init(spapr);
|
||||
}
|
||||
|
||||
static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
if (kvm_enabled()) {
|
||||
xics_kvm_init(spapr, errp);
|
||||
xics_kvm_connect(spapr, errp);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -266,7 +261,6 @@ SpaprIrq spapr_irq_xics = {
|
|||
.reset = spapr_irq_reset_xics,
|
||||
.set_irq = spapr_irq_set_irq_xics,
|
||||
.get_nodename = spapr_irq_get_nodename_xics,
|
||||
.init_emu = spapr_irq_init_emu_xics,
|
||||
.init_kvm = spapr_irq_init_kvm_xics,
|
||||
};
|
||||
|
||||
|
@ -384,7 +378,7 @@ static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
|
|||
spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
|
||||
}
|
||||
|
||||
spapr_irq_init_device(spapr, &spapr_irq_xive, &local_err);
|
||||
spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
|
@ -410,11 +404,6 @@ static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
|
|||
return spapr->xive->nodename;
|
||||
}
|
||||
|
||||
static void spapr_irq_init_emu_xive(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
spapr_xive_init(spapr->xive, errp);
|
||||
}
|
||||
|
||||
static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
if (kvm_enabled()) {
|
||||
|
@ -446,7 +435,6 @@ SpaprIrq spapr_irq_xive = {
|
|||
.reset = spapr_irq_reset_xive,
|
||||
.set_irq = spapr_irq_set_irq_xive,
|
||||
.get_nodename = spapr_irq_get_nodename_xive,
|
||||
.init_emu = spapr_irq_init_emu_xive,
|
||||
.init_kvm = spapr_irq_init_kvm_xive,
|
||||
};
|
||||
|
||||
|
@ -624,7 +612,6 @@ SpaprIrq spapr_irq_dual = {
|
|||
.reset = spapr_irq_reset_dual,
|
||||
.set_irq = spapr_irq_set_irq_dual,
|
||||
.get_nodename = spapr_irq_get_nodename_dual,
|
||||
.init_emu = NULL, /* should not be used */
|
||||
.init_kvm = NULL, /* should not be used */
|
||||
};
|
||||
|
||||
|
@ -668,6 +655,19 @@ static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
|
|||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* On a POWER9 host, some older KVM XICS devices cannot be destroyed and
|
||||
* re-created. Detect that early to avoid QEMU to exit later when the
|
||||
* guest reboots.
|
||||
*/
|
||||
if (kvm_enabled() &&
|
||||
spapr->irq == &spapr_irq_dual &&
|
||||
machine_kernel_irqchip_required(machine) &&
|
||||
xics_kvm_has_broken_disconnect(spapr)) {
|
||||
error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -827,6 +827,5 @@ SpaprIrq spapr_irq_xics_legacy = {
|
|||
.reset = spapr_irq_reset_xics,
|
||||
.set_irq = spapr_irq_set_irq_xics,
|
||||
.get_nodename = spapr_irq_get_nodename_xics,
|
||||
.init_emu = spapr_irq_init_emu_xics,
|
||||
.init_kvm = spapr_irq_init_kvm_xics,
|
||||
};
|
||||
|
|
|
@ -1343,6 +1343,7 @@ static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev,
|
|||
static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus,
|
||||
void *fdt, int offset)
|
||||
{
|
||||
Object *owner;
|
||||
PciWalkFdt cbinfo = {
|
||||
.fdt = fdt,
|
||||
.offset = offset,
|
||||
|
@ -1356,15 +1357,20 @@ static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus,
|
|||
_FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
|
||||
RESOURCE_CELLS_SIZE));
|
||||
|
||||
if (bus) {
|
||||
pci_for_each_device_reverse(bus, pci_bus_num(bus),
|
||||
spapr_dt_pci_device_cb, &cbinfo);
|
||||
if (cbinfo.err) {
|
||||
return cbinfo.err;
|
||||
}
|
||||
assert(bus);
|
||||
pci_for_each_device_reverse(bus, pci_bus_num(bus),
|
||||
spapr_dt_pci_device_cb, &cbinfo);
|
||||
if (cbinfo.err) {
|
||||
return cbinfo.err;
|
||||
}
|
||||
|
||||
ret = spapr_dt_drc(fdt, offset, OBJECT(bus->parent_dev),
|
||||
if (pci_bus_is_root(bus)) {
|
||||
owner = OBJECT(sphb);
|
||||
} else {
|
||||
owner = OBJECT(pci_bridge_get_device(bus));
|
||||
}
|
||||
|
||||
ret = spapr_dt_drc(fdt, offset, owner,
|
||||
SPAPR_DR_CONNECTOR_TYPE_PCI);
|
||||
if (ret) {
|
||||
return ret;
|
||||
|
@@ -1782,6 +1788,12 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp)

     memory_region_del_subregion(&sphb->iommu_root, &sphb->msiwindow);

+    /*
+     * An attached PCI device may have memory listeners, eg. VFIO PCI. We have
+     * unmapped all sections. Remove the listeners now, before destroying the
+     * address space.
+     */
+    address_space_remove_listeners(&sphb->iommu_as);
     address_space_destroy(&sphb->iommu_as);

     qbus_set_hotplug_handler(BUS(phb->bus), NULL, &error_abort);
|
@@ -1945,11 +1957,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
      * For KVM we want to ensure that this memory is a full page so that
      * our memory slot is of page size granularity.
      */
-#ifdef CONFIG_KVM
     if (kvm_enabled()) {
         msi_window_size = getpagesize();
     }
-#endif

     memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,
                           "msi", msi_window_size);