mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-07-27 04:13:53 -06:00)
ppc/xive2: Add undelivered group interrupt to backlog
When a group interrupt cannot be delivered, we need to:
- increment the backlog counter for the group in the NVG table (if the
  END is configured to keep a backlog).
- start a broadcast operation to set the LSMFB field on matching CPUs
  which can't take the interrupt now because they're running at too
  high a priority.

[npiggin: squash in fixes from milesg]
[milesg: only load the NVP if the END is !ignore]
[milesg: always broadcast backlog, not only when there are precluded VPs]

Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
Signed-off-by: Michael Kowal <kowal@linux.ibm.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
parent 9cb7f6ebed
commit 58fa4433e0

5 changed files with 174 additions and 30 deletions
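As a rough illustration of the behaviour described in the commit message, the toy model below is not QEMU code: the names and the single-group simplification are assumptions made for the example. It only shows the policy that the diff implements in xive2_router_end_notify(): an undelivered group interrupt is counted per priority, and the LSMFB broadcast is triggered only on the first increment of that counter.

/* Toy model of the fallback path; illustrative only, not QEMU code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t nvg_backlog[8];     /* stand-in for the NVG per-priority counters */

static void broadcast_lsmfb(uint8_t priority)
{
    /* Stand-in for XiveFabricClass::broadcast(), which sets LSMFB on
     * matching threads running at a more favored priority. */
    printf("broadcast: LSMFB <- %u on matching threads\n", priority);
}

static void group_interrupt_undelivered(uint8_t priority)
{
    if (priority > 7) {
        return;
    }
    nvg_backlog[priority]++;        /* remember the undelivered interrupt */
    if (nvg_backlog[priority] == 1) {
        broadcast_lsmfb(priority);  /* only the first increment broadcasts */
    }
}

int main(void)
{
    group_interrupt_undelivered(6); /* counts and broadcasts */
    group_interrupt_undelivered(6); /* only counts */
    printf("backlog[6] = %u\n", nvg_backlog[6]);
    return 0;
}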
hw/intc/pnv_xive2.c

@@ -705,6 +705,47 @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
     return cfg;
 }
 
+static int pnv_xive2_broadcast(XivePresenter *xptr,
+                               uint8_t nvt_blk, uint32_t nvt_idx,
+                               uint8_t priority)
+{
+    PnvXive2 *xive = PNV_XIVE2(xptr);
+    PnvChip *chip = xive->chip;
+    int i, j;
+    bool gen1_tima_os =
+        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
+
+    for (i = 0; i < chip->nr_cores; i++) {
+        PnvCore *pc = chip->cores[i];
+        CPUCore *cc = CPU_CORE(pc);
+
+        for (j = 0; j < cc->nr_threads; j++) {
+            PowerPCCPU *cpu = pc->threads[j];
+            XiveTCTX *tctx;
+            int ring;
+
+            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
+                continue;
+            }
+
+            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+            if (gen1_tima_os) {
+                ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
+                                                 nvt_idx, true, 0);
+            } else {
+                ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk,
+                                                  nvt_idx, true, 0);
+            }
+
+            if (ring != -1) {
+                xive2_tm_set_lsmfb(tctx, ring, priority);
+            }
+        }
+    }
+    return 0;
+}
+
 static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
 {
     return pnv_xive2_block_id(PNV_XIVE2(xrtr));

@@ -2444,6 +2485,7 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data)
 
     xpc->match_nvt = pnv_xive2_match_nvt;
     xpc->get_config = pnv_xive2_presenter_get_config;
+    xpc->broadcast = pnv_xive2_broadcast;
 };
 
 static const TypeInfo pnv_xive2_info = {
134 hw/intc/xive2.c

@@ -53,7 +53,8 @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
 
     /*
      * The per-priority backlog counters are 24-bit and the structure
-     * is stored in big endian
+     * is stored in big endian. NVGC is 32-bytes long, so 24-bytes from
+     * w2, which fits 8 priorities * 24-bits per priority.
      */
     ptr = (uint8_t *)&nvgc->w2 + priority * 3;
     for (i = 0; i < 3; i++, ptr++) {

@@ -62,6 +63,30 @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
     return val;
 }
 
+static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
+                                   uint32_t val)
+{
+    uint8_t *ptr, i;
+    uint32_t shift;
+
+    if (priority > 7) {
+        return;
+    }
+
+    if (val > 0xFFFFFF) {
+        val = 0xFFFFFF;
+    }
+    /*
+     * The per-priority backlog counters are 24-bit and the structure
+     * is stored in big endian
+     */
+    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
+    for (i = 0; i < 3; i++, ptr++) {
+        shift = 8 * (2 - i);
+        *ptr = (val >> shift) & 0xFF;
+    }
+}
+
 void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
 {
     if (!xive2_eas_is_valid(eas)) {
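The 24-bit, big-endian packing used by these two helpers can be checked in isolation. The standalone sketch below is illustrative only (DemoNvgc and the demo_* names are made up for the example and are not the QEMU Xive2Nvgc type), but it applies the same byte arithmetic: priority p occupies the three bytes starting at offset p * 3 from w2, most significant byte first.

/* Standalone sketch of the 24-bit big-endian backlog packing (illustrative). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    uint8_t bytes[24];              /* 8 priorities * 3 bytes (24 bits) each */
} DemoNvgc;                         /* stand-in, not the QEMU Xive2Nvgc */

static void demo_set_backlog(DemoNvgc *nvgc, uint8_t priority, uint32_t val)
{
    uint8_t *ptr;
    int i;

    if (priority > 7) {
        return;
    }
    if (val > 0xFFFFFF) {
        val = 0xFFFFFF;             /* counters saturate at 24 bits */
    }
    ptr = nvgc->bytes + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        *ptr = (val >> (8 * (2 - i))) & 0xFF;   /* most significant byte first */
    }
}

static uint32_t demo_get_backlog(DemoNvgc *nvgc, uint8_t priority)
{
    uint8_t *ptr = nvgc->bytes + priority * 3;
    uint32_t val = 0;
    int i;

    for (i = 0; i < 3; i++, ptr++) {
        val = (val << 8) | *ptr;
    }
    return val;
}

int main(void)
{
    DemoNvgc nvgc;

    memset(&nvgc, 0, sizeof(nvgc));
    demo_set_backlog(&nvgc, 3, 0x123456);
    /* bytes 9..11 now hold 0x12 0x34 0x56; the round trip returns 0x123456 */
    printf("backlog[3] = 0x%06x\n", demo_get_backlog(&nvgc, 3));
    return 0;
}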
@@ -830,6 +855,19 @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
     return true;
 }
 
+void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority)
+{
+    uint8_t *regs = &tctx->regs[ring];
+
+    /*
+     * Called by the router during a VP-group notification when the
+     * thread matches but can't take the interrupt because it's
+     * already running at a more favored priority. It then stores the
+     * new interrupt priority in the LSMFB field.
+     */
+    regs[TM_LSMFB] = priority;
+}
+
 static void xive2_router_realize(DeviceState *dev, Error **errp)
 {
     Xive2Router *xrtr = XIVE2_ROUTER(dev);
@@ -870,7 +908,6 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
     uint8_t priority;
     uint8_t format;
     bool found, precluded;
-    Xive2Nvp nvp;
     uint8_t nvp_blk;
     uint32_t nvp_idx;
 

@@ -934,19 +971,6 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
     nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
     nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
 
-    /* NVP cache lookup */
-    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
-                      nvp_blk, nvp_idx);
-        return;
-    }
-
-    if (!xive2_nvp_is_valid(&nvp)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
-                      nvp_blk, nvp_idx);
-        return;
-    }
-
     found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                                   xive2_end_is_ignore(&end),
                                   priority,
@@ -962,10 +986,9 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
     /*
      * If no matching NVP is dispatched on a HW thread :
      * - specific VP: update the NVP structure if backlog is activated
-     * - logical server : forward request to IVPE (not supported)
+     * - VP-group: update the backlog counter for that priority in the NVG
      */
     if (xive2_end_is_backlog(&end)) {
-        uint8_t ipb;
 
         if (format == 1) {
             qemu_log_mask(LOG_GUEST_ERROR,
@@ -974,19 +997,72 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
             return;
         }
 
-        /*
-         * Record the IPB in the associated NVP structure for later
-         * use. The presenter will resend the interrupt when the vCPU
-         * is dispatched again on a HW thread.
-         */
-        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
-            xive_priority_to_ipb(priority);
-        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
-        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
-
-        /*
-         * On HW, follows a "Broadcast Backlog" to IVPEs
-         */
+        if (!xive2_end_is_ignore(&end)) {
+            uint8_t ipb;
+            Xive2Nvp nvp;
+
+            /* NVP cache lookup */
+            if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
+                              nvp_blk, nvp_idx);
+                return;
+            }
+
+            if (!xive2_nvp_is_valid(&nvp)) {
+                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
+                              nvp_blk, nvp_idx);
+                return;
+            }
+
+            /*
+             * Record the IPB in the associated NVP structure for later
+             * use. The presenter will resend the interrupt when the vCPU
+             * is dispatched again on a HW thread.
+             */
+            ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
+                xive_priority_to_ipb(priority);
+            nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
+            xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+        } else {
+            Xive2Nvgc nvg;
+            uint32_t backlog;
+
+            /* For groups, the per-priority backlog counters are in the NVG */
+            if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) {
+                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n",
+                              nvp_blk, nvp_idx);
+                return;
+            }
+
+            if (!xive2_nvgc_is_valid(&nvg)) {
+                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n",
+                              nvp_blk, nvp_idx);
+                return;
+            }
+
+            /*
+             * Increment the backlog counter for that priority.
+             * We only call broadcast the first time the counter is
+             * incremented. broadcast will set the LSMFB field of the TIMA of
+             * relevant threads so that they know an interrupt is pending.
+             */
+            backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1;
+            xive2_nvgc_set_backlog(&nvg, priority, backlog);
+            xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg);
+
+            if (backlog == 1) {
+                XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb);
+                xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority);
+
+                if (!xive2_end_is_precluded_escalation(&end)) {
+                    /*
+                     * The interrupt will be picked up when the
+                     * matching thread lowers its priority level
+                     */
+                    return;
+                }
+            }
+        }
     }
 
 do_escalation:
22 hw/ppc/pnv.c

@@ -1,7 +1,9 @@
 /*
  * QEMU PowerPC PowerNV machine model
  *
- * Copyright (c) 2016, IBM Corporation.
+ * Copyright (c) 2016-2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public

@@ -2662,6 +2664,23 @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
     return total_count;
 }
 
+static int pnv10_xive_broadcast(XiveFabric *xfb,
+                                uint8_t nvt_blk, uint32_t nvt_idx,
+                                uint8_t priority)
+{
+    PnvMachineState *pnv = PNV_MACHINE(xfb);
+    int i;
+
+    for (i = 0; i < pnv->num_chips; i++) {
+        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
+        XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive);
+        XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
+
+        xpc->broadcast(xptr, nvt_blk, nvt_idx, priority);
+    }
+    return 0;
+}
+
 static bool pnv_machine_get_big_core(Object *obj, Error **errp)
 {
     PnvMachineState *pnv = PNV_MACHINE(obj);

@@ -2795,6 +2814,7 @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data)
     pmc->dt_power_mgt = pnv_dt_power_mgt;
 
     xfc->match_nvt = pnv10_xive_match_nvt;
+    xfc->broadcast = pnv10_xive_broadcast;
 
     machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
 }
include/hw/ppc/xive.h

@@ -442,6 +442,9 @@ struct XivePresenterClass {
                        uint32_t logic_serv, XiveTCTXMatch *match);
     bool (*in_kernel)(const XivePresenter *xptr);
     uint32_t (*get_config)(XivePresenter *xptr);
+    int (*broadcast)(XivePresenter *xptr,
+                     uint8_t nvt_blk, uint32_t nvt_idx,
+                     uint8_t priority);
 };
 
 int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,

@@ -472,6 +475,8 @@ struct XiveFabricClass {
                       uint8_t nvt_blk, uint32_t nvt_idx,
                       bool cam_ignore, uint8_t priority,
                       uint32_t logic_serv, XiveTCTXMatch *match);
+    int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx,
+                     uint8_t priority);
 };
 
 /*
include/hw/ppc/xive2.h

@@ -120,6 +120,7 @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
 void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, uint64_t value, unsigned size);
 bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority);
+void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority);
 void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
                             hwaddr offset, uint64_t value, unsigned size);
 void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,