[Skiboot] [PATCH 02/15] xive: Wrap irq_sources to provide standard set_xive/get_xive
Benjamin Herrenschmidt
benh at kernel.crashing.org
Tue Aug 9 16:38:06 AEST 2016
All the source controllers use the standard XIVE mechanism for
masking/unmasking and EOI, so there is no point having that
logic duplicated. There are a few variations on how they are
implemented but that can be handled using a few flags.
So let's create a wrapper around irq_sources for a XIVE source
and provide a new API for things like PHB4 to instantiate them.
While at it, this patch also fixes the calculation of the source
offset when setting up targeting information in the PHB4.
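For reviewers, here is a short sketch of how a PHB-level driver is expected
to consume the new API. The prototype and flag definitions are lifted from
the include/xive.h hunk below and the two calls mirror the phb4_create()
hunk; the comments are editorial, not part of the patch:

    /* Flags selecting the ESB access variant (from include/xive.h) */
    #define XIVE_SRC_EOI_PAGE1   0x00000001 /* EOI and trigger on separate pages */
    #define XIVE_SRC_STORE_EOI   0x00000002 /* Store EOI (auto trigger) */
    #define XIVE_SRC_LSI         0x00000004 /* No Q bit, no retrigger */

    /* New registration entry point provided by hw/xive.c */
    void xive_register_source(uint32_t base, uint32_t count, uint32_t shift,
                              void *mmio, uint32_t flags, void *data,
                              const struct irq_source_ops *ops);

    /* MSIs: plain XIVE-handled sources, 64K ESB pages (shift = 16),
     * no driver callbacks needed
     */
    xive_register_source(p->base_msi, p->num_irqs - 8, 16, p->int_mmio, 0,
                         NULL, NULL);

    /* LSIs: keep the PHB-specific interrupt/attributes callbacks; the
     * wrapper still provides set_xive/get_xive/eoi around them
     */
    xive_register_source(p->base_lsi, 8, 16,
                         p->int_mmio + ((p->num_irqs - 8) << 16),
                         XIVE_SRC_LSI, p, &phb4_lsi_ops);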
Signed-off-by: Benjamin Herrenschmidt <benh at kernel.crashing.org>
---
hw/phb4.c | 95 +++--------
hw/xive.c | 514 ++++++++++++++++++++++++++++++++++++++-------------------
include/xive.h | 11 +-
3 files changed, 379 insertions(+), 241 deletions(-)
diff --git a/hw/phb4.c b/hw/phb4.c
index e1e8d3f..d3e7620 100644
--- a/hw/phb4.c
+++ b/hw/phb4.c
@@ -2407,7 +2407,8 @@ static void phb4_init_ioda3(struct phb4 *p)
out_be64(p->regs + PHB_INT_NOTIFY_ADDR, p->irq_port);
/* Init_18 - Interrupt Notify Base Index */
- out_be64(p->regs + PHB_INT_NOTIFY_INDEX, p->base_msi);
+ out_be64(p->regs + PHB_INT_NOTIFY_INDEX,
+ xive_get_notify_base(p->base_msi));
/* Init_xx - Not in spec: Initialize source ID */
PHBDBG(p, "Reset state SRC_ID: %016llx\n",
@@ -2990,70 +2991,29 @@ static bool phb4_calculate_windows(struct phb4 *p)
return true;
}
-
-static int64_t phb4_get_xive(struct irq_source *is __unused, uint32_t isn,
- uint16_t *server, uint8_t *prio)
-{
- uint32_t target_id;
-
- if (xive_get_eq_info(isn, &target_id, prio)) {
- *server = target_id;
- return OPAL_SUCCESS;
- } else
- return OPAL_PARAMETER;
-}
-
-static int64_t phb4_set_xive(struct irq_source *is, uint32_t isn,
- uint16_t server, uint8_t prio)
+static void phb4_err_interrupt(struct irq_source *is, uint32_t isn)
{
struct phb4 *p = is->data;
- uint32_t idx = isn - p->base_msi;
- void *mmio_base;
- /* Let XIVE configure the EQ */
- if (!xive_set_eq_info(isn, server, prio))
- return OPAL_PARAMETER;
+ PHBDBG(p, "Got interrupt 0x%08x\n", isn);
- /* Ensure it's enabled/disabled in the PHB. This won't do much
- * for LSIs but will work for MSIs and will ensure that a stray
- * P bit left over won't block further interrupts when enabling
- */
- mmio_base = p->int_mmio + 0x10000 * idx;
- if (prio == 0xff)
- in_8(mmio_base + 0xd00); /* PQ = 01 */
- else
- in_8(mmio_base + 0xc00); /* PQ = 00 */
-
- return OPAL_SUCCESS;
-}
+#if 0
+ /* Update pending event */
+ opal_update_pending_evt(OPAL_EVENT_PCI_ERROR,
+ OPAL_EVENT_PCI_ERROR);
-static void phb4_eoi(struct irq_source *is, uint32_t isn)
-{
- struct phb4 *p = is->data;
- uint32_t idx = isn - p->base_msi;
- void *mmio_base;
- uint8_t eoi_val;
+ /* If the PHB is broken, go away */
+ if (p->state == PHB3_STATE_BROKEN)
+ return;
- /* For EOI, we use the special MMIO that does a clear of both
- * P and Q and returns the old Q.
- *
- * This allows us to then do a re-trigger if Q was set rather
- * than synthetizing an interrupt in software
+ /*
+ * Mark the PHB as having a pending error so that the OS
+ * can handle it at a later point.
*/
- mmio_base = p->int_mmio + 0x10000 * idx;
- eoi_val = in_8(mmio_base + 0xc00);
- if (eoi_val & 1) {
- /* PHB doesn't use a separate replay, use the same page */
- out_8(mmio_base, 0);
- }
+ phb3_set_err_pending(p, true);
+#endif
}
-static const struct irq_source_ops phb4_msi_ops = {
- .get_xive = phb4_get_xive,
- .set_xive = phb4_set_xive,
- .eoi = phb4_eoi
-};
-
static uint64_t phb4_lsi_attributes(struct irq_source *is __unused,
uint32_t isn __unused)
{
@@ -3068,19 +3028,10 @@ static uint64_t phb4_lsi_attributes(struct irq_source *is __unused,
}
static const struct irq_source_ops phb4_lsi_ops = {
- .get_xive = phb4_get_xive,
- .set_xive = phb4_set_xive,
+ .interrupt = phb4_err_interrupt,
.attributes = phb4_lsi_attributes,
- .eoi = phb4_eoi
};
-/* Error LSIs (skiboot owned) */
-//static const struct irq_source_ops phb3_err_lsi_irq_ops = {
-// .get_xive = phb3_lsi_get_xive,
-// .set_xive = phb3_lsi_set_xive,
-// .interrupt = phb3_err_interrupt,
-//};
-
static void phb4_create(struct dt_node *np)
{
const struct dt_property *prop;
@@ -3228,13 +3179,16 @@ static void phb4_create(struct dt_node *np)
/* Clear IODA3 cache */
phb4_init_ioda_cache(p);
- /* Register interrupt sources */
- register_irq_source(&phb4_msi_ops, p, p->base_msi, p->num_irqs - 8);
- register_irq_source(&phb4_lsi_ops, p, p->base_lsi, 8);
-
/* Get the HW up and running */
phb4_init_hw(p, true);
+ /* Register all interrupt sources with XIVE */
+ xive_register_source(p->base_msi, p->num_irqs - 8, 16, p->int_mmio, 0,
+ NULL, NULL);
+ xive_register_source(p->base_lsi, 8, 16,
+ p->int_mmio + ((p->num_irqs - 8) << 16),
+ XIVE_SRC_LSI, p, &phb4_lsi_ops);
+
/* Platform additional setup */
if (platform.pci_setup_phb)
platform.pci_setup_phb(&p->phb, p->index);
@@ -3478,3 +3432,4 @@ void probe_phb4(void)
dt_for_each_compatible(dt_root, np, "ibm,power9-pciex")
phb4_create(np);
}
+
diff --git a/hw/xive.c b/hw/xive.c
index ed30252..dcd8d5b 100644
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -226,6 +226,19 @@
#endif
+/* Each source controller has one of these. There's one embedded
+ * in the XIVE struct for IPIs
+ */
+struct xive_src {
+ struct irq_source is;
+ const struct irq_source_ops *orig_ops;
+ struct xive *xive;
+ void *esb_mmio;
+ uint32_t esb_base;
+ uint32_t esb_shift;
+ uint32_t flags;
+};
+
struct xive {
uint32_t chip_id;
struct dt_node *x_node;
@@ -319,6 +332,9 @@ struct xive {
*/
uint32_t int_hw_bot; /* Bottom of HW allocation */
uint32_t int_ipi_top; /* Highest IPI handed out so far */
+
+ /* Embedded source IPIs */
+ struct xive_src ipis;
};
/* Conversion between GIRQ and block/index.
@@ -676,41 +692,6 @@ static int64_t xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
return __xive_cache_scrub(x, xive_cache_ivc, block, idx, false, false);
}
-static void xive_ipi_init(struct xive *x, uint32_t idx)
-{
- uint8_t *mm = x->esb_mmio + idx * 0x20000;
-
- /* Clear P and Q */
- in_8(mm + 0x10c00);
-}
-
-static void xive_ipi_eoi(struct xive *x, uint32_t idx)
-{
- uint8_t *mm = x->esb_mmio + idx * 0x20000;
- uint8_t eoi_val;
-
- /* For EOI, we use the special MMIO that does a clear of both
- * P and Q and returns the old Q.
- *
- * This allows us to then do a re-trigger if Q was set rather
- * than synthetizing an interrupt in software
- */
- eoi_val = in_8(mm + 0x10c00);
- if (eoi_val & 1) {
- out_8(mm, 0);
- }
-}
-
-static void xive_ipi_trigger(struct xive *x, uint32_t idx)
-{
- uint8_t *mm = x->esb_mmio + idx * 0x20000;
-
- xive_vdbg(x, "Trigger IPI 0x%x\n", idx);
-
- out_8(mm, 0);
-}
-
-
static bool xive_set_vsd(struct xive *x, uint32_t tbl, uint32_t idx, uint64_t v)
{
/* Set VC version */
@@ -838,9 +819,7 @@ static bool xive_configure_bars(struct xive *x)
mmio_base = 0x006000000000000ull;
chip_base = mmio_base | (0x40000000000ull * (uint64_t)x->chip_id);
- /* IC BAR. We use 4K pages here, 64K doesn't seem implemented
- * in SIMCIS
- */
+ /* IC BAR */
x->ic_base = (void *)(chip_base | IC_BAR_DEFAULT);
x->ic_size = IC_BAR_SIZE;
val = (uint64_t)x->ic_base | CQ_IC_BAR_VALID;
@@ -940,8 +919,10 @@ static bool xive_check_update_bars(struct xive *x)
x->eq_mmio = x->vc_base + (x->vc_size / VC_MAX_SETS) * VC_ESB_SETS;
/* Print things out */
- xive_dbg(x, "IC: %14p [0x%012llx/%d]\n", x->ic_base, x->ic_size, x->ic_shift);
- xive_dbg(x, "TM: %14p [0x%012llx/%d]\n", x->tm_base, x->tm_size, x->tm_shift);
+ xive_dbg(x, "IC: %14p [0x%012llx/%d]\n", x->ic_base, x->ic_size,
+ x->ic_shift);
+ xive_dbg(x, "TM: %14p [0x%012llx/%d]\n", x->tm_base, x->tm_size,
+ x->tm_shift);
xive_dbg(x, "PC: %14p [0x%012llx]\n", x->pc_base, x->pc_size);
xive_dbg(x, "VC: %14p [0x%012llx]\n", x->vc_base, x->vc_size);
@@ -1227,6 +1208,9 @@ uint32_t xive_alloc_hw_irqs(uint32_t chip_id, uint32_t count, uint32_t align)
}
x->int_hw_bot = base;
+ /* Adjust the irq source to avoid overlaps */
+ adjust_irq_source(&x->ipis.is, base - x->int_base);
+
/* Initialize the corresponding IVT entries to sane defaults,
* IE entry is valid, not routed and masked, EQ data is set
* to the GIRQ number.
@@ -1269,7 +1253,8 @@ uint32_t xive_alloc_ipi_irqs(uint32_t chip_id, uint32_t count, uint32_t align)
for (i = 0; i < count; i++) {
struct xive_ive *ive = xive_get_ive(x, base + i);
- ive->w = IVE_VALID | IVE_MASKED | SETFIELD(IVE_EQ_DATA, 0ul, base + i);
+ ive->w = IVE_VALID | IVE_MASKED |
+ SETFIELD(IVE_EQ_DATA, 0ul, base + i);
}
return base;
@@ -1354,6 +1339,267 @@ uint64_t xive_get_notify_port(uint32_t chip_id, uint32_t ent)
return ((uint64_t)x->ic_base) + (1ul << x->ic_shift) + offset;
}
+/* Manufacture the powerbus packet bits 32:63 */
+__attrconst uint32_t xive_get_notify_base(uint32_t girq)
+{
+ return (GIRQ_TO_BLK(girq) << 28) | GIRQ_TO_IDX(girq);
+}
+
+static bool xive_get_eq_info(uint32_t isn, uint32_t *out_target,
+ uint8_t *out_prio)
+{
+ struct xive_ive *ive;
+ struct xive *x, *eq_x;
+ struct xive_eq *eq;
+ uint32_t eq_blk, eq_idx;
+ uint32_t vp_blk, vp_idx;
+ uint32_t prio, server;
+
+ /* Find XIVE on which the IVE resides */
+ x = xive_from_isn(isn);
+ if (!x)
+ return false;
+ /* Grab the IVE */
+ ive = xive_get_ive(x, isn);
+ if (!ive)
+ return false;
+ if (!(ive->w & IVE_VALID)) {
+ xive_err(x, "ISN %x lead to invalid IVE !\n", isn);
+ return false;
+ }
+ /* Find the EQ and its xive instance */
+ eq_blk = GETFIELD(IVE_EQ_BLOCK, ive->w);
+ eq_idx = GETFIELD(IVE_EQ_INDEX, ive->w);
+ eq_x = xive_from_vc_blk(eq_blk);
+ if (!eq_x) {
+ xive_err(x, "Can't find controller for EQ BLK %d\n", eq_blk);
+ return false;
+ }
+ eq = xive_get_eq(eq_x, eq_idx);
+ if (!eq) {
+ xive_err(eq_x, "Can't locate EQ %d\n", eq_idx);
+ return false;
+ }
+ /* XXX Check valid and format 0 */
+
+ /* No priority conversion, return the actual one ! */
+ prio = GETFIELD(EQ_W7_F0_PRIORITY, eq->w7);
+ if (out_prio)
+ *out_prio = prio;
+
+ vp_blk = GETFIELD(EQ_W6_NVT_BLOCK, eq->w6);
+ vp_idx = GETFIELD(EQ_W6_NVT_INDEX, eq->w6);
+ server = VP2PIR(vp_blk, vp_idx);
+
+ if (out_target)
+ *out_target = server;
+ xive_vdbg(eq_x, "EQ info for ISN %x: prio=%d, server=0x%x (VP %x/%x)\n",
+ isn, prio, server, vp_blk, vp_idx);
+ return true;
+}
+
+static inline bool xive_eq_for_target(uint32_t target, uint8_t prio __unused,
+ uint32_t *eq_blk, uint32_t *eq_idx)
+{
+ uint32_t vp_blk = PIR2VP_BLK(target);
+ uint32_t vp_idx = PIR2VP_IDX(target);
+
+ /* XXX We currently have EQ BLK/IDX == VP BLK/IDX. This will change
+ * when we support priorities.
+ */
+ if (eq_blk)
+ *eq_blk = vp_blk;
+ if (eq_idx)
+ *eq_idx = vp_idx;
+ return true;
+}
+
+static bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
+{
+ struct xive *x;
+ struct xive_ive *ive;
+ uint32_t eq_blk, eq_idx;
+
+ /* Find XIVE on which the IVE resides */
+ x = xive_from_isn(isn);
+ if (!x)
+ return false;
+ /* Grab the IVE */
+ ive = xive_get_ive(x, isn);
+ if (!ive)
+ return false;
+ if (!(ive->w & IVE_VALID)) {
+ xive_err(x, "ISN %x lead to invalid IVE !\n", isn);
+ return false;
+ }
+
+ /* Are we masking ? */
+ if (prio == 0xff) {
+ /* Masking, just set the M bit */
+ ive->w |= IVE_MASKED;
+
+ xive_vdbg(x, "ISN %x masked !\n", isn);
+ } else {
+ uint64_t new_ive;
+
+ /* Unmasking, re-target the IVE. First find the EQ
+ * corresponding to the target
+ */
+ if (!xive_eq_for_target(target, prio, &eq_blk, &eq_idx)) {
+ xive_err(x, "Can't find EQ for target/prio 0x%x/%d\n",
+ target, prio);
+ return false;
+ }
+
+ /* Try to update it atomically to avoid an intermediary
+ * stale state
+ */
+ new_ive = ive->w & ~IVE_MASKED;
+ new_ive = SETFIELD(IVE_EQ_BLOCK, new_ive, eq_blk);
+ new_ive = SETFIELD(IVE_EQ_INDEX, new_ive, eq_idx);
+ sync();
+ ive->w = new_ive;
+
+ xive_vdbg(x,"ISN %x routed to eq %x/%x IVE=%016llx !\n",
+ isn, eq_blk, eq_idx, new_ive);
+ }
+
+ /* Scrub IVE from cache */
+ xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
+
+ return true;
+}
+
+static int64_t xive_source_get_xive(struct irq_source *is __unused,
+ uint32_t isn, uint16_t *server,
+ uint8_t *prio)
+{
+ uint32_t target_id;
+
+ if (xive_get_eq_info(isn, &target_id, prio)) {
+ *server = target_id;
+ return OPAL_SUCCESS;
+ } else
+ return OPAL_PARAMETER;
+}
+
+static int64_t xive_source_set_xive(struct irq_source *is, uint32_t isn,
+ uint16_t server, uint8_t prio)
+{
+ struct xive_src *s = container_of(is, struct xive_src, is);
+ uint32_t idx = isn - s->esb_base;
+ void *mmio_base;
+
+ /* Let XIVE configure the EQ */
+ if (!xive_set_eq_info(isn, server, prio))
+ return OPAL_PARAMETER;
+
+ /* Ensure it's enabled/disabled in the source controller.
+ *
+ * This won't do much for LSIs but will work for MSIs and will
+ * ensure that a stray P bit left over won't block further
+ * interrupts when enabling
+ */
+ mmio_base = s->esb_mmio + (1ul << s->esb_shift) * idx;
+ if (s->flags & XIVE_SRC_EOI_PAGE1)
+ mmio_base += 1ull << (s->esb_shift - 1);
+ if (prio == 0xff)
+ in_be64(mmio_base + 0xd00); /* PQ = 01 */
+ else
+ in_be64(mmio_base + 0xc00); /* PQ = 00 */
+
+ return OPAL_SUCCESS;
+}
+
+static void xive_source_eoi(struct irq_source *is, uint32_t isn)
+{
+ struct xive_src *s = container_of(is, struct xive_src, is);
+ uint32_t idx = isn - s->esb_base;
+ void *mmio_base;
+ uint64_t eoi_val;
+
+ mmio_base = s->esb_mmio + (1ull << s->esb_shift) * idx;
+
+ /* If the XIVE supports the new "store EOI" facility, use it */
+ if (s->flags & XIVE_SRC_STORE_EOI)
+ out_be64(mmio_base, 0);
+ else {
+ /* Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q.
+ *
+ * This allows us to then do a re-trigger if Q was set
+ * rather than synthesizing an interrupt in software
+ */
+ if (s->flags & XIVE_SRC_EOI_PAGE1) {
+ uint64_t p1off = 1ull << (s->esb_shift - 1);
+ eoi_val = in_be64(mmio_base + p1off + 0xc00);
+ } else
+ eoi_val = in_be64(mmio_base + 0xc00);
+ xive_vdbg(s->xive, "ISN: %08x EOI=%llx\n", isn, eoi_val);
+ if ((s->flags & XIVE_SRC_LSI) || !(eoi_val & 1))
+ return;
+
+ /* Re-trigger always on page0 or page1 ? */
+ out_be64(mmio_base, 0);
+ }
+}
+
+static void xive_source_interrupt(struct irq_source *is, uint32_t isn)
+{
+ struct xive_src *s = container_of(is, struct xive_src, is);
+
+ if (!s->orig_ops || !s->orig_ops->interrupt)
+ return;
+ s->orig_ops->interrupt(is, isn);
+}
+
+static uint64_t xive_source_attributes(struct irq_source *is, uint32_t isn)
+{
+ struct xive_src *s = container_of(is, struct xive_src, is);
+
+ if (!s->orig_ops || !s->orig_ops->attributes)
+ return IRQ_ATTR_TARGET_LINUX;
+ return s->orig_ops->attributes(is, isn);
+}
+
+static const struct irq_source_ops xive_irq_source_ops = {
+ .get_xive = xive_source_get_xive,
+ .set_xive = xive_source_set_xive,
+ .eoi = xive_source_eoi,
+ .interrupt = xive_source_interrupt,
+ .attributes = xive_source_attributes,
+};
+
+static void __xive_register_source(struct xive_src *s, uint32_t base,
+ uint32_t count, uint32_t shift,
+ void *mmio, uint32_t flags, void *data,
+ const struct irq_source_ops *orig_ops)
+{
+ s->esb_base = base;
+ s->esb_shift = shift;
+ s->esb_mmio = mmio;
+ s->flags = flags;
+ s->orig_ops = orig_ops;
+
+ s->is.start = base;
+ s->is.end = base + count;
+ s->is.ops = &xive_irq_source_ops;
+ s->is.data = data;
+
+ __register_irq_source(&s->is);
+}
+
+void xive_register_source(uint32_t base, uint32_t count, uint32_t shift,
+ void *mmio, uint32_t flags, void *data,
+ const struct irq_source_ops *ops)
+{
+ struct xive_src *s;
+
+ s = malloc(sizeof(struct xive_src));
+ assert(s);
+ __xive_register_source(s, base, count, shift, mmio, flags, data, ops);
+}
+
static void init_one_xive(struct dt_node *np)
{
struct xive *x;
@@ -1372,6 +1618,9 @@ static void init_one_xive(struct dt_node *np)
chip->xive = x;
/* Base interrupt numbers and allocator init */
+ /* XXX Consider allocating half as many ESBs as MMIO space
+ * so that HW sources land outside of ESB space...
+ */
x->int_base = BLKIDX_TO_GIRQ(x->chip_id, 0);
x->int_max = x->int_base + MAX_INT_ENTRIES;
x->int_hw_bot = x->int_max;
@@ -1383,7 +1632,8 @@ static void init_one_xive(struct dt_node *np)
if (x->int_ipi_top < 0x10)
x->int_ipi_top = 0x10;
- xive_dbg(x, "Handling interrupts [%08x..%08x]\n", x->int_base, x->int_max - 1);
+ xive_dbg(x, "Handling interrupts [%08x..%08x]\n",
+ x->int_base, x->int_max - 1);
/* System dependant values that must be set before BARs */
//xive_regwx(x, CQ_CFG_PB_GEN, xx);
@@ -1407,10 +1657,18 @@ static void init_one_xive(struct dt_node *np)
if (!xive_prealloc_tables(x))
goto fail;
- /* Configure local tables in VSDs (forward ports will be handled later) */
+ /* Configure local tables in VSDs (forward ports will be
+ * handled later)
+ */
if (!xive_set_local_tables(x))
goto fail;
+ /* Register built-in source controllers (aka IPIs) */
+ /* XXX Add new EOI mode for DD2 */
+ __xive_register_source(&x->ipis, x->int_base,
+ x->int_hw_bot - x->int_base, 16 + 1,
+ x->esb_mmio, XIVE_SRC_EOI_PAGE1, NULL, NULL);
+
/* Create a device-tree node for Linux use */
xive_create_mmio_dt_node(x);
@@ -1443,6 +1701,47 @@ struct xive_cpu_state {
uint32_t ipi_irq;
};
+static void xive_ipi_init(struct xive *x, struct cpu_thread *cpu)
+{
+ struct xive_cpu_state *xs = cpu->xstate;
+ uint32_t idx = GIRQ_TO_IDX(xs->ipi_irq);
+ uint8_t *mm = x->esb_mmio + idx * 0x20000;
+
+ assert(xs);
+
+ xive_source_set_xive(&x->ipis.is, xs->ipi_irq, cpu->pir, 0x7);
+
+ /* Clear P and Q */
+ in_8(mm + 0x10c00);
+}
+
+static void xive_ipi_eoi(struct xive *x, uint32_t idx)
+{
+ uint8_t *mm = x->esb_mmio + idx * 0x20000;
+ uint8_t eoi_val;
+
+ /* For EOI, we use the special MMIO that does a clear of both
+ * P and Q and returns the old Q.
+ *
+ * This allows us to then do a re-trigger if Q was set rather
+ * than synthesizing an interrupt in software
+ */
+ eoi_val = in_8(mm + 0x10c00);
+ if (eoi_val & 1) {
+ out_8(mm, 0);
+ }
+}
+
+static void xive_ipi_trigger(struct xive *x, uint32_t idx)
+{
+ uint8_t *mm = x->esb_mmio + idx * 0x20000;
+
+ xive_vdbg(x, "Trigger IPI 0x%x\n", idx);
+
+ out_8(mm, 0);
+}
+
+
void xive_cpu_callin(struct cpu_thread *cpu)
{
struct xive_cpu_state *xs = cpu->xstate;
@@ -1477,7 +1776,7 @@ void xive_cpu_callin(struct cpu_thread *cpu)
xive_cpu_dbg(cpu, "Initialized interrupt management area\n");
/* Now unmask the IPI */
- xive_ipi_init(x, GIRQ_TO_IDX(xs->ipi_irq));
+ xive_ipi_init(x, cpu);
}
static void xive_init_cpu(struct cpu_thread *c)
@@ -1535,132 +1834,8 @@ static void xive_init_cpu(struct cpu_thread *c)
/* Allocate an IPI */
xs->ipi_irq = xive_alloc_ipi_irqs(c->chip_id, 1, 1);
- xive_set_eq_info(xs->ipi_irq, c->pir, 0x7);
- xive_cpu_dbg(c, "CPU IPI is irq %08x\n", xs->ipi_irq);
-}
-bool xive_get_eq_info(uint32_t isn, uint32_t *out_target, uint8_t *out_prio)
-{
- struct xive_ive *ive;
- struct xive *x, *eq_x;
- struct xive_eq *eq;
- uint32_t eq_blk, eq_idx;
- uint32_t vp_blk, vp_idx;
- uint32_t prio, server;
-
- /* Find XIVE on which the IVE resides */
- x = xive_from_isn(isn);
- if (!x)
- return false;
- /* Grab the IVE */
- ive = xive_get_ive(x, isn);
- if (!ive)
- return false;
- if (!(ive->w & IVE_VALID)) {
- xive_err(x, "ISN %x lead to invalid IVE !\n", isn);
- return false;
- }
- /* Find the EQ and its xive instance */
- eq_blk = GETFIELD(IVE_EQ_BLOCK, ive->w);
- eq_idx = GETFIELD(IVE_EQ_INDEX, ive->w);
- eq_x = xive_from_vc_blk(eq_blk);
- if (!eq_x) {
- xive_err(x, "Can't find controller for EQ BLK %d\n", eq_blk);
- return false;
- }
- eq = xive_get_eq(eq_x, eq_idx);
- if (!eq) {
- xive_err(eq_x, "Can't locate EQ %d\n", eq_idx);
- return false;
- }
- /* XXX Check valid and format 0 */
-
- /* No priority conversion, return the actual one ! */
- prio = GETFIELD(EQ_W7_F0_PRIORITY, eq->w7);
- if (out_prio)
- *out_prio = prio;
-
- vp_blk = GETFIELD(EQ_W6_NVT_BLOCK, eq->w6);
- vp_idx = GETFIELD(EQ_W6_NVT_INDEX, eq->w6);
- server = VP2PIR(vp_blk, vp_idx);
-
- if (out_target)
- *out_target = server;
- xive_vdbg(eq_x, "EQ info for ISN %x: prio=%d, server=0x%x (VP %x/%x)\n",
- isn, prio, server, vp_blk, vp_idx);
- return true;
-}
-
-static inline bool xive_eq_for_target(uint32_t target, uint8_t prio __unused,
- uint32_t *eq_blk, uint32_t *eq_idx)
-{
- uint32_t vp_blk = PIR2VP_BLK(target);
- uint32_t vp_idx = PIR2VP_IDX(target);
-
- /* XXX We currently have EQ BLK/IDX == VP BLK/IDX. This will change
- * when we support priorities.
- */
- if (eq_blk)
- *eq_blk = vp_blk;
- if (eq_idx)
- *eq_idx = vp_idx;
- return true;
-}
-
-bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio)
-{
- struct xive *x;
- struct xive_ive *ive;
- uint32_t eq_blk, eq_idx;
-
- /* Find XIVE on which the IVE resides */
- x = xive_from_isn(isn);
- if (!x)
- return false;
- /* Grab the IVE */
- ive = xive_get_ive(x, isn);
- if (!ive)
- return false;
- if (!(ive->w & IVE_VALID)) {
- xive_err(x, "ISN %x lead to invalid IVE !\n", isn);
- return false;
- }
-
- /* Are we masking ? */
- if (prio == 0xff) {
- /* Masking, just set the M bit */
- ive->w |= IVE_MASKED;
-
- xive_vdbg(x, "ISN %x masked !\n", isn);
- } else {
- uint64_t new_ive;
-
- /* Unmasking, re-target the IVE. First find the EQ
- * correponding to the target
- */
- if (!xive_eq_for_target(target, prio, &eq_blk, &eq_idx)) {
- xive_err(x, "Can't find EQ for target/prio 0x%x/%d\n",
- target, prio);
- return false;
- }
-
- /* Try to update it atomically to avoid an intermediary
- * stale state
- */
- new_ive = ive->w & ~IVE_MASKED;
- new_ive = SETFIELD(IVE_EQ_BLOCK, new_ive, eq_blk);
- new_ive = SETFIELD(IVE_EQ_INDEX, new_ive, eq_idx);
- sync();
- ive->w = new_ive;
-
- xive_vdbg(x,"ISN %x routed to eq %x/%x IVE=%016llx !\n",
- isn, eq_blk, eq_idx, new_ive);
- }
-
- /* Scrub IVE from cache */
- xive_ivc_scrub(x, x->chip_id, GIRQ_TO_IDX(isn));
-
- return true;
+ xive_cpu_dbg(c, "CPU IPI is irq %08x\n", xs->ipi_irq);
}
@@ -2001,3 +2176,4 @@ void init_xive(void)
opal_register(OPAL_INT_EOI, opal_xive_eoi, 1);
opal_register(OPAL_INT_SET_MFRR, opal_xive_set_mfrr, 2);
}
+
diff --git a/include/xive.h b/include/xive.h
index c3bd33a..775ddd7 100644
--- a/include/xive.h
+++ b/include/xive.h
@@ -369,9 +369,16 @@ uint32_t xive_alloc_ipi_irqs(uint32_t chip_id, uint32_t count, uint32_t align);
#define XIVE_HW_SRC_PSI 8
uint64_t xive_get_notify_port(uint32_t chip_id, uint32_t ent);
+uint32_t xive_get_notify_base(uint32_t girq);
-bool xive_get_eq_info(uint32_t isn, uint32_t *out_target, uint8_t *out_prio);
-bool xive_set_eq_info(uint32_t isn, uint32_t target, uint8_t prio);
+#define XIVE_SRC_EOI_PAGE1 0x00000001 /* EOI and trig. separate */
+#define XIVE_SRC_STORE_EOI 0x00000002 /* Store EOI (auto trigger) */
+#define XIVE_SRC_LSI 0x00000004 /* No Q bit, no retrigger */
+
+struct irq_source_ops;
+void xive_register_source(uint32_t base, uint32_t count, uint32_t shift,
+ void *mmio, uint32_t flags, void *data,
+ const struct irq_source_ops *ops);
void xive_cpu_callin(struct cpu_thread *cpu);
--
2.7.4