[Skiboot] [RFC PATCH] Remove POWER9 DD1 support
Cédric Le Goater
clg at kaod.org
Wed Jan 9 04:28:17 AEDT 2019
Hello Nicholas,
On 1/7/19 3:09 PM, Nicholas Piggin wrote:
> There's a couple of cases I'm not sure if they're still needed (marked
> with XXX).
>
> Hostboot and Linux have removed DD1 support so there's not much point
> keeping it around.
Some comments on XIVE below.
>  core/cpu.c             |  10 --
>  core/direct-controls.c |   7 --
>  hdata/cpu-common.c     |  16 ---
>  hdata/iohub.c          |   9 --
>  hw/lpc.c               |  17 ---
>  hw/phb4.c              | 276 ++++++++++-------------------------------
>  hw/psi.c               |  43 +------
>  hw/slw.c               |  51 --------
>  hw/xive.c              |  29 ++---
>  include/lpc.h          |   3 -
>  include/phb4-regs.h    |   7 --
>  11 files changed, 79 insertions(+), 389 deletions(-)
>
> diff --git a/core/cpu.c b/core/cpu.c
> index a83f8baf1..0ed53c57b 100644
> --- a/core/cpu.c
> +++ b/core/cpu.c
> @@ -610,11 +610,6 @@ void cpu_set_sreset_enable(bool enabled)
> }
>
> } else if (proc_gen == proc_gen_p9) {
> - /* Don't use sreset idle on DD1 (has a number of bugs) */
> - uint32_t version = mfspr(SPR_PVR);
> - if (is_power9n(version) && (PVR_VERS_MAJ(version) == 1))
> - return;
> -
> sreset_enabled = enabled;
> sync();
> /*
> @@ -643,11 +638,6 @@ void cpu_set_ipi_enable(bool enabled)
> }
>
> } else if (proc_gen == proc_gen_p9) {
> - /* Don't use doorbell on DD1 (requires darn for msgsync) */
> - uint32_t version = mfspr(SPR_PVR);
> - if (is_power9n(version) && (PVR_VERS_MAJ(version) == 1))
> - return;
> -
> ipi_enabled = enabled;
> sync();
> if (!enabled)
> diff --git a/core/direct-controls.c b/core/direct-controls.c
> index 04b93a16f..1d0f6818e 100644
> --- a/core/direct-controls.c
> +++ b/core/direct-controls.c
> @@ -851,18 +851,11 @@ int64_t opal_signal_system_reset(int cpu_nr)
>
> void direct_controls_init(void)
> {
> - uint32_t version;
> -
> if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
> return;
>
> if (proc_gen != proc_gen_p9)
> return;
>
> - /* DD1 has some sreset quirks we do not support */
> - version = mfspr(SPR_PVR);
> - if (is_power9n(version) && PVR_VERS_MAJ(version) == 1)
> - return;
> -
> opal_register(OPAL_SIGNAL_SYSTEM_RESET, opal_signal_system_reset, 1);
> }
> diff --git a/hdata/cpu-common.c b/hdata/cpu-common.c
> index eb86f95fb..a2ac062ca 100644
> --- a/hdata/cpu-common.c
> +++ b/hdata/cpu-common.c
> @@ -42,17 +42,6 @@ struct dt_node * add_core_common(struct dt_node *cpus,
> 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
> 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00,
> };
> - const uint8_t pa_features_p9n_dd1[] = {
> - 64, 0,
> - 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xd0, 0x80, 0x00, /* 0 .. 7 */
> - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 8 .. 15 */
> - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 16 .. 23 */
> - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 24 .. 31 */
> - 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, /* 32 .. 39 */
> - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 40 .. 47 */
> - 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 .. 55 */
> - 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, /* 56 .. 63 */
> - };
> const uint8_t pa_features_p9n_dd20[] = {
> 64, 0,
> 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xd0, 0x80, 0x00, /* 0 .. 7 */
> @@ -111,11 +100,6 @@ struct dt_node * add_core_common(struct dt_node *cpus,
> case PVR_TYPE_P9:
> name = "PowerPC,POWER9";
> if (is_power9n(version) &&
> - (PVR_VERS_MAJ(version) == 1)) {
> - /* P9N DD1 */
> - pa_features = pa_features_p9n_dd1;
> - pa_features_size = sizeof(pa_features_p9n_dd1);
> - } else if (is_power9n(version) &&
> (PVR_VERS_MAJ(version) == 2) &&
> (PVR_VERS_MIN(version) == 0)) {
> /* P9N DD2.0 */
> diff --git a/hdata/iohub.c b/hdata/iohub.c
> index e16fb0b4e..ad1ddae40 100644
> --- a/hdata/iohub.c
> +++ b/hdata/iohub.c
> @@ -257,7 +257,6 @@ static struct dt_node *add_pec_stack(const struct cechub_io_hub *hub,
> {
> struct dt_node *stack;
> u64 eq[8];
> - uint32_t version;
> u8 *gen4;
> int i;
>
> @@ -283,16 +282,8 @@ static struct dt_node *add_pec_stack(const struct cechub_io_hub *hub,
> eq[i+4] = be64_to_cpu(hub->phb4_lane_eq[phb_index][i]);
>
> /* Lane-eq settings are packed 2 bytes per lane for 16 lanes
> - * On P9 DD1, 2 bytes per lane are used in the hardware
> * On P9 DD2, 1 byte per lane is used in the hardware
> */
> - version = mfspr(SPR_PVR);
> - if (is_power9n(version) &&
> - (PVR_VERS_MAJ(version) == 1)) {
> - dt_add_property_u64s(stack, "ibm,lane-eq", eq[0], eq[1],
> - eq[2], eq[3], eq[4], eq[5], eq[6], eq[7]);
> - return stack;
> - }
>
> /* Repack 2 byte lane settings into 1 byte */
> gen4 = (u8 *)&eq[4];
> diff --git a/hw/lpc.c b/hw/lpc.c
> index 20e54c99c..3f2300ce9 100644
> --- a/hw/lpc.c
> +++ b/hw/lpc.c
> @@ -1184,23 +1184,6 @@ void lpc_serirq(uint32_t chip_id, uint32_t index)
> unlock(&lpc->lock);
> }
>
> -void lpc_p9_sirq_eoi(uint32_t chip_id, uint32_t index)
> -{
> - struct proc_chip *chip = get_chip(chip_id);
> - struct lpcm *lpc;
> - uint32_t rmask;
> -
> - /* No initialized LPC controller on that chip */
> - if (!chip || !chip->lpc)
> - return;
> - lpc = chip->lpc;
> -
> - lock(&lpc->lock);
> - rmask = lpc->sirq_rmasks[index];
> - opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSTAT, rmask, 4);
> - unlock(&lpc->lock);
> -}
> -
> void lpc_all_interrupts(uint32_t chip_id)
> {
> struct proc_chip *chip = get_chip(chip_id);
> diff --git a/hw/phb4.c b/hw/phb4.c
> index c0797647c..61e067df6 100644
> --- a/hw/phb4.c
> +++ b/hw/phb4.c
> @@ -145,8 +145,7 @@ static void phb4_init_hw(struct phb4 *p);
> #define PHBLOGCFG(p, fmt, a...) do {} while (0)
> #endif
>
> -#define PHB4_CAN_STORE_EOI(p) \
> - (XIVE_STORE_EOI_ENABLED && ((p)->rev >= PHB4_REV_NIMBUS_DD20))
> +#define PHB4_CAN_STORE_EOI(p) XIVE_STORE_EOI_ENABLED
>
> static bool verbose_eeh;
> static bool pci_tracing;
> @@ -419,8 +418,6 @@ static int64_t phb4_rc_write(struct phb4 *p, uint32_t offset, uint8_t sz,
> break;
> default:
> /* Workaround PHB config space enable */
> - if ((p->rev == PHB4_REV_NIMBUS_DD10) && (reg == PCI_CFG_CMD))
> - val |= PCI_CFG_CMD_MEM_EN | PCI_CFG_CMD_BUS_MASTER_EN;
> PHBLOGCFG(p, "000 CFG%02d Wr %02x=%08x\n", 8 * sz, reg, val);
> if (use_asb)
> phb4_write_reg_asb(p, PHB_RC_CONFIG_BASE + reg, val);
> @@ -882,29 +879,21 @@ static uint64_t phb4_default_mbt0(struct phb4 *p, unsigned int bar_idx)
> {
> uint64_t mbt0;
>
> - if (p->rev == PHB4_REV_NIMBUS_DD10) {
> + switch (p->mbt_size - bar_idx - 1) {
> + case 0:
> mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
> - if (bar_idx == 0)
> - mbt0 |= SETFIELD(IODA3_MBT0_MDT_COLUMN, 0ull, 0);
> - else
> - mbt0 |= SETFIELD(IODA3_MBT0_MDT_COLUMN, 0ull, 1);
> - } else {
> - switch (p->mbt_size - bar_idx - 1) {
> - case 0:
> - mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
> - mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 3);
> - break;
> - case 1:
> - mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
> - mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 2);
> - break;
> - case 2:
> - mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
> - mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 1);
> - break;
> - default:
> - mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_PE_SEG);
> - }
> + mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 3);
> + break;
> + case 1:
> + mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
> + mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 2);
> + break;
> + case 2:
> + mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
> + mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 1);
> + break;
> + default:
> + mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_PE_SEG);
> }
> return mbt0;
> }
> @@ -943,34 +932,17 @@ static void phb4_init_ioda_cache(struct phb4 *p)
> memset(p->mist_cache, 0x0, sizeof(p->mist_cache));
>
> /* Configure MBT entries 1...N */
> - if (p->rev == PHB4_REV_NIMBUS_DD10) {
> - /* Since we configure the DD1.0 PHB4 with half the PE's,
> - * we need to give the illusion that we support only
> - * 128/256 segments half the segments.
> - *
> - * To achieve that, we configure *all* the M64 windows to use
> - * column 1 of the MDT, which is itself set so that segment 0
> - * and 1 map to PE0, 2 and 3 to PE1 etc...
> - *
> - * Column 0, 2 and 3 are left all 0, column 0 will be used for
> - * M32 and configured by the OS.
> - */
> - for (i = 0; i < p->max_num_pes; i++)
> - p->mdt_cache[i] = SETFIELD(IODA3_MDT_PE_B, 0ull, i >> 1);
> -
> - } else {
> - /* On DD2.0 we don't have the above problem. We still use MDT
> - * column 1..3 for the last 3 BARs however, thus allowing Linux
> - * to remap those, and setup all the other ones for now in mode 00
> - * (segment# == PE#). By default those columns are set to map
> - * the same way.
> - */
> - for (i = 0; i < p->max_num_pes; i++) {
> - p->mdt_cache[i] = SETFIELD(IODA3_MDT_PE_B, 0ull, i);
> - p->mdt_cache[i] |= SETFIELD(IODA3_MDT_PE_C, 0ull, i);
> - p->mdt_cache[i] |= SETFIELD(IODA3_MDT_PE_D, 0ull, i);
> - }
>
> + /* Column 0 is left 0 and will be used for M32 and configured
> + * by the OS. We use MDT column 1..3 for the last 3 BARs, thus
> + * allowing Linux to remap those, and setup all the other ones
> + * for now in mode 00 (segment# == PE#). By default those
> + * columns are set to map the same way.
> + */
> + for (i = 0; i < p->max_num_pes; i++) {
> + p->mdt_cache[i] = SETFIELD(IODA3_MDT_PE_B, 0ull, i);
> + p->mdt_cache[i] |= SETFIELD(IODA3_MDT_PE_C, 0ull, i);
> + p->mdt_cache[i] |= SETFIELD(IODA3_MDT_PE_D, 0ull, i);
> }
>
> /* Initialize MBT entries for BARs 1...N */
> @@ -1217,10 +1189,7 @@ static int64_t phb4_set_phb_mem_window(struct phb *phb,
> uint64_t mbt0, mbt1;
>
> /*
> - * We have a unified MBT for all BARs on PHB4. However we
> - * also have a current limitation that only half of the PEs
> - * are available (in order to have 2 TVT entries per PE)
> - * on DD1.0
> + * We have a unified MBT for all BARs on PHB4.
> *
> * So we use it as follow:
> *
> @@ -1231,16 +1200,8 @@ static int64_t phb4_set_phb_mem_window(struct phb *phb,
> * fully segmented or single PE (we don't yet expose the
> * new segmentation modes).
> *
> - * - [DD1.0] In order to deal with the above PE# limitations, since
> - * the OS assumes the segmentation is done with as many
> - * segments as PEs, we effectively fake it by mapping all
> - * MBT[1..n] to NDT column 1 which has been configured to
> - * give 2 adjacent segments the same PE# (see comment in
> - * ioda cache init). We don't expose the other columns to
> - * the OS.
> - *
> - * - [DD2.0] We configure the 3 last BARs to columnt 1..3
> - * initially set to segment# == PE#. We will need to provide some
> + * - We configure the 3 last BARs to column 1..3 initially
> + * set to segment# == PE#. We will need to provide some
> * extensions to the existing APIs to enable remapping of
> * segments on those BARs (and only those) as the current
> * API forces single segment mode.
> @@ -1392,7 +1353,7 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
> uint16_t segment_num)
> {
> struct phb4 *p = phb_to_phb4(phb);
> - uint64_t mbt0, mbt1, mdt0, mdt1;
> + uint64_t mbt0, mbt1, mdt0;
>
> if (pe_number >= p->num_pes)
> return OPAL_PARAMETER;
> @@ -1401,13 +1362,9 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
> * We support a combined MDT that has 4 columns. We let the OS
> * use kernel 0 for M32.
> *
> - * On DD1.0 we configure column1 ourselves to handle the "half PEs"
> - * problem and thus simulate having a smaller number of segments.
> - * columns 2 and 3 unused.
> - *
> - * On DD2.0 we configure the 3 last BARs to map column 3..1 which
> - * by default are set to map segment# == pe#, but can be remapped
> - * here if we extend this function.
> + * We configure the 3 last BARs to map column 3..1 which by default
> + * are set to map segment# == pe#, but can be remapped here if we
> + * extend this function.
> *
> * The problem is that the current API was "hijacked" so that an
> * attempt at remapping any segment of an M64 has the effect of
> @@ -1422,22 +1379,10 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
> if (window_num != 0 || segment_num >= p->num_pes)
> return OPAL_PARAMETER;
>
> - if (p->rev == PHB4_REV_NIMBUS_DD10) {
> - mdt0 = p->mdt_cache[segment_num << 1];
> - mdt1 = p->mdt_cache[(segment_num << 1) + 1];
> - mdt0 = SETFIELD(IODA3_MDT_PE_A, mdt0, pe_number);
> - mdt1 = SETFIELD(IODA3_MDT_PE_A, mdt1, pe_number);
> - p->mdt_cache[segment_num << 1] = mdt0;
> - p->mdt_cache[(segment_num << 1) + 1] = mdt1;
> - phb4_ioda_sel(p, IODA3_TBL_MDT, segment_num << 1, true);
> - out_be64(p->regs + PHB_IODA_DATA0, mdt0);
> - out_be64(p->regs + PHB_IODA_DATA0, mdt1);
> - } else {
> - mdt0 = p->mdt_cache[segment_num];
> - mdt0 = SETFIELD(IODA3_MDT_PE_A, mdt0, pe_number);
> - phb4_ioda_sel(p, IODA3_TBL_MDT, segment_num, false);
> - out_be64(p->regs + PHB_IODA_DATA0, mdt0);
> - }
> + mdt0 = p->mdt_cache[segment_num];
> + mdt0 = SETFIELD(IODA3_MDT_PE_A, mdt0, pe_number);
> + phb4_ioda_sel(p, IODA3_TBL_MDT, segment_num, false);
> + out_be64(p->regs + PHB_IODA_DATA0, mdt0);
> break;
> case OPAL_M64_WINDOW_TYPE:
> if (window_num == 0 || window_num >= p->mbt_size)
> @@ -3213,7 +3158,7 @@ static int64_t phb4_creset(struct pci_slot *slot)
> xscom_write(p->chip_id,
> p->pci_stk_xscom + XPEC_PCI_STK_ETU_RESET, 0x0);
>
> - /* DD1 errata: write to PEST to force update */
> + /* DD1 errata: write to PEST to force update XXX: needed? */
> phb4_ioda_sel(p, IODA3_TBL_PESTA, PHB4_RESERVED_PE_NUM(p),
> false);
> phb4_write_reg(p, PHB_IODA_DATA0, 0);
> @@ -3928,13 +3873,9 @@ static void phb4_init_capp_regs(struct phb4 *p, uint32_t capp_eng)
> reg |= PPC_BIT(0); /* enable cResp exam */
> reg |= PPC_BIT(3); /* disable vg not sys */
> reg |= PPC_BIT(12);/* HW417025: disable capp virtual machines */
> - if (p->rev == PHB4_REV_NIMBUS_DD10) {
> - reg |= PPC_BIT(1);
> - } else {
> - reg |= PPC_BIT(2); /* disable nn rn */
> - reg |= PPC_BIT(4); /* disable g */
> - reg |= PPC_BIT(5); /* disable ln */
> - }
> + reg |= PPC_BIT(2); /* disable nn rn */
> + reg |= PPC_BIT(4); /* disable g */
> + reg |= PPC_BIT(5); /* disable ln */
> xscom_write(p->chip_id, APC_MASTER_PB_CTRL + offset, reg);
>
> /* Set PHB mode, HPC Dir State and P9 mode */
> @@ -4018,11 +3959,9 @@ static void phb4_init_capp_regs(struct phb4 *p, uint32_t capp_eng)
> xscom_write(p->chip_id, FLUSH_SUE_STATE_MAP + offset,
> 0x08020A0000000000);
>
> - if (!(p->rev == PHB4_REV_NIMBUS_DD10)) {
> - /* Flush SUE uOP1 Register */
> - xscom_write(p->chip_id, FLUSH_SUE_UOP1 + offset,
> - 0xDCE0280428000000);
> - }
> + /* Flush SUE uOP1 Register */
> + xscom_write(p->chip_id, FLUSH_SUE_UOP1 + offset,
> + 0xDCE0280428000000);
>
> /* capp owns PHB read buffers */
> if (p->index == CAPP0_PHB_INDEX) {
> @@ -4278,20 +4217,15 @@ static int64_t enable_capi_mode(struct phb4 *p, uint64_t pe_number,
> ((u64)CAPIIND << 48) |
> ((u64)CAPIMASK << 32) | PHB_CAPI_CMPM_ENABLE);
>
> - if (!(p->rev == PHB4_REV_NIMBUS_DD10)) {
> - /* PB AIB Hardware Control Register
> - * Wait 32 PCI clocks for a credit to become available
> - * before rejecting.
> - */
> - xscom_read(p->chip_id,
> - p->pci_xscom + XPEC_PCI_PBAIB_HW_CONFIG, &reg);
> - reg |= PPC_BITMASK(40, 42);
> - if (p->index == CAPP1_PHB_INDEX)
> - reg |= PPC_BIT(30);
> - xscom_write(p->chip_id,
> - p->pci_xscom + XPEC_PCI_PBAIB_HW_CONFIG,
> - reg);
> - }
> + /* PB AIB Hardware Control Register
> + * Wait 32 PCI clocks for a credit to become available
> + * before rejecting.
> + */
> + xscom_read(p->chip_id, p->pci_xscom + XPEC_PCI_PBAIB_HW_CONFIG, &reg);
> + reg |= PPC_BITMASK(40, 42);
> + if (p->index == CAPP1_PHB_INDEX)
> + reg |= PPC_BIT(30);
> + xscom_write(p->chip_id, p->pci_xscom + XPEC_PCI_PBAIB_HW_CONFIG, reg);
>
> /* non-translate/50-bit mode */
> out_be64(p->regs + PHB_NXLATE_PREFIX, 0x0000000000000000Ull);
> @@ -4763,10 +4697,7 @@ static void phb4_init_errors(struct phb4 *p)
> out_be64(p->regs + 0x1908, 0x0000000000000000ull);
> out_be64(p->regs + 0x1920, 0x000000004d1780f8ull);
> out_be64(p->regs + 0x1928, 0x0000000000000000ull);
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - out_be64(p->regs + 0x1930, 0xffffffffb2e87f07ull);
> - else
> - out_be64(p->regs + 0x1930, 0xffffffffb2f87f07ull);
> + out_be64(p->regs + 0x1930, 0xffffffffb2f87f07ull);
> out_be64(p->regs + 0x1940, 0x0000000000000000ull);
> out_be64(p->regs + 0x1948, 0x0000000000000000ull);
> out_be64(p->regs + 0x1950, 0x0000000000000000ull);
> @@ -4814,10 +4745,7 @@ static void phb4_init_errors(struct phb4 *p)
> out_be64(p->regs + 0x0d80, 0xffffffffffffffffull);
> out_be64(p->regs + 0x0d88, 0x0000000000000000ull);
> out_be64(p->regs + 0x0d98, 0xfffffffffbffffffull);
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - out_be64(p->regs + 0x0da8, 0xc00000b801000060ull);
> - else
> - out_be64(p->regs + 0x0da8, 0xc00018b801000060ull);
> + out_be64(p->regs + 0x0da8, 0xc00018b801000060ull);
> /*
> * Errata ER20161123 says we should set the top two bits in
> * 0x0db0 but this causes config space accesses which don't
> @@ -4835,10 +4763,7 @@ static void phb4_init_errors(struct phb4 *p)
> out_be64(p->regs + 0x0e08, 0x0000000000000000ull);
> out_be64(p->regs + 0x0e18, 0xffffffffffffffffull);
> out_be64(p->regs + 0x0e28, 0x0000600000000000ull);
> - if (p->rev == PHB4_REV_NIMBUS_DD10) /* XXX CAPI has diff. value */
> - out_be64(p->regs + 0x0e30, 0xffff9effff7fff57ull);
> - else
> - out_be64(p->regs + 0x0e30, 0xfffffeffff7fff57ull);
> + out_be64(p->regs + 0x0e30, 0xfffffeffff7fff57ull);
> out_be64(p->regs + 0x0e40, 0x0000000000000000ull);
> out_be64(p->regs + 0x0e48, 0x0000000000000000ull);
> out_be64(p->regs + 0x0e50, 0x0000000000000000ull);
> @@ -4848,10 +4773,7 @@ static void phb4_init_errors(struct phb4 *p)
> out_be64(p->regs + 0x0e80, 0xffffffffffffffffull);
> out_be64(p->regs + 0x0e88, 0x0000000000000000ull);
> out_be64(p->regs + 0x0e98, 0xffffffffffffffffull);
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - out_be64(p->regs + 0x0ea8, 0x6000000000000000ull);
> - else
> - out_be64(p->regs + 0x0ea8, 0x60000000c0000000ull);
> + out_be64(p->regs + 0x0ea8, 0x60000000c0000000ull);
> out_be64(p->regs + 0x0eb0, 0x9faeffaf3fffffffull); /* XXX CAPI has diff. value */
> out_be64(p->regs + 0x0ec0, 0x0000000000000000ull);
> out_be64(p->regs + 0x0ec8, 0x0000000000000000ull);
> @@ -4956,12 +4878,6 @@ static void phb4_init_hw(struct phb4 *p)
> out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL3, be64_to_cpu(p->lane_eq[3]));
> out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL20, be64_to_cpu(p->lane_eq[4]));
> out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL21, be64_to_cpu(p->lane_eq[5]));
> - if (p->rev == PHB4_REV_NIMBUS_DD10) {
> - out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL22,
> - be64_to_cpu(p->lane_eq[6]));
> - out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL23,
> - be64_to_cpu(p->lane_eq[7]));
> - }
> }
> if (!p->lane_eq_en) {
> /* Read modify write and set to 2 bits */
> @@ -4990,13 +4906,9 @@ static void phb4_init_hw(struct phb4 *p)
>
> /* Init_17 - PHB Control */
> val = PHB_CTRLR_IRQ_PGSZ_64K;
> - if (p->rev == PHB4_REV_NIMBUS_DD10) {
> - val |= SETFIELD(PHB_CTRLR_TVT_ADDR_SEL, 0ull, TVT_DD1_2_PER_PE);
> - } else {
> - val |= SETFIELD(PHB_CTRLR_TVT_ADDR_SEL, 0ull, TVT_2_PER_PE);
> - if (PHB4_CAN_STORE_EOI(p))
> - val |= PHB_CTRLR_IRQ_STORE_EOI;
> - }
> + val |= SETFIELD(PHB_CTRLR_TVT_ADDR_SEL, 0ull, TVT_2_PER_PE);
> + if (PHB4_CAN_STORE_EOI(p))
> + val |= PHB_CTRLR_IRQ_STORE_EOI;
>
> if (!pci_eeh_mmio)
> val |= PHB_CTRLR_MMIO_EEH_DISABLE;
> @@ -5055,10 +4967,7 @@ static void phb4_init_hw(struct phb4 *p)
> out_be64(p->regs + PHB_TXE_ERR_IRQ_ENABLE, 0x2008400e08200000ull);
> out_be64(p->regs + PHB_RXE_ARB_ERR_IRQ_ENABLE, 0xc40038fc01804070ull);
> out_be64(p->regs + PHB_RXE_MRG_ERR_IRQ_ENABLE, 0x00006100008000a8ull);
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - out_be64(p->regs + PHB_RXE_TCE_ERR_IRQ_ENABLE, 0x6051005000000000ull);
> - else
> - out_be64(p->regs + PHB_RXE_TCE_ERR_IRQ_ENABLE, 0x60510050c0000000ull);
> + out_be64(p->regs + PHB_RXE_TCE_ERR_IRQ_ENABLE, 0x60510050c0000000ull);
>
> /* Init_131 - Re-enable LEM error mask */
> out_be64(p->regs + PHB_LEM_ERROR_MASK, 0x0000000000000000ull);
> @@ -5115,15 +5024,12 @@ static bool phb4_read_capabilities(struct phb4 *p)
> if (p->max_num_pes >= 512) {
> p->mrt_size = 16;
> p->mbt_size = 32;
> - p->tvt_size = 512;
> + p->tvt_size = 1024;
> } else {
> p->mrt_size = 8;
> p->mbt_size = 16;
> - p->tvt_size = 256;
> + p->tvt_size = 512;
> }
> - /* DD2.0 has twice has many TVEs */
> - if (p->rev >= PHB4_REV_NIMBUS_DD20)
> - p->tvt_size *= 2;
>
> val = in_be64(p->regs + PHB_PHB4_IRQ_CAP);
> if (val == 0xffffffffffffffff) {
> @@ -5366,44 +5272,6 @@ static uint64_t phb4_lsi_attributes(struct irq_source *is __unused,
> return IRQ_ATTR_TARGET_LINUX;
> }
>
> -static int64_t phb4_ndd1_lsi_set_xive(struct irq_source *is, uint32_t isn,
> - uint16_t server __unused, uint8_t priority)
> -{
> - struct phb4 *p = is->data;
> - uint32_t idx = isn - p->base_lsi;
> -
> - if (idx > 8)
> - return OPAL_PARAMETER;
> -
> - phb_lock(&p->phb);
> -
> - phb4_ioda_sel(p, IODA3_TBL_LIST, idx, false);
> -
> - /* Mask using P=0,Q=1, unmask using P=1,Q=0 followed by EOI */
> - /* XXX FIXME: A quick mask/umask can make us shoot an interrupt
> - * more than once to a queue. We need to keep track better.
> - *
> - * Thankfully, this is only on Nimubs DD1 and for LSIs, so
> - * will go away soon enough.
> - */
> - if (priority == 0xff)
> - out_be64(p->regs + PHB_IODA_DATA0, IODA3_LIST_Q);
> - else {
> - out_be64(p->regs + PHB_IODA_DATA0, IODA3_LIST_P);
> - __irq_source_eoi(is, isn);
> - }
> -
> - phb_unlock(&p->phb);
> -
> - return 0;
> -}
> -
> -static const struct irq_source_ops phb4_ndd1_lsi_ops = {
> - .set_xive = phb4_ndd1_lsi_set_xive,
> - .interrupt = phb4_err_interrupt,
> - .attributes = phb4_lsi_attributes,
> -};
> -
> static const struct irq_source_ops phb4_lsi_ops = {
> .interrupt = phb4_err_interrupt,
> .attributes = phb4_lsi_attributes,
> @@ -5524,10 +5392,8 @@ static void phb4_create(struct dt_node *np)
> if (!phb4_read_capabilities(p))
> goto failed;
>
> - /* Priority order: NVRAM -> dt -> GEN2 dd1 -> GEN3 dd2.00 -> GEN4 */
> + /* Priority order: NVRAM -> dt -> GEN3 dd2.00 -> GEN4 */
> p->max_link_speed = 4;
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - p->max_link_speed = 2;
> if (p->rev == PHB4_REV_NIMBUS_DD20 &&
> ((0xf & chip->ec_level) == 0) && chip->ec_rev == 0)
> p->max_link_speed = 3;
> @@ -5542,10 +5408,7 @@ static void phb4_create(struct dt_node *np)
> /* Check for lane equalization values from HB or HDAT */
> p->lane_eq_en = true;
> p->lane_eq = dt_prop_get_def_size(np, "ibm,lane-eq", NULL, &lane_eq_len);
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - lane_eq_len_req = 8 * 8;
> - else
> - lane_eq_len_req = 6 * 8;
> + lane_eq_len_req = 6 * 8;
> if (p->lane_eq) {
> if (lane_eq_len < lane_eq_len_req) {
> PHBERR(p, "Device-tree has ibm,lane-eq too short: %ld"
> @@ -5578,11 +5441,7 @@ static void phb4_create(struct dt_node *np)
> p->base_lsi = irq_base + p->num_irqs - 8;
> p->irq_port = xive_get_notify_port(p->chip_id,
> XIVE_HW_SRC_PHBn(p->index));
> -
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - p->num_pes = p->max_num_pes/2;
> - else
> - p->num_pes = p->max_num_pes;
> + p->num_pes = p->max_num_pes;
>
> /* Allocate the SkiBoot internal in-memory tables for the PHB */
> phb4_allocate_tables(p);
> @@ -5600,8 +5459,6 @@ static void phb4_create(struct dt_node *np)
>
> /* Compute XIVE source flags depending on PHB revision */
> irq_flags = 0;
> - if (p->rev == PHB4_REV_NIMBUS_DD10)
> - irq_flags |= XIVE_SRC_SHIFT_BUG;
> if (PHB4_CAN_STORE_EOI(p))
> irq_flags |= XIVE_SRC_STORE_EOI;
> else
> @@ -5615,8 +5472,7 @@ static void phb4_create(struct dt_node *np)
> p->int_mmio + ((p->num_irqs - 8) << 16),
> XIVE_SRC_LSI | XIVE_SRC_SHIFT_BUG,
> p,
> - (p->rev == PHB4_REV_NIMBUS_DD10) ?
> - &phb4_ndd1_lsi_ops : &phb4_lsi_ops);
> + &phb4_lsi_ops);
>
> /* Platform additional setup */
> if (platform.pci_setup_phb)
> diff --git a/hw/psi.c b/hw/psi.c
> index cbdbeaa9a..2f7ab5f98 100644
> --- a/hw/psi.c
> +++ b/hw/psi.c
> @@ -665,24 +665,6 @@ static char *psi_p9_irq_name(struct irq_source *is, uint32_t isn)
> return strdup(names[idx]);
> }
>
> -static void psi_p9_irq_ndd1_eoi(struct irq_source *is, uint32_t isn)
> -{
> - struct psi *psi = is->data;
> - unsigned int idx = isn & 0xf;
> -
> - if (idx >= P9_PSI_IRQ_LPC_SIRQ0 &&
> - idx <= P9_PSI_IRQ_LPC_SIRQ3)
> - lpc_p9_sirq_eoi(psi->chip_id, idx - P9_PSI_IRQ_LPC_SIRQ0);
> - __xive_source_eoi(is, isn);
> -}
> -
> -static const struct irq_source_ops psi_p9_ndd1_irq_ops = {
> - .interrupt = psihb_p9_interrupt,
> - .attributes = psi_p9_irq_attributes,
> - .name = psi_p9_irq_name,
> - .eoi = psi_p9_irq_ndd1_eoi,
> -};
> -
> static const struct irq_source_ops psi_p9_irq_ops = {
> .interrupt = psihb_p9_interrupt,
> .attributes = psi_p9_irq_attributes,
> @@ -824,7 +806,6 @@ static void psi_init_p8_interrupts(struct psi *psi)
> static void psi_init_p9_interrupts(struct psi *psi)
> {
> struct proc_chip *chip;
> - bool is_p9ndd1;
> u64 val;
>
> /* Grab chip */
> @@ -853,24 +834,12 @@ static void psi_init_p9_interrupts(struct psi *psi)
> out_be64(psi->regs + PSIHB_IVT_OFFSET, val);
>
> /* Register sources */
> - is_p9ndd1 = (chip->ec_level < 0x20 &&
> - chip->type == PROC_CHIP_P9_NIMBUS);
> -
> - if (is_p9ndd1) {
> - prlog(PR_DEBUG,
> - "PSI[0x%03x]: Interrupts sources registered for P9N DD1.x\n",
> - psi->chip_id);
> - xive_register_hw_source(psi->interrupt, P9_PSI_NUM_IRQS,
> - 12, psi->esb_mmio, XIVE_SRC_LSI,
> - psi, &psi_p9_ndd1_irq_ops);
> - } else {
> - prlog(PR_DEBUG,
> - "PSI[0x%03x]: Interrupts sources registered for P9 DD2.x\n",
> - psi->chip_id);
> - xive_register_hw_source(psi->interrupt, P9_PSI_NUM_IRQS,
> - 12, psi->esb_mmio, XIVE_SRC_LSI,
> - psi, &psi_p9_irq_ops);
> - }
> + prlog(PR_DEBUG,
> + "PSI[0x%03x]: Interrupts sources registered for P9 DD2.x\n",
> + psi->chip_id);
> + xive_register_hw_source(psi->interrupt, P9_PSI_NUM_IRQS,
> + 12, psi->esb_mmio, XIVE_SRC_LSI,
> + psi, &psi_p9_irq_ops);
>
> /* Reset irq handling and switch to ESB mode */
> out_be64(psi->regs + PSIHB_INTERRUPT_CONTROL, PSIHB_IRQ_RESET);
> diff --git a/hw/slw.c b/hw/slw.c
> index dfa9189bf..acd47baf3 100644
> --- a/hw/slw.c
> +++ b/hw/slw.c
> @@ -746,53 +746,6 @@ static struct cpu_idle_states power9_mambo_cpu_idle_states[] = {
>
> };
>
> -/* Idle states supported for P9 DD1 */
> -static struct cpu_idle_states power9_ndd1_cpu_idle_states[] = {
> - {
> - .name = "stop0_lite",
> - .latency_ns = 1000,
> - .residency_ns = 10000,
> - .flags = 0*OPAL_PM_DEC_STOP \
> - | 0*OPAL_PM_TIMEBASE_STOP \
> - | 0*OPAL_PM_LOSE_USER_CONTEXT \
> - | 0*OPAL_PM_LOSE_HYP_CONTEXT \
> - | 0*OPAL_PM_LOSE_FULL_CONTEXT \
> - | 1*OPAL_PM_STOP_INST_FAST,
> - .pm_ctrl_reg_val = OPAL_PM_PSSCR_RL(0) \
> - | OPAL_PM_PSSCR_MTL(3) \
> - | OPAL_PM_PSSCR_TR(3),
> - .pm_ctrl_reg_mask = OPAL_PM_PSSCR_MASK },
> - {
> - .name = "stop1_lite",
> - .latency_ns = 4900,
> - .residency_ns = 49000,
> - .flags = 0*OPAL_PM_DEC_STOP \
> - | 0*OPAL_PM_TIMEBASE_STOP \
> - | 0*OPAL_PM_LOSE_USER_CONTEXT \
> - | 0*OPAL_PM_LOSE_HYP_CONTEXT \
> - | 0*OPAL_PM_LOSE_FULL_CONTEXT \
> - | 1*OPAL_PM_STOP_INST_FAST,
> - .pm_ctrl_reg_val = OPAL_PM_PSSCR_RL(1) \
> - | OPAL_PM_PSSCR_MTL(3) \
> - | OPAL_PM_PSSCR_TR(3),
> - .pm_ctrl_reg_mask = OPAL_PM_PSSCR_MASK },
> - {
> - .name = "stop1",
> - .latency_ns = 2050000,
> - .residency_ns = 50000,
> - .flags = 0*OPAL_PM_DEC_STOP \
> - | 0*OPAL_PM_TIMEBASE_STOP \
> - | 1*OPAL_PM_LOSE_USER_CONTEXT \
> - | 0*OPAL_PM_LOSE_HYP_CONTEXT \
> - | 0*OPAL_PM_LOSE_FULL_CONTEXT \
> - | 1*OPAL_PM_STOP_INST_FAST,
> - .pm_ctrl_reg_val = OPAL_PM_PSSCR_RL(1) \
> - | OPAL_PM_PSSCR_MTL(3) \
> - | OPAL_PM_PSSCR_TR(3) \
> - | OPAL_PM_PSSCR_ESL \
> - | OPAL_PM_PSSCR_EC,
> - .pm_ctrl_reg_mask = OPAL_PM_PSSCR_MASK }
> -};
> static void slw_late_init_p9(struct proc_chip *chip)
> {
> struct cpu_thread *c;
> @@ -876,10 +829,6 @@ void add_cpu_idle_state_properties(void)
> if (proc_chip_quirks & QUIRK_MAMBO_CALLOUTS) {
> states = power9_mambo_cpu_idle_states;
> nr_states = ARRAY_SIZE(power9_mambo_cpu_idle_states);
> - } else if ((chip->ec_level == 0x10) &&
> - (chip->type == PROC_CHIP_P9_NIMBUS)) {
> - states = power9_ndd1_cpu_idle_states;
> - nr_states = ARRAY_SIZE(power9_ndd1_cpu_idle_states);
> } else {
> states = power9_cpu_idle_states;
> nr_states = ARRAY_SIZE(power9_cpu_idle_states);
> diff --git a/hw/xive.c b/hw/xive.c
> index 515f154d7..810538725 100644
> --- a/hw/xive.c
> +++ b/hw/xive.c
> @@ -487,8 +487,7 @@ struct xive {
> void *q_ovf;
> };
>
> -#define XIVE_CAN_STORE_EOI(x) \
> - (XIVE_STORE_EOI_ENABLED && ((x)->rev >= XIVE_REV_2))
> +#define XIVE_CAN_STORE_EOI(x) XIVE_STORE_EOI_ENABLED
OK. Let's keep the macro. P10 should have Store EOI.
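If P10 needs to gate this at runtime rather than at compile time, the
macro could test a per-controller capability instead. A rough sketch
(the 'store_eoi' field is hypothetical):

	/* Hypothetical per-controller flag, set at init from the chip type */
	#define XIVE_CAN_STORE_EOI(x) \
		(XIVE_STORE_EOI_ENABLED && (x)->store_eoi)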
> /* Global DT node */
> static struct dt_node *xive_dt_node;
> @@ -1521,7 +1520,7 @@ static bool xive_set_vsd(struct xive *x, uint32_t tbl, uint32_t idx, uint64_t v)
> SETFIELD(VST_TABLE_OFFSET, 0ull, idx));
> if (x->last_reg_error)
> return false;
> - /* Hack to workaround DD1 issue with NVT in VC in DD1 */
> + /* Hack to workaround DD1 issue with NVT in VC in DD1 XXX still needed? */
> if (tbl == VST_TSEL_VPDT)
> xive_regw(x, VC_VSD_TABLE_DATA, v | VSD_TSIZE);
Yes. I will check. It has been a pain to model in QEMU.
> else
> @@ -1743,10 +1742,6 @@ static bool xive_config_init(struct xive *x)
There is a check on XIVE_REV_2 that we could clean up:

	if (x->rev >= XIVE_REV_2) {
		val = SETFIELD(PC_TCTXT_INIT_AGE, val, 0x2);
		val |= PC_TCTXT_CFG_LGS_EN;
		/* Disable pressure relief as we hijack the field in the VPs */
		val &= ~PC_TCTXT_CFG_STORE_ACK;
	}
I think.
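With DD1 support gone the rev check can go too, leaving just:

	/* All remaining P9 parts are DD2 or later, apply unconditionally */
	val = SETFIELD(PC_TCTXT_INIT_AGE, val, 0x2);
	val |= PC_TCTXT_CFG_LGS_EN;
	/* Disable pressure relief as we hijack the field in the VPs */
	val &= ~PC_TCTXT_CFG_STORE_ACK;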
> xive_regw(x, PC_TCTXT_CFG, val);
> xive_dbg(x, "PC_TCTXT_CFG=%016llx\n", val);
>
> - /* Subsequent inits are DD2 only */
> - if (x->rev < XIVE_REV_2)
> - return true;
> -
> val = xive_regr(x, CQ_CFG_PB_GEN);
> /* 1-block-per-chip mode */
> val = SETFIELD(CQ_INT_ADDR_OPT, val, 2);
> @@ -2008,8 +2003,7 @@ static void xive_create_mmio_dt_node(struct xive *x)
> 12, 16, 21, 24);
>
> dt_add_property_cells(xive_dt_node, "ibm,xive-#priorities", 8);
> - if (x->rev >= XIVE_REV_2)
> - dt_add_property(xive_dt_node, "single-escalation-support", NULL, 0);
> + dt_add_property(xive_dt_node, "single-escalation-support", NULL, 0);
>
> xive_add_provisioning_properties();
> }
> @@ -2840,10 +2834,8 @@ static struct xive *init_one_xive(struct dt_node *np)
>
> x->rev = XIVE_REV_UNKNOWN;
> if (chip->type == PROC_CHIP_P9_NIMBUS) {
> - if ((chip->ec_level & 0xf0) == 0x10)
> - x->rev = XIVE_REV_1;
> - else if ((chip->ec_level & 0xf0) == 0x20)
> - x->rev = XIVE_REV_2;
> + assert((chip->ec_level & 0xf0) != 0x10);
> + x->rev = XIVE_REV_2;
> } else if (chip->type == PROC_CHIP_P9_CUMULUS)
> x->rev = XIVE_REV_2;
So all P9s are XIVE_REV_2 now?
C.
> @@ -4399,16 +4391,9 @@ static int64_t opal_xive_set_vp_info(uint64_t vp_id,
> vp_new.w6 = report_cl_pair >> 32;
> vp_new.w7 = report_cl_pair & 0xffffffff;
>
> - if (flags & OPAL_XIVE_VP_SINGLE_ESCALATION) {
> - if (x->rev < XIVE_REV_2) {
> - xive_dbg(x, "Attempt at enabling single escalate"
> - " on xive rev %d failed\n",
> - x->rev);
> - unlock(&x->lock);
> - return OPAL_PARAMETER;
> - }
> + if (flags & OPAL_XIVE_VP_SINGLE_ESCALATION)
> rc = xive_setup_silent_gather(vp_id, true);
> - } else
> + else
> rc = xive_setup_silent_gather(vp_id, false);
> } else {
> vp_new.w0 = vp_new.w6 = vp_new.w7 = 0;
> diff --git a/include/lpc.h b/include/lpc.h
> index 19bf47910..83b6c9dbe 100644
> --- a/include/lpc.h
> +++ b/include/lpc.h
> @@ -100,9 +100,6 @@ extern void lpc_register_client(uint32_t chip_id, const struct lpc_client *clt,
> /* Return the policy for a given serirq */
> extern unsigned int lpc_get_irq_policy(uint32_t chip_id, uint32_t psi_idx);
>
> -/* Clear SerIRQ latch on P9 DD1 */
> -extern void lpc_p9_sirq_eoi(uint32_t chip_id, uint32_t index);
> -
> /* Default bus accessors that perform error logging */
> extern int64_t lpc_write(enum OpalLPCAddressType addr_type, uint32_t addr,
> uint32_t data, uint32_t sz);
> diff --git a/include/phb4-regs.h b/include/phb4-regs.h
> index 235c213f9..8dd8cdc55 100644
> --- a/include/phb4-regs.h
> +++ b/include/phb4-regs.h
> @@ -119,11 +119,6 @@
> #define PHB_CTRLR_CFG_EEH_BLOCK PPC_BIT(15)
> #define PHB_CTRLR_FENCE_LNKILL_DIS PPC_BIT(16)
> #define PHB_CTRLR_TVT_ADDR_SEL PPC_BITMASK(17,19)
> -#define TVT_DD1_1_PER_PE 0
> -#define TVT_DD1_2_PER_PE 1
> -#define TVT_DD1_4_PER_PE 2
> -#define TVT_DD1_8_PER_PE 3
> -#define TVT_DD1_16_PER_PE 4
> #define TVT_2_PER_PE 0
> #define TVT_4_PER_PE 1
> #define TVT_8_PER_PE 2
> @@ -308,8 +303,6 @@
> #define PHB_PCIE_LANE_EQ_CNTL3 0x1AE8
> #define PHB_PCIE_LANE_EQ_CNTL20 0x1AF0
> #define PHB_PCIE_LANE_EQ_CNTL21 0x1AF8
> -#define PHB_PCIE_LANE_EQ_CNTL22 0x1B00 /* DD1 only */
> -#define PHB_PCIE_LANE_EQ_CNTL23 0x1B08 /* DD1 only */
> #define PHB_PCIE_TRACE_CTRL 0x1B20
> #define PHB_PCIE_MISC_STRAP 0x1B30
>
>