[Skiboot] [PATCH 6/8] Remove support for POWER8 DD1
Nicholas Piggin
npiggin at gmail.com
Sun Oct 3 12:22:08 AEDT 2021
POWER8 DD1 is no longer supported: the rvwinkle-based slw_reinit() path and the
Murano/Venice DD1.0 PHB3 workarounds are removed, which significantly simplifies
the SLW code. HILE is now always supported.
Reviewed-by: Stewart Smith <stewart at flamingspork.com>
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
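For reference, a minimal standalone sketch of the simplified opal_reinit_cpus()
HILE path after this change (not the skiboot code itself; flag and return
values below are illustrative placeholders, and set_hile_all_cpus() stands in
for the real per-CPU HID0 update):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative placeholder values, not taken from opal-api.h */
#define OPAL_REINIT_CPUS_HILE_BE	0x1
#define OPAL_REINIT_CPUS_HILE_LE	0x2
#define OPAL_SUCCESS			0
#define OPAL_UNSUPPORTED		-7

/* Stand-in for the per-CPU HID0 update done by the real code */
static void set_hile_all_cpus(bool hile)
{
	(void)hile;
}

int64_t reinit_cpus_sketch(uint64_t flags)
{
	int64_t rc = OPAL_SUCCESS;

	/*
	 * No hile_supported check any more: every supported CPU
	 * (P8 DD2+, P9, P10) can flip HILE through HID0.
	 */
	if (flags & (OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE)) {
		bool hile = !!(flags & OPAL_REINIT_CPUS_HILE_LE);

		flags &= ~(OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE);
		set_hile_all_cpus(hile);
	}

	/*
	 * With slw_reinit() gone there is no DD1 fallback: any flag
	 * not handled above is simply unsupported.
	 */
	if (flags != 0)
		rc = OPAL_UNSUPPORTED;

	return rc;
}

The slw_reinit() fallback that DD1 needed for the endian switch is what the
bulk of the hw/slw.c deletions below remove.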
core/cpu.c | 23 ++--
hw/phb3.c | 68 ++--------
hw/slw.c | 323 ----------------------------------------------
include/phb3.h | 2 +-
include/skiboot.h | 5 -
5 files changed, 22 insertions(+), 399 deletions(-)
diff --git a/core/cpu.c b/core/cpu.c
index 0f2da1524..d11d7f9bc 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -35,7 +35,6 @@ unsigned int cpu_thread_count;
unsigned int cpu_max_pir;
struct cpu_thread *boot_cpu;
static struct lock reinit_lock = LOCK_UNLOCKED;
-static bool hile_supported;
static bool radix_supported;
static unsigned long hid0_hile;
static unsigned long hid0_attn;
@@ -1004,27 +1003,23 @@ void init_boot_cpu(void)
case PVR_TYPE_P8E:
case PVR_TYPE_P8:
proc_gen = proc_gen_p8;
- hile_supported = PVR_VERS_MAJ(mfspr(SPR_PVR)) >= 2;
hid0_hile = SPR_HID0_POWER8_HILE;
hid0_attn = SPR_HID0_POWER8_ENABLE_ATTN;
break;
case PVR_TYPE_P8NVL:
proc_gen = proc_gen_p8;
- hile_supported = true;
hid0_hile = SPR_HID0_POWER8_HILE;
hid0_attn = SPR_HID0_POWER8_ENABLE_ATTN;
break;
case PVR_TYPE_P9:
case PVR_TYPE_P9P:
proc_gen = proc_gen_p9;
- hile_supported = true;
radix_supported = true;
hid0_hile = SPR_HID0_POWER9_HILE;
hid0_attn = SPR_HID0_POWER9_ENABLE_ATTN;
break;
case PVR_TYPE_P10:
proc_gen = proc_gen_p10;
- hile_supported = true;
radix_supported = true;
hid0_hile = SPR_HID0_POWER10_HILE;
hid0_attn = SPR_HID0_POWER10_ENABLE_ATTN;
@@ -1061,6 +1056,11 @@ void init_boot_cpu(void)
cpu_thread_count = 1;
}
+ if (proc_gen == proc_gen_p8 && (PVR_VERS_MAJ(mfspr(SPR_PVR)) == 1)) {
+ prerror("CPU: POWER8 DD1 is not supported\n");
+ abort();
+ }
+
if (is_power9n(pvr) && (PVR_VERS_MAJ(pvr) == 1)) {
prerror("CPU: POWER9N DD1 is not supported\n");
abort();
@@ -1602,7 +1602,7 @@ static int64_t opal_reinit_cpus(uint64_t flags)
}
/*
* Now we need to mark ourselves "active" or we'll be skipped
- * by the various "for_each_active_..." calls done by slw_reinit()
+ * by the various "for_each_active_..."
*/
this_cpu()->state = cpu_state_active;
this_cpu()->in_reinit = true;
@@ -1616,10 +1616,8 @@ static int64_t opal_reinit_cpus(uint64_t flags)
*/
cpu_cleanup_all();
- /* If HILE change via HID0 is supported ... */
- if (hile_supported &&
- (flags & (OPAL_REINIT_CPUS_HILE_BE |
- OPAL_REINIT_CPUS_HILE_LE))) {
+ if (flags & (OPAL_REINIT_CPUS_HILE_BE |
+ OPAL_REINIT_CPUS_HILE_LE)) {
bool hile = !!(flags & OPAL_REINIT_CPUS_HILE_LE);
flags &= ~(OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE);
@@ -1674,10 +1672,7 @@ static int64_t opal_reinit_cpus(uint64_t flags)
rc = OPAL_SUCCESS;
}
- /* Handle P8 DD1 SLW reinit */
- if (flags != 0 && proc_gen == proc_gen_p8 && !hile_supported)
- rc = slw_reinit(flags);
- else if (flags != 0)
+ if (flags != 0)
rc = OPAL_UNSUPPORTED;
/* And undo the above */
diff --git a/hw/phb3.c b/hw/phb3.c
index 8af6b6164..aeeb6b655 100644
--- a/hw/phb3.c
+++ b/hw/phb3.c
@@ -444,7 +444,6 @@ static void phb3_switch_port_init(struct phb *phb,
struct pci_device *dev,
int ecap, int aercap)
{
- struct phb3 *p = phb_to_phb3(phb);
uint16_t bdfn = dev->bdfn;
uint16_t val16;
uint32_t val32;
@@ -498,17 +497,8 @@ static void phb3_switch_port_init(struct phb *phb,
PCIECAP_AER_UE_SEVERITY_INTERNAL);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
- /*
- * Mask various correctable errors
- *
- * On Murano and Venice DD1.0 we disable emission of corrected
- * error messages to the PHB completely to workaround errata
- * HW257476 causing the loss of tags.
- */
- if (p->rev < PHB3_REV_MURANO_DD20)
- val32 = 0xffffffff;
- else
- val32 = PCIECAP_AER_CE_MASK_ADV_NONFATAL;
+ /* Mask various correctable errors */
+ val32 = PCIECAP_AER_CE_MASK_ADV_NONFATAL;
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
/* Enable ECRC generation and disable ECRC check */
@@ -522,7 +512,6 @@ static void phb3_endpoint_init(struct phb *phb,
struct pci_device *dev,
int ecap, int aercap)
{
- struct phb3 *p = phb_to_phb3(phb);
uint16_t bdfn = dev->bdfn;
uint16_t val16;
uint32_t val32;
@@ -544,15 +533,6 @@ static void phb3_endpoint_init(struct phb *phb,
val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
- /*
- * On Murano and Venice DD1.0 we disable emission of corrected
- * error messages to the PHB completely to workaround errata
- * HW257476 causing the loss of tags.
- */
- if (p->rev < PHB3_REV_MURANO_DD20)
- pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK,
- 0xffffffff);
-
/* Enable ECRC generation and check */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
@@ -855,11 +835,9 @@ static int64_t phb3_ioda_reset(struct phb *phb, bool purge)
out_be64(p->regs + PHB_TCE_KILL, PHB_TCE_KILL_ALL);
/* Clear RBA */
- if (p->rev >= PHB3_REV_MURANO_DD20) {
- phb3_ioda_sel(p, IODA2_TBL_RBA, 0, true);
- for (i = 0; i < 32; i++)
- out_be64(p->regs + PHB_IODA_DATA0, 0x0ul);
- }
+ phb3_ioda_sel(p, IODA2_TBL_RBA, 0, true);
+ for (i = 0; i < 32; i++)
+ out_be64(p->regs + PHB_IODA_DATA0, 0x0ul);
/* Clear PEST & PEEV */
for (i = 0; i < PHB3_MAX_PE_NUM; i++) {
@@ -3926,11 +3904,7 @@ static void phb3_init_ioda2(struct phb3 *p)
/* DD2.0 or the subsequent chips don't have memory
* resident RBA.
*/
- if (p->rev >= PHB3_REV_MURANO_DD20)
- out_be64(p->regs + PHB_RBA_BAR, 0x0ul);
- else
- out_be64(p->regs + PHB_RBA_BAR,
- p->tbl_rba | PHB_RBA_BAR_ENABLE);
+ out_be64(p->regs + PHB_RBA_BAR, 0x0ul);
/* Init_18..21 - Setup M32 */
out_be64(p->regs + PHB_M32_BASE_ADDR, p->mm1_base);
@@ -3952,7 +3926,7 @@ static void phb3_init_ioda2(struct phb3 *p)
else if (p->rev >= PHB3_REV_MURANO_DD20)
out_be64(p->regs + PHB_INTREP_TIMER, 0x0004000000000000UL);
else
- out_be64(p->regs + PHB_INTREP_TIMER, 0);
+ assert(0); // DD1 not supported
/* Init_25 - PHB3 Configuration Register. Clear TCE cache then
* configure the PHB
@@ -4225,16 +4199,7 @@ static void phb3_init_errors(struct phb3 *p)
out_be64(p->regs + PHB_INB_ERR1_STATUS, 0x0000000000000000UL);
out_be64(p->regs + PHB_INB_ERR_LEM_ENABLE, 0xffffffffffffffffUL);
- /*
- * Workaround for errata HW257476, turn correctable messages into
- * ER freezes on Murano and Venice DD1.0
- */
- if (p->rev < PHB3_REV_MURANO_DD20)
- out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE,
- 0x0000600000000070UL);
- else
- out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE,
- 0x0000600000000060UL);
+ out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE, 0x0000600000000060UL);
out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE, 0xfcff80fbff7ff08cUL);
out_be64(p->regs + PHB_INB_ERR_LOG_0, 0x0000000000000000UL);
@@ -4381,12 +4346,10 @@ static void phb3_init_hw(struct phb3 *p, bool first_init)
* Enable IVC for Murano DD2.0 or later one
*/
#ifdef IVT_TABLE_IVE_16B
- val = 0xf3a80e4b00000000UL;
+ val = 0xf3a80e5b00000000UL;
#else
- val = 0xf3a80ecb00000000UL;
+ val = 0xf3a80edb00000000UL;
#endif
- if (p->rev >= PHB3_REV_MURANO_DD20)
- val |= 0x0000010000000000UL;
if (first_init && p->rev >= PHB3_REV_NAPLES_DD10) {
/* Enable 32-bit bypass support on Naples and tell the OS
* about it
@@ -4451,10 +4414,7 @@ static void phb3_init_hw(struct phb3 *p, bool first_init)
* Murano DD2.0 and later but lacks sufficient testing. We will re-enable
* it once that has been done.
*/
- if (p->rev >= PHB3_REV_MURANO_DD20)
- out_be64(p->regs + PHB_TCE_SPEC_CTL, 0xf000000000000000UL);
- else
- out_be64(p->regs + PHB_TCE_SPEC_CTL, 0x0ul);
+ out_be64(p->regs + PHB_TCE_SPEC_CTL, 0xf000000000000000UL);
/* Errata#20131017: avoid TCE queue overflow */
if (p->rev == PHB3_REV_MURANO_DD20)
@@ -4508,10 +4468,6 @@ static void phb3_allocate_tables(struct phb3 *p)
p->tbl_ivt = (uint64_t)local_alloc(p->chip_id, IVT_TABLE_SIZE, IVT_TABLE_SIZE);
assert(p->tbl_ivt);
memset((void *)p->tbl_ivt, 0, IVT_TABLE_SIZE);
-
- p->tbl_rba = (uint64_t)local_alloc(p->chip_id, RBA_TABLE_SIZE, RBA_TABLE_SIZE);
- assert(p->tbl_rba);
- memset((void *)p->tbl_rba, 0, RBA_TABLE_SIZE);
}
static void phb3_add_properties(struct phb3 *p)
@@ -4610,7 +4566,7 @@ static void phb3_add_properties(struct phb3 *p)
dt_add_property_cells(np, "ibm,opal-ive-stride",
IVT_TABLE_STRIDE);
dt_add_property_cells(np, "ibm,opal-rba-table",
- hi32(p->tbl_rba), lo32(p->tbl_rba), RBA_TABLE_SIZE);
+ 0, 0, 0);
dt_add_property_cells(np, "ibm,phb-diag-data-size",
sizeof(struct OpalIoPhb3ErrorData));
diff --git a/hw/slw.c b/hw/slw.c
index 56ba05b0a..178ee4f85 100644
--- a/hw/slw.c
+++ b/hw/slw.c
@@ -29,10 +29,6 @@
#include <p8_pore_table_gen_api.H>
#include <sbe_xip_image.h>
-static uint32_t slw_saved_reset[0x100];
-
-static bool slw_current_le = false;
-
enum wakeup_engine_states wakeup_engine_state = WAKEUP_ENGINE_NOT_PRESENT;
bool has_deep_states = false;
@@ -52,125 +48,6 @@ DEFINE_LOG_ENTRY(OPAL_RC_SLW_REG, OPAL_PLATFORM_ERR_EVT, OPAL_SLW,
OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
OPAL_NA);
-static void slw_do_rvwinkle(void *data)
-{
- struct cpu_thread *cpu = this_cpu();
- struct cpu_thread *master = data;
- uint64_t lpcr = mfspr(SPR_LPCR);
- struct proc_chip *chip;
-
- /* Setup our ICP to receive IPIs */
- icp_prep_for_pm();
-
- /* Setup LPCR to wakeup on external interrupts only */
- mtspr(SPR_LPCR, ((lpcr & ~SPR_LPCR_P8_PECE) | SPR_LPCR_P8_PECE2));
- isync();
-
- prlog(PR_DEBUG, "SLW: CPU PIR 0x%04x going to rvwinkle...\n",
- cpu->pir);
-
- /* Tell that we got it */
- cpu->state = cpu_state_rvwinkle;
-
- enter_p8_pm_state(1);
-
- /* Restore SPRs */
- init_shared_sprs();
- init_replicated_sprs();
-
- /* Ok, it's ours again */
- cpu->state = cpu_state_active;
-
- prlog(PR_DEBUG, "SLW: CPU PIR 0x%04x woken up !\n", cpu->pir);
-
- /* Cleanup our ICP */
- reset_cpu_icp();
-
- /* Resync timebase */
- chiptod_wakeup_resync();
-
- /* Restore LPCR */
- mtspr(SPR_LPCR, lpcr);
- isync();
-
- /* If we are passed a master pointer we are the designated
- * waker, let's proceed. If not, return, we are finished.
- */
- if (!master)
- return;
-
- prlog(PR_DEBUG, "SLW: CPU PIR 0x%04x waiting for master...\n",
- cpu->pir);
-
- /* Allriiiight... now wait for master to go down */
- while(master->state != cpu_state_rvwinkle)
- sync();
-
- /* XXX Wait one second ! (should check xscom state ? ) */
- time_wait_ms(1000);
-
- for_each_chip(chip) {
- struct cpu_thread *c;
- uint64_t tmp;
- for_each_available_core_in_chip(c, chip->id) {
- xscom_read(chip->id,
- XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
- EX_PM_IDLE_STATE_HISTORY_PHYP),
- &tmp);
- prlog(PR_TRACE, "SLW: core %x:%x"
- " history: 0x%016llx (mid2)\n",
- chip->id, pir_to_core_id(c->pir),
- tmp);
- }
- }
-
- prlog(PR_DEBUG, "SLW: Waking master (PIR 0x%04x)...\n", master->pir);
-
- /* Now poke all the secondary threads on the master's core */
- for_each_cpu(cpu) {
- if (!cpu_is_sibling(cpu, master) || (cpu == master))
- continue;
- icp_kick_cpu(cpu);
-
- /* Wait for it to claim to be back (XXX ADD TIMEOUT) */
- while(cpu->state != cpu_state_active)
- sync();
- }
-
- /* Now poke the master and be gone */
- icp_kick_cpu(master);
-}
-
-static void slw_patch_reset(void)
-{
- uint32_t *src, *dst, *sav;
-
- src = &reset_patch_start;
- dst = (uint32_t *)0x100;
- sav = slw_saved_reset;
- while(src < &reset_patch_end) {
- *(sav++) = *(dst);
- *(dst++) = *(src++);
- }
- sync_icache();
-}
-
-static void slw_unpatch_reset(void)
-{
- extern uint32_t reset_patch_start;
- extern uint32_t reset_patch_end;
- uint32_t *src, *dst, *sav;
-
- src = &reset_patch_start;
- dst = (uint32_t *)0x100;
- sav = slw_saved_reset;
- while(src < &reset_patch_end) {
- *(dst++) = *(sav++);
- src++;
- }
- sync_icache();
-}
-
static bool slw_general_init(struct proc_chip *chip, struct cpu_thread *c)
{
uint32_t core = pir_to_core_id(c->pir);
@@ -274,15 +151,6 @@ static bool slw_set_overrides_p9(struct proc_chip *chip, struct cpu_thread *c)
return true;
}
-static bool slw_unset_overrides(struct proc_chip *chip, struct cpu_thread *c)
-{
- uint32_t core = pir_to_core_id(c->pir);
-
- /* XXX FIXME: Save and restore the overrides */
- prlog(PR_DEBUG, "SLW: slw_unset_overrides %x:%x\n", chip->id, core);
- return true;
-}
-
static bool slw_set_idle_mode(struct proc_chip *chip, struct cpu_thread *c)
{
uint32_t core = pir_to_core_id(c->pir);
@@ -1201,197 +1069,6 @@ void add_cpu_idle_state_properties(void)
free(pm_ctrl_reg_mask_buf);
}
-static void slw_cleanup_core(struct proc_chip *chip, struct cpu_thread *c)
-{
- uint64_t tmp;
- int rc;
-
- /* Display history to check transition */
- rc = xscom_read(chip->id,
- XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
- EX_PM_IDLE_STATE_HISTORY_PHYP),
- &tmp);
- if (rc) {
- log_simple_error(&e_info(OPAL_RC_SLW_GET),
- "SLW: Failed to read PM_IDLE_STATE_HISTORY\n");
- /* XXX error handling ? return false; */
- }
-
- prlog(PR_DEBUG, "SLW: core %x:%x history: 0x%016llx (new1)\n",
- chip->id, pir_to_core_id(c->pir), tmp);
-
- rc = xscom_read(chip->id,
- XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
- EX_PM_IDLE_STATE_HISTORY_PHYP),
- &tmp);
- if (rc) {
- log_simple_error(&e_info(OPAL_RC_SLW_GET),
- "SLW: Failed to read PM_IDLE_STATE_HISTORY\n");
- /* XXX error handling ? return false; */
- }
-
- prlog(PR_DEBUG, "SLW: core %x:%x history: 0x%016llx (new2)\n",
- chip->id, pir_to_core_id(c->pir), tmp);
-
- /*
- * XXX FIXME: Error out if the transition didn't reach rvwinkle ?
- */
-
- /*
- * XXX FIXME: We should restore a bunch of the EX bits we
- * overwrite to sane values here
- */
- slw_unset_overrides(chip, c);
-}
-
-static void slw_cleanup_chip(struct proc_chip *chip)
-{
- struct cpu_thread *c;
-
- for_each_available_core_in_chip(c, chip->id)
- slw_cleanup_core(chip, c);
-}
-
-static void slw_patch_scans(struct proc_chip *chip, bool le_mode)
-{
- int64_t rc;
- uint64_t old_val, new_val;
-
- rc = sbe_xip_get_scalar((void *)chip->slw_base,
- "skip_ex_override_ring_scans", &old_val);
- if (rc) {
- log_simple_error(&e_info(OPAL_RC_SLW_REG),
- "SLW: Failed to read scan override on chip %d\n",
- chip->id);
- return;
- }
-
- new_val = le_mode ? 0 : 1;
-
- prlog(PR_TRACE, "SLW: Chip %d, LE value was: %lld, setting to %lld\n",
- chip->id, old_val, new_val);
-
- rc = sbe_xip_set_scalar((void *)chip->slw_base,
- "skip_ex_override_ring_scans", new_val);
- if (rc) {
- log_simple_error(&e_info(OPAL_RC_SLW_REG),
- "SLW: Failed to set LE mode on chip %d\n", chip->id);
- return;
- }
-}
-
-int64_t slw_reinit(uint64_t flags)
-{
- struct proc_chip *chip;
- struct cpu_thread *cpu;
- bool has_waker = false;
- bool target_le = slw_current_le;
-
- if (flags & OPAL_REINIT_CPUS_HILE_BE)
- target_le = false;
- if (flags & OPAL_REINIT_CPUS_HILE_LE)
- target_le = true;
-
- prlog(PR_TRACE, "SLW Reinit from CPU PIR 0x%04x,"
- " HILE set to %s endian...\n",
- this_cpu()->pir,
- target_le ? "little" : "big");
-
- /* Prepare chips/cores for rvwinkle */
- for_each_chip(chip) {
- if (!chip->slw_base) {
- log_simple_error(&e_info(OPAL_RC_SLW_INIT),
- "SLW: Not found on chip %d\n", chip->id);
- return OPAL_HARDWARE;
- }
-
- slw_patch_scans(chip, target_le);
- }
- slw_current_le = target_le;
-
- /* XXX Save HIDs ? Or do that in head.S ... */
-
- slw_patch_reset();
-
- /* rvwinkle everybody and pick one to wake me once I rvwinkle myself */
- for_each_available_cpu(cpu) {
- struct cpu_thread *master = NULL;
-
- if (cpu == this_cpu())
- continue;
-
- /* Pick up a waker for myself: it must not be a sibling of
- * the current CPU and must be a thread 0 (so it gets to
- * sync its timebase before doing time_wait_ms()
- */
- if (!has_waker && !cpu_is_sibling(cpu, this_cpu()) &&
- cpu_is_thread0(cpu)) {
- has_waker = true;
- master = this_cpu();
- }
- __cpu_queue_job(cpu, "slw_do_rvwinkle",
- slw_do_rvwinkle, master, true);
-
- /* Wait for it to claim to be down */
- while(cpu->state != cpu_state_rvwinkle)
- sync();
- }
-
- /* XXX Wait one second ! (should check xscom state ? ) */
- prlog(PR_TRACE, "SLW: Waiting one second...\n");
- time_wait_ms(1000);
- prlog(PR_TRACE, "SLW: Done.\n");
-
- for_each_chip(chip) {
- struct cpu_thread *c;
- uint64_t tmp;
- for_each_available_core_in_chip(c, chip->id) {
- xscom_read(chip->id,
- XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
- EX_PM_IDLE_STATE_HISTORY_PHYP),
- &tmp);
- prlog(PR_DEBUG, "SLW: core %x:%x"
- " history: 0x%016llx (mid)\n",
- chip->id, pir_to_core_id(c->pir), tmp);
- }
- }
-
-
- /* Wake everybody except on my core */
- for_each_cpu(cpu) {
- if (cpu->state != cpu_state_rvwinkle ||
- cpu_is_sibling(cpu, this_cpu()))
- continue;
- icp_kick_cpu(cpu);
-
- /* Wait for it to claim to be back (XXX ADD TIMEOUT) */
- while(cpu->state != cpu_state_active)
- sync();
- }
-
- /* Did we find a waker ? If we didn't, that means we had no
- * other core in the system, we can't do it
- */
- if (!has_waker) {
- prlog(PR_TRACE, "SLW: No candidate waker, giving up !\n");
- return OPAL_HARDWARE;
- }
-
- /* Our siblings are rvwinkling, and our waker is waiting for us
- * so let's just go down now
- */
- slw_do_rvwinkle(NULL);
-
- slw_unpatch_reset();
-
- for_each_chip(chip)
- slw_cleanup_chip(chip);
-
- prlog(PR_TRACE, "SLW Reinit complete !\n");
-
- return OPAL_SUCCESS;
-}
-
static void slw_patch_regs(struct proc_chip *chip)
{
struct cpu_thread *c;
diff --git a/include/phb3.h b/include/phb3.h
index c8a605f11..447e667cd 100644
--- a/include/phb3.h
+++ b/include/phb3.h
@@ -122,6 +122,7 @@
/* RBA Table : 256 bytes - Reject Bit Array
*
* 2048 interrupts, 1 bit each, indicates the reject state of interrupts
+ * Not used (Murano / Venice DD1 only)
*/
#define RBA_TABLE_SIZE 0x100
@@ -217,7 +218,6 @@ struct phb3 {
uint64_t tbl_peltv;
uint64_t tbl_pest;
uint64_t tbl_ivt;
- uint64_t tbl_rba;
bool skip_perst; /* Skip first perst */
bool has_link;
diff --git a/include/skiboot.h b/include/skiboot.h
index df11934f6..abb1ab71c 100644
--- a/include/skiboot.h
+++ b/include/skiboot.h
@@ -310,11 +310,6 @@ extern enum wakeup_engine_states wakeup_engine_state;
extern bool has_deep_states;
extern void nx_p9_rng_late_init(void);
-
-
-/* SLW reinit function for switching core settings */
-extern int64_t slw_reinit(uint64_t flags);
-
/* Patch SPR in SLW image */
extern int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
--
2.23.0