[Skiboot] [PATCH] pci: Standardise on uint64_t pe_number
Russell Currey
ruscur at russell.cc
Thu Aug 18 16:42:53 AEST 2016
Throughout skiboot (and the kernel) PE numbers are named "pe_no",
"pe_num" and "pe_number", and sized as 16-, 32- and 64-bit uints depending
on where you look. This is annoying and potentially misleading in cases
such as the OPAL API, where different calls take different int sizes
even though the PE number they want is the same.

Fix this by making *everything* uint64_t pe_number. In doing this, there
are some whitespace fixes, and mve_number gets dragged into this as well
for cases like get_msi_{32,64}, where the two essentially mean the same thing.

Signed-off-by: Russell Currey <ruscur at russell.cc>
---
No functional changes. Changing the int size in the OPAL API should make
no difference, as uints are zero-extended into 64-bit registers.
I tested this and it didn't break PCI, anyway.
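
To make the zero-extension point concrete, here's a minimal standalone
sketch (not skiboot code; the handler names and the PE value are made up)
showing that a caller which already holds the PE number zero-extended in a
64-bit register hands the callee the same value whether the parameter is
declared uint32_t or uint64_t, as long as the value fits the narrower type:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Old-style handler: PE number as a 32-bit uint. */
    static int64_t handler_u32(uint64_t phb_id, uint32_t pe_number)
    {
    	(void)phb_id;
    	return (int64_t)pe_number;
    }

    /* New-style handler: same PE number, now a 64-bit uint. */
    static int64_t handler_u64(uint64_t phb_id, uint64_t pe_number)
    {
    	(void)phb_id;
    	return (int64_t)pe_number;
    }

    int main(void)
    {
    	/* The caller keeps the PE number in a 64-bit register; valid
    	 * PE numbers are small, so the value fits every width skiboot
    	 * has used so far (16/32/64 bits). */
    	uint64_t reg = 0x2f;	/* hypothetical PE number */

    	assert(handler_u32(0, (uint32_t)reg) == handler_u64(0, reg));
    	printf("PE %llx handled identically by both prototypes\n",
    	       (unsigned long long)reg);
    	return 0;
    }

The wider prototype would of course see any high bits a caller set, but
valid PE numbers are far below 2^16, so that doesn't arise in practice.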
---
core/pci-opal.c | 18 ++---
doc/opal-api/opal-pci-map-pe-dma-window-44.rst | 14 ++--
doc/opal-api/opal-pci-map-pe-mmio-window-29.rst | 10 +--
doc/opal-api/opal-pci-set-mve-33.rst | 2 +-
doc/opal-api/opal-pci-set-xive-pe-37.rst | 2 +-
doc/opal-api/opal-pci-tce-kill-126.rst | 2 +-
hw/npu.c | 28 ++++----
hw/p7ioc-phb.c | 45 ++++++------
hw/phb3.c | 94 ++++++++++++-------------
hw/phb4.c | 51 +++++++-------
include/npu.h | 2 +-
include/pci.h | 24 ++++---
12 files changed, 147 insertions(+), 145 deletions(-)
diff --git a/core/pci-opal.c b/core/pci-opal.c
index c5a0f71..ba7a261 100644
--- a/core/pci-opal.c
+++ b/core/pci-opal.c
@@ -155,7 +155,7 @@ static int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number,
}
opal_call(OPAL_PCI_EEH_FREEZE_SET, opal_pci_eeh_freeze_set, 3);
-static int64_t opal_pci_err_inject(uint64_t phb_id, uint32_t pe_no,
+static int64_t opal_pci_err_inject(uint64_t phb_id, uint64_t pe_number,
uint32_t type, uint32_t func,
uint64_t addr, uint64_t mask)
{
@@ -172,7 +172,7 @@ static int64_t opal_pci_err_inject(uint64_t phb_id, uint32_t pe_no,
return OPAL_PARAMETER;
phb_lock(phb);
- rc = phb->ops->err_inject(phb, pe_no, type, func, addr, mask);
+ rc = phb->ops->err_inject(phb, pe_number, type, func, addr, mask);
phb_unlock(phb);
return rc;
@@ -220,7 +220,7 @@ static int64_t opal_pci_set_phb_mem_window(uint64_t phb_id,
}
opal_call(OPAL_PCI_SET_PHB_MEM_WINDOW, opal_pci_set_phb_mem_window, 6);
-static int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
+static int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint64_t pe_number,
uint16_t window_type,
uint16_t window_num,
uint16_t segment_num)
@@ -293,7 +293,7 @@ static int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe,
opal_call(OPAL_PCI_SET_PELTV, opal_pci_set_peltv, 4);
static int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number,
- uint32_t pe_number)
+ uint64_t pe_number)
{
struct phb *phb = pci_get_phb(phb_id);
int64_t rc;
@@ -368,7 +368,7 @@ opal_call(OPAL_PCI_MSI_EOI, opal_pci_msi_eoi, 2);
static int64_t opal_pci_tce_kill(uint64_t phb_id,
uint32_t kill_type,
- uint32_t pe_num, uint32_t tce_size,
+ uint64_t pe_number, uint32_t tce_size,
uint64_t dma_addr, uint32_t npages)
{
struct phb *phb = pci_get_phb(phb_id);
@@ -379,7 +379,7 @@ static int64_t opal_pci_tce_kill(uint64_t phb_id,
if (!phb->ops->tce_kill)
return OPAL_UNSUPPORTED;
phb_lock(phb);
- rc = phb->ops->tce_kill(phb, kill_type, pe_num, tce_size,
+ rc = phb->ops->tce_kill(phb, kill_type, pe_number, tce_size,
dma_addr, npages);
phb_unlock(phb);
@@ -387,7 +387,7 @@ static int64_t opal_pci_tce_kill(uint64_t phb_id,
}
opal_call(OPAL_PCI_TCE_KILL, opal_pci_tce_kill, 6);
-static int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
+static int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint64_t pe_number,
uint32_t xive_num)
{
struct phb *phb = pci_get_phb(phb_id);
@@ -472,7 +472,7 @@ static int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
}
opal_call(OPAL_GET_MSI_64, opal_get_msi_64, 6);
-static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number,
+static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint64_t pe_number,
uint16_t window_id,
uint16_t tce_levels,
uint64_t tce_table_addr,
@@ -497,7 +497,7 @@ static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number,
opal_call(OPAL_PCI_MAP_PE_DMA_WINDOW, opal_pci_map_pe_dma_window, 7);
static int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id,
- uint16_t pe_number,
+ uint64_t pe_number,
uint16_t window_id,
uint64_t pci_start_addr,
uint64_t pci_mem_size)
diff --git a/doc/opal-api/opal-pci-map-pe-dma-window-44.rst b/doc/opal-api/opal-pci-map-pe-dma-window-44.rst
index dbfe231..bec8f08 100644
--- a/doc/opal-api/opal-pci-map-pe-dma-window-44.rst
+++ b/doc/opal-api/opal-pci-map-pe-dma-window-44.rst
@@ -4,12 +4,13 @@ OPAL_PCI_MAP_PE_DMA_WINDOW
#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
- static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number,
- uint16_t window_id,
- uint16_t tce_levels,
- uint64_t tce_table_addr,
- uint64_t tce_table_size,
- uint64_t tce_page_size)
+ static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id,
+ uint64_t pe_number,
+ uint16_t window_id,
+ uint16_t tce_levels,
+ uint64_t tce_table_addr,
+ uint64_t tce_table_size,
+ uint64_t tce_page_size)
**WARNING:** following documentation is from old sources, and is possibly
not representative of OPALv3 as implemented by skiboot. This should be
@@ -99,4 +100,3 @@ Return value: ::
return OPAL_PARAMETER;
if (!phb->ops->map_pe_dma_window)
return OPAL_UNSUPPORTED;
-
diff --git a/doc/opal-api/opal-pci-map-pe-mmio-window-29.rst b/doc/opal-api/opal-pci-map-pe-mmio-window-29.rst
index 595c588..aaa1d9e 100644
--- a/doc/opal-api/opal-pci-map-pe-mmio-window-29.rst
+++ b/doc/opal-api/opal-pci-map-pe-mmio-window-29.rst
@@ -4,10 +4,11 @@ OPAL_PCI_MAP_PE_MMIO_WINDOW
#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
- static int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
- uint16_t window_type,
- uint16_t window_num,
- uint16_t segment_num)
+ static int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id,
+ uint64_t pe_number,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint16_t segment_num)
**WARNING:** following documentation is from old sources, and is possibly
not representative of OPALv3 as implemented by skiboot. This should be
@@ -43,4 +44,3 @@ Return value: ::
return OPAL_PARAMETER;
if (!phb->ops->map_pe_mmio_window)
return OPAL_UNSUPPORTED;
-
diff --git a/doc/opal-api/opal-pci-set-mve-33.rst b/doc/opal-api/opal-pci-set-mve-33.rst
index 66a81ed..bf53587 100644
--- a/doc/opal-api/opal-pci-set-mve-33.rst
+++ b/doc/opal-api/opal-pci-set-mve-33.rst
@@ -5,7 +5,7 @@ OPAL_PCI_SET_MVE
#define OPAL_PCI_SET_MVE 33
static int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number,
- uint32_t pe_number)
+ uint64_t pe_number)
**WARNING:** following documentation is from old sources, and is possibly
not representative of OPALv3 as implemented by skiboot. This should be
diff --git a/doc/opal-api/opal-pci-set-xive-pe-37.rst b/doc/opal-api/opal-pci-set-xive-pe-37.rst
index 67a86e7..a5c6582 100644
--- a/doc/opal-api/opal-pci-set-xive-pe-37.rst
+++ b/doc/opal-api/opal-pci-set-xive-pe-37.rst
@@ -2,7 +2,7 @@ OPAL_PCI_SET_XIVE_PE
====================
::
- static int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
+ static int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint64_t pe_number,
uint32_t xive_num)
**WARNING:** following documentation is from old sources, and is possibly
diff --git a/doc/opal-api/opal-pci-tce-kill-126.rst b/doc/opal-api/opal-pci-tce-kill-126.rst
index 4aa6eb9..dc7ab6b 100644
--- a/doc/opal-api/opal-pci-tce-kill-126.rst
+++ b/doc/opal-api/opal-pci-tce-kill-126.rst
@@ -4,7 +4,7 @@ OPAL_PCI_TCE_KILL
int64_t opal_pci_tce_kill(uint64_t phb_id,
uint32_t kill_type,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t tce_size,
uint64_t dma_addr,
uint32_t npages)
diff --git a/hw/npu.c b/hw/npu.c
index e805f10..bca53df 100644
--- a/hw/npu.c
+++ b/hw/npu.c
@@ -806,7 +806,7 @@ static void npu_hw_init(struct npu *p)
}
static int64_t npu_map_pe_dma_window_real(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_id,
uint64_t pci_start_addr,
uint64_t pci_mem_size)
@@ -816,8 +816,8 @@ static int64_t npu_map_pe_dma_window_real(struct phb *phb,
uint64_t tve;
/* Sanity check. Each PE has one corresponding TVE */
- if (pe_num >= NPU_NUM_OF_PES ||
- window_id != pe_num)
+ if (pe_number >= NPU_NUM_OF_PES ||
+ window_id != pe_number)
return OPAL_PARAMETER;
if (pci_mem_size) {
@@ -862,7 +862,7 @@ static int64_t npu_map_pe_dma_window_real(struct phb *phb,
}
static int64_t npu_map_pe_dma_window(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_id,
uint16_t tce_levels,
uint64_t tce_table_addr,
@@ -874,8 +874,8 @@ static int64_t npu_map_pe_dma_window(struct phb *phb,
uint64_t data64 = 0;
/* Sanity check. Each PE has one corresponding TVE */
- if (pe_num >= NPU_NUM_OF_PES ||
- window_id != pe_num)
+ if (pe_number >= NPU_NUM_OF_PES ||
+ window_id != pe_number)
return OPAL_PARAMETER;
/* Special condition, zero TCE table size used to disable
@@ -930,7 +930,7 @@ static int64_t npu_map_pe_dma_window(struct phb *phb,
}
static int64_t npu_set_pe(struct phb *phb,
- uint64_t pe_num,
+ uint64_t pe_number,
uint64_t bdfn,
uint8_t bcompare,
uint8_t dcompare,
@@ -946,7 +946,7 @@ static int64_t npu_set_pe(struct phb *phb,
if (action != OPAL_MAP_PE &&
action != OPAL_UNMAP_PE)
return OPAL_PARAMETER;
- if (pe_num >= NPU_NUM_OF_PES)
+ if (pe_number >= NPU_NUM_OF_PES)
return OPAL_PARAMETER;
/* All emulated PCI devices hooked to root bus, whose
@@ -957,7 +957,7 @@ static int64_t npu_set_pe(struct phb *phb,
return OPAL_PARAMETER;
link_idx = dev->index;
- dev->pe_num = pe_num;
+ dev->pe_number = pe_number;
/* Separate links will be mapped to different PEs */
if (bcompare != OpalPciBusAll ||
@@ -969,7 +969,7 @@ static int64_t npu_set_pe(struct phb *phb,
data64 = &p->pce_cache[link_idx];
if (action == OPAL_MAP_PE)
*data64 = SETFIELD(NPU_IODA_PCT_PE, *data64,
- pe_num);
+ pe_number);
else
*data64 = SETFIELD(NPU_IODA_PCT_PE, *data64,
NPU_NUM_OF_PES);
@@ -1105,7 +1105,7 @@ void npu_set_fence_state(struct npu *p, bool fence) {
}
/* Sets the NPU to trigger an error when a DMA occurs */
-static int64_t npu_err_inject(struct phb *phb, uint32_t pe_num,
+static int64_t npu_err_inject(struct phb *phb, uint64_t pe_number,
uint32_t type, uint32_t func __unused,
uint64_t addr __unused, uint64_t mask __unused)
{
@@ -1113,20 +1113,20 @@ static int64_t npu_err_inject(struct phb *phb, uint32_t pe_num,
struct npu_dev *dev = NULL;
int i;
- if (pe_num > NPU_NUM_OF_PES) {
+ if (pe_number > NPU_NUM_OF_PES) {
prlog(PR_ERR, "NPU: error injection failed, bad PE given\n");
return OPAL_PARAMETER;
}
for (i = 0; i < p->total_devices; i++) {
- if (p->devices[i].pe_num == pe_num) {
+ if (p->devices[i].pe_number == pe_number) {
dev = &p->devices[i];
break;
}
}
if (!dev) {
- prlog(PR_ERR, "NPU: couldn't find device with PE %x\n", pe_num);
+ prlog(PR_ERR, "NPU: couldn't find device with PE%llx\n", pe_number);
return OPAL_PARAMETER;
}
diff --git a/hw/p7ioc-phb.c b/hw/p7ioc-phb.c
index 8ea393c..d2a18a3 100644
--- a/hw/p7ioc-phb.c
+++ b/hw/p7ioc-phb.c
@@ -659,7 +659,7 @@ static int64_t p7ioc_err_inject_finalize(struct p7ioc_phb *p, uint64_t addr,
return OPAL_SUCCESS;
}
-static int64_t p7ioc_err_inject_mem32(struct p7ioc_phb *p, uint32_t pe_no,
+static int64_t p7ioc_err_inject_mem32(struct p7ioc_phb *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -670,7 +670,7 @@ static int64_t p7ioc_err_inject_mem32(struct p7ioc_phb *p, uint32_t pe_no,
a = 0x0ull;
prefer = 0x0ull;
for (index = 0; index < 128; index++) {
- if (GETFIELD(IODA_XXDT_PE, p->m32d_cache[index]) != pe_no)
+ if (GETFIELD(IODA_XXDT_PE, p->m32d_cache[index]) != pe_number)
continue;
base = p->m32_base + M32_PCI_START +
@@ -706,7 +706,7 @@ static int64_t p7ioc_err_inject_mem32(struct p7ioc_phb *p, uint32_t pe_no,
return p7ioc_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t p7ioc_err_inject_io32(struct p7ioc_phb *p, uint32_t pe_no,
+static int64_t p7ioc_err_inject_io32(struct p7ioc_phb *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -717,7 +717,7 @@ static int64_t p7ioc_err_inject_io32(struct p7ioc_phb *p, uint32_t pe_no,
a = 0x0ull;
prefer = 0x0ull;
for (index = 0; index < 128; index++) {
- if (GETFIELD(IODA_XXDT_PE, p->iod_cache[index]) != pe_no)
+ if (GETFIELD(IODA_XXDT_PE, p->iod_cache[index]) != pe_number)
continue;
base = p->io_base + (PHB_IO_SIZE / 128) * index;
@@ -751,7 +751,7 @@ static int64_t p7ioc_err_inject_io32(struct p7ioc_phb *p, uint32_t pe_no,
return p7ioc_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t p7ioc_err_inject_cfg(struct p7ioc_phb *p, uint32_t pe_no,
+static int64_t p7ioc_err_inject_cfg(struct p7ioc_phb *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -762,8 +762,8 @@ static int64_t p7ioc_err_inject_cfg(struct p7ioc_phb *p, uint32_t pe_no,
/* Looking into PELTM to see if the PCI bus# is owned
* by the PE#. Otherwise, we have to figure one out.
*/
- base = GETFIELD(IODA_PELTM_BUS, p->peltm_cache[pe_no]);
- v_bits = GETFIELD(IODA_PELTM_BUS_VALID, p->peltm_cache[pe_no]);
+ base = GETFIELD(IODA_PELTM_BUS, p->peltm_cache[pe_number]);
+ v_bits = GETFIELD(IODA_PELTM_BUS_VALID, p->peltm_cache[pe_number]);
switch (v_bits) {
case IODA_BUS_VALID_3_BITS:
case IODA_BUS_VALID_4_BITS:
@@ -771,7 +771,7 @@ static int64_t p7ioc_err_inject_cfg(struct p7ioc_phb *p, uint32_t pe_no,
case IODA_BUS_VALID_6_BITS:
case IODA_BUS_VALID_7_BITS:
case IODA_BUS_VALID_ALL:
- base = GETFIELD(IODA_PELTM_BUS, p->peltm_cache[pe_no]);
+ base = GETFIELD(IODA_PELTM_BUS, p->peltm_cache[pe_number]);
base &= (0xff - (((1 << (7 - v_bits)) - 1)));
a = SETFIELD(PHB_PAPR_ERR_INJ_MASK_CFG, 0x0ul, base);
m = PHB_PAPR_ERR_INJ_MASK_CFG;
@@ -792,7 +792,7 @@ static int64_t p7ioc_err_inject_cfg(struct p7ioc_phb *p, uint32_t pe_no,
return p7ioc_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t p7ioc_err_inject_dma(struct p7ioc_phb *p, uint32_t pe_no,
+static int64_t p7ioc_err_inject_dma(struct p7ioc_phb *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -801,7 +801,8 @@ static int64_t p7ioc_err_inject_dma(struct p7ioc_phb *p, uint32_t pe_no,
/* For DMA, we just pick address from TVT */
for (index = 0; index < 128; index++) {
- if (GETFIELD(IODA_TVT1_PE_NUM, p->tve_hi_cache[index]) != pe_no)
+ if (GETFIELD(IODA_TVT1_PE_NUM, p->tve_hi_cache[index]) !=
+ pe_number)
continue;
addr = SETFIELD(PHB_PAPR_ERR_INJ_MASK_DMA, 0ul, index);
@@ -816,12 +817,12 @@ static int64_t p7ioc_err_inject_dma(struct p7ioc_phb *p, uint32_t pe_no,
return p7ioc_err_inject_finalize(p, addr, mask, ctrl, is_write);
}
-static int64_t p7ioc_err_inject(struct phb *phb, uint32_t pe_no,
+static int64_t p7ioc_err_inject(struct phb *phb, uint64_t pe_number,
uint32_t type, uint32_t func,
uint64_t addr, uint64_t mask)
{
struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
- int64_t (*handler)(struct p7ioc_phb *p, uint32_t pe_no,
+ int64_t (*handler)(struct p7ioc_phb *p, uint64_t pe_number,
uint64_t addr, uint64_t mask, bool is_write);
bool is_write;
@@ -830,14 +831,14 @@ static int64_t p7ioc_err_inject(struct phb *phb, uint32_t pe_no,
return OPAL_UNSUPPORTED;
/* We can't inject error to the reserved PE#127 */
- if (pe_no > 126)
+ if (pe_number > 126)
return OPAL_PARAMETER;
/* Clear the leftover from last time */
out_be64(p->regs + PHB_PAPR_ERR_INJ_CTL, 0x0ul);
/* Check if PE number is valid one in PELTM cache */
- if (p->peltm_cache[pe_no] == 0x0001f80000000000ull)
+ if (p->peltm_cache[pe_number] == 0x0001f80000000000ull)
return OPAL_PARAMETER;
/* Clear the leftover from last time */
@@ -892,7 +893,7 @@ static int64_t p7ioc_err_inject(struct phb *phb, uint32_t pe_no,
return OPAL_PARAMETER;
}
- return handler(p, pe_no, addr, mask, is_write);
+ return handler(p, pe_number, addr, mask, is_write);
}
static int64_t p7ioc_get_diag_data(struct phb *phb, void *diag_buffer,
@@ -1025,7 +1026,7 @@ static int64_t p7ioc_phb_mmio_enable(struct phb *phb,
return OPAL_SUCCESS;
}
-static int64_t p7ioc_map_pe_mmio_window(struct phb *phb, uint16_t pe_number,
+static int64_t p7ioc_map_pe_mmio_window(struct phb *phb, uint64_t pe_number,
uint16_t window_type,
uint16_t window_num,
uint16_t segment_num)
@@ -1143,7 +1144,7 @@ static int64_t p7ioc_set_peltv(struct phb *phb, uint32_t parent_pe,
return OPAL_SUCCESS;
}
-static int64_t p7ioc_map_pe_dma_window(struct phb *phb, uint16_t pe_number,
+static int64_t p7ioc_map_pe_dma_window(struct phb *phb, uint64_t pe_number,
uint16_t window_id, uint16_t tce_levels,
uint64_t tce_table_addr,
uint64_t tce_table_size,
@@ -1225,7 +1226,7 @@ static int64_t p7ioc_map_pe_dma_window(struct phb *phb, uint16_t pe_number,
}
static int64_t p7ioc_map_pe_dma_window_real(struct phb *phb __unused,
- uint16_t pe_number __unused,
+ uint64_t pe_number __unused,
uint16_t dma_window_num __unused,
uint64_t pci_start_addr __unused,
uint64_t pci_mem_size __unused)
@@ -1235,7 +1236,7 @@ static int64_t p7ioc_map_pe_dma_window_real(struct phb *phb __unused,
}
static int64_t p7ioc_set_mve(struct phb *phb, uint32_t mve_number,
- uint32_t pe_number)
+ uint64_t pe_number)
{
struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
uint64_t pelt, mve = 0;
@@ -1295,7 +1296,7 @@ static int64_t p7ioc_set_mve_enable(struct phb *phb, uint32_t mve_number,
return OPAL_SUCCESS;
}
-static int64_t p7ioc_set_xive_pe(struct phb *phb, uint32_t pe_number,
+static int64_t p7ioc_set_xive_pe(struct phb *phb, uint64_t pe_number,
uint32_t xive_num)
{
struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
@@ -1331,7 +1332,7 @@ static int64_t p7ioc_get_xive_source(struct phb *phb, uint32_t xive_num,
return OPAL_SUCCESS;
}
-static int64_t p7ioc_get_msi_32(struct phb *phb __unused, uint32_t mve_number,
+static int64_t p7ioc_get_msi_32(struct phb *phb __unused, uint64_t mve_number,
uint32_t xive_num, uint8_t msi_range,
uint32_t *msi_address, uint32_t *message_data)
{
@@ -1344,7 +1345,7 @@ static int64_t p7ioc_get_msi_32(struct phb *phb __unused, uint32_t mve_number,
return OPAL_SUCCESS;
}
-static int64_t p7ioc_get_msi_64(struct phb *phb __unused, uint32_t mve_number,
+static int64_t p7ioc_get_msi_64(struct phb *phb __unused, uint64_t mve_number,
uint32_t xive_num, uint8_t msi_range,
uint64_t *msi_address, uint32_t *message_data)
{
diff --git a/hw/phb3.c b/hw/phb3.c
index 8d34c0f..9689509 100644
--- a/hw/phb3.c
+++ b/hw/phb3.c
@@ -871,7 +871,7 @@ static int64_t phb3_phb_mmio_enable(struct phb *phb,
}
static int64_t phb3_map_pe_mmio_window(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_type,
uint16_t window_num,
uint16_t segment_num)
@@ -879,7 +879,7 @@ static int64_t phb3_map_pe_mmio_window(struct phb *phb,
struct phb3 *p = phb_to_phb3(phb);
uint64_t data64, *cache;
- if (pe_num >= PHB3_MAX_PE_NUM)
+ if (pe_number >= PHB3_MAX_PE_NUM)
return OPAL_PARAMETER;
/*
@@ -898,8 +898,8 @@ static int64_t phb3_map_pe_mmio_window(struct phb *phb,
cache = &p->m32d_cache[segment_num];
phb3_ioda_sel(p, IODA2_TBL_M32DT, segment_num, false);
out_be64(p->regs + PHB_IODA_DATA0,
- SETFIELD(IODA2_M32DT_PE, 0ull, pe_num));
- *cache = SETFIELD(IODA2_M32DT_PE, 0ull, pe_num);
+ SETFIELD(IODA2_M32DT_PE, 0ull, pe_number));
+ *cache = SETFIELD(IODA2_M32DT_PE, 0ull, pe_number);
break;
case OPAL_M64_WINDOW_TYPE:
@@ -913,8 +913,8 @@ static int64_t phb3_map_pe_mmio_window(struct phb *phb,
return OPAL_PARTIAL;
data64 |= IODA2_M64BT_SINGLE_PE;
- data64 = SETFIELD(IODA2_M64BT_PE_HI, data64, pe_num >> 5);
- data64 = SETFIELD(IODA2_M64BT_PE_LOW, data64, pe_num);
+ data64 = SETFIELD(IODA2_M64BT_PE_HI, data64, pe_number >> 5);
+ data64 = SETFIELD(IODA2_M64BT_PE_LOW, data64, pe_number);
*cache = data64;
break;
@@ -926,7 +926,7 @@ static int64_t phb3_map_pe_mmio_window(struct phb *phb,
}
static int64_t phb3_map_pe_dma_window(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_id,
uint16_t tce_levels,
uint64_t tce_table_addr,
@@ -941,8 +941,8 @@ static int64_t phb3_map_pe_dma_window(struct phb *phb,
* Sanity check. We currently only support "2 window per PE" mode
* ie, only bit 59 of the PCI address is used to select the window
*/
- if (pe_num >= PHB3_MAX_PE_NUM ||
- (window_id >> 1) != pe_num)
+ if (pe_number >= PHB3_MAX_PE_NUM ||
+ (window_id >> 1) != pe_number)
return OPAL_PARAMETER;
/*
@@ -998,7 +998,7 @@ static int64_t phb3_map_pe_dma_window(struct phb *phb,
}
static int64_t phb3_map_pe_dma_window_real(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_id,
uint64_t pci_start_addr,
uint64_t pci_mem_size)
@@ -1007,8 +1007,8 @@ static int64_t phb3_map_pe_dma_window_real(struct phb *phb,
uint64_t end;
uint64_t tve;
- if (pe_num >= PHB3_MAX_PE_NUM ||
- (window_id >> 1) != pe_num)
+ if (pe_number >= PHB3_MAX_PE_NUM ||
+ (window_id >> 1) != pe_number)
return OPAL_PARAMETER;
if (pci_mem_size) {
@@ -1182,7 +1182,7 @@ static int64_t phb3_pci_msi_eoi(struct phb *phb,
}
static int64_t phb3_set_ive_pe(struct phb *phb,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t ive_num)
{
struct phb3 *p = phb_to_phb3(phb);
@@ -1194,18 +1194,18 @@ static int64_t phb3_set_ive_pe(struct phb *phb,
return OPAL_HARDWARE;
/* Each IVE reserves 128 bytes */
- if (pe_num >= PHB3_MAX_PE_NUM ||
+ if (pe_number >= PHB3_MAX_PE_NUM ||
ive_num >= IVT_TABLE_ENTRIES)
return OPAL_PARAMETER;
/* Update IVE cache */
cache = &p->ive_cache[ive_num];
- *cache = SETFIELD(IODA2_IVT_PE, *cache, pe_num);
+ *cache = SETFIELD(IODA2_IVT_PE, *cache, pe_number);
/* Update in-memory IVE without clobbering P and Q */
ivep = p->tbl_ivt + (ive_num * IVT_TABLE_STRIDE * 8);
pe_word = (uint16_t *)(ivep + 6);
- *pe_word = pe_num;
+ *pe_word = pe_number;
/* Invalidate IVC */
data64 = SETFIELD(PHB_IVC_INVALIDATE_SID, 0ul, ive_num);
@@ -1215,7 +1215,7 @@ static int64_t phb3_set_ive_pe(struct phb *phb,
}
static int64_t phb3_get_msi_32(struct phb *phb __unused,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t ive_num,
uint8_t msi_range,
uint32_t *msi_address,
@@ -1227,7 +1227,7 @@ static int64_t phb3_get_msi_32(struct phb *phb __unused,
* by its DMA address and data, but the check isn't
* harmful.
*/
- if (pe_num >= PHB3_MAX_PE_NUM ||
+ if (pe_number >= PHB3_MAX_PE_NUM ||
ive_num >= IVT_TABLE_ENTRIES ||
msi_range != 1 || !msi_address|| !message_data)
return OPAL_PARAMETER;
@@ -1243,14 +1243,14 @@ static int64_t phb3_get_msi_32(struct phb *phb __unused,
}
static int64_t phb3_get_msi_64(struct phb *phb __unused,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t ive_num,
uint8_t msi_range,
uint64_t *msi_address,
uint32_t *message_data)
{
/* Sanity check */
- if (pe_num >= PHB3_MAX_PE_NUM ||
+ if (pe_number >= PHB3_MAX_PE_NUM ||
ive_num >= IVT_TABLE_ENTRIES ||
msi_range != 1 || !msi_address || !message_data)
return OPAL_PARAMETER;
@@ -1825,8 +1825,8 @@ static const struct irq_source_ops phb3_lsi_irq_ops = {
};
static int64_t phb3_set_pe(struct phb *phb,
- uint64_t pe_num,
- uint64_t bdfn,
+ uint64_t pe_number,
+ uint64_t bdfn,
uint8_t bcompare,
uint8_t dcompare,
uint8_t fcompare,
@@ -1842,7 +1842,7 @@ static int64_t phb3_set_pe(struct phb *phb,
return OPAL_HARDWARE;
if (action != OPAL_MAP_PE && action != OPAL_UNMAP_PE)
return OPAL_PARAMETER;
- if (pe_num >= PHB3_MAX_PE_NUM || bdfn > 0xffff ||
+ if (pe_number >= PHB3_MAX_PE_NUM || bdfn > 0xffff ||
bcompare > OpalPciBusAll ||
dcompare > OPAL_COMPARE_RID_DEVICE_NUMBER ||
fcompare > OPAL_COMPARE_RID_FUNCTION_NUMBER)
@@ -1877,7 +1877,7 @@ static int64_t phb3_set_pe(struct phb *phb,
if (all == 0x7) {
if (action == OPAL_MAP_PE) {
for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++)
- p->rte_cache[idx] = pe_num;
+ p->rte_cache[idx] = pe_number;
} else {
for ( idx = 0; idx < ARRAY_SIZE(p->rte_cache); idx++)
p->rte_cache[idx] = PHB3_RESERVED_PE_NUM;
@@ -1889,7 +1889,7 @@ static int64_t phb3_set_pe(struct phb *phb,
if ((idx & mask) != val)
continue;
if (action == OPAL_MAP_PE)
- p->rte_cache[idx] = pe_num;
+ p->rte_cache[idx] = pe_number;
else
p->rte_cache[idx] = PHB3_RESERVED_PE_NUM;
*rte = p->rte_cache[idx];
@@ -2821,7 +2821,7 @@ static int64_t phb3_err_inject_finalize(struct phb3 *p, uint64_t addr,
return OPAL_SUCCESS;
}
-static int64_t phb3_err_inject_mem32(struct phb3 *p, uint32_t pe_no,
+static int64_t phb3_err_inject_mem32(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -2834,7 +2834,7 @@ static int64_t phb3_err_inject_mem32(struct phb3 *p, uint32_t pe_no,
a = base = len = 0x0ull;
for (index = 0; index < PHB3_MAX_PE_NUM; index++) {
- if (GETFIELD(IODA2_M32DT_PE, p->m32d_cache[index]) != pe_no)
+ if (GETFIELD(IODA2_M32DT_PE, p->m32d_cache[index]) != pe_number)
continue;
/* Obviously, we can't support discontiguous segments.
@@ -2875,7 +2875,7 @@ static int64_t phb3_err_inject_mem32(struct phb3 *p, uint32_t pe_no,
return phb3_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t phb3_err_inject_mem64(struct phb3 *p, uint32_t pe_no,
+static int64_t phb3_err_inject_mem64(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -2888,14 +2888,14 @@ static int64_t phb3_err_inject_mem64(struct phb3 *p, uint32_t pe_no,
s_index = 0;
e_index = ARRAY_SIZE(p->m64b_cache) - 2;
for (index = 0; index < RTT_TABLE_ENTRIES; index++) {
- if (p->rte_cache[index] != pe_no)
+ if (p->rte_cache[index] != pe_number)
continue;
if (index + 8 >= RTT_TABLE_ENTRIES)
break;
/* PCI bus dependent PE */
- if (p->rte_cache[index + 8] == pe_no) {
+ if (p->rte_cache[index + 8] == pe_number) {
s_index = e_index = ARRAY_SIZE(p->m64b_cache) - 1;
break;
}
@@ -2908,8 +2908,8 @@ static int64_t phb3_err_inject_mem64(struct phb3 *p, uint32_t pe_no,
continue;
if (cache & IODA2_M64BT_SINGLE_PE) {
- if (GETFIELD(IODA2_M64BT_PE_HI, cache) != (pe_no >> 5) ||
- GETFIELD(IODA2_M64BT_PE_LOW, cache) != (pe_no & 0x1f))
+ if (GETFIELD(IODA2_M64BT_PE_HI, cache) != (pe_number >> 5) ||
+ GETFIELD(IODA2_M64BT_PE_LOW, cache) != (pe_number & 0x1f))
continue;
segstart = GETFIELD(IODA2_M64BT_SINGLE_BASE, cache);
@@ -2923,7 +2923,7 @@ static int64_t phb3_err_inject_mem64(struct phb3 *p, uint32_t pe_no,
segsize = (0x40000000ull - segsize) << 20;
segsize /= PHB3_MAX_PE_NUM;
- segstart = segstart + segsize * pe_no;
+ segstart = segstart + segsize * pe_number;
}
/* First window always wins based on the ascending
@@ -2960,7 +2960,7 @@ static int64_t phb3_err_inject_mem64(struct phb3 *p, uint32_t pe_no,
return phb3_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t phb3_err_inject_cfg(struct phb3 *p, uint32_t pe_no,
+static int64_t phb3_err_inject_cfg(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
@@ -2973,13 +2973,13 @@ static int64_t phb3_err_inject_cfg(struct phb3 *p, uint32_t pe_no,
prefer = 0xffffull;
m = PHB_PAPR_ERR_INJ_MASK_CFG_ALL;
for (bdfn = 0; bdfn < RTT_TABLE_ENTRIES; bdfn++) {
- if (p->rte_cache[bdfn] != pe_no)
+ if (p->rte_cache[bdfn] != pe_number)
continue;
/* The PE can be associated with PCI bus or device */
is_bus_pe = false;
if ((bdfn + 8) < RTT_TABLE_ENTRIES &&
- p->rte_cache[bdfn + 8] == pe_no)
+ p->rte_cache[bdfn + 8] == pe_number)
is_bus_pe = true;
/* Figure out the PCI config address */
@@ -3020,7 +3020,7 @@ static int64_t phb3_err_inject_cfg(struct phb3 *p, uint32_t pe_no,
return phb3_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t phb3_err_inject_dma(struct phb3 *p, uint32_t pe_no,
+static int64_t phb3_err_inject_dma(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write, bool is_64bits)
{
@@ -3031,10 +3031,10 @@ static int64_t phb3_err_inject_dma(struct phb3 *p, uint32_t pe_no,
/* TVE index and base address */
if (!is_64bits) {
- index = (pe_no << 1);
+ index = (pe_number << 1);
base = 0x0ull;
} else {
- index = ((pe_no << 1) + 1);
+ index = ((pe_number << 1) + 1);
base = (0x1ull << 59);
}
@@ -3084,26 +3084,26 @@ static int64_t phb3_err_inject_dma(struct phb3 *p, uint32_t pe_no,
return phb3_err_inject_finalize(p, a, m, ctrl, is_write);
}
-static int64_t phb3_err_inject_dma32(struct phb3 *p, uint32_t pe_no,
+static int64_t phb3_err_inject_dma32(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
- return phb3_err_inject_dma(p, pe_no, addr, mask, is_write, false);
+ return phb3_err_inject_dma(p, pe_number, addr, mask, is_write, false);
}
-static int64_t phb3_err_inject_dma64(struct phb3 *p, uint32_t pe_no,
+static int64_t phb3_err_inject_dma64(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask,
bool is_write)
{
- return phb3_err_inject_dma(p, pe_no, addr, mask, is_write, true);
+ return phb3_err_inject_dma(p, pe_number, addr, mask, is_write, true);
}
-static int64_t phb3_err_inject(struct phb *phb, uint32_t pe_no,
+static int64_t phb3_err_inject(struct phb *phb, uint64_t pe_number,
uint32_t type, uint32_t func,
uint64_t addr, uint64_t mask)
{
struct phb3 *p = phb_to_phb3(phb);
- int64_t (*handler)(struct phb3 *p, uint32_t pe_no,
+ int64_t (*handler)(struct phb3 *p, uint64_t pe_number,
uint64_t addr, uint64_t mask, bool is_write);
bool is_write;
@@ -3112,7 +3112,7 @@ static int64_t phb3_err_inject(struct phb *phb, uint32_t pe_no,
return OPAL_HARDWARE;
/* We can't inject error to the reserved PE */
- if (pe_no == PHB3_RESERVED_PE_NUM || pe_no >= PHB3_MAX_PE_NUM)
+ if (pe_number == PHB3_RESERVED_PE_NUM || pe_number >= PHB3_MAX_PE_NUM)
return OPAL_PARAMETER;
/* Clear leftover from last time */
@@ -3169,7 +3169,7 @@ static int64_t phb3_err_inject(struct phb *phb, uint32_t pe_no,
return OPAL_PARAMETER;
}
- return handler(p, pe_no, addr, mask, is_write);
+ return handler(p, pe_number, addr, mask, is_write);
}
static int64_t phb3_get_diag_data(struct phb *phb,
diff --git a/hw/phb4.c b/hw/phb4.c
index d3e7620..e7de70c 100644
--- a/hw/phb4.c
+++ b/hw/phb4.c
@@ -790,7 +790,7 @@ static int64_t phb4_wait_bit(struct phb4 *p, uint32_t reg,
}
static int64_t phb4_tce_kill(struct phb *phb, uint32_t kill_type,
- uint32_t pe_num, uint32_t tce_size,
+ uint64_t pe_number, uint32_t tce_size,
uint64_t dma_addr, uint32_t npages)
{
struct phb4 *p = phb_to_phb4(phb);
@@ -808,7 +808,7 @@ static int64_t phb4_tce_kill(struct phb *phb, uint32_t kill_type,
PHB_TCE_KILL_ONE, 0);
if (rc)
return rc;
- val = SETFIELD(PHB_TCE_KILL_PENUM, dma_addr, pe_num);
+ val = SETFIELD(PHB_TCE_KILL_PENUM, dma_addr, pe_number);
/* Set appropriate page size */
switch(tce_size) {
@@ -850,7 +850,7 @@ static int64_t phb4_tce_kill(struct phb *phb, uint32_t kill_type,
return rc;
/* Perform kill */
out_be64(p->regs + PHB_TCE_KILL, PHB_TCE_KILL_PE |
- SETFIELD(PHB_TCE_KILL_PENUM, 0ull, pe_num));
+ SETFIELD(PHB_TCE_KILL_PENUM, 0ull, pe_number));
break;
case OPAL_PCI_TCE_KILL_ALL:
/* Wait for a slot in the HW kill queue */
@@ -1146,7 +1146,7 @@ static int64_t phb4_phb_mmio_enable(struct phb __unused *phb,
}
static int64_t phb4_map_pe_mmio_window(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_type,
uint16_t window_num,
uint16_t segment_num)
@@ -1154,7 +1154,7 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
struct phb4 *p = phb_to_phb4(phb);
uint64_t mbt0, mbt1, mdt;
- if (pe_num >= p->num_pes)
+ if (pe_number >= p->num_pes)
return OPAL_PARAMETER;
/*
@@ -1172,7 +1172,7 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
return OPAL_PARAMETER;
mdt = p->mdt_cache[segment_num];
- mdt = SETFIELD(IODA3_MDT_PE_A, mdt, pe_num);
+ mdt = SETFIELD(IODA3_MDT_PE_A, mdt, pe_number);
p->mdt_cache[segment_num] = mdt;
phb4_ioda_sel(p, IODA3_TBL_MDT, segment_num, false);
out_be64(p->regs + PHB_IODA_DATA0, mdt);
@@ -1191,7 +1191,7 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
/* Set to single PE mode and configure the PE */
mbt0 = SETFIELD(IODA3_MBT0_MODE, mbt0,
IODA3_MBT0_MODE_SINGLE_PE);
- mbt1 = SETFIELD(IODA3_MBT1_SINGLE_PE_NUM, mbt1, pe_num);
+ mbt1 = SETFIELD(IODA3_MBT1_SINGLE_PE_NUM, mbt1, pe_number);
p->mbt_cache[window_num][0] = mbt0;
p->mbt_cache[window_num][1] = mbt1;
break;
@@ -1203,7 +1203,7 @@ static int64_t phb4_map_pe_mmio_window(struct phb *phb,
}
static int64_t phb4_map_pe_dma_window(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_id,
uint16_t tce_levels,
uint64_t tce_table_addr,
@@ -1224,7 +1224,7 @@ static int64_t phb4_map_pe_dma_window(struct phb *phb,
* Sanity check. We currently only support "2 window per PE" mode
* ie, only bit 59 of the PCI address is used to select the window
*/
- if (pe_num >= p->num_pes || (window_id >> 1) != pe_num)
+ if (pe_number >= p->num_pes || (window_id >> 1) != pe_number)
return OPAL_PARAMETER;
/*
@@ -1282,7 +1282,7 @@ static int64_t phb4_map_pe_dma_window(struct phb *phb,
}
static int64_t phb4_map_pe_dma_window_real(struct phb *phb,
- uint16_t pe_num,
+ uint64_t pe_number,
uint16_t window_id,
uint64_t pci_start_addr,
uint64_t pci_mem_size)
@@ -1291,8 +1291,8 @@ static int64_t phb4_map_pe_dma_window_real(struct phb *phb,
uint64_t end = pci_start_addr + pci_mem_size;
uint64_t tve;
- if (pe_num >= p->num_pes ||
- (window_id >> 1) != pe_num)
+ if (pe_number >= p->num_pes ||
+ (window_id >> 1) != pe_number)
return OPAL_PARAMETER;
if (pci_mem_size) {
@@ -1346,7 +1346,7 @@ static int64_t phb4_map_pe_dma_window_real(struct phb *phb,
}
static int64_t phb4_set_ive_pe(struct phb *phb,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t ive_num)
{
struct phb4 *p = phb_to_phb4(phb);
@@ -1355,14 +1355,14 @@ static int64_t phb4_set_ive_pe(struct phb *phb,
uint32_t mist_shift;
uint64_t val;
- if (pe_num >= p->num_pes || ive_num >= (p->num_irqs - 8))
+ if (pe_number >= p->num_pes || ive_num >= (p->num_irqs - 8))
return OPAL_PARAMETER;
mist_idx = ive_num >> 2;
mist_quad = ive_num & 3;
mist_shift = (3 - mist_quad) << 4;
p->mist_cache[mist_idx] &= ~(0x0fffull << mist_shift);
- p->mist_cache[mist_idx] |= ((uint64_t)pe_num) << mist_shift;
+ p->mist_cache[mist_idx] |= ((uint64_t)pe_number) << mist_shift;
/* Note: This has the side effect of clearing P/Q, so this
* shouldn't be called while the interrupt is "hot"
@@ -1384,7 +1384,7 @@ static int64_t phb4_set_ive_pe(struct phb *phb,
}
static int64_t phb4_get_msi_32(struct phb *phb,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t ive_num,
uint8_t msi_range,
uint32_t *msi_address,
@@ -1398,7 +1398,7 @@ static int64_t phb4_get_msi_32(struct phb *phb,
* by its DMA address and data, but the check isn't
* harmful.
*/
- if (pe_num >= p->num_pes ||
+ if (pe_number >= p->num_pes ||
ive_num >= (p->num_irqs - 8) ||
msi_range != 1 || !msi_address|| !message_data)
return OPAL_PARAMETER;
@@ -1414,7 +1414,7 @@ static int64_t phb4_get_msi_32(struct phb *phb,
}
static int64_t phb4_get_msi_64(struct phb *phb,
- uint32_t pe_num,
+ uint64_t pe_number,
uint32_t ive_num,
uint8_t msi_range,
uint64_t *msi_address,
@@ -1423,7 +1423,7 @@ static int64_t phb4_get_msi_64(struct phb *phb,
struct phb4 *p = phb_to_phb4(phb);
/* Sanity check */
- if (pe_num >= p->num_pes ||
+ if (pe_number >= p->num_pes ||
ive_num >= (p->num_irqs - 8) ||
msi_range != 1 || !msi_address || !message_data)
return OPAL_PARAMETER;
@@ -1533,7 +1533,7 @@ static void phb4_read_phb_status(struct phb4 *p,
}
static int64_t phb4_set_pe(struct phb *phb,
- uint64_t pe_num,
+ uint64_t pe_number,
uint64_t bdfn,
uint8_t bcompare,
uint8_t dcompare,
@@ -1550,7 +1550,7 @@ static int64_t phb4_set_pe(struct phb *phb,
return OPAL_HARDWARE;
if (action != OPAL_MAP_PE && action != OPAL_UNMAP_PE)
return OPAL_PARAMETER;
- if (pe_num >= p->num_pes || bdfn > 0xffff ||
+ if (pe_number >= p->num_pes || bdfn > 0xffff ||
bcompare > OpalPciBusAll ||
dcompare > OPAL_COMPARE_RID_DEVICE_NUMBER ||
fcompare > OPAL_COMPARE_RID_FUNCTION_NUMBER)
@@ -1585,7 +1585,7 @@ static int64_t phb4_set_pe(struct phb *phb,
if (all == 0x7) {
if (action == OPAL_MAP_PE) {
for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++)
- p->rte_cache[idx] = pe_num;
+ p->rte_cache[idx] = pe_number;
} else {
for ( idx = 0; idx < ARRAY_SIZE(p->rte_cache); idx++)
p->rte_cache[idx] = PHB4_RESERVED_PE_NUM(p);
@@ -1597,7 +1597,7 @@ static int64_t phb4_set_pe(struct phb *phb,
if ((idx & mask) != val)
continue;
if (action == OPAL_MAP_PE)
- p->rte_cache[idx] = pe_num;
+ p->rte_cache[idx] = pe_number;
else
p->rte_cache[idx] = PHB4_RESERVED_PE_NUM(p);
*rte = p->rte_cache[idx];
@@ -2327,7 +2327,7 @@ static int64_t phb4_eeh_next_error(struct phb *phb,
return OPAL_SUCCESS;
}
-static int64_t phb4_err_inject(struct phb *phb, uint32_t pe_no,
+static int64_t phb4_err_inject(struct phb *phb, uint64_t pe_number,
uint32_t type, uint32_t func,
uint64_t addr, uint64_t mask)
{
@@ -2535,7 +2535,7 @@ static bool phb4_init_rc_cfg(struct phb4 *p)
PCIECAP_AER_UE_POISON_TLP |
PCIECAP_AER_UE_COMPL_TIMEOUT |
PCIECAP_AER_UE_COMPL_ABORT);
-
+
/* Clear all CE status */
phb4_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_STATUS,
0xffffffff);
@@ -3432,4 +3432,3 @@ void probe_phb4(void)
dt_for_each_compatible(dt_root, np, "ibm,power9-pciex")
phb4_create(np);
}
-
diff --git a/include/npu.h b/include/npu.h
index 800515a..d92d3ed 100644
--- a/include/npu.h
+++ b/include/npu.h
@@ -147,7 +147,7 @@ struct npu_dev {
uint32_t procedure_status;
- uint8_t pe_num;
+ uint64_t pe_number;
/* Used to associate the NPU device with GPU PCI devices */
const char *slot_label;
diff --git a/include/pci.h b/include/pci.h
index 1915adc..61ece8f 100644
--- a/include/pci.h
+++ b/include/pci.h
@@ -213,8 +213,9 @@ struct phb_ops {
uint64_t eeh_action_token);
int64_t (*eeh_freeze_set)(struct phb *phb, uint64_t pe_number,
uint64_t eeh_action_token);
- int64_t (*err_inject)(struct phb *phb, uint32_t pe_no, uint32_t type,
- uint32_t func, uint64_t addr, uint64_t mask);
+ int64_t (*err_inject)(struct phb *phb, uint64_t pe_number,
+ uint32_t type, uint32_t func, uint64_t addr,
+ uint64_t mask);
int64_t (*get_diag_data)(struct phb *phb, void *diag_buffer,
uint64_t diag_buffer_len);
int64_t (*get_diag_data2)(struct phb *phb, void *diag_buffer,
@@ -236,7 +237,7 @@ struct phb_ops {
uint16_t window_num, uint64_t addr,
uint64_t pci_addr, uint64_t size);
- int64_t (*map_pe_mmio_window)(struct phb *phb, uint16_t pe_number,
+ int64_t (*map_pe_mmio_window)(struct phb *phb, uint64_t pe_number,
uint16_t window_type, uint16_t window_num,
uint16_t segment_num);
@@ -248,34 +249,34 @@ struct phb_ops {
int64_t (*set_peltv)(struct phb *phb, uint32_t parent_pe,
uint32_t child_pe, uint8_t state);
- int64_t (*map_pe_dma_window)(struct phb *phb, uint16_t pe_number,
+ int64_t (*map_pe_dma_window)(struct phb *phb, uint64_t pe_number,
uint16_t window_id, uint16_t tce_levels,
uint64_t tce_table_addr,
uint64_t tce_table_size,
uint64_t tce_page_size);
- int64_t (*map_pe_dma_window_real)(struct phb *phb, uint16_t pe_number,
+ int64_t (*map_pe_dma_window_real)(struct phb *phb, uint64_t pe_number,
uint16_t dma_window_number,
uint64_t pci_start_addr,
uint64_t pci_mem_size);
int64_t (*set_mve)(struct phb *phb, uint32_t mve_number,
- uint32_t pe_number);
+ uint64_t pe_number);
int64_t (*set_mve_enable)(struct phb *phb, uint32_t mve_number,
uint32_t state);
- int64_t (*set_xive_pe)(struct phb *phb, uint32_t pe_number,
+ int64_t (*set_xive_pe)(struct phb *phb, uint64_t pe_number,
uint32_t xive_num);
int64_t (*get_xive_source)(struct phb *phb, uint32_t xive_num,
int32_t *interrupt_source_number);
- int64_t (*get_msi_32)(struct phb *phb, uint32_t mve_number,
+ int64_t (*get_msi_32)(struct phb *phb, uint64_t mve_number,
uint32_t xive_num, uint8_t msi_range,
uint32_t *msi_address, uint32_t *message_data);
- int64_t (*get_msi_64)(struct phb *phb, uint32_t mve_number,
+ int64_t (*get_msi_64)(struct phb *phb, uint64_t mve_number,
uint32_t xive_num, uint8_t msi_range,
uint64_t *msi_address, uint32_t *message_data);
@@ -296,11 +297,12 @@ struct phb_ops {
/* TCE Kill abstraction */
int64_t (*tce_kill)(struct phb *phb, uint32_t kill_type,
- uint32_t pe_num, uint32_t tce_size,
+ uint64_t pe_number, uint32_t tce_size,
uint64_t dma_addr, uint32_t npages);
/* Put phb in capi mode or pcie mode */
- int64_t (*set_capi_mode)(struct phb *phb, uint64_t mode, uint64_t pe_number);
+ int64_t (*set_capi_mode)(struct phb *phb, uint64_t mode,
+ uint64_t pe_number);
int64_t (*set_capp_recovery)(struct phb *phb);
};
--
2.9.3