[Skiboot] [PATCH v2 06/15] hw/npu2: Don't repopulate NPU devices in NVLink init path
Andrew Donnellan
andrew.donnellan at au1.ibm.com
Fri Jan 11 12:09:26 AEDT 2019
In 68415d5e38ef ("hw/npu2: Common NPU2 init routine between NVLink and
OpenCAPI") we refactored a large chunk of the NPU init path into common
code, including walking the device tree to set up npu2_devs based on the
ibm,npu-link device tree nodes.
We didn't actually remove the code that does that in
npu2_populate_devices(), so currently we populate the devices in the common
setup path, then repopulate them incorrectly in the NVLink setup path.
Currently this is fine, because we don't support having both NVLink and
OpenCAPI devices on the same NPU, but when we do, this will be a problem.
Fix npu2_populate_devices() to only populate additional fields as required
for NVLink devices. Rename it to npu2_configure_devices() to better reflect
what it now does.
Signed-off-by: Andrew Donnellan <andrew.donnellan at au1.ibm.com>
Reviewed-by: Frederic Barrat <fbarrat at linux.ibm.com>
---
v1->v2:
- remove unneeded scan_map assignment, it's zalloced anyway (Alexey)
- rebase on Reza's cleanups
---
hw/npu2.c | 42 ++++++++++--------------------------------
1 file changed, 10 insertions(+), 32 deletions(-)
diff --git a/hw/npu2.c b/hw/npu2.c
index 1c7af14958e8..106b32150994 100644
--- a/hw/npu2.c
+++ b/hw/npu2.c
@@ -1599,12 +1599,12 @@ static void npu2_populate_cfg(struct npu2_dev *dev)
PCI_VIRT_CFG_INIT_RO(pvd, pos + 1, 1, 0);
}
-static uint32_t npu_allocate_bdfn(struct npu2 *p, uint32_t group)
+static uint32_t npu_allocate_bdfn(struct npu2 *p, uint32_t group, int size)
{
int i;
int bdfn = (group << 3);
- for (i = 0; i < p->total_devices; i++) {
+ for (i = 0; i < size; i++) {
if ((p->devices[i].bdfn & 0xf8) == (bdfn & 0xf8))
bdfn++;
}
@@ -1612,51 +1612,29 @@ static uint32_t npu_allocate_bdfn(struct npu2 *p, uint32_t group)
return bdfn;
}
-static void npu2_populate_devices(struct npu2 *p,
- struct dt_node *dn)
+static void npu2_configure_devices(struct npu2 *p)
{
struct npu2_dev *dev;
- struct dt_node *npu2_dn, *link;
- uint32_t npu_phandle, index = 0;
-
- /*
- * Get the npu node which has the links which we expand here
- * into pci like devices attached to our emulated phb.
- */
- npu_phandle = dt_prop_get_u32(dn, "ibm,npcq");
- npu2_dn = dt_find_by_phandle(dt_root, npu_phandle);
- assert(npu2_dn);
+ uint32_t index = 0;
- /* Walk the link at x nodes to initialize devices */
- p->total_devices = 0;
- p->phb_nvlink.scan_map = 0;
- dt_for_each_compatible(npu2_dn, link, "ibm,npu-link") {
+ for (index = 0; index < p->total_devices; index++) {
uint32_t group_id;
dev = &p->devices[index];
- dev->type = NPU2_DEV_TYPE_NVLINK;
- dev->npu = p;
- dev->dt_node = link;
- dev->link_index = dt_prop_get_u32(link, "ibm,npu-link-index");
- dev->brick_index = dev->link_index;
+ if (dev->type != NPU2_DEV_TYPE_NVLINK)
+ continue;
- group_id = dt_prop_get_u32(link, "ibm,npu-group-id");
- dev->bdfn = npu_allocate_bdfn(p, group_id);
+ group_id = dt_prop_get_u32(dev->dt_node, "ibm,npu-group-id");
+ dev->bdfn = npu_allocate_bdfn(p, group_id, index);
/* This must be done after calling
* npu_allocate_bdfn() */
- p->total_devices++;
p->phb_nvlink.scan_map |= 0x1 << ((dev->bdfn & 0xf8) >> 3);
- dev->pl_xscom_base = dt_prop_get_u64(link, "ibm,npu-phy");
- dev->lane_mask = dt_prop_get_u32(link, "ibm,npu-lane-mask");
-
/* Initialize PCI virtual device */
dev->nvlink.pvd = pci_virt_add_device(&p->phb_nvlink, dev->bdfn, 0x100, dev);
if (dev->nvlink.pvd)
npu2_populate_cfg(dev);
-
- index++;
}
}
@@ -1857,7 +1835,7 @@ void npu2_nvlink_create_phb(struct npu2 *npu, struct dt_node *dn)
list_head_init(&npu->phb_nvlink.virt_devices);
npu2_setup_irqs(npu);
- npu2_populate_devices(npu, dn);
+ npu2_configure_devices(npu);
npu2_add_interrupt_map(npu, dn);
npu2_add_phb_properties(npu);
--
git-series 0.9.1
More information about the Skiboot
mailing list