[PATCH 2/2] powerpc/powernv/npu: Don't explicitly flush nmmu tlb

Alistair Popple <alistair@popple.id.au>
Fri Aug 11 16:22:57 AEST 2017


The nest MMU requires an explicit flush as a tlbi will not flush it in the
same way as the core. However, an alternate firmware fix exists which should
eliminate the need for this flush, so instead add a device-tree property
(ibm,nmmu-flush) on the NVLink2 PHB to enable the explicit flush only when
required.
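
For illustration only, detecting the property is a simple presence test on
the PHB's device-tree node. A minimal sketch, with a hypothetical helper
name (the real check lives in pnv_npu2_init() below):

    #include <linux/of.h>

    /* Hypothetical helper: true iff firmware asks for the explicit flush. */
    static bool npu_nmmu_flush_required(struct device_node *phb_dn)
    {
            /* of_property_read_bool() returns true iff the property exists. */
            return of_property_read_bool(phb_dn, "ibm,nmmu-flush");
    }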

Signed-off-by: Alistair Popple <alistair@popple.id.au>
---

Michael,

This patch depends on http://patchwork.ozlabs.org/patch/796775/ - [v3,1/3]
powerpc/mm: Add marker for contexts requiring global TLB invalidations.
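
For reference, that patch provides the mm_context_set_global_tlbi() helper
called below. A rough sketch of its intent, assuming a simple flag in
powerpc's mm_context_t (the field name is my assumption; see the referenced
patch for the real definition):

    /*
     * Sketch only: mark an mm so its TLB invalidations are broadcast
     * globally (tlbie) rather than kept core-local (tlbiel), so the
     * nest MMU observes them without an explicit flush_tlb_mm().
     */
    static inline void mm_context_set_global_tlbi(mm_context_t *ctx)
    {
            ctx->global_tlbi = true;        /* assumed field name */
    }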

- Alistair

 arch/powerpc/platforms/powernv/npu-dma.c | 27 +++++++++++++++++++++------
 arch/powerpc/platforms/powernv/pci.h     |  3 +++
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 3d4f879..ac07800 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -544,12 +544,7 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 	struct pci_dev *npdev;
 	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 	unsigned long pid = npu_context->mm->context.id;
-
-	/*
-	 * Unfortunately the nest mmu does not support flushing specific
-	 * addresses so we have to flush the whole mm.
-	 */
-	flush_tlb_mm(npu_context->mm);
+	bool nmmu_flushed = false;
 
 	/*
 	 * Loop over all the NPUs this process is active on and launch
@@ -566,6 +561,17 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 			npu = &nphb->npu;
 			mmio_atsd_reg[i].npu = npu;
 
+			if (nphb->npu.nmmu_flush && !nmmu_flushed) {
+				/*
+				 * Unfortunately the nest mmu does not support
+				 * flushing specific addresses so we have to
+				 * flush the whole mm once before shooting down
+				 * the GPU translation.
+				 */
+				flush_tlb_mm(npu_context->mm);
+				nmmu_flushed = true;
+			}
+
 			if (va)
 				mmio_atsd_reg[i].reg =
 					mmio_invalidate_va(npu, address, pid,
@@ -732,6 +738,13 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		return ERR_PTR(-ENODEV);
 	npu_context->npdev[npu->index][nvlink_index] = npdev;
 
+	if (!nphb->npu.nmmu_flush)
+		/*
+		 * If we're not explicitly flushing ourselves, we need to
+		 * mark the context for global TLB invalidations.
+		 */
+		mm_context_set_global_tlbi(&mm->context);
+
 	return npu_context;
 }
 EXPORT_SYMBOL(pnv_npu2_init_context);
@@ -829,6 +842,8 @@ int pnv_npu2_init(struct pnv_phb *phb)
 	static int npu_index;
 	uint64_t rc = 0;
 
+	phb->npu.nmmu_flush =
+		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
 	for_each_child_of_node(phb->hose->dn, dn) {
 		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
 		if (gpdev) {
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index f16bc40..e8e3e20 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -184,6 +184,9 @@ struct pnv_phb {
 
 		/* Bitmask for MMIO register usage */
 		unsigned long mmio_atsd_usage;
+
+		/* Do we need to explicitly flush the nest mmu? */
+		bool nmmu_flush;
 	} npu;
 
 #ifdef CONFIG_CXL_BASE
-- 
2.1.4


