[PATCH] ppc64: use full vaddr in mmu batches

Benjamin Herrenschmidt benh at kernel.crashing.org
Tue Sep 6 15:45:15 EST 2005


This patch slightly changes the mmu batching code to store the full vaddr
in the batch instead of reconstructing it from the vsid at flush time.
This avoids playing with contexts later on, when doing the actual flush.
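
To illustrate what now gets stored per batch entry, here is a minimal
sketch (not part of the patch; make_vaddr is a hypothetical helper, the
patch open-codes the same logic inline in hpte_update()):

	/* Build the full virtual address for one batch entry.
	 * Segments are 256MB, so the effective address contributes
	 * its low 28 bits and the vsid everything above them. */
	static unsigned long make_vaddr(struct mm_struct *mm,
					unsigned long addr)
	{
		unsigned long vsid;

		if (addr < KERNELBASE)
			/* user address: vsid depends on the mm context */
			vsid = get_vsid(mm->context.id, addr);
		else
			/* kernel address: no context involved */
			vsid = get_kernel_vsid(addr);

		return (vsid << 28) | (addr & 0x0fffffff);
	}

Since this value is computed when the entry is added to the batch, the
flush path (flush_hash_page/flush_hash_range) no longer needs a context
argument at all.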

This version of the patch is untested ;)

Signed-off-by: Benjamin Herrenschmidt <benh at kernel.crashing.org>

Index: linux-work/include/asm-ppc64/tlbflush.h
===================================================================
--- linux-work.orig/include/asm-ppc64/tlbflush.h	2005-09-05 14:55:10.000000000 +1000
+++ linux-work/include/asm-ppc64/tlbflush.h	2005-09-05 14:55:42.000000000 +1000
@@ -20,10 +20,8 @@
 struct mm_struct;
 struct ppc64_tlb_batch {
 	unsigned long index;
-	unsigned long context;
 	struct mm_struct *mm;
 	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long addr[PPC64_TLB_BATCH_NR];
 	unsigned long vaddr[PPC64_TLB_BATCH_NR];
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
@@ -47,8 +45,7 @@
 #define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
 #define flush_tlb_pgtables(mm, start, end)	do { } while (0)
 
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-			    int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
 
 #endif /* _PPC64_TLBFLUSH_H */
Index: linux-work/arch/ppc64/mm/tlb.c
===================================================================
--- linux-work.orig/arch/ppc64/mm/tlb.c	2005-09-05 14:55:10.000000000 +1000
+++ linux-work/arch/ppc64/mm/tlb.c	2005-09-05 14:55:42.000000000 +1000
@@ -128,12 +128,10 @@
 void hpte_update(struct mm_struct *mm, unsigned long addr,
 		 unsigned long pte, int wrprot)
 {
-	int i;
-	unsigned long context = 0;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	unsigned long vsid;
+	int i;
 
-	if (REGION_ID(addr) == USER_REGION_ID)
-		context = mm->context.id;
 	i = batch->index;
 
 	/*
@@ -143,17 +141,19 @@
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
 	 */
-	if (unlikely(i != 0 && context != batch->context)) {
+	if (unlikely(i != 0 && mm != batch->mm)) {
 		flush_tlb_pending();
 		i = 0;
 	}
-
-	if (i == 0) {
-		batch->context = context;
+	if (i == 0)
 		batch->mm = mm;
-	}
+	if (addr < KERNELBASE) {
+		vsid = get_vsid(mm->context.id, addr);
+		WARN_ON(vsid == 0);
+	} else
+		vsid = get_kernel_vsid(addr);
+	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
 	batch->pte[i] = __pte(pte);
-	batch->addr[i] = addr;
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		flush_tlb_pending();
@@ -175,10 +175,9 @@
 		local = 1;
 
 	if (i == 1)
-		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
-				local);
+		flush_hash_page(batch->vaddr[0], batch->pte[0], local);
 	else
-		flush_hash_range(batch->context, i, local);
+		flush_hash_range(i, local);
 	batch->index = 0;
 	put_cpu();
 }
Index: linux-work/arch/ppc64/mm/hash_utils.c
===================================================================
--- linux-work.orig/arch/ppc64/mm/hash_utils.c	2005-09-05 14:55:10.000000000 +1000
+++ linux-work/arch/ppc64/mm/hash_utils.c	2005-09-05 14:56:02.000000000 +1000
@@ -355,18 +355,11 @@
 	return ret;
 }
 
-void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-		     int local)
+void flush_hash_page(unsigned long va, pte_t pte, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot;
+	unsigned long vpn, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if (ea < KERNELBASE)
-		vsid = get_vsid(context, ea);
-	else
-		vsid = get_kernel_vsid(ea);
-
-	va = (vsid << 28) | (ea & 0x0fffffff);
 	if (huge)
 		vpn = va >> HPAGE_SHIFT;
 	else
@@ -381,17 +374,17 @@
 	ppc_md.hpte_invalidate(slot, va, huge, local);
 }
 
-void flush_hash_range(unsigned long context, unsigned long number, int local)
+void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range) {
-		ppc_md.flush_hash_range(context, number, local);
+		ppc_md.flush_hash_range(number, local);
 	} else {
 		int i;
-		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+		struct ppc64_tlb_batch *batch =
+			&__get_cpu_var(ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
-			flush_hash_page(context, batch->addr[i], batch->pte[i],
-					local);
+			flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 	}
 }
 
Index: linux-work/include/asm-ppc64/machdep.h
===================================================================
--- linux-work.orig/include/asm-ppc64/machdep.h	2005-09-05 14:55:10.000000000 +1000
+++ linux-work/include/asm-ppc64/machdep.h	2005-09-05 14:55:42.000000000 +1000
@@ -56,9 +56,8 @@
 				       unsigned long vflags,
 				       unsigned long rflags);
 	long		(*hpte_remove)(unsigned long hpte_group);
-	void		(*flush_hash_range)(unsigned long context,
-					    unsigned long number,
-					    int local);
+	void		(*flush_hash_range)(unsigned long number, int local);
+
 	/* special for kexec, to be called in real mode, linar mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
Index: linux-work/arch/ppc64/mm/hash_native.c
===================================================================
--- linux-work.orig/arch/ppc64/mm/hash_native.c	2005-09-05 14:55:10.000000000 +1000
+++ linux-work/arch/ppc64/mm/hash_native.c	2005-09-05 14:55:42.000000000 +1000
@@ -335,10 +335,9 @@
 	local_irq_restore(flags);
 }
 
-static void native_flush_hash_range(unsigned long context,
-				    unsigned long number, int local)
+static void native_flush_hash_range(unsigned long number, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
+	unsigned long va, vpn, hash, secondary, slot, flags, avpn;
 	int i, j;
 	hpte_t *hptep;
 	unsigned long hpte_v;
@@ -351,13 +350,7 @@
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if (batch->addr[i] < KERNELBASE)
-			vsid = get_vsid(context, batch->addr[i]);
-		else
-			vsid = get_kernel_vsid(batch->addr[i]);
-
-		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
-		batch->vaddr[j] = va;
+		va = batch->vaddr[j];
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
Index: linux-work/arch/ppc64/kernel/pSeries_lpar.c
===================================================================
--- linux-work.orig/arch/ppc64/kernel/pSeries_lpar.c	2005-09-05 14:55:10.000000000 +1000
+++ linux-work/arch/ppc64/kernel/pSeries_lpar.c	2005-09-05 14:55:42.000000000 +1000
@@ -482,8 +482,7 @@
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
-				   int local)
+void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
 	int i;
 	unsigned long flags = 0;
@@ -494,7 +493,7 @@
 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
 	for (i = 0; i < number; i++)
-		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
+		flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 
 	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
