[5/5] STAB cleanup - replace flush_stab() with switch_stab()

David Gibson david at gibson.dropbear.id.au
Tue Aug 3 12:15:57 EST 2004


preload_stab() is only ever called (once) from flush_stab(), and
flush_stab() is only ever called from switch_mm().  So combine the two
into a single, more accurately named switch_stab(), still called from
switch_mm().

Signed-off-by: David Gibson <dwg at au1.ibm.com>
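
For quick orientation, a minimal sketch of the call structure after
this patch (simplified from the hunks below, which are authoritative):

	/* In switch_mm() (include/asm-ppc64/mmu_context.h): */
	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
		switch_slb(tsk, next);		/* SLB-based CPUs */
	else
		switch_stab(tsk, next);		/* STAB-based CPUs */

	/*
	 * switch_stab() (arch/ppc64/mm/stab.c) now does both halves
	 * itself:
	 *   1. flush all user entries from this CPU's segment table
	 *   2. preload STEs for the new task's pc, stack and
	 *      TASK_UNMAPPED_BASE, skipping segments that share an
	 *      ESID (one STE covers a whole 256MB segment)
	 */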

Index: working-2.6/include/asm-ppc64/mmu_context.h
===================================================================
--- working-2.6.orig/include/asm-ppc64/mmu_context.h	2004-07-30 14:13:32.932890776 +1000
+++ working-2.6/include/asm-ppc64/mmu_context.h	2004-07-30 14:13:35.017837008 +1000
@@ -135,7 +135,7 @@
 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }

-extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

 /*
@@ -163,7 +163,7 @@
 	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
 		switch_slb(tsk, next);
 	else
-		flush_stab(tsk, next);
+		switch_stab(tsk, next);
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)
Index: working-2.6/arch/ppc64/mm/stab.c
===================================================================
--- working-2.6.orig/arch/ppc64/mm/stab.c	2004-07-30 14:13:34.968844456 +1000
+++ working-2.6/arch/ppc64/mm/stab.c	2004-07-30 14:13:55.314811992 +1000
@@ -152,42 +148,18 @@
 }

 /*
- * preload some userspace segments into the segment table.
+ * Do the segment table work for a context switch: flush all user
+ * entries from the table, then preload some probably useful entries
+ * for the new task
  */
-static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
-{
-	unsigned long pc = KSTK_EIP(tsk);
-	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
-
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
-	__ste_allocate(pc, mm);
-
-	if (GET_ESID(pc) == GET_ESID(stack))
-		return;
-
-	__ste_allocate(stack, mm);
-
-	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
-		return;
-
-	__ste_allocate(unmapped_base, mm);
-
-	/* Order update */
-	asm volatile("sync" : : : "memory");
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
+void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
 	struct stab_entry *ste;
 	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	unsigned long pc = KSTK_EIP(tsk);
+	unsigned long stack = KSTK_ESP(tsk);
+	unsigned long unmapped_base;

 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
@@ -222,7 +194,27 @@

 	__get_cpu_var(stab_cache_ptr) = 0;

-	preload_stab(tsk, mm);
+	/* Now preload some entries for the new task */
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		unmapped_base = TASK_UNMAPPED_BASE_USER32;
+	else
+		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+
+	__ste_allocate(pc, mm);
+
+	if (GET_ESID(pc) == GET_ESID(stack))
+		return;
+
+	__ste_allocate(stack, mm);
+
+	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
+	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+		return;
+
+	__ste_allocate(unmapped_base, mm);
+
+	/* Order update */
+	asm volatile("sync" : : : "memory");
 }

 extern void slb_initialize(void);
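
An aside on why the preload path compares ESIDs before each
__ste_allocate() call: addresses in the same 256MB segment share an
ESID, so a single segment table entry covers them all.  A hypothetical
standalone illustration (plain userspace C, not kernel code, assuming
the 28-bit segment shift; the kernel's GET_ESID() also masks the
shifted value):

	#include <stdio.h>

	#define SEG_SHIFT	28			/* 256MB segments */
	#define ESID(addr)	((addr) >> SEG_SHIFT)	/* simplified GET_ESID() */

	int main(void)
	{
		unsigned long pc    = 0x10001234UL;	/* typical text address */
		unsigned long stack = 0xffffe000UL;	/* typical stack address */

		/* Mirrors the check in switch_stab(): if pc and stack fall
		 * in the same segment, a single STE suffices for both. */
		printf("pc esid 0x%lx, stack esid 0x%lx -> %s\n",
		       ESID(pc), ESID(stack),
		       ESID(pc) == ESID(stack) ? "one STE" : "two STEs");
		return 0;
	}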

--
David Gibson			| For every complex problem there is a
david AT gibson.dropbear.id.au	| solution which is simple, neat and
				| wrong.
http://www.ozlabs.org/people/dgibson
