Invert sense of SLB class bit

David Gibson david at gibson.dropbear.id.au
Tue Sep 6 14:59:47 EST 2005


Currently, we set the class bit on kernel SLB entries and clear it on
user SLB entries.  On POWER5, ERAT entries created in real mode have
the class bit clear; since user entries currently share that class, the
slbie instructions issued for user segments at each context switch also
invalidate those kernel ERAT entries.  To avoid flushing kernel ERAT
entries on each context switch, this patch inverts our usage of the
class bit, setting it on user SLB entries and clearing it on kernel SLB
entries.
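
As a rough illustration (not part of the patch) of how the class bit now
pairs up, where vsid_flags and esid are placeholder variables and the
constants are the ones defined in the mmu.h hunk below:

	/* user segments now carry the class bit, kernel segments do not */
	unsigned long user_vsid_data = vsid_flags | SLB_VSID_USER;   /* C bit set */
	unsigned long kern_vsid_data = vsid_flags | SLB_VSID_KERNEL; /* C bit clear */

	/* slbie must name the class of the entry it invalidates, so
	 * user-segment invalidations now OR SLBIE_C into the esid operand */
	asm volatile("slbie %0" : : "r" (esid | SLBIE_C));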

Booted on POWER5 and G5.

Signed-off-by: David Gibson <dwg at au1.ibm.com>

Index: working-2.6/arch/ppc64/kernel/entry.S
===================================================================
--- working-2.6.orig/arch/ppc64/kernel/entry.S	2005-06-08 15:50:39.000000000 +1000
+++ working-2.6/arch/ppc64/kernel/entry.S	2005-09-05 10:52:47.000000000 +1000
@@ -400,15 +400,14 @@
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
 	beq	2f		/* if yes, don't slbie it */
-	oris	r0,r6,0x0800	/* set C (class) bit */
 
 	/* Bolt in the new stack SLB entry */
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r6,r6,(SLB_ESID_V)@h
-	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
-	slbie	r0
-	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
-	slbmte	r7,r6
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+	slbie	r6
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+	slbmte	r7,r0
 	isync
 
 2:
Index: working-2.6/arch/ppc64/mm/hugetlbpage.c
===================================================================
--- working-2.6.orig/arch/ppc64/mm/hugetlbpage.c	2005-08-31 11:41:53.000000000 +1000
+++ working-2.6/arch/ppc64/mm/hugetlbpage.c	2005-09-05 11:01:14.000000000 +1000
@@ -144,7 +144,8 @@
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
 		if (! (areas & (1U << i)))
 			continue;
-		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+		asm volatile("slbie %0"
+			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				     :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+				     :: "r" (((i << HTLB_AREA_SHIFT)
+					     + (j << SID_SHIFT)) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
Index: working-2.6/arch/ppc64/mm/slb.c
===================================================================
--- working-2.6.orig/arch/ppc64/mm/slb.c	2005-06-08 15:46:23.000000000 +1000
+++ working-2.6/arch/ppc64/mm/slb.c	2005-09-05 10:52:47.000000000 +1000
@@ -87,8 +87,8 @@
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = (unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT;
+			esid_data = ((unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT) | SLBIE_C;
 			asm volatile("slbie %0" : : "r" (esid_data));
 		}
 		asm volatile("isync" : : : "memory");
Index: working-2.6/include/asm-ppc64/mmu.h
===================================================================
--- working-2.6.orig/include/asm-ppc64/mmu.h	2005-08-31 11:41:57.000000000 +1000
+++ working-2.6/include/asm-ppc64/mmu.h	2005-09-05 11:04:37.000000000 +1000
@@ -54,8 +54,10 @@
 #define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
 #define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */
  
-#define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)
+#define SLB_VSID_KERNEL		(SLB_VSID_KP)
+#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C			(0x08000000)
 
 /*
  * Hash table


-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/people/dgibson


