[RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot'

Anshuman Khandual khandual@linux.vnet.ibm.com
Tue Jul 21 16:58:40 AEST 2015


From: "khandual at linux.vnet.ibm.com" <khandual at linux.vnet.ibm.com>

These functions all operate on individual SLB slots. Using the two
synonyms 'entry' and 'slot' interchangeably makes the code genuinely
confusing in places. This patch makes the naming uniform across the
file by replacing every 'entry' with 'slot'.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)
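
For reference (context only, not part of the diff): mk_esid_data() in
the same file already takes its index as 'slot', which is exactly where
the mixed naming bites. The slot index is OR-ed into the low bits of
the ESID doubleword that slbmte consumes, roughly:

	static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
						 unsigned long slot)
	{
		/* effective address bits | valid bit | slot index */
		return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
	}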

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 62fafb3..3842a54 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -55,39 +55,39 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 
 static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
-				     unsigned long entry)
+				     unsigned long slot)
 {
 	/*
-	 * Clear the ESID first so the entry is not valid while we are
+	 * Clear the ESID first so the slot is not valid while we are
 	 * updating it.  No write barriers are needed here, provided
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
-	get_slb_shadow()->save_area[entry].esid = 0;
-	get_slb_shadow()->save_area[entry].vsid =
+	get_slb_shadow()->save_area[slot].esid = 0;
+	get_slb_shadow()->save_area[slot].vsid =
 				cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-	get_slb_shadow()->save_area[entry].esid =
-				cpu_to_be64(mk_esid_data(ea, ssize, entry));
+	get_slb_shadow()->save_area[slot].esid =
+				cpu_to_be64(mk_esid_data(ea, ssize, slot));
 }
 
-static inline void slb_shadow_clear(unsigned long entry)
+static inline void slb_shadow_clear(unsigned long slot)
 {
-	get_slb_shadow()->save_area[entry].esid = 0;
+	get_slb_shadow()->save_area[slot].esid = 0;
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
-					unsigned long entry)
+					unsigned long slot)
 {
 	/*
 	 * Updating the shadow buffer before writing the SLB ensures
-	 * we don't get a stale entry here if we get preempted by PHYP
+	 * we don't get a stale slot here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, ssize, flags, entry);
+	slb_shadow_update(ea, ssize, flags, slot);
 
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
-		       "r" (mk_esid_data(ea, ssize, entry))
+		       "r" (mk_esid_data(ea, ssize, slot))
 		     : "memory" );
 }
 
@@ -109,7 +109,7 @@ static void __slb_flush_and_rebolt(void)
 		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
-		/* Update stack entry; others don't change */
+		/* Update stack slot; others don't change */
 		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
 		ksp_vsid_data =
 			be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
@@ -313,13 +313,12 @@ void slb_initialize(void)
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
 	 * which is in the first segment of the linear mapping, and also
 	 * get_paca()->kstack hasn't been initialized yet.
-	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 * For secondary cpus, we need to bolt the kernel stack slot now.
 	 */
 	slb_shadow_clear(2);
 	if (raw_smp_processor_id() != boot_cpuid &&
-- 
2.1.0
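
A side note on the update protocol the comments above describe: the
shadow slot is invalidated (ESID cleared) before anything changes and
is only revalidated once the new VSID is in place, so PHYP never sees
a half-written pair if it preempts us mid-update. A standalone sketch
of that pattern, with hypothetical userspace types rather than the
kernel's structures:

	#include <stdint.h>

	struct shadow_slot {
		uint64_t esid;		/* zero means invalid */
		uint64_t vsid;
	};

	static void shadow_update(struct shadow_slot *s,
				  uint64_t new_esid, uint64_t new_vsid)
	{
		s->esid = 0;		/* invalidate first ... */
		s->vsid = new_vsid;	/* ... swap in the new data ... */
		s->esid = new_esid;	/* ... revalidate only at the end */
	}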


