[2/5] STAB cleanup - kill bitfields

David Gibson david at gibson.dropbear.id.au
Tue Aug 3 12:13:47 EST 2004


Remove the overly verbose and hard-to-follow bitfields from the
PPC64 segment table code, replacing them with explicit shift and
bitmask operations.
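
For reference, the flat layout carries exactly the same information the
bitfields used to describe: each flag constant corresponds to one of the
old dw0 fields, and the ESID/VSID simply sit at the top of each
doubleword.  A minimal sketch of how an entry is now composed
(illustration only, not part of the patch; stab_entry_sketch() is a
made-up helper, and it assumes the existing 28-bit SID_SHIFT):

/* First doubleword: ESID in the top 36 bits, flags in the low byte */
#define STE_ESID_V	0x80		/* was dw0.dw0.v  */
#define STE_ESID_KS	0x20		/* was dw0.dw0.ks */
#define STE_ESID_KP	0x10		/* was dw0.dw0.kp */
#define STE_ESID_N	0x08		/* was dw0.dw0.n  */

/* Second doubleword: VSID in the top 52 bits */
#define STE_VSID_SHIFT	12		/* was dw1.dw1.vsid */

struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

/* Compose a valid user-segment entry with plain shifts and masks */
static void stab_entry_sketch(struct stab_entry *ste,
			      unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data;

	ste->vsid_data = vsid << STE_VSID_SHIFT;
	/* (the real code orders this with eieio before setting V) */
	esid_data = esid << 28;		/* 28 == SID_SHIFT */
	esid_data |= STE_ESID_KP | STE_ESID_KS | STE_ESID_V;
	ste->esid_data = esid_data;
}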

Signed-off-by: David Gibson <dwg at au1.ibm.com>

Index: working-2.6/arch/ppc64/mm/stab.c
===================================================================
--- working-2.6.orig/arch/ppc64/mm/stab.c	2004-07-29 16:14:46.201804936 +1000
+++ working-2.6/arch/ppc64/mm/stab.c	2004-07-29 16:14:46.642871056 +1000
@@ -61,33 +61,32 @@
 {
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
-	STE *ste, *castout_ste;
+	struct stab_entry *ste, *castout_ste;
 	unsigned long kernel_segment = (REGION_ID(esid << SID_SHIFT) !=
 					USER_REGION_ID);
+	unsigned long esid_data;

 	/* Search the primary group first. */
 	global_entry = (esid & 0x1f) << 3;
-	ste = (STE *)(stab | ((esid & 0x1f) << 7));
+	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

 	/* Find an empty entry, if one exists. */
 	for (group = 0; group < 2; group++) {
 		for (entry = 0; entry < 8; entry++, ste++) {
-			if (!(ste->dw0.dw0.v)) {
-				ste->dw0.dword0 = 0;
-				ste->dw1.dword1 = 0;
-				ste->dw1.dw1.vsid = vsid;
-				ste->dw0.dw0.esid = esid;
-				ste->dw0.dw0.kp = 1;
-				if (!kernel_segment)
-					ste->dw0.dw0.ks = 1;
+			if (!(ste->esid_data & STE_ESID_V)) {
+				ste->vsid_data = vsid << STE_VSID_SHIFT;
 				asm volatile("eieio":::"memory");
-				ste->dw0.dw0.v = 1;
+				esid_data = esid << SID_SHIFT;
+				esid_data |= STE_ESID_KP | STE_ESID_V;
+				if (! kernel_segment)
+					esid_data |= STE_ESID_KS;
+				ste->esid_data = esid_data;
 				return (global_entry | entry);
 			}
 		}
 		/* Now search the secondary group. */
 		global_entry = ((~esid) & 0x1f) << 3;
-		ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
+		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
 	}

 	/*
@@ -98,16 +97,16 @@
 	for (i = 0; i < 16; i++) {
 		if (castout_entry < 8) {
 			global_entry = (esid & 0x1f) << 3;
-			ste = (STE *)(stab | ((esid & 0x1f) << 7));
+			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
 			castout_ste = ste + castout_entry;
 		} else {
 			global_entry = ((~esid) & 0x1f) << 3;
-			ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
+			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
 			castout_ste = ste + (castout_entry - 8);
 		}

 		/* Dont cast out the first kernel segment */
-		if (castout_ste->dw0.dw0.esid != GET_ESID(KERNELBASE))
+		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
 			break;

 		castout_entry = (castout_entry + 1) & 0xf;
@@ -120,19 +119,21 @@
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");

-	castout_ste->dw0.dw0.v = 0;
+	old_esid = castout_ste->esid_data >> SID_SHIFT;
+	castout_ste->esid_data = 0;		/* Invalidate old entry */
+
 	asm volatile("sync" : : : "memory");    /* Order update */

-	castout_ste->dw0.dword0 = 0;
-	castout_ste->dw1.dword1 = 0;
-	castout_ste->dw1.dw1.vsid = vsid;
-	old_esid = castout_ste->dw0.dw0.esid;
-	castout_ste->dw0.dw0.esid = esid;
-	castout_ste->dw0.dw0.kp = 1;
-	if (!kernel_segment)
-		castout_ste->dw0.dw0.ks = 1;
+	castout_ste->vsid_data = vsid << STE_VSID_SHIFT;
+
 	asm volatile("eieio" : : : "memory");   /* Order update */
-	castout_ste->dw0.dw0.v  = 1;
+
+	esid_data = esid << SID_SHIFT;
+	esid_data |= STE_ESID_KP | STE_ESID_V;
+	if (!kernel_segment)
+		esid_data |= STE_ESID_KS;
+	castout_ste->esid_data = esid_data;
+
 	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
 	/* Ensure completion of slbie */
 	asm volatile("sync" : : : "memory");
@@ -240,8 +241,8 @@
 /* Flush all user entries from the segment table of the current processor. */
 void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
-	STE *stab = (STE *) get_paca()->stab_addr;
-	STE *ste;
+	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
+	struct stab_entry *ste;
 	unsigned long offset = __get_cpu_var(stab_cache_ptr);

 	/* Force previous translations to complete. DRENG */
@@ -252,7 +253,7 @@

 		for (i = 0; i < offset; i++) {
 			ste = stab + __get_cpu_var(stab_cache[i]);
-			ste->dw0.dw0.v = 0;
+			ste->esid_data = 0; /* invalidate entry */
 		}
 	} else {
 		unsigned long entry;
@@ -263,12 +264,12 @@
 		/* Never flush the first entry. */
 		ste += 1;
 		for (entry = 1;
-		     entry < (PAGE_SIZE / sizeof(STE));
+		     entry < (PAGE_SIZE / sizeof(struct stab_entry));
 		     entry++, ste++) {
 			unsigned long ea;
-			ea = ste->dw0.dw0.esid << SID_SHIFT;
+			ea = ste->esid_data & ESID_MASK;
 			if (ea < KERNELBASE) {
-				ste->dw0.dw0.v = 0;
+				ste->esid_data = 0;
 			}
 		}
 	}
Index: working-2.6/include/asm-ppc64/mmu.h
===================================================================
--- working-2.6.orig/include/asm-ppc64/mmu.h	2004-07-29 16:14:41.040790952 +1000
+++ working-2.6/include/asm-ppc64/mmu.h	2004-07-29 16:14:46.643870904 +1000
@@ -37,33 +37,17 @@
 		mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
 		ctx; })

-typedef struct {
-	unsigned long esid: 36; /* Effective segment ID */
-	unsigned long resv0:20; /* Reserved */
-	unsigned long v:     1; /* Entry valid (v=1) or invalid */
-	unsigned long resv1: 1; /* Reserved */
-	unsigned long ks:    1; /* Supervisor (privileged) state storage key */
-	unsigned long kp:    1; /* Problem state storage key */
-	unsigned long n:     1; /* No-execute if n=1 */
-	unsigned long resv2: 3; /* padding to a 64b boundary */
-} ste_dword0;
-
-typedef struct {
-	unsigned long vsid: 52; /* Virtual segment ID */
-	unsigned long resv0:12; /* Padding to a 64b boundary */
-} ste_dword1;
-
-typedef struct _STE {
-	union {
-		unsigned long dword0;
-		ste_dword0    dw0;
-	} dw0;
-
-	union {
-		unsigned long dword1;
-		ste_dword1    dw1;
-	} dw1;
-} STE;
+#define STE_ESID_V	0x80
+#define STE_ESID_KS	0x20
+#define STE_ESID_KP	0x10
+#define STE_ESID_N	0x08
+
+#define STE_VSID_SHIFT	12
+
+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};

 /* Hardware Page Table Entry */


--
David Gibson			| For every complex problem there is a
david AT gibson.dropbear.id.au	| solution which is simple, neat and
				| wrong.
http://www.ozlabs.org/people/dgibson
