[PATCH 1/12] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET

Michael Ellerman <michael@ellerman.id.au>
Tue Nov 8 00:06:48 EST 2005


This patch separates the usage of KERNELBASE and PAGE_OFFSET.

PAGE_OFFSET == 0xC00..00 and always will be. It's the quantity you subtract
from a virtual kernel address to get a physical one.

KERNELBASE == 0xC00..00 + SOMETHING, where SOMETHING tends to be 0, but might
not be. It points to the start of the kernel text + data in virtual memory.
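
To make the distinction concrete, here is a small userspace sketch (not kernel
code; PHYSICAL_START and the exact macro bodies are illustrative only) of how
the two constants relate, and why phys/virt translation should be relative to
PAGE_OFFSET rather than KERNELBASE:

#include <stdio.h>

/* Base of the kernel linear mapping on 64-bit powerpc. */
#define PAGE_OFFSET	0xC000000000000000UL
/* Offset of the kernel text within the linear map; usually 0, but need not be. */
#define PHYSICAL_START	0x0UL
#define KERNELBASE	(PAGE_OFFSET + PHYSICAL_START)

/* phys <-> virt conversion uses PAGE_OFFSET, never KERNELBASE. */
#define __va(paddr)	((void *)((unsigned long)(paddr) + PAGE_OFFSET))
#define __pa(vaddr)	((unsigned long)(vaddr) - PAGE_OFFSET)

int main(void)
{
	unsigned long phys = 0x2000000UL;	/* an arbitrary physical address */

	printf("__va(%#lx) = %p\n", phys, __va(phys));
	printf("__pa(...)  = %#lx\n", __pa(__va(phys)));
	printf("KERNELBASE = %#lx\n", KERNELBASE);
	return 0;
}

Accordingly, the conversions below use __va() where the intent is really "the
linear-map address of this physical address", and compare against PAGE_OFFSET
where the old code compared against KERNELBASE.
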

 arch/powerpc/kernel/entry_64.S    |    4 ++--
 arch/powerpc/kernel/lparmap.c     |    6 +++---
 arch/powerpc/mm/hash_utils_64.c   |    6 +++---
 arch/powerpc/mm/slb.c             |    4 ++--
 arch/powerpc/mm/slb_low.S         |    6 +++---
 arch/powerpc/mm/stab.c            |   10 +++++-----
 arch/powerpc/mm/tlb_64.c          |    2 +-
 arch/ppc64/kernel/machine_kexec.c |    5 ++---
 8 files changed, 21 insertions(+), 22 deletions(-)

Index: kexec/arch/powerpc/mm/stab.c
===================================================================
--- kexec.orig/arch/powerpc/mm/stab.c
+++ kexec/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, 
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, 
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -248,7 +248,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, PAGE_SIZE);
 
@@ -265,13 +265,13 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 
 	if (cpu_has_feature(CPU_FTR_SLB)) {
 		slb_initialize();
 	} else {
 		asm volatile("isync; slbia; isync":::"memory");
-		make_ste(stab, GET_ESID(KERNELBASE), vsid);
+		make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 		/* Order update */
 		asm volatile("sync":::"memory");
Index: kexec/arch/ppc64/kernel/machine_kexec.c
===================================================================
--- kexec.orig/arch/ppc64/kernel/machine_kexec.c
+++ kexec/arch/ppc64/kernel/machine_kexec.c
@@ -171,9 +171,8 @@ void kexec_copy_flush(struct kimage *ima
 	 * including ones that were in place on the original copy
 	 */
 	for (i = 0; i < nr_segments; i++)
-		flush_icache_range(ranges[i].mem + KERNELBASE,
-				ranges[i].mem + KERNELBASE +
-				ranges[i].memsz);
+		flush_icache_range((unsigned long)__va(ranges[i].mem),
+			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
 }
 
 #ifdef CONFIG_SMP
Index: kexec/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- kexec.orig/arch/powerpc/mm/hash_utils_64.c
+++ kexec/arch/powerpc/mm/hash_utils_64.c
@@ -239,7 +239,7 @@ void __init htab_initialize(void)
 
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -276,8 +276,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
Index: kexec/arch/powerpc/mm/slb.c
===================================================================
--- kexec.orig/arch/powerpc/mm/slb.c
+++ kexec/arch/powerpc/mm/slb.c
@@ -55,7 +55,7 @@ static void slb_flush_and_rebolt(void)
 		ksp_flags |= SLB_VSID_L;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -145,7 +145,7 @@ void slb_initialize(void)
  	asm volatile("isync":::"memory");
  	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, flags, 0);
+	create_slbe(PAGE_OFFSET, flags, 0);
 	create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
Index: kexec/arch/powerpc/kernel/entry_64.S
===================================================================
--- kexec.orig/arch/powerpc/kernel/entry_64.S
+++ kexec/arch/powerpc/kernel/entry_64.S
@@ -674,7 +674,7 @@ _GLOBAL(enter_rtas)
 
 	/* Setup our real return addr */	
 	SET_REG_TO_LABEL(r4,.rtas_return_loc)
-	SET_REG_TO_CONST(r9,KERNELBASE)
+	SET_REG_TO_CONST(r9,PAGE_OFFSET)
 	sub	r4,r4,r9
        	mtlr	r4
 
@@ -702,7 +702,7 @@ _GLOBAL(enter_rtas)
 _STATIC(rtas_return_loc)
 	/* relocation is off at this point */
 	mfspr	r4,SPRN_SPRG3	        /* Get PACA */
-	SET_REG_TO_CONST(r5, KERNELBASE)
+	SET_REG_TO_CONST(r5, PAGE_OFFSET)
         sub     r4,r4,r5                /* RELOC the PACA base pointer */
 
 	mfmsr   r6
Index: kexec/arch/powerpc/mm/slb_low.S
===================================================================
--- kexec.orig/arch/powerpc/mm/slb_low.S
+++ kexec/arch/powerpc/mm/slb_low.S
@@ -66,12 +66,12 @@ _GLOBAL(slb_allocate)
 
 	srdi	r9,r3,60		/* get region */
 	srdi	r3,r3,28		/* get esid */
-	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
+	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	rldimi	r10,r3,28,0		/* r10= ESID<<28 | entry */
 	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */
 
-	/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
+	/* r3 = esid, r10 = esid_data, cr7 = <> PAGE_OFFSET */
 
 	blt	cr7,0f			/* user or kernel? */
 
@@ -114,7 +114,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 	ld	r9,PACACONTEXTID(r13)
 	rldimi	r3,r9,USER_ESID_BITS,0
 
-9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
+9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <> PAGE_OFFSET */
 	ASM_VSID_SCRAMBLE(r3,r9)
 
 	rldimi	r11,r3,SLB_VSID_SHIFT,16	/* combine VSID and flags */
Index: kexec/arch/powerpc/kernel/lparmap.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/lparmap.c
+++ kexec/arch/powerpc/kernel/lparmap.c
@@ -16,8 +16,8 @@ const struct LparMap __attribute__((__se
 	.xSegmentTableOffs = STAB0_PAGE,
 
 	.xEsids = {
-		{ .xKernelEsid = GET_ESID(KERNELBASE),
-		  .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
+		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
 		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
 		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
 	},
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__se
 	.xRanges = {
 		{ .xPages = HvPagesToMap,
 		  .xOffset = 0,
-		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
+		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - PAGE_SHIFT),
 		},
 	},
 };
Index: kexec/arch/powerpc/mm/tlb_64.c
===================================================================
--- kexec.orig/arch/powerpc/mm/tlb_64.c
+++ kexec/arch/powerpc/mm/tlb_64.c
@@ -149,7 +149,7 @@ void hpte_update(struct mm_struct *mm, u
 		batch->mm = mm;
 		batch->large = pte_huge(pte);
 	}
-	if (addr < KERNELBASE) {
+	if (!is_kernel_addr(addr)) {
 		vsid = get_vsid(mm->context.id, addr);
 		WARN_ON(vsid == 0);
 	} else
