[PATCH 8/12] ppc64: Separate usage of KERNELBASE and PAGE_OFFSET

Michael Ellerman michael at ellerman.id.au
Fri Aug 26 12:53:29 EST 2005


This patch tries to separate the usage of KERNELBASE and PAGE_OFFSET.

PAGE_OFFSET == 0xC00..00 and always will be. It's the quantity you subtract
from a kernel virtual address to get the corresponding physical address.

KERNELBASE == PAGE_OFFSET + SOMETHING, where SOMETHING tends to be 0 but
might not be. It points to the start of the kernel text + data in virtual
memory.
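
For illustration only (this block is not part of the diff, it just restates
the include/asm-ppc64/page.h definitions touched below):

	#define KERNELBASE	(PAGE_OFFSET + PHYSICAL_START)	/* start of kernel text + data */
	#define __va_ul(x)	((unsigned long)(x) + PAGE_OFFSET)	/* physical -> kernel virtual  */
	/* ... and virtual - PAGE_OFFSET gives you back the physical address */

So a kernel built with PHYSICAL_START == 0 has KERNELBASE == PAGE_OFFSET, but
a kernel built to run at a non-zero physical address does not, which is why
the two constants can't be used interchangeably.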

Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
---

 arch/ppc64/kernel/entry.S         |    4 ++--
 arch/ppc64/kernel/head.S          |    2 +-
 arch/ppc64/kernel/machine_kexec.c |    5 ++---
 arch/ppc64/kernel/pmac_smp.c      |    7 +++----
 arch/ppc64/kernel/prom_init.c     |    2 +-
 arch/ppc64/mm/hash_utils.c        |    6 +++---
 arch/ppc64/mm/slb.c               |    4 ++--
 arch/ppc64/mm/stab.c              |   10 +++++-----
 include/asm-ppc64/mmu.h           |    2 +-
 include/asm-ppc64/page.h          |    6 +++---
 include/asm-ppc64/ppc_asm.h       |   13 -------------
 11 files changed, 23 insertions(+), 38 deletions(-)

Index: work/arch/ppc64/mm/stab.c
===================================================================
--- work.orig/arch/ppc64/mm/stab.c
+++ work/arch/ppc64/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, 
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, 
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -248,7 +248,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = __va_ul(newstab);
 
 		memset((void *)newstab, 0, PAGE_SIZE);
 
@@ -265,13 +265,13 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 
 	if (cpu_has_feature(CPU_FTR_SLB)) {
 		slb_initialize();
 	} else {
 		asm volatile("isync; slbia; isync":::"memory");
-		make_ste(stab, GET_ESID(KERNELBASE), vsid);
+		make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 		/* Order update */
 		asm volatile("sync":::"memory");
Index: work/include/asm-ppc64/ppc_asm.h
===================================================================
--- work.orig/include/asm-ppc64/ppc_asm.h
+++ work/include/asm-ppc64/ppc_asm.h
@@ -110,19 +110,6 @@
 	ori     reg,reg,(label)@l;
 
 
-/* PPPBBB - DRENG  If KERNELBASE is always 0xC0...,
- * Then we can easily do this with one asm insn. -Peter
- */
-#define tophys(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        sub     rd,rs,rd
-
-#define tovirt(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        add     rd,rs,rd
-
 /* Condition Register Bit Fields */
 
 #define	cr0	0
Index: work/arch/ppc64/kernel/machine_kexec.c
===================================================================
--- work.orig/arch/ppc64/kernel/machine_kexec.c
+++ work/arch/ppc64/kernel/machine_kexec.c
@@ -171,9 +171,8 @@ void kexec_copy_flush(struct kimage *ima
 	 * including ones that were in place on the original copy
 	 */
 	for (i = 0; i < nr_segments; i++)
-		flush_icache_range(ranges[i].mem + KERNELBASE,
-				ranges[i].mem + KERNELBASE +
-				ranges[i].memsz);
+		flush_icache_range(__va_ul(ranges[i].mem),
+			__va_ul(ranges[i].mem + ranges[i].memsz));
 }
 
 #ifdef CONFIG_SMP
Index: work/arch/ppc64/kernel/pmac_smp.c
===================================================================
--- work.orig/arch/ppc64/kernel/pmac_smp.c
+++ work/arch/ppc64/kernel/pmac_smp.c
@@ -240,8 +240,7 @@ static void __init smp_core99_kick_cpu(i
 	int save_vector, j;
 	unsigned long new_vector;
 	unsigned long flags;
-	volatile unsigned int *vector
-		 = ((volatile unsigned int *)(KERNELBASE+0x100));
+	volatile unsigned int *vector = (volatile unsigned int *)__va(0x100);
 
 	if (nr < 1 || nr > 3)
 		return;
@@ -254,7 +253,7 @@ static void __init smp_core99_kick_cpu(i
 	save_vector = *vector;
 
 	/* Setup fake reset vector that does	
-	 *   b .pmac_secondary_start - KERNELBASE
+	 *   b .pmac_secondary_start_(1|2|3) - PAGE_OFFSET
 	 */
 	switch(nr) {
 	case 1:
@@ -268,7 +267,7 @@ static void __init smp_core99_kick_cpu(i
 		new_vector = (unsigned long)pmac_secondary_start_3;
 		break;
 	}
-	*vector = 0x48000002 + (new_vector - KERNELBASE);
+	*vector = 0x48000002 + (new_vector - PAGE_OFFSET);
 
 	/* flush data cache and inval instruction cache */
 	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
Index: work/arch/ppc64/kernel/prom_init.c
===================================================================
--- work.orig/arch/ppc64/kernel/prom_init.c
+++ work/arch/ppc64/kernel/prom_init.c
@@ -1940,7 +1940,7 @@ unsigned long __init prom_init(unsigned 
 	 * On pSeries and BPA, copy the CPU hold code
 	 */
        	if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA))
-       		copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
+       		copy_and_flush(__pa(KERNELBASE), KERNELBASE - offset, 0x100, 0);
 
 	/*
 	 * Get memory cells format
Index: work/arch/ppc64/mm/hash_utils.c
===================================================================
--- work.orig/arch/ppc64/mm/hash_utils.c
+++ work/arch/ppc64/mm/hash_utils.c
@@ -210,7 +210,7 @@ void __init htab_initialize(void)
 
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = __va_ul(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -247,8 +247,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = __va_ul(tce_alloc_start);
+		tce_alloc_end = __va_ul(tce_alloc_end);
 
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
Index: work/arch/ppc64/mm/slb.c
===================================================================
--- work.orig/arch/ppc64/mm/slb.c
+++ work/arch/ppc64/mm/slb.c
@@ -55,7 +55,7 @@ static void slb_flush_and_rebolt(void)
 		ksp_flags |= SLB_VSID_L;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -145,7 +145,7 @@ void slb_initialize(void)
  	asm volatile("isync":::"memory");
  	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, flags, 0);
+	create_slbe(PAGE_OFFSET, flags, 0);
 	create_slbe(VMALLOC_OFFSET, SLB_VSID_KERNEL, 1);
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
Index: work/include/asm-ppc64/mmu.h
===================================================================
--- work.orig/include/asm-ppc64/mmu.h
+++ work/include/asm-ppc64/mmu.h
@@ -29,7 +29,7 @@
 
 /* Location of cpu0's segment table */
 #define STAB0_PAGE	0x6
-#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
+#define STAB0_PHYS_ADDR	((STAB0_PAGE << PAGE_SHIFT) + PHYSICAL_START)
 
 #ifndef __ASSEMBLY__
 extern char initial_stab[];
Index: work/include/asm-ppc64/page.h
===================================================================
--- work.orig/include/asm-ppc64/page.h
+++ work/include/asm-ppc64/page.h
@@ -219,14 +219,14 @@ extern u64 ppc64_pft_size;		/* Log 2 of 
 #define KERNELBASE		(PAGE_OFFSET + PHYSICAL_START)
 
 #define VMALLOC_REGION_ID	(VMALLOC_OFFSET >> REGION_SHIFT)
-#define KERNEL_REGION_ID   (KERNELBASE >> REGION_SHIFT)
+#define KERNEL_REGION_ID	(PAGE_OFFSET >> REGION_SHIFT)
 #define USER_REGION_ID     (0UL)
 #define REGION_ID(ea)	   (((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define __va_ul(x) (((unsigned long)(x) + KERNELBASE))
+#define __va_ul(x) (((unsigned long)(x) + PAGE_OFFSET))
 #define __va(x) ((void *)__va_ul(x))
 
-#define is_kernel_addr(x)	((x) >= KERNELBASE)
+#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
 
 #ifdef CONFIG_DISCONTIGMEM
 #define page_to_pfn(page)	discontigmem_page_to_pfn(page)
Index: work/arch/ppc64/kernel/head.S
===================================================================
--- work.orig/arch/ppc64/kernel/head.S
+++ work/arch/ppc64/kernel/head.S
@@ -1257,7 +1257,7 @@ unrecov_slb:
  * fixed address (the linker can't compute (u64)&initial_stab >>
  * PAGE_SHIFT).
  */
-	. = STAB0_PHYS_ADDR	/* 0x6000 */
+	. = (STAB0_PAGE << PAGE_SHIFT)	/* 0x6000 */
 	.globl initial_stab
 initial_stab:
 	.space	4096
Index: work/arch/ppc64/kernel/entry.S
===================================================================
--- work.orig/arch/ppc64/kernel/entry.S
+++ work/arch/ppc64/kernel/entry.S
@@ -668,7 +668,7 @@ _GLOBAL(enter_rtas)
 
 	/* Setup our real return addr */	
 	SET_REG_TO_LABEL(r4,.rtas_return_loc)
-	SET_REG_TO_CONST(r9,KERNELBASE)
+	SET_REG_TO_CONST(r9,PAGE_OFFSET)
 	sub	r4,r4,r9
        	mtlr	r4
 
@@ -696,7 +696,7 @@ _GLOBAL(enter_rtas)
 _STATIC(rtas_return_loc)
 	/* relocation is off at this point */
 	mfspr	r4,SPRG3	        /* Get PACA */
-	SET_REG_TO_CONST(r5, KERNELBASE)
+	SET_REG_TO_CONST(r5, PAGE_OFFSET)
         sub     r4,r4,r5                /* RELOC the PACA base pointer */
 
 	mfmsr   r6


