[RFC/PATCH 2/2] ppc64: Separate usage of KERNELBASE and PAGE_OFFSET

Michael Ellerman michael at ellerman.id.au
Wed Aug 17 17:08:13 EST 2005


This patch *tries* to separate usage of KERNELBASE and PAGE_OFFSET.

PAGE_OFFSET == 0xC00..00 and always will be. It's the quantity you subtract
from a kernel virtual address to get a physical one.

KERNELBASE == 0xC00..00 *currently*, but it might one day be something else,
so hold onto your hats. It points to the start of the kernel text + data in
virtual memory.
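
To put that in C terms, here's a rough sketch of the intended definitions
(illustrative only, not the literal header contents after this patch):

	/* Fixed base of the kernel linear mapping - never changes. */
	#define PAGE_OFFSET	ASM_CONST(0xC000000000000000)

	/* Linear-mapping virtual <-> physical conversions. */
	#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET))
	#define __pa(x)		((unsigned long)(x) - PAGE_OFFSET)

	/* Start of kernel text + data in virtual memory. Happens to be the
	 * same constant as PAGE_OFFSET today, but don't rely on that. */
	#define KERNELBASE	ASM_CONST(0xC000000000000000)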

I'd really appreciate it if people could cast an eye over this, as I'm sure
I've got some of it wrong. There are still a few more users of KERNELBASE
that could be converted to __pa() or __va(), but I'll get to those later.
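
The typical conversion is mechanical. For example, from the hash_utils.c
hunk below:

	/* Before: open-coded "physical + offset" arithmetic */
	base = lmb.memory.region[i].physbase + KERNELBASE;

	/* After: say what we mean - a physical to virtual translation */
	base = (unsigned long)__va(lmb.memory.region[i].physbase);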

This actually boots on my P5 LPAR, but there might be some subtleties.

Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
---

 arch/ppc64/kernel/head.S              |   12 ++++++------
 arch/ppc64/kernel/machine_kexec.c     |    5 ++---
 arch/ppc64/kernel/pmac_smp.c          |    8 ++++----
 arch/ppc64/kernel/prom_init.c         |    4 ++--
 arch/ppc64/kernel/setup.c             |    2 +-
 arch/ppc64/mm/hash_native.c           |    2 +-
 arch/ppc64/mm/hash_utils.c            |    8 ++++----
 arch/ppc64/mm/slb.c                   |   10 +++++-----
 arch/ppc64/mm/stab.c                  |   22 ++++++++++------------
 arch/ppc64/oprofile/op_model_power4.c |    4 ++--
 arch/ppc64/oprofile/op_model_rs64.c   |    2 +-
 arch/ppc64/xmon/xmon.c                |    4 ++--
 include/asm-ppc64/mmu.h               |    4 ++--
 include/asm-ppc64/page.h              |    6 +++---
 include/asm-ppc64/ppc_asm.h           |   13 -------------
 15 files changed, 45 insertions(+), 61 deletions(-)

Index: work/arch/ppc64/mm/stab.c
===================================================================
--- work.orig/arch/ppc64/mm/stab.c
+++ work/arch/ppc64/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, 
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, 
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long 
 	unsigned long offset;
 
 	/* Kernel or user address? */
-	if (ea >= KERNELBASE) {
+	if (ea >= PAGE_OFFSET) {
 		vsid = get_kernel_vsid(ea);
 	} else {
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long 
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
 
-	if (ea < KERNELBASE) {
+	if (ea < PAGE_OFFSET) {
 		offset = __get_cpu_var(stab_cache_ptr);
 		if (offset < NR_STAB_CACHE_ENTRIES)
 			__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk
 		     entry++, ste++) {
 			unsigned long ea;
 			ea = ste->esid_data & ESID_MASK;
-			if (ea < KERNELBASE) {
+			if (ea < PAGE_OFFSET) {
 				ste->esid_data = 0;
 			}
 		}
@@ -248,12 +248,10 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		memset(__va(newstab), 0, PAGE_SIZE);
 
-		memset((void *)newstab, 0, PAGE_SIZE);
-
-		paca[cpu].stab_addr = newstab;
-		paca[cpu].stab_real = virt_to_abs(newstab);
+		paca[cpu].stab_addr = (u64)__va(newstab);
+		paca[cpu].stab_real = newstab;
 		printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
 	}
 }
@@ -265,13 +263,13 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 
 	if (cpu_has_feature(CPU_FTR_SLB)) {
 		slb_initialize();
 	} else {
 		asm volatile("isync; slbia; isync":::"memory");
-		make_ste(stab, GET_ESID(KERNELBASE), vsid);
+		make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 		/* Order update */
 		asm volatile("sync":::"memory");
Index: work/include/asm-ppc64/ppc_asm.h
===================================================================
--- work.orig/include/asm-ppc64/ppc_asm.h
+++ work/include/asm-ppc64/ppc_asm.h
@@ -110,19 +110,6 @@
 	ori     reg,reg,(label)@l;
 
 
-/* PPPBBB - DRENG  If KERNELBASE is always 0xC0...,
- * Then we can easily do this with one asm insn. -Peter
- */
-#define tophys(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        sub     rd,rs,rd
-
-#define tovirt(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        add     rd,rs,rd
-
 /* Condition Register Bit Fields */
 
 #define	cr0	0
Index: work/arch/ppc64/kernel/head.S
===================================================================
--- work.orig/arch/ppc64/kernel/head.S
+++ work/arch/ppc64/kernel/head.S
@@ -100,7 +100,7 @@ END_FTR_SECTION(0, 1)
 	 * This is required by the hypervisor
 	 */
 	. = 0x20
-	.llong hvReleaseData-KERNELBASE
+	.llong hvReleaseData-PAGE_OFFSET
 
 	/*
 	 * At offset 0x28 and 0x30 are offsets to the msChunks
@@ -108,8 +108,8 @@ END_FTR_SECTION(0, 1)
 	 * between physical addresses and absolute addresses) and
 	 * to the pidhash table (also used by the debugger)
 	 */
-	.llong msChunks-KERNELBASE
-	.llong 0	/* pidhash-KERNELBASE SFRXXX */
+	.llong msChunks-PAGE_OFFSET
+	.llong 0	/* pidhash-PAGE_OFFSET SFRXXX */
 
 	/* Offset 0x38 - Pointer to start of embedded System.map */
 	.globl	embedded_sysmap_start
@@ -1301,7 +1301,7 @@ _GLOBAL(__start_initialization_multiplat
 	li	r24,0
 
 	/* Switch off MMU if not already */
-	LOADADDR(r4, .__after_prom_start - KERNELBASE)
+	LOADADDR(r4, .__after_prom_start - PAGE_OFFSET)
 	add	r4,r4,r30
 	bl	.__mmu_off
 	b	.__after_prom_start
@@ -1355,11 +1355,11 @@ _STATIC(__after_prom_start)
  *
  * Note: This process overwrites the OF exception vectors.
  *	r26 == relocation offset
- *	r27 == KERNELBASE
+ *	r27 == PAGE_OFFSET
  */
 	bl	.reloc_offset
 	mr	r26,r3
-	SET_REG_TO_CONST(r27,KERNELBASE)
+	SET_REG_TO_CONST(r27,PAGE_OFFSET)
 
 	li	r3,0			/* target addr */
 
Index: work/arch/ppc64/kernel/machine_kexec.c
===================================================================
--- work.orig/arch/ppc64/kernel/machine_kexec.c
+++ work/arch/ppc64/kernel/machine_kexec.c
@@ -184,9 +184,8 @@ void kexec_copy_flush(struct kimage *ima
 	 * including ones that were in place on the original copy
 	 */
 	for (i = 0; i < nr_segments; i++)
-		flush_icache_range(ranges[i].mem + KERNELBASE,
-				ranges[i].mem + KERNELBASE +
-				ranges[i].memsz);
+		flush_icache_range((unsigned long)__va(ranges[i].mem),
+			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
 }
 
 #ifdef CONFIG_SMP
Index: work/arch/ppc64/kernel/pmac_smp.c
===================================================================
--- work.orig/arch/ppc64/kernel/pmac_smp.c
+++ work/arch/ppc64/kernel/pmac_smp.c
@@ -241,7 +241,7 @@ static void __init smp_core99_kick_cpu(i
 	unsigned long new_vector;
 	unsigned long flags;
 	volatile unsigned int *vector
-		 = ((volatile unsigned int *)(KERNELBASE+0x100));
+		 = ((volatile unsigned int *)(PAGE_OFFSET + 0x100));
 
 	if (nr < 1 || nr > 3)
 		return;
@@ -253,8 +253,8 @@ static void __init smp_core99_kick_cpu(i
 	/* Save reset vector */
 	save_vector = *vector;
 
-	/* Setup fake reset vector that does	
-	 *   b .pmac_secondary_start - KERNELBASE
+	/* Setup fake reset vector that does
+	 *   b .pmac_secondary_start - PAGE_OFFSET
 	 */
 	switch(nr) {
 	case 1:
@@ -268,7 +268,7 @@ static void __init smp_core99_kick_cpu(i
 		new_vector = (unsigned long)pmac_secondary_start_3;
 		break;
 	}
-	*vector = 0x48000002 + (new_vector - KERNELBASE);
+	*vector = 0x48000002 + (new_vector - PAGE_OFFSET);
 
 	/* flush data cache and inval instruction cache */
 	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
Index: work/arch/ppc64/kernel/prom_init.c
===================================================================
--- work.orig/arch/ppc64/kernel/prom_init.c
+++ work/arch/ppc64/kernel/prom_init.c
@@ -1846,7 +1846,7 @@ static void __init prom_check_initrd(uns
 	if ( r3 && r4 && r4 != 0xdeadbeef) {
 		u64 val;
 
-		RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+		RELOC(prom_initrd_start) = (r3 >= PAGE_OFFSET) ? __pa(r3) : r3;
 		RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
 
 		val = (u64)RELOC(prom_initrd_start);
@@ -1918,7 +1918,7 @@ unsigned long __init prom_init(unsigned 
 	 * On pSeries and BPA, copy the CPU hold code
 	 */
        	if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA))
-       		copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
+       		copy_and_flush(__pa(KERNELBASE), KERNELBASE - offset, 0x100, 0);
 
 	/*
 	 * Get memory cells format
Index: work/arch/ppc64/kernel/setup.c
===================================================================
--- work.orig/arch/ppc64/kernel/setup.c
+++ work/arch/ppc64/kernel/setup.c
@@ -550,7 +550,7 @@ static void __init check_for_initrd(void
 	/* If we were passed an initrd, set the ROOT_DEV properly if the values
 	 * look sensible. If not, clear initrd reference.
 	 */
-	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+	if (initrd_start >= PAGE_OFFSET && initrd_end >= PAGE_OFFSET &&
 	    initrd_end > initrd_start)
 		ROOT_DEV = Root_RAM0;
 	else
Index: work/arch/ppc64/mm/hash_native.c
===================================================================
--- work.orig/arch/ppc64/mm/hash_native.c
+++ work/arch/ppc64/mm/hash_native.c
@@ -352,7 +352,7 @@ static void native_flush_hash_range(unsi
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if (batch->addr[i] < KERNELBASE)
+		if (batch->addr[i] < PAGE_OFFSET)
 			vsid = get_vsid(context, batch->addr[i]);
 		else
 			vsid = get_kernel_vsid(batch->addr[i]);
Index: work/arch/ppc64/mm/hash_utils.c
===================================================================
--- work.orig/arch/ppc64/mm/hash_utils.c
+++ work/arch/ppc64/mm/hash_utils.c
@@ -210,7 +210,7 @@ void __init htab_initialize(void)
 
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].physbase + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].physbase);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -247,8 +247,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
@@ -361,7 +361,7 @@ void flush_hash_page(unsigned long conte
 	unsigned long vsid, vpn, va, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if (ea < KERNELBASE)
+	if (ea < PAGE_OFFSET)
 		vsid = get_vsid(context, ea);
 	else
 		vsid = get_kernel_vsid(ea);
Index: work/arch/ppc64/mm/slb.c
===================================================================
--- work.orig/arch/ppc64/mm/slb.c
+++ work/arch/ppc64/mm/slb.c
@@ -55,7 +55,7 @@ static void slb_flush_and_rebolt(void)
 		ksp_flags |= SLB_VSID_L;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -111,14 +111,14 @@ void switch_slb(struct task_struct *tsk,
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (pc >= KERNELBASE)
+	if (pc >= PAGE_OFFSET)
 		return;
 	slb_allocate(pc);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (stack >= KERNELBASE)
+	if (stack >= PAGE_OFFSET)
 		return;
 	slb_allocate(stack);
 
@@ -126,7 +126,7 @@ void switch_slb(struct task_struct *tsk,
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (unmapped_base >= KERNELBASE)
+	if (unmapped_base >= PAGE_OFFSET)
 		return;
 	slb_allocate(unmapped_base);
 }
@@ -145,7 +145,7 @@ void slb_initialize(void)
  	asm volatile("isync":::"memory");
  	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, flags, 0);
+	create_slbe(PAGE_OFFSET, flags, 0);
 	create_slbe(VMALLOC_OFFSET, SLB_VSID_KERNEL, 1);
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
Index: work/arch/ppc64/oprofile/op_model_power4.c
===================================================================
--- work.orig/arch/ppc64/oprofile/op_model_power4.c
+++ work/arch/ppc64/oprofile/op_model_power4.c
@@ -236,7 +236,7 @@ static unsigned long get_pc(struct pt_re
 		return (unsigned long)__va(pc);
 
 	/* Not sure where we were */
-	if (pc < KERNELBASE)
+	if (pc < PAGE_OFFSET)
 		/* function descriptor madness */
 		return *((unsigned long *)kernel_unknown_bucket);
 
@@ -248,7 +248,7 @@ static int get_kernel(unsigned long pc)
 	int is_kernel;
 
 	if (!mmcra_has_sihv) {
-		is_kernel = (pc >= KERNELBASE);
+		is_kernel = (pc >= PAGE_OFFSET);
 	} else {
 		unsigned long mmcra = mfspr(SPRN_MMCRA);
 		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
Index: work/arch/ppc64/oprofile/op_model_rs64.c
===================================================================
--- work.orig/arch/ppc64/oprofile/op_model_rs64.c
+++ work/arch/ppc64/oprofile/op_model_rs64.c
@@ -179,7 +179,7 @@ static void rs64_handle_interrupt(struct
 	int val;
 	int i;
 	unsigned long pc = mfspr(SPRN_SIAR);
-	int is_kernel = (pc >= KERNELBASE);
+	int is_kernel = (pc >= PAGE_OFFSET);
 
 	/* set the PMM bit (see comment below) */
 	mtmsrd(mfmsr() | MSR_PMM);
Index: work/arch/ppc64/xmon/xmon.c
===================================================================
--- work.orig/arch/ppc64/xmon/xmon.c
+++ work/arch/ppc64/xmon/xmon.c
@@ -1044,7 +1044,7 @@ static long check_bp_loc(unsigned long a
 	unsigned int instr;
 
 	addr &= ~3;
-	if (addr < KERNELBASE) {
+	if (addr < PAGE_OFFSET) {
 		printf("Breakpoints may only be placed at kernel addresses\n");
 		return 0;
 	}
@@ -1094,7 +1094,7 @@ bpt_cmds(void)
 		dabr.address = 0;
 		dabr.enabled = 0;
 		if (scanhex(&dabr.address)) {
-			if (dabr.address < KERNELBASE) {
+			if (dabr.address < PAGE_OFFSET) {
 				printf(badaddr);
 				break;
 			}
Index: work/include/asm-ppc64/mmu.h
===================================================================
--- work.orig/include/asm-ppc64/mmu.h
+++ work/include/asm-ppc64/mmu.h
@@ -29,8 +29,8 @@
 
 /* Location of cpu0's segment table */
 #define STAB0_PAGE	0x9
-#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
-#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)
+#define STAB0_PHYS_ADDR	(STAB0_PAGE << PAGE_SHIFT)
+#define STAB0_VIRT_ADDR	(PAGE_OFFSET + STAB0_PHYS_ADDR)
 
 /*
  * SLB
Index: work/include/asm-ppc64/page.h
===================================================================
--- work.orig/include/asm-ppc64/page.h
+++ work/include/asm-ppc64/page.h
@@ -204,14 +204,14 @@ extern u64 ppc64_pft_size;		/* Log 2 of 
 #define VMALLOC_OFFSET		ASM_CONST(0xD000000000000000)
 
 #define VMALLOC_REGION_ID	(VMALLOC_OFFSET >> REGION_SHIFT)
-#define KERNEL_REGION_ID   (KERNELBASE >> REGION_SHIFT)
+#define KERNEL_REGION_ID	(PAGE_OFFSET >> REGION_SHIFT)
 #define USER_REGION_ID     (0UL)
 #define REGION_ID(ea)	   (((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
+#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + PAGE_OFFSET)
 #define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
 
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
 
 #ifdef CONFIG_DISCONTIGMEM
 #define page_to_pfn(page)	discontigmem_page_to_pfn(page)


