[PATCH 2/11] powerpc: Add an is_kernel_addr() macro

Michael Ellerman michael at ellerman.id.au
Sun Dec 4 18:39:15 EST 2005


There's a bunch of code that compares an address with KERNELBASE to see if
it's a "kernel address", i.e. >= KERNELBASE. The proper test is actually to
compare with PAGE_OFFSET, since we're going to change KERNELBASE soon.

So replace all of them with an is_kernel_addr() macro that does that.
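
For illustration, this is the shape of the change (a minimal sketch drawn
from the stab.c hunk below; the macro itself is added to
include/asm-powerpc/page.h):

	/* Before: open-coded comparison against KERNELBASE */
	if (ea >= KERNELBASE)
		vsid = get_kernel_vsid(ea);

	/* After: use the new helper, which compares against PAGE_OFFSET */
	#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)

	if (is_kernel_addr(ea))
		vsid = get_kernel_vsid(ea);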

Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
---

 arch/powerpc/kernel/prom_init.c         |    2 +-
 arch/powerpc/kernel/setup-common.c      |    2 +-
 arch/powerpc/mm/slb.c                   |    6 +++---
 arch/powerpc/mm/stab.c                  |    6 +++---
 arch/powerpc/mm/tlb_64.c                |    2 +-
 arch/powerpc/oprofile/op_model_power4.c |    4 ++--
 arch/powerpc/oprofile/op_model_rs64.c   |    3 +--
 arch/powerpc/xmon/xmon.c                |    4 ++--
 include/asm-powerpc/page.h              |    6 ++++++
 9 files changed, 20 insertions(+), 15 deletions(-)

Index: kexec/arch/powerpc/mm/stab.c
===================================================================
--- kexec.orig/arch/powerpc/mm/stab.c
+++ kexec/arch/powerpc/mm/stab.c
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long 
 	unsigned long offset;
 
 	/* Kernel or user address? */
-	if (ea >= KERNELBASE) {
+	if (is_kernel_addr(ea)) {
 		vsid = get_kernel_vsid(ea);
 	} else {
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long 
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
 
-	if (ea < KERNELBASE) {
+	if (!is_kernel_addr(ea)) {
 		offset = __get_cpu_var(stab_cache_ptr);
 		if (offset < NR_STAB_CACHE_ENTRIES)
 			__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk
 		     entry++, ste++) {
 			unsigned long ea;
 			ea = ste->esid_data & ESID_MASK;
-			if (ea < KERNELBASE) {
+			if (!is_kernel_addr(ea)) {
 				ste->esid_data = 0;
 			}
 		}
Index: kexec/arch/powerpc/kernel/prom_init.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/prom_init.c
+++ kexec/arch/powerpc/kernel/prom_init.c
@@ -1994,7 +1994,7 @@ static void __init prom_check_initrd(uns
 	if (r3 && r4 && r4 != 0xdeadbeef) {
 		unsigned long val;
 
-		RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+		RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
 		RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
 
 		val = RELOC(prom_initrd_start);
Index: kexec/arch/powerpc/kernel/setup-common.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/setup-common.c
+++ kexec/arch/powerpc/kernel/setup-common.c
@@ -319,7 +319,7 @@ void __init check_for_initrd(void)
 	/* If we were passed an initrd, set the ROOT_DEV properly if the values
 	 * look sensible. If not, clear initrd reference.
 	 */
-	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
 	    initrd_end > initrd_start)
 		ROOT_DEV = Root_RAM0;
 	else
Index: kexec/arch/powerpc/mm/slb.c
===================================================================
--- kexec.orig/arch/powerpc/mm/slb.c
+++ kexec/arch/powerpc/mm/slb.c
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk,
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (pc >= KERNELBASE)
+	if (is_kernel_addr(pc))
 		return;
 	slb_allocate(pc);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (stack >= KERNELBASE)
+	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);
 
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk,
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (unmapped_base >= KERNELBASE)
+	if (is_kernel_addr(unmapped_base))
 		return;
 	slb_allocate(unmapped_base);
 }
Index: kexec/arch/powerpc/oprofile/op_model_power4.c
===================================================================
--- kexec.orig/arch/powerpc/oprofile/op_model_power4.c
+++ kexec/arch/powerpc/oprofile/op_model_power4.c
@@ -252,7 +252,7 @@ static unsigned long get_pc(struct pt_re
 		return (unsigned long)__va(pc);
 
 	/* Not sure where we were */
-	if (pc < KERNELBASE)
+	if (!is_kernel_addr(pc))
 		/* function descriptor madness */
 		return *((unsigned long *)kernel_unknown_bucket);
 
@@ -264,7 +264,7 @@ static int get_kernel(unsigned long pc)
 	int is_kernel;
 
 	if (!mmcra_has_sihv) {
-		is_kernel = (pc >= KERNELBASE);
+		is_kernel = is_kernel_addr(pc);
 	} else {
 		unsigned long mmcra = mfspr(SPRN_MMCRA);
 		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
Index: kexec/arch/powerpc/xmon/xmon.c
===================================================================
--- kexec.orig/arch/powerpc/xmon/xmon.c
+++ kexec/arch/powerpc/xmon/xmon.c
@@ -1013,7 +1013,7 @@ static long check_bp_loc(unsigned long a
 	unsigned int instr;
 
 	addr &= ~3;
-	if (addr < KERNELBASE) {
+	if (!is_kernel_addr(addr)) {
 		printf("Breakpoints may only be placed at kernel addresses\n");
 		return 0;
 	}
@@ -1064,7 +1064,7 @@ bpt_cmds(void)
 		dabr.address = 0;
 		dabr.enabled = 0;
 		if (scanhex(&dabr.address)) {
-			if (dabr.address < KERNELBASE) {
+			if (!is_kernel_addr(dabr.address)) {
 				printf(badaddr);
 				break;
 			}
Index: kexec/include/asm-powerpc/page.h
===================================================================
--- kexec.orig/include/asm-powerpc/page.h
+++ kexec/include/asm-powerpc/page.h
@@ -86,6 +86,12 @@
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
 
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
+
 #ifndef __ASSEMBLY__
 
 #undef STRICT_MM_TYPECHECKS
Index: kexec/arch/powerpc/oprofile/op_model_rs64.c
===================================================================
--- kexec.orig/arch/powerpc/oprofile/op_model_rs64.c
+++ kexec/arch/powerpc/oprofile/op_model_rs64.c
@@ -178,7 +178,6 @@ static void rs64_handle_interrupt(struct
 	int val;
 	int i;
 	unsigned long pc = mfspr(SPRN_SIAR);
-	int is_kernel = (pc >= KERNELBASE);
 
 	/* set the PMM bit (see comment below) */
 	mtmsrd(mfmsr() | MSR_PMM);
@@ -187,7 +186,7 @@ static void rs64_handle_interrupt(struct
 		val = ctr_read(i);
 		if (val < 0) {
 			if (ctr[i].enabled) {
-				oprofile_add_pc(pc, is_kernel, i);
+				oprofile_add_pc(pc, is_kernel_addr(pc), i);
 				ctr_write(i, reset_value[i]);
 			} else {
 				ctr_write(i, 0);
Index: kexec/arch/powerpc/mm/tlb_64.c
===================================================================
--- kexec.orig/arch/powerpc/mm/tlb_64.c
+++ kexec/arch/powerpc/mm/tlb_64.c
@@ -168,7 +168,7 @@ void hpte_update(struct mm_struct *mm, u
 		batch->mm = mm;
 		batch->psize = psize;
 	}
-	if (addr < KERNELBASE) {
+	if (!is_kernel_addr(addr)) {
 		vsid = get_vsid(mm->context.id, addr);
 		WARN_ON(vsid == 0);
 	} else


