[PATCH 2/12] powerpc: Add an is_kernel_addr() macro

Michael Ellerman <michael at ellerman.id.au>
Tue Nov 8 00:06:50 EST 2005


There's a bunch of code that compares an address with KERNELBASE to see if
it's a "kernel address", i.e. >= KERNELBASE. Replace all of these comparisons
with an is_kernel_addr() macro that does the same thing. This will save us
some pain when we change KERNELBASE, and also makes the code more readable IMHO.
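
For illustration only (this snippet is not part of the patch below), the new
macro and a typical before/after conversion look like this:

	/* include/asm-powerpc/page.h: test against PAGE_OFFSET, not KERNELBASE */
	#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)

	/* before */
	if (ea >= KERNELBASE)
		vsid = get_kernel_vsid(ea);

	/* after */
	if (is_kernel_addr(ea))
		vsid = get_kernel_vsid(ea);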

 arch/powerpc/kernel/prom_init.c         |    2 +-
 arch/powerpc/kernel/setup_64.c          |    2 +-
 arch/powerpc/mm/slb.c                   |    6 +++---
 arch/powerpc/mm/stab.c                  |    6 +++---
 arch/powerpc/oprofile/op_model_power4.c |    4 ++--
 arch/powerpc/oprofile/op_model_rs64.c   |    3 +--
 arch/powerpc/xmon/xmon.c                |    4 ++--
 include/asm-powerpc/page.h              |    6 ++++++
 include/asm-ppc64/pgtable.h             |    2 +-
 9 files changed, 20 insertions(+), 15 deletions(-)

Index: kexec/arch/powerpc/mm/stab.c
===================================================================
--- kexec.orig/arch/powerpc/mm/stab.c
+++ kexec/arch/powerpc/mm/stab.c
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long 
 	unsigned long offset;
 
 	/* Kernel or user address? */
-	if (ea >= KERNELBASE) {
+	if (is_kernel_addr(ea)) {
 		vsid = get_kernel_vsid(ea);
 	} else {
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long 
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
 
-	if (ea < KERNELBASE) {
+	if (!is_kernel_addr(ea)) {
 		offset = __get_cpu_var(stab_cache_ptr);
 		if (offset < NR_STAB_CACHE_ENTRIES)
 			__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk
 		     entry++, ste++) {
 			unsigned long ea;
 			ea = ste->esid_data & ESID_MASK;
-			if (ea < KERNELBASE) {
+			if (!is_kernel_addr(ea)) {
 				ste->esid_data = 0;
 			}
 		}
Index: kexec/arch/powerpc/kernel/prom_init.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/prom_init.c
+++ kexec/arch/powerpc/kernel/prom_init.c
@@ -1917,7 +1917,7 @@ static void __init prom_check_initrd(uns
 	if (r3 && r4 && r4 != 0xdeadbeef) {
 		unsigned long val;
 
-		RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+		RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
 		RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
 
 		val = RELOC(prom_initrd_start);
Index: kexec/arch/powerpc/kernel/setup_64.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/setup_64.c
+++ kexec/arch/powerpc/kernel/setup_64.c
@@ -421,7 +421,7 @@ static void __init check_for_initrd(void
 	/* If we were passed an initrd, set the ROOT_DEV properly if the values
 	 * look sensible. If not, clear initrd reference.
 	 */
-	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
 	    initrd_end > initrd_start)
 		ROOT_DEV = Root_RAM0;
 	else
Index: kexec/arch/powerpc/mm/slb.c
===================================================================
--- kexec.orig/arch/powerpc/mm/slb.c
+++ kexec/arch/powerpc/mm/slb.c
@@ -111,14 +111,14 @@ void switch_slb(struct task_struct *tsk,
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (pc >= KERNELBASE)
+	if (is_kernel_addr(pc))
 		return;
 	slb_allocate(pc);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (stack >= KERNELBASE)
+	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);
 
@@ -126,7 +126,7 @@ void switch_slb(struct task_struct *tsk,
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (unmapped_base >= KERNELBASE)
+	if (is_kernel_addr(unmapped_base))
 		return;
 	slb_allocate(unmapped_base);
 }
Index: kexec/arch/powerpc/oprofile/op_model_power4.c
===================================================================
--- kexec.orig/arch/powerpc/oprofile/op_model_power4.c
+++ kexec/arch/powerpc/oprofile/op_model_power4.c
@@ -232,7 +232,7 @@ static unsigned long get_pc(struct pt_re
 		return (unsigned long)__va(pc);
 
 	/* Not sure where we were */
-	if (pc < KERNELBASE)
+	if (!is_kernel_addr(pc))
 		/* function descriptor madness */
 		return *((unsigned long *)kernel_unknown_bucket);
 
@@ -244,7 +244,7 @@ static int get_kernel(unsigned long pc)
 	int is_kernel;
 
 	if (!mmcra_has_sihv) {
-		is_kernel = (pc >= KERNELBASE);
+		is_kernel = is_kernel_addr(pc);
 	} else {
 		unsigned long mmcra = mfspr(SPRN_MMCRA);
 		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
Index: kexec/arch/powerpc/xmon/xmon.c
===================================================================
--- kexec.orig/arch/powerpc/xmon/xmon.c
+++ kexec/arch/powerpc/xmon/xmon.c
@@ -1015,7 +1015,7 @@ static long check_bp_loc(unsigned long a
 	unsigned int instr;
 
 	addr &= ~3;
-	if (addr < KERNELBASE) {
+	if (!is_kernel_addr(addr)) {
 		printf("Breakpoints may only be placed at kernel addresses\n");
 		return 0;
 	}
@@ -1066,7 +1066,7 @@ bpt_cmds(void)
 		dabr.address = 0;
 		dabr.enabled = 0;
 		if (scanhex(&dabr.address)) {
-			if (dabr.address < KERNELBASE) {
+			if (!is_kernel_addr(dabr.address)) {
 				printf(badaddr);
 				break;
 			}
Index: kexec/include/asm-powerpc/page.h
===================================================================
--- kexec.orig/include/asm-powerpc/page.h
+++ kexec/include/asm-powerpc/page.h
@@ -106,6 +106,12 @@
 #define KERNELBASE	PAGE_OFFSET
 #define VMALLOCBASE     ASM_CONST(0xD000000000000000)
 
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
+
 #ifndef __ASSEMBLY__
 
 #ifdef __powerpc64__
Index: kexec/include/asm-ppc64/pgtable.h
===================================================================
--- kexec.orig/include/asm-ppc64/pgtable.h
+++ kexec/include/asm-ppc64/pgtable.h
@@ -212,7 +212,7 @@ static inline pte_t pfn_pte(unsigned lon
 #define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep) 	({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
+#define pmd_set(pmdp, ptep) 	({BUG_ON(!is_kernel_addr((u64)ptep)); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define	pmd_bad(pmd)		(pmd_val(pmd) == 0)
 #define	pmd_present(pmd)	(pmd_val(pmd) != 0)
Index: kexec/arch/powerpc/oprofile/op_model_rs64.c
===================================================================
--- kexec.orig/arch/powerpc/oprofile/op_model_rs64.c
+++ kexec/arch/powerpc/oprofile/op_model_rs64.c
@@ -178,7 +178,6 @@ static void rs64_handle_interrupt(struct
 	int val;
 	int i;
 	unsigned long pc = mfspr(SPRN_SIAR);
-	int is_kernel = (pc >= KERNELBASE);
 
 	/* set the PMM bit (see comment below) */
 	mtmsrd(mfmsr() | MSR_PMM);
@@ -187,7 +186,7 @@ static void rs64_handle_interrupt(struct
 		val = ctr_read(i);
 		if (val < 0) {
 			if (ctr[i].enabled) {
-				oprofile_add_pc(pc, is_kernel, i);
+				oprofile_add_pc(pc, is_kernel_addr(pc), i);
 				ctr_write(i, reset_value[i]);
 			} else {
 				ctr_write(i, 0);


