[PATCH 6/12] ppc64: Add an is_kernel_addr() macro
Michael Ellerman
michael at ellerman.id.au
Fri Aug 26 12:53:26 EST 2005
There's a bunch of code that compares an address with KERNELBASE to see if
it's a "kernel address", i.e., >= KERNELBASE. Replace all of them with an
is_kernel_addr() macro that does the same thing. This will save us some pain
when we change KERNELBASE, and also makes the code more readable IMHO.
Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
---
arch/ppc64/kernel/prom_init.c | 2 +-
arch/ppc64/kernel/setup.c | 2 +-
arch/ppc64/mm/hash_native.c | 2 +-
arch/ppc64/mm/hash_utils.c | 2 +-
arch/ppc64/mm/slb.c | 6 +++---
arch/ppc64/mm/stab.c | 6 +++---
arch/ppc64/oprofile/op_model_power4.c | 4 ++--
arch/ppc64/oprofile/op_model_rs64.c | 3 +--
arch/ppc64/xmon/xmon.c | 4 ++--
include/asm-ppc64/page.h | 2 ++
include/asm-ppc64/pgtable.h | 2 +-
11 files changed, 18 insertions(+), 17 deletions(-)
Index: work/arch/ppc64/mm/stab.c
===================================================================
--- work.orig/arch/ppc64/mm/stab.c
+++ work/arch/ppc64/mm/stab.c
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long
unsigned long offset;
/* Kernel or user address? */
- if (ea >= KERNELBASE) {
+ if (is_kernel_addr(ea)) {
vsid = get_kernel_vsid(ea);
} else {
if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long
stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
- if (ea < KERNELBASE) {
+ if (!is_kernel_addr(ea)) {
offset = __get_cpu_var(stab_cache_ptr);
if (offset < NR_STAB_CACHE_ENTRIES)
__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk
entry++, ste++) {
unsigned long ea;
ea = ste->esid_data & ESID_MASK;
- if (ea < KERNELBASE) {
+ if (!is_kernel_addr(ea)) {
ste->esid_data = 0;
}
}
Index: work/arch/ppc64/kernel/prom_init.c
===================================================================
--- work.orig/arch/ppc64/kernel/prom_init.c
+++ work/arch/ppc64/kernel/prom_init.c
@@ -1868,7 +1868,7 @@ static void __init prom_check_initrd(uns
if ( r3 && r4 && r4 != 0xdeadbeef) {
u64 val;
- RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+ RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
val = (u64)RELOC(prom_initrd_start);
Index: work/arch/ppc64/kernel/setup.c
===================================================================
--- work.orig/arch/ppc64/kernel/setup.c
+++ work/arch/ppc64/kernel/setup.c
@@ -554,7 +554,7 @@ static void __init check_for_initrd(void
/* If we were passed an initrd, set the ROOT_DEV properly if the values
* look sensible. If not, clear initrd reference.
*/
- if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+ if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
initrd_end > initrd_start)
ROOT_DEV = Root_RAM0;
else
Index: work/arch/ppc64/mm/hash_native.c
===================================================================
--- work.orig/arch/ppc64/mm/hash_native.c
+++ work/arch/ppc64/mm/hash_native.c
@@ -351,7 +351,7 @@ static void native_flush_hash_range(unsi
j = 0;
for (i = 0; i < number; i++) {
- if (batch->addr[i] < KERNELBASE)
+ if (!is_kernel_addr(batch->addr[i]))
vsid = get_vsid(context, batch->addr[i]);
else
vsid = get_kernel_vsid(batch->addr[i]);
Index: work/arch/ppc64/mm/hash_utils.c
===================================================================
--- work.orig/arch/ppc64/mm/hash_utils.c
+++ work/arch/ppc64/mm/hash_utils.c
@@ -361,7 +361,7 @@ void flush_hash_page(unsigned long conte
unsigned long vsid, vpn, va, hash, secondary, slot;
unsigned long huge = pte_huge(pte);
- if (ea < KERNELBASE)
+ if (!is_kernel_addr(ea))
vsid = get_vsid(context, ea);
else
vsid = get_kernel_vsid(ea);
Index: work/arch/ppc64/mm/slb.c
===================================================================
--- work.orig/arch/ppc64/mm/slb.c
+++ work/arch/ppc64/mm/slb.c
@@ -111,14 +111,14 @@ void switch_slb(struct task_struct *tsk,
else
unmapped_base = TASK_UNMAPPED_BASE_USER64;
- if (pc >= KERNELBASE)
+ if (is_kernel_addr(pc))
return;
slb_allocate(pc);
if (GET_ESID(pc) == GET_ESID(stack))
return;
- if (stack >= KERNELBASE)
+ if (is_kernel_addr(stack))
return;
slb_allocate(stack);
@@ -126,7 +126,7 @@ void switch_slb(struct task_struct *tsk,
|| (GET_ESID(stack) == GET_ESID(unmapped_base)))
return;
- if (unmapped_base >= KERNELBASE)
+ if (is_kernel_addr(unmapped_base))
return;
slb_allocate(unmapped_base);
}
Index: work/arch/ppc64/oprofile/op_model_power4.c
===================================================================
--- work.orig/arch/ppc64/oprofile/op_model_power4.c
+++ work/arch/ppc64/oprofile/op_model_power4.c
@@ -236,7 +236,7 @@ static unsigned long get_pc(struct pt_re
return __va_ul(pc);
/* Not sure where we were */
- if (pc < KERNELBASE)
+ if (!is_kernel_addr(pc))
/* function descriptor madness */
return *((unsigned long *)kernel_unknown_bucket);
@@ -248,7 +248,7 @@ static int get_kernel(unsigned long pc)
int is_kernel;
if (!mmcra_has_sihv) {
- is_kernel = (pc >= KERNELBASE);
+ is_kernel = is_kernel_addr(pc);
} else {
unsigned long mmcra = mfspr(SPRN_MMCRA);
is_kernel = ((mmcra & MMCRA_SIPR) == 0);
Index: work/arch/ppc64/xmon/xmon.c
===================================================================
--- work.orig/arch/ppc64/xmon/xmon.c
+++ work/arch/ppc64/xmon/xmon.c
@@ -1044,7 +1044,7 @@ static long check_bp_loc(unsigned long a
unsigned int instr;
addr &= ~3;
- if (addr < KERNELBASE) {
+ if (!is_kernel_addr(addr)) {
printf("Breakpoints may only be placed at kernel addresses\n");
return 0;
}
@@ -1094,7 +1094,7 @@ bpt_cmds(void)
dabr.address = 0;
dabr.enabled = 0;
if (scanhex(&dabr.address)) {
- if (dabr.address < KERNELBASE) {
+ if (!is_kernel_addr(dabr.address)) {
printf(badaddr);
break;
}
Index: work/include/asm-ppc64/page.h
===================================================================
--- work.orig/include/asm-ppc64/page.h
+++ work/include/asm-ppc64/page.h
@@ -223,6 +223,8 @@ extern u64 ppc64_pft_size; /* Log 2 of
#define __va_ul(x) (((unsigned long)(x) + KERNELBASE))
#define __va(x) ((void *)__va_ul(x))
+#define is_kernel_addr(x) ((x) >= KERNELBASE)
+
#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page) discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
Index: work/include/asm-ppc64/pgtable.h
===================================================================
--- work.orig/include/asm-ppc64/pgtable.h
+++ work/include/asm-ppc64/pgtable.h
@@ -212,7 +212,7 @@ static inline pte_t pfn_pte(unsigned lon
#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pmd_set(pmdp, ptep) ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
+#define pmd_set(pmdp, ptep) ({BUG_ON(!is_kernel_addr((u64)ptep)); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) == 0)
#define pmd_present(pmd) (pmd_val(pmd) != 0)
Index: work/arch/ppc64/oprofile/op_model_rs64.c
===================================================================
--- work.orig/arch/ppc64/oprofile/op_model_rs64.c
+++ work/arch/ppc64/oprofile/op_model_rs64.c
@@ -179,7 +179,6 @@ static void rs64_handle_interrupt(struct
int val;
int i;
unsigned long pc = mfspr(SPRN_SIAR);
- int is_kernel = (pc >= KERNELBASE);
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
@@ -188,7 +187,7 @@ static void rs64_handle_interrupt(struct
val = ctr_read(i);
if (val < 0) {
if (ctr[i].enabled) {
- oprofile_add_pc(pc, is_kernel, i);
+ oprofile_add_pc(pc, is_kernel_addr(pc), i);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
More information about the Linuxppc64-dev
mailing list