[PATCH 3/11] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET
Michael Ellerman
michael at ellerman.id.au
Sun Dec 4 18:39:20 EST 2005
This patch separates the usage of KERNELBASE and PAGE_OFFSET. I haven't looked
at any of the PPC code; if we ever want to support Kdump on PPC we'll have to
do another audit, and ditto for iSeries.
This patch makes PAGE_OFFSET the constant; it'll always be 0xC * 1 gazillion,
i.e. the fixed base of the kernel linear mapping.
To get a physical address from a virtual one you subtract PAGE_OFFSET, _not_
KERNELBASE.
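Concretely, after this patch the conversion macros in
include/asm-powerpc/page.h are both defined in terms of PAGE_OFFSET (see the
page.h hunk below):

	#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
	#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)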
KERNELBASE is the virtual address of the start of the kernel; it's often the
same as PAGE_OFFSET, but _might not be_.
If you want to know something's offset from the start of the kernel, you
should subtract KERNELBASE.
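As an illustrative sketch of the difference (the 32MB offset below is
hypothetical, not something this patch introduces): a kdump kernel could be
linked to run 32MB into the linear mapping, in which case

	#define PAGE_OFFSET	ASM_CONST(0xC000000000000000)
	#define KERNELBASE	(PAGE_OFFSET + 0x2000000)	/* hypothetical +32MB */

	extern char _stext[];			/* start of kernel text */
	void *va = __va(0x5000);		/* always 0xC000000000005000 */
	unsigned long off = (unsigned long)_stext - KERNELBASE;
						/* offset into the kernel image */

Subtracting KERNELBASE from a linear mapping address in that configuration
would give a physical address that's off by 32MB, which is exactly the
confusion this patch removes.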
Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
---
arch/powerpc/kernel/btext.c | 4 ++--
arch/powerpc/kernel/entry_64.S | 4 ++--
arch/powerpc/kernel/lparmap.c | 6 +++---
arch/powerpc/kernel/machine_kexec_64.c | 5 ++---
arch/powerpc/mm/hash_utils_64.c | 6 +++---
arch/powerpc/mm/slb.c | 4 ++--
arch/powerpc/mm/slb_low.S | 6 +++---
arch/powerpc/mm/stab.c | 10 +++++-----
include/asm-powerpc/page.h | 2 +-
9 files changed, 23 insertions(+), 24 deletions(-)
Index: kexec/arch/powerpc/mm/stab.c
===================================================================
--- kexec.orig/arch/powerpc/mm/stab.c
+++ kexec/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab,
unsigned long entry, group, old_esid, castout_entry, i;
unsigned int global_entry;
struct stab_entry *ste, *castout_ste;
- unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+ unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
vsid_data = vsid << STE_VSID_SHIFT;
esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab,
}
/* Dont cast out the first kernel segment */
- if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+ if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
break;
castout_entry = (castout_entry + 1) & 0xf;
@@ -251,7 +251,7 @@ void stabs_alloc(void)
panic("Unable to allocate segment table for CPU %d.\n",
cpu);
- newstab += KERNELBASE;
+ newstab = (unsigned long)__va(newstab);
memset((void *)newstab, 0, HW_PAGE_SIZE);
@@ -270,11 +270,11 @@ void stabs_alloc(void)
*/
void stab_initialize(unsigned long stab)
{
- unsigned long vsid = get_kernel_vsid(KERNELBASE);
+ unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
unsigned long stabreal;
asm volatile("isync; slbia; isync":::"memory");
- make_ste(stab, GET_ESID(KERNELBASE), vsid);
+ make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
/* Order update */
asm volatile("sync":::"memory");
Index: kexec/arch/powerpc/kernel/machine_kexec_64.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/machine_kexec_64.c
+++ kexec/arch/powerpc/kernel/machine_kexec_64.c
@@ -153,9 +153,8 @@ void kexec_copy_flush(struct kimage *ima
* including ones that were in place on the original copy
*/
for (i = 0; i < nr_segments; i++)
- flush_icache_range(ranges[i].mem + KERNELBASE,
- ranges[i].mem + KERNELBASE +
- ranges[i].memsz);
+ flush_icache_range((unsigned long)__va(ranges[i].mem),
+ (unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}
#ifdef CONFIG_SMP
Index: kexec/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- kexec.orig/arch/powerpc/mm/hash_utils_64.c
+++ kexec/arch/powerpc/mm/hash_utils_64.c
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
/* create bolted the linear mapping in the hash table */
for (i=0; i < lmb.memory.cnt; i++) {
- base = lmb.memory.region[i].base + KERNELBASE;
+ base = (unsigned long)__va(lmb.memory.region[i].base);
size = lmb.memory.region[i].size;
DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
* for either 4K or 16MB pages.
*/
if (tce_alloc_start) {
- tce_alloc_start += KERNELBASE;
- tce_alloc_end += KERNELBASE;
+ tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+ tce_alloc_end = (unsigned long)__va(tce_alloc_end);
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
Index: kexec/arch/powerpc/mm/slb.c
===================================================================
--- kexec.orig/arch/powerpc/mm/slb.c
+++ kexec/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
vflags = SLB_VSID_KERNEL | virtual_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
- if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+ if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
ksp_esid_data &= ~SLB_ESID_V;
/* We need to do this all in asm, so we're sure we don't touch
@@ -213,7 +213,7 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_slbe(KERNELBASE, lflags, 0);
+ create_slbe(PAGE_OFFSET, lflags, 0);
/* VMALLOC space has 4K pages always for now */
create_slbe(VMALLOCBASE, vflags, 1);
Index: kexec/arch/powerpc/kernel/entry_64.S
===================================================================
--- kexec.orig/arch/powerpc/kernel/entry_64.S
+++ kexec/arch/powerpc/kernel/entry_64.S
@@ -690,7 +690,7 @@ _GLOBAL(enter_rtas)
/* Setup our real return addr */
SET_REG_TO_LABEL(r4,.rtas_return_loc)
- SET_REG_TO_CONST(r9,KERNELBASE)
+ SET_REG_TO_CONST(r9,PAGE_OFFSET)
sub r4,r4,r9
mtlr r4
@@ -718,7 +718,7 @@ _GLOBAL(enter_rtas)
_STATIC(rtas_return_loc)
/* relocation is off at this point */
mfspr r4,SPRN_SPRG3 /* Get PACA */
- SET_REG_TO_CONST(r5, KERNELBASE)
+ SET_REG_TO_CONST(r5, PAGE_OFFSET)
sub r4,r4,r5 /* RELOC the PACA base pointer */
mfmsr r6
Index: kexec/arch/powerpc/mm/slb_low.S
===================================================================
--- kexec.orig/arch/powerpc/mm/slb_low.S
+++ kexec/arch/powerpc/mm/slb_low.S
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
srdi r9,r3,60 /* get region */
srdi r10,r3,28 /* get esid */
- cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
+ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
- /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
blt cr7,0f /* user or kernel? */
/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
/*
* Finish loading of an SLB entry and return
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
ASM_VSID_SCRAMBLE(r10,r9)
Index: kexec/arch/powerpc/kernel/lparmap.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/lparmap.c
+++ kexec/arch/powerpc/kernel/lparmap.c
@@ -16,8 +16,8 @@ const struct LparMap __attribute__((__se
.xSegmentTableOffs = STAB0_PAGE,
.xEsids = {
- { .xKernelEsid = GET_ESID(KERNELBASE),
- .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+ { .xKernelEsid = GET_ESID(PAGE_OFFSET),
+ .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
{ .xKernelEsid = GET_ESID(VMALLOCBASE),
.xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
},
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__se
.xRanges = {
{ .xPages = HvPagesToMap,
.xOffset = 0,
- .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
+ .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
},
},
};
Index: kexec/include/asm-powerpc/page.h
===================================================================
--- kexec.orig/include/asm-powerpc/page.h
+++ kexec/include/asm-powerpc/page.h
@@ -56,7 +56,7 @@
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
/*
Index: kexec/arch/powerpc/kernel/btext.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/btext.c
+++ kexec/arch/powerpc/kernel/btext.c
@@ -60,7 +60,7 @@ int force_printk_to_btext = 0;
*
* The display is mapped to virtual address 0xD0000000, rather
* than 1:1, because some CHRP machines put the frame buffer
- * in the region starting at 0xC0000000 (KERNELBASE).
+ * in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
*
@@ -71,7 +71,7 @@ int force_printk_to_btext = 0;
*/
void __init btext_prepare_BAT(void)
{
- unsigned long vaddr = KERNELBASE + 0x10000000;
+ unsigned long vaddr = PAGE_OFFSET + 0x10000000;
unsigned long addr;
unsigned long lowbits;