[PATCH 6/12] powerpc: Fixups for kernel linked at 32 MB
Michael Ellerman
michael at ellerman.id.au
Tue Nov 8 00:07:00 EST 2005
There are a few places where we need to fix things up for the kernel to work
if it's linked at 32 MB:
- platforms/powermac/smp.c
To start secondary cpus on pmac we patch the reset vector, which is fine.
Except if we're above 32 MB we don't have enough bits for an absolute branch;
it needs to be relative.
- kernel/head_64.S
- A few branches in the cpu hold code need to load the full target address
and do a bctr.
- after_prom_start needs to load PHYSICAL_START as the dest address, not 0.
- The exception prolog needs to load the low word of the target address,
not just the low halfword.
- Fix up handling of the initial stab address.
- kernel/setup_64.c
smp_release_cpus() needs to write 1 to the spinloop flag at its copy near 0, not at 32 MB.
arch/powerpc/kernel/head_64.S | 30 ++++++++++++++++++++++++------
arch/powerpc/kernel/setup_64.c | 5 ++++-
arch/powerpc/platforms/powermac/smp.c | 2 +-
include/asm-ppc64/mmu.h | 3 ++-
4 files changed, 31 insertions(+), 9 deletions(-)
Index: kexec/arch/powerpc/platforms/powermac/smp.c
===================================================================
--- kexec.orig/arch/powerpc/platforms/powermac/smp.c
+++ kexec/arch/powerpc/platforms/powermac/smp.c
@@ -762,7 +762,7 @@ static void __devinit smp_core99_kick_cp
* b __secondary_start_pmac_0 + nr*8 - KERNELBASE
*/
new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
- *vector = 0x48000002 + new_vector - KERNELBASE;
+ *vector = 0x48000001 + new_vector - (unsigned long)vector;
/* flush data cache and inval instruction cache */
flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
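For reference, a rough sketch of what the new line encodes (not part of the patch): 0x48000002 + addr was an absolute "ba", which can only reach the first 32 MB of memory, while 0x48000001 + (target - vector) is a "bl" relative to the vector itself, so it reaches the kernel wherever it's linked.

/* Sketch only: build the relative branch-and-link written into the reset
 * vector.  I-form branch: opcode 18 (0x48000000), 24-bit word-aligned LI
 * displacement in bits 6-29 (mask 0x03fffffc), AA bit 0x2, LK bit 0x1.
 * The patch just adds the (positive, word-aligned) displacement, which
 * comes to the same thing.
 */
static unsigned int make_rel_bl(unsigned long vector, unsigned long target)
{
	unsigned long disp = target - vector;	/* must be within +/- 32 MB */

	return 0x48000001u | (unsigned int)(disp & 0x03fffffcUL);
}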
Index: kexec/arch/powerpc/kernel/head_64.S
===================================================================
--- kexec.orig/arch/powerpc/kernel/head_64.S
+++ kexec/arch/powerpc/kernel/head_64.S
@@ -155,11 +155,15 @@ _GLOBAL(__secondary_hold)
bne 100b
#ifdef CONFIG_HMT
- b .hmt_init
+ LOADADDR(r4, .hmt_init)
+ mtctr r4
+ bctr
#else
#ifdef CONFIG_SMP
+ LOADADDR(r4, .pSeries_secondary_smp_init)
+ mtctr r4
mr r3,r24
- b .pSeries_secondary_smp_init
+ bctr
#else
BUG_OPCODE
#endif
@@ -201,6 +205,20 @@ exception_marker:
#define EX_DSISR 56
#define EX_CCR 60
+/*
+ * We're short on space and time in the exception prolog, so we can't use
+ * the normal LOADADDR macro. Normally we just need the low halfword of the
+ * address, but for Kdump we need the whole low word.
+ */
+#ifdef CONFIG_CRASH_DUMP
+#define LOAD_HANDLER(reg, label) \
+ oris r12,r12,(label)@h; /* virt addr of handler ... */ \
+ ori r12,r12,(label)@l; /* .. and the rest */
+#else
+#define LOAD_HANDLER(reg, label) \
+ ori r12,r12,(label)@l; /* virt addr of handler ... */
+#endif
+
#define EXCEPTION_PROLOG_PSERIES(area, label) \
mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
@@ -213,8 +231,8 @@ exception_marker:
clrrdi r12,r13,32; /* get high part of &label */ \
mfmsr r10; \
mfspr r11,SPRN_SRR0; /* save SRR0 */ \
- ori r12,r12,(label)@l; /* virt addr of handler */ \
ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
+ LOAD_HANDLER(r12,label) \
mtspr SPRN_SRR0,r12; \
mfspr r12,SPRN_SRR1; /* and SRR1 */ \
mtspr SPRN_SRR1,r10; \
@@ -1205,7 +1223,7 @@ unrecov_slb:
* fixed address (the linker can't compute (u64)&initial_stab >>
* PAGE_SHIFT).
*/
- . = STAB0_PHYS_ADDR /* 0x6000 */
+ . = STAB0_OFFSET /* 0x6000 */
.globl initial_stab
initial_stab:
.space 4096
@@ -1410,7 +1428,7 @@ _STATIC(__boot_from_prom)
_STATIC(__after_prom_start)
/*
- * We need to run with __start at physical address 0.
+ * We need to run with __start at physical address PHYSICAL_START.
* This will leave some code in the first 256B of
* real memory, which are reserved for software use.
* The remainder of the first page is loaded with the fixed
@@ -1425,7 +1443,7 @@ _STATIC(__after_prom_start)
mr r26,r3
SET_REG_TO_CONST(r27,KERNELBASE)
- li r3,0 /* target addr */
+ LOADADDR(r3, PHYSICAL_START) /* target addr */
// XXX FIXME: Use phys returned by OF (r30)
add r4,r27,r26 /* source addr */
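To spell out the exception prolog change: clrrdi r12,r13,32 leaves only the 0xc000000000000000 region bits in r12, and the old single ori could only OR in label@l, the bottom 16 bits. That's fine while the handlers sit within 64 KB of KERNELBASE, but not when the kernel is linked 32 MB up. A minimal sketch of the arithmetic, using a made-up handler address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical common handler linked at KERNELBASE + 32 MB + 0x4380 */
	uint64_t label  = 0xc000000002004380ULL;
	uint64_t region = label & 0xffffffff00000000ULL;	/* what clrrdi leaves behind */

	uint64_t oldway = region | (label & 0xffffULL);		/* ori alone: the 0x0200 halfword is lost */
	uint64_t newway = region | (label & 0xffffffffULL);	/* oris + ori: full low word */

	printf("old: 0x%016llx (wrong above 64 KB)\n", (unsigned long long)oldway);
	printf("new: 0x%016llx\n", (unsigned long long)newway);
	return 0;
}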
Index: kexec/arch/powerpc/kernel/setup_64.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/setup_64.c
+++ kexec/arch/powerpc/kernel/setup_64.c
@@ -301,6 +301,7 @@ void __init early_setup(unsigned long dt
void smp_release_cpus(void)
{
extern unsigned long __secondary_hold_spinloop;
+ unsigned long *ptr;
DBG(" -> smp_release_cpus()\n");
@@ -311,7 +312,9 @@ void smp_release_cpus(void)
* This is useless but harmless on iSeries, secondaries are already
* waiting on their paca spinloops. */
- __secondary_hold_spinloop = 1;
+ ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
+ - PHYSICAL_START);
+ *ptr = 1;
mb();
DBG(" <- smp_release_cpus()\n");
Index: kexec/include/asm-ppc64/mmu.h
===================================================================
--- kexec.orig/include/asm-ppc64/mmu.h
+++ kexec/include/asm-ppc64/mmu.h
@@ -30,7 +30,8 @@
/* Location of cpu0's segment table */
#define STAB0_PAGE 0x6
-#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)
+#define STAB0_OFFSET (STAB0_PAGE << 12)
+#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
#ifndef __ASSEMBLY__
extern char initial_stab[];
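With the assumed 32 MB link address, the two mmu.h macros come out as below; head_64.S wants the plain image offset for its ". =" directive, while anything touching the real segment table wants STAB0_PHYS_ADDR (sketch only, values just for illustration):

#define STAB0_PAGE	0x6
#define STAB0_OFFSET	(STAB0_PAGE << 12)		/* 0x6000: offset within the kernel image */
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)	/* 0x2006000 with PHYSICAL_START = 0x2000000 */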