[PATCH] powerpc: Fixups for kernel linked at 32 MB

Michael Ellerman michael at ellerman.id.au
Tue Dec 6 08:49:00 EST 2005


There are a few places where we need to fix things up for the kernel to work
if it's linked at 32 MB:

 - platforms/powermac/smp.c
   To start secondary CPUs on pmac we patch the reset vector, which is fine,
   except that above 32 MB we don't have enough bits for an absolute branch;
   it needs to be relative (see the branch-encoding sketch below this list).
 - kernel/head_64.S
    - A few branches in the cpu hold code need to load the full target address
      and do a bctr.
    - after_prom_start needs to load PHYSICAL_START as the dest address, not 0.
     - The exception prolog needs to load the low word of the target address,
       not just the low halfword (see the address-math sketch after the
       sign-off).
     - Fix up handling of the initial stab address.
 - kernel/setup_64.c
   smp_release_cpus() needs to write 1 to the copy of the spinloop flag near
   physical 0, not the one at 32 MB (see the sketch after the diffstat).

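For reference, here is a rough standalone sketch (not the kernel's
create_branch(); the function and constant names are made up) of the
encoding problem: the I-form branch has a 24-bit LI field shifted left
by two, so it can only reach targets within a signed 26-bit range, i.e.
+/- 32 MB.  The old 0x48000002 constant is that opcode with the AA bit
set (an absolute "ba"), which can't name an address at or above 32 MB,
whereas a branch relative to the vector at 0x100 stays comfortably in
range.

#include <stdint.h>

#define PPC_INST_B      0x48000000u     /* opcode 18, AA=0, LK=0 */

/* Build a relative "b" (or "bl" if set_link) at 'addr' jumping to 'target'. */
static uint32_t make_relative_branch(unsigned long addr,
                                     unsigned long target, int set_link)
{
        long offset = (long)target - (long)addr;

        /* Must be word aligned and fit in the signed 26-bit reach. */
        if ((offset & 3) || offset < -0x02000000L || offset > 0x01FFFFFCL)
                return 0;       /* out of range, caller must cope */

        return PPC_INST_B | ((uint32_t)offset & 0x03FFFFFCu)
                          | (set_link ? 1u : 0u);
}
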
Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
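
The exception prolog change is easier to see as plain address math: the
prolog builds the 0xC... high half of the handler's virtual address by
clearing the low 32 bits of the paca pointer (clrrdi r12,r13,32) and
then ORs the label in.  Linked at 0 the label's offset fits in the low
halfword, so a single ori ...@l was enough; linked at 32 MB it doesn't,
hence the oris/ori pair under CONFIG_CRASH_DUMP.  A minimal C sketch of
the same arithmetic (illustrative names, not kernel code):

#include <stdint.h>

/* 'paca_ptr' is any kernel virtual address (0xC...); 'label_low' is the
 * low 32-bit word of the handler's link address. */
static uint64_t handler_vaddr(uint64_t paca_ptr, uint32_t label_low)
{
        uint64_t addr = paca_ptr & ~0xffffffffULL;      /* clrrdi r12,r13,32 */

        addr |= label_low & 0xffff0000u;        /* oris r12,r12,label@h */
        addr |= label_low & 0x0000ffffu;        /* ori  r12,r12,label@l */
        return addr;
}

With the kernel linked at 0 the oris term is always zero for these
handlers, which is why the single ori used to be sufficient.
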
 arch/powerpc/kernel/head_64.S         |   30 ++++++++++++++++++++++++------
 arch/powerpc/kernel/setup_64.c        |    5 ++++-
 arch/powerpc/platforms/powermac/smp.c |   16 +++++++---------
 include/asm-powerpc/mmu.h             |    3 ++-
 4 files changed, 37 insertions(+), 17 deletions(-)
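
The setup_64.c hunk in a nutshell: the held secondary CPUs spin on the
copy of __secondary_hold_spinloop that sits in low real memory, not on
the one inside the image at 32 MB, so smp_release_cpus() has to back
the symbol's linked address off by PHYSICAL_START before writing to it.
A tiny standalone demo of that arithmetic, with made-up constants
(LINEAR_MAP_BASE stands in for the 0xC... offset of the linear mapping
and 0x100 for the flag's offset within the image; the real values
differ):

#include <stdio.h>

#define LINEAR_MAP_BASE 0xC000000000000000ULL
#define PHYSICAL_START  0x02000000ULL           /* kernel loaded at 32 MB */

int main(void)
{
        unsigned long long sym_offset = 0x100;  /* hypothetical */

        /* Linked address of the flag inside the image at 32 MB ... */
        unsigned long long in_image = LINEAR_MAP_BASE + PHYSICAL_START
                                        + sym_offset;
        /* ... and the copy near physical 0 that the held CPUs poll. */
        unsigned long long low_copy = in_image - PHYSICAL_START;

        printf("image copy: virt %#llx, phys %#llx\n",
               in_image, in_image - LINEAR_MAP_BASE);
        printf("low copy  : virt %#llx, phys %#llx\n",
               low_copy, low_copy - LINEAR_MAP_BASE);
        return 0;
}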

Index: kexec/arch/powerpc/platforms/powermac/smp.c
===================================================================
--- kexec.orig/arch/powerpc/platforms/powermac/smp.c
+++ kexec/arch/powerpc/platforms/powermac/smp.c
@@ -753,14 +753,15 @@ static int __init smp_core99_probe(void)
 static void __devinit smp_core99_kick_cpu(int nr)
 {
 	unsigned int save_vector;
-	unsigned long new_vector;
-	unsigned long flags;
+	unsigned long target, flags;
 	volatile unsigned int *vector
 		 = ((volatile unsigned int *)(KERNELBASE+0x100));
 
 	if (nr < 0 || nr > 3)
 		return;
-	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
+
+	if (ppc_md.progress)
+		ppc_md.progress("smp_core99_kick_cpu", 0x346);
 
 	local_irq_save(flags);
 	local_irq_disable();
@@ -768,14 +769,11 @@ static void __devinit smp_core99_kick_cp
 	/* Save reset vector */
 	save_vector = *vector;
 
-	/* Setup fake reset vector that does	
+	/* Setup fake reset vector that does
 	 *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
 	 */
-	new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
-	*vector = 0x48000002 + new_vector - KERNELBASE;
-
-	/* flush data cache and inval instruction cache */
-	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
+	create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
 
 	/* Put some life in our friend */
 	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
Index: kexec/arch/powerpc/kernel/head_64.S
===================================================================
--- kexec.orig/arch/powerpc/kernel/head_64.S
+++ kexec/arch/powerpc/kernel/head_64.S
@@ -154,11 +154,15 @@ _GLOBAL(__secondary_hold)
 	bne	100b
 
 #ifdef CONFIG_HMT
-	b	.hmt_init
+	LOADADDR(r4, .hmt_init)
+	mtctr	r4
+	bctr
 #else
 #ifdef CONFIG_SMP
+	LOADADDR(r4, .pSeries_secondary_smp_init)
+	mtctr	r4
 	mr	r3,r24
-	b	.pSeries_secondary_smp_init
+	bctr
 #else
 	BUG_OPCODE
 #endif
@@ -200,6 +204,20 @@ exception_marker:
 #define EX_R3		64
 #define EX_LR		72
 
+/*
+ * We're short on space and time in the exception prolog, so we can't use
+ * the normal LOADADDR macro. Normally we just need the low halfword of the
+ * address, but for Kdump we need the whole low word.
+ */
+#ifdef CONFIG_CRASH_DUMP
+#define LOAD_HANDLER(reg, label)					\
+	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
+	ori	reg,reg,(label)@l;	/* .. and the rest */
+#else
+#define LOAD_HANDLER(reg, label)					\
+	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
+#endif
+
 #define EXCEPTION_PROLOG_PSERIES(area, label)				\
 	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
 	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
@@ -212,7 +230,7 @@ exception_marker:
 	clrrdi	r12,r13,32;		/* get high part of &label */	\
 	mfmsr	r10;							\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
-	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
+	LOAD_HANDLER(r12,label)						\
 	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
 	mtspr	SPRN_SRR0,r12;						\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
@@ -1348,7 +1366,7 @@ _GLOBAL(do_stab_bolted)
  * fixed address (the linker can't compute (u64)&initial_stab >>
  * PAGE_SHIFT).
  */
-	. = STAB0_PHYS_ADDR	/* 0x6000 */
+	. = STAB0_OFFSET	/* 0x6000 */
 	.globl initial_stab
 initial_stab:
 	.space	4096
@@ -1553,7 +1571,7 @@ _STATIC(__boot_from_prom)
 _STATIC(__after_prom_start)
 
 /*
- * We need to run with __start at physical address 0.
+ * We need to run with __start at physical address PHYSICAL_START.
  * This will leave some code in the first 256B of
  * real memory, which are reserved for software use.
  * The remainder of the first page is loaded with the fixed
@@ -1568,7 +1586,7 @@ _STATIC(__after_prom_start)
 	mr	r26,r3
 	SET_REG_TO_CONST(r27,KERNELBASE)
 
-	li	r3,0			/* target addr */
+	LOADADDR(r3, PHYSICAL_START)	/* target addr */
 
 	// XXX FIXME: Use phys returned by OF (r30)
 	add	r4,r27,r26 		/* source addr			 */
Index: kexec/arch/powerpc/kernel/setup_64.c
===================================================================
--- kexec.orig/arch/powerpc/kernel/setup_64.c
+++ kexec/arch/powerpc/kernel/setup_64.c
@@ -314,6 +314,7 @@ void early_setup_secondary(void)
 void smp_release_cpus(void)
 {
 	extern unsigned long __secondary_hold_spinloop;
+	unsigned long *ptr;
 
 	DBG(" -> smp_release_cpus()\n");
 
@@ -324,7 +325,9 @@ void smp_release_cpus(void)
 	 * This is useless but harmless on iSeries, secondaries are already
 	 * waiting on their paca spinloops. */
 
-	__secondary_hold_spinloop = 1;
+	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
+			- PHYSICAL_START);
+	*ptr = 1;
 	mb();
 
 	DBG(" <- smp_release_cpus()\n");
Index: kexec/include/asm-powerpc/mmu.h
===================================================================
--- kexec.orig/include/asm-powerpc/mmu.h
+++ kexec/include/asm-powerpc/mmu.h
@@ -33,7 +33,8 @@
 
 /* Location of cpu0's segment table */
 #define STAB0_PAGE	0x6
-#define STAB0_PHYS_ADDR	(STAB0_PAGE<<12)
+#define STAB0_OFFSET	(STAB0_PAGE << 12)
+#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
 
 #ifndef __ASSEMBLY__
 extern char initial_stab[];


