[PATCH v2 11/18] powerpc/book3e: support CONFIG_RELOCATABLE
Scott Wood
scottwood@freescale.com
Wed Oct 7 14:48:15 AEDT 2015
From: Tiejun Chen <tiejun.chen@windriver.com>
book3e differs from book3s here: book3s keeps the exception vectors code in
head_64.S, because that code relies on absolute addressing, which is only
possible within that compilation unit. On book3e we instead have to obtain
the address of interrupt_base_book3e through the GOT.
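
Roughly, the two addressing schemes differ as follows (a sketch paraphrased
from the ppc64 macros in asm/ppc_asm.h; the exact definitions vary between
kernel versions):

	/* Absolute address, fixed at link time -- wrong once the kernel
	 * runs somewhere other than where it was linked:
	 */
	#define LOAD_REG_IMMEDIATE(reg, expr)		\
		lis	reg,(expr)@highest;		\
		ori	reg,reg,(expr)@higher;		\
		rldicr	reg,reg,32,31;			\
		oris	reg,reg,(expr)@h;		\
		ori	reg,reg,(expr)@l

	/* Address picked up from the GOT at run time, via the TOC pointer
	 * in r2 -- follows the kernel wherever it has been relocated to:
	 */
	#define LOAD_REG_ADDR(reg, name)		\
		ld	reg,name@got(r2)

LOAD_REG_IMMEDIATE bakes the link-time address into the instruction stream,
while LOAD_REG_ADDR loads the relocated address from the GOT, which is why
it is the right choice under CONFIG_RELOCATABLE.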
When booting a relocated kernel, we also have to program IVPR again,
properly, after the call to relocate(), so that it points at the relocated
vectors.
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
[scottwood: cleanup and ifdef removal]
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/include/asm/exception-64e.h |  4 ++--
 arch/powerpc/kernel/exceptions-64e.S     |  9 +++++++--
 arch/powerpc/kernel/head_64.S            | 22 +++++++++++++++++++---
3 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a8b52b6..344fc43 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -204,8 +204,8 @@ exc_##label##_book3e:
#endif
#define SET_IVOR(vector_number, vector_offset) \
- li r3,vector_offset@l; \
- ori r3,r3,interrupt_base_book3e@l; \
+ LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+ ori r3,r3,vector_offset@l; \
mtspr SPRN_IVOR##vector_number,r3;
#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
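
For illustration, with the new macro a hypothetical SET_IVOR(2, 0x060)
would expand to roughly the following (assuming LOAD_REG_ADDR is the
GOT-based load sketched above; the vector number and offset are made up
for the example):

	ld	r3,interrupt_base_book3e@got(r2)	/* relocated vector base */
	ori	r3,r3,0x060@l				/* OR in the vector offset */
	mtspr	SPRN_IVOR2,r3				/* program the vector register */

Both the old li/ori pairing and this new sequence rely on the vector offset
not overlapping the low bits of interrupt_base_book3e, i.e. on the vectors
block being sufficiently aligned.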
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9d4a006..488e631 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1351,7 +1351,10 @@ skpinv: addi r6,r6,1 /* Increment */
* r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
*/
/* Now we branch the new virtual address mapped by this entry */
- LOAD_REG_IMMEDIATE(r6,2f)
+ bl 1f /* Find our address */
+1: mflr r6
+ addi r6,r6,(2f - 1b)
+ tovirt(r6,r6)
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr SPRN_SRR0,r6
@@ -1583,9 +1586,11 @@ _GLOBAL(book3e_secondary_thread_init)
mflr r28
b 3b
+ .globl init_core_book3e
init_core_book3e:
/* Establish the interrupt vector base */
- LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+ tovirt(r2,r2)
+ LOAD_REG_ADDR(r3, interrupt_base_book3e)
mtspr SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index a1e85ca..1b77956 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -457,12 +457,22 @@ __after_prom_start:
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET at highest /* compute virtual base of kernel */
sldi r25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+ tophys(r26,r26)
+#endif
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
1: mr r3,r25
bl relocate
+#if defined(CONFIG_PPC_BOOK3E)
+ /* IVPR needs to be set after relocation. */
+ bl init_core_book3e
+#endif
#endif
/*
@@ -490,12 +500,21 @@ __after_prom_start:
* variable __run_at_load, if it is set the kernel is treated as relocatable
* kernel, otherwise it will be moved to PHYSICAL_START
*/
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
cmplwi cr0,r7,1
bne 3f
+#ifdef CONFIG_PPC_BOOK3E
+ LOAD_REG_ADDR(r5, __end_interrupts)
+ LOAD_REG_ADDR(r11, _stext)
+ sub r5,r5,r11
+#else
/* just copy interrupts */
LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
b 5f
3:
#endif
@@ -514,9 +533,6 @@ __after_prom_start:
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
-#if defined(CONFIG_PPC_BOOK3E)
- tovirt(r26,r26)
-#endif
addis r5,r26,(p_end - _stext)@ha
ld r5,(p_end - _stext)@l(r5) /* get _end */
5: bl copy_and_flush /* copy the rest */
--
2.1.4