[PATCH 14/14] powerpc/pseries: exceptions use short handler load again
Nicholas Piggin
nicholas.piggin at gmail.com
Thu Jul 21 16:44:13 AEST 2016
The addis generated by LOAD_HANDLER_4G is always 0, so it is safe to
use the shorter 64K handler load. Move the decrementer exception back inline.
Signed-off-by: Nick Piggin <npiggin at gmail.com>
---
arch/powerpc/include/asm/exception-64s.h | 4 ++--
arch/powerpc/kernel/exceptions-64s.S | 21 ++++++++++++---------
2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 06e2247..eaad38f 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -55,7 +55,7 @@
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER_4G(r12,label); \
+ LOAD_HANDLER_64K(r12,label); \
mtctr r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
li r10,MSR_RI; \
@@ -186,7 +186,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
ld r12,PACAKBASE(r13); /* get high part of &label */ \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER_4G(r12,label); \
+ LOAD_HANDLER_64K(r12,label); \
mtspr SPRN_##h##SRR0,r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
mtspr SPRN_##h##SRR1,r10; \
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c317faf..462bf67 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -262,7 +262,7 @@ BEGIN_FTR_SECTION
ori r11,r11,MSR_ME /* turn on ME bit */
ori r11,r11,MSR_RI /* turn on RI bit */
ld r12,PACAKBASE(r13) /* get high part of &label */
- LOAD_HANDLER_4G(r12, machine_check_handle_early)
+ LOAD_HANDLER_64K(r12, machine_check_handle_early)
1: mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r11
rfid
@@ -275,7 +275,7 @@ BEGIN_FTR_SECTION
addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
ld r11,PACAKMSR(r13)
ld r12,PACAKBASE(r13)
- LOAD_HANDLER_4G(r12, unrecover_mce)
+ LOAD_HANDLER_64K(r12, unrecover_mce)
li r10,MSR_ME
andc r11,r11,r10 /* Turn off MSR_ME */
b 1b
@@ -416,7 +416,7 @@ COMMON_HANDLER_BEGIN(machine_check_handle_early)
bne 2f
1: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
- LOAD_HANDLER_4G(r10,unrecover_mce)
+ LOAD_HANDLER_64K(r10,unrecover_mce)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
/*
@@ -510,7 +510,7 @@ COMMON_HANDLER_END(data_access_common)
mfspr r12,SPRN_SRR1; \
mfctr r11; \
ld r10,PACAKBASE(r13); \
- LOAD_HANDLER_4G(r10, slb_miss_realmode); \
+ LOAD_HANDLER_64K(r10, slb_miss_realmode); \
mtctr r10; \
bctr;
#else
@@ -577,7 +577,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
- LOAD_HANDLER_4G(r10,unrecov_slb)
+ LOAD_HANDLER_64K(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
@@ -729,7 +729,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
COMMON_HANDLER_END(fp_unavailable_common)
-VECTOR_HANDLER_REAL_OOL_MASKABLE(decrementer, 0x900, 0x980)
+VECTOR_HANDLER_REAL_MASKABLE(decrementer, 0x900, 0x980)
VECTOR_HANDLER_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
TRAMP_KVM(PACA_EXGEN, 0x900)
COMMON_HANDLER_ASYNC(decrementer_common, 0x900, timer_interrupt)
@@ -771,7 +771,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
#define SYSCALL_PSERIES_2_RFID \
mfspr r12,SPRN_SRR1 ; \
ld r10,PACAKBASE(r13) ; \
- LOAD_HANDLER_4G(r10, system_call_common) ; \
+ LOAD_HANDLER_64K(r10, system_call_common) ; \
mtspr SPRN_SRR0,r10 ; \
ld r10,PACAKMSR(r13) ; \
mtspr SPRN_SRR1,r10 ; \
@@ -794,7 +794,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
#define SYSCALL_PSERIES_2_DIRECT \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
- LOAD_HANDLER_4G(r12, system_call_common) ; \
+ LOAD_HANDLER_64K(r12, system_call_common) ; \
mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
@@ -1317,7 +1317,10 @@ USE_FIXED_SECTION(virt_trampolines)
* handlers, so that they are copied to real address 0x100 when running
* a relocatable kernel. This ensures they can be reached from the short
* trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
- * directly, without using LOAD_HANDLER_4G().
+ * directly, without using LOAD_HANDLER_*().
+ *
+ * This needs to be aligned according to copy_and_flush, which copies
+ * one cacheline at a time.
*/
.align 7
.globl __end_interrupts
--
2.8.1
More information about the Linuxppc-dev
mailing list