[PATCH v6 05/25] powerpc/32: Clarify interrupt restores with REST_GPR macro in entry_32.S
Rohan McLure
rmclure at linux.ibm.com
Wed Sep 21 16:55:45 AEST 2022
Restoring the register state of the interrupted thread involves issuing
a large number of predictable loads from the kernel stack frame. Use the
REST_GPR{,S} macros to clearly signal when this is happening, and group
the restores together at the end of the interrupt handler where the
saved value is not consumed earlier in the handler code.
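For reference, these helpers change no instructions, only readability: on
ppc32 they reduce to the same lwz loads from the pt_regs save area that they
replace. A rough sketch of the iterator behind them (the in-tree definitions
in arch/powerpc/include/asm/ppc_asm.h are preprocessor macros over a shared
save/restore iterator, so the exact plumbing differs):

	/* Simplified sketch, not the verbatim ppc_asm.h definition: on
	 * ppc32 the GPRn save slot sits at GPR0 + 4*n in the frame. */
	.macro rest_gprs start, end, base
	.Lreg=\start
	.rept (\end - \start + 1)
	lwz	.Lreg, GPR0 + 4 * .Lreg(\base)	/* reload saved rN from the frame */
	.Lreg=.Lreg+1
	.endr
	.endm

	/* REST_GPR(n, base) is effectively REST_GPRS(n, n, base), so e.g.
	 *   REST_GPR(2, r11)     ->  lwz r2,GPR2(r11)
	 *   REST_GPRS(3, 12, r1) ->  lwz r3,GPR3(r1) ... lwz r12,GPR12(r1)
	 */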
Signed-off-by: Rohan McLure <rmclure at linux.ibm.com>
Reported-by: Christophe Leroy <christophe.leroy at csgroup.eu>
Reviewed-by: Nicholas Piggin <npiggin at gmail.com>
---
V3: New patch.
V4: Minimise restores in the unrecoverable window between
restoring SRR0/1 and return from interrupt.
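    Context for the V4 change, not part of the patch: once SRR0/SRR1 hold
    the return NIP/MSR, any exception taken before the rfi would clobber
    them, so that window is kept as short as possible and only the loads
    that must come last stay inside it, in particular the restore of the
    register used to address the frame. A simplified sketch of the shape
    of the fast_exception_return tail, assuming (as in the surrounding
    code not quoted in the hunk below) that r9 and r12 have already been
    loaded with the saved MSR and NIP:

	mtspr	SPRN_SRR1,r9		/* from here until rfi, another      */
	mtspr	SPRN_SRR0,r12		/* exception would clobber SRR0/SRR1 */
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)		/* r11 addresses the frame, so it is */
	rfi				/* the last register restored        */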
---
arch/powerpc/kernel/entry_32.S | 33 +++++++++++++-------------------
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 44dfce9a60c5..e4b694cebc44 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -68,7 +68,7 @@ prepare_transfer_to_handler:
lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
rlwinm r9,r9,0,~MSR_EE
lwz r12,_LINK(r11) /* and return to address in LR */
- lwz r2, GPR2(r11)
+ REST_GPR(2, r11)
b fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
@@ -144,7 +144,7 @@ ret_from_syscall:
lwz r7,_NIP(r1)
lwz r8,_MSR(r1)
cmpwi r3,0
- lwz r3,GPR3(r1)
+ REST_GPR(3, r1)
syscall_exit_finish:
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
@@ -152,8 +152,8 @@ syscall_exit_finish:
bne 3f
mtcr r5
-1: lwz r2,GPR2(r1)
- lwz r1,GPR1(r1)
+1: REST_GPR(2, r1)
+ REST_GPR(1, r1)
rfi
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
@@ -165,10 +165,8 @@ syscall_exit_finish:
REST_NVGPRS(r1)
mtctr r4
mtxer r5
- lwz r0,GPR0(r1)
- lwz r3,GPR3(r1)
- REST_GPRS(4, 11, r1)
- lwz r12,GPR12(r1)
+ REST_GPR(0, r1)
+ REST_GPRS(3, 12, r1)
b 1b
#ifdef CONFIG_44x
@@ -260,9 +258,8 @@ fast_exception_return:
beq 3f /* if not, we've got problems */
#endif
-2: REST_GPRS(3, 6, r11)
- lwz r10,_CCR(r11)
- REST_GPRS(1, 2, r11)
+2: lwz r10,_CCR(r11)
+ REST_GPRS(1, 6, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
@@ -277,7 +274,7 @@ fast_exception_return:
mtspr SPRN_SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
- lwz r11,GPR11(r11)
+ REST_GPR(11, r11)
rfi
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
@@ -454,9 +451,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
lwz r3,_MSR(r1); \
andi. r3,r3,MSR_PR; \
bne interrupt_return; \
- lwz r0,GPR0(r1); \
- lwz r2,GPR2(r1); \
- REST_GPRS(3, 8, r1); \
+ REST_GPR(0, r1); \
+ REST_GPRS(2, 8, r1); \
lwz r10,_XER(r1); \
lwz r11,_CTR(r1); \
mtspr SPRN_XER,r10; \
@@ -475,11 +471,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
lwz r12,_MSR(r1); \
mtspr exc_lvl_srr0,r11; \
mtspr exc_lvl_srr1,r12; \
- lwz r9,GPR9(r1); \
- lwz r12,GPR12(r1); \
- lwz r10,GPR10(r1); \
- lwz r11,GPR11(r1); \
- lwz r1,GPR1(r1); \
+ REST_GPRS(9, 12, r1); \
+ REST_GPR(1, r1); \
exc_lvl_rfi; \
b .; /* prevent prefetch past exc_lvl_rfi */
--
2.34.1