[PATCH 09/14] powerpc/64: treat low kernel text as irqs soft-masked
Nicholas Piggin
npiggin at gmail.com
Tue Mar 16 09:03:57 AEDT 2021
Treat code below __end_soft_masked as soft-masked for the purpose
of alternate return. 64s already mostly does this for scv entry.
This will be used to exit from interrupts without disabling MSR[EE].
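As a rough illustration of the idea (not part of the patch; the helper name below is invented for this sketch, while __end_soft_masked, regs->nip, regs->msr, MSR_PR, IRQS_ALL_DISABLED and arch_irq_disabled_regs() are the real symbols used by the series): interrupt entry can compare the interrupted NIP against the __end_soft_masked symbol and, for kernel-mode interrupts landing below it, force the saved soft-mask state to "disabled" so arch_irq_disabled_regs(regs) gives the right answer.

	/* Sketch only: hypothetical helper, assuming the usual ppc64 headers */
	extern char __end_soft_masked[];	/* symbol added by this patch */

	static inline bool nip_in_implicitly_masked_text(struct pt_regs *regs)
	{
		/* Only interrupts taken from kernel mode can hit low kernel text */
		return !(regs->msr & MSR_PR) &&
		       regs->nip < (unsigned long)__end_soft_masked;
	}

	/* ... on entry, before relying on arch_irq_disabled_regs(regs): */
	if (nip_in_implicitly_masked_text(regs))
		regs->softe = IRQS_ALL_DISABLED;

The NMI entry hunk below open-codes exactly this fixup, and the book3e/book3s asm changes perform the equivalent NIP comparison before deciding whether to take the masked-interrupt path.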
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
arch/powerpc/include/asm/interrupt.h | 8 ++++++++
arch/powerpc/kernel/exceptions-64e.S | 12 +++++++++++-
arch/powerpc/kernel/exceptions-64s.S | 3 ++-
arch/powerpc/kernel/interrupt_64.S | 6 +++++-
4 files changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 5cdbd3630254..8796eb4630c9 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -72,6 +72,10 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
*/
if (TRAP(regs) != 0x700)
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ BUG_ON(regs->nip < (unsigned long)__end_soft_masked);
+ /* Move this under a debugging check */
+ if (arch_irq_disabled_regs(regs))
+ BUG_ON(search_kernel_restart_table(regs->nip));
}
#endif
@@ -147,6 +151,10 @@ static inline bool nmi_disables_ftrace(struct pt_regs *regs)
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
+ /* Ensure arch_irq_disabled_regs(regs) looks right. */
+ if (!(regs->msr & MSR_PR) && regs->nip < (unsigned long)__end_soft_masked)
+ regs->softe = IRQS_ALL_DISABLED;
+
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 69d0d63cee85..87fe307b4da8 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -342,7 +342,17 @@ ret_from_mc_except:
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
- bne masked_interrupt_book3e_##n
+ bne masked_interrupt_book3e_##n; \
+ /* Kernel code below __end_soft_masked is implicitly masked */ \
+ andi. r10,r11,MSR_PR; \
+ bne 1f; /* user -> not masked */ \
+ std r14,PACA_EXGEN+EX_R14(r13); \
+ LOAD_REG_IMMEDIATE_SYM(r14, r10, __end_soft_masked); \
+ mfspr r10,SPRN_SRR0; \
+ cmpld r10,r14; \
+ ld r14,PACA_EXGEN+EX_R14(r13); \
+ blt masked_interrupt_book3e_##n; \
+1:
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 32b11431ac4a..bd0c82ac9de5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -514,8 +514,9 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
/* Kernel code running below __end_interrupts is implicitly
* soft-masked */
- LOAD_HANDLER(r10, __end_interrupts)
+ LOAD_HANDLER(r10, __end_soft_masked)
cmpld r11,r10
+
li r10,IMASK
blt- 1f
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 8a2b8188108b..c6a0349dde59 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -642,4 +642,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr
-#endif
+#endif /* CONFIG_PPC_BOOK3S */
+
+ .globl __end_soft_masked
+__end_soft_masked:
+DEFINE_FIXED_SYMBOL(__end_soft_masked)
--
2.23.0