[PATCH v9 11/14] Add support to mask perf interrupts and replay them
Madhavan Srinivasan
maddy at linux.vnet.ibm.com
Thu Aug 3 13:49:15 AEST 2017
Two new bit mask fields are introduced: "IRQ_DISABLE_MASK_PMU" to support
masking of PMIs, and "IRQ_DISABLE_MASK_ALL" to aid interrupt-mask checking.
A couple of new irq #defines, "PACA_IRQ_PMI" and "SOFTEN_VALUE_0xf0*", are
added for use in the exception code to check for PMI interrupts.
In the masked_interrupt handler, for PMIs we clear MSR[EE] and return.
In __check_irq_replay(), the PMI interrupt is replayed by calling the
performance_monitor_common handler.
Signed-off-by: Madhavan Srinivasan <maddy at linux.vnet.ibm.com>
---
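[Editor's note, not part of the patch: a minimal userspace sketch of how the
new soft_disable_mask bits are meant to compose. The IRQ_DISABLE_MASK_* values
mirror the hw_irq.h hunk below (IRQ_DISABLE_MASK_ALL == LINUX | PMU); the
model_mask_save()/model_mask_restore() helpers and the printf are illustrative
stand-ins only, not the kernel's soft_disable_mask_set_return(),
arch_local_irq_restore() or __check_irq_replay().]

/* Userspace model of the soft_disable_mask bits added by this series.
 * The IRQ_DISABLE_MASK_* values mirror asm/hw_irq.h; everything else
 * here is a stand-in for illustration only, not kernel code.
 */
#include <stdio.h>

#define IRQ_DISABLE_MASK_NONE	0
#define IRQ_DISABLE_MASK_LINUX	1
#define IRQ_DISABLE_MASK_PMU	2
#define IRQ_DISABLE_MASK_ALL	3	/* LINUX | PMU */

static unsigned long soft_disable_mask = IRQ_DISABLE_MASK_NONE;

/* Stand-in for masking: OR in the new bits, hand back the old mask. */
static unsigned long model_mask_save(unsigned long bits)
{
	unsigned long old = soft_disable_mask;

	soft_disable_mask |= bits;
	return old;
}

/* Stand-in for restore: interrupts are only replayed and hard-enabled
 * once *no* mask bits remain set, which is what the "if (en) return;"
 * change in arch_local_irq_restore() below checks.
 */
static void model_mask_restore(unsigned long old)
{
	soft_disable_mask = old;
	if (soft_disable_mask)		/* any bits still disabled */
		return;
	printf("mask clear: replay pending PMI/EE/DEC, hard-enable MSR[EE]\n");
}

int main(void)
{
	/* Nested case from the __check_irq_replay() comment:
	 * local_irq_disable(); powerpc_irq_pmu_save();
	 * powerpc_irq_pmu_restore(); local_irq_restore();
	 */
	unsigned long linux_flags = model_mask_save(IRQ_DISABLE_MASK_LINUX);
	unsigned long pmu_flags = model_mask_save(IRQ_DISABLE_MASK_PMU);

	model_mask_restore(pmu_flags);	 /* LINUX still set: no replay yet */
	model_mask_restore(linux_flags); /* mask back to NONE: replay here */
	return 0;
}

The point is that IRQ_DISABLE_MASK_LINUX becomes one bit among several rather
than a boolean, so arch_local_irq_restore() only replays pending interrupts
and hard-enables once the whole mask drops back to IRQ_DISABLE_MASK_NONE, as
the "if (en)" hunk below does.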
arch/powerpc/include/asm/exception-64s.h | 5 +++++
arch/powerpc/include/asm/hw_irq.h | 5 ++++-
arch/powerpc/kernel/entry_64.S | 5 +++++
arch/powerpc/kernel/exceptions-64s.S | 6 ++++--
arch/powerpc/kernel/irq.c | 24 +++++++++++++++++++++++-
5 files changed, 41 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index e44b0fdb56f7..6f7685ccec28 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -513,6 +513,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe60 PACA_IRQ_HMI
#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
+#define SOFTEN_VALUE_0xf00 PACA_IRQ_PMI
#define __SOFTEN_TEST(h, vec, bitmask) \
lbz r10,PACASOFTIRQEN(r13); \
@@ -577,6 +578,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_NOTEST_PR, bitmask)
+#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
+ EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD);
+
#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask) \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV, bitmask)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c60922c77249..8c1057b20b48 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -26,12 +26,15 @@
#define PACA_IRQ_DEC 0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
#define PACA_IRQ_HMI 0x20
+#define PACA_IRQ_PMI 0x40
/*
* flags for paca->soft_disable_mask
*/
#define IRQ_DISABLE_MASK_NONE 0
#define IRQ_DISABLE_MASK_LINUX 1
+#define IRQ_DISABLE_MASK_PMU 2
+#define IRQ_DISABLE_MASK_ALL 3
#endif /* CONFIG_PPC64 */
@@ -131,7 +134,7 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
- flags = soft_disable_mask_set_return(IRQ_DISABLE_MASK_LINUX);\
+ flags = soft_disable_mask_set_return(IRQ_DISABLE_MASK_ALL);\
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \
trace_hardirqs_off(); \
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 845b37387e47..296c7b1a2bb1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -974,6 +974,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
addi r3,r1,STACK_FRAME_OVERHEAD;
bl do_IRQ
b ret_from_except
+1: cmpwi cr0,r3,0xf00
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl performance_monitor_exception
+ b ret_from_except
1: cmpwi cr0,r3,0xe60
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d653ff08e839..3666d27220f7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1073,8 +1073,8 @@ EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)
-EXC_REAL_OOL(performance_monitor, 0xf00, 0x20)
-EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x20, 0xf00)
+EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQ_DISABLE_MASK_PMU)
+EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQ_DISABLE_MASK_PMU)
TRAMP_KVM(PACA_EXGEN, 0xf00)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
@@ -1674,6 +1674,8 @@ _GLOBAL(__replay_interrupt)
beq decrementer_common
cmpwi r3,0x500
beq hardware_interrupt_common
+ cmpwi r3,0xf00
+ beq performance_monitor_common
BEGIN_FTR_SECTION
cmpwi r3,0xe80
beq h_doorbell_common_msgclr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 63f7838cf9a6..a9ba4d2b0610 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -168,6 +168,27 @@ notrace unsigned int __check_irq_replay(void)
if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
return 0x900;
+ /*
+ * In the masked_interrupt() handler for PMIs, we disable MSR[EE] and return.
+ * Replay it here.
+ *
+ * After this point, PMIs could still be disabled in certain
+ * scenarios like this one.
+ *
+ * local_irq_disable();
+ * powerpc_irq_pmu_save();
+ * powerpc_irq_pmu_restore();
+ * local_irq_restore();
+ *
+ * Even though powerpc_irq_pmu_restore() would have replayed the PMIs,
+ * if any, we have still not enabled EE and this will happen only at
+ * completion of the last *_restore in such nested cases. PMIs will
+ * once again start firing only when we have MSR[EE] enabled.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_PMI;
+ if (happened & PACA_IRQ_PMI)
+ return 0xf00;
+
/* Finally check if an external interrupt happened */
local_paca->irq_happened &= ~PACA_IRQ_EE;
if (happened & PACA_IRQ_EE)
@@ -207,7 +228,8 @@ notrace void arch_local_irq_restore(unsigned long en)
/* Write the new soft-enabled value */
soft_disable_mask_set(en);
- if (en == IRQ_DISABLE_MASK_LINUX)
+ /* any bits still disabled */
+ if (en)
return;
/*
* From this point onward, we can take interrupts, preempt,
--
2.7.4