[PATCH] tracing: Have all levels of checks prevent recursion
Steven Rostedt
rostedt at goodmis.org
Sat Oct 16 05:20:33 AEDT 2021
On Fri, 15 Oct 2021 20:04:29 +0200
Peter Zijlstra <peterz at infradead.org> wrote:
> On Fri, Oct 15, 2021 at 01:58:06PM -0400, Steven Rostedt wrote:
> > Something like this:
>
> I think having one copy of that in a header is better than having 3
> copies. But yes, something along them lines.
I was just about to ask you about this patch ;-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 4d244e295e85..a6ae329aa654 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -74,6 +74,27 @@
*/
#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ * 0 - normal context
+ * 1 - softirq context
+ * 2 - hardirq context
+ * 3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ level += !!(pc & (NMI_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ return level;
+}
+
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index 41f5bfd9e93f..018a04381556 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -118,12 +118,7 @@ enum {
static __always_inline int trace_get_context_bit(void)
{
- unsigned long pc = preempt_count();
- unsigned char bit = 0;
-
- bit += !!(pc & (NMI_MASK));
- bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
- bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+ unsigned char bit = interrupt_context_level();
return TRACE_CTX_NORMAL - bit;
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 228801e20788..c91711f20cf8 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -206,11 +206,7 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
static inline int get_recursion_context(int *recursion)
{
unsigned int pc = preempt_count();
- unsigned char rctx = 0;
-
- rctx += !!(pc & (NMI_MASK));
- rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
- rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+ unsigned char rctx = interrupt_context_level();
if (recursion[rctx])
return -1;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 15d4380006e3..f6520d0a4c8c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3167,12 +3167,7 @@ static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned int val = cpu_buffer->current_context;
- unsigned long pc = preempt_count();
- int bit = 0;
-
- bit += !!(pc & (NMI_MASK));
- bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
- bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+ int bit = interrupt_context_level();
bit = RB_CTX_NORMAL - bit;
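
For reference, below is a small stand-alone userspace sketch of the counting trick the new helper relies on (not part of the patch). The mask values are assumptions that only roughly mirror the kernel's preempt_count() bit layout, and the recursion[] array is just there to illustrate the pattern that get_recursion_context() and trace_recursive_lock() share: each test's mask is a superset of the previous one, so the sum lands on 0 for normal context, 1 for softirq, 2 for hardirq and 3 for NMI.

/*
 * Stand-alone illustration (not part of the patch).  The mask values are
 * assumptions chosen to roughly mirror the kernel's preempt_count() bit
 * layout; the real definitions live in <linux/preempt.h>.
 */
#include <stdio.h>

#define SOFTIRQ_OFFSET	0x00000100UL	/* assumed: serving-softirq bit  */
#define HARDIRQ_MASK	0x000f0000UL	/* assumed: hardirq nesting bits */
#define NMI_MASK	0x00f00000UL	/* assumed: NMI nesting bits     */

/*
 * Same counting trick as interrupt_context_level(): each mask is a superset
 * of the one before it, so NMI context makes all three tests true (3),
 * hardirq two (2), softirq one (1), and plain task context none (0).
 */
static unsigned char context_level(unsigned long pc)
{
	unsigned char level = 0;

	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}

int main(void)
{
	/* One flag per context, like the per-context recursion counters. */
	int recursion[4] = { 0 };
	unsigned long pc = 0x00010000UL;	/* pretend: one hardirq level */
	unsigned char rctx = context_level(pc);

	if (recursion[rctx]) {
		puts("already recording in this context, bail out");
		return 1;
	}
	recursion[rctx] = 1;			/* enter: mark context busy   */
	printf("interrupt context level: %u\n", rctx);
	recursion[rctx] = 0;			/* exit: clear on the way out */
	return 0;
}

Since a given context can only be interrupted by the contexts above it, a single flag per level is enough to detect recursion within the same context, which is why all three call sites can share this one helper.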