[RFC PATCH] powerpc/ftrace: Refactoring and support for -fpatchable-function-entry

Naveen N Rao naveen at kernel.org
Tue May 23 19:31:39 AEST 2023


Christophe Leroy wrote:
> 
> That's better, but still more time than original implementation:
> 
> +20% to activate function tracer (was +40% with your RFC)
> +21% to activate nop tracer (was +24% with your RFC)
> 
> perf record (without strict kernel rwx) :
> 
>      17.75%  echo     [kernel.kallsyms]   [k] ftrace_check_record
>       9.76%  echo     [kernel.kallsyms]   [k] ftrace_replace_code
>       6.53%  echo     [kernel.kallsyms]   [k] patch_instruction
>       5.21%  echo     [kernel.kallsyms]   [k] __ftrace_hash_rec_update
>       4.26%  echo     [kernel.kallsyms]   [k] ftrace_get_addr_curr
>       4.18%  echo     [kernel.kallsyms]   [k] ftrace_get_call_inst.isra.0
>       3.45%  echo     [kernel.kallsyms]   [k] ftrace_get_addr_new
>       3.08%  echo     [kernel.kallsyms]   [k] function_trace_call
>       2.20%  echo     [kernel.kallsyms]   [k] __rb_reserve_next.constprop.0
>       2.05%  echo     [kernel.kallsyms]   [k] copy_page
>       1.91%  echo     [kernel.kallsyms]   [k] ftrace_create_branch_inst.constprop.0
>       1.83%  echo     [kernel.kallsyms]   [k] ftrace_rec_iter_next
>       1.83%  echo     [kernel.kallsyms]   [k] rb_commit
>       1.69%  echo     [kernel.kallsyms]   [k] ring_buffer_lock_reserve
>       1.54%  echo     [kernel.kallsyms]   [k] trace_function
>       1.39%  echo     [kernel.kallsyms]   [k] __call_rcu_common.constprop.0
>       1.25%  echo     ld-2.23.so          [.] do_lookup_x
>       1.17%  echo     [kernel.kallsyms]   [k] ftrace_rec_iter_record
>       1.03%  echo     [kernel.kallsyms]   [k] unmap_page_range
>       0.95%  echo     [kernel.kallsyms]   [k] flush_dcache_icache_page
>       0.95%  echo     [kernel.kallsyms]   [k] ftrace_lookup_ip

Ok, I simplified this further, and this is as close to the previous fast 
path as we can get (applies atop the original RFC). The only remaining 
difference is the use of the ftrace_rec iterator, sketched below.
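
For context, the ftrace_rec iterator referred to above is the generic 
ftrace facility for walking all patchable call sites (dyn_ftrace 
records). A minimal sketch of the pattern, based on the 
for_ftrace_rec_iter() macro from include/linux/ftrace.h:

	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	/*
	 * for_ftrace_rec_iter() expands to a for loop over
	 * ftrace_rec_iter_start()/ftrace_rec_iter_next()
	 */
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/* rec->ip is the address of the patch site */
	}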


- Naveen

---
 arch/powerpc/kernel/trace/ftrace.c | 55 +++++++++++++-----------------
 1 file changed, 23 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index a9d57f338bd78e..4937651ecfafb0 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -96,13 +96,18 @@ static unsigned long find_ftrace_tramp(unsigned long ip)
 
 static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
 {
-	struct module *mod = rec->arch.mod;
 	unsigned long ip = rec->ip;
 	unsigned long stub;
 
 	if (is_offset_in_branch_range(addr - ip)) {
 		/* Within range */
 		stub = addr;
+#ifdef CONFIG_MODULES
+	} else if (rec->arch.mod) {
+		/* Module code would be going to one of the module stubs */
+		stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
+							       rec->arch.mod->arch.tramp_regs);
+#endif
 	} else if (core_kernel_text(ip)) {
 		/* We would be branching to one of our ftrace stubs */
 		stub = find_ftrace_tramp(ip);
@@ -110,9 +115,6 @@ static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_
 			pr_err("0x%lx: No ftrace stubs reachable\n", ip);
 			return -EINVAL;
 		}
-	} else if (IS_ENABLED(CONFIG_MODULES)) {
-		/* Module code would be going to one of the module stubs */
-		stub = (addr == (unsigned long)ftrace_caller ? mod->arch.tramp : mod->arch.tramp_regs);
 	} else {
 		return -EINVAL;
 	}
@@ -159,7 +161,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long ad
 
 void ftrace_replace_code(int enable)
 {
-	ppc_inst_t old, new, nop_inst, call_inst, new_call_inst;
+	ppc_inst_t old, new, call_inst, new_call_inst;
+	ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
 	unsigned long ip, new_addr, addr;
 	struct ftrace_rec_iter *iter;
 	struct dyn_ftrace *rec;
@@ -167,53 +170,41 @@ void ftrace_replace_code(int enable)
 
 	for_ftrace_rec_iter(iter) {
 		rec = ftrace_rec_iter_record(iter);
-		update = ftrace_test_record(rec, enable);
 		ip = rec->ip;
-		new_addr = 0;
+
+		if ((rec->flags & FTRACE_FL_DISABLED) && !(rec->flags & FTRACE_FL_ENABLED))
+			continue;
+
+		addr = ftrace_get_addr_curr(rec);
+		new_addr = ftrace_get_addr_new(rec);
+		update = ftrace_update_record(rec, enable);
 
 		switch (update) {
 		case FTRACE_UPDATE_IGNORE:
 		default:
 			continue;
 		case FTRACE_UPDATE_MODIFY_CALL:
-			addr = ftrace_get_addr_curr(rec);
-			new_addr = ftrace_get_addr_new(rec);
-			break;
-		case FTRACE_UPDATE_MAKE_CALL:
-			addr = ftrace_get_addr_new(rec);
-			break;
-		case FTRACE_UPDATE_MAKE_NOP:
-			addr = ftrace_get_addr_curr(rec);
-			break;
-		}
-		nop_inst = ppc_inst(PPC_RAW_NOP());
-		ret = ftrace_get_call_inst(rec, addr, &call_inst);
-		if (!ret && new_addr)
 			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
-		if (ret)
-			goto out;
-
-		switch (update) {
-		case FTRACE_UPDATE_MODIFY_CALL:
+			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
 			old = call_inst;
 			new = new_call_inst;
 			break;
+		case FTRACE_UPDATE_MAKE_NOP:
+			ret = ftrace_get_call_inst(rec, addr, &call_inst);
+			old = call_inst;
+			new = nop_inst;
+			break;
 		case FTRACE_UPDATE_MAKE_CALL:
+			ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
 			old = nop_inst;
 			new = call_inst;
 			break;
-		case FTRACE_UPDATE_MAKE_NOP:
-			new = nop_inst;
-			old = call_inst;
-			break;
 		}
 
-		/* old == new when going to .ftrace.text stub for modify */
-		if (!ppc_inst_equal(old, new))
+		if (!ret)
 			ret = ftrace_modify_code(ip, old, new);
 		if (ret)
 			goto out;
-		ftrace_update_record(rec, enable);
 	}
 
 out:
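
To make the net effect easier to review, the per-record loop in 
ftrace_replace_code() with the above applied reads roughly as follows 
(abridged; the MAKE_NOP and MAKE_CALL cases are as in the hunk above):

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		ip = rec->ip;

		/* Skip records that are disabled and not currently traced */
		if ((rec->flags & FTRACE_FL_DISABLED) && !(rec->flags & FTRACE_FL_ENABLED))
			continue;

		/* Resolve both addresses before the record's flags are updated */
		addr = ftrace_get_addr_curr(rec);
		new_addr = ftrace_get_addr_new(rec);
		update = ftrace_update_record(rec, enable);

		switch (update) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;
		case FTRACE_UPDATE_MODIFY_CALL:
			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = new_call_inst;
			break;
		/* ... FTRACE_UPDATE_MAKE_NOP and _MAKE_CALL as above ... */
		}

		if (!ret)
			ret = ftrace_modify_code(ip, old, new);
		if (ret)
			goto out;
	}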


