[RFC PATCH 09/15] powerpc/ftrace: Add separate ftrace_init_nop() with additional validation
Naveen N Rao
naveen at kernel.org
Thu Jun 8 02:55:24 AEST 2023
Currently, we validate the instructions around the ftrace location every
time we have to enable/disable ftrace. Introduce ftrace_init_nop() to
instead perform all of that validation once, during ftrace
initialization. This allows us to simply patch the necessary
instructions when enabling/disabling ftrace.
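
For reference (not part of this patch, sketched from memory of the
generic code): unless an architecture supplies its own ftrace_init_nop()
and advertises it via the ftrace_init_nop macro, the core falls back to
ftrace_make_nop() in include/linux/ftrace.h, roughly:

	/* generic fallback, used only when the arch does not
	 * #define ftrace_init_nop */
	#ifndef ftrace_init_nop
	static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
	{
		return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	}
	#endif

Defining the ftrace_init_nop macro in asm/ftrace.h (as done below) is
what makes the ftrace core pick up the powerpc implementation instead of
this fallback.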
Signed-off-by: Naveen N Rao <naveen at kernel.org>
---
arch/powerpc/include/asm/ftrace.h | 6 +++
arch/powerpc/kernel/trace/ftrace.c | 71 ++++++++++++++++++++++++++++++
2 files changed, 77 insertions(+)
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 1a5d365523e160..89fbae3caa1fc2 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -29,11 +29,17 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
unsigned long sp);
+struct module;
+struct dyn_ftrace;
struct dyn_arch_ftrace {
struct module *mod;
};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_need_init_nop() (true)
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+
struct ftrace_regs {
struct pt_regs regs;
};
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 278bf8e52b6e89..c0d185742c23ca 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -31,6 +31,16 @@
#define NUM_FTRACE_TRAMPS 2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
+static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
+{
+ ppc_inst_t op;
+
+ WARN_ON(!is_offset_in_branch_range(addr - ip));
+ create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
+
+ return op;
+}
+
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
@@ -597,6 +607,67 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
#endif
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ unsigned long addr, ip = rec->ip;
+ ppc_inst_t old, new;
+ int ret = 0;
+
+ /* Verify instructions surrounding the ftrace location */
+ if (IS_ENABLED(CONFIG_PPC32)) {
+ /* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
+ ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
+ if (!ret)
+ ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
+ } else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
+ /* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
+ ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_MFLR(_R0)));
+ if (ret) {
+ ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
+ ret |= ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ if (!core_kernel_text(ip)) {
+ if (!mod) {
+ pr_err("0x%lx: No module provided for non-kernel address\n", ip);
+ return -EFAULT;
+ }
+ rec->arch.mod = mod;
+ }
+
+ /* Nop-out the ftrace location */
+ new = ppc_inst(PPC_RAW_NOP());
+ addr = MCOUNT_ADDR;
+ if (is_offset_in_branch_range(addr - ip)) {
+ /* Within range */
+ old = ftrace_create_branch_inst(ip, addr, 1);
+ ret = ftrace_modify_code(ip, old, new);
+ } else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
+ /*
+ * We would be branching to a linker-generated stub, or to the module _mcount
+ * stub. Let's just confirm we have a 'bl' here.
+ */
+ ret = ftrace_read_inst(ip, &old);
+ if (ret)
+ return ret;
+ if (!is_bl_op(old)) {
+ pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
+ return -EINVAL;
+ }
+ ret = patch_instruction((u32 *)ip, new);
+ } else {
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
--
2.40.1