[PATCH 1/1] powerpc: Fix kuap warnings on lazy/full preemption with tracing
Shrikanth Hegde
sshegde at linux.ibm.com
Fri Jan 9 17:49:17 AEDT 2026
These KUAP warnings are often seen when tracing is enabled with
preempt=full or preempt=lazy, and they are easily hit.
How to trigger:
echo lazy > /sys/kernel/debug/sched/preempt
echo function > /sys/kernel/debug/tracing/current_tracer
stress-ng --class memory --all 1 -t 3
Bug: Write fault blocked by KUAP!
WARNING: [] arch/powerpc/mm/fault.c:231 at bad_kernel_fault.constprop.0+0x1a8/0x2c8,
CPU#9: stress-ng-vm-rw/5477
NIP [c00000000008cdec] bad_kernel_fault.constprop.0+0x1a8/0x2c8
Call Trace:
bad_kernel_fault.constprop.0+0x1a4/0x2c8 (unreliable)
___do_page_fault+0x688/0xa54
do_page_fault+0x30/0x70
data_access_common_virt+0x210/0x220
---- interrupt: 300 at __copy_tofrom_user_power7+0x410/0x7ac
NIP [c0000000000b3b44] __copy_tofrom_user_power7+0x410/0x7ac
LR [c0000000009a7d78] _copy_to_iter+0x134/0x9c4
Enabling CONFIG_PPC_KUAP_DEBUG=y prints the warning below as well.
WARNING: ./arch/powerpc/include/asm/book3s/64/kup.h:93 at _switch+0x80/0x12c,
CPU#9: stress-ng-vm-rw/5477
NIP [c000000000013ce4] _switch+0x80/0x12c
LR [c00000000001f968] __switch_to+0x148/0x230
Call Trace:
__switch_to+0x148/0x230
__schedule+0x270/0x700
preempt_schedule_notrace+0x64/0xd8
function_trace_call+0x180/0x204
ftrace_call+0x4/0x4c
enter_vmx_usercopy+0x10/0x74
__copy_tofrom_user_power7+0x278/0x7ac
_copy_to_iter+0x134/0x9c4
copy_page_to_iter+0xe4/0x1c4
process_vm_rw_single_vec.constprop.0+0x1cc/0x3b4
process_vm_rw_core.constprop.0+0x168/0x30c
process_vm_rw+0x128/0x184
system_call_exception+0x128/0x390
system_call_vectored_common+0x15c/0x2ec
enter_vmx_usercopy/exit_vmx_usercopy clearly must not call schedule();
doing so ends up corrupting the AMR register. When the function tracer
is enabled, the traced entry of enter_vmx_usercopy can run in
preemptible context, since the first thing enter_vmx_usercopy does is
preempt_disable(); likewise the traced exit of exit_vmx_usercopy may be
preemptible again. So mark both as notrace to avoid these bug reports.
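For illustration, a rough sketch of the entry path (simplified from
arch/powerpc/lib/vmx-helper.c; the annotations are mine and describe
where the traced prologue opens the preemption window):

int enter_vmx_usercopy(void)
{
	/*
	 * Without notrace, the ftrace callback runs here at the function
	 * prologue. With preempt=full/lazy the callback can end up in
	 * preempt_schedule_notrace() and switch tasks before the
	 * preempt_disable() below, while KUAP is still unlocked for the
	 * in-flight user copy, so _switch() sees an AMR that is not in
	 * the blocked state.
	 */
	if (in_interrupt())
		return 0;

	preempt_disable();
	pagefault_disable();
	enable_kernel_altivec();

	return 1;
}

exit_vmx_usercopy has the same problem in reverse, hence both are
marked notrace in the diff below.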
WARNING: [amr != AMR_KUAP_BLOCKED] ./arch/powerpc/include/asm/book3s/64/kup.h:293
at arch_local_irq_restore.part.0+0x1e8/0x224, CPU#15: stress-ng-pipe/11623
NIP [c000000000038830] arch_local_irq_restore.part.0+0x1e8/0x224
LR [c00000000003871c] arch_local_irq_restore.part.0+0xd4/0x224
Call Trace:
return_to_handler+0x0/0x4c (unreliable)
__rb_reserve_next+0x198/0x4f8
ring_buffer_lock_reserve+0x1a8/0x51c
trace_buffer_lock_reserve+0x30/0x80
__graph_entry.isra.0+0x118/0x140
function_graph_enter_regs+0x1ec/0x408
ftrace_graph_func+0x50/0xcc
ftrace_call+0x4/0x4c
enable_kernel_altivec+0x10/0xd0
enter_vmx_usercopy+0x58/0x74
return_to_handler+0x0/0x4c (__copy_tofrom_user_power7+0x278/0x7ac)
_copy_from_iter+0x134/0x9bc
copy_page_from_iter+0xd4/0x1a0
Since the AMR register isn't set to the BLOCKED state, warnings can be
seen whenever an unlock check is involved, which here gets triggered
via arch_local_irq_restore. So enable_kernel_altivec has to be marked
notrace as well, and similarly check_if_tm_restore_required and
giveup_altivec.
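For reference, the CONFIG_PPC_KUAP_DEBUG check that fires in
arch_local_irq_restore() boils down to something like the following (an
illustrative sketch, with a made-up helper name; the real assertion is
the one at book3s/64/kup.h:293 quoted in the warning):

/*
 * Illustrative sketch only (the helper name is hypothetical): with
 * CONFIG_PPC_KUAP_DEBUG=y, restoring interrupts while user access is
 * supposed to be locked expects the AMR to hold AMR_KUAP_BLOCKED.
 * A traced enable_kernel_altivec() lets the tracer reach this check
 * while the user-copy window is still open, so the WARN fires with
 * "amr != AMR_KUAP_BLOCKED".
 */
static inline void kuap_debug_assert_blocked(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}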
Signed-off-by: Shrikanth Hegde <sshegde at linux.ibm.com>
---
arch/powerpc/kernel/process.c | 10 +++++-----
arch/powerpc/lib/vmx-helper.c | 4 ++--
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a45fe147868b..7bf2fe3e5878 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -80,7 +80,7 @@
*/
bool tm_suspend_disabled __ro_after_init = false;
-static void check_if_tm_restore_required(struct task_struct *tsk)
+notrace static void check_if_tm_restore_required(struct task_struct *tsk)
{
/*
* If we are saving the current thread's registers, and the
@@ -98,7 +98,7 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
}
#else
-static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+static __always_inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
@@ -231,7 +231,7 @@ static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
-static void __giveup_altivec(struct task_struct *tsk)
+notrace static void __giveup_altivec(struct task_struct *tsk)
{
unsigned long msr;
@@ -243,7 +243,7 @@ static void __giveup_altivec(struct task_struct *tsk)
regs_set_return_msr(tsk->thread.regs, msr);
}
-void giveup_altivec(struct task_struct *tsk)
+notrace void giveup_altivec(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
@@ -253,7 +253,7 @@ void giveup_altivec(struct task_struct *tsk)
}
EXPORT_SYMBOL(giveup_altivec);
-void enable_kernel_altivec(void)
+notrace void enable_kernel_altivec(void)
{
unsigned long cpumsr;
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 54340912398f..a0c041c148e4 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -10,7 +10,7 @@
#include <linux/hardirq.h>
#include <asm/switch_to.h>
-int enter_vmx_usercopy(void)
+notrace int enter_vmx_usercopy(void)
{
if (in_interrupt())
return 0;
@@ -32,7 +32,7 @@ int enter_vmx_usercopy(void)
* This function must return 0 because we tail call optimise when calling
* from __copy_tofrom_user_power7 which returns 0 on success.
*/
-int exit_vmx_usercopy(void)
+notrace int exit_vmx_usercopy(void)
{
disable_kernel_altivec();
pagefault_enable();
--
2.47.3