[PATCH 3/8] powerpc: Mark functions called inside uaccess blocks w/ 'notrace'

Christophe Leroy christophe.leroy at csgroup.eu
Fri Oct 16 18:02:20 AEDT 2020



On 15/10/2020 at 17:01, Christopher M. Riedl wrote:
> Functions called between user_*_access_begin() and user_*_access_end()
> should be either inlined or marked 'notrace' to prevent leaving
> userspace access exposed. Mark any such functions relevant to signal
> handling so that subsequent patches can call them inside uaccess blocks.
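
The pattern this enables is roughly the following (a sketch with
illustrative names, error path omitted — not the actual signal code):

        if (!user_write_access_begin(frame, sizeof(*frame)))
                return -EFAULT;
        /* The user access window (KUAP) is open from here on; a traced
         * call would run the ftrace handler with it still open. */
        flush_fp_to_thread(current);            /* hence 'notrace' */
        unsafe_put_user(regs->nip, &frame->nip, failed);
        user_write_access_end();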

Is marking them 'notrace' enough? When I enable KASAN, I still see
KASAN calls in those functions.
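
'notrace' only removes the ftrace entry code (the -pg/mcount call);
with CONFIG_KASAN=y the compiler still instruments every memory access,
so the helper keeps making out-of-line calls with the user access
window open. Roughly (a sketch; the exact runtime call depends on the
access size):

        static void notrace flush_fp_to_thread(struct task_struct *tsk)
        {
                /* compiler-inserted KASAN check: an out-of-line call
                 * that runs with user access still open */
                __asan_load8((unsigned long)&tsk->thread.regs);
                if (tsk->thread.regs)
                        giveup_fpu(tsk);
        }

So on top of 'notrace', those helpers would presumably also need
__no_sanitize_address (or KASAN_SANITIZE := n on the file) to be safe
inside a uaccess block.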

In my 32-bit series, I reordered things so that all those calls happen
before user_*_access_begin(); can't you do the same on PPC64? (See
https://patchwork.ozlabs.org/project/linuxppc-dev/patch/f6eac65781b4a57220477c8864bca2b57f29a5d5.1597770847.git.christophe.leroy@csgroup.eu/)
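
i.e. roughly (a sketch, frame layout illustrative):

        /* Do all out-of-line work before opening the window... */
        flush_fp_to_thread(current);    /* free to be traced/instrumented */

        if (!user_write_access_begin(frame, sizeof(*frame)))
                return -EFAULT;
        /* ...and keep only inline unsafe_*_user() accessors inside it. */
        unsafe_put_user(regs->nip, &frame->nip, failed);
        user_write_access_end();
        return 0;

failed:
        user_write_access_end();
        return -EFAULT;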

Christophe

> 
> Signed-off-by: Christopher M. Riedl <cmr at codefail.de>
> ---
>   arch/powerpc/kernel/process.c | 20 ++++++++++----------
>   arch/powerpc/mm/mem.c         |  4 ++--
>   2 files changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index ba2c987b8403..bf5d9654bd2c 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -84,7 +84,7 @@ extern unsigned long _get_SP(void);
>    */
>   bool tm_suspend_disabled __ro_after_init = false;
>   
> -static void check_if_tm_restore_required(struct task_struct *tsk)
> +static void notrace check_if_tm_restore_required(struct task_struct *tsk)
>   {
>   	/*
>   	 * If we are saving the current thread's registers, and the
> @@ -151,7 +151,7 @@ void notrace __msr_check_and_clear(unsigned long bits)
>   EXPORT_SYMBOL(__msr_check_and_clear);
>   
>   #ifdef CONFIG_PPC_FPU
> -static void __giveup_fpu(struct task_struct *tsk)
> +static void notrace __giveup_fpu(struct task_struct *tsk)
>   {
>   	unsigned long msr;
>   
> @@ -163,7 +163,7 @@ static void __giveup_fpu(struct task_struct *tsk)
>   	tsk->thread.regs->msr = msr;
>   }
>   
> -void giveup_fpu(struct task_struct *tsk)
> +void notrace giveup_fpu(struct task_struct *tsk)
>   {
>   	check_if_tm_restore_required(tsk);
>   
> @@ -177,7 +177,7 @@ EXPORT_SYMBOL(giveup_fpu);
>    * Make sure the floating-point register state in the
>    * the thread_struct is up to date for task tsk.
>    */
> -void flush_fp_to_thread(struct task_struct *tsk)
> +void notrace flush_fp_to_thread(struct task_struct *tsk)
>   {
>   	if (tsk->thread.regs) {
>   		/*
> @@ -234,7 +234,7 @@ static inline void __giveup_fpu(struct task_struct *tsk) { }
>   #endif /* CONFIG_PPC_FPU */
>   
>   #ifdef CONFIG_ALTIVEC
> -static void __giveup_altivec(struct task_struct *tsk)
> +static void notrace __giveup_altivec(struct task_struct *tsk)
>   {
>   	unsigned long msr;
>   
> @@ -246,7 +246,7 @@ static void __giveup_altivec(struct task_struct *tsk)
>   	tsk->thread.regs->msr = msr;
>   }
>   
> -void giveup_altivec(struct task_struct *tsk)
> +void notrace giveup_altivec(struct task_struct *tsk)
>   {
>   	check_if_tm_restore_required(tsk);
>   
> @@ -285,7 +285,7 @@ EXPORT_SYMBOL(enable_kernel_altivec);
>    * Make sure the VMX/Altivec register state in the
>    * the thread_struct is up to date for task tsk.
>    */
> -void flush_altivec_to_thread(struct task_struct *tsk)
> +void notrace flush_altivec_to_thread(struct task_struct *tsk)
>   {
>   	if (tsk->thread.regs) {
>   		preempt_disable();
> @@ -300,7 +300,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
>   #endif /* CONFIG_ALTIVEC */
>   
>   #ifdef CONFIG_VSX
> -static void __giveup_vsx(struct task_struct *tsk)
> +static void notrace __giveup_vsx(struct task_struct *tsk)
>   {
>   	unsigned long msr = tsk->thread.regs->msr;
>   
> @@ -317,7 +317,7 @@ static void __giveup_vsx(struct task_struct *tsk)
>   		__giveup_altivec(tsk);
>   }
>   
> -static void giveup_vsx(struct task_struct *tsk)
> +static void notrace giveup_vsx(struct task_struct *tsk)
>   {
>   	check_if_tm_restore_required(tsk);
>   
> @@ -352,7 +352,7 @@ void enable_kernel_vsx(void)
>   }
>   EXPORT_SYMBOL(enable_kernel_vsx);
>   
> -void flush_vsx_to_thread(struct task_struct *tsk)
> +void notrace flush_vsx_to_thread(struct task_struct *tsk)
>   {
>   	if (tsk->thread.regs) {
>   		preempt_disable();
> diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
> index ddc32cc1b6cf..da2345a2abc6 100644
> --- a/arch/powerpc/mm/mem.c
> +++ b/arch/powerpc/mm/mem.c
> @@ -378,7 +378,7 @@ static inline bool flush_coherent_icache(unsigned long addr)
>    * @start: the start address
>    * @stop: the stop address (exclusive)
>    */
> -static void invalidate_icache_range(unsigned long start, unsigned long stop)
> +static void notrace invalidate_icache_range(unsigned long start, unsigned long stop)
>   {
>   	unsigned long shift = l1_icache_shift();
>   	unsigned long bytes = l1_icache_bytes();
> @@ -402,7 +402,7 @@ static void invalidate_icache_range(unsigned long start, unsigned long stop)
>    * @start: the start address
>    * @stop: the stop address (exclusive)
>    */
> -void flush_icache_range(unsigned long start, unsigned long stop)
> +void notrace flush_icache_range(unsigned long start, unsigned long stop)
>   {
>   	if (flush_coherent_icache(start))
>   		return;
> 

