[PATCH v2] powerpc/kprobes: Blacklist functions running with MMU disabled on PPC32

Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Fri Mar 27 20:07:37 AEDT 2020


Christophe Leroy wrote:
> kprobe does not handle events happening in real mode, all
> functions running with MMU disabled have to be blacklisted.
> 
> As already done for PPC64, do it for PPC32.
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> ---
> v2:
> - Don't rename nonrecoverable as local, mark it noprobe instead.
> - Add missing linux/kprobes.h include in pq2.c
> ---
>  arch/powerpc/include/asm/ppc_asm.h           | 10 +++
>  arch/powerpc/kernel/cpu_setup_6xx.S          |  4 +-
>  arch/powerpc/kernel/entry_32.S               | 65 ++++++++------------
>  arch/powerpc/kernel/fpu.S                    |  1 +
>  arch/powerpc/kernel/idle_6xx.S               |  2 +-
>  arch/powerpc/kernel/idle_e500.S              |  2 +-
>  arch/powerpc/kernel/l2cr_6xx.S               |  2 +-
>  arch/powerpc/kernel/misc.S                   |  2 +
>  arch/powerpc/kernel/misc_32.S                |  4 +-
>  arch/powerpc/kernel/swsusp_32.S              |  6 +-
>  arch/powerpc/kernel/vector.S                 |  1 +
>  arch/powerpc/mm/book3s32/hash_low.S          | 38 ++++++------
>  arch/powerpc/mm/mem.c                        |  2 +
>  arch/powerpc/platforms/52xx/lite5200_sleep.S |  2 +
>  arch/powerpc/platforms/82xx/pq2.c            |  3 +
>  arch/powerpc/platforms/83xx/suspend-asm.S    |  1 +
>  arch/powerpc/platforms/powermac/cache.S      |  2 +
>  arch/powerpc/platforms/powermac/sleep.S      | 13 ++--
>  18 files changed, 86 insertions(+), 74 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
> index 6b03dff61a05..e8f34ba89497 100644
> --- a/arch/powerpc/include/asm/ppc_asm.h
> +++ b/arch/powerpc/include/asm/ppc_asm.h
> @@ -267,8 +267,18 @@ GLUE(.,name):
>  	.pushsection "_kprobe_blacklist","aw";		\
>  	PPC_LONG (entry) ;				\
>  	.popsection
> +#define _NOKPROBE_ENTRY(entry)				\
> +	_ASM_NOKPROBE_SYMBOL(entry)				\
> +	_ENTRY(entry)
> +#define _NOKPROBE_GLOBAL(entry)				\
> +	_ASM_NOKPROBE_SYMBOL(entry)				\
> +	_GLOBAL(entry)
>  #else
>  #define _ASM_NOKPROBE_SYMBOL(entry)
> +#define _NOKPROBE_ENTRY(entry)				\
> +	_ENTRY(entry)
> +#define _NOKPROBE_GLOBAL(entry)				\
> +	_GLOBAL(entry)
>  #endif

Michael has previously preferred not to include NOKPROBE variants of 
those macros, since he would like to see some cleanups there first:
https://patchwork.ozlabs.org/patch/696138/

> 
>  #define FUNC_START(name)	_GLOBAL(name)
> diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
> index f6517f67265a..1cb947268546 100644
> --- a/arch/powerpc/kernel/cpu_setup_6xx.S
> +++ b/arch/powerpc/kernel/cpu_setup_6xx.S
> @@ -276,7 +276,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
>   * in some 750 cpus where using a not yet initialized FPU register after
>   * power on reset may hang the CPU
>   */
> -_GLOBAL(__init_fpu_registers)
> +_NOKPROBE_GLOBAL(__init_fpu_registers)
>  	mfmsr	r10
>  	ori	r11,r10,MSR_FP
>  	mtmsr	r11
> @@ -381,7 +381,7 @@ _GLOBAL(__save_cpu_setup)
>   * restore CPU state as backed up by the previous
>   * function. This does not include cache setting
>   */
> -_GLOBAL(__restore_cpu_setup)
> +_NOKPROBE_GLOBAL(__restore_cpu_setup)
>  	/* Some CR fields are volatile, we back it up all */
>  	mfcr	r7
> 
> diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
> index 16af0d8d90a8..f788d586254d 100644
> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S
> @@ -44,24 +44,21 @@
>  	.align	12
> 
>  #ifdef CONFIG_BOOKE
> -	.globl	mcheck_transfer_to_handler
> -mcheck_transfer_to_handler:
> +_NOKPROBE_ENTRY(mcheck_transfer_to_handler)
>  	mfspr	r0,SPRN_DSRR0
>  	stw	r0,_DSRR0(r11)
>  	mfspr	r0,SPRN_DSRR1
>  	stw	r0,_DSRR1(r11)
>  	/* fall through */
> 
> -	.globl	debug_transfer_to_handler
> -debug_transfer_to_handler:
> +_NOKPROBE_ENTRY(debug_transfer_to_handler)
>  	mfspr	r0,SPRN_CSRR0
>  	stw	r0,_CSRR0(r11)
>  	mfspr	r0,SPRN_CSRR1
>  	stw	r0,_CSRR1(r11)
>  	/* fall through */
> 
> -	.globl	crit_transfer_to_handler
> -crit_transfer_to_handler:
> +_NOKPROBE_ENTRY(crit_transfer_to_handler)
>  #ifdef CONFIG_PPC_BOOK3E_MMU
>  	mfspr	r0,SPRN_MAS0
>  	stw	r0,MAS0(r11)
> @@ -97,8 +94,7 @@ crit_transfer_to_handler:
>  #endif
> 
>  #ifdef CONFIG_40x
> -	.globl	crit_transfer_to_handler
> -crit_transfer_to_handler:
> +_NOKPROBE_ENTRY(crit_transfer_to_handler)
>  	lwz	r0,crit_r10@l(0)
>  	stw	r0,GPR10(r11)
>  	lwz	r0,crit_r11@l(0)
> @@ -124,13 +120,11 @@ crit_transfer_to_handler:
>   * Note that we rely on the caller having set cr0.eq iff the exception
>   * occurred in kernel mode (i.e. MSR:PR = 0).
>   */
> -	.globl	transfer_to_handler_full
> -transfer_to_handler_full:
> +_NOKPROBE_ENTRY(transfer_to_handler_full)
>  	SAVE_NVGPRS(r11)
>  	/* fall through */
> 
> -	.globl	transfer_to_handler
> -transfer_to_handler:
> +_NOKPROBE_ENTRY(transfer_to_handler)
>  	stw	r2,GPR2(r11)
>  	stw	r12,_NIP(r11)
>  	stw	r9,_MSR(r11)
> @@ -194,8 +188,7 @@ transfer_to_handler:
>  	bt-	31-TLF_NAPPING,4f
>  	bt-	31-TLF_SLEEPING,7f
>  #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
> -	.globl transfer_to_handler_cont
> -transfer_to_handler_cont:
> +_NOKPROBE_ENTRY(transfer_to_handler_cont)
>  3:
>  	mflr	r9
>  	tovirt_novmstack r2, r2 	/* set r2 to current */
> @@ -297,6 +290,7 @@ reenable_mmu:
>   * On kernel stack overflow, load up an initial stack pointer
>   * and call StackOverflow(regs), which should not return.
>   */
> +_ASM_NOKPROBE_SYMBOL(stack_ovf)
>  stack_ovf:

The current convention is to add the NOKPROBE annotation at the _end_ of 
the associated function/symbol...

>  	/* sometimes we use a statically-allocated stack, which is OK. */
>  	lis	r12,_end@h
> @@ -460,6 +454,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
>  	lwz	r7,_NIP(r1)
>  	lwz	r2,GPR2(r1)
>  	lwz	r1,GPR1(r1)
> +syscall_exit_finish:
>  #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
>  	mtspr	SPRN_NRI, r0
>  #endif
> @@ -467,6 +462,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
>  	mtspr	SPRN_SRR1,r8
>  	SYNC
>  	RFI
> +_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)

... like so.

>  #ifdef CONFIG_44x
>  2:	li	r7,0
>  	iccci	r0,r0
> @@ -750,8 +746,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
>  	addi	r1,r1,INT_FRAME_SIZE
>  	blr
> 
> -	.globl	fast_exception_return
> -fast_exception_return:
> +_NOKPROBE_ENTRY(fast_exception_return)
>  #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
>  	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
>  	beq	1f			/* if not, we've got problems */
> @@ -780,8 +775,8 @@ fast_exception_return:
> 
>  #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
>  /* check if the exception happened in a restartable section */
> -1:	lis	r3,exc_exit_restart_end@ha
> -	addi	r3,r3,exc_exit_restart_end@l
> +1:	lis	r3,.Lexc_exit_restart_end@ha
> +	addi	r3,r3,.Lexc_exit_restart_end@l
>  	cmplw	r12,r3
>  #ifdef CONFIG_PPC_BOOK3S_601
>  	bge	2b
> @@ -1005,15 +1000,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
>  	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
>  	SYNC
>  	mtmsr	r10		/* clear the RI bit */
> -	.globl exc_exit_restart
> -exc_exit_restart:
> +_NOKPROBE_ENTRY(exc_exit_restart)
>  	lwz	r12,_NIP(r1)
>  	mtspr	SPRN_SRR0,r12
>  	mtspr	SPRN_SRR1,r9
>  	REST_4GPRS(9, r1)
>  	lwz	r1,GPR1(r1)
> -	.globl exc_exit_restart_end
> -exc_exit_restart_end:
> +.Lexc_exit_restart_end:
>  	SYNC
>  	RFI
> 
> @@ -1033,17 +1026,15 @@ exc_exit_restart_end:
>  	li	r10, 0
>  	stw	r10, 8(r1)
>  	REST_2GPRS(9, r1)
> -	.globl exc_exit_restart
> +_NOKPROBE_ENTRY(exc_exit_restart)
>  exc_exit_restart:
>  	lwz	r11,_NIP(r1)
>  	lwz	r12,_MSR(r1)
> -exc_exit_start:
>  	mtspr	SPRN_SRR0,r11
>  	mtspr	SPRN_SRR1,r12
>  	REST_2GPRS(11, r1)
>  	lwz	r1,GPR1(r1)
> -	.globl exc_exit_restart_end
> -exc_exit_restart_end:
> +.Lexc_exit_restart_end:

I think it would be good to break this into smaller patches to handle 
specific code paths, if possible. At the very least, it would be good to 
move changes to symbol visibility to a separate patch since this also 
changes the names printed in a backtrace.

- Naveen



More information about the Linuxppc-dev mailing list