[PATCH 1/2] powerpc/64s/hash: add torture_slb kernel boot option to increase SLB faults

Aneesh Kumar K.V aneesh.kumar at linux.ibm.com
Mon May 4 17:27:06 AEST 2020


Nicholas Piggin <npiggin at gmail.com> writes:

> This option increases the number of SLB misses by limiting the number of
> kernel SLB entries, and by increasing flushing of cached lookaside
> information. This helps stress test difficult-to-hit paths in the kernel.
>
> Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
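
The boot option plumbing itself isn't in the hunks quoted here, but for
anyone following along, the usual shape is an early_param hook plus a
cheap predicate. A minimal sketch, assuming the "torture_slb" name from
the subject line (torture_slb_enabled and parse_torture_slb are
hypothetical names, and the real patch may well use a static key for the
fast path instead of a plain bool):

	#include <linux/init.h>

	/* hypothetical flag; a static key would avoid the load on hot paths */
	static bool torture_slb_enabled;

	static int __init parse_torture_slb(char *p)
	{
		torture_slb_enabled = true;
		return 0;
	}
	early_param("torture_slb", parse_torture_slb);

	static inline bool torture_slb(void)
	{
		return torture_slb_enabled;
	}

Booting with torture_slb on the kernel command line then enables it.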

....

> +static void slb_cache_slbie_kernel(unsigned int index)
> +{
> +	unsigned long slbie_data = get_paca()->slb_cache[index];
> +	unsigned long ksp = get_paca()->kstack;
> +
> +	slbie_data <<= SID_SHIFT;
> +	slbie_data |= 0xc000000000000000ULL;
> +	if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
> +		return;
> +	slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;
> +
> +	asm volatile("slbie %0" : : "r" (slbie_data));
> +}
> +
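For readers wondering about the ksp check in the function above: it skips
invalidating the segment that maps the current kernel stack, presumably
because the bolted stack entry must survive this path (an SLB miss on the
stack here would be unrecoverable). With illustrative values, assuming
256MB segments (SID_SHIFT = 28, so slb_esid_mask() gives
0xfffffffff0000000):

	ksp                 = 0xc000000012345678
	ksp & slb_esid_mask = 0xc000000010000000  /* == slbie_data -> return */
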
> +static void slb_cache_slbie(unsigned int index)

Maybe slb_cache_slbie_user()? Similar to _kernel above?
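
i.e., keeping the pair symmetric, something like:

	static void slb_cache_slbie_kernel(unsigned int index);
	static void slb_cache_slbie_user(unsigned int index);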

> +{
> +	unsigned long slbie_data = get_paca()->slb_cache[index];
> +
> +	slbie_data <<= SID_SHIFT;
> +	slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
> +	slbie_data |= SLBIE_C; /* user slbs have C=1 */
> +
> +	asm volatile("slbie %0" : : "r" (slbie_data));
> +}
>  
>  /* Flush all user entries from the segment table of the current processor. */
>  void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
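
One note on the user variant above: the SLBIE_C bit marks the entry as
Class=1, which is what lets the SLBIA IH=3 path further down flush user
entries while leaving Class=0 kernel entries (and their lookaside
information) alone. Roughly, the operand being assembled is (illustrative
breakdown, not a quote from the patch):

	slbie_data = (esid << SID_SHIFT)          /* effective address */
	           | (ssize << SLBIE_SSIZE_SHIFT) /* segment size      */
	           | SLBIE_C;                     /* Class=1 == user   */
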
> @@ -414,8 +449,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
>  	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
>  	 */
>  	hard_irq_disable();
> -	asm volatile("isync" : : : "memory");
> -	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
> +	isync();
> +	if (torture_slb()) {
> +		__slb_flush_and_restore_bolted(0);

s/0/SLBIA_IH_ALL/ or something like that?
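
Spelled out, that's just giving the magic IH value a name, e.g.
(hypothetical spelling, assuming the argument is the SLBIA IH field):

	#define SLBIA_IH_ALL	0	/* IH=0: flush all non-zero entries
					   and all lookaside information */

	__slb_flush_and_restore_bolted(SLBIA_IH_ALL);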


> +		isync();
> +		get_paca()->slb_cache_ptr = 0;
> +		get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
> +
> +	} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
>  		/*
>  		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
>  		 * associated lookaside structures, which matches what
> @@ -423,47 +464,36 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
>  		 * cache.
>  		 */
>  		asm volatile(PPC_SLBIA(3));
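
Also, a note for readers on the torture path above: the slb_kern_bitmap
reset marks just the bolted entries as allocated again after the full
flush. E.g., with SLB_NUM_BOLTED = 2 (an assumed value):

	(1U << 2) - 1 = 0b11	/* slots 0 and 1, the bolted ones */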


-aneesh

