[PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR

Andrew Donnellan ajd at linux.ibm.com
Thu Aug 1 13:20:41 AEST 2019


On 28/7/19 10:54 pm, Christopher M. Riedl wrote:
> Whether a processor is in shared processor mode is not a compile-time
> constant, so don't hide the check behind a #define.
> 
> Signed-off-by: Christopher M. Riedl <cmr at informatik.wtf>

This seems like the aesthetically better approach.

Reviewed-by: Andrew Donnellan <ajd at linux.ibm.com>
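
For anyone skimming, here's a rough stand-alone user-space sketch (not
kernel code; CONFIG_DEMO_SPLPAR and demo_shared_proc() are invented
stand-ins) of why an inline function guarded by an IS_ENABLED()-style
check costs nothing on configs where shared processor mode can't happen:
the && short-circuits on a compile-time constant and the branch folds
away, same as the old "#define SHARED_PROCESSOR 0".

#include <stdbool.h>
#include <stdio.h>

/* Flip to 1 to emulate a CONFIG_PPC_SPLPAR=y build. */
#define CONFIG_DEMO_SPLPAR 0

/* Pretend runtime query, standing in for lppaca_shared_proc(). */
static bool demo_shared_proc(void)
{
	return true;
}

static inline bool is_shared_processor_demo(void)
{
	/*
	 * When CONFIG_DEMO_SPLPAR is 0 the && short-circuits on a
	 * compile-time constant, demo_shared_proc() is never called,
	 * and the whole function folds to "return false".
	 */
	return CONFIG_DEMO_SPLPAR && demo_shared_proc();
}

int main(void)
{
	if (is_shared_processor_demo())
		printf("would yield to the hypervisor\n");
	else
		printf("spin without yielding\n");
	return 0;
}

So the generated code for the !SPLPAR case should end up identical to
what the #define produced, just without pretending the value is a
constant at the source level.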

> ---
>   arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
>   1 file changed, 15 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> index a47f827bc5f1..8631b0b4e109 100644
> --- a/arch/powerpc/include/asm/spinlock.h
> +++ b/arch/powerpc/include/asm/spinlock.h
> @@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
>   
>   #if defined(CONFIG_PPC_SPLPAR)
>   /* We only yield to the hypervisor if we are in shared processor mode */
> -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
>   extern void __spin_yield(arch_spinlock_t *lock);
>   extern void __rw_yield(arch_rwlock_t *lock);
>   #else /* SPLPAR */
>   #define __spin_yield(x)	barrier()
>   #define __rw_yield(x)	barrier()
> -#define SHARED_PROCESSOR	0
>   #endif
>   
> +static inline bool is_shared_processor(void)
> +{
> +/* Only server processors have an lppaca struct */
> +#ifdef CONFIG_PPC_BOOK3S
> +	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
> +		lppaca_shared_proc(local_paca->lppaca_ptr));
> +#else
> +	return false;
> +#endif
> +}
> +
>   static inline void arch_spin_lock(arch_spinlock_t *lock)
>   {
>   	while (1) {
> @@ -117,7 +126,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
>   			break;
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__spin_yield(lock);
>   		} while (unlikely(lock->slock != 0));
>   		HMT_medium();
> @@ -136,7 +145,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
>   		local_irq_restore(flags);
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__spin_yield(lock);
>   		} while (unlikely(lock->slock != 0));
>   		HMT_medium();
> @@ -226,7 +235,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
>   			break;
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__rw_yield(rw);
>   		} while (unlikely(rw->lock < 0));
>   		HMT_medium();
> @@ -240,7 +249,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
>   			break;
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__rw_yield(rw);
>   		} while (unlikely(rw->lock != 0));
>   		HMT_medium();
> 

-- 
Andrew Donnellan              OzLabs, ADL Canberra
ajd at linux.ibm.com             IBM Australia Limited


