[PATCH 08/17] powerpc/qspinlock: paravirt yield to lock owner

Nicholas Piggin npiggin at gmail.com
Thu Nov 10 22:13:29 AEDT 2022


On Thu Nov 10, 2022 at 10:41 AM AEST, Jordan Niethe wrote:
>  On Thu, 2022-07-28 at 16:31 +1000, Nicholas Piggin wrote:
>  [resend as utf-8, not utf-7]
> > Waiters spinning on the lock word should yield to the lock owner if the
> > vCPU is preempted. This improves performance when the hypervisor has
> > oversubscribed physical CPUs.
> > ---
> >  arch/powerpc/lib/qspinlock.c | 97 ++++++++++++++++++++++++++++++------
> >  1 file changed, 83 insertions(+), 14 deletions(-)
> > 
> > diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
> > index aa26cfe21f18..55286ac91da5 100644
> > --- a/arch/powerpc/lib/qspinlock.c
> > +++ b/arch/powerpc/lib/qspinlock.c
> > @@ -5,6 +5,7 @@
> >  #include <linux/percpu.h>
> >  #include <linux/smp.h>
> >  #include <asm/qspinlock.h>
> > +#include <asm/paravirt.h>
> >  
> >  #define MAX_NODES	4
> >  
> > @@ -24,14 +25,16 @@ static int STEAL_SPINS __read_mostly = (1<<5);
> >  static bool MAYBE_STEALERS __read_mostly = true;
> >  static int HEAD_SPINS __read_mostly = (1<<8);
> >  
> > +static bool pv_yield_owner __read_mostly = true;
>
> Not macro case for these globals? To me the name does not make it super clear
> this is a boolean. What about pv_yield_owner_enabled?

Hmm. Might think about doing a better prefix namespace for these
tunables, which might help.

> > +
> >  static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
> >  
> > -static __always_inline int get_steal_spins(void)
> > +static __always_inline int get_steal_spins(bool paravirt)
> >  {
> >  	return STEAL_SPINS;
> >  }
> >  
> > -static __always_inline int get_head_spins(void)
> > +static __always_inline int get_head_spins(bool paravirt)
> >  {
> >  	return HEAD_SPINS;
> >  }
> > @@ -46,7 +49,11 @@ static inline int get_tail_cpu(u32 val)
> >  	return (val >> _Q_TAIL_CPU_OFFSET) - 1;
> >  }
> >  
> > -/* Take the lock by setting the bit, no other CPUs may concurrently lock it. */
> > +static inline int get_owner_cpu(u32 val)
> > +{
> > +	return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
> > +}
> > +
> >  /* Take the lock by setting the lock bit, no other CPUs will touch it. */
> >  static __always_inline void lock_set_locked(struct qspinlock *lock)
> >  {
> > @@ -180,7 +187,45 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
> >  	BUG();
> >  }
> >  
> > -static inline bool try_to_steal_lock(struct qspinlock *lock)
> > +static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
>
> This name doesn't seem correct for the non paravirt case.

Well... a yield to a running CPU is just a relax in any case. I think
it's okay.

> > +{
> > +	int owner;
> > +	u32 yield_count;
> > +
> > +	BUG_ON(!(val & _Q_LOCKED_VAL));
> > +
> > +	if (!paravirt)
> > +		goto relax;
> > +
> > +	if (!pv_yield_owner)
> > +		goto relax;
> > +
> > +	owner = get_owner_cpu(val);
> > +	yield_count = yield_count_of(owner);
> > +
> > +	if ((yield_count & 1) == 0)
> > +		goto relax; /* owner vcpu is running */
>
> I wonder why not use vcpu_is_preempted()?

Because we use a particular yield_count for the yield hcall (it
tries to avoid the situation where the owner wakes up and may release
the lock and then we yield to it).

>
> > +
> > +	/*
> > +	 * Read the lock word after sampling the yield count. On the other side
> > +	 * there may be a wmb because the yield count update is done by the
> > +	 * hypervisor preemption and the value update by the OS, however this
> > +	 * ordering might reduce the chance of out of order accesses and
> > +	 * improve the heuristic.
> > +	 */
> > +	smp_rmb();
> > +
> > +	if (READ_ONCE(lock->val) == val) {
> > +		yield_to_preempted(owner, yield_count);
> > +		/* Don't relax if we yielded. Maybe we should? */
> > +		return;
> > +	}
> > +relax:
> > +	cpu_relax();
> > +}
> > +
> > +
> > +static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
> >  {
> >  	int iters;
> >  
> > @@ -197,18 +242,18 @@ static inline bool try_to_steal_lock(struct qspinlock *lock)
> >  			continue;
> >  		}
> >  
> > -		cpu_relax();
> > +		yield_to_locked_owner(lock, val, paravirt);
> >  
> >  		iters++;
> >  
> > -		if (iters >= get_steal_spins())
> > +		if (iters >= get_steal_spins(paravirt))
> >  			break;
> >  	}
> >  
> >  	return false;
> >  }
> >  
> > -static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
> > +static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
> >  {
> >  	struct qnodes *qnodesp;
> >  	struct qnode *next, *node;
> > @@ -260,7 +305,7 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
> >  	if (!MAYBE_STEALERS) {
> >  		/* We're at the head of the waitqueue, wait for the lock. */
> >  		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
> > -			cpu_relax();
> > +			yield_to_locked_owner(lock, val, paravirt);
> >  
> >  		/* If we're the last queued, must clean up the tail. */
> >  		if ((val & _Q_TAIL_CPU_MASK) == tail) {
> > @@ -278,10 +323,10 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
> >  again:
> >  		/* We're at the head of the waitqueue, wait for the lock. */
> >  		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL) {
> > -			cpu_relax();
> > +			yield_to_locked_owner(lock, val, paravirt);
> >  
> >  			iters++;
> > -			if (!set_mustq && iters >= get_head_spins()) {
> > +			if (!set_mustq && iters >= get_head_spins(paravirt)) {
> >  				set_mustq = true;
> >  				lock_set_mustq(lock);
> >  				val |= _Q_MUST_Q_VAL;
> > @@ -320,10 +365,15 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
> >  
> >  void queued_spin_lock_slowpath(struct qspinlock *lock)
> >  {
> > -	if (try_to_steal_lock(lock))
> > -		return;
> > -
> > -	queued_spin_lock_mcs_queue(lock);
> > +	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
> > +		if (try_to_steal_lock(lock, true))
> > +			return;
> > +		queued_spin_lock_mcs_queue(lock, true);
> > +	} else {
> > +		if (try_to_steal_lock(lock, false))
> > +			return;
> > +		queued_spin_lock_mcs_queue(lock, false);
> > +	}
> >  }
>
> There is not really a need for a conditional: 
>
> bool paravirt = IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
> is_shared_processor();
>
> if (try_to_steal_lock(lock, paravirt))
> 	return;
>
> queued_spin_lock_mcs_queue(lock, paravirt);
>
>
> The paravirt parameter used by the various functions seems always to be
> equivalent to (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()).
> I wonder if it would be simpler testing (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor())
> (using a helper function) in those functions instead passing it as a parameter?

You'd think so, and yes, semantically that's identical, but with my
version gcc-12 seems to inline each side, whereas with yours they are
more shared. We actually want the separate versions because
is_shared_processor() is set at boot, so we only ever run one side
or the other. That way we get the best efficiency possible, and we
don't have the icache pollution concern because the other side never
runs.

At least that's the idea, that's what generic qspinlocks do too.

Thanks,
Nick


More information about the Linuxppc-dev mailing list