[RFC PATCH 3/4] powerpc/qspinlock: Remove !maybe_stealers special case queue head locking
Nicholas Piggin
npiggin at gmail.com
Tue Nov 15 03:11:18 AEDT 2022
With the update primitive that clears the tail if it matches and is
tolerant of other queueing activity on the lock, there is no longer a
significant reason to keep the large !maybe_stealers special case, so
remove it.
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
arch/powerpc/lib/qspinlock.c | 124 +++++++++++++++--------------------
1 file changed, 53 insertions(+), 71 deletions(-)
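
To illustrate the primitive the changelog relies on, here is a minimal
userspace sketch of a cmpxchg-style "trylock, and clear my tail if it
still matches" operation. It is not the powerpc implementation (the
in-kernel version is a lwarx/stwcx. sequence that also encodes the
owner CPU in the locked value); the _Q_* constants, the struct layout
and the trylock_clear_my_tail_sketch() name are illustrative
assumptions, and GCC __atomic builtins stand in for the kernel's
atomics.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative bit layout only; not the kernel's actual assignments. */
#define _Q_LOCKED_VAL		(1U << 0)
#define _Q_TAIL_CPU_OFFSET	16
#define _Q_TAIL_CPU_MASK	(0xffffU << _Q_TAIL_CPU_OFFSET)

struct qspinlock { uint32_t val; };

/*
 * Try to take the lock; if our tail encoding is still the queue tail,
 * clear it in the same atomic update.  Returns the old lock word so
 * the caller can tell what happened.
 */
static uint32_t trylock_clear_my_tail_sketch(struct qspinlock *lock, uint32_t tail)
{
	uint32_t old = __atomic_load_n(&lock->val, __ATOMIC_RELAXED);

	for (;;) {
		uint32_t new;

		if (old & _Q_LOCKED_VAL)
			return old;	/* lock is (still) held; caller decides what to do */

		new = old | _Q_LOCKED_VAL;		/* take the lock... */
		if ((old & _Q_TAIL_CPU_MASK) == tail)
			new &= ~_Q_TAIL_CPU_MASK;	/* ...and clear the tail only if it is still ours */

		/* On failure 'old' is refreshed with the current value and we retry. */
		if (__atomic_compare_exchange_n(&lock->val, &old, new, false,
						__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return old;
	}
}

The returned value drives the three cases the unified path below has to
handle: if _Q_LOCKED_VAL is still set the lock was taken again (stolen)
before we got to it and the head waiter goes back to spinning; if the
returned tail equals ours we were the last queued CPU and the tail was
cleared together with taking the lock; otherwise we took the lock, left
the tail alone, and must pass the MCS lock on to node->next.
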
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 79793b3209ea..457e748b0078 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -523,7 +523,11 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
struct qnode *next, *node;
u32 val, old, tail;
bool seen_preempted = false;
+ bool sleepy = false;
+ bool mustq = false;
int idx;
+ int set_yield_cpu = -1;
+ int iters = 0;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -577,90 +581,68 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
smp_rmb(); /* acquire barrier for the mcs lock */
}
- if (!maybe_stealers) {
- int set_yield_cpu = -1;
-
- /* We're at the head of the waitqueue, wait for the lock. */
- spin_begin();
- for (;;) {
- val = READ_ONCE(lock->val);
- if (!(val & _Q_LOCKED_VAL))
- break;
-
- propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
- if (yield_head_to_locked_owner(lock, val, paravirt))
- seen_preempted = true;
- }
- spin_end();
-
- /* If we're the last queued, must clean up the tail. */
- old = trylock_clear_my_tail(lock, tail);
- BUG_ON(old & _Q_LOCKED_VAL);
- if ((old & _Q_TAIL_CPU_MASK) == tail)
- goto release;
-
- } else {
- int set_yield_cpu = -1;
- int iters = 0;
- bool sleepy = false;
- bool mustq = false;
+ /* We're at the head of the waitqueue, wait for the lock. */
+again:
+ spin_begin();
+ for (;;) {
bool preempted;
-again:
- /* We're at the head of the waitqueue, wait for the lock. */
- spin_begin();
- for (;;) {
- val = READ_ONCE(lock->val);
- if (!(val & _Q_LOCKED_VAL))
- break;
-
- if (paravirt && pv_sleepy_lock) {
- if (!sleepy) {
- if (val & _Q_SLEEPY_VAL) {
- seen_sleepy_lock();
- sleepy = true;
- } else if (recently_sleepy()) {
- sleepy = true;
- }
- }
- if (pv_sleepy_lock_sticky && seen_preempted &&
- !(val & _Q_SLEEPY_VAL)) {
- if (try_set_sleepy(lock, val))
- val |= _Q_SLEEPY_VAL;
+ val = READ_ONCE(lock->val);
+ if (!(val & _Q_LOCKED_VAL))
+ break;
+
+ if (paravirt && pv_sleepy_lock && maybe_stealers) {
+ if (!sleepy) {
+ if (val & _Q_SLEEPY_VAL) {
+ seen_sleepy_lock();
+ sleepy = true;
+ } else if (recently_sleepy()) {
+ sleepy = true;
}
}
+ if (pv_sleepy_lock_sticky && seen_preempted &&
+ !(val & _Q_SLEEPY_VAL)) {
+ if (try_set_sleepy(lock, val))
+ val |= _Q_SLEEPY_VAL;
+ }
+ }
- propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
- preempted = yield_head_to_locked_owner(lock, val, paravirt);
- if (preempted)
- seen_preempted = true;
+ propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
+ preempted = yield_head_to_locked_owner(lock, val, paravirt);
+ if (!maybe_stealers)
+ continue;
+
+ if (preempted)
+ seen_preempted = true;
- if (paravirt && preempted) {
- sleepy = true;
+ if (paravirt && preempted) {
+ sleepy = true;
- if (!pv_spin_on_preempted_owner)
- iters++;
- } else {
+ if (!pv_spin_on_preempted_owner)
iters++;
- }
+ } else {
+ iters++;
+ }
- if (!mustq && iters >= get_head_spins(paravirt, sleepy)) {
- mustq = true;
- set_mustq(lock);
- val |= _Q_MUST_Q_VAL;
- }
+ if (!mustq && iters >= get_head_spins(paravirt, sleepy)) {
+ mustq = true;
+ set_mustq(lock);
+ val |= _Q_MUST_Q_VAL;
}
- spin_end();
+ }
+ spin_end();
- /* If we're the last queued, must clean up the tail. */
- old = trylock_clear_my_tail(lock, tail);
- if (unlikely(old & _Q_LOCKED_VAL))
- goto again;
- if ((old & _Q_TAIL_CPU_MASK) == tail)
- goto release;
+ /* If we're the last queued, must clean up the tail. */
+ old = trylock_clear_my_tail(lock, tail);
+ if (unlikely(old & _Q_LOCKED_VAL)) {
+ BUG_ON(!maybe_stealers);
+ goto again; /* Can only be true if maybe_stealers. */
}
- /* contended path; must wait for next != NULL (MCS protocol) */
+ if ((old & _Q_TAIL_CPU_MASK) == tail)
+ goto release; /* We were the tail, no next. */
+
+ /* There is a next, must wait for node->next != NULL (MCS protocol) */
next = READ_ONCE(node->next);
if (!next) {
spin_begin();
--
2.37.2
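
For reference, a compact sketch of the single head-of-queue flow that
both the maybe_stealers and !maybe_stealers configurations now share.
Again purely illustrative: the paravirt yielding, sleepy-lock and
must-queue handling is omitted, and it reuses the hypothetical
trylock_clear_my_tail_sketch() helper and _Q_* constants from the note
above.

/* Simplified head-of-queue flow after this patch; not the kernel code. */
static void mcs_head_lock_sketch(struct qspinlock *lock, uint32_t tail)
{
	uint32_t old;

again:
	/* We're at the head of the waitqueue; wait for the lock to look free. */
	while (__atomic_load_n(&lock->val, __ATOMIC_RELAXED) & _Q_LOCKED_VAL)
		;	/* spin (the real code yields to a preempted lock owner here) */

	old = trylock_clear_my_tail_sketch(lock, tail);
	if (old & _Q_LOCKED_VAL)
		goto again;	/* a stealer beat us to it; only possible with maybe_stealers */

	if ((old & _Q_TAIL_CPU_MASK) == tail)
		return;		/* we were the last queued; tail cleared, nothing to hand off */

	/* A successor exists: the real code now waits for node->next and unlocks its MCS node. */
}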