[PATCH v2 05/17] powerpc/qspinlock: allow new waiters to steal the lock before queueing
Nicholas Piggin
npiggin at gmail.com
Mon Nov 14 13:31:25 AEDT 2022
Allow new waiters a number of spins on the lock word before queueing,
which particularly helps paravirt performance when physical CPUs are
oversubscribed.
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
arch/powerpc/lib/qspinlock.c | 159 ++++++++++++++++++++++++++++++-----
1 file changed, 140 insertions(+), 19 deletions(-)
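For reference, the shape of the new fast path is: spin on the lock word a
bounded number of times, trying to take the lock whenever it is observed
free, and fall back to the MCS queue once the budget runs out. A minimal
userspace sketch of that flow, assuming C11 atomics, the lock bit in bit 0,
and a hypothetical queue_as_mcs_waiter() standing in for the real
lwarx/stwcx. helpers and MCS queue (sched_yield() stands in for cpu_relax()):

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

#define STEAL_SPINS (1 << 5)	/* mirrors the default steal_spins below */

/* Hypothetical stand-in for the real MCS queueing slow path. */
extern void queue_as_mcs_waiter(atomic_uint *lock);

static bool try_to_steal(atomic_uint *lock)
{
	int iters = 0;

	do {
		unsigned int val = atomic_load_explicit(lock, memory_order_relaxed);

		if (!(val & 1u)) {
			/* Lock bit clear: try to take it, keeping the other bits. */
			if (atomic_compare_exchange_strong_explicit(lock, &val, val | 1u,
					memory_order_acquire, memory_order_relaxed))
				return true;
		} else {
			sched_yield();
		}
		iters++;
	} while (iters < STEAL_SPINS);

	return false;	/* budget exhausted: the caller must queue */
}

void lock_slowpath(atomic_uint *lock)
{
	if (try_to_steal(lock))
		return;
	queue_as_mcs_waiter(lock);
}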
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 6c58c24af5a0..872d4628a44d 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -19,8 +19,17 @@ struct qnodes {
struct qnode nodes[MAX_NODES];
};
+/* Tuning parameters */
+static int steal_spins __read_mostly = (1<<5);
+static bool maybe_stealers __read_mostly = true;
+
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
+static __always_inline int get_steal_spins(void)
+{
+ return steal_spins;
+}
+
static inline u32 encode_tail_cpu(int cpu)
{
return (cpu + 1) << _Q_TAIL_CPU_OFFSET;
@@ -50,15 +59,14 @@ static __always_inline void set_locked(struct qspinlock *lock)
BUG_ON(prev & _Q_LOCKED_VAL);
}
-/* Take lock, clearing tail, cmpxchg with old (which must not be locked) */
-static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 old)
+static __always_inline u32 __trylock_cmpxchg(struct qspinlock *lock, u32 old, u32 new)
{
u32 prev;
BUG_ON(old & _Q_LOCKED_VAL);
asm volatile(
-"1: lwarx %0,0,%1,%4 # trylock_clear_tail_cpu \n"
+"1: lwarx %0,0,%1,%4 # __trylock_cmpxchg \n"
" cmpw 0,%0,%2 \n"
" bne- 2f \n"
" stwcx. %3,0,%1 \n"
@@ -66,13 +74,27 @@ static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 ol
"\t" PPC_ACQUIRE_BARRIER " \n"
"2: \n"
: "=&r" (prev)
- : "r" (&lock->val), "r"(old), "r" (_Q_LOCKED_VAL),
+ : "r" (&lock->val), "r"(old), "r" (new),
"i" (IS_ENABLED(CONFIG_PPC64))
: "cr0", "memory");
return likely(prev == old);
}
+/* Take lock, clearing tail, cmpxchg with old (which must not be locked) */
+static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 val)
+{
+ return __trylock_cmpxchg(lock, val, _Q_LOCKED_VAL);
+}
+
+/* Take lock, preserving tail, cmpxchg with val (which must not be locked) */
+static __always_inline int trylock_with_tail_cpu(struct qspinlock *lock, u32 val)
+{
+ u32 newval = _Q_LOCKED_VAL | (val & _Q_TAIL_CPU_MASK);
+
+ return __trylock_cmpxchg(lock, val, newval);
+}
+
/*
* Publish our tail, replacing previous tail. Return previous value.
*
@@ -122,6 +144,30 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
BUG();
}
+static inline bool try_to_steal_lock(struct qspinlock *lock)
+{
+ int iters = 0;
+
+ if (!maybe_stealers)
+ return false;
+
+ /* Attempt to steal the lock */
+ do {
+ u32 val = READ_ONCE(lock->val);
+
+ if (unlikely(!(val & _Q_LOCKED_VAL))) {
+ if (trylock_with_tail_cpu(lock, val))
+ return true;
+ } else {
+ cpu_relax();
+ }
+
+ iters++;
+ } while (iters < get_steal_spins());
+
+ return false;
+}
+
static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
{
struct qnodes *qnodesp;
@@ -171,25 +217,49 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
smp_rmb(); /* acquire barrier for the mcs lock */
}
- /* We're at the head of the waitqueue, wait for the lock. */
- for (;;) {
- val = READ_ONCE(lock->val);
- if (!(val & _Q_LOCKED_VAL))
- break;
+ if (!maybe_stealers) {
+ /* We're at the head of the waitqueue, wait for the lock. */
+ for (;;) {
+ val = READ_ONCE(lock->val);
+ if (!(val & _Q_LOCKED_VAL))
+ break;
- cpu_relax();
- }
+ cpu_relax();
+ }
+
+ /* If we're the last queued, must clean up the tail. */
+ if ((val & _Q_TAIL_CPU_MASK) == tail) {
+ if (trylock_clear_tail_cpu(lock, val))
+ goto release;
+ /* Another waiter must have enqueued. */
+ }
+
+ /* We must be the owner, just set the lock bit and acquire */
+ set_locked(lock);
+ } else {
+again:
+ /* We're at the head of the waitqueue, wait for the lock. */
+ for (;;) {
+ val = READ_ONCE(lock->val);
+ if (!(val & _Q_LOCKED_VAL))
+ break;
- /* If we're the last queued, must clean up the tail. */
- if ((val & _Q_TAIL_CPU_MASK) == tail) {
- if (trylock_clear_tail_cpu(lock, val))
- goto release;
- /* Another waiter must have enqueued */
+ cpu_relax();
+ }
+
+ /* If we're the last queued, must clean up the tail. */
+ if ((val & _Q_TAIL_CPU_MASK) == tail) {
+ if (trylock_clear_tail_cpu(lock, val))
+ goto release;
+ /* Another waiter must have enqueued, or lock stolen. */
+ } else {
+ if (trylock_with_tail_cpu(lock, val))
+ goto unlock_next;
+ }
+ goto again;
}
- /* We must be the owner, just set the lock bit and acquire */
- set_locked(lock);
-
+unlock_next:
/* contended path; must wait for next != NULL (MCS protocol) */
while (!(next = READ_ONCE(node->next)))
cpu_relax();
@@ -209,6 +279,9 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
void queued_spin_lock_slowpath(struct qspinlock *lock)
{
+ if (try_to_steal_lock(lock))
+ return;
+
queued_spin_lock_mcs_queue(lock);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
@@ -218,3 +291,51 @@ void pv_spinlocks_init(void)
{
}
#endif
+
+#include <linux/debugfs.h>
+static int steal_spins_set(void *data, u64 val)
+{
+ static DEFINE_MUTEX(lock);
+
+ /*
+ * The lock slow path has a !maybe_stealers case in which the waiter
+ * at the head of the queue assumes nobody else can take the lock out
+ * from under it, so it sets the lock bit without a cmpxchg. That
+ * assumption is unsafe in the presence of stealers, so the two modes
+ * must be kept away from one another.
+ */
+
+ mutex_lock(&lock);
+ if (val && !steal_spins) {
+ maybe_stealers = true;
+ /* wait for queue head waiter to go away */
+ synchronize_rcu();
+ steal_spins = val;
+ } else if (!val && steal_spins) {
+ steal_spins = val;
+ /* wait for all possible stealers to go away */
+ synchronize_rcu();
+ maybe_stealers = false;
+ } else {
+ steal_spins = val;
+ }
+ mutex_unlock(&lock);
+
+ return 0;
+}
+
+static int steal_spins_get(void *data, u64 *val)
+{
+ *val = steal_spins;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
+
+static __init int spinlock_debugfs_init(void)
+{
+ debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
+
+ return 0;
+}
+device_initcall(spinlock_debugfs_init);
--
2.37.2
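The ordering in steal_spins_set() is the subtle part: stealing must never
be possible while a waiter that entered the queue under the !maybe_stealers
assumption is still at the head, and the synchronize_rcu() calls are what
wait such lockers out (the slow path runs with preemption disabled, which
synchronize_rcu() treats as a read-side critical section on current
kernels). A rough userspace analogue of the two transitions, with a
hypothetical wait_for_in_flight_lockers() standing in for synchronize_rcu()
and C11 atomics for the tunables:

#include <stdatomic.h>
#include <stdbool.h>

/* The two tunables, initialised to the patch's defaults. */
static _Atomic bool maybe_stealers = true;
static _Atomic int steal_spins = 1 << 5;

/*
 * Hypothetical stand-in for synchronize_rcu(): returns only once every
 * CPU that was already inside the lock slow path has left it.
 */
extern void wait_for_in_flight_lockers(void);

/* Enable stealing: mirrors the "val && !steal_spins" branch. */
static void enable_stealing(int val)
{
	/* New queue heads stop using the plain set_locked() shortcut... */
	atomic_store(&maybe_stealers, true);
	/* ...wait for any head still relying on that shortcut to go away... */
	wait_for_in_flight_lockers();
	/* ...and only then hand out a steal budget. */
	atomic_store(&steal_spins, val);
}

/* Disable stealing: mirrors the "!val && steal_spins" branch. */
static void disable_stealing(void)
{
	/* Withdraw the steal budget... */
	atomic_store(&steal_spins, 0);
	/* ...wait for all possible stealers to go away... */
	wait_for_in_flight_lockers();
	/* ...then the set_locked() head-of-queue shortcut is safe again. */
	atomic_store(&maybe_stealers, false);
}

With the debugfs file created above, the knob ends up at
/sys/kernel/debug/powerpc/qspl_steal_spins (assuming arch_debugfs_dir is the
usual "powerpc" directory), so both transitions can be exercised at runtime.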