[PATCH 12/17] powerpc/qspinlock: add ability to prod new queue head CPU
Jordan Niethe
jniethe5 at gmail.com
Fri Aug 12 14:22:28 AEST 2022
On Thu, 2022-07-28 at 16:31 +1000, Nicholas Piggin wrote:
> After the head of the queue acquires the lock, it releases the
> next waiter in the queue to become the new head. Add an option
> to prod the new head if its vCPU was preempted. This may only
> have an effect if queue waiters are yielding.
>
> Disable this option by default for now, i.e., no logical change.
> ---
> arch/powerpc/lib/qspinlock.c | 29 ++++++++++++++++++++++++++++-
> 1 file changed, 28 insertions(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
> index 28c85a2d5635..3b10e31bcf0a 100644
> --- a/arch/powerpc/lib/qspinlock.c
> +++ b/arch/powerpc/lib/qspinlock.c
> @@ -12,6 +12,7 @@
> struct qnode {
> struct qnode *next;
> struct qspinlock *lock;
> + int cpu;
> int yield_cpu;
> u8 locked; /* 1 if lock acquired */
> };
> @@ -30,6 +31,7 @@ static bool pv_yield_owner __read_mostly = true;
> static bool pv_yield_allow_steal __read_mostly = false;
> static bool pv_yield_prev __read_mostly = true;
> static bool pv_yield_propagate_owner __read_mostly = true;
> +static bool pv_prod_head __read_mostly = false;
>
> static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
>
> @@ -392,6 +394,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
> node = &qnodesp->nodes[idx];
> node->next = NULL;
> node->lock = lock;
> + node->cpu = smp_processor_id();
I suppose node->cpu could be used in some other places too.
For example, the call could be changed to:
yield_to_prev(lock, node, prev, paravirt);
and yield_to_prev() could then access prev->cpu.
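Something like this, just a sketch of what I mean (untested, and assuming
yield_to_prev() currently takes the tail word and decodes the previous CPU
from it):

	static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *node,
						  struct qnode *prev, bool paravirt)
	{
		/* take the prev qnode directly and read its cpu ... */
		int prev_cpu = prev->cpu;
		...
		/* ... rest of yield_to_prev() unchanged, using prev_cpu as before */
	}

	/* and at the call site in queued_spin_lock_mcs_queue(): */
	while (!node->locked)
		yield_to_prev(lock, node, prev, paravirt);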
> node->yield_cpu = -1;
> node->locked = 0;
>
> @@ -483,7 +486,14 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
> * this store to locked. The corresponding barrier is the smp_rmb()
> * acquire barrier for mcs lock, above.
> */
> - WRITE_ONCE(next->locked, 1);
> + if (paravirt && pv_prod_head) {
> + int next_cpu = next->cpu;
> + WRITE_ONCE(next->locked, 1);
> + if (vcpu_is_preempted(next_cpu))
> + prod_cpu(next_cpu);
> + } else {
> + WRITE_ONCE(next->locked, 1);
> + }
>
> release:
> qnodesp->count--; /* release the node */
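A note on the prod itself (from memory of asm/paravirt.h, so worth double
checking): on pseries prod_cpu() should be roughly

	static inline void prod_cpu(int cpu)
	{
		plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

i.e. an H_PROD hcall on the target, which only helps when the new head's
vCPU really is preempted, hence the vcpu_is_preempted() check. And I think
next->cpu has to be read before the locked store, since the next node may
be released and reused as soon as it observes locked = 1.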
> @@ -622,6 +632,22 @@ static int pv_yield_propagate_owner_get(void *data, u64 *val)
>
> DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n");
>
> +static int pv_prod_head_set(void *data, u64 val)
> +{
> + pv_prod_head = !!val;
> +
> + return 0;
> +}
> +
> +static int pv_prod_head_get(void *data, u64 *val)
> +{
> + *val = pv_prod_head;
> +
> + return 0;
> +}
> +
> +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_prod_head, pv_prod_head_get, pv_prod_head_set, "%llu\n");
> +
> static __init int spinlock_debugfs_init(void)
> {
> debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
> @@ -631,6 +657,7 @@ static __init int spinlock_debugfs_init(void)
> debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
> debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
> debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
> + debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
> }
>
> return 0;
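For anyone wanting to try it, I assume this can be flipped at run time with
something like echo 1 > /sys/kernel/debug/powerpc/qspl_pv_prod_head
(taking arch_debugfs_dir to be the powerpc directory under debugfs), since
the option is off by default.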