[PATCH v4 4/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
Nicholas Piggin
npiggin at gmail.com
Fri Jul 24 23:14:21 AEST 2020
This implements the generic paravirt qspinlocks using H_PROD and H_CONFER to
kick and wait.
This uses an undirected yield to any CPU rather than the directed yield to a
preempted lock holder that the paravirtualised simple spinlocks use (the
directed yield requires no kick hcall). This is something that could be
investigated and improved in the future.
Performance results can be found in the commit which added queued spinlocks.
Acked-by: Peter Zijlstra (Intel) <peterz at infradead.org>
Acked-by: Waiman Long <longman at redhat.com>
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
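A rough sketch, for reviewers, of how the generic pv-qspinlock hooks end up
driving the two hcalls on a shared-processor LPAR (illustrative only, not
part of the patch; the bodies are paraphrased from the hunks below, and the
generic slowpath calling context is assumed rather than shown):

/* Waiter side: called by the generic pv slowpath after spinning. */
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	/*
	 * Undirected yield: H_CONFER with cpu = -1, yield_count = 0 gives
	 * this physical CPU to any runnable vCPU in the partition, unlike
	 * the directed yield_to_preempted() that simple spinlocks use.
	 */
	yield_to_any();
}

/* Unlocker side: wake the yielded waiter so it gets redispatched. */
static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);	/* H_PROD on the target's hardware CPU id */
}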
arch/powerpc/include/asm/paravirt.h | 28 ++++++++
arch/powerpc/include/asm/qspinlock.h | 66 +++++++++++++++++++
arch/powerpc/include/asm/qspinlock_paravirt.h | 7 ++
arch/powerpc/include/asm/spinlock.h | 4 ++
arch/powerpc/platforms/pseries/Kconfig | 9 ++-
arch/powerpc/platforms/pseries/setup.c | 4 +-
include/asm-generic/qspinlock.h | 2 +
7 files changed, 118 insertions(+), 2 deletions(-)
create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index 339e8533464b..21e5f29ca251 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -28,6 +28,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count)
{
plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
+
+static inline void prod_cpu(int cpu)
+{
+ plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+}
+
+static inline void yield_to_any(void)
+{
+ plpar_hcall_norets(H_CONFER, -1, 0);
+}
#else
static inline bool is_shared_processor(void)
{
@@ -44,6 +54,19 @@ static inline void yield_to_preempted(int cpu, u32 yield_count)
{
___bad_yield_to_preempted(); /* This would be a bug */
}
+
+extern void ___bad_yield_to_any(void);
+static inline void yield_to_any(void)
+{
+ ___bad_yield_to_any(); /* This would be a bug */
+}
+
+extern void ___bad_prod_cpu(void);
+static inline void prod_cpu(int cpu)
+{
+ ___bad_prod_cpu(); /* This would be a bug */
+}
+
#endif
#define vcpu_is_preempted vcpu_is_preempted
@@ -56,4 +79,9 @@ static inline bool vcpu_is_preempted(int cpu)
return false;
}
+static inline bool pv_is_native_spin_unlock(void)
+{
+ return !is_shared_processor();
+}
+
#endif /* _ASM_POWERPC_PARAVIRT_H */
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index c49e33e24edd..f5066f00a08c 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,47 @@
#define _ASM_POWERPC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ if (!is_shared_processor())
+ native_queued_spin_lock_slowpath(lock, val);
+ else
+ __pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ if (!is_shared_processor())
+ smp_store_release(&lock->locked, 0);
+ else
+ __pv_queued_spin_unlock(lock);
+}
+
+#else
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val = 0;
+
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+ return;
+
+ queued_spin_lock_slowpath(lock, val);
+}
+#define queued_spin_lock queued_spin_lock
+
#define smp_mb__after_spinlock() smp_mb()
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
@@ -20,6 +58,34 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
}
#define queued_spin_is_locked queued_spin_is_locked
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define SPIN_THRESHOLD (1<<15) /* not tuned */
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+ if (*ptr != val)
+ return;
+ yield_to_any();
+ /*
+ * We could pass in a CPU here if waiting in the queue and yield to
+ * the previous CPU in the queue.
+ */
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+ prod_cpu(cpu);
+}
+
+extern void __pv_init_lock_hash(void);
+
+static inline void pv_spinlocks_init(void)
+{
+ __pv_init_lock_hash();
+}
+
+#endif
+
#include <asm-generic/qspinlock.h>
#endif /* _ASM_POWERPC_QSPINLOCK_H */
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..6b60e7736a47
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
+#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
+
+EXPORT_SYMBOL(__pv_queued_spin_unlock);
+
+#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 434615f1d761..6ec72282888d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -10,5 +10,9 @@
#include <asm/simple_spinlock.h>
#endif
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void pv_spinlocks_init(void) { }
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 24c18362e5ea..5e037df2a3a1 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -25,15 +25,22 @@ config PPC_PSERIES
select SWIOTLB
default y
+config PARAVIRT_SPINLOCKS
+ bool
+
config PPC_SPLPAR
- depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
+ depends on PPC_PSERIES
+ select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+ default y
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
processors, that is, which share physical processors between
two or more partitions.
+ Say Y if you are unsure.
+
config DTL
bool "Dispatch Trace Log"
depends on PPC_SPLPAR && DEBUG_FS
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 2db8469e475f..fa847d1f9d54 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -771,8 +771,10 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- if (lppaca_shared_proc(get_lppaca()))
+ if (lppaca_shared_proc(get_lppaca())) {
static_branch_enable(&shared_processor);
+ pv_spinlocks_init();
+ }
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fb0a814d4395..38ca14e79a86 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -69,6 +69,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#ifndef queued_spin_lock
/**
* queued_spin_lock - acquire a queued spinlock
* @lock: Pointer to queued spinlock structure
@@ -82,6 +83,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
queued_spin_lock_slowpath(lock, val);
}
+#endif
#ifndef queued_spin_unlock
/**
--
2.23.0