[PATCH] powerpc/rtas: upgrade internal arch spinlocks
Nathan Lynch nathanl at linux.ibm.com
Tue Jan 10 15:42:55 AEDT 2023
At the time commit f97bb36f705d ("powerpc/rtas: Turn rtas lock into a
raw spinlock") was written, the spinlock lockup detection code called
__delay(), which cannot make progress if the timebase is not
advancing. Since the interprocessor timebase synchronization sequence
for chrp, cell, and some now-unsupported Power models can temporarily
freeze the timebase through an RTAS function (freeze-time-base), the
lock that serializes most RTAS calls was converted to arch_spinlock_t
to prevent kernel hangs in the lockup detection code.

However, commit bc88c10d7e69 ("locking/spinlock/debug: Remove spinlock
lockup detection code") removed that inconvenient property from the
lock debug code several years ago. So it should now be safe to
reintroduce generic locks into the RTAS support code, primarily to
increase lockdep coverage.

Making rtas.lock a spinlock_t would violate lock type nesting rules
because it can be acquired while holding raw locks, e.g. pci_lock and
irq_desc->lock. So convert it to raw_spinlock_t. There's no apparent
reason not to upgrade timebase_lock as well.
Signed-off-by: Nathan Lynch <nathanl at linux.ibm.com>
---
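Not part of the patch, just a minimal sketch for reviewers of the
locking pattern this conversion applies: the open-coded
local_irq_save() + preempt_disable() + arch_spin_lock() sequence is
replaced by a single raw_spin_lock_irqsave(), which keeps the same
IRQ-off semantics but is tracked by lockdep and remains a spinning
lock even on PREEMPT_RT, so it can still be taken while holding raw
locks such as pci_lock. The example_lock/example_critical_section
names below are hypothetical and do not appear in rtas.c.

#include <linux/spinlock.h>

/* Hypothetical lock, named for illustration only. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	/*
	 * The old code open-coded the equivalent of:
	 *   local_irq_save(flags);
	 *   preempt_disable();
	 *   arch_spin_lock(&lock);   (lock was an arch_spinlock_t)
	 * Interrupts and preemption stay disabled across the critical
	 * section, but the acquisition is now visible to lockdep.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);

	/* ... work that must not sleep, e.g. an RTAS call ... */

	raw_spin_unlock_irqrestore(&example_lock, flags);
}

Note that rtas_give_timebase() still calls hard_irq_disable() after
taking the lock, since on powerpc raw_spin_lock_irqsave() only
soft-disables interrupts.
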
arch/powerpc/include/asm/rtas-types.h | 2 +-
arch/powerpc/kernel/rtas.c | 52 ++++++++-------------------
2 files changed, 15 insertions(+), 39 deletions(-)
diff --git a/arch/powerpc/include/asm/rtas-types.h b/arch/powerpc/include/asm/rtas-types.h
index 8df6235d64d1..a58f96eb2d19 100644
--- a/arch/powerpc/include/asm/rtas-types.h
+++ b/arch/powerpc/include/asm/rtas-types.h
@@ -18,7 +18,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- arch_spinlock_t lock;
+ raw_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index deded51a7978..a834726f18e3 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -61,7 +61,7 @@ static inline void do_enter_rtas(unsigned long args)
}
struct rtas_t rtas = {
- .lock = __ARCH_SPIN_LOCK_UNLOCKED
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(rtas.lock),
};
EXPORT_SYMBOL(rtas);
@@ -80,28 +80,6 @@ unsigned long rtas_rmo_buf;
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);
-/* RTAS use home made raw locking instead of spin_lock_irqsave
- * because those can be called from within really nasty contexts
- * such as having the timebase stopped which would lockup with
- * normal locks and spinlock debugging enabled
- */
-static unsigned long lock_rtas(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- arch_spin_lock(&rtas.lock);
- return flags;
-}
-
-static void unlock_rtas(unsigned long flags)
-{
- arch_spin_unlock(&rtas.lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
/*
* call_rtas_display_status and call_rtas_display_status_delay
* are designed only for very early low-level debugging, which
@@ -109,14 +87,14 @@ static void unlock_rtas(unsigned long flags)
*/
static void call_rtas_display_status(unsigned char c)
{
- unsigned long s;
+ unsigned long flags;
if (!rtas.base)
return;
- s = lock_rtas();
+ raw_spin_lock_irqsave(&rtas.lock, flags);
rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
- unlock_rtas(s);
+ raw_spin_unlock_irqrestore(&rtas.lock, flags);
}
static void call_rtas_display_status_delay(char c)
@@ -534,7 +512,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
va_list list;
int i;
- unsigned long s;
+ unsigned long flags;
struct rtas_args *rtas_args;
char *buff_copy = NULL;
int ret;
@@ -557,8 +535,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
return -1;
}
- s = lock_rtas();
-
+ raw_spin_lock_irqsave(&rtas.lock, flags);
/* We use the global rtas args buffer */
rtas_args = &rtas.args;
@@ -576,7 +553,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;
- unlock_rtas(s);
+ raw_spin_unlock_irqrestore(&rtas.lock, flags);
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -1268,7 +1245,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
buff_copy = get_errorlog_buffer();
- flags = lock_rtas();
+ raw_spin_lock_irqsave(&rtas.lock, flags);
rtas.args = args;
do_enter_rtas(__pa(&rtas.args));
@@ -1279,7 +1256,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
if (be32_to_cpu(args.rets[0]) == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
- unlock_rtas(flags);
+ raw_spin_unlock_irqrestore(&rtas.lock, flags);
if (buff_copy) {
if (errbuf)
@@ -1401,19 +1378,18 @@ int __init early_init_dt_scan_rtas(unsigned long node,
return 1;
}
-static arch_spinlock_t timebase_lock;
+static DEFINE_RAW_SPINLOCK(timebase_lock);
static u64 timebase = 0;
void rtas_give_timebase(void)
{
unsigned long flags;
- local_irq_save(flags);
+ raw_spin_lock_irqsave(&timebase_lock, flags);
hard_irq_disable();
- arch_spin_lock(&timebase_lock);
rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
timebase = get_tb();
- arch_spin_unlock(&timebase_lock);
+ raw_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -1425,8 +1401,8 @@ void rtas_take_timebase(void)
{
while (!timebase)
barrier();
- arch_spin_lock(&timebase_lock);
+ raw_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- arch_spin_unlock(&timebase_lock);
+ raw_spin_unlock(&timebase_lock);
}
--
2.37.1