[patch] spin-yield-2.6.11-rc1-A1
Ingo Molnar
mingo at elte.hu
Mon Jan 17 23:42:09 EST 2005
* Paul Mackerras <paulus at samba.org> wrote:
> > hm, how about calling __spin_yield() from _raw_spin_trylock(), if the
> > locking attempt was unsuccessful? This might be slightly incorrect if
> > the locking attempt is not connected to an actual spin-loop, but we do
> > have other spin-loops with open-coded trylocks that would benefit from
> > this optimization too.
>
> That would help, but we also need to yield while we are polling the
> lock until it becomes available. Otherwise we will only yield once;
> if we get another timeslice and the other cpu still hasn't finished
> with the lock (or another cpu has got it now), we will spin uselessly
> for the whole of our timeslice. Thus I think we need to yield in the
> polling loop, whether or not we also yield in _raw_spin_trylock.
ok - how about the (raw) patch below? (on top of BK plus the latest
spin-nicer patch i sent earlier.) It builds/boots on x86 but is untested
on ppc64.
the idea is to make spin_yield() a generic function, with some related
namespace cleanups.
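
for completeness, a minimal sketch of what the asm-generic fallback could
look like (this is a sketch, not one of the hunks below - the
spinlock_yield()/rwlock_yield() names simply follow from the
locktype##_yield expansion in kernel/spinlock.c; an architecture such as
ppc64 can define its own versions, e.g. via __spin_yield()/__rw_yield(),
before this header is pulled in):

/* sketch of include/asm-generic/spinlock.h - assumed, not shown in the hunks below */
#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H

/*
 * Default lock-polling yield hooks: just tell the cpu we are busy-waiting.
 * Architectures that can hint the hypervisor to run the lock holder
 * (e.g. ppc64) define these themselves before including this file.
 */
#ifndef spinlock_yield
# define spinlock_yield(lock)	cpu_relax()
#endif

#ifndef rwlock_yield
# define rwlock_yield(lock)	cpu_relax()
#endif

#endif /* __ASM_GENERIC_SPINLOCK_H */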
Ingo
Signed-off-by: Ingo Molnar <mingo at elte.hu>
--- linux/kernel/exit.c.orig
+++ linux/kernel/exit.c
@@ -861,8 +861,12 @@ task_t fastcall *next_thread(const task_
#ifdef CONFIG_SMP
if (!p->sighand)
BUG();
+#ifndef write_is_locked
+# warning please implement read_is_locked()/write_is_locked()!
+# define write_is_locked rwlock_is_locked
+#endif
if (!spin_is_locked(&p->sighand->siglock) &&
- !rwlock_is_locked(&tasklist_lock))
+ !write_is_locked(&tasklist_lock))
BUG();
#endif
return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,8 +173,8 @@ EXPORT_SYMBOL(_write_lock);
* (We do this in a function because inlining it would be excessive.)
*/
-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn) \
-void __lockfunc _##op##_lock(locktype *lock) \
+#define BUILD_LOCK_OPS(op, locktype) \
+void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
preempt_disable(); \
for (;;) { \
@@ -183,15 +183,15 @@ void __lockfunc _##op##_lock(locktype *l
preempt_enable(); \
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (is_locked_fn(lock) && (lock)->break_lock) \
- cpu_relax(); \
+ while (op##_is_locked(lock) && (lock)->break_lock) \
+ locktype##_yield(lock); \
preempt_disable(); \
} \
} \
\
EXPORT_SYMBOL(_##op##_lock); \
\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock) \
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -205,8 +205,8 @@ unsigned long __lockfunc _##op##_lock_ir
preempt_enable(); \
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (is_locked_fn(lock) && (lock)->break_lock) \
- cpu_relax(); \
+ while (op##_is_locked(lock) && (lock)->break_lock) \
+ locktype##_yield(lock); \
preempt_disable(); \
} \
return flags; \
@@ -214,14 +214,14 @@ unsigned long __lockfunc _##op##_lock_ir
\
EXPORT_SYMBOL(_##op##_lock_irqsave); \
\
-void __lockfunc _##op##_lock_irq(locktype *lock) \
+void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
{ \
_##op##_lock_irqsave(lock); \
} \
\
EXPORT_SYMBOL(_##op##_lock_irq); \
\
-void __lockfunc _##op##_lock_bh(locktype *lock) \
+void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
* _[spin|read|write]_lock_irqsave()
* _[spin|read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
#endif /* CONFIG_PREEMPT */
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -7,6 +7,8 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm-generic/spinlock.h>
+
asmlinkage int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
@@ -15,7 +17,7 @@ asmlinkage int printk(const char * fmt,
*/
typedef struct {
- volatile unsigned int lock;
+ volatile unsigned int slock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
@@ -43,7 +45,7 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define spin_lock_string \
@@ -83,7 +85,7 @@ typedef struct {
#define spin_unlock_string \
"movb $1,%0" \
- :"=m" (lock->lock) : : "memory"
+ :"=m" (lock->slock) : : "memory"
static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -101,7 +103,7 @@ static inline void _raw_spin_unlock(spin
#define spin_unlock_string \
"xchgb %b0, %1" \
- :"=q" (oldval), "=m" (lock->lock) \
+ :"=q" (oldval), "=m" (lock->slock) \
:"0" (oldval) : "memory"
static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -123,7 +125,7 @@ static inline int _raw_spin_trylock(spin
char oldval;
__asm__ __volatile__(
"xchgb %b0,%1"
- :"=q" (oldval), "=m" (lock->lock)
+ :"=q" (oldval), "=m" (lock->slock)
:"0" (0) : "memory");
return oldval > 0;
}
@@ -138,7 +140,7 @@ static inline void _raw_spin_lock(spinlo
#endif
__asm__ __volatile__(
spin_lock_string
- :"=m" (lock->lock) : : "memory");
+ :"=m" (lock->slock) : : "memory");
}
static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
@@ -151,7 +153,7 @@ static inline void _raw_spin_lock_flags
#endif
__asm__ __volatile__(
spin_lock_string_flags
- :"=m" (lock->lock) : "r" (flags) : "memory");
+ :"=m" (lock->slock) : "r" (flags) : "memory");
}
/*
@@ -186,7 +188,17 @@ typedef struct {
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+/**
+ * read_is_locked - would read_trylock() fail?
+ * @lock: the rwlock in question.
+ */
+#define read_is_locked(x) (atomic_read((atomic_t *)&(x)->lock) <= 0)
+
+/**
+ * write_is_locked - would write_trylock() fail?
+ * @lock: the rwlock in question.
+ */
+#define write_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
/*
* On x86, we implement read-write locks as a 32-bit counter
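
to make the macro change concrete, here is roughly what
BUILD_LOCK_OPS(spin, spinlock) now expands to for the plain lock op (a
sketch only - the trylock fast path sits above the hunk quoted earlier,
so it may differ line-for-line):

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	for (;;) {
		if (likely(_raw_spin_trylock(lock)))
			break;
		/*
		 * contended: drop the preempt count and poll the lock,
		 * yielding the (virtual) cpu towards the lock holder
		 * instead of just executing cpu_relax() as before.
		 */
		preempt_enable();
		if (!(lock)->break_lock)
			(lock)->break_lock = 1;
		while (spin_is_locked(lock) && (lock)->break_lock)
			spinlock_yield(lock);
		preempt_disable();
	}
}

the read/write variants are identical modulo the op name, which is why
the separate is_locked function argument to the macro can go away.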