[patch] spin-nicer-2.6.11-rc1-A1

Ingo Molnar mingo at elte.hu
Mon Jan 17 22:32:17 EST 2005


* Paul Mackerras <paulus at samba.org> wrote:

> > +BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
> > +BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
> 
> I don't think this is right - this means that a cpu trying to acquire
> a read lock will spin while any other cpu has a read lock.  We need to
> invent and use a rwlock_is_write_locked() here.  PPC64 and parisc have
> an is_write_locked() already, and it shouldn't be too hard to do one
> for the other architectures (i386 wants (signed int)rw->lock <= 0,
> most other arches seem to need (signed int)rw->lock < 0).
> 
> > +BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
> 
> This one should be rwlock_is_locked, surely?  Otherwise the compiler
> will grizzle about us calling spin_is_locked with a rwlock_t *.

You are right on both counts. The patch below, on top of current BK,
fixes both problems.

The first fix addresses why there was no compiler warning on x86: the
lock primitives are macros there, so spin_is_locked() happily accepted a
rwlock_t *. I fixed this by renaming the spinlock field to '->slock',
which turns any such type mismatch into a build error. (We could also
use inline functions to get type protection; I chose this solution
because it was the easiest to do.)
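
For reference, the inline-function alternative would look roughly like
the sketch below; 'spin_is_locked_fn' is just an illustrative name, not
something this patch introduces, and here the prototype rather than the
field rename is what provides the type check:

	/*
	 * hypothetical sketch: an inline function only accepts a
	 * spinlock_t *, so passing a rwlock_t * warns even on
	 * architectures whose lock primitives are plain macros.
	 */
	static inline int spin_is_locked_fn(spinlock_t *lock)
	{
		return *(volatile signed char *)(&lock->slock) <= 0;
	}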

The second fix is to split rwlock_is_locked() into two separate primitives:

 +/**
 + * read_is_locked - would read_trylock() fail?
 + * @lock: the rwlock in question.
 + */
 +#define read_is_locked(x) (atomic_read((atomic_t *)&(x)->lock) <= 0)
 +
 +/**
 + * write_is_locked - would write_trylock() fail?
 + * @lock: the rwlock in question.
 + */
 +#define write_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
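
To spell out the i386 counter convention these checks rely on (the
rwlock starts at RW_LOCK_BIAS, readers decrement it by 1, a writer
subtracts the whole bias), a rough summary:

	/*
	 * i386 rwlock counter states (RW_LOCK_BIAS = 0x01000000):
	 *   == RW_LOCK_BIAS          unlocked
	 *   0 < lock < RW_LOCK_BIAS  read-locked (each reader: -1)
	 *   <= 0                     write-locked (writer: -RW_LOCK_BIAS)
	 *
	 * hence read_is_locked() checks <= 0 (read_trylock() only fails
	 * while a writer holds the lock), and write_is_locked() checks
	 * != RW_LOCK_BIAS (write_trylock() fails whenever anybody holds
	 * the lock).
	 */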

This canonical naming also allows the elimination of the newly added
'is_locked_fn' argument to the BUILD_LOCK_OPS() macro: the macro can now
derive the right helper via token pasting (op##_is_locked).
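
For example, inside BUILD_LOCK_OPS(read, rwlock_t) the lock-break loop
now expands to roughly:

	while (read_is_locked(lock) && (lock)->break_lock)
		cpu_relax();

and BUILD_LOCK_OPS(write, rwlock_t) picks up write_is_locked() the same
way.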

The third change converts the other user of rwlock_is_locked() (the BUG
check in kernel/exit.c) and puts a migration helper there: architectures
that don't have read_is_locked()/write_is_locked() defined yet will get
a #warning message, but the build will still succeed. (Except if PREEMPT
is enabled - there we really need the new primitives, because the
lock-break loops rely on them.)

Compile- and boot-tested on x86, on SMP and UP, PREEMPT and !PREEMPT.
Non-x86 architectures should work fine, except PREEMPT+SMP builds, which
will need the read_is_locked()/write_is_locked() definitions.
!PREEMPT+SMP builds will work fine and will produce a #warning.
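
For architectures that use the other common rwlock convention Paul
mentioned (counter is 0 when free, positive with readers, negative when
write-locked), the definitions would look roughly like the following -
an untested sketch, not part of this patch:

	/*
	 * rough sketch for a 0/positive/negative counter rwlock:
	 * read_trylock() only fails while a writer holds the lock,
	 * write_trylock() fails whenever anybody holds it.
	 */
	#define read_is_locked(x)	((signed int)(x)->lock < 0)
	#define write_is_locked(x)	((x)->lock != 0)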

	Ingo

Signed-off-by: Ingo Molnar <mingo at elte.hu>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(_write_lock);
  * (We do this in a function because inlining it would be excessive.)
  */
 
-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn)			\
+#define BUILD_LOCK_OPS(op, locktype)					\
 void __lockfunc _##op##_lock(locktype *lock)				\
 {									\
 	preempt_disable();						\
@@ -183,7 +183,7 @@ void __lockfunc _##op##_lock(locktype *l
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
+		while (op##_is_locked(lock) && (lock)->break_lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
@@ -205,7 +205,7 @@ unsigned long __lockfunc _##op##_lock_ir
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
+		while (op##_is_locked(lock) && (lock)->break_lock)	\
 			cpu_relax();					\
 		preempt_disable();					\
 	}								\
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
  *         _[spin|read|write]_lock_irqsave()
  *         _[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock_t);
+BUILD_LOCK_OPS(read, rwlock_t);
+BUILD_LOCK_OPS(write, rwlock_t);
 
 #endif /* CONFIG_PREEMPT */
 
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -15,7 +15,7 @@ asmlinkage int printk(const char * fmt, 
  */
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int slock;
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
@@ -43,7 +43,7 @@ typedef struct {
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
 #define spin_lock_string \
@@ -83,7 +83,7 @@ typedef struct {
 
 #define spin_unlock_string \
 	"movb $1,%0" \
-		:"=m" (lock->lock) : : "memory"
+		:"=m" (lock->slock) : : "memory"
 
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -101,7 +101,7 @@ static inline void _raw_spin_unlock(spin
 
 #define spin_unlock_string \
 	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->lock) \
+		:"=q" (oldval), "=m" (lock->slock) \
 		:"0" (oldval) : "memory"
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -123,7 +123,7 @@ static inline int _raw_spin_trylock(spin
 	char oldval;
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->lock)
+		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
 	return oldval > 0;
 }
@@ -138,7 +138,7 @@ static inline void _raw_spin_lock(spinlo
 #endif
 	__asm__ __volatile__(
 		spin_lock_string
-		:"=m" (lock->lock) : : "memory");
+		:"=m" (lock->slock) : : "memory");
 }
 
 static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
@@ -151,7 +151,7 @@ static inline void _raw_spin_lock_flags 
 #endif
 	__asm__ __volatile__(
 		spin_lock_string_flags
-		:"=m" (lock->lock) : "r" (flags) : "memory");
+		:"=m" (lock->slock) : "r" (flags) : "memory");
 }
 
 /*
@@ -186,7 +186,17 @@ typedef struct {
 
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+/**
+ * read_is_locked - would read_trylock() fail?
+ * @lock: the rwlock in question.
+ */
+#define read_is_locked(x) (atomic_read((atomic_t *)&(x)->lock) <= 0)
+
+/**
+ * write_is_locked - would write_trylock() fail?
+ * @lock: the rwlock in question.
+ */
+#define write_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
--- linux/kernel/exit.c.orig
+++ linux/kernel/exit.c
@@ -861,8 +861,12 @@ task_t fastcall *next_thread(const task_
 #ifdef CONFIG_SMP
 	if (!p->sighand)
 		BUG();
+#ifndef write_is_locked
+# warning please implement read_is_locked()/write_is_locked()!
+# define write_is_locked rwlock_is_locked
+#endif
 	if (!spin_is_locked(&p->sighand->siglock) &&
-				!rwlock_is_locked(&tasklist_lock))
+				!write_is_locked(&tasklist_lock))
 		BUG();
 #endif
 	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);


