[patch] spin-yield-2.6.11-rc1-A1

Ingo Molnar mingo at elte.hu
Wed Jan 19 18:55:00 EST 2005


* Ingo Molnar <mingo at elte.hu> wrote:

> > ok - how about the (raw) patch below? (on top of BK plus the latest
> > spin-nicer patch i sent earlier.) It builds/boots on x86 but is
> > untested on ppc64.
> > 
> > the idea is to make spin_yield() a generic function, with some related
> > namespace cleanups.
> 
> wrong patch... Full patch against BK-curr attached.

the one below builds/boots as well ...
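
for reference, a hand-expanded sketch of what BUILD_LOCK_OPS(spin, spinlock)
generates after this patch (not part of the patch itself; the trylock fast
path between the hunks is assumed unchanged). The contended slowpath now
calls the per-locktype yield, which asm-generic/spinlock.h defaults to
cpu_relax() and which ppc64 overrides with its hypervisor-aware yield
routines:

	/* hand-expanded sketch of BUILD_LOCK_OPS(spin, spinlock) */
	void __lockfunc _spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		for (;;) {
			/* fast path, assumed unchanged by this patch */
			if (likely(_raw_spin_trylock(lock)))
				break;
			/* contended: drop preemption and spin politely */
			preempt_enable();
			if (!(lock)->break_lock)
				(lock)->break_lock = 1;
			while (spin_is_locked(lock) && (lock)->break_lock)
				/* cpu_relax() unless the arch overrides it */
				spinlock_yield(lock);
			preempt_disable();
		}
	}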

	Ingo

Signed-off-by: Ingo Molnar <mingo at elte.hu>

--- linux/kernel/exit.c.orig
+++ linux/kernel/exit.c
@@ -861,8 +861,12 @@ task_t fastcall *next_thread(const task_
 #ifdef CONFIG_SMP
 	if (!p->sighand)
 		BUG();
+#ifndef write_is_locked
+# warning please implement read_is_locked()/write_is_locked()!
+# define write_is_locked rwlock_is_locked
+#endif
 	if (!spin_is_locked(&p->sighand->siglock) &&
-				!rwlock_is_locked(&tasklist_lock))
+				!write_is_locked(&tasklist_lock))
 		BUG();
 #endif
 	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,8 +173,8 @@ EXPORT_SYMBOL(_write_lock);
  * (We do this in a function because inlining it would be excessive.)
  */
 
-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn)			\
-void __lockfunc _##op##_lock(locktype *lock)				\
+#define BUILD_LOCK_OPS(op, locktype)					\
+void __lockfunc _##op##_lock(locktype##_t *lock)			\
 {									\
 	preempt_disable();						\
 	for (;;) {							\
@@ -183,15 +183,15 @@ void __lockfunc _##op##_lock(locktype *l
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
-			cpu_relax();					\
+		while (op##_is_locked(lock) && (lock)->break_lock)	\
+			locktype##_yield(lock);				\
 		preempt_disable();					\
 	}								\
 }									\
 									\
 EXPORT_SYMBOL(_##op##_lock);						\
 									\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock)		\
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
 {									\
 	unsigned long flags;						\
 									\
@@ -205,8 +205,8 @@ unsigned long __lockfunc _##op##_lock_ir
 		preempt_enable();					\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (is_locked_fn(lock) && (lock)->break_lock)	\
-			cpu_relax();					\
+		while (op##_is_locked(lock) && (lock)->break_lock)	\
+			locktype##_yield(lock);				\
 		preempt_disable();					\
 	}								\
 	return flags;							\
@@ -214,14 +214,14 @@ unsigned long __lockfunc _##op##_lock_ir
 									\
 EXPORT_SYMBOL(_##op##_lock_irqsave);					\
 									\
-void __lockfunc _##op##_lock_irq(locktype *lock)			\
+void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
 {									\
 	_##op##_lock_irqsave(lock);					\
 }									\
 									\
 EXPORT_SYMBOL(_##op##_lock_irq);					\
 									\
-void __lockfunc _##op##_lock_bh(locktype *lock)				\
+void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
 {									\
 	unsigned long flags;						\
 									\
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
  *         _[spin|read|write]_lock_irqsave()
  *         _[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
 
 #endif /* CONFIG_PREEMPT */
 
--- linux/arch/ppc64/lib/locks.c.orig
+++ linux/arch/ppc64/lib/locks.c
@@ -23,7 +23,7 @@
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
-void __spin_yield(spinlock_t *lock)
+void spinlock_yield(spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 	struct paca_struct *holder_paca;
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(rwlock_t *rw)
+void rwlock_yield(rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
@@ -87,7 +87,7 @@ void spin_unlock_wait(spinlock_t *lock)
 	while (lock->lock) {
 		HMT_low();
 		if (SHARED_PROCESSOR)
-			__spin_yield(lock);
+			spinlock_yield(lock);
 	}
 	HMT_medium();
 }
--- linux/include/asm-ia64/spinlock.h.orig
+++ linux/include/asm-ia64/spinlock.h
@@ -17,6 +17,8 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
+#include <asm-generic/spinlock.h>
+
 typedef struct {
 	volatile unsigned int lock;
 #ifdef CONFIG_PREEMPT
--- linux/include/asm-generic/spinlock.h.orig
+++ linux/include/asm-generic/spinlock.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_SPINLOCK_H
+#define _ASM_GENERIC_SPINLOCK_H
+
+/*
+ * Virtual platforms might use these to
+ * yield to specific virtual CPUs:
+ */
+#define spinlock_yield(lock)	cpu_relax()
+#define rwlock_yield(lock)	cpu_relax()
+
+#endif /* _ASM_GENERIC_SPINLOCK_H */
--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -202,10 +202,12 @@ typedef struct {
 #define _raw_spin_lock(lock)	do { (void)(lock); } while(0)
 #define spin_is_locked(lock)	((void)(lock), 0)
 #define _raw_spin_trylock(lock)	(((void)(lock), 1))
-#define spin_unlock_wait(lock)	(void)(lock);
+#define spin_unlock_wait(lock)	(void)(lock)
 #define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
 #endif /* CONFIG_DEBUG_SPINLOCK */
 
+#define spinlock_yield(lock)	(void)(lock)
+
 /* RW spinlocks: No debug version */
 
 #if (__GNUC__ > 2)
@@ -224,6 +226,8 @@ typedef struct {
 #define _raw_read_trylock(lock) ({ (void)(lock); (1); })
 #define _raw_write_trylock(lock) ({ (void)(lock); (1); })
 
+#define rwlock_yield(lock)	(void)(lock)
+
 #define _spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
 				1 : ({preempt_enable(); 0;});})
 
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -7,6 +7,8 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 
+#include <asm-generic/spinlock.h>
+
 asmlinkage int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
@@ -15,7 +17,7 @@ asmlinkage int printk(const char * fmt, 
  */
 
 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned int slock;
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
@@ -43,7 +45,7 @@ typedef struct {
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
 #define spin_lock_string \
@@ -83,7 +85,7 @@ typedef struct {
 
 #define spin_unlock_string \
 	"movb $1,%0" \
-		:"=m" (lock->lock) : : "memory"
+		:"=m" (lock->slock) : : "memory"
 
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -101,7 +103,7 @@ static inline void _raw_spin_unlock(spin
 
 #define spin_unlock_string \
 	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->lock) \
+		:"=q" (oldval), "=m" (lock->slock) \
 		:"0" (oldval) : "memory"
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
@@ -123,7 +125,7 @@ static inline int _raw_spin_trylock(spin
 	char oldval;
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->lock)
+		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
 	return oldval > 0;
 }
@@ -138,7 +140,7 @@ static inline void _raw_spin_lock(spinlo
 #endif
 	__asm__ __volatile__(
 		spin_lock_string
-		:"=m" (lock->lock) : : "memory");
+		:"=m" (lock->slock) : : "memory");
 }
 
 static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
@@ -151,7 +153,7 @@ static inline void _raw_spin_lock_flags 
 #endif
 	__asm__ __volatile__(
 		spin_lock_string_flags
-		:"=m" (lock->lock) : "r" (flags) : "memory");
+		:"=m" (lock->slock) : "r" (flags) : "memory");
 }
 
 /*
@@ -186,7 +188,17 @@ typedef struct {
 
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+/**
+ * read_is_locked - would read_trylock() fail?
+ * @lock: the rwlock in question.
+ */
+#define read_is_locked(x) (atomic_read((atomic_t *)&(x)->lock) <= 0)
+
+/**
+ * write_is_locked - would write_trylock() fail?
+ * @lock: the rwlock in question.
+ */
+#define write_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
--- linux/include/asm-ppc64/spinlock.h.orig
+++ linux/include/asm-ppc64/spinlock.h
@@ -64,11 +64,11 @@ static __inline__ void _raw_spin_unlock(
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
+extern void spinlock_yield(spinlock_t *lock);
+extern void rwlock_yield(rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
-#define __spin_yield(x)	barrier()
-#define __rw_yield(x)	barrier()
+#define spinlock_yield(x)	barrier()
+#define rwlock_yield(x)	barrier()
 #define SHARED_PROCESSOR	0
 #endif
 extern void spin_unlock_wait(spinlock_t *lock);
@@ -109,7 +109,7 @@ static void __inline__ _raw_spin_lock(sp
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
+				spinlock_yield(lock);
 		} while (likely(lock->lock != 0));
 		HMT_medium();
 	}
@@ -127,7 +127,7 @@ static void __inline__ _raw_spin_lock_fl
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__spin_yield(lock);
+				spinlock_yield(lock);
 		} while (likely(lock->lock != 0));
 		HMT_medium();
 		local_irq_restore(flags_dis);
@@ -201,7 +201,7 @@ static void __inline__ _raw_read_lock(rw
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
+				rwlock_yield(rw);
 		} while (likely(rw->lock < 0));
 		HMT_medium();
 	}
@@ -258,7 +258,7 @@ static void __inline__ _raw_write_lock(r
 		do {
 			HMT_low();
 			if (SHARED_PROCESSOR)
-				__rw_yield(rw);
+				rwlock_yield(rw);
 		} while (likely(rw->lock != 0));
 		HMT_medium();
 	}
--- linux/include/asm-x86_64/spinlock.h.orig
+++ linux/include/asm-x86_64/spinlock.h
@@ -6,6 +6,8 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
+#include <asm-generic/spinlock.h>
+
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 


