[Cbe-oss-dev] [PATCH 3/3] spu sched: replace active_mutex with a spinlock

Christoph Hellwig hch at lst.de
Thu Apr 5 04:49:35 EST 2007


The only reason active_mutex had to be a sleeping mutex was that
spu_free could potentially block.  With my last patch that's not
true anymore and we can use much less expensive spinlock protection
for its small critical sections.


Signed-off-by: Christoph Hellwig <hch at lst.de>

Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-04 15:39:52.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-04 15:44:31.000000000 +0200
@@ -51,7 +51,7 @@ struct spu_prio_array {
 	struct list_head runq[MAX_PRIO];
 	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
-	struct mutex active_mutex[MAX_NUMNODES];
+	spinlock_t active_lock[MAX_NUMNODES];
 };
 
 static struct spu_prio_array *spu_prio;
@@ -137,9 +137,9 @@ void spu_sched_tick(struct work_struct *
  */
 static void spu_add_to_active_list(struct spu *spu)
 {
-	mutex_lock(&spu_prio->active_mutex[spu->node]);
+	spin_lock(&spu_prio->active_lock[spu->node]);
 	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
-	mutex_unlock(&spu_prio->active_mutex[spu->node]);
+	spin_unlock(&spu_prio->active_lock[spu->node]);
 }
 
 /**
@@ -150,9 +150,9 @@ static void spu_remove_from_active_list(
 {
 	int node = spu->node;
 
-	mutex_lock(&spu_prio->active_mutex[node]);
+	spin_lock(&spu_prio->active_lock[node]);
 	list_del_init(&spu->list);
-	mutex_unlock(&spu_prio->active_mutex[node]);
+	spin_unlock(&spu_prio->active_lock[node]);
 }
 
 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
@@ -172,14 +172,14 @@ static void notify_spus_active(void)
 	 */
 	for (node = 0; node < MAX_NUMNODES; node++) {
 		struct spu *spu;
-		mutex_lock(&spu_prio->active_mutex[node]);
+		spin_lock(&spu_prio->active_lock[node]);
 		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
 			struct spu_context *ctx = spu->ctx;
 			set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
 			mb();
 			wake_up_all(&ctx->stop_wq);
 		}
-		mutex_unlock(&spu_prio->active_mutex[node]);
+		spin_unlock(&spu_prio->active_lock[node]);
 	}
 }
 
@@ -386,7 +386,7 @@ static struct spu *find_victim(struct sp
 		if (!node_allowed(node))
 			continue;
 
-		mutex_lock(&spu_prio->active_mutex[node]);
+		spin_lock(&spu_prio->active_lock[node]);
 		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
 			struct spu_context *tmp = spu->ctx;
 
@@ -394,7 +394,7 @@ static struct spu *find_victim(struct sp
 			    (!victim || tmp->rt_priority < victim->rt_priority))
 				victim = spu->ctx;
 		}
-		mutex_unlock(&spu_prio->active_mutex[node]);
+		spin_unlock(&spu_prio->active_lock[node]);
 
 		if (victim) {
 			/*
@@ -533,7 +533,7 @@ int __init spu_sched_init(void)
 	}
 	__set_bit(MAX_PRIO, spu_prio->bitmap);
 	for (i = 0; i < MAX_NUMNODES; i++) {
-		mutex_init(&spu_prio->active_mutex[i]);
+		spin_lock_init(&spu_prio->active_lock[i]);
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
 	spin_lock_init(&spu_prio->runq_lock);
@@ -546,13 +546,13 @@ void __exit spu_sched_exit(void)
 	int node;
 
 	for (node = 0; node < MAX_NUMNODES; node++) {
-		mutex_lock(&spu_prio->active_mutex[node]);
+		spin_lock(&spu_prio->active_lock[node]);
 		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
 					 list) {
 			list_del_init(&spu->list);
 			spu_free(spu);
 		}
-		mutex_unlock(&spu_prio->active_mutex[node]);
+		spin_unlock(&spu_prio->active_lock[node]);
 	}
 	kfree(spu_prio);
 	destroy_workqueue(spu_sched_wq);



More information about the cbe-oss-dev mailing list