[Cbe-oss-dev] [PATCH 01/11] powerpc/spufs: Change cbe_spu_info mutex_lock to spin_lock

Andre Detsch adetsch at br.ibm.com
Fri Sep 12 09:37:41 EST 2008


This structure groups the physical spus. The list_mutex must be changed
to a spin lock because the runq_lock is a spin lock, and mutexes cannot
be nested under spin locks: a mutex may sleep, which is not allowed
while a spin lock is held.  The lock for cbe_spu_info[] is taken under
the runq_lock, as many spus may need to be allocated to schedule a gang.
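
For illustration, a minimal hypothetical sketch (not code from this
patch; the names are made up) of the forbidden nesting:

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(outer_lock);	/* think: runq_lock */
	static DEFINE_MUTEX(inner_mutex);	/* think: old list_mutex */

	static void broken_nesting(void)
	{
		spin_lock(&outer_lock);		/* disables preemption */
		mutex_lock(&inner_mutex);	/* BUG: may sleep in atomic context */
		/* ... */
		mutex_unlock(&inner_mutex);
		spin_unlock(&outer_lock);
	}

With CONFIG_DEBUG_ATOMIC_SLEEP this triggers "BUG: sleeping function
called from invalid context"; converting the inner lock to a spin lock
makes the nesting legal.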

Change spu_bind_context() and spu_unbind_context() so that they are not
called under the new spin lock, as that would deadlock if they blocked
on higher-level allocations (mmap) that are protected by mutexes.
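
The resulting shape, sketched from the __spu_schedule() hunk below
(simplified; surrounding code omitted): claim the spu and update the
bookkeeping under list_lock, drop the lock, then do the blocking bind.

	spin_lock(&cbe_spu_info[node].list_lock);
	if (spu->ctx == NULL) {
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		spu->ctx = ctx;		/* claim the spu while locked */
		success = 1;
	}
	spin_unlock(&cbe_spu_info[node].list_lock);

	spu_bind_context(spu, ctx);	/* may block on mmap; runs unlocked */

Setting spu->ctx under the lock is what prevents another scheduler from
grabbing the same spu while the bind runs outside the lock; the unbind
path mirrors this by unbinding first and clearing spu->ctx under the
lock afterwards.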

Signed-off-by: Luke Browning <lukebrowning at us.ibm.com>
Signed-off-by: Andre Detsch <adetsch at br.ibm.com>
---
 arch/powerpc/include/asm/spu.h            |    2 +-
 arch/powerpc/platforms/cell/spu_base.c    |    6 +-
 arch/powerpc/platforms/cell/spufs/sched.c |   82 +++++++++++++++--------------
 3 files changed, 47 insertions(+), 43 deletions(-)

diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 8b2eb04..9d799b6 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -187,7 +187,7 @@ struct spu {
 };
 
 struct cbe_spu_info {
-	struct mutex list_mutex;
+	spinlock_t list_lock;
 	struct list_head spus;
 	int n_spus;
 	int nr_active;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a5bdb89..b1a97a1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -650,10 +650,10 @@ static int __init create_spu(void *data)
 	if (ret)
 		goto out_free_irqs;
 
-	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
+	spin_lock(&cbe_spu_info[spu->node].list_lock);
 	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
 	cbe_spu_info[spu->node].n_spus++;
-	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
+	spin_unlock(&cbe_spu_info[spu->node].list_lock);
 
 	mutex_lock(&spu_full_list_mutex);
 	spin_lock_irqsave(&spu_full_list_lock, flags);
@@ -732,7 +732,7 @@ static int __init init_spu_base(void)
 	int i, ret = 0;
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
-		mutex_init(&cbe_spu_info[i].list_mutex);
+		spin_lock_init(&cbe_spu_info[i].list_lock);
 		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
 	}
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 897c740..386aa0a 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -153,11 +153,11 @@ void spu_update_sched_info(struct spu_context *ctx)
 		node = ctx->spu->node;
 
 		/*
-		 * Take list_mutex to sync with find_victim().
+		 * Take list_lock to sync with find_victim().
 		 */
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		__spu_update_sched_info(ctx);
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	} else {
 		__spu_update_sched_info(ctx);
 	}
@@ -179,9 +179,9 @@ static int node_allowed(struct spu_context *ctx, int node)
 {
 	int rval;
 
-	spin_lock(&spu_prio->runq_lock);
+	spin_lock(&cbe_spu_info[node].list_lock);
 	rval = __node_allowed(ctx, node);
-	spin_unlock(&spu_prio->runq_lock);
+	spin_unlock(&cbe_spu_info[node].list_lock);
 
 	return rval;
 }
@@ -199,7 +199,7 @@ void do_notify_spus_active(void)
 	for_each_online_node(node) {
 		struct spu *spu;
 
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if (spu->alloc_state != SPU_FREE) {
 				struct spu_context *ctx = spu->ctx;
@@ -209,7 +209,7 @@ void do_notify_spus_active(void)
 				wake_up_all(&ctx->stop_wq);
 			}
 		}
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	}
 }
 
@@ -233,7 +233,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_associate_mm(spu, ctx->owner);
 
 	spin_lock_irq(&spu->register_lock);
-	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
@@ -257,11 +256,11 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 }
 
 /*
- * Must be used with the list_mutex held.
+ * Must be used with the list_lock held.
  */
 static inline int sched_spu(struct spu *spu)
 {
-	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
+	BUG_ON(!spin_is_locked(&cbe_spu_info[spu->node].list_lock));
 
 	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
 }
@@ -319,7 +318,7 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
 			continue;
 
 		available_spus = 0;
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if (spu->ctx && spu->ctx->gang
 					&& spu->ctx->aff_offset == 0)
@@ -329,18 +328,18 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
 				available_spus++;
 		}
 		if (available_spus < ctx->gang->contexts) {
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+			spin_unlock(&cbe_spu_info[node].list_lock);
 			continue;
 		}
 
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if ((!mem_aff || spu->has_mem_affinity) &&
 							sched_spu(spu)) {
-				mutex_unlock(&cbe_spu_info[node].list_mutex);
+				spin_unlock(&cbe_spu_info[node].list_lock);
 				return spu;
 			}
 		}
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	}
 	return NULL;
 }
@@ -455,7 +454,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu->tgid = 0;
 	ctx->ops = &spu_backing_ops;
 	spu->flags = 0;
-	spu->ctx = NULL;
 	spin_unlock_irq(&spu->register_lock);
 
 	spu_associate_mm(spu, NULL);
@@ -570,11 +568,11 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 			mutex_unlock(&ctx->gang->aff_mutex);
 			node = aff_ref_spu->node;
 
-			mutex_lock(&cbe_spu_info[node].list_mutex);
+			spin_lock(&cbe_spu_info[node].list_lock);
 			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
 			if (spu && spu->alloc_state == SPU_FREE)
 				goto found;
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+			spin_unlock(&cbe_spu_info[node].list_lock);
 
 			atomic_dec(&ctx->gang->aff_sched_count);
 			goto not_found;
@@ -587,12 +585,12 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 		if (!node_allowed(ctx, node))
 			continue;
 
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if (spu->alloc_state == SPU_FREE)
 				goto found;
 		}
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	}
 
  not_found:
@@ -601,7 +599,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 
  found:
 	spu->alloc_state = SPU_USED;
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	spin_unlock(&cbe_spu_info[node].list_lock);
 	spu_context_trace(spu_get_idle__found, ctx, spu);
 	spu_init_channels(spu);
 	return spu;
@@ -635,7 +633,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 		if (!node_allowed(ctx, node))
 			continue;
 
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			struct spu_context *tmp = spu->ctx;
 
@@ -647,7 +645,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 		}
 		if (victim)
 			get_spu_context(victim);
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 
 		if (victim) {
 			/*
@@ -681,10 +679,12 @@ static struct spu *find_victim(struct spu_context *ctx)
 
 			spu_context_trace(__spu_deactivate__unload, ctx, spu);
 
-			mutex_lock(&cbe_spu_info[node].list_mutex);
-			cbe_spu_info[node].nr_active--;
 			spu_unbind_context(spu, victim);
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+			spin_lock(&cbe_spu_info[node].list_lock);
+			spu->ctx = NULL;
+			cbe_spu_info[node].nr_active--;
+			spin_unlock(&cbe_spu_info[node].list_lock);
 
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
@@ -708,14 +708,16 @@ static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
 
 	spu_set_timeslice(ctx);
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
+	spin_lock(&cbe_spu_info[node].list_lock);
 	if (spu->ctx == NULL) {
-		spu_bind_context(spu, ctx);
 		cbe_spu_info[node].nr_active++;
 		spu->alloc_state = SPU_USED;
+		spu->ctx = ctx;
 		success = 1;
 	}
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	spin_unlock(&cbe_spu_info[node].list_lock);
+
+	spu_bind_context(spu, ctx);
 
 	if (success)
 		wake_up_all(&ctx->run_wq);
@@ -750,14 +752,16 @@ static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
 {
 	int node = spu->node;
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
-	cbe_spu_info[node].nr_active--;
 	if (free_spu)
 		spu->alloc_state = SPU_FREE;
 	spu_unbind_context(spu, ctx);
+
+	spin_lock(&cbe_spu_info[node].list_lock);
+	cbe_spu_info[node].nr_active--;
+	spu->ctx = NULL;
 	ctx->stats.invol_ctx_switch++;
 	spu->stats.invol_ctx_switch++;
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	spin_unlock(&cbe_spu_info[node].list_lock);
 }
 
 /**
@@ -946,7 +950,7 @@ out:
  *
  * Return the number of tasks currently running or waiting to run.
  *
- * Note that we don't take runq_lock / list_mutex here.  Reading
+ * Note that we don't take runq_lock / list_lock here.  Reading
  * a single 32bit value is atomic on powerpc, and we don't care
  * about memory ordering issues here.
  */
@@ -998,22 +1002,22 @@ static int spusched_thread(void *unused)
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		for (node = 0; node < MAX_NUMNODES; node++) {
-			struct mutex *mtx = &cbe_spu_info[node].list_mutex;
+			spinlock_t *l = &cbe_spu_info[node].list_lock;
 
-			mutex_lock(mtx);
+			spin_lock(l);
 			list_for_each_entry(spu, &cbe_spu_info[node].spus,
 					cbe_list) {
 				struct spu_context *ctx = spu->ctx;
 
 				if (ctx) {
 					get_spu_context(ctx);
-					mutex_unlock(mtx);
+					spin_unlock(l);
 					spusched_tick(ctx);
-					mutex_lock(mtx);
+					spin_lock(l);
 					put_spu_context(ctx);
 				}
 			}
-			mutex_unlock(mtx);
+			spin_unlock(l);
 		}
 	}
 
@@ -1150,11 +1154,11 @@ void spu_sched_exit(void)
 	kthread_stop(spusched_task);
 
 	for (node = 0; node < MAX_NUMNODES; node++) {
-		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
 			if (spu->alloc_state != SPU_FREE)
 				spu->alloc_state = SPU_FREE;
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		spin_unlock(&cbe_spu_info[node].list_lock);
 	}
 	kfree(spu_prio);
 }
-- 
1.5.4.1