[Cbe-oss-dev] [PATCH 08/21] spusched: fix cpu/node binding

Jeremy Kerr jk at ozlabs.org
Fri Jun 29 10:57:56 EST 2007


From: Christoph Hellwig <hch at lst.de>

Add a cpus_allowed field to struct spu_context so that we always use
the cpu mask of the owning thread instead of that of whichever thread
happens to call into the scheduler.  Also use this information in
grab_runnable_context to avoid spurious wakeups.
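
For illustration, the gist of the node test change is sketched below
(condensed from the patch itself, not the literal source).  The old
predicate consulted current, which is only correct when the context's
owning thread itself calls into the scheduler; when placement is
triggered from elsewhere (for instance the spusched tick), current's
mask is the wrong one to test:

	cpumask_t mask = node_to_cpumask(node);

	/* Old: tests whichever task happens to be in the scheduler. */
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;

	/* New: tests the mask captured from the owning thread, which
	 * alloc_spu_context() and __spu_update_sched_info() keep up to
	 * date in ctx->cpus_allowed under runq_lock. */
	if (!cpus_intersects(mask, ctx->cpus_allowed))
		return 0;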

Signed-off-by: Christoph Hellwig <hch at lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann at de.ibm.com>
Signed-off-by: Jeremy Kerr <jk at ozlabs.org>
---
 arch/powerpc/platforms/cell/spufs/context.c |    2 +-
 arch/powerpc/platforms/cell/spufs/sched.c   |   70 +++++++++++++++++++--------
 arch/powerpc/platforms/cell/spufs/spufs.h   |    2 +
 3 files changed, 52 insertions(+), 22 deletions(-)
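
As a usage illustration (not part of the patch): the mask that now
matters is the one set on the thread that owns the SPU context.  The
sketch below is a hypothetical userspace program; it assumes a Cell
system with spufs mounted at /spu, and "/spu/example" is an arbitrary
example path.  spu_create(2) and spu_run(2) have no glibc wrappers, so
they are invoked via syscall(2); SPU program loading and most error
handling are omitted.

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		cpu_set_t mask;
		unsigned int npc = 0, event = 0;
		int fd;

		/* Pin the owning thread to CPU 0 *before* spu_create();
		 * alloc_spu_context() snapshots current->cpus_allowed, so
		 * SPU placement is restricted to nodes intersecting this
		 * mask, and __spu_update_sched_info() refreshes it on
		 * later scheduler updates. */
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		if (sched_setaffinity(0, sizeof(mask), &mask)) {
			perror("sched_setaffinity");
			return 1;
		}

		fd = syscall(__NR_spu_create, "/spu/example", 0, 0755);
		if (fd < 0) {
			perror("spu_create");
			return 1;
		}

		/* Placement for this run is decided against the owning
		 * thread's mask, not that of whichever task last called
		 * into the SPU scheduler. */
		syscall(__NR_spu_run, fd, &npc, &event);
		return 0;
	}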

diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index c778d91..6ff2a75 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -53,7 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
-
+	ctx->cpus_allowed = current->cpus_allowed;
 	spu_set_timeslice(ctx);
 	goto out;
 out_free:
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3707c7f..6927262 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -112,6 +112,16 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	else
 		ctx->prio = current->static_prio;
 	ctx->policy = current->policy;
+
+	/*
+	 * A lot of places that don't hold active_mutex poke into
+	 * cpus_allowed, including grab_runnable_context which
+	 * already holds the runq_lock.  So abuse runq_lock
+	 * to protect this field as well.
+	 */
+	spin_lock(&spu_prio->runq_lock);
+	ctx->cpus_allowed = current->cpus_allowed;
+	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -123,16 +133,27 @@ void spu_update_sched_info(struct spu_context *ctx)
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
-static inline int node_allowed(int node)
+static int __node_allowed(struct spu_context *ctx, int node)
 {
-	cpumask_t mask;
+	if (nr_cpus_node(node)) {
+		cpumask_t mask = node_to_cpumask(node);
 
-	if (!nr_cpus_node(node))
-		return 0;
-	mask = node_to_cpumask(node);
-	if (!cpus_intersects(mask, current->cpus_allowed))
-		return 0;
-	return 1;
+		if (cpus_intersects(mask, ctx->cpus_allowed))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int node_allowed(struct spu_context *ctx, int node)
+{
+	int rval;
+
+	spin_lock(&spu_prio->runq_lock);
+	rval = __node_allowed(ctx, node);
+	spin_unlock(&spu_prio->runq_lock);
+
+	return rval;
 }
 
 /**
@@ -289,7 +310,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 		spu = spu_alloc_node(node);
 		if (spu)
@@ -321,7 +342,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 
 		mutex_lock(&spu_prio->active_mutex[node]);
@@ -416,23 +437,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  * Remove the highest priority context on the runqueue and return it
  * to the caller.  Returns %NULL if no runnable context was found.
  */
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
 {
-	struct spu_context *ctx = NULL;
+	struct spu_context *ctx;
 	int best;
 
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < prio) {
+	while (best < prio) {
 		struct list_head *rq = &spu_prio->runq[best];
 
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
+		list_for_each_entry(ctx, rq, rq) {
+			/* XXX(hch): check for affinity here as well */
+			if (__node_allowed(ctx, node)) {
+				__spu_del_from_rq(ctx);
+				goto found;
+			}
+		}
+		best++;
 	}
+	ctx = NULL;
+ found:
 	spin_unlock(&spu_prio->runq_lock);
-
 	return ctx;
 }
 
@@ -442,7 +468,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	struct spu_context *new = NULL;
 
 	if (spu) {
-		new = grab_runnable_context(max_prio);
+		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
 			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
@@ -496,9 +522,11 @@ static void spusched_tick(struct spu_context *ctx)
 	 * tick and try again.
 	 */
 	if (mutex_trylock(&ctx->state_mutex)) {
-		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
+		struct spu *spu = ctx->spu;
+		struct spu_context *new;
+
+		new = grab_runnable_context(ctx->prio + 1, spu->node);
 		if (new) {
-			struct spu *spu = ctx->spu;
 
 			__spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index ff77f90..98d3c18 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
+#include <linux/cpumask.h>
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
@@ -80,6 +81,7 @@ struct spu_context {
 	struct list_head rq;
 	unsigned int time_slice;
 	unsigned long sched_flags;
+	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
 };
-- 
1.5.0.rc4.g85b1