[Cbe-oss-dev] [PATCH 08/16] spufs: reorganize spu_run_init

Jeremy Kerr jk at ozlabs.org
Thu Dec 20 18:39:59 EST 2007


From: Luke Browning <lukebr at linux.vnet.ibm.com>

This patch cleans up spu_run_init so that it does all of the spu
initialization for spufs_run_spu.  The spu context is now initialized
as far as possible before the spu is activated and the runcntl
register is written.
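
For reference, this is roughly what spu_run_init looks like with the
patch applied.  It is a condensed sketch assembled from the hunks
below, not the literal post-patch source; context that the hunks elide
is marked with "..." comments:

	static int spu_run_init(struct spu_context *ctx, u32 *npc)
	{
		unsigned long runcntl;
		int ret;

		/* all setup below is accounted as system time */
		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if (ctx->flags & SPU_CREATE_ISOLATE) {
			/* activate first; isolated setup needs a loaded spu */
			if (ctx->state == SPU_STATE_SAVED) {
				spu_set_timeslice(ctx);
				ret = spu_activate(ctx, 0);
				if (ret)
					return ret;
			}

			if (!(ctx->ops->status_read(ctx) &
					SPU_STATUS_ISOLATED_STATE)) {
				ret = spu_setup_isolated(ctx);
				if (ret)
					return ret;
			}

			/* keep a runcntl value userspace may have set */
			runcntl = ctx->ops->runcntl_read(ctx) &
				(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
			if (runcntl == 0)
				runcntl = SPU_RUNCNTL_RUNNABLE;

			spuctx_switch_state(ctx, SPU_UTIL_USER);
			ctx->ops->runcntl_write(ctx, runcntl);
		} else {
			/* ... npc/privcntl/runcntl setup (unchanged, elided) ... */

			if (ctx->state == SPU_STATE_SAVED) {
				spu_set_timeslice(ctx);
				ret = spu_activate(ctx, 0);
				if (ret)
					return ret;
			}

			spuctx_switch_state(ctx, SPU_UTIL_USER);
			ctx->ops->runcntl_write(ctx, runcntl);
		}

		return 0;
	}

The effect of the reorganization is visible here: runcntl_write() is
the last step in either branch, after the context has been activated,
so the spu never starts running on partially initialized state, and
SPU_UTIL_USER accounting only begins once the spu is actually runnable.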

Signed-off-by: Luke Browning <lukebr at linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk at ozlabs.org>

---
 arch/powerpc/platforms/cell/spufs/run.c   |   55 ++++++++++++++++--------------
 arch/powerpc/platforms/cell/spufs/sched.c |   35 +++++++++++++------
 2 files changed, 56 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3b3de6c..652ae13 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -152,23 +152,41 @@ out:
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
 	unsigned long runcntl;
+	int ret;
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		/*
+		 * Force activation of the spu.  Isolated state assumes that
+		 * the special loader context is loaded and running on the spu.
+		 */
+		if (ctx->state == SPU_STATE_SAVED) {
+			spu_set_timeslice(ctx);
+
+			ret = spu_activate(ctx, 0);
+			if (ret)
+				return ret;
+		}
 
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			int ret = spu_setup_isolated(ctx);
+			ret = spu_setup_isolated(ctx);
 			if (ret)
 				return ret;
 		}
 
-		/* if userspace has set the runcntrl register (eg, to issue an
-		 * isolated exit), we need to re-set it here */
+		/*
+		 * If userspace has set the runcntl register (e.g. to
+		 * issue an isolated exit), we need to re-set it here.
+		 */
 		runcntl = ctx->ops->runcntl_read(ctx) &
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+
+		spuctx_switch_state(ctx, SPU_UTIL_USER);
+		ctx->ops->runcntl_write(ctx, runcntl);
+
 	} else {
 		unsigned long privcntl;
 
@@ -180,11 +198,17 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 
 		ctx->ops->npc_write(ctx, *npc);
 		ctx->ops->privcntl_write(ctx, privcntl);
-	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
+		if (ctx->state == SPU_STATE_SAVED) {
+			spu_set_timeslice(ctx);
+			ret = spu_activate(ctx, 0);
+			if (ret)
+				return ret;
+		}
 
-	spuctx_switch_state(ctx, SPU_UTIL_USER);
+		spuctx_switch_state(ctx, SPU_UTIL_USER);
+		ctx->ops->runcntl_write(ctx, runcntl);
+	}
 
 	return 0;
 }
@@ -323,25 +347,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	ctx->event_return = 0;
 
 	spu_acquire(ctx);
-	if (ctx->state == SPU_STATE_SAVED) {
-		__spu_update_sched_info(ctx);
-		spu_set_timeslice(ctx);
 
-		ret = spu_activate(ctx, 0);
-		if (ret) {
-			spu_release(ctx);
-			goto out;
-		}
-	} else {
-		/*
-		 * We have to update the scheduling priority under active_mutex
-		 * to protect against find_victim().
-		 *
-		 * No need to update the timeslice ASAP, it will get updated
-		 * once the current one has expired.
-		 */
-		spu_update_sched_info(ctx);
-	}
+	spu_update_sched_info(ctx);
 
 	ret = spu_run_init(ctx, npc);
 	if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 82ea576..ef0e5e2 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -105,6 +105,12 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
 	/*
+	 * assert that the context is not on the runqueue, so it is safe
+	 * to change its scheduling parameters.
+	 */
+	BUG_ON(!list_empty(&ctx->rq));
+
+	/*
 	 * 32-Bit assignments are atomic on powerpc, and we don't care about
 	 * memory ordering here because retrieving the controlling thread is
 	 * per definition racy.
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	ctx->policy = current->policy;
 
 	/*
-	 * A lot of places that don't hold list_mutex poke into
-	 * cpus_allowed, including grab_runnable_context which
-	 * already holds the runq_lock.  So abuse runq_lock
-	 * to protect this field as well.
+	 * TODO: the context may be loaded, so we may need to activate
+	 * it again on a different node. But it shouldn't hurt anything
+	 * to update its parameters, because we know that the scheduler
+	 * is not actively looking at this field, since it is not on the
+	 * runqueue. The context will be rescheduled on the proper node
+	 * if it is timesliced or preempted.
 	 */
-	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
-	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-	int node = ctx->spu->node;
+	int node;
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
-	__spu_update_sched_info(ctx);
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		node = ctx->spu->node;
+		mutex_lock(&cbe_spu_info[node].list_mutex);
+		__spu_update_sched_info(ctx);
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+	} else {
+		__spu_update_sched_info(ctx);
+	}
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			 * higher priority contexts before lower priority
 			 * ones, so this is safe until we introduce
 			 * priority inheritance schemes.
+			 *
+			 * XXX if the highest priority context is locked,
+			 * this can loop a long time.  Might be better to
+			 * look at another context or give up after X retries.
 			 */
 			if (!mutex_trylock(&victim->state_mutex)) {
 				victim = NULL;


