[Cbe-oss-dev] [PATCH] spusched: avoid false context switches in spu tick code
Christoph Hellwig
hch at lst.de
Fri Apr 6 03:38:21 EST 2007
On Thu, Apr 05, 2007 at 02:16:49PM -0300, Luke Browning wrote:
> This patch adds the run queue lock to the spu time slicing code
> so that it does not perform false context switches. It is needed
> to protect the runqueue prio bitmap. Also, once it decides to
> schedule a ctx from the runqueue, it removes it from the queue
> so that it is not visible to other spus. This is needed to avoid
> thundering herd problems when high priority ctxts are added to
> the runqueue.
Heh, I just came up with a very similar patch. The big difference
is that mine does various bits of restructuring to avoid code duplication
and also fixes the spu_yield semantics. As a side effect it fixes
a day 0 problem where we call spu_free far too early:
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c 2007-04-05 10:15:50.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c 2007-04-05 16:41:55.000000000 +0200
@@ -98,7 +98,6 @@ void spu_sched_tick(struct work_struct *
 {
 	struct spu_context *ctx =
 		container_of(work, struct spu_context, sched_work.work);
-	struct spu *spu;
 	int preempted = 0;
 
 	/*
@@ -109,17 +108,7 @@ void spu_sched_tick(struct work_struct *
 	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
 		return;
 
-	mutex_lock(&ctx->state_mutex);
-	spu = ctx->spu;
-	if (spu) {
-		int best = sched_find_first_bit(spu_prio->bitmap);
-		if (best <= ctx->prio) {
-			spu_deactivate(ctx);
-			preempted = 1;
-		}
-	}
-	mutex_unlock(&ctx->state_mutex);
-
+	preempted = spu_yield(ctx);
 	if (preempted) {
 		/*
 		 * We need to break out of the wait loop in spu_run manually
@@ -312,31 +301,30 @@ static void spu_prio_wait(struct spu_con
 }
 
 /**
- * spu_reschedule - try to find a runnable context for a spu
+ * grab_runnable_context - try to find a runnable context
  * @spu: spu available
  *
- * This function is called whenever a spu becomes idle. It looks for the
- * most suitable runnable spu context and schedules it for execution.
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller. Returns %NULL if no runnable context was found.
  */
-static void spu_reschedule(struct spu *spu)
+static struct spu_context *grab_runnable_context(void)
 {
+	struct spu_context *ctx = NULL;
 	int best;
 
-	spu_free(spu);
-
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
 		struct list_head *rq = &spu_prio->runq[best];
-		struct spu_context *ctx;
 
 		BUG_ON(list_empty(rq));
 
 		ctx = list_entry(rq->next, struct spu_context, rq);
 		__spu_del_from_rq(ctx);
-		wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
+
+	return ctx;
 }
 
 static struct spu *spu_get_idle(struct spu_context *ctx)
@@ -470,6 +458,29 @@ int spu_activate(struct spu_context *ctx
 	return -ERESTARTSYS;
 }
 
+static int __spu_yield(struct spu_context *ctx, int force)
+{
+	struct spu *spu = ctx->spu;
+	int preempted = 0;
+
+	if (spu) {
+		struct spu_context *new;
+
+		new = grab_runnable_context();
+		if (new) {
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			wake_up(&new->stop_wq);
+			preempted = 1;
+		} else if (force) {
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+		}
+	}
+
+	return preempted;
+}
+
 /**
  * spu_deactivate - unbind a context from it's physical spu
  * @ctx: spu context to unbind
@@ -479,37 +490,28 @@ int spu_activate(struct spu_context *ctx
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu = ctx->spu;
-
-	if (spu) {
-		spu_unbind_context(spu, ctx);
-		spu_reschedule(spu);
-	}
+	__spu_yield(ctx, 1);
 }
 
 /**
  * spu_yield - yield a physical spu if others are waiting
  * @ctx: spu context to yield
  *
- * Check if there is a higher priority context waiting and if yes
- * unbind @ctx from the physical spu and schedule the highest
- * priority context to run on the freed physical spu instead.
+ * Check if there is a higher or equal priority context waiting
+ * and if yes unbind @ctx from the physical spu and schedule the
+ * highest priority context to run on the freed physical spu instead.
+ *
+ * Returns 1 if the current context was preempted, otherwise 0.
  */
-void spu_yield(struct spu_context *ctx)
+int spu_yield(struct spu_context *ctx)
 {
-	struct spu *spu;
+	int preempted = 0;
 
-	if (mutex_trylock(&ctx->state_mutex)) {
-		if ((spu = ctx->spu) != NULL) {
-			int best = sched_find_first_bit(spu_prio->bitmap);
-			if (best < MAX_PRIO) {
-				pr_debug("%s: yielding SPU %d NODE %d\n",
-					 __FUNCTION__, spu->number, spu->node);
-				spu_deactivate(ctx);
-			}
-		}
-		mutex_unlock(&ctx->state_mutex);
-	}
+	mutex_lock(&ctx->state_mutex);
+	preempted = __spu_yield(ctx, 0);
+	mutex_unlock(&ctx->state_mutex);
+
+	return preempted;
 }
 
 int __init spu_sched_init(void)
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h 2007-04-05 10:45:58.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h 2007-04-05 16:38:28.000000000 +0200
@@ -220,7 +220,7 @@ void spu_acquire_saved(struct spu_contex
 
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
-void spu_yield(struct spu_context *ctx);
+int spu_yield(struct spu_context *ctx);
 void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
 void spu_start_tick(struct spu_context *ctx);
 void spu_stop_tick(struct spu_context *ctx);
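For anyone reading along without the tree handy: the runqueue both
patches are guarding is the usual O(1)-scheduler style priority array,
a bitmap with one bit per priority level plus a list head per level, so
sched_find_first_bit() finds the best waiting context in constant time.
Roughly (simplified, field names from memory, so treat this as a sketch
rather than the literal spufs definition):

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);	/* bit set <=> runq[prio] non-empty */
	struct list_head runq[MAX_PRIO];	/* waiting contexts, FIFO per priority */
	spinlock_t runq_lock;			/* protects bitmap and runq[] */
	/* per-node active list bookkeeping left out here */
};

__spu_add_to_rq() queues a context and sets its priority bit, and
__spu_del_from_rq() does the opposite, clearing the bit once that
priority's list goes empty.  The point Luke makes above is that any look
at the bitmap, like the sched_find_first_bit() calls the old tick and
yield paths did under the state_mutex only, really wants runq_lock held;
grab_runnable_context() does the lookup and the dequeue in one critical
section, so only a single idle spu ever sees a newly runnable context.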