[Cbe-oss-dev] [PATCH 3/3] spu sched: forced preemption at execution

Luke Browning LukeBrowning at us.ibm.com
Fri Feb 23 11:25:04 EST 2007

cbe-oss-dev-bounces+lukebrowning=us.ibm.com at ozlabs.org wrote on 02/09/2007
01:47:29 PM:

> On Fri, Feb 09, 2007 at 12:43:01AM +0100, Christoph Hellwig wrote:
> > If we start a spu context with realtime priority we want it to run
> > immediately and not wait until some other lower priority thread has
> > finished.  Try to find a suitable victim and use it's spu in this
> > case.
>
> Due to a mistake in my quilt usage this is missing the changes to
> context.c and spufs.h.  The full patch is below:
>
> Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c
> 2007-02-09 16:08:51.000000000 +0100
> +++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c   2007-02-09
> 16:08:57.000000000 +0100
> @@ -282,6 +282,74 @@
>  }
>
>  /**
> + * find_victim - find a lower priority context to preempt
> + * @ctx:   candidate context for running
> + *
> + * Returns the freed physical spu to run the new context on.
> + */
> +static struct spu *find_victim(struct spu_context *ctx)
> +{
> +   struct spu_context *victim = NULL;
> +   struct spu *spu;
> +   int node, n;
> +
> +   /*
> +    * Look for a possible preemption candidate on the local node first.
> +    * If there is no candidate look at the other nodes.  This isn't
> +    * exactly fair, but so far the whole spu scheduler tries to keep
> +    * a strong node affinity.  We might want to fine-tune this in
> +    * the future.
> +    */
> + restart:
> +   node = cpu_to_node(raw_smp_processor_id());
> +   for (n = 0; n < MAX_NUMNODES; n++, node++) {
> +      node = (node < MAX_NUMNODES) ? node : 0;
> +      if (!node_allowed(node))
> +         continue;
> +
> +      mutex_lock(&spu_prio->active_mutex[node]);
> +      list_for_each_entry(spu, &spu_prio->active_list[node], list) {
> +         struct spu_context *tmp = spu->ctx;
> +
> +         if (tmp->rt_priority < ctx->rt_priority &&
> +             (!victim || tmp->rt_priority < victim->rt_priority))
> +            victim = spu->ctx;
> +      }
> +      mutex_unlock(&spu_prio->active_mutex[node]);
> +
> +      if (victim) {
> +         /*
> +          * This nests ctx->state_mutex, but we always lock
> +          * higher priority contexts before lower priority
> +          * ones, so this is safe until we introduce
> +          * priority inheritance schemes.
> +          */
> +         if (!mutex_trylock(&victim->state_mutex)) {
> +            victim = NULL;
> +            goto restart;
> +         }
> +
> +         spu = victim->spu;
> +         if (!spu) {

You might also want to retest the priority fields under the ctx lock to
make sure that the priorities haven't changed.  The code above samples
them under the active list mutex, which doesn't protect these fields, right?
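Something along these lines is what I have in mind.  Just a rough,
untested sketch that reuses the field names from the patch; it would
add a re-check right after the mutex_trylock() succeeds:

   if (!mutex_trylock(&victim->state_mutex)) {
      victim = NULL;
      goto restart;
   }

   /*
    * The priorities were only sampled under active_mutex above, so
    * re-check them now that the victim's state_mutex is held.  If
    * the victim is no longer lower priority, back off and look for
    * another candidate.
    */
   if (victim->rt_priority >= ctx->rt_priority) {
      mutex_unlock(&victim->state_mutex);
      victim = NULL;
      goto restart;
   }

   spu = victim->spu;

That keeps the existing lock ordering and simply reuses the restart
path that is already there for the spu == NULL race.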

> +            /*
> +             * This race can happen because we've dropped
> +             * the active list mutex.  Not a problem, just
> +             * restart the search.
> +             */
> +            mutex_unlock(&victim->state_mutex);
> +            victim = NULL;
> +            goto restart;
> +         }
> +         spu_unbind_context(spu, victim);
> +         mutex_unlock(&victim->state_mutex);
> +         return spu;
> +      }
> +   }
> +
> +   return NULL;
> +}
> +
> +/**
>   * spu_activate - find a free spu for a context and execute it
>   * @ctx:   spu context to schedule
>   * @flags:   flags (currently ignored)
> @@ -300,6 +368,12 @@
>        struct spu *spu;
>
>        spu = spu_get_idle(ctx);
> +      /*
> +       * If this is a realtime thread we try to get it running by
> +       * preempting a lower priority thread.
> +       */
> +      if (!spu && ctx->rt_priority)
> +         spu = find_victim(ctx);
>        if (spu) {
>           spu_bind_context(spu, ctx);
>           return 0;
> Index: linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/context.c
> 2007-02-09 16:09:34.000000000 +0100
> +++ linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
> 2007-02-09 16:09:56.000000000 +0100
> @@ -53,6 +53,7 @@
>     ctx->owner = get_task_mm(current);
>     if (gang)
>        spu_gang_add_ctx(gang, ctx);
> +   ctx->rt_priority = current->rt_priority;
>     ctx->prio = current->prio;
>     goto out;
>  out_free:
> Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h
> 2007-02-09 16:09:06.000000000 +0100
> +++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h   2007-02-09
> 16:09:17.000000000 +0100
> @@ -81,6 +81,7 @@
>     /* scheduler fields */
>      struct list_head rq;
>     unsigned long sched_flags;
> +   unsigned long rt_priority;
>     int prio;
>  };
>
> _______________________________________________
> cbe-oss-dev mailing list
> cbe-oss-dev at ozlabs.org
> https://ozlabs.org/mailman/listinfo/cbe-oss-dev