[Cbe-oss-dev] [PATCH 9/10] spu sched: implement real runqueues

Christoph Hellwig hch at lst.de
Mon Jan 8 09:15:32 EST 2007


This is the biggest patch in this series, and it reworks the guts of
the spu scheduler's runqueue mechanism:

 - instead of embedding a waitqueue in the runqueue there is now a
   simple doubly-linked list protected by its own mutex; the actual
   wakeups happen by reusing the stop_wq in the spu context (maybe we
   should rename it one day); see the first sketch below
 - spu_free and spu_prio_wakeup are merged into a single spu_reschedule
   function
 - various pieces of functionality are split out into small helpers, and
   kerneldoc comments are added in various places to document what's
   going on
 - spu_activate is rewritten into a tight loop by removing tests for
   various impossible conditions and using the infrastructure in this
   patch; see the second sketch below, after the Signed-off-by line
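
To make the first point concrete, here is a minimal, self-contained
userspace sketch of the same idea: one FIFO list per priority plus a
bitmap, so the highest runnable priority is found with a single
find-first-set.  All names are illustrative stand-ins, not the kernel
API: ffs() plays the role of sched_find_first_bit(), a plain next
pointer replaces the embedded struct list_head, and the locking that
spu_prio->mutex provides in the real code is left out.

/*
 * Sketch only: the O(1) priority runqueue shape this patch introduces.
 * Lower priority number means higher priority, as in the kernel.
 */
#include <assert.h>
#include <stdio.h>
#include <strings.h>			/* ffs() */

#define MAX_PRIO 8			/* kept small for the sketch */

struct ctx {
	int prio;
	struct ctx *next;		/* stand-in for struct list_head rq */
};

static struct ctx *head[MAX_PRIO], *tail[MAX_PRIO];
static unsigned int bitmap;		/* bit n set <=> list n non-empty */

static void add_to_rq(struct ctx *c)	/* cf. spu_add_to_rq() */
{
	c->next = NULL;
	if (tail[c->prio])
		tail[c->prio]->next = c;
	else
		head[c->prio] = c;
	tail[c->prio] = c;
	bitmap |= 1u << c->prio;	/* cf. set_bit() */
}

static struct ctx *grab_context(void)	/* cf. spu_grab_context() */
{
	struct ctx *c;
	int best;

	if (!bitmap)
		return NULL;		/* nothing runnable */
	best = ffs(bitmap) - 1;		/* lowest set bit = best priority */
	c = head[best];
	head[best] = c->next;
	if (!head[best]) {
		tail[best] = NULL;
		bitmap &= ~(1u << best);	/* cf. clear_bit() */
	}
	return c;
}

int main(void)
{
	struct ctx a = { .prio = 3 }, b = { .prio = 1 };

	add_to_rq(&a);
	add_to_rq(&b);
	assert(grab_context() == &b);	/* prio 1 wins over prio 3 */
	assert(grab_context() == &a);
	assert(grab_context() == NULL);
	puts("runqueue sketch ok");
	return 0;
}

This is the same property sched_find_first_bit() gives the real
scheduler: picking the next context is O(1) no matter how many
contexts are waiting.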


Signed-off-by: Christoph Hellwig <hch at lst.de>
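
Purely as illustration of the last point, a userspace model of the
spu_activate()/spu_reschedule() handshake: a per-context condition
variable stands in for ctx->stop_wq, a free-SPU counter stands in for
spu_get_idle(), and the signal/-ERESTARTSYS path (spu_del_from_rq) is
left out.  Again, none of this is the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int free_spus = 1;		/* one SPU to compete for */

struct ctx {
	int id;
	bool queued;
	pthread_cond_t stop_wq;		/* cf. ctx->stop_wq */
	struct ctx *next;
};

static struct ctx *rq_head, *rq_tail;	/* FIFO runqueue, one priority */

static void ctx_activate(struct ctx *c)	/* cf. spu_activate() */
{
	pthread_mutex_lock(&lock);
	while (!free_spus || c->queued) {
		if (!c->queued) {	/* cf. spu_add_to_rq() */
			c->next = NULL;
			if (rq_tail)
				rq_tail->next = c;
			else
				rq_head = c;
			rq_tail = c;
			c->queued = true;
		}
		/* cf. spu_prio_wait(): sleep until the waker picks us */
		pthread_cond_wait(&c->stop_wq, &lock);
	}
	free_spus--;			/* cf. spu_bind_context() */
	pthread_mutex_unlock(&lock);
	printf("ctx %d got the spu\n", c->id);
}

static void ctx_deactivate(void)	/* cf. spu_reschedule() */
{
	pthread_mutex_lock(&lock);
	free_spus++;			/* cf. spu_free() */
	if (rq_head) {			/* cf. spu_grab_context() */
		struct ctx *c = rq_head;
		rq_head = c->next;
		if (!rq_head)
			rq_tail = NULL;
		c->queued = false;
		pthread_cond_signal(&c->stop_wq); /* cf. wake_up(&ctx->stop_wq) */
	}
	pthread_mutex_unlock(&lock);
}

static struct ctx c1 = { .id = 1, .stop_wq = PTHREAD_COND_INITIALIZER };
static struct ctx c2 = { .id = 2, .stop_wq = PTHREAD_COND_INITIALIZER };

static void *worker(void *arg)
{
	(void)arg;
	ctx_activate(&c2);	/* blocks until main gives the SPU back */
	ctx_deactivate();
	return NULL;
}

int main(void)
{
	pthread_t t;

	ctx_activate(&c1);	/* grabs the only SPU */
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);		/* let the worker queue itself */
	ctx_deactivate();	/* frees the SPU and wakes c2 */
	pthread_join(t, NULL);
	return 0;
}

(Build with cc -pthread.  The loop condition mirrors the do/while in
the patch: if the woken context loses the race for the freed SPU it
simply requeues itself and sleeps again.)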

Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h	2007-01-07 21:53:38.000000000 +0100
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h	2007-01-07 22:46:42.000000000 +0100
@@ -75,6 +75,7 @@
 	struct spu_gang *gang;
 
 	/* scheduler fields */
+	struct list_head rq;
 	int prio;
 };
 
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c	2007-01-07 21:53:38.000000000 +0100
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c	2007-01-07 22:46:42.000000000 +0100
@@ -49,9 +49,10 @@
 #define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
 struct spu_prio_array {
 	unsigned long bitmap[SPU_BITMAP_SIZE];
-	wait_queue_head_t waitq[MAX_PRIO];
+	struct list_head rq[MAX_PRIO];
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
+	struct mutex mutex;
 };
 
 static struct spu_prio_array *spu_prio;
@@ -196,61 +197,105 @@
 	return was_active;
 }
 
-static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-			      int prio)
+/**
+ * spu_add_to_rq - add a context to the runqueue
+ * @ctx:       context to add
+ */
+static void spu_add_to_rq(struct spu_context *ctx)
 {
-	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
-	set_bit(prio, spu_prio->bitmap);
+	mutex_lock(&spu_prio->mutex);
+	list_add_tail(&ctx->rq, &spu_prio->rq[ctx->prio]);
+	set_bit(ctx->prio, spu_prio->bitmap);
+	mutex_unlock(&spu_prio->mutex);
 }
 
-static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-			      int prio)
+/**
+ * spu_del_from_rq - remove a context from the runqueue
+ * @ctx:       context to remove
+ */
+static void spu_del_from_rq(struct spu_context *ctx)
 {
-	u64 flags;
+	mutex_lock(&spu_prio->mutex);
+	/*
+	 * We have to be careful here because there is a small
+	 * race condition where we received a signal just after
+	 * spu_reschedule() woke us up.
+	 */
+	if (!list_empty(&ctx->rq))
+		list_del_init(&ctx->rq);
 
-	__set_current_state(TASK_RUNNING);
+	if (list_empty(&spu_prio->rq[ctx->prio]))
+		clear_bit(ctx->prio, spu_prio->bitmap);
+	mutex_unlock(&spu_prio->mutex);
+}
 
-	spin_lock_irqsave(&wq->lock, flags);
+/**
+ * spu_grab_context - remove one context from the runqueue
+ * @prio:      priority of the context to be removed
+ *
+ * This function removes one context from the runqueue for priority @prio.
+ * If there is more than one context with the given priority, the first
+ * context on the runqueue will be taken.
+ *
+ * Returns the spu_context it just removed.
+ *
+ * Must be called with spu_prio->mutex held.
+ */
+static struct spu_context *spu_grab_context(int prio)
+{
+	struct list_head *rq = &spu_prio->rq[prio];
+	struct spu_context *ctx = NULL;
 
-	remove_wait_queue_locked(wq, wait);
-	if (list_empty(&wq->task_list))
-		clear_bit(prio, spu_prio->bitmap);
+	if (!list_empty(rq)) {
+		ctx = list_entry(rq->next, struct spu_context, rq);
 
-	spin_unlock_irqrestore(&wq->lock, flags);
+		list_del_init(rq->next);
+		if (list_empty(rq))
+			clear_bit(prio, spu_prio->bitmap);
+	}
+
+	return ctx;
 }
 
-static void spu_prio_wait(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx)
 {
-	int prio = ctx->prio;
-	wait_queue_head_t *wq = &spu_prio->waitq[prio];
 	DEFINE_WAIT(wait);
 
-	if (ctx->spu)
-		return;
-
-	spu_add_wq(wq, &wait, prio);
+	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 
 	if (!signal_pending(current)) {
 		mutex_unlock(&ctx->state_mutex);
-		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
-			 current->pid, current->prio);
 		schedule();
 		mutex_lock(&ctx->state_mutex);
 	}
-
-	spu_del_wq(wq, &wait, prio);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-static void spu_prio_wakeup(void)
+/**
+ * spu_reschedule - try to find a runnable context for a spu
+ * @spu:       spu available
+ *
+ * This function is called whenever a spu becomes idle.  It looks for the
+ * most suitable runnable spu context and schedules it for execution.
+ */
+static void spu_reschedule(struct spu *spu)
 {
-	int best = sched_find_first_bit(spu_prio->bitmap);
+	int best;
+
+	spu_free(spu);
+
+	mutex_lock(&spu_prio->mutex);
+	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
-		wait_queue_head_t *wq = &spu_prio->waitq[best];
-		wake_up_interruptible_nr(wq, 1);
+		struct spu_context *ctx = spu_grab_context(best);
+		if (ctx)
+			wake_up(&ctx->stop_wq);
 	}
+	mutex_unlock(&spu_prio->mutex);
 }
 
-static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
 	int node = cpu_to_node(raw_smp_processor_id());
@@ -267,15 +312,6 @@
 	return spu;
 }
 
-static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
-{
-	/* Future: spu_get_idle() if possible,
-	 * otherwise try to preempt an active
-	 * context.
-	 */
-	return spu_get_idle(ctx, flags);
-}
-
 /* The three externally callable interfaces
  * for the scheduler begin here.
  *
@@ -284,32 +320,42 @@
  *	spu_yield	- yield an SPU if others are waiting.
  */
 
+/**
+ * spu_activate - find a free spu for a context and execute it
+ * @ctx:	spu context to schedule
+ * @flags:	flags (currently ignored)
+ *
+ * Tries to find a free spu to run @ctx.  If no free spu is available,
+ * add the context to the runqueue so it gets woken up once a spu
+ * becomes available.
+ */
 int spu_activate(struct spu_context *ctx, u64 flags)
 {
-	struct spu *spu;
-	int ret = 0;
 
-	for (;;) {
-		if (ctx->spu)
-			return 0;
-		spu = spu_get(ctx, flags);
-		if (spu != NULL) {
-			if (ctx->spu != NULL) {
-				spu_free(spu);
-				spu_prio_wakeup();
-				break;
-			}
+	if (ctx->spu)
+		return 0;
+
+	do {
+		struct spu *spu;
+
+		spu = spu_get_idle(ctx);
+		if (spu) {
 			spu_bind_context(spu, ctx);
-			break;
-		}
-		spu_prio_wait(ctx, flags);
-		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			spu_prio_wakeup();
-			break;
+			return 0;
 		}
-	}
-	return ret;
+
+		spu_add_to_rq(ctx);
+		spu_prio_wait(ctx);
+	} while (!signal_pending(current));
+
+
+	/*
+	 * If we were interrupted we need to remove ourselves from the
+	 * runqueue.  Normally this is done by spu_grab_context() in
+	 * the waking thread.
+	 */
+	spu_del_from_rq(ctx);
+	return -ERESTARTSYS;
 }
 
 void spu_deactivate(struct spu_context *ctx)
@@ -321,10 +367,8 @@
 	if (!spu)
 		return;
 	was_active = spu_unbind_context(spu, ctx);
-	if (was_active) {
-		spu_free(spu);
-		spu_prio_wakeup();
-	}
+	if (was_active)
+		spu_reschedule(spu);
 }
 
 void spu_yield(struct spu_context *ctx)
@@ -359,7 +403,7 @@
 		return 1;
 	}
 	for (i = 0; i < MAX_PRIO; i++) {
-		init_waitqueue_head(&spu_prio->waitq[i]);
+		INIT_LIST_HEAD(&spu_prio->rq[i]);
 		__clear_bit(i, spu_prio->bitmap);
 	}
 	__set_bit(MAX_PRIO, spu_prio->bitmap);
@@ -367,6 +411,7 @@
 		mutex_init(&spu_prio->active_mutex[i]);
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
+	mutex_init(&spu_prio->mutex);
 	return 0;
 }
 


