[Cbe-oss-dev] [PATCH 2/3] spu sched: switch from workqueues to kthread + timer tick

Christoph Hellwig hch at lst.de
Mon Apr 16 05:31:36 EST 2007


On Thu, Apr 12, 2007 at 08:42:53PM +0200, Christoph Hellwig wrote:
> Get rid of the scheduler workqueues, which complicated things a lot,
> in favour of a dedicated spu scheduler thread that gets woken by a
> traditional scheduler tick.  By default this scheduler tick runs at
> HZ / 10, i.e. one spu scheduler tick for every 10 cpu ticks.
> 
> Currently the tick is not disabled when we have fewer contexts than
> available spus, but I will implement that later.

That version of the patch had some issues with timer handling.  Updated
version below, after a quick sketch of the idiom it switches to:
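
The core of the change is the classic self-rearming-timer-plus-kthread
idiom: the timer handler rearms itself and wakes a dedicated thread,
which sleeps in TASK_INTERRUPTIBLE between ticks.  A minimal,
self-contained sketch of that idiom against the 2.6-era timer API (the
names and the HZ / 10 period are illustrative, not the patch's code):

#include <linux/kthread.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define TICK_INTERVAL	(HZ / 10)	/* example period only */

static struct task_struct *tick_task;	/* started with kthread_run() */
static struct timer_list tick_timer;

/* Timer handler: rearm first, then kick the thread awake. */
static void tick_wake(unsigned long data)
{
	mod_timer(&tick_timer, jiffies + TICK_INTERVAL);
	wake_up_process(tick_task);
}

static int tick_thread(void *unused)
{
	setup_timer(&tick_timer, tick_wake, 0);
	mod_timer(&tick_timer, jiffies + TICK_INTERVAL);

	while (!kthread_should_stop()) {
		/*
		 * Mark ourselves sleeping before calling schedule(); a
		 * wakeup that races in between just leaves us runnable,
		 * so no tick is lost.  kthread_stop() also wakes the
		 * thread, so the loop exits promptly on teardown.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/* ... one scheduler tick worth of work goes here ... */
	}

	del_timer_sync(&tick_timer);
	return 0;
}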

Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-12 19:45:37.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-15 20:29:03.000000000 +0200
@@ -36,6 +36,7 @@
 #include <linux/numa.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/kthread.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -46,6 +47,8 @@
 
 #define SPU_TIMESLICE	(HZ)
 
+#define SPUSCHED_TICK	(HZ / 100)
+
 struct spu_prio_array {
 	DECLARE_BITMAP(bitmap, MAX_PRIO);
 	struct list_head runq[MAX_PRIO];
@@ -55,7 +58,8 @@ struct spu_prio_array {
 };
 
 static struct spu_prio_array *spu_prio;
-static struct workqueue_struct *spu_sched_wq;
+static struct task_struct *spusched_task;
+static struct timer_list spusched_timer;
 
 static inline int node_allowed(int node)
 {
@@ -69,31 +73,6 @@ static inline int node_allowed(int node)
 	return 1;
 }
 
-void spu_start_tick(struct spu_context *ctx)
-{
-	if (ctx->policy == SCHED_RR) {
-		/*
-		 * Make sure the exiting bit is cleared.
-		 */
-		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-		mb();
-		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
-	}
-}
-
-void spu_stop_tick(struct spu_context *ctx)
-{
-	if (ctx->policy == SCHED_RR) {
-		/*
-		 * While the work can be rearming normally setting this flag
-		 * makes sure it does not rearm itself anymore.
-		 */
-		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-		mb();
-		cancel_delayed_work(&ctx->sched_work);
-	}
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu:	spu to add to the active list
@@ -105,6 +84,11 @@ static void spu_add_to_active_list(struc
 	mutex_unlock(&spu_prio->active_mutex[spu->node]);
 }
 
+static void __spu_remove_from_active_list(struct spu *spu)
+{
+	list_del_init(&spu->list);
+}
+
 /**
  * spu_remove_from_active_list - remove spu from active list
  * @spu:       spu to remove from the active list
@@ -114,7 +98,7 @@ static void spu_remove_from_active_list(
 	int node = spu->node;
 
 	mutex_lock(&spu_prio->active_mutex[node]);
-	list_del_init(&spu->list);
+	__spu_remove_from_active_list(spu);
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
@@ -193,7 +177,6 @@ static void spu_bind_context(struct spu 
 	spu->timestamp = jiffies;
 	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
-	spu_add_to_active_list(spu);
 	ctx->state = SPU_STATE_RUNNABLE;
 }
 
@@ -212,7 +195,6 @@ static void spu_unbind_context(struct sp
 	if (!list_empty(&ctx->aff_list))
 		if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
 			ctx->gang->aff_ref_spu = NULL;
-	spu_remove_from_active_list(spu);
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -354,6 +336,7 @@ static struct spu *find_victim(struct sp
 				victim = NULL;
 				goto restart;
 			}
+			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, victim);
 			mutex_unlock(&victim->state_mutex);
 			/*
@@ -396,6 +379,7 @@ int spu_activate(struct spu_context *ctx
 			spu = find_victim(ctx);
 		if (spu) {
 			spu_bind_context(spu, ctx);
+			spu_add_to_active_list(spu);
 			return 0;
 		}
 
@@ -439,6 +423,7 @@ static int __spu_deactivate(struct spu_c
 	if (spu) {
 		new = grab_runnable_context(max_prio);
 		if (new || force) {
+			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
 			spu_free(spu);
 			if (new)
@@ -477,51 +462,78 @@ void spu_yield(struct spu_context *ctx)
 	mutex_unlock(&ctx->state_mutex);
 }
 
-void spu_sched_tick(struct work_struct *work)
+static void spusched_tick(struct spu_context *ctx)
 {
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	int preempted;
+	if (ctx->policy != SCHED_RR || --ctx->time_slice)
+		return;
 
 	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
+	 * Unfortunately active_mutex ranks outside of state_mutex, so
+	 * we have to trylock here.  If we fail give the context another
+	 * tick and try again.
 	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-		return;
+	if (mutex_trylock(&ctx->state_mutex)) {
+		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
+		if (new) {
+			struct spu *spu = ctx->spu;
 
-	mutex_lock(&ctx->state_mutex);
-	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
-	mutex_unlock(&ctx->state_mutex);
-
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
+			__spu_remove_from_active_list(spu);
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			wake_up(&new->stop_wq);
+			/*
+			 * We need to break out of the wait loop in
+			 * spu_run manually to ensure this context
+			 * gets put on the runqueue again ASAP.
+			 */
+			wake_up(&ctx->stop_wq);
+		}
+		ctx->time_slice = SPU_DEF_TIMESLICE;
+		mutex_unlock(&ctx->state_mutex);
 	} else {
-		spu_start_tick(ctx);
+		ctx->time_slice++;
+	}
+}
+
+static void spusched_wake(unsigned long data)
+{
+	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	wake_up_process(spusched_task);
+}
+
+static int spusched_thread(void *unused)
+{
+	struct spu *spu, *next;
+	int node;
+
+	setup_timer(&spusched_timer, spusched_wake, 0);
+	__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		for (node = 0; node < MAX_NUMNODES; node++) {
+			mutex_lock(&spu_prio->active_mutex[node]);
+			list_for_each_entry_safe(spu, next,
+						 &spu_prio->active_list[node],
+						 list)
+				spusched_tick(spu->ctx);
+			mutex_unlock(&spu_prio->active_mutex[node]);
+		}
 	}
+
+	del_timer_sync(&spusched_timer);
+	return 0;
 }
 
 int __init spu_sched_init(void)
 {
 	int i;
 
-	spu_sched_wq = create_singlethread_workqueue("spusched");
-	if (!spu_sched_wq)
-		return 1;
-
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
-	if (!spu_prio) {
-		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
-		       __FUNCTION__);
-		       destroy_workqueue(spu_sched_wq);
-		return 1;
-	}
+	if (!spu_prio)
+		return -ENOMEM;
+
 	for (i = 0; i < MAX_PRIO; i++) {
 		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
@@ -532,7 +543,13 @@ int __init spu_sched_init(void)
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
 	spin_lock_init(&spu_prio->runq_lock);
+
+	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+	if (IS_ERR(spusched_task)) {
+		kfree(spu_prio);
+		return PTR_ERR(spusched_task);
+	}
 	return 0;
 }
 
 void __exit spu_sched_exit(void)
@@ -540,6 +558,8 @@ void __exit spu_sched_exit(void)
 	struct spu *spu, *tmp;
 	int node;
 
+	kthread_stop(spusched_task);
+
 	for (node = 0; node < MAX_NUMNODES; node++) {
 		mutex_lock(&spu_prio->active_mutex[node]);
 		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
@@ -550,7 +570,6 @@ void __exit spu_sched_exit(void)
 		mutex_unlock(&spu_prio->active_mutex[node]);
 	}
 	kfree(spu_prio);
-	destroy_workqueue(spu_sched_wq);
 }
 
 static void aff_merge_remaining_ctxs(struct spu_gang *gang)
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/context.c	2007-04-12 19:45:23.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/context.c	2007-04-15 20:29:03.000000000 +0200
@@ -58,7 +58,7 @@ struct spu_context *alloc_spu_context(st
 	ctx->rt_priority = current->rt_priority;
 	ctx->policy = current->policy;
 	ctx->prio = current->prio;
-	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
+	ctx->time_slice = SPU_DEF_TIMESLICE;
 	goto out;
 out_free:
 	kfree(ctx);
@@ -179,5 +179,3 @@ void * spu_get_profile_private_kref(stru
 	return ctx->prof_priv_kref;
 }
 EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
-
-
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/run.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/run.c	2007-04-12 19:45:24.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/run.c	2007-04-15 20:25:30.000000000 +0200
@@ -148,7 +148,6 @@ static inline int spu_run_init(struct sp
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
 	} else {
-		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
 	}
 
@@ -161,7 +160,6 @@ static inline int spu_run_fini(struct sp
 {
 	int ret = 0;
 
-	spu_stop_tick(ctx);
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 	spu_release(ctx);
@@ -326,10 +324,8 @@ long spufs_run_spu(struct file *file, st
 
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret) {
-				spu_stop_tick(ctx);
+			if (ret)
 				goto out2;
-			}
 			continue;
 		}
 		ret = spu_process_events(ctx);
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h	2007-04-12 19:45:23.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h	2007-04-15 20:29:03.000000000 +0200
@@ -31,6 +31,8 @@
 #include <asm/spu_csa.h>
 #include <asm/spu_info.h>
 
+#define SPU_DEF_TIMESLICE	100
+
 /* The magic number for our file system */
 enum {
 	SPUFS_MAGIC = 0x23c9b64e,
@@ -41,8 +43,7 @@ struct spu_gang;
 
 /* ctx->sched_flags */
 enum {
-	SPU_SCHED_EXITING = 0,
-	SPU_SCHED_NOTIFY_ACTIVE,
+	SPU_SCHED_NOTIFY_ACTIVE = 0,
 };
 
 struct spu_context {
@@ -86,7 +87,7 @@ struct spu_context {
 
 	/* scheduler fields */
 	struct list_head rq;
-	struct delayed_work sched_work;
+	unsigned int time_slice;
 	unsigned long sched_flags;
 	unsigned long rt_priority;
 	int policy;
@@ -222,9 +223,6 @@ int spu_activate(struct spu_context *ctx
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
 void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
-void spu_start_tick(struct spu_context *ctx);
-void spu_stop_tick(struct spu_context *ctx);
-void spu_sched_tick(struct work_struct *work);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 



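Also worth noting: the new __spu_remove_from_active_list() follows the
usual kernel locked/unlocked pairing, where the double-underscore
variant assumes the caller already holds the lock and the plain variant
wraps it with the lock taken.  A minimal sketch of the convention
(illustrative names, not the spufs types):

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(item_lock);

/* Caller must hold item_lock. */
static void __remove_item(struct list_head *item)
{
	list_del_init(item);
}

static void remove_item(struct list_head *item)
{
	mutex_lock(&item_lock);
	__remove_item(item);
	mutex_unlock(&item_lock);
}

That split is what lets spusched_tick(), which already runs under
active_mutex via spusched_thread(), pull an spu off the active list
without taking the mutex a second time.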