[Cbe-oss-dev] [PATCH 3/3] spu sched: dynamic timeslicing for SCHED_OTHER

Christoph Hellwig hch at lst.de
Fri Apr 13 04:42:57 EST 2007


Enable preemptive scheduling for non-RT contexts.

We use the same algorithm as the CPU scheduler to calculate the time
slice length, and for now we also use the same timeslice length as the
CPU scheduler.  This might not be enough for good performance and can
be changed after some benchmarking.
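
As a rough illustration of the resulting nice-to-timeslice mapping, here
is a small userspace sketch of the scaling done by spu_set_timeslice()
below.  It is not part of the patch: the percentages assume the 100 msec
default slice documented in the code, and the 5% floor merely stands in
for MIN_SPU_TIMESLICE.

	#include <stdio.h>

	/* constants mirrored from <linux/sched.h> and this patch */
	#define MAX_RT_PRIO	100
	#define MAX_PRIO	140
	#define NORMAL_PRIO	120
	#define MAX_USER_PRIO	(MAX_PRIO - MAX_RT_PRIO)

	/*
	 * Timeslice for a given static priority, as a percentage of the
	 * default slice (roughly milliseconds if the default is 100 msecs).
	 * Mirrors SCALE_PRIO()/spu_set_timeslice(): priorities better than
	 * NORMAL_PRIO scale off four times the default slice.
	 */
	static int slice_pct(int prio)
	{
		int base = (prio < NORMAL_PRIO) ? 400 : 100;
		int pct = base * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);

		return pct > 5 ? pct : 5;	/* ~MIN_SPU_TIMESLICE */
	}

	int main(void)
	{
		int nice;

		for (nice = -20; nice <= 19; nice++)
			printf("nice %3d -> ~%3d%% of the default slice\n",
			       nice, slice_pct(NORMAL_PRIO + nice));
		return 0;
	}

With a 100 msec default slice this reproduces the [800ms ... 100ms ...
5ms] range mentioned in the comments below, with the 4x boost applying
only to negative nice levels.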

Note that we do not currently boost the priority of contexts that have
been waiting on the runqueue for a long time, so higher-priority
contexts could starve ones with a higher nice value.  This could easily
be fixed once the rework of the spu lists that Luke and I discussed is
done.


Signed-off-by: Christoph Hellwig <hch at lst.de>

Index: linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/context.c	2007-04-12 18:45:38.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/context.c	2007-04-12 18:47:11.000000000 +0200
@@ -55,10 +55,19 @@ struct spu_context *alloc_spu_context(st
 	INIT_LIST_HEAD(&ctx->aff_list);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
-	ctx->rt_priority = current->rt_priority;
+
+	/*
+	 * We do our own priority calculations, so we normally want
+	 * ->static_prio to start with. Unfortunately this field
+	 * contains junk for threads with a realtime scheduling
+	 * policy so we have to look at ->prio in this case.
+	 */
+	if (rt_prio(current->prio))
+		ctx->prio = current->prio;
+	else
+		ctx->prio = current->static_prio;
 	ctx->policy = current->policy;
-	ctx->prio = current->prio;
-	ctx->time_slice = SPU_DEF_TIMESLICE;
+	spu_set_timeslice(ctx);
 	goto out;
 out_free:
 	kfree(ctx);
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h	2007-04-12 18:45:38.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h	2007-04-12 18:47:11.000000000 +0200
@@ -31,8 +31,6 @@
 #include <asm/spu_csa.h>
 #include <asm/spu_info.h>
 
-#define SPU_DEF_TIMESLICE	100
-
 /* The magic number for our file system */
 enum {
 	SPUFS_MAGIC = 0x23c9b64e,
@@ -89,7 +87,6 @@ struct spu_context {
 	struct list_head rq;
 	unsigned int time_slice;
 	unsigned long sched_flags;
-	unsigned long rt_priority;
 	int policy;
 	int prio;
 
@@ -223,6 +220,7 @@ int spu_activate(struct spu_context *ctx
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
 void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
+void spu_set_timeslice(struct spu_context *ctx);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-12 18:46:44.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-12 19:27:31.000000000 +0200
@@ -45,10 +45,6 @@
 #include <asm/spu_priv1.h>
 #include "spufs.h"
 
-#define SPU_TIMESLICE	(HZ)
-
-#define SPUSCHED_TICK	(HZ / 100)
-
 struct spu_prio_array {
 	DECLARE_BITMAP(bitmap, MAX_PRIO);
 	struct list_head runq[MAX_PRIO];
@@ -60,6 +56,46 @@ struct spu_prio_array {
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 
+/*
+ * Priority of a normal, non-rt, non-niced process (aka nice level 0).
+ */
+#define NORMAL_PRIO		120
+
+/*
+ * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+#define SPUSCHED_TICK		(HZ * 10)
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 10 jiffies, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ */
+#define MIN_SPU_TIMESLICE	max(5 * HZ / 100, 10)
+#define DEF_SPU_TIMESLICE	(100 * HZ / 100)
+
+#define MAX_USER_PRIO           (MAX_PRIO - MAX_RT_PRIO)
+#define SCALE_PRIO(x, prio) \
+	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+
+/*
+ * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
+ * [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
+ */
+void spu_set_timeslice(struct spu_context *ctx)
+{
+	if (ctx->prio < NORMAL_PRIO)
+		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
+	else
+		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
+}
+
 static inline int node_allowed(int node)
 {
 	cpumask_t mask;
@@ -306,8 +342,8 @@ static struct spu *find_victim(struct sp
 		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
 			struct spu_context *tmp = spu->ctx;
 
-			if (tmp->rt_priority < ctx->rt_priority &&
-			    (!victim || tmp->rt_priority < victim->rt_priority))
+			if (tmp->prio > ctx->prio &&
+			    (!victim || tmp->prio > victim->prio))
 				victim = spu->ctx;
 		}
 		mutex_unlock(&spu_prio->active_mutex[node]);
@@ -374,7 +410,7 @@ int spu_activate(struct spu_context *ctx
 		 * If this is a realtime thread we try to get it running by
 		 * preempting a lower priority thread.
 		 */
-		if (!spu && ctx->rt_priority)
+		if (!spu && rt_prio(ctx->prio))
 			spu = find_victim(ctx);
 		if (spu) {
 			spu_bind_context(spu, ctx);
@@ -463,7 +499,7 @@ void spu_yield(struct spu_context *ctx)
 
 static void spusched_tick(struct spu_context *ctx)
 {
-	if (ctx->policy != SCHED_RR || --ctx->time_slice)
+	if (ctx->policy == SCHED_FIFO || --ctx->time_slice)
 		return;
 
 	/*
@@ -487,7 +523,7 @@ static void spusched_tick(struct spu_con
 			 */
 			wake_up(&ctx->stop_wq);
 		}
-		ctx->time_slice = SPU_DEF_TIMESLICE;
+		spu_set_timeslice(ctx);
 		mutex_unlock(&ctx->state_mutex);
 	} else {
 		ctx->time_slice++;


