[Cbe-oss-dev] [PATCH] spufs: implement /proc/spu_loadavg

Christoph Hellwig hch at lst.de
Mon Apr 16 05:33:34 EST 2007


Provide load average information for spu contexts.  The format
is identical to /proc/loadavg, which is also where a lot of the code
and concepts are borrowed from.
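
For illustration, reading the new file produces a line in the same
shape as /proc/loadavg (the values here are made up):

	0.42 0.35 0.18 2/16 1133

That is the 1, 5 and 15 minute load averages, the number of running
or waiting contexts over the total number of contexts, and the last
pid assigned, just as in /proc/loadavg.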


Signed-off-by: Christoph Hellwig <hch at lst.de>

Index: linux-2.6/arch/powerpc/platforms/cell/spufs/context.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/context.c	2007-04-15 19:49:49.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/context.c	2007-04-15 19:50:07.000000000 +0200
@@ -24,10 +24,14 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <asm/atomic.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
 #include "spufs.h"
 
+
+atomic_t nr_spu_contexts = ATOMIC_INIT(0);
+
 struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
 	struct spu_context *ctx;
@@ -57,6 +61,8 @@ struct spu_context *alloc_spu_context(st
 		spu_gang_add_ctx(gang, ctx);
 	ctx->cpus_allowed = current->cpus_allowed;
 	spu_set_timeslice(ctx);
+
+	atomic_inc(&nr_spu_contexts);
 	goto out;
 out_free:
 	kfree(ctx);
@@ -78,6 +84,7 @@ void destroy_spu_context(struct kref *kr
 	if (ctx->prof_priv_kref)
 		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
 	BUG_ON(!list_empty(&ctx->rq));
+	atomic_dec(&nr_spu_contexts);
 	kfree(ctx);
 }
 
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-15 19:49:46.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/sched.c	2007-04-15 19:52:30.000000000 +0200
@@ -37,6 +37,9 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -51,8 +54,11 @@ struct spu_prio_array {
 	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
+	int nr_active[MAX_NUMNODES];
+	int nr_waiting;
 };
 
+static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
@@ -163,14 +169,18 @@ static int node_allowed(struct spu_conte
  */
 static void spu_add_to_active_list(struct spu *spu)
 {
-	mutex_lock(&spu_prio->active_mutex[spu->node]);
-	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
-	mutex_unlock(&spu_prio->active_mutex[spu->node]);
+	int node = spu->node;
+
+	mutex_lock(&spu_prio->active_mutex[node]);
+	spu_prio->nr_active[node]++;
+	list_add_tail(&spu->list, &spu_prio->active_list[node]);
+	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
 static void __spu_remove_from_active_list(struct spu *spu)
 {
 	list_del_init(&spu->list);
+	spu_prio->nr_active[spu->node]--;
 }
 
 /**
@@ -306,6 +316,7 @@ static void __spu_add_to_rq(struct spu_c
 {
 	int prio = ctx->prio;
 
+	spu_prio->nr_waiting++;
 	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
 	set_bit(prio, spu_prio->bitmap);
 }
@@ -314,8 +325,10 @@ static void __spu_del_from_rq(struct spu
 {
 	int prio = ctx->prio;
 
-	if (!list_empty(&ctx->rq))
+	if (!list_empty(&ctx->rq)) {
 		list_del_init(&ctx->rq);
+		spu_prio->nr_waiting--;
+	}
 	if (list_empty(&spu_prio->runq[prio]))
 		clear_bit(prio, spu_prio->bitmap);
 }
@@ -586,10 +599,56 @@ static void spusched_tick(struct spu_con
 	}
 }
 
+/**
+ * count_active_contexts - count nr of active contexts
+ *
+ * Return the number of contexts currently running or waiting to run.
+ *
+ * Note that we don't take runq_lock / active_mutex here.  Reading
+ * a single 32bit value is atomic on powerpc, and we don't care
+ * about memory ordering issues here.
+ */
+static unsigned long count_active_contexts(void)
+{
+	int nr_active = 0, node;
+
+	for (node = 0; node < MAX_NUMNODES; node++)
+		nr_active += spu_prio->nr_active[node];
+	nr_active += spu_prio->nr_waiting;
+
+	return nr_active;
+}
+
+/**
+ * spu_calc_load - given tick count, update the avenrun load estimates.
+ * @ticks:	tick count
+ *
+ * No locking against reading these values from userspace, as for
+ * the CPU loadavg code.
+ */
+static void spu_calc_load(unsigned long ticks)
+{
+	unsigned long active_tasks; /* fixed-point */
+	static int count = LOAD_FREQ;
+
+	count -= ticks;
+
+	if (unlikely(count < 0)) {
+		active_tasks = count_active_contexts() * FIXED_1;
+		do {
+			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+			count += LOAD_FREQ;
+		} while (count < 0);
+	}
+}
+
 static void spusched_wake(unsigned long data)
 {
 	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	wake_up_process(spusched_task);
+	spu_calc_load(SPUSCHED_TICK);
 }
 
 static int spusched_thread(void *unused)
@@ -616,13 +675,52 @@ static int spusched_thread(void *unused)
 	return 0;
 }
 
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+static int show_spu_loadavg(struct seq_file *s, void *private)
+{
+	int a, b, c;
+
+	a = spu_avenrun[0] + (FIXED_1/200);
+	b = spu_avenrun[1] + (FIXED_1/200);
+	c = spu_avenrun[2] + (FIXED_1/200);
+
+	/*
+	 * Note that last_pid doesn't really make much sense for the
+	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * but we include it here to have a 100% compatible interface.
+	 */
+	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		count_active_contexts(),
+		atomic_read(&nr_spu_contexts),
+		current->nsproxy->pid_ns->last_pid);
+	return 0;
+}
+
+static int spu_loadavg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_spu_loadavg, NULL);
+}
+
+static const struct file_operations spu_loadavg_fops = {
+	.open		= spu_loadavg_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 int __init spu_sched_init(void)
 {
-	int i;
+	struct proc_dir_entry *entry;
+	int err = -ENOMEM, i;
 
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
 	if (!spu_prio)
-		return -ENOMEM;
+		goto out;
 
 	for (i = 0; i < MAX_PRIO; i++) {
 		INIT_LIST_HEAD(&spu_prio->runq[i]);
@@ -637,11 +735,22 @@ int __init spu_sched_init(void)
 
 	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
 	if (IS_ERR(spusched_task)) {
-		kfree(spu_prio);
-		return PTR_ERR(spusched_task);
+		err = PTR_ERR(spusched_task);
+		goto out_free_spu_prio;
 	}
+
+	entry = create_proc_entry("spu_loadavg", 0, NULL);
+	if (!entry)
+		goto out_stop_kthread;
+	entry->proc_fops = &spu_loadavg_fops;
 	return 0;
 
+ out_stop_kthread:
+	kthread_stop(spusched_task);
+ out_free_spu_prio:
+	kfree(spu_prio);
+ out:
+	return err;
 }
 
 void __exit spu_sched_exit(void)
@@ -649,6 +758,8 @@ void __exit spu_sched_exit(void)
 	struct spu *spu, *tmp;
 	int node;
 
+	remove_proc_entry("spu_loadavg", NULL);
+
 	kthread_stop(spusched_task);
 
 	for (node = 0; node < MAX_NUMNODES; node++) {
Index: linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.orig/arch/powerpc/platforms/cell/spufs/spufs.h	2007-04-15 19:49:49.000000000 +0200
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/spufs.h	2007-04-15 19:50:07.000000000 +0200
@@ -198,6 +198,7 @@ int spufs_handle_class1(struct spu_conte
 struct spu *affinity_check(struct spu_context *ctx);
 
 /* context management */
+extern atomic_t nr_spu_contexts;
 static inline void spu_acquire(struct spu_context *ctx)
 {
 	mutex_lock(&ctx->state_mutex);


