[Cbe-oss-dev] [patch 05/18 v2] cell: add placement computation for scheduling of affinity contexts

Andre Detsch adetsch at br.ibm.com
Thu May 3 05:15:40 EST 2007


Subject: cell: add placement computation for scheduling of affinity contexts
From: Andre Detsch <adetsch at br.ibm.com>

This patch provides the spu affinity placement logic for the spufs scheduler.
Each time a gang is about to be scheduled, a placement is chosen for a
reference context. The placement of every other affinity context in the
gang is then derived from the reference context's location and a
precomputed displacement offset.

Signed-off-by: Andre Detsch <adetsch at br.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann at de.ibm.com>
---

Index: linux-2.6.21/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.21.orig/arch/powerpc/platforms/cell/spufs/sched.c
+++ linux-2.6.21/arch/powerpc/platforms/cell/spufs/sched.c
@@ -184,6 +184,8 @@ static void spu_bind_context(struct spu 
 		 spu->number, spu->node);
 	if (ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
+	if (!list_empty(&ctx->aff_list))
+		atomic_inc(&ctx->gang->aff_sched_count);
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
@@ -217,6 +219,9 @@ static void spu_unbind_context(struct sp
 
 	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
+	if (!list_empty(&ctx->aff_list))
+		if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
+			ctx->gang->aff_ref_spu = NULL;
 	spu_remove_from_active_list(spu);
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
@@ -520,3 +525,136 @@ void __exit spu_sched_exit(void)
 	kfree(spu_prio);
 	destroy_workqueue(spu_sched_wq);
 }
+
+static void aff_merge_remaining_ctxs(struct spu_gang *gang)
+{
+	struct spu_context *ctx;
+
+	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
+		if (list_empty(&ctx->aff_list))
+			list_add(&ctx->aff_list, &gang->aff_list_head);
+	}
+	gang->aff_flags |= AFF_MERGED;
+}
+
/*
 * aff_set_offsets - precompute, for every context on the affinity chain,
 * its signed displacement relative to the reference context
 * (gang->aff_ref_ctx).
 *
 * Contexts preceding the reference context get offsets -1, -2, ...; the
 * reference context and those following it get 0, 1, 2, ...
 *
 * The list heads used here are deliberately shifted: reverse-iterating
 * from &aff_ref_ctx->aff_list starts at the entry just before the
 * reference context, while forward-iterating from
 * aff_ref_ctx->aff_list.prev makes the reference context itself the
 * first entry visited (so it receives offset 0).  Both walks stop when
 * they reach the real list head, gang->aff_list_head.
 *
 * Caller must hold gang->aff_mutex.
 */
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	/* Contexts before the reference context: -1, -2, ... */
	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	/* Reference context and those after it: 0, 1, 2, ... */
	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
+
+static inline int spu_is_schedulable(struct spu *spu)
+{
+	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
+}
+
+static struct spu *
+aff_ref_location(int mem_aff, int group_size, int prio, int lowest_offset)
+{
+	struct spu *spu;
+	int node, n;
+
+	/* TODO: A better algorithm could be used to find a good spu to be
+	 *       used as reference location for the ctxs chain.
+	 */
+	node = cpu_to_node(raw_smp_processor_id());
+	for (n = 0; n < MAX_NUMNODES; n++, node++) {
+		node = (node < MAX_NUMNODES) ? node : 0;
+		if (!node_allowed(node))
+			continue;
+		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+			if ((!mem_aff || spu->has_mem_affinity) &&
+						spu_is_schedulable(spu))
+				return spu;
+		}
+	}
+	return NULL;
+}
+
+static void aff_set_ref_point_location(struct spu_gang *gang)
+{
+	int mem_aff, gs, lowest_offset;
+	struct spu_context *ctx;
+	struct list_head *tmp;
+
+	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
+	lowest_offset = 0;
+	gs = 0;
+	list_for_each(tmp, &gang->aff_list_head)
+		gs++;
+
+	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+								aff_list) {
+		if (&ctx->aff_list == &gang->aff_list_head)
+			break;
+		lowest_offset = ctx->aff_offset;
+	}
+
+	gang->aff_ref_spu = aff_ref_location(mem_aff, gs, ctx->prio,
+							lowest_offset);
+}
+
/*
 * ctx_location - starting from the reference SPU, step |offset|
 * schedulable SPUs along the hardware affinity list and return the SPU
 * found there.
 *
 * The list heads are deliberately shifted so that ref itself is the
 * first entry visited in either direction: the forward walk starts at
 * ref->aff_list.prev and the reverse walk at ref->aff_list.next.  Thus
 * offset == 0 yields ref.  SPUs that are not schedulable (running
 * NOSCHED contexts) are stepped over without consuming the offset.
 *
 * NOTE(review): if the end of the aff_list chain is reached before
 * offset hits 0, the returned pointer is the container of a list head,
 * not a real SPU — presumably the offsets computed by aff_set_offsets()
 * never exceed the chain; verify against the callers.
 */
static struct spu* ctx_location(struct spu *ref, int offset)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		/* Walk forward; ref is visited first. */
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			if (offset == 0)
				break;
			if (spu_is_schedulable(spu))
				offset--;
		}
	} else {
		/* Walk backward; ref is visited first. */
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			if (offset == 0)
				break;
			if (spu_is_schedulable(spu))
				offset++;
		}
	}
	return spu;
}
+
+/**
+ * affinity_check is called each time a context is going to be scheduled.
+ * It returns the spu ptr on which the context must run.
+ */
+struct spu* affinity_check(struct spu_context *ctx)
+{
+	struct spu_gang *gang;
+
+	if (list_empty(&ctx->aff_list))
+		return NULL;
+	gang = ctx->gang;
+	mutex_lock(&gang->aff_mutex);
+	if (!gang->aff_ref_spu) {
+		if (!(gang->aff_flags & AFF_MERGED))
+			aff_merge_remaining_ctxs(gang);
+		if (!(gang->aff_flags & AFF_OFFSETS_SET))
+			aff_set_offsets(gang);
+		aff_set_ref_point_location(gang);
+	}
+	mutex_unlock(&gang->aff_mutex);
+	if (!gang->aff_ref_spu)
+		return NULL;
+	return ctx_location(gang->aff_ref_spu, ctx->aff_offset);
+}
Index: linux-2.6.21/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.21.orig/arch/powerpc/platforms/cell/spufs/spufs.h
+++ linux-2.6.21/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -91,6 +91,7 @@ struct spu_context {
 
 	struct list_head aff_list;
 	int aff_head;
+	int aff_offset;
 };
 
 struct spu_gang {
@@ -103,6 +104,8 @@ struct spu_gang {
 	struct list_head aff_list_head;
 	struct mutex aff_mutex;
 	int aff_flags;
+	struct spu *aff_ref_spu;
+	atomic_t aff_sched_count;
 };
 
 /* Flag bits for spu_gang aff_flags */
@@ -188,6 +191,9 @@ void spu_gang_add_ctx(struct spu_gang *g
 /* fault handling */
 int spufs_handle_class1(struct spu_context *ctx);
 
+/* affinity */
+struct spu *affinity_check(struct spu_context *ctx);
+
 /* context management */
 static inline void spu_acquire(struct spu_context *ctx)
 {
Index: linux-2.6.21/arch/powerpc/platforms/cell/spufs/gang.c
===================================================================
--- linux-2.6.21.orig/arch/powerpc/platforms/cell/spufs/gang.c
+++ linux-2.6.21/arch/powerpc/platforms/cell/spufs/gang.c
@@ -38,6 +38,7 @@ struct spu_gang *alloc_spu_gang(void)
 	mutex_init(&gang->aff_mutex);
 	INIT_LIST_HEAD(&gang->list);
 	INIT_LIST_HEAD(&gang->aff_list_head);
+	gang->aff_sched_count = (atomic_t)ATOMIC_INIT(0);
 
 out:
 	return gang;
@@ -75,8 +76,10 @@ void spu_gang_remove_ctx(struct spu_gang
 {
 	mutex_lock(&gang->mutex);
 	WARN_ON(ctx->gang != gang);
-	if (!list_empty(&ctx->aff_list))
+	if (!list_empty(&ctx->aff_list)) {
 		list_del_init(&ctx->aff_list);
+		gang->aff_flags &= ~AFF_OFFSETS_SET;
+	}
 	list_del_init(&ctx->gang_list);
 	gang->contexts--;
 	mutex_unlock(&gang->mutex);




More information about the cbe-oss-dev mailing list