[Cbe-oss-dev] [PATCH 11/11 v2] powerpc/spufs: Implement SPU affinity on top of gang scheduling

Andre Detsch adetsch at br.ibm.com
Fri Sep 12 22:22:02 EST 2008


SPU affinity, originally implemented before we had gang
scheduling, was disabled after gang scheduling was
introduced.

This patch re-enables SPU affinity and makes it fit the new scheduling
algorithm: when a gang that uses affinity is first placed, a reference
SPU is chosen and each context in the gang reserves the SPU at its
fixed offset from that reference.  Binding then picks up the SPU
reserved for each context, and the reference is cleared again when the
gang is unscheduled.

Signed-off-by: Andre Detsch <adetsch at br.ibm.com>
---
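
A rough userspace sketch of the walk done by ctx_location(), for
reviewers who want to follow the placement logic: starting from the
gang's reference SPU, step through the SPU list and count only SPUs
that are still available, until the context's affinity offset is
consumed.  This is an illustration only, with made-up names (toy_spu,
find_at_offset) over a plain array; the real code walks the circular
aff_list and also accepts SPUs already reserved for the same gang.

/*
 * Toy model of the ctx_location() walk (illustration only, not kernel
 * code).
 */
#include <stdio.h>

struct toy_spu {
	int id;
	int is_free;	/* stands in for spu->alloc_state == SPU_FREE */
};

/* Return the SPU `offset' available slots away from spus[ref], or NULL. */
static struct toy_spu *find_at_offset(struct toy_spu *spus, int n,
				      int ref, int offset)
{
	int step = (offset >= 0) ? 1 : -1;
	int i;

	for (i = ref; i >= 0 && i < n; i += step) {
		if (!spus[i].is_free)
			continue;	/* skip SPUs we may not use */
		if (offset == 0)
			return &spus[i];
		offset -= step;
	}
	return NULL;
}

int main(void)
{
	struct toy_spu spus[8];
	struct toy_spu *spu;
	int i;

	for (i = 0; i < 8; i++) {
		spus[i].id = i;
		spus[i].is_free = (i != 3);	/* pretend SPU 3 is taken */
	}

	/* Place a context at affinity offset +2 from reference SPU 2. */
	spu = find_at_offset(spus, 8, 2, 2);
	if (spu)
		printf("offset +2 from SPU 2 lands on SPU %d\n", spu->id);
	return 0;
}
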
 arch/powerpc/platforms/cell/spufs/sched.c |  169 +++++++++++++----------------
 arch/powerpc/platforms/cell/spufs/spufs.h |    1 -
 2 files changed, 78 insertions(+), 92 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 8326034..d4ef6e3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -348,126 +348,97 @@ static void aff_set_offsets(struct spu_gang *gang)
 	gang->aff_flags |= AFF_OFFSETS_SET;
 }
 
-static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
-		 int group_size, int lowest_offset)
+static struct spu *ctx_location(struct spu *ref, int offset,
+				struct spu_gang *gang)
+{
+	struct spu *spu;
+
+	spu = NULL;
+	if (offset >= 0) {
+		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
+			if ((!gang && spu->alloc_state == SPU_FREE) ||
+							spu->gang == gang) {
+				if (offset == 0)
+					break;
+				offset--;
+			}
+		}
+	} else {
+		list_for_each_entry_reverse(spu, ref->aff_list.next,
+					    aff_list) {
+			if ((!gang && spu->alloc_state == SPU_FREE) ||
+							spu->gang == gang) {
+				if (offset == 0)
+					break;
+				offset++;
+			}
+		}
+	}
+
+	return spu;
+}
+
+static int aff_place_gang(struct spu_gang *gang)
 {
 	struct spu *spu;
 	int node, n;
+	struct spu_context *ctx;
+	int mem_aff;
+
+	BUG_ON(list_empty(&gang->aff_list_head));
+
+	if (!(gang->aff_flags & AFF_MERGED))
+		aff_merge_remaining_ctxs(gang);
+	if (!(gang->aff_flags & AFF_OFFSETS_SET))
+		aff_set_offsets(gang);
+
+	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
 
-	/*
-	 * TODO: A better algorithm could be used to find a good spu to be
-	 *       used as reference location for the ctxs chain.
-	 */
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		int available_spus;
 
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(ctx->gang, node))
+		if (!node_allowed(gang, node))
 			continue;
 
 		available_spus = 0;
 		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
-			if (spu->ctx && spu->ctx->gang
-					&& spu->ctx->aff_offset == 0)
-				available_spus -=
-					(spu->ctx->gang->contexts - 1);
-			else
+			if (spu->alloc_state == SPU_FREE)
 				available_spus++;
 		}
-		if (available_spus < ctx->gang->contexts) {
+		if (available_spus < gang->contexts) {
 			spin_unlock(&cbe_spu_info[node].list_lock);
 			continue;
 		}
 
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if ((!mem_aff || spu->has_mem_affinity) &&
-							sched_spu(spu)) {
-				spin_unlock(&cbe_spu_info[node].list_lock);
-				return spu;
+						spu->alloc_state == SPU_FREE) {
+				gang->aff_ref_spu = spu;
+				break;
 			}
 		}
+		if (!gang->aff_ref_spu) {
+			spin_unlock(&cbe_spu_info[node].list_lock);
+			continue;
+		}
-		spin_unlock(&cbe_spu_info[node].list_lock);
-	}
-	return NULL;
-}
 
-static void aff_set_ref_point_location(struct spu_gang *gang)
-{
-	int mem_aff, gs, lowest_offset;
-	struct spu_context *ctx;
-	struct spu *tmp;
-
-	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
-	lowest_offset = 0;
-	gs = 0;
-
-	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
-		gs++;
+		list_for_each_entry(ctx, &gang->list, gang_list) {
+			spu = ctx_location(gang->aff_ref_spu, ctx->aff_offset,
+					   NULL);
+			BUG_ON(!spu);
+			spu->gang = gang;
+		}
 
-	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
-								aff_list) {
-		if (&ctx->aff_list == &gang->aff_list_head)
-			break;
-		lowest_offset = ctx->aff_offset;
+		spin_unlock(&cbe_spu_info[node].list_lock);
+		return 1;
 	}
-
-	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
-							lowest_offset);
+	return 0;
 }
 
-static struct spu *ctx_location(struct spu *ref, int offset, int node)
-{
-	struct spu *spu;
-
-	spu = NULL;
-	if (offset >= 0) {
-		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
-			BUG_ON(spu->node != node);
-			if (offset == 0)
-				break;
-			if (sched_spu(spu))
-				offset--;
-		}
-	} else {
-		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
-			BUG_ON(spu->node != node);
-			if (offset == 0)
-				break;
-			if (sched_spu(spu))
-				offset++;
-		}
-	}
-
-	return spu;
-}
 
-/*
- * affinity_check is called each time a context is going to be scheduled.
- * It returns the spu ptr on which the context must run.
- */
 static int has_affinity(struct spu_gang *gang)
 {
-	if (list_empty(&gang->aff_list_head))
-		return 0;
-
-	/*
-	 * TODO: fix SPU Affinity to work with gang scheduling.
-	 */
-
-	if (atomic_read(&gang->aff_sched_count) == 0)
-		gang->aff_ref_spu = NULL;
-
-	if (!gang->aff_ref_spu) {
-		if (!(gang->aff_flags & AFF_MERGED))
-			aff_merge_remaining_ctxs(gang);
-		if (!(gang->aff_flags & AFF_OFFSETS_SET))
-			aff_set_offsets(gang);
-		aff_set_ref_point_location(gang);
-	}
-
-	return gang->aff_ref_spu != NULL;
+	return !list_empty(&gang->aff_list_head);
 }
 
 /**
@@ -486,9 +457,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 
-	if (ctx->gang)
-		atomic_dec_if_positive(&ctx->gang->aff_sched_count);
-
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -582,6 +550,21 @@ static struct spu *spu_bind(struct spu_gang *gang,
 		if (!node_allowed(gang, node))
 			continue;
 
+		if (has_affinity(gang)) {
+			spin_lock(&cbe_spu_info[node].list_lock);
+			spu = ctx_location(gang->aff_ref_spu, ctx->aff_offset,
+					   gang);
+			if (spu && (spu->alloc_state == SPU_FREE) &&
+			    (spu->gang == gang)) {
+				goto found;
+			}
+
+			/* ctx_location() should never hand us an spu we cannot use */
+			WARN_ON(spu);
+
+			spin_unlock(&cbe_spu_info[node].list_lock);
+		}
+
 		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if ((spu->alloc_state == SPU_FREE) &&
@@ -646,6 +629,7 @@ static void spu_unschedule(struct spu_gang *gang)
 		spin_unlock(&cbe_spu_info[node].list_lock);
 		mutex_unlock(&ctx->state_mutex);
 	}
+	gang->aff_ref_spu = NULL;
 }
 
 static int spu_get_idle(struct spu_gang *gang, int node)
@@ -656,7 +640,9 @@ static int spu_get_idle(struct spu_gang *gang, int node)
 
 	spu_context_nospu_trace(spu_get_idle__enter, gang);
 
-	/* TO DO: SPU affinity scheduling. */
+	if (has_affinity(gang) && !gang->aff_ref_spu) {
+		ret = aff_place_gang(gang);
+		goto spu_get_idle_out;
+	}
 
 	mode = SPU_RESERVE;
 
@@ -676,6 +662,7 @@ spu_get_idle_top:
 			continue;
 
 		spin_lock(&cbe_spu_info[lnode].list_lock);
+
 		list_for_each_entry(spu, &cbe_spu_info[lnode].spus, cbe_list) {
 			switch (mode) {
 			case SPU_RESERVE :
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 6afc514..907baf9 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -178,7 +178,6 @@ struct spu_gang {
 	struct mutex aff_mutex;
 	int aff_flags;
 	struct spu *aff_ref_spu;
-	atomic_t aff_sched_count;
 
 	/* spu scheduler statistics for zombie ctxts */
 	struct {
-- 
1.5.4.1



