[Cbe-oss-dev] (no subject)

adetsch at br.ibm.com adetsch at br.ibm.com
Thu Dec 11 06:03:32 EST 2008


From b61550e4a6de4d2cf164740183e39b70d51f4466 Mon Sep 17 00:00:00 2001
In-Reply-To: <200812101654.05091.adetsch at br.ibm.com>
References: <200812101654.05091.adetsch at br.ibm.com>
From: Andre Detsch <adetsch at br.ibm.com>
Date: Wed, 10 Dec 2008 16:03:33 -0300
Subject: [PATCH 11/18] powerpc/spufs: Implement SPU affinity on top of gang scheduling
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
Message-Id: <200812101703.33496.adetsch at br.ibm.com>

SPU affinity, originally implemented before we had gang
scheduling, was disabled after gang scheduling was
introduced.

This patch re-enables SPU affinity, making it fit the new scheduling
algorithm.

Signed-off-by: Andre Detsch <adetsch at br.ibm.com>
---
 arch/powerpc/platforms/cell/spufs/sched.c |  185 ++++++++++++++---------------
 arch/powerpc/platforms/cell/spufs/spufs.h |    1 -
 2 files changed, 88 insertions(+), 98 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3bc0308..5289cdb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -349,16 +349,58 @@ static void aff_set_offsets(struct spu_gang *gang)
 	gang->aff_flags |= AFF_OFFSETS_SET;
 }
 
-static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
-		 int group_size, int lowest_offset)
+
+static inline int is_spu_available(struct spu *spu, struct spu_gang *gang)
+{
+	return spu->alloc_state == SPU_FREE && (!spu->gang || spu->gang == gang);
+}
+
+static struct spu *ctx_location(struct spu *ref, int offset,
+				struct spu_gang *gang, int reserve)
+{
+	struct spu *spu;
+
+	if (!offset)
+		return ref;
+
+	if (offset > 0) {
+		offset--;
+		list_for_each_entry(spu, &ref->aff_list, aff_list) {
+			if ((reserve && is_spu_available(spu, gang)) ||
+			    (!reserve && spu->gang == gang))
+				if (!offset--)
+					return spu;
+		}
+	} else {
+		offset++;
+		list_for_each_entry_reverse(spu, &ref->aff_list, aff_list) {
+			if ((reserve && is_spu_available(spu, gang)) ||
+			    (!reserve && spu->gang == gang))
+				if (!offset++)
+					return spu;
+		}
+	}
+
+	return NULL;
+}
+
+static int aff_place_gang(struct spu_gang *gang)
 {
 	struct spu *spu;
 	int node, n;
+	struct spu_context *ctx;
+	int mem_aff;
+
+
+	BUG_ON(list_empty(&gang->aff_list_head));
+
+	if (!(gang->aff_flags & AFF_MERGED))
+		aff_merge_remaining_ctxs(gang);
+	if (!(gang->aff_flags & AFF_OFFSETS_SET))
+		aff_set_offsets(gang);
+
+	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
 
-	/*
-	 * TODO: A better algorithm could be used to find a good spu to be
-	 *       used as reference location for the ctxs chain.
-	 */
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		/*
@@ -373,109 +415,51 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
 		int available_spus;
 
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(ctx->gang, node))
+		if (!node_allowed(gang, node))
 			continue;
 
 		available_spus = 0;
 		mutex_lock(&cbe_spu_info[node].list_mutex);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
-			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
-					&& spu->ctx->gang->aff_ref_spu)
-				available_spus -= spu->ctx->gang->contexts;
-			available_spus++;
+			if (is_spu_available(spu, gang))
+				available_spus++;
 		}
-		if (available_spus < ctx->gang->contexts) {
+		if (available_spus < gang->contexts) {
 			mutex_unlock(&cbe_spu_info[node].list_mutex);
 			continue;
 		}
 
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if ((!mem_aff || spu->has_mem_affinity) &&
-							sched_spu(spu)) {
-				mutex_unlock(&cbe_spu_info[node].list_mutex);
-				return spu;
-			}
+						   is_spu_available(spu, gang))
+				gang->aff_ref_spu = spu;
 		}
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
-	}
-	return NULL;
-}
 
-static void aff_set_ref_point_location(struct spu_gang *gang)
-{
-	int mem_aff, gs, lowest_offset;
-	struct spu_context *ctx;
-	struct spu *tmp;
-
-	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
-	lowest_offset = 0;
-	gs = 0;
+		if (!gang->aff_ref_spu) {
+			list_for_each_entry(spu, &cbe_spu_info[node].spus,
+								cbe_list) {
+				if (is_spu_available(spu, gang))
+					gang->aff_ref_spu = spu;
+			}
+		}
 
-	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
-		gs++;
+		list_for_each_entry(ctx, &gang->list, gang_list) {
+			spu = ctx_location(gang->aff_ref_spu, ctx->aff_offset,
+								gang, 1);
+			BUG_ON(!spu);
+			spu->gang = gang;
+		}
 
-	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
-								aff_list) {
-		if (&ctx->aff_list == &gang->aff_list_head)
-			break;
-		lowest_offset = ctx->aff_offset;
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+		return 1;
 	}
-
-	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
-							lowest_offset);
+	return 0;
 }
 
-static struct spu *ctx_location(struct spu *ref, int offset, int node)
-{
-	struct spu *spu;
-
-	spu = NULL;
-	if (offset >= 0) {
-		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
-			BUG_ON(spu->node != node);
-			if (offset == 0)
-				break;
-			if (sched_spu(spu))
-				offset--;
-		}
-	} else {
-		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
-			BUG_ON(spu->node != node);
-			if (offset == 0)
-				break;
-			if (sched_spu(spu))
-				offset++;
-		}
-	}
-
-	return spu;
-}
 
-/*
- * affinity_check is called each time a context is going to be scheduled.
- * It returns the spu ptr on which the context must run.
- */
 static int has_affinity(struct spu_gang *gang)
 {
-	if (list_empty(&gang->aff_list_head))
-		return 0;
-
-	/*
-	 * TODO: fix SPU Affinity to work with gang scheduling.
-	 */
-
-	if (atomic_read(&gang->aff_sched_count) == 0)
-		gang->aff_ref_spu = NULL;
-
-	if (!gang->aff_ref_spu) {
-		if (!(gang->aff_flags & AFF_MERGED))
-			aff_merge_remaining_ctxs(gang);
-		if (!(gang->aff_flags & AFF_OFFSETS_SET))
-			aff_set_offsets(gang);
-		aff_set_ref_point_location(gang);
-	}
-
-	return gang->aff_ref_spu != NULL;
+	return !list_empty(&gang->aff_list_head);
 }
 
 /**
@@ -491,17 +475,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
- 	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
+	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 
-	if (ctx->gang)
-		/*
-		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
-		 * being considered in this gang. Using atomic_dec_if_positive
-		 * allow us to skip an explicit check for affinity in this gang
-		 */
-		atomic_dec_if_positive(&ctx->gang->aff_sched_count);
-
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -591,6 +567,16 @@ static struct spu *spu_bind(struct spu_gang *gang,
 	if (node == SPU_PLACE_ALL)
 		node = cpu_to_node(raw_smp_processor_id());
 
+	if (gang->aff_ref_spu) {
+		mutex_lock(&cbe_spu_info[gang->aff_ref_spu->node].list_mutex);
+		spu = ctx_location(gang->aff_ref_spu, ctx->aff_offset, gang, 0);
+		BUG_ON(!spu);
+		BUG_ON(spu->alloc_state != SPU_FREE);
+		BUG_ON(spu->gang != gang);
+		mutex_unlock(&cbe_spu_info[gang->aff_ref_spu->node].list_mutex);
+		goto found;
+	}
+
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
 		if (!node_allowed(gang, node))
@@ -660,6 +646,7 @@ static void spu_unschedule(struct spu_gang *gang)
 		mutex_unlock(&cbe_spu_info[node].list_mutex);
 		mutex_unlock(&ctx->state_mutex);
 	}
+	gang->aff_ref_spu = NULL;
 }
 
 static int spu_get_idle(struct spu_gang *gang, int node)
@@ -670,7 +657,10 @@ static int spu_get_idle(struct spu_gang *gang, int node)
 
 	spu_context_nospu_trace(spu_get_idle__enter, gang);
 
-	/* TO DO: SPU affinity scheduling. */
+	if (has_affinity(gang) && (!gang->aff_ref_spu)) {
+		if (aff_place_gang(gang))
+			goto spu_get_idle_out;
+	}
 
 	mode = SPU_RESERVE;
 
@@ -690,6 +680,7 @@ spu_get_idle_top:
 			continue;
 
 		mutex_lock(&cbe_spu_info[lnode].list_mutex);
+
 		list_for_each_entry(spu, &cbe_spu_info[lnode].spus, cbe_list) {
 			switch (mode) {
 			case SPU_RESERVE :
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index bbbfc6a..468ee37 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -177,7 +177,6 @@ struct spu_gang {
 	struct mutex aff_mutex;
 	int aff_flags;
 	struct spu *aff_ref_spu;
-	atomic_t aff_sched_count;
 
 	/* spu scheduler statistics for zombie ctxts */
 	struct {
-- 
1.5.4.3




More information about the cbe-oss-dev mailing list