[Cbe-oss-dev] [PATCH 11/11] powerpc/spufs: Implement SPU affinity on top of gang scheduling
Andre Detsch
adetsch at br.ibm.com
Fri Sep 12 09:38:27 EST 2008
SPU affinity, originally implemented before we had gang scheduling, was
disabled when gang scheduling was introduced.

This patch re-enables SPU affinity and adapts it to the new scheduling
algorithm: the affinity reference point is now set up once per gang when
the gang is scheduled in, and spu_bind() tries the SPU at the context's
affinity offset from the reference SPU before falling back to any other
free SPU.
Signed-off-by: Andre Detsch <adetsch at br.ibm.com>
---
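For reviewers, here is a toy illustration of the placement rule this
patch re-enables. It is a standalone sketch, not kernel code: the
toy_* names and NUM_SPUS are made up, and a simple ring of SPUs per
node stands in for the real ctx_location(), which walks the node's
cbe_list forward or backward from gang->aff_ref_spu by ctx->aff_offset
and may return NULL.

#include <stdio.h>

#define NUM_SPUS 8      /* SPUs per node on the toy machine */

struct toy_spu {
        int id;
        int free;
};

/*
 * Approximation of ctx_location(): step "offset" SPUs away from the
 * reference SPU, wrapping around the node's SPU ring.
 */
static struct toy_spu *toy_ctx_location(struct toy_spu *spus, int ref,
                                        int offset)
{
        int idx = ((ref + offset) % NUM_SPUS + NUM_SPUS) % NUM_SPUS;
        return &spus[idx];
}

int main(void)
{
        struct toy_spu spus[NUM_SPUS];
        int off;

        for (off = 0; off < NUM_SPUS; off++) {
                spus[off].id = off;
                spus[off].free = 1;
        }

        /*
         * A gang whose contexts carry offsets -1, 0 and +1 from the
         * reference SPU (here: SPU 4) lands on adjacent SPUs, which
         * is the point of SPU affinity.
         */
        for (off = -1; off <= 1; off++) {
                struct toy_spu *spu = toy_ctx_location(spus, 4, off);
                if (spu->free) {
                        spu->free = 0;
                        printf("ctx with offset %+d -> spu %d\n",
                               off, spu->id);
                }
        }
        return 0;
}
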
 arch/powerpc/platforms/cell/spufs/sched.c |   60 +++++++++++++++++------------
 arch/powerpc/platforms/cell/spufs/spufs.h |    1 -
 2 files changed, 35 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 8326034..c34e53f 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -443,31 +443,20 @@ static struct spu *ctx_location(struct spu *ref, int offset, int node)
 	return spu;
 }
 
-/*
- * affinity_check is called each time a context is going to be scheduled.
- * It returns the spu ptr on which the context must run.
- */
-static int has_affinity(struct spu_gang *gang)
+static void set_affinity(struct spu_gang *gang)
 {
-	if (list_empty(&gang->aff_list_head))
-		return 0;
-
-	/*
-	 * TODO: fix SPU Affinity to work with gang scheduling.
-	 */
-
-	if (atomic_read(&gang->aff_sched_count) == 0)
-		gang->aff_ref_spu = NULL;
+	BUG_ON(list_empty(&gang->aff_list_head));
 
-	if (!gang->aff_ref_spu) {
-		if (!(gang->aff_flags & AFF_MERGED))
-			aff_merge_remaining_ctxs(gang);
-		if (!(gang->aff_flags & AFF_OFFSETS_SET))
-			aff_set_offsets(gang);
-		aff_set_ref_point_location(gang);
-	}
+	if (!(gang->aff_flags & AFF_MERGED))
+		aff_merge_remaining_ctxs(gang);
+	if (!(gang->aff_flags & AFF_OFFSETS_SET))
+		aff_set_offsets(gang);
+	aff_set_ref_point_location(gang);
+}
 
-	return gang->aff_ref_spu != NULL;
+static int has_affinity(struct spu_gang *gang)
+{
+	return !list_empty(&gang->aff_list_head);
 }
 
 /**
@@ -486,9 +475,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 
-	if (ctx->gang)
-		atomic_dec_if_positive(&ctx->gang->aff_sched_count);
-
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -582,6 +568,15 @@ static struct spu *spu_bind(struct spu_gang *gang,
 		if (!node_allowed(gang, node))
 			continue;
 
+		if (has_affinity(gang)) {
+			spin_lock(&cbe_spu_info[node].list_lock);
+			spu = ctx_location(gang->aff_ref_spu, ctx->aff_offset,
+					   node);
+			if (spu && spu->alloc_state == SPU_FREE)
+				goto found;
+			spin_unlock(&cbe_spu_info[node].list_lock);
+		}
+
 		spin_lock(&cbe_spu_info[node].list_lock);
 		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 			if ((spu->alloc_state == SPU_FREE) &&
@@ -608,6 +603,9 @@ static void __spu_schedule(struct spu_gang *gang, int node_chosen)
 	spu_del_from_rq(gang);
 
+	if (has_affinity(gang))
+		set_affinity(gang);
+
 	list_for_each_entry(ctx, &gang->list, gang_list) {
 		mutex_lock(&ctx->state_mutex);
 		BUG_ON(ctx->spu);
@@ -657,6 +655,18 @@ static int spu_get_idle(struct spu_gang *gang, int node)
 	spu_context_nospu_trace(spu_get_idle__enter, gang);
 
 	/* TO DO: SPU affinity scheduling. */
+#if 0
+	if (has_affinity(gang)) {
+		aff_ref_spu = ctx->gang->aff_ref_spu;
+		node = aff_ref_spu->node;
+
+		mutex_lock(&cbe_spu_info[node].list_mutex);
+		spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
+		if (spu && spu->alloc_state == SPU_FREE)
+			goto found;
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+	}
+#endif
 
 	mode = SPU_RESERVE;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 6afc514..907baf9 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -178,7 +178,6 @@ struct spu_gang {
 	struct mutex aff_mutex;
 	int aff_flags;
 	struct spu *aff_ref_spu;
-	atomic_t aff_sched_count;
 
 	/* spu scheduler statistics for zombie ctxts */
 	struct {
--
1.5.4.1