[Cbe-oss-dev] [PATCH] spufs: fix race condition on gang->aff_ref_spu

Andre Detsch adetsch at br.ibm.com
Thu Aug 16 06:50:03 EST 2007


Subject: spufs: fix race condition on gang->aff_ref_spu

From: Andre Detsch <adetsch at br.ibm.com>

The affinity reference point location (gang->aff_ref_spu) is reset
when the whole gang is descheduled. However, the last member of
a gang can be descheduled while we are trying to schedule another
member of the same gang. This led to a race condition in which
the code used gang->aff_ref_spu in an unsafe manner.

The problem is fixed by holding gang->aff_mutex a little longer
and incrementing gang->aff_sched_count (which controls when
gang->aff_ref_spu should be reset) a little earlier.
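
As a minimal sketch of the resulting locking protocol (the struct and
helper names below are simplified stand-ins for illustration, not the
real spufs definitions): the scheduling path pins the reference point
by incrementing aff_sched_count while still holding aff_mutex, and the
descheduling (or scheduler failure) path clears aff_ref_spu only when
the count drops to zero, again under the same mutex.

#include <linux/mutex.h>
#include <linux/atomic.h>

struct spu;				/* opaque here; real definition is elsewhere */

struct gang {				/* stand-in for the spufs gang structure */
	struct mutex aff_mutex;		/* guards aff_ref_spu and the count below */
	atomic_t aff_sched_count;	/* scheduled gang members with affinity */
	struct spu *aff_ref_spu;	/* affinity reference point */
};

/* Scheduling path: pin the reference point before dropping the mutex,
 * so a concurrent unbind of the gang's last member cannot clear it. */
static struct spu *aff_ref_get(struct gang *gang)
{
	struct spu *ref;

	mutex_lock(&gang->aff_mutex);
	ref = gang->aff_ref_spu;
	if (ref)
		atomic_inc(&gang->aff_sched_count);
	mutex_unlock(&gang->aff_mutex);
	return ref;			/* valid until aff_ref_put() */
}

/* Descheduling path (and the scheduler's failure path): drop the
 * reference and reset aff_ref_spu only when the last member is gone. */
static void aff_ref_put(struct gang *gang)
{
	mutex_lock(&gang->aff_mutex);
	if (atomic_dec_and_test(&gang->aff_sched_count))
		gang->aff_ref_spu = NULL;
	mutex_unlock(&gang->aff_mutex);
}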

Signed-off-by: Andre Detsch <adetsch at br.ibm.com>

Index: linux-2.6.22/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.22.orig/arch/powerpc/platforms/cell/spufs/sched.c
+++ linux-2.6.22/arch/powerpc/platforms/cell/spufs/sched.c
@@ -348,7 +348,6 @@ static int has_affinity(struct spu_conte
 	if (list_empty(&ctx->aff_list))
 		return 0;
 
-	mutex_lock(&gang->aff_mutex);
 	if (!gang->aff_ref_spu) {
 		if (!(gang->aff_flags & AFF_MERGED))
 			aff_merge_remaining_ctxs(gang);
@@ -356,7 +355,6 @@ static int has_affinity(struct spu_conte
 			aff_set_offsets(gang);
 		aff_set_ref_point_location(gang);
 	}
-	mutex_unlock(&gang->aff_mutex);
 
 	return gang->aff_ref_spu != NULL;
 }
@@ -375,8 +373,6 @@ static void spu_bind_context(struct spu 
 
 	if (ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
-	if (!list_empty(&ctx->aff_list))
-		atomic_inc(&ctx->gang->aff_sched_count);
 
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;
@@ -418,9 +414,16 @@ static void spu_unbind_context(struct sp
 
  	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
- 	if (!list_empty(&ctx->aff_list))
- 		if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
- 			ctx->gang->aff_ref_spu = NULL;
+
+	if (ctx->gang) {
+		mutex_lock(&ctx->gang->aff_mutex);
+		if (has_affinity(ctx)) {
+			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
+				ctx->gang->aff_ref_spu = NULL;
+		}
+		mutex_unlock(&ctx->gang->aff_mutex);
+	}
+
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -511,20 +514,32 @@ static void spu_prio_wait(struct spu_con
 
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
-	struct spu *spu;
+	struct spu *spu, *aff_ref_spu;
 	int node, n;
 
-	if (has_affinity(ctx)) {
-		node = ctx->gang->aff_ref_spu->node;
+	if (ctx->gang) {
+		mutex_lock(&ctx->gang->aff_mutex);
+		if (has_affinity(ctx)) {
+			aff_ref_spu = ctx->gang->aff_ref_spu;
+			atomic_inc(&ctx->gang->aff_sched_count);
+			mutex_unlock(&ctx->gang->aff_mutex);
+			node = aff_ref_spu->node;
 
-		mutex_lock(&cbe_spu_info[node].list_mutex);
-		spu = ctx_location(ctx->gang->aff_ref_spu, ctx->aff_offset, node);
-		if (spu && spu->alloc_state == SPU_FREE)
-			goto found;
-		mutex_unlock(&cbe_spu_info[node].list_mutex);
-		return NULL;
-	}
+			mutex_lock(&cbe_spu_info[node].list_mutex);
+			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
+			if (spu && spu->alloc_state == SPU_FREE)
+				goto found;
+			mutex_unlock(&cbe_spu_info[node].list_mutex);
 
+			mutex_lock(&ctx->gang->aff_mutex);
+			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
+				ctx->gang->aff_ref_spu = NULL;
+			mutex_unlock(&ctx->gang->aff_mutex);
+
+			return NULL;
+		}
+		mutex_unlock(&ctx->gang->aff_mutex);
+	}
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;


