[Cbe-oss-dev] [PATCH 6/6] spufs: remove run mutex
Luke Browning
lukebr at linux.vnet.ibm.com
Thu Feb 7 04:43:44 EST 2008
Remove the context-specific run_mutex. It does not protect any context-specific
state; it is only used to ensure that the spu_run system call is not invoked
concurrently for the same context by two or more threads. The ctx->tid field
can serve the same purpose without adding extra locking.
Signed-off-by: Luke Browning <lukebrowning at us.ibm.com>
---
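As a reviewer aid, a minimal standalone sketch of the tid-based exclusion
described above (not part of the patch). The demo_ctx/demo_update_sched_info
names are illustrative only; in the patch the check lives in
__spu_update_sched_info() and is assumed to run under the context's
state_mutex.

#include <errno.h>
#include <sys/types.h>

struct demo_ctx {
	pid_t tid;	/* 0 until a controlling thread claims the context */
};

/* Assumed to be called with the context's state lock held. */
static int demo_update_sched_info(struct demo_ctx *ctx, pid_t current_tid)
{
	/* Can't change controlling threads: only the first caller may run. */
	if (ctx->tid && ctx->tid != current_tid)
		return -EPERM;

	ctx->tid = current_tid;
	return 0;
}

A second thread calling spu_run on the same context thus gets -EPERM from the
scheduling-info update instead of blocking on a dedicated run mutex.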
Index: spufs/arch/powerpc/platforms/cell/spufs/context.c
===================================================================
--- spufs.orig/arch/powerpc/platforms/cell/spufs/context.c 2008-02-06 01:08:05.000000000 -0200
+++ spufs/arch/powerpc/platforms/cell/spufs/context.c 2008-02-06 13:29:34.000000000 -0200
@@ -47,7 +47,6 @@
mutex_init(&ctx->mapping_lock);
kref_init(&ctx->kref);
mutex_init(&ctx->state_mutex);
- mutex_init(&ctx->run_mutex);
init_waitqueue_head(&ctx->ibox_wq);
init_waitqueue_head(&ctx->wbox_wq);
init_waitqueue_head(&ctx->stop_wq);
@@ -61,7 +60,7 @@
if (gang)
spu_gang_add_ctx(gang, ctx);
- __spu_update_sched_info(ctx);
+ (void) __spu_update_sched_info(ctx);
spu_set_timeslice(ctx);
ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
Index: spufs/arch/powerpc/platforms/cell/spufs/run.c
===================================================================
--- spufs.orig/arch/powerpc/platforms/cell/spufs/run.c 2008-02-06 12:51:26.000000000 -0200
+++ spufs/arch/powerpc/platforms/cell/spufs/run.c 2008-02-06 13:18:51.000000000 -0200
@@ -162,6 +162,7 @@
unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
int ret;
+ spu_enable_spu(ctx);
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
/*
@@ -228,6 +229,8 @@
{
int ret = 0;
+ spu_disable_spu(ctx);
+
spu_del_from_rq(ctx);
*status = ctx->ops->status_read(ctx);
@@ -337,17 +340,17 @@
struct spu *spu;
u32 status;
- if (mutex_lock_interruptible(&ctx->run_mutex))
+ ret = spu_acquire(ctx);
+ if (ret)
return -ERESTARTSYS;
- spu_enable_spu(ctx);
ctx->event_return = 0;
- ret = spu_acquire(ctx);
- if (ret)
- goto out_unlock;
-
- spu_update_sched_info(ctx);
+ ret = spu_update_sched_info(ctx);
+ if (ret) {
+ spu_release(ctx);
+ goto out;
+ }
ret = spu_run_init(ctx, npc);
if (ret) {
@@ -398,7 +401,6 @@
SPU_STATUS_STOPPED_BY_HALT |
SPU_STATUS_SINGLE_STEP)));
- spu_disable_spu(ctx);
ret = spu_run_fini(ctx, npc, &status);
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
@@ -425,7 +427,6 @@
out:
*event = ctx->event_return;
-out_unlock:
- mutex_unlock(&ctx->run_mutex);
+
return ret;
}
Index: spufs/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- spufs.orig/arch/powerpc/platforms/cell/spufs/spufs.h 2008-02-06 12:48:22.000000000 -0200
+++ spufs/arch/powerpc/platforms/cell/spufs/spufs.h 2008-02-06 13:28:36.000000000 -0200
@@ -62,7 +62,6 @@
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
struct mutex state_mutex;
- struct mutex run_mutex;
struct mm_struct *owner;
@@ -257,8 +256,8 @@
void spu_yield(struct spu_context *ctx);
void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
void spu_set_timeslice(struct spu_context *ctx);
-void spu_update_sched_info(struct spu_context *ctx);
-void __spu_update_sched_info(struct spu_context *ctx);
+int spu_update_sched_info(struct spu_context *ctx);
+int __spu_update_sched_info(struct spu_context *ctx);
int __init spu_sched_init(void);
void spu_sched_exit(void);
Index: spufs/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- spufs.orig/arch/powerpc/platforms/cell/spufs/sched.c 2008-02-06 13:16:22.000000000 -0200
+++ spufs/arch/powerpc/platforms/cell/spufs/sched.c 2008-02-06 13:26:59.000000000 -0200
@@ -102,7 +102,7 @@
/*
* Update scheduling information from the owning thread.
*/
-void __spu_update_sched_info(struct spu_context *ctx)
+int __spu_update_sched_info(struct spu_context *ctx)
{
/*
* assert that the context is not on the runqueue, so it is safe
@@ -114,7 +114,11 @@
* 32-Bit assignments are atomic on powerpc, and we don't care about
* memory ordering here because retrieving the controlling thread is
* per definition racy.
+ *
+ * Can't change controlling threads.
*/
+ if (ctx->tid && ctx->tid != current->pid)
+ return -EPERM;
ctx->tid = current->pid;
/*
@@ -138,11 +142,14 @@
* if it is timesliced or preempted.
*/
ctx->cpus_allowed = current->cpus_allowed;
+
+ return 0;
}
-void spu_update_sched_info(struct spu_context *ctx)
+int spu_update_sched_info(struct spu_context *ctx)
{
int node;
+ int ret;
if (ctx->state == SPU_STATE_RUNNABLE) {
node = ctx->spu->node;
@@ -151,11 +158,13 @@
* Take list_mutex to sync with find_victim().
*/
mutex_lock(&cbe_spu_info[node].list_mutex);
- __spu_update_sched_info(ctx);
+ ret = __spu_update_sched_info(ctx);
mutex_unlock(&cbe_spu_info[node].list_mutex);
} else {
- __spu_update_sched_info(ctx);
+ ret = __spu_update_sched_info(ctx);
}
+
+ return ret;
}
static int __node_allowed(struct spu_context *ctx, int node)