Drop the spu_context{,_nospu}_trace macros and use trace_mark directly.

The macros offered little flexibility, in that they forced every marker to
receive either a context pointer or a context+spu pointer pair.  To get
really useful traces we must be able to log other information besides these
two types, and with macros we would need to add many more of them to cover
all cases.  It is better to use trace_mark by hand so that each marker can
carry its own format.  Furthermore, the kernel is expected to gain markers
everywhere at some point, and having macros only in spufs while the rest of
the code uses trace_mark directly would make things confusing.

Signed-off-by: Julio M. Merino Vidal

Index: linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/context.c
===================================================================
--- linux-2.6.25.y.orig/arch/powerpc/platforms/cell/spufs/context.c
+++ linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/context.c
@@ -78,7 +78,7 @@ void destroy_spu_context(struct kref *kr
 {
 	struct spu_context *ctx;
 	ctx = container_of(kref, struct spu_context, kref);
-	spu_context_nospu_trace(destroy_spu_context__enter, ctx);
+	trace_mark(destroy_spu_context__enter, "ctx %p", ctx);
 	mutex_lock(&ctx->state_mutex);
 	spu_deactivate(ctx);
 	mutex_unlock(&ctx->state_mutex);
@@ -151,7 +151,7 @@ int spu_acquire_saved(struct spu_context
 {
 	int ret;
 
-	spu_context_nospu_trace(spu_acquire_saved__enter, ctx);
+	trace_mark(spu_acquire_saved__enter, "ctx %p", ctx);
 
 	ret = spu_acquire(ctx);
 	if (ret)
Index: linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- linux-2.6.25.y.orig/arch/powerpc/platforms/cell/spufs/spufs.h
+++ linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -329,9 +329,4 @@ extern void spu_free_lscsa(struct spu_st
 extern void spuctx_switch_state(struct spu_context *ctx,
 		enum spu_utilization_state new_state);
 
-#define spu_context_trace(name, ctx, spu) \
-	trace_mark(name, "ctx %p spu %p", ctx, spu);
-#define spu_context_nospu_trace(name, ctx) \
-	trace_mark(name, "ctx %p", ctx);
-
 #endif
Index: linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/sched.c
===================================================================
--- linux-2.6.25.y.orig/arch/powerpc/platforms/cell/spufs/sched.c
+++ linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/sched.c
@@ -217,7 +217,7 @@ void do_notify_spus_active(void)
  */
 static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
-	spu_context_trace(spu_bind_context__enter, ctx, spu);
+	trace_mark(spu_bind_context__enter, "ctx %p spu %p", ctx, spu);
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
@@ -400,7 +400,7 @@ static int has_affinity(struct spu_conte
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
-	spu_context_trace(spu_unbind_context__enter, ctx, spu);
+	trace_mark(spu_unbind_context__enter, "ctx %p spu %p", ctx, spu);
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
@@ -529,7 +529,7 @@ static struct spu *spu_get_idle(struct s
 	struct spu *spu, *aff_ref_spu;
 	int node, n;
 
-	spu_context_nospu_trace(spu_get_idle__enter, ctx);
+	trace_mark(spu_get_idle__enter, "ctx %p", ctx);
 
 	if (ctx->gang) {
 		mutex_lock(&ctx->gang->aff_mutex);
@@ -568,13 +568,13 @@ static struct spu *spu_get_idle(struct s
 	}
 
 not_found:
-	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
+	trace_mark(spu_get_idle__not_found, "ctx %p", ctx);
 	return NULL;
 
 found:
 	spu->alloc_state = SPU_USED;
 	mutex_unlock(&cbe_spu_info[node].list_mutex);
-	spu_context_trace(spu_get_idle__found, ctx, spu);
+	trace_mark(spu_get_idle__found, "ctx %p spu %p", ctx, spu);
 	spu_init_channels(spu);
 	return spu;
 }
@@ -591,7 +591,7 @@ static struct spu *find_victim(struct sp
 	struct spu *spu;
 	int node, n;
 
-	spu_context_nospu_trace(spu_find_victim__enter, ctx);
+	trace_mark(spu_find_victim__enter, "ctx %p", ctx);
 
 	/*
 	 * Look for a possible preemption candidate on the local node first.
@@ -646,7 +646,7 @@ static struct spu *find_victim(struct sp
 				goto restart;
 			}
 
-			spu_context_trace(__spu_deactivate__unload, ctx, spu);
+			trace_mark(__spu_deactivate__unload, "ctx %p spu %p", ctx, spu);
 
 			mutex_lock(&cbe_spu_info[node].list_mutex);
 			cbe_spu_info[node].nr_active--;
@@ -830,7 +830,7 @@ static int __spu_deactivate(struct spu_c
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-	spu_context_nospu_trace(spu_deactivate__enter, ctx);
+	trace_mark(spu_deactivate__enter, "ctx %p", ctx);
 	__spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
@@ -844,7 +844,7 @@ void spu_deactivate(struct spu_context *
  */
 void spu_yield(struct spu_context *ctx)
 {
-	spu_context_nospu_trace(spu_yield__enter, ctx);
+	trace_mark(spu_yield__enter, "ctx %p", ctx);
 	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
 		mutex_lock(&ctx->state_mutex);
 		__spu_deactivate(ctx, 0, MAX_PRIO);
@@ -872,7 +872,7 @@ static noinline void spusched_tick(struc
 
 	spu = ctx->spu;
 
-	spu_context_trace(spusched_tick__preempt, ctx, spu);
+	trace_mark(spusched_tick__preempt, "ctx %p spu %p", ctx, spu);
 
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
@@ -880,7 +880,7 @@ static noinline void spusched_tick(struc
 		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {
-		spu_context_nospu_trace(spusched_tick__newslice, ctx);
+		trace_mark(spusched_tick__newslice, "ctx %p", ctx);
 		ctx->time_slice++;
 	}
 out:
Index: linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/run.c
===================================================================
--- linux-2.6.25.y.orig/arch/powerpc/platforms/cell/spufs/run.c
+++ linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/run.c
@@ -15,7 +15,7 @@ void spufs_stop_callback(struct spu *spu
 {
 	struct spu_context *ctx = spu->ctx;
 
-	spu_context_trace(spufs_stop_callback__enter, ctx, spu);
+	trace_mark(spufs_stop_callback__enter, "ctx %p spu %p", ctx, spu);
 
 	/*
 	 * It should be impossible to preempt a context while an exception
Index: linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/file.c
===================================================================
--- linux-2.6.25.y.orig/arch/powerpc/platforms/cell/spufs/file.c
+++ linux-2.6.25.y/arch/powerpc/platforms/cell/spufs/file.c
@@ -360,7 +360,7 @@ static unsigned long spufs_ps_nopfn(stru
 	unsigned long area, offset = address - vma->vm_start;
 	int ret = 0;
 
-	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
+	trace_mark(spufs_ps_nopfn__enter, "ctx %p", ctx);
 
 	offset += vma->vm_pgoff << PAGE_SHIFT;
 	if (offset >= ps_size)
@@ -386,14 +386,14 @@ static unsigned long spufs_ps_nopfn(stru
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
-		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
+		trace_mark(spufs_ps_nopfn__sleep, "ctx %p", ctx);
 		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
-		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
+		trace_mark(spufs_ps_nopfn__wake, "ctx %p spu %p", ctx, ctx->spu);
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
 		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
-		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
+		trace_mark(spufs_ps_nopfn__insert, "ctx %p spu %p", ctx, ctx->spu);
 	}
 
 	if (!ret)
-- 
Julio M. Merino Vidal
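For illustration only: once trace_mark() is called by hand, a marker is free
to carry whatever format it needs, following the trace_mark(name, format,
args...) form used throughout the patch.  The sketch below is hypothetical
(the event name, the helper and the extra "val" argument are made up and not
part of this patch); it shows the kind of marker the removed macros could not
express, assuming it lives next to the other spufs code.

#include <linux/marker.h>

#include "spufs.h"	/* for struct spu_context */

/*
 * Hypothetical example: log an extra integer next to the context pointer.
 * The old spu_context{,_nospu}_trace macros only allowed "ctx %p" or
 * "ctx %p spu %p", so a marker like this would have needed yet another
 * macro variant.
 */
static void spu_example_trace(struct spu_context *ctx, int val)
{
	trace_mark(spu_example__event, "ctx %p val %d", ctx, val);
}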