After a DMA error (misaligned access, wrong length, etc.) the MFC's DMA queue
has to be purged before the context can be resumed. The purge marks all
queued DMA requests as invalid.
This patch was initially written to recover from DMA errors. Later I decided
not to recover from DMA bugs but to stop with BUG() instead, since such
errors are exceptional and should not happen under normal circumstances.

Signed-off-by: Sebastian Siewior

Index: b/arch/powerpc/platforms/cell/spufs/backing_ops.c
===================================================================
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -359,6 +359,26 @@ static void spu_backing_restart_dma(stru
 	/* nothing to do here */
 }
 
+static void spu_backing_purge_dma_queue(struct spu_context *ctx)
+{
+	struct spu_priv2_collapsed *p2 = &ctx->csa.priv2;
+	struct mfc_cq_sr *mfc_ctx_sr;
+	unsigned int i;
+
+	spin_lock(&ctx->csa.register_lock);
+	/* purge all DMA requests on PPU & SPU side by clearing the valid bit */
+	for (i = 0; i < 16; i++) {
+		mfc_ctx_sr = &p2->spuq[i];
+		mfc_ctx_sr->u.mfc_cq_ims &= ~MFC_SPU_CQ_ISM_VALID;
+	}
+	for (i = 0; i < 8; i++) {
+		mfc_ctx_sr = &p2->puq[i];
+		mfc_ctx_sr->u.mfc_cq_ims &= ~MFC_PPU_CQ_ISM_VALID;
+	}
+
+	spin_unlock(&ctx->csa.register_lock);
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -387,4 +407,5 @@ struct spu_context_ops spu_backing_ops =
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
 	.send_mfc_command = spu_backing_send_mfc_command,
 	.restart_dma = spu_backing_restart_dma,
+	.purge_dma_queue = spu_backing_purge_dma_queue,
 };
Index: b/arch/powerpc/platforms/cell/spufs/hw_ops.c
===================================================================
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -312,6 +312,25 @@ static void spu_hw_restart_dma(struct sp
 	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
 }
 
+static void spu_hw_purge_dma_queue(struct spu_context *ctx)
+{
+	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;
+	unsigned long long status;
+	unsigned long long num = 0;
+
+	spin_lock_irq(&ctx->spu->register_lock);
+	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+	eieio();
+
+	do {
+		num++;
+		status = in_be64(&priv2->mfc_control_RW);
+	} while ((status & MFC_CNTL_PURGE_DMA_STATUS_MASK) !=
+			MFC_CNTL_PURGE_DMA_COMPLETE);
+
+	spin_unlock_irq(&ctx->spu->register_lock);
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -338,4 +357,5 @@ struct spu_context_ops spu_hw_ops = {
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
 	.send_mfc_command = spu_hw_send_mfc_command,
 	.restart_dma = spu_hw_restart_dma,
+	.purge_dma_queue = spu_hw_purge_dma_queue,
 };
Index: b/arch/powerpc/platforms/cell/spufs/spufs.h
===================================================================
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -143,6 +143,7 @@ struct spu_context_ops {
 	void (*proxydma_info_read) (struct spu_context * ctx,
 				struct spu_proxydma_info * info);
 	void (*restart_dma)(struct spu_context *ctx);
+	void (*purge_dma_queue)(struct spu_context *ctx);
 };
 
 extern struct spu_context_ops spu_hw_ops;
Index: b/include/asm-powerpc/spu.h
===================================================================
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -279,7 +279,15 @@ struct mfc_cq_sr {
 	u64 mfc_cq_data0_RW;
 	u64 mfc_cq_data1_RW;
 	u64 mfc_cq_data2_RW;
-	u64 mfc_cq_data3_RW;
+	union {
+		u64 mfc_cq_data3_RW;
+		struct {
+			u32 mfc_cq_ims;
+#define MFC_SPU_CQ_ISM_VALID	(1u << 15)
+			u32 tag_word;
+#define MFC_PPU_CQ_ISM_VALID	(1u << 15)
+		} u;
+	};
 };
 
 struct spu_problem {
--
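
[Illustration only, not part of the patch.]  A minimal sketch of how a caller
could use the new context op through the usual spufs ctx->ops dispatch; the
function name spufs_handle_dma_error is hypothetical, and per the changelog
the policy ultimately chosen is to BUG() on DMA errors rather than recover:

	/* Hypothetical caller, for illustration only -- not in this patch. */
	static void spufs_handle_dma_error(struct spu_context *ctx)
	{
		/*
		 * Recovery variant: invalidate all pending MFC requests so
		 * the context can be resumed.  The patch itself takes the
		 * stricter route and treats a DMA error as a kernel bug.
		 */
		ctx->ops->purge_dma_queue(ctx);
	}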