[Cbe-oss-dev] [PATCH 12/13] spufs: use #defines for SPU class [012] exception status

Jeremy Kerr jk at ozlabs.org
Mon Nov 19 14:36:53 EST 2007


Add a few #defines for the class 0, 1 and 2 interrupt status bits, and
use them instead of magic numbers when we're setting or checking for
these interrupts.

Also, add a #define for the class 2 mailbox threshold interrupt mask.
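
For example, in the class 0 bottom-half handler the change looks like this
(taken from the first hunk below); the new constants are defined alongside
the int_stat_RW register in spu.h:

	/* before: magic number plus explanatory comment */
	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	/* after: named status bit */
	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		__spu_trap_dma_align(spu);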

Signed-off-by: Jeremy Kerr <jk at ozlabs.org>

---
 arch/powerpc/platforms/cell/spu_base.c          |   42 ++++++++++++------------
 arch/powerpc/platforms/cell/spufs/backing_ops.c |   17 ++++++---
 arch/powerpc/platforms/cell/spufs/hw_ops.c      |   14 ++++----
 include/asm-powerpc/spu.h                       |   14 ++++++++
 4 files changed, 54 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 4de5437..b3bcdf6 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -347,18 +347,18 @@ spu_irq_class_0_bottom(struct spu *spu)
 	stat = spu->class_0_pending;
 	spu->class_0_pending = 0;
 
-	if (stat & 1) /* invalid DMA alignment */
+	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
 		__spu_trap_dma_align(spu);
 
-	if (stat & 2) /* invalid MFC DMA */
+	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
 		__spu_trap_invalid_dma(spu);
 
-	if (stat & 4) /* error on SPU */
+	if (stat & CLASS0_SPU_ERROR_INTR)
 		__spu_trap_error(spu);
 
 	spin_unlock_irqrestore(&spu->register_lock, flags);
 
-	return (stat & 0x7) ? -EIO : 0;
+	return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
 }
 EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
 
@@ -376,24 +376,23 @@ spu_irq_class_1(int irq, void *data)
 	stat  = spu_int_stat_get(spu, 1) & mask;
 	dar   = spu_mfc_dar_get(spu);
 	dsisr = spu_mfc_dsisr_get(spu);
-	if (stat & 2) /* mapping fault */
+	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
 	spin_unlock(&spu->register_lock);
 	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
 			dar, dsisr);
 
-	if (stat & 1) /* segment fault */
+	if (stat & CLASS1_SEGMENT_FAULT_INTR)
 		__spu_trap_data_seg(spu, dar);
 
-	if (stat & 2) { /* mapping fault */
+	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
-	}
 
-	if (stat & 4) /* ls compare & suspend on get */
+	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
 		;
 
-	if (stat & 8) /* ls compare & suspend on put */
+	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 		;
 
 	return stat ? IRQ_HANDLED : IRQ_NONE;
@@ -405,6 +404,8 @@ spu_irq_class_2(int irq, void *data)
 	struct spu *spu;
 	unsigned long stat;
 	unsigned long mask;
+	const int mailbox_intrs =
+		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
 
 	spu = data;
 	spin_lock(&spu->register_lock);
@@ -412,31 +413,30 @@ spu_irq_class_2(int irq, void *data)
 	mask = spu_int_mask_get(spu, 2);
 	/* ignore interrupts we're not waiting for */
 	stat &= mask;
-	/*
-	 * mailbox interrupts (0x1 and 0x10) are level triggered.
-	 * mask them now before acknowledging.
-	 */
-	if (stat & 0x11)
-		spu_int_mask_and(spu, 2, ~(stat & 0x11));
+
+	/* mailbox interrupts are level triggered. mask them now before
+	 * acknowledging */
+	if (stat & mailbox_intrs)
+		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
 	/* acknowledge all interrupts before the callbacks */
 	spu_int_stat_clear(spu, 2, stat);
 	spin_unlock(&spu->register_lock);
 
 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
-	if (stat & 1)  /* PPC core mailbox */
+	if (stat & CLASS2_MAILBOX_INTR)
 		spu->ibox_callback(spu);
 
-	if (stat & 2) /* SPU stop-and-signal */
+	if (stat & CLASS2_SPU_STOP_INTR)
 		spu->stop_callback(spu);
 
-	if (stat & 4) /* SPU halted */
+	if (stat & CLASS2_SPU_HALT_INTR)
 		spu->stop_callback(spu);
 
-	if (stat & 8) /* DMA tag group complete */
+	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 		spu->mfc_callback(spu);
 
-	if (stat & 0x10) /* SPU mailbox threshold */
+	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
 		spu->wbox_callback(spu);
 
 	spu->stats.class2_intr++;
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 97b2d5e..d449553 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -106,16 +106,20 @@ static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
 		if (stat & 0xff0000)
 			ret |= POLLIN | POLLRDNORM;
 		else {
-			ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
-			ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+			ctx->csa.priv1.int_stat_class2_RW &=
+				~CLASS2_MAILBOX_INTR;
+			ctx->csa.priv1.int_mask_class2_RW |=
+				CLASS2_ENABLE_MAILBOX_INTR;
 		}
 	}
 	if (events & (POLLOUT | POLLWRNORM)) {
 		if (stat & 0x00ff00)
 			ret = POLLOUT | POLLWRNORM;
 		else {
-			ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
-			ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+			ctx->csa.priv1.int_stat_class2_RW &=
+				~CLASS2_MAILBOX_THRESHOLD_INTR;
+			ctx->csa.priv1.int_mask_class2_RW |=
+				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
 		}
 	}
 	spin_unlock_irq(&ctx->csa.register_lock);
@@ -139,7 +143,7 @@ static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
 		ret = 4;
 	} else {
 		/* make sure we get woken up by the interrupt */
-		ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
 		ret = 0;
 	}
 	spin_unlock(&ctx->csa.register_lock);
@@ -169,7 +173,8 @@ static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
 	} else {
 		/* make sure we get woken up by the interrupt when space
 		   becomes available */
-		ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+		ctx->csa.priv1.int_mask_class2_RW |=
+			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
 		ret = 0;
 	}
 	spin_unlock(&ctx->csa.register_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index a7767e3..64f8540 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -76,16 +76,18 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
 		if (stat & 0xff0000)
 			ret |= POLLIN | POLLRDNORM;
 		else {
-			spu_int_stat_clear(spu, 2, 0x1);
-			spu_int_mask_or(spu, 2, 0x1);
+			spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
+			spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
 		}
 	}
 	if (events & (POLLOUT | POLLWRNORM)) {
 		if (stat & 0x00ff00)
 			ret = POLLOUT | POLLWRNORM;
 		else {
-			spu_int_stat_clear(spu, 2, 0x10);
-			spu_int_mask_or(spu, 2, 0x10);
+			spu_int_stat_clear(spu, 2,
+					CLASS2_MAILBOX_THRESHOLD_INTR);
+			spu_int_mask_or(spu, 2,
+					CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
 		}
 	}
 	spin_unlock_irq(&spu->register_lock);
@@ -106,7 +108,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
 		ret = 4;
 	} else {
 		/* make sure we get woken up by the interrupt */
-		spu_int_mask_or(spu, 2, 0x1);
+		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
 		ret = 0;
 	}
 	spin_unlock_irq(&spu->register_lock);
@@ -127,7 +129,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
 	} else {
 		/* make sure we get woken up by the interrupt when space
 		   becomes available */
-		spu_int_mask_or(spu, 2, 0x10);
+		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
 		ret = 0;
 	}
 	spin_unlock_irq(&spu->register_lock);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 0fc901b..770d8da 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -526,8 +526,22 @@ struct spu_priv1 {
 #define CLASS2_ENABLE_SPU_STOP_INTR			0x2L
 #define CLASS2_ENABLE_SPU_HALT_INTR			0x4L
 #define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR	0x8L
+#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR		0x10L
 	u8  pad_0x118_0x140[0x28];				/* 0x118 */
 	u64 int_stat_RW[3];					/* 0x140 */
+#define CLASS0_DMA_ALIGNMENT_INTR			0x1L
+#define CLASS0_INVALID_DMA_COMMAND_INTR			0x2L
+#define CLASS0_SPU_ERROR_INTR				0x4L
+#define CLASS0_INTR_MASK				0x7L
+#define CLASS1_SEGMENT_FAULT_INTR			0x1L
+#define CLASS1_STORAGE_FAULT_INTR			0x2L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR		0x4L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR		0x8L
+#define CLASS2_MAILBOX_INTR				0x1L
+#define CLASS2_SPU_STOP_INTR				0x2L
+#define CLASS2_SPU_HALT_INTR				0x4L
+#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR		0x8L
+#define CLASS2_MAILBOX_THRESHOLD_INTR			0x10L
 	u8  pad_0x158_0x180[0x28];				/* 0x158 */
 	u64 int_route_RW;					/* 0x180 */
 


