[RFC] spufs: wrap spu privileged register access
Geoff Levand
geoffrey.levand at am.sony.com
Wed Dec 7 11:34:00 EST 2005
The current spufs implementation accesses privileged (privilege 1)
spu registers directly, which may not be allowed by a hypervisor.
This patch adds wrapper functions that can be implemented as needed as
either platform specific hypervisor calls or direct register accesses.
Included is a sample of support for a fictitious hypervisor.
This patch is just to give an idea, please re-write it as you like.
It may be a good idea to wrap not only irq_mask/stat but also
any other regs, and to remove generic functions like spu_priv1_get64/put64()
since each access may be mapped to different hypervisor calls.
Arnd mentioned it would be best to arrange for runtime
configuration possibly using firmware_has_feature().
Signed-off-by: Masato Noguchi <Masato.Noguchi at jp.sony.com>
Signed-off-by: Geoff Levand <geoff.levand at am.sony.com>
Index: linux-2.6.15-rc4-cell/arch/powerpc/platforms/cell/spu_base.c
===================================================================
--- linux-2.6.15-rc4-cell.orig/arch/powerpc/platforms/cell/spu_base.c 2005-12-02 16:26:20.000000000 -0800
+++ linux-2.6.15-rc4-cell/arch/powerpc/platforms/cell/spu_base.c 2005-12-02 16:27:40.000000000 -0800
@@ -141,8 +141,8 @@
/* atomically disable SPU mailbox interrupts */
spin_lock(&spu->register_lock);
- out_be64(&spu->priv1->int_mask_class2_RW,
- in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
+ spu_irq_mask_set(spu, 2,
+ spu_irq_mask_get(spu, 2) & ~0x1);
spin_unlock(&spu->register_lock);
return 0;
}
@@ -177,8 +177,8 @@
/* atomically disable SPU mailbox interrupts */
spin_lock(&spu->register_lock);
- out_be64(&spu->priv1->int_mask_class2_RW,
- in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
+ spu_irq_mask_set(spu, 2,
+ spu_irq_mask_get(spu, 2) & ~0x10);
spin_unlock(&spu->register_lock);
return 0;
}
@@ -202,7 +202,7 @@
spu->class_0_pending = 0;
- stat = in_be64(&spu->priv1->int_stat_class0_RW);
+ stat = spu_irq_stat_get(spu, 0);
if (stat & 1) /* invalid MFC DMA */
__spu_trap_invalid_dma(spu);
@@ -213,7 +213,7 @@
if (stat & 4) /* error on SPU */
__spu_trap_error(spu);
- out_be64(&spu->priv1->int_stat_class0_RW, stat);
+ spu_irq_stat_clear(spu, 0, stat);
return 0;
}
@@ -227,13 +227,13 @@
/* atomically read & clear class1 status. */
spin_lock(&spu->register_lock);
- mask = in_be64(&spu->priv1->int_mask_class1_RW);
- stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
- dar = in_be64(&spu->priv1->mfc_dar_RW);
- dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
+ mask = spu_irq_mask_get(spu, 1);
+ stat = spu_irq_stat_get(spu, 1) & mask;
+ dar = spu_priv1_get64(spu, mfc_dar_RW);
+ dsisr = spu_priv1_get64(spu, mfc_dsisr_RW);
if (stat & 2) /* mapping fault */
- out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
- out_be64(&spu->priv1->int_stat_class1_RW, stat);
+ spu_priv1_set64(spu, mfc_dsisr_RW, 0UL);
+ spu_irq_stat_clear(spu, 1, stat);
spin_unlock(&spu->register_lock);
if (stat & 1) /* segment fault */
@@ -259,10 +259,10 @@
unsigned long stat;
spu = data;
- stat = in_be64(&spu->priv1->int_stat_class2_RW);
+ stat = spu_irq_stat_get(spu, 2);
pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
- in_be64(&spu->priv1->int_mask_class2_RW));
+ spu_irq_mask_get(spu, 2));
if (stat & 1) /* PPC core mailbox */
@@ -280,7 +280,7 @@
if (stat & 0x10) /* SPU mailbox threshold */
__spu_trap_spubox(spu);
- out_be64(&spu->priv1->int_stat_class2_RW, stat);
+ spu_irq_stat_clear(spu, 2, stat);
return stat ? IRQ_HANDLED : IRQ_NONE;
}
@@ -297,21 +297,21 @@
spu_irq_class_0, 0, spu->irq_c0, spu);
if (ret)
goto out;
- out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
+ spu_irq_mask_set(spu, 0, 0x7);
snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
spu_irq_class_1, 0, spu->irq_c1, spu);
if (ret)
goto out1;
- out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
+ spu_irq_mask_set(spu, 1, 0x3);
snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
spu_irq_class_2, 0, spu->irq_c2, spu);
if (ret)
goto out2;
- out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
+ spu_irq_mask_set(spu, 2, 0xe);
goto out;
out2:
@@ -373,9 +373,9 @@
static void spu_init_regs(struct spu *spu)
{
- out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
- out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
- out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
+ spu_irq_mask_set(spu, 0, 0x7);
+ spu_irq_mask_set(spu, 1, 0x3);
+ spu_irq_mask_set(spu, 2, 0xe);
}
struct spu *spu_alloc(void)
@@ -523,13 +523,11 @@
int spu_run(struct spu *spu)
{
struct spu_problem __iomem *prob;
- struct spu_priv1 __iomem *priv1;
struct spu_priv2 __iomem *priv2;
u32 status;
int ret;
prob = spu->problem;
- priv1 = spu->priv1;
priv2 = spu->priv2;
/* Let SPU run. */
@@ -561,7 +559,7 @@
cpu_relax();
out_be64(&priv2->slb_invalidate_all_W, 0);
- out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
+ spu_priv1_set64(spu, tlb_invalidate_entry_W, 0UL);
eieio();
/* Check for SPU breakpoint. */
Index: linux-2.6.15-rc4-cell/arch/powerpc/platforms/cell/spufs/hw_ops.c
===================================================================
--- linux-2.6.15-rc4-cell.orig/arch/powerpc/platforms/cell/spufs/hw_ops.c 2005-12-02 16:26:20.000000000 -0800
+++ linux-2.6.15-rc4-cell/arch/powerpc/platforms/cell/spufs/hw_ops.c 2005-12-02 16:27:40.000000000 -0800
@@ -62,7 +62,6 @@
{
struct spu *spu = ctx->spu;
struct spu_problem __iomem *prob = spu->problem;
- struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_priv2 __iomem *priv2 = spu->priv2;
int ret;
@@ -73,8 +72,8 @@
ret = 4;
} else {
/* make sure we get woken up by the interrupt */
- out_be64(&priv1->int_mask_class2_RW,
- in_be64(&priv1->int_mask_class2_RW) | 0x1);
+ spu_irq_mask_set(spu, 2,
+ spu_irq_mask_get(spu, 2) | 0x1);
ret = 0;
}
spin_unlock_irq(&spu->register_lock);
@@ -85,7 +84,6 @@
{
struct spu *spu = ctx->spu;
struct spu_problem __iomem *prob = spu->problem;
- struct spu_priv1 __iomem *priv1 = spu->priv1;
int ret;
spin_lock_irq(&spu->register_lock);
@@ -96,8 +94,8 @@
} else {
/* make sure we get woken up by the interrupt when space
becomes available */
- out_be64(&priv1->int_mask_class2_RW,
- in_be64(&priv1->int_mask_class2_RW) | 0x10);
+ spu_irq_mask_set(spu, 2,
+ spu_irq_mask_get(spu, 2) | 0x10);
ret = 0;
}
spin_unlock_irq(&spu->register_lock);
Index: linux-2.6.15-rc4-cell/arch/powerpc/platforms/cell/spufs/switch.c
===================================================================
--- linux-2.6.15-rc4-cell.orig/arch/powerpc/platforms/cell/spufs/switch.c 2005-12-02 16:26:20.000000000 -0800
+++ linux-2.6.15-rc4-cell/arch/powerpc/platforms/cell/spufs/switch.c 2005-12-02 16:27:40.000000000 -0800
@@ -108,8 +108,6 @@
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 3:
* Restore, Step 2:
* Save INT_Mask_class0 in CSA.
@@ -122,15 +120,15 @@
spin_lock_irq(&spu->register_lock);
if (csa) {
csa->priv1.int_mask_class0_RW =
- in_be64(&priv1->int_mask_class0_RW);
+ spu_irq_mask_get(spu, 0);
csa->priv1.int_mask_class1_RW =
- in_be64(&priv1->int_mask_class1_RW);
+ spu_irq_mask_get(spu, 1);
csa->priv1.int_mask_class2_RW =
- in_be64(&priv1->int_mask_class2_RW);
+ spu_irq_mask_get(spu, 2);
}
- out_be64(&priv1->int_mask_class0_RW, 0UL);
- out_be64(&priv1->int_mask_class1_RW, 0UL);
- out_be64(&priv1->int_mask_class2_RW, 0UL);
+ spu_irq_mask_set(spu, 0, 0UL);
+ spu_irq_mask_set(spu, 1, 0UL);
+ spu_irq_mask_set(spu, 2, 0UL);
eieio();
spin_unlock_irq(&spu->register_lock);
}
@@ -217,12 +215,10 @@
static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 10:
* Save MFC_SR1 in the CSA.
*/
- csa->priv1.mfc_sr1_RW = in_be64(&priv1->mfc_sr1_RW);
+ csa->priv1.mfc_sr1_RW = spu_priv1_get64(spu, mfc_sr1_RW);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
@@ -316,15 +312,13 @@
static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 17:
* Restore, Step 12.
* Restore, Step 48.
* Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
* Then issue a PPE sync instruction.
*/
- out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
+ spu_priv1_set64(spu, tlb_invalidate_entry_W, 0UL);
mb();
}
@@ -434,25 +428,21 @@
static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 25:
* Save the MFC_TCLASS_ID register in
* the CSA.
*/
- csa->priv1.mfc_tclass_id_RW = in_be64(&priv1->mfc_tclass_id_RW);
+ csa->priv1.mfc_tclass_id_RW = spu_priv1_get64(spu, mfc_tclass_id_RW);
}
static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 26:
* Restore, Step 23.
* Write the MFC_TCLASS_ID register with
* the value 0x10000000.
*/
- out_be64(&priv1->mfc_tclass_id_RW, 0x10000000);
+ spu_priv1_set64(spu, mfc_tclass_id_RW, 0x10000000);
eieio();
}
@@ -482,14 +472,13 @@
static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_priv2 __iomem *priv2 = spu->priv2;
int i;
/* Save, Step 29:
* If MFC_SR1[R]='1', save SLBs in CSA.
*/
- if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
+ if (spu_priv1_get64(spu, mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
for (i = 0; i < 8; i++) {
out_be64(&priv2->slb_index_W, i);
@@ -503,8 +492,6 @@
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 30:
* Restore, Step 18:
* Write MFC_SR1 with MFC_SR1[D=0,S=1] and
@@ -516,9 +503,9 @@
* MFC_SR1[Pr] bit is not set.
*
*/
- out_be64(&priv1->mfc_sr1_RW, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
- MFC_STATE1_RELOCATE_MASK |
- MFC_STATE1_BUS_TLBIE_MASK));
+ spu_priv1_set64(spu, mfc_sr1_RW, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
+ MFC_STATE1_RELOCATE_MASK |
+ MFC_STATE1_BUS_TLBIE_MASK));
}
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
@@ -595,16 +582,14 @@
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Save, Step 38:
* Save RA_GROUP_ID register and the
* RA_ENABLE reigster in the CSA.
*/
csa->priv1.resource_allocation_groupID_RW =
- in_be64(&priv1->resource_allocation_groupID_RW);
+ spu_priv1_get64(spu, resource_allocation_groupID_RW);
csa->priv1.resource_allocation_enable_RW =
- in_be64(&priv1->resource_allocation_enable_RW);
+ spu_priv1_get64(spu, resource_allocation_enable_RW);
}
static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
@@ -722,14 +707,13 @@
static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_priv2 __iomem *priv2 = spu->priv2;
/* Save, Step 45:
* Restore, Step 19:
* If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
*/
- if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
+ if (spu_priv1_get64(spu, mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
out_be64(&priv2->slb_invalidate_all_W, 0UL);
eieio();
}
@@ -798,7 +782,6 @@
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
CLASS1_ENABLE_STORAGE_FAULT_INTR;
@@ -811,12 +794,12 @@
* (translation) interrupts.
*/
spin_lock_irq(&spu->register_lock);
- out_be64(&priv1->int_stat_class0_RW, ~(0UL));
- out_be64(&priv1->int_stat_class1_RW, ~(0UL));
- out_be64(&priv1->int_stat_class2_RW, ~(0UL));
- out_be64(&priv1->int_mask_class0_RW, 0UL);
- out_be64(&priv1->int_mask_class1_RW, class1_mask);
- out_be64(&priv1->int_mask_class2_RW, 0UL);
+ spu_irq_stat_clear(spu, 0, ~(0UL));
+ spu_irq_stat_clear(spu, 1, ~(0UL));
+ spu_irq_stat_clear(spu, 2, ~(0UL));
+ spu_irq_mask_set(spu, 0, 0UL);
+ spu_irq_mask_set(spu, 1, 0UL);
+ spu_irq_mask_set(spu, 2, 0UL);
spin_unlock_irq(&spu->register_lock);
}
@@ -954,7 +937,6 @@
static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_problem __iomem *prob = spu->problem;
u32 mask = MFC_TAGID_TO_TAGMASK(0);
unsigned long flags;
@@ -971,14 +953,13 @@
POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
local_irq_save(flags);
- out_be64(&priv1->int_stat_class0_RW, ~(0UL));
- out_be64(&priv1->int_stat_class2_RW, ~(0UL));
+ spu_irq_stat_clear(spu, 0, ~(0UL));
+ spu_irq_stat_clear(spu, 2, ~(0UL));
local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_problem __iomem *prob = spu->problem;
unsigned long flags;
@@ -991,8 +972,8 @@
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
local_irq_save(flags);
- out_be64(&priv1->int_stat_class0_RW, ~(0UL));
- out_be64(&priv1->int_stat_class2_RW, ~(0UL));
+ spu_irq_stat_clear(spu, 0, ~(0UL));
+ spu_irq_stat_clear(spu, 2, ~(0UL));
local_irq_restore(flags);
}
@@ -1091,7 +1072,6 @@
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
- struct spu_priv1 __iomem *priv1 = spu->priv1;
/* Restore, Step 10:
* If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
@@ -1100,8 +1080,8 @@
if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
if (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
- out_be64(&priv1->mfc_sr1_RW,
- MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+ spu_priv1_set64(spu, mfc_sr1_RW,
+ MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
eieio();
@@ -1112,8 +1092,8 @@
SPU_STATUS_ISOLATED_LOAD_STAUTUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
- out_be64(&priv1->mfc_sr1_RW,
- MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+ spu_priv1_set64(spu, mfc_sr1_RW,
+ MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
out_be32(&prob->spu_runcntl_RW, 0x2);
eieio();
@@ -1281,16 +1261,14 @@
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Restore, Step 29:
* Restore RA_GROUP_ID register and the
* RA_ENABLE reigster from the CSA.
*/
- out_be64(&priv1->resource_allocation_groupID_RW,
- csa->priv1.resource_allocation_groupID_RW);
- out_be64(&priv1->resource_allocation_enable_RW,
- csa->priv1.resource_allocation_enable_RW);
+ spu_priv1_set64(spu, resource_allocation_groupID_RW,
+ csa->priv1.resource_allocation_groupID_RW);
+ spu_priv1_set64(spu, resource_allocation_enable_RW,
+ csa->priv1.resource_allocation_enable_RW);
}
static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
@@ -1433,8 +1411,6 @@
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Restore, Step 49:
* Write INT_MASK_class0 with value of 0.
* Write INT_MASK_class1 with value of 0.
@@ -1444,12 +1420,12 @@
* Write INT_STAT_class2 with value of -1.
*/
spin_lock_irq(&spu->register_lock);
- out_be64(&priv1->int_mask_class0_RW, 0UL);
- out_be64(&priv1->int_mask_class1_RW, 0UL);
- out_be64(&priv1->int_mask_class2_RW, 0UL);
- out_be64(&priv1->int_stat_class0_RW, ~(0UL));
- out_be64(&priv1->int_stat_class1_RW, ~(0UL));
- out_be64(&priv1->int_stat_class2_RW, ~(0UL));
+ spu_irq_mask_set(spu, 0, 0UL);
+ spu_irq_mask_set(spu, 1, 0UL);
+ spu_irq_mask_set(spu, 2, 0UL);
+ spu_irq_stat_clear(spu, 0, ~(0UL));
+ spu_irq_stat_clear(spu, 1, ~(0UL));
+ spu_irq_stat_clear(spu, 2, ~(0UL));
spin_unlock_irq(&spu->register_lock);
}
@@ -1546,12 +1522,10 @@
static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Restore, Step 56:
* Restore the MFC_TCLASS_ID register from CSA.
*/
- out_be64(&priv1->mfc_tclass_id_RW, csa->priv1.mfc_tclass_id_RW);
+ spu_priv1_set64(spu, mfc_tclass_id_RW, csa->priv1.mfc_tclass_id_RW);
eieio();
}
@@ -1713,7 +1687,6 @@
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_priv2 __iomem *priv2 = spu->priv2;
u64 dummy = 0UL;
@@ -1724,8 +1697,7 @@
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
dummy = in_be64(&priv2->puint_mb_R);
eieio();
- out_be64(&priv1->int_stat_class2_RW,
- CLASS2_ENABLE_MAILBOX_INTR);
+ spu_irq_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
}
}
@@ -1753,12 +1725,10 @@
static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Restore, Step 69:
* Restore the MFC_SR1 register from CSA.
*/
- out_be64(&priv1->mfc_sr1_RW, csa->priv1.mfc_sr1_RW);
+ spu_priv1_set64(spu, mfc_sr1_RW, csa->priv1.mfc_sr1_RW);
eieio();
}
@@ -1816,15 +1786,13 @@
static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
- struct spu_priv1 __iomem *priv1 = spu->priv1;
-
/* Restore, Step 75:
* Re-enable SPU interrupts.
*/
spin_lock_irq(&spu->register_lock);
- out_be64(&priv1->int_mask_class0_RW, csa->priv1.int_mask_class0_RW);
- out_be64(&priv1->int_mask_class1_RW, csa->priv1.int_mask_class1_RW);
- out_be64(&priv1->int_mask_class2_RW, csa->priv1.int_mask_class2_RW);
+ spu_irq_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
+ spu_irq_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
+ spu_irq_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
spin_unlock_irq(&spu->register_lock);
}
Index: linux-2.6.15-rc4-cell/include/asm-powerpc/spu.h
===================================================================
--- linux-2.6.15-rc4-cell.orig/include/asm-powerpc/spu.h 2005-12-02 16:26:20.000000000 -0800
+++ linux-2.6.15-rc4-cell/include/asm-powerpc/spu.h 2005-12-02 16:27:40.000000000 -0800
@@ -576,4 +576,64 @@
u64 spu_trace_cntl; /* 0x1070 */
} __attribute__ ((aligned(0x2000)));
+
+/* priv1 access */
+
+#ifdef CONFIG_ON_HYPERVISOR_XXXXX
+ /* examples for a fictitious hypervisor */
+
+#include <hypervisor_calls.h>
+
+inline u64 spu_irq_mask_get(struct spu *spu, int cls)
+{
+ u64 __val;
+ hvcall_spu_get_irq_mask(spu->spu_magical_id,
+ cls,
+ &__val);
+ return __val;
+}
+
+#define spu_irq_mask_set(spu, cls, mask) \
+ hvcall_spu_set_irq_mask(spu->spu_magical_id, \
+ cls, \
+ mask)
+
+#define spu_irq_stat_get(spu, cls) \
+ hvcall_spu_get_interrupt_status(spu->spu_magical_id, \
+ cls)
+#define spu_irq_stat_clear(spu, cls, val) \
+ hvcall_spu_clear_interrupt_status(spu->spu_magical_id, \
+ cls, val)
+
+/* must be a macro: offsetof() needs the register member name from the
+ * call site, e.g. spu_priv1_get64(spu, mfc_sr1_RW). */
+#define spu_priv1_get64(spu, reg) \
+({ \
+ u64 __val; \
+ hvcall_spu_get_priv1((spu)->spu_magical_id, \
+ offsetof(struct spu_priv1, reg), \
+ &__val); \
+ __val; \
+})
+
+#define spu_priv1_set64(spu, reg, val) \
+ hvcall_spu_set_priv1((spu)->spu_magical_id, \
+ offsetof(struct spu_priv1, reg), \
+ val)
+
+#else /* CONFIG_ON_HYPERVISOR_XXXXX */
+
+#define spu_irq_mask_get(spu, cls) \
+ spu_priv1_get64(spu, int_mask_class ## cls ## _RW)
+#define spu_irq_mask_set(spu, cls, mask) \
+ spu_priv1_set64(spu, int_mask_class ## cls ## _RW, mask)
+
+#define spu_irq_stat_get(spu, cls) \
+ spu_priv1_get64(spu, int_stat_class ## cls ## _RW)
+#define spu_irq_stat_clear(spu, cls, stat) \
+ spu_priv1_set64(spu, int_stat_class ## cls ## _RW, stat)
+
+#define spu_priv1_get64(spu, reg) in_be64(&(spu)->priv1->reg)
+#define spu_priv1_set64(spu, reg, val) out_be64(&(spu)->priv1->reg, val)
+
+#endif /* CONFIG_ON_HYPERVISOR_XXXXX */
+
#endif
More information about the Linuxppc64-dev
mailing list