[Skiboot] [PATCH 08/12] xive: make endian-clean
Cédric Le Goater
clg at kaod.org
Tue Oct 1 15:41:52 AEST 2019
On 01/10/2019 07:24, Oliver O'Halloran wrote:
> On Sun, 2019-09-29 at 17:46 +1000, Nicholas Piggin wrote:
>> Convert xive opal calls, dt construction, and in-memory hardware tables
>> to use explicit endian conversions.
>
> Cedric, ack/nak?
Sorry, I was not in CC:
Let's be honest: this is very valid, but the result is dreadful.
> I'm a bit iffy about the __prefixes since the in-memory tables are
> homogeneously BE. If we're going to be more concerned about endian
> safety going forward I'd prefer we use the __beXX annotations and some
> kind of automated checking rather than ad-hoc conventions.
I don't mind the __be types, but I don't understand the __ prefix on
the word structure fields. Is that a requirement?
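For what it's worth, the "automated checking" mentioned above is what
sparse gives Linux through its __bitwise types. A standalone sketch of
that convention follows; the names are illustrative rather than
skiboot's, and the be32_to_cpu() shown covers the little-endian host
case only:

#include <stdint.h>

/* Sketch of sparse-checkable endian types, Linux-style. Under sparse,
 * __be32 becomes a distinct "bitwise" type, so mixing it with a plain
 * uint32_t without a conversion helper produces a warning.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise __be32;

static inline uint32_t be32_to_cpu(__be32 x)
{
	/* little-endian host shown; a BE host would return x unchanged */
	return __builtin_bswap32((__force uint32_t)x);
}

struct eq_sketch {
	__be32 w0;		/* always big-endian in memory */
};

static inline int eq_sketch_valid(const struct eq_sketch *eq, uint32_t bit)
{
	/* sparse would flag a bare `eq->w0 & bit` as an endian mix-up */
	return (be32_to_cpu(eq->w0) & bit) != 0;
}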
> We might also want to do something similar to what we did with the
> radix tables in Linux where constants we use change based on endianness
> rather than having the code do cpu_to_beXX all over the place.
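(For illustration, that trick looks roughly like the following; the
constant name is hypothetical, not an existing skiboot define:)

#include <stdint.h>

/* Keep the constant in the table's in-memory (big-endian) form,
 * selected at build time, so hot-path code can test the raw word
 * without a per-access byte swap.
 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define IVE_VALID_RAW	0x8000000000000000ull	/* PPC_BIT(0) as stored */
#else
#define IVE_VALID_RAW	0x0000000000000080ull	/* same bit, byte-swapped */
#endif

static inline int ive_valid_raw(uint64_t raw_w)
{
	/* raw_w is read straight from the BE hardware table */
	return (raw_w & IVE_VALID_RAW) != 0;
}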
I introduced helpers in QEMU to deal with endianness, as we have
the same problem on the pseries and PowerNV machines. They do not
cover all cases, but they clean up the code a lot:
https://git.qemu.org/?p=qemu.git;a=blob;f=include/hw/ppc/xive_regs.h;h=08c8bf7172e22ec964603c2192fd462a1ae918a2;hb=HEAD#l162
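The idea is to fold the byte swap into field accessors so the
conversion lives in one place. Paraphrased from memory (assuming a
ctz32() count-trailing-zeros helper plus the usual be32_to_cpu() and
cpu_to_be32(); don't take this as a verbatim copy of the QEMU code):

/* Callers read and write logical field values; the word argument
 * and return value stay in big-endian storage order.
 */
static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word)
{
	return (be32_to_cpu(word) & mask) >> ctz32(mask);
}

static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word,
					uint32_t value)
{
	uint32_t tmp;

	tmp = (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask);
	return cpu_to_be32(tmp);
}

With those, code like eq->w7 = xive_set_field32(EQ_W7_F0_PRIORITY,
eq->w7, prio) stays readable and the endian handling is centralized.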
Are we in a hurry? This is also completely ruining my patchset ...
Nick, do you want me to take over? I will complain less :)
Thanks,
C.
>>
>> Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
>> ---
>> hw/xive.c | 386 ++++++++++++++++++++++++++-----------------------
>> include/xive.h | 50 +++----
>> 2 files changed, 230 insertions(+), 206 deletions(-)
>>
>> diff --git a/hw/xive.c b/hw/xive.c
>> index 96a9bc647..8bfe74db3 100644
>> --- a/hw/xive.c
>> +++ b/hw/xive.c
>> @@ -421,7 +421,7 @@ struct xive {
>> /* Indirect NVT/VP table. NULL entries are unallocated, count is
>> * the number of pointers (ie, sub page placeholders).
>> */
>> - uint64_t *vp_ind_base;
>> + __be64 *vp_ind_base;
>> uint32_t vp_ind_count;
>> #else
>> void *vp_base;
>> @@ -805,7 +805,7 @@ static struct xive_eq *xive_get_eq(struct xive *x, unsigned int idx)
>> #ifdef USE_INDIRECT
>> if (idx >= (x->eq_ind_count * EQ_PER_PAGE))
>> return NULL;
>> - p = (struct xive_eq *)(x->eq_ind_base[idx / EQ_PER_PAGE] &
>> + p = (struct xive_eq *)(be64_to_cpu(x->eq_ind_base[idx / EQ_PER_PAGE]) &
>> VSD_ADDRESS_MASK);
>> if (!p)
>> return NULL;
>> @@ -845,11 +845,11 @@ static struct xive_ive *xive_get_ive(struct xive *x, unsigned int isn)
>> /* If using single-escalation, don't let anybody get to the individual
>> * escalation interrupts
>> */
>> - if (eq->w0 & EQ_W0_UNCOND_ESCALATE)
>> + if (be32_to_cpu(eq->__w0) & EQ_W0_UNCOND_ESCALATE)
>> return NULL;
>>
>> /* Grab the buried IVE */
>> - return (struct xive_ive *)(char *)&eq->w4;
>> + return (struct xive_ive *)(char *)&eq->__w4;
>> } else {
>> /* Check the block matches */
>> if (isn < x->int_base || isn >= x->int_max) {
>> @@ -874,7 +874,7 @@ static struct xive_vp *xive_get_vp(struct xive *x, unsigned int idx)
>>
>> #ifdef USE_INDIRECT
>> assert(idx < (x->vp_ind_count * VP_PER_PAGE));
>> - p = (struct xive_vp *)(x->vp_ind_base[idx / VP_PER_PAGE] &
>> + p = (struct xive_vp *)(be64_to_cpu(x->vp_ind_base[idx / VP_PER_PAGE]) &
>> VSD_ADDRESS_MASK);
>> if (!p)
>> return NULL;
>> @@ -893,8 +893,8 @@ static void xive_init_default_vp(struct xive_vp *vp,
>> memset(vp, 0, sizeof(struct xive_vp));
>>
>> /* Stash the EQ base in the pressure relief interrupt field */
>> - vp->w1 = (eq_blk << 28) | eq_idx;
>> - vp->w0 = VP_W0_VALID;
>> + vp->__w1 = cpu_to_be32((eq_blk << 28) | eq_idx);
>> + vp->__w0 = cpu_to_be32(VP_W0_VALID);
>> }
>>
>> static void xive_init_emu_eq(uint32_t vp_blk, uint32_t vp_idx,
>> @@ -903,17 +903,16 @@ static void xive_init_emu_eq(uint32_t vp_blk, uint32_t vp_idx,
>> {
>> memset(eq, 0, sizeof(struct xive_eq));
>>
>> - eq->w1 = EQ_W1_GENERATION;
>> - eq->w3 = ((uint64_t)backing_page) & 0xffffffff;
>> - eq->w2 = (((uint64_t)backing_page)) >> 32 & 0x0fffffff;
>> - eq->w6 = SETFIELD(EQ_W6_NVT_BLOCK, 0ul, vp_blk) |
>> - SETFIELD(EQ_W6_NVT_INDEX, 0ul, vp_idx);
>> - eq->w7 = SETFIELD(EQ_W7_F0_PRIORITY, 0ul, prio);
>> - eq->w0 = EQ_W0_VALID | EQ_W0_ENQUEUE |
>> - SETFIELD(EQ_W0_QSIZE, 0ul, EQ_QSIZE_64K) |
>> - EQ_W0_FIRMWARE;
>> + eq->__w1 = cpu_to_be32(EQ_W1_GENERATION);
>> + eq->__w3 = cpu_to_be32(((uint64_t)backing_page) & 0xffffffff);
>> + eq->__w2 = cpu_to_be32(((((uint64_t)backing_page)) >> 32) & 0x0fffffff);
>> + eq->__w6 = cpu_to_be32(SETFIELD(EQ_W6_NVT_BLOCK, 0ul, vp_blk) |
>> + SETFIELD(EQ_W6_NVT_INDEX, 0ul, vp_idx));
>> + eq->__w7 = cpu_to_be32(SETFIELD(EQ_W7_F0_PRIORITY, 0ul, prio));
>> + eq->__w0 = cpu_to_be32(EQ_W0_VALID | EQ_W0_ENQUEUE |
>> + SETFIELD(EQ_W0_QSIZE, 0ul, EQ_QSIZE_64K) | EQ_W0_FIRMWARE);
>> #ifdef EQ_ALWAYS_NOTIFY
>> - eq->w0 |= EQ_W0_UCOND_NOTIFY;
>> + eq->__w0 |= cpu_to_be32(EQ_W0_UCOND_NOTIFY);
>> #endif
>> }
>>
>> @@ -926,8 +925,8 @@ static uint32_t *xive_get_eq_buf(uint32_t eq_blk, uint32_t eq_idx)
>> assert(x);
>> eq = xive_get_eq(x, eq_idx);
>> assert(eq);
>> - assert(eq->w0 & EQ_W0_VALID);
>> - addr = (((uint64_t)eq->w2) & 0x0fffffff) << 32 | eq->w3;
>> + assert(be32_to_cpu(eq->__w0) & EQ_W0_VALID);
>> + addr = (((uint64_t)be32_to_cpu(eq->__w2)) & 0x0fffffff) << 32 | be32_to_cpu(eq->__w3);
>>
>> return (uint32_t *)addr;
>> }
>> @@ -998,8 +997,8 @@ static uint32_t xive_alloc_eq_set(struct xive *x, bool alloc_indirect __unused)
>> }
>> }
>> memset(page, 0, 0x10000);
>> - x->eq_ind_base[ind_idx] = vsd_flags |
>> - (((uint64_t)page) & VSD_ADDRESS_MASK);
>> + x->eq_ind_base[ind_idx] = cpu_to_be64(vsd_flags |
>> + (((uint64_t)page) & VSD_ADDRESS_MASK));
>> /* Any cache scrub needed ? */
>> }
>> #endif /* USE_INDIRECT */
>> @@ -1046,7 +1045,7 @@ static bool xive_provision_vp_ind(struct xive *x, uint32_t vp_idx, uint32_t orde
>> vsd = ((uint64_t)page) & VSD_ADDRESS_MASK;
>> vsd |= SETFIELD(VSD_TSIZE, 0ull, 4);
>> vsd |= SETFIELD(VSD_MODE, 0ull, VSD_MODE_EXCLUSIVE);
>> - x->vp_ind_base[i] = vsd;
>> + x->vp_ind_base[i] = cpu_to_be64(vsd);
>> }
>> return true;
>> }
>> @@ -1456,7 +1455,7 @@ static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
>> * one written.
>> */
>> for (i = start_dword + dword_count - 1; i >= start_dword ;i--) {
>> - uint64_t dw = ((uint64_t *)new_data)[i - start_dword];
>> + uint64_t dw = be64_to_cpu(((__be64 *)new_data)[i - start_dword]);
>> __xive_regw(x, dreg0 + i * 8, dreg0x + i, dw, NULL);
>> }
>>
>> @@ -1846,7 +1845,7 @@ static bool xive_prealloc_tables(struct xive *x)
>> }
>> /* SBEs are initialized to 0b01 which corresponds to "ints off" */
>> memset(x->sbe_base, 0x55, SBE_SIZE);
>> - xive_dbg(x, "SBE at %p size 0x%x\n", x->sbe_base, IVT_SIZE);
>> + xive_dbg(x, "SBE at %p size 0x%x\n", x->sbe_base, SBE_SIZE);
>>
>> /* EAS/IVT entries are 8 bytes */
>> x->ivt_base = local_alloc(x->chip_id, IVT_SIZE, IVT_SIZE);
>> @@ -1919,7 +1918,7 @@ static bool xive_prealloc_tables(struct xive *x)
>> vsd |= SETFIELD(VSD_TSIZE, 0ull, 4);
>> vsd |= SETFIELD(VSD_MODE, 0ull, VSD_MODE_EXCLUSIVE);
>> vsd |= VSD_FIRMWARE;
>> - x->vp_ind_base[i] = vsd;
>> + x->vp_ind_base[i] = cpu_to_be64(vsd);
>> }
>>
>> #else /* USE_INDIRECT */
>> @@ -1966,7 +1965,7 @@ static void xive_add_provisioning_properties(void)
>> count = xive_block_count;
>> #endif
>> for (i = 0; i < count; i++)
>> - chips[i] = xive_block_to_chip[i];
>> + chips[i] = cpu_to_be32(xive_block_to_chip[i]);
>> dt_add_property(xive_dt_node, "ibm,xive-provision-chips",
>> chips, 4 * count);
>> }
>> @@ -2094,7 +2093,8 @@ uint32_t xive_alloc_hw_irqs(uint32_t chip_id, uint32_t count, uint32_t align)
>> for (i = 0; i < count; i++) {
>> struct xive_ive *ive = xive_get_ive(x, base + i);
>>
>> - ive->w = IVE_VALID | IVE_MASKED | SETFIELD(IVE_EQ_DATA, 0ul, base + i);
>> + ive->__w = cpu_to_be64(IVE_VALID | IVE_MASKED |
>> + SETFIELD(IVE_EQ_DATA, 0ul, base + i));
>> }
>>
>> unlock(&x->lock);
>> @@ -2140,8 +2140,8 @@ uint32_t xive_alloc_ipi_irqs(uint32_t chip_id, uint32_t count, uint32_t align)
>> for (i = 0; i < count; i++) {
>> struct xive_ive *ive = xive_get_ive(x, base + i);
>>
>> - ive->w = IVE_VALID | IVE_MASKED |
>> - SETFIELD(IVE_EQ_DATA, 0ul, base + i);
>> + ive->__w = cpu_to_be64(IVE_VALID | IVE_MASKED |
>> + SETFIELD(IVE_EQ_DATA, 0ul, base + i));
>> }
>>
>> unlock(&x->lock);
>> @@ -2267,6 +2267,7 @@ static bool xive_get_irq_targetting(uint32_t isn, uint32_t *out_target,
>> uint32_t eq_blk, eq_idx;
>> uint32_t vp_blk __unused, vp_idx;
>> uint32_t prio, server;
>> + uint64_t ive_w;
>> bool is_escalation = GIRQ_IS_ESCALATION(isn);
>>
>> /* Find XIVE on which the IVE resides */
>> @@ -2277,17 +2278,18 @@ static bool xive_get_irq_targetting(uint32_t isn, uint32_t *out_target,
>> ive = xive_get_ive(x, isn);
>> if (!ive)
>> return false;
>> - if (!(ive->w & IVE_VALID) && !is_escalation) {
>> + ive_w = be64_to_cpu(ive->__w);
>> + if (!(ive_w & IVE_VALID) && !is_escalation) {
>> xive_err(x, "ISN %x lead to invalid IVE !\n", isn);
>> return false;
>> }
>>
>> if (out_lirq)
>> - *out_lirq = GETFIELD(IVE_EQ_DATA, ive->w);
>> + *out_lirq = GETFIELD(IVE_EQ_DATA, ive_w);
>>
>> /* Find the EQ and its xive instance */
>> - eq_blk = GETFIELD(IVE_EQ_BLOCK, ive->w);
>> - eq_idx = GETFIELD(IVE_EQ_INDEX, ive->w);
>> + eq_blk = GETFIELD(IVE_EQ_BLOCK, ive_w);
>> + eq_idx = GETFIELD(IVE_EQ_INDEX, ive_w);
>> eq_x = xive_from_vc_blk(eq_blk);
>>
>> /* This can fail if the interrupt hasn't been initialized yet
>> @@ -2302,15 +2304,15 @@ static bool xive_get_irq_targetting(uint32_t isn, uint32_t *out_target,
>> /* XXX Check valid and format 0 */
>>
>> /* No priority conversion, return the actual one ! */
>> - if (ive->w & IVE_MASKED)
>> + if (ive_w & IVE_MASKED)
>> prio = 0xff;
>> else
>> - prio = GETFIELD(EQ_W7_F0_PRIORITY, eq->w7);
>> + prio = GETFIELD(EQ_W7_F0_PRIORITY, be32_to_cpu(eq->__w7));
>> if (out_prio)
>> *out_prio = prio;
>>
>> - vp_blk = GETFIELD(EQ_W6_NVT_BLOCK, eq->w6);
>> - vp_idx = GETFIELD(EQ_W6_NVT_INDEX, eq->w6);
>> + vp_blk = GETFIELD(EQ_W6_NVT_BLOCK, be32_to_cpu(eq->__w6));
>> + vp_idx = GETFIELD(EQ_W6_NVT_INDEX, be32_to_cpu(eq->__w6));
>> server = VP2PIR(vp_blk, vp_idx);
>>
>> if (out_target)
>> @@ -2360,8 +2362,8 @@ static inline bool xive_eq_for_target(uint32_t target, uint8_t prio,
>> /* Grab it, it's in the pressure relief interrupt field,
>> * top 4 bits are the block (word 1).
>> */
>> - eq_blk = vp->w1 >> 28;
>> - eq_idx = vp->w1 & 0x0fffffff;
>> + eq_blk = be32_to_cpu(vp->__w1) >> 28;
>> + eq_idx = be32_to_cpu(vp->__w1) & 0x0fffffff;
>>
>> /* Currently the EQ block and VP block should be the same */
>> if (eq_blk != vp_blk) {
>> @@ -2397,7 +2399,7 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>> ive = xive_get_ive(x, isn);
>> if (!ive)
>> return OPAL_PARAMETER;
>> - if (!(ive->w & IVE_VALID) && !is_escalation) {
>> + if (!(be64_to_cpu(ive->__w) & IVE_VALID) && !is_escalation) {
>> xive_err(x, "ISN %x lead to invalid IVE !\n", isn);
>> return OPAL_PARAMETER;
>> }
>> @@ -2409,7 +2411,7 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>> prio = XIVE_EMULATION_PRIO;
>>
>> /* Read existing IVE */
>> - new_ive = ive->w;
>> + new_ive = be64_to_cpu(ive->__w);
>>
>> /* Are we masking ? */
>> if (prio == 0xff && !is_escalation) {
>> @@ -2420,7 +2422,7 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>> prio = 7;
>> } else {
>> /* Unmasking */
>> - new_ive = ive->w & ~IVE_MASKED;
>> + new_ive = be64_to_cpu(ive->__w) & ~IVE_MASKED;
>> xive_vdbg(x, "ISN %x unmasked !\n", isn);
>>
>> /* For normal interrupt sources, keep track of which ones
>> @@ -2460,7 +2462,7 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>> 2, 1, &new_ive, true, synchronous);
>> } else {
>> sync();
>> - ive->w = new_ive;
>> + ive->__w = cpu_to_be64(new_ive);
>> rc = xive_ivc_scrub(x, x->block_id, GIRQ_TO_IDX(isn));
>> }
>>
>> @@ -2641,6 +2643,7 @@ void __xive_source_eoi(struct irq_source *is, uint32_t isn)
>> uint32_t idx = isn - s->esb_base;
>> struct xive_ive *ive;
>> void *mmio_base;
>> + uint64_t ive_w;
>> uint64_t eoi_val;
>>
>> /* Grab the IVE */
>> @@ -2649,13 +2652,15 @@ void __xive_source_eoi(struct irq_source *is, uint32_t isn)
>> return;
>> ive += GIRQ_TO_IDX(isn);
>>
>> + ive_w = be64_to_cpu(ive->__w);
>> +
>> /* XXX To fix the races with mask/unmask potentially causing
>> * multiple queue entries, we need to keep track of EOIs here,
>> * before the masked test below
>> */
>>
>> /* If it's invalid or masked, don't do anything */
>> - if ((ive->w & IVE_MASKED) || !(ive->w & IVE_VALID))
>> + if ((ive_w & IVE_MASKED) || !(ive_w & IVE_VALID))
>> return;
>>
>> /* Grab MMIO control address for that ESB */
>> @@ -3029,13 +3034,17 @@ static bool xive_check_eq_update(struct xive *x, uint32_t idx, struct xive_eq *e
>> if (memcmp(eq, &eq2, sizeof(struct xive_eq)) != 0) {
>> xive_err(x, "EQ update mismatch idx %d\n", idx);
>> xive_err(x, "want: %08x %08x %08x %08x\n",
>> - eq->w0, eq->w1, eq->w2, eq->w3);
>> + be32_to_cpu(eq->__w0), be32_to_cpu(eq->__w1),
>> + be32_to_cpu(eq->__w2), be32_to_cpu(eq->__w3));
>> xive_err(x, " %08x %08x %08x %08x\n",
>> - eq->w4, eq->w5, eq->w6, eq->w7);
>> + be32_to_cpu(eq->__w4), be32_to_cpu(eq->__w5),
>> + be32_to_cpu(eq->__w6), be32_to_cpu(eq->__w7));
>> xive_err(x, "got : %08x %08x %08x %08x\n",
>> - eq2.w0, eq2.w1, eq2.w2, eq2.w3);
>> + be32_to_cpu(eq2.__w0), be32_to_cpu(eq2.__w1),
>> + be32_to_cpu(eq2.__w2), be32_to_cpu(eq2.__w3));
>> xive_err(x, " %08x %08x %08x %08x\n",
>> - eq2.w4, eq2.w5, eq2.w6, eq2.w7);
>> + be32_to_cpu(eq2.__w4), be32_to_cpu(eq2.__w5),
>> + be32_to_cpu(eq2.__w6), be32_to_cpu(eq2.__w7));
>> return false;
>> }
>> return true;
>> @@ -3051,13 +3060,17 @@ static bool xive_check_vpc_update(struct xive *x, uint32_t idx, struct xive_vp *
>> if (memcmp(vp, &vp2, sizeof(struct xive_vp)) != 0) {
>> xive_err(x, "VP update mismatch idx %d\n", idx);
>> xive_err(x, "want: %08x %08x %08x %08x\n",
>> - vp->w0, vp->w1, vp->w2, vp->w3);
>> + be32_to_cpu(vp->__w0), be32_to_cpu(vp->__w1),
>> + be32_to_cpu(vp->__w2), be32_to_cpu(vp->__w3));
>> xive_err(x, " %08x %08x %08x %08x\n",
>> - vp->w4, vp->w5, vp->w6, vp->w7);
>> + be32_to_cpu(vp->__w4), be32_to_cpu(vp->__w5),
>> + be32_to_cpu(vp->__w6), be32_to_cpu(vp->__w7));
>> xive_err(x, "got : %08x %08x %08x %08x\n",
>> - vp2.w0, vp2.w1, vp2.w2, vp2.w3);
>> + be32_to_cpu(vp2.__w0), be32_to_cpu(vp2.__w1),
>> + be32_to_cpu(vp2.__w2), be32_to_cpu(vp2.__w3));
>> xive_err(x, " %08x %08x %08x %08x\n",
>> - vp2.w4, vp2.w5, vp2.w6, vp2.w7);
>> + be32_to_cpu(vp2.__w4), be32_to_cpu(vp2.__w5),
>> + be32_to_cpu(vp2.__w6), be32_to_cpu(vp2.__w7));
>> return false;
>> }
>> return true;
>> @@ -3089,7 +3102,7 @@ static void xive_special_cache_check(struct xive *x, uint32_t blk, uint32_t idx)
>>
>> memset(vp_m, (~i) & 0xff, sizeof(*vp_m));
>> sync();
>> - vp.w1 = (i << 16) | i;
>> + vp.__w1 = cpu_to_be32((i << 16) | i);
>> xive_vpc_cache_update(x, blk, idx,
>> 0, 8, &vp, false, true);
>> if (!xive_check_vpc_update(x, idx, &vp)) {
>> @@ -3132,6 +3145,7 @@ static void xive_setup_hw_for_emu(struct xive_cpu_state *xs)
>>
>> /* Use the cache watch to write it out */
>> lock(&x_eq->lock);
>> +
>> xive_eqc_cache_update(x_eq, xs->eq_blk,
>> xs->eq_idx + XIVE_EMULATION_PRIO,
>> 0, 4, &eq, false, true);
>> @@ -3349,7 +3363,7 @@ static void xive_init_cpu_properties(struct cpu_thread *cpu)
>> t = (i == 0) ? cpu : find_cpu_by_pir(cpu->pir + i);
>> if (!t)
>> continue;
>> - iprop[i][0] = t->xstate->ipi_irq;
>> + iprop[i][0] = cpu_to_be32(t->xstate->ipi_irq);
>> iprop[i][1] = 0; /* Edge */
>> }
>> dt_add_property(cpu->node, "interrupts", iprop, cpu_thread_count * 8);
>> @@ -3420,7 +3434,7 @@ static uint32_t xive_read_eq(struct xive_cpu_state *xs, bool just_peek)
>> unlock(&xs->xive->lock);
>> eq = xive_get_eq(xs->xive, xs->eq_idx + XIVE_EMULATION_PRIO);
>> prerror("EQ @%p W0=%08x W1=%08x qbuf @%p\n",
>> - eq, eq->w0, eq->w1, xs->eqbuf);
>> + eq, be32_to_cpu(eq->__w0), be32_to_cpu(eq->__w1), xs->eqbuf);
>> }
>> log_add(xs, LOG_TYPE_POPQ, 7, cur,
>> xs->eqbuf[(xs->eqptr + 1) & xs->eqmsk],
>> @@ -3637,20 +3651,20 @@ static bool check_misrouted_ipi(struct cpu_thread *me, uint32_t irq)
>> xive_cpu_err(me, "no ive attached\n");
>> return true;
>> }
>> - xive_cpu_err(me, "ive=%016llx\n", ive->w);
>> + xive_cpu_err(me, "ive=%016llx\n", be64_to_cpu(ive->__w));
>> for_each_chip(chip) {
>> x = chip->xive;
>> if (!x)
>> continue;
>> ive = x->ivt_base;
>> for (i = 0; i < MAX_INT_ENTRIES; i++) {
>> - if ((ive[i].w & IVE_EQ_DATA) == irq) {
>> + if ((be64_to_cpu(ive[i].__w) & IVE_EQ_DATA) == irq) {
>> - eq_blk = GETFIELD(IVE_EQ_BLOCK, ive[i].w);
>> - eq_idx = GETFIELD(IVE_EQ_INDEX, ive[i].w);
>> + eq_blk = GETFIELD(IVE_EQ_BLOCK, be64_to_cpu(ive[i].__w));
>> + eq_idx = GETFIELD(IVE_EQ_INDEX, be64_to_cpu(ive[i].__w));
>> xive_cpu_err(me, "Found source: 0x%x ive=%016llx\n"
>> " eq 0x%x/%x",
>> BLKIDX_TO_GIRQ(x->block_id, i),
>> - ive[i].w, eq_blk, eq_idx);
>> + be64_to_cpu(ive[i].__w), eq_blk, eq_idx);
>> xive_dump_eq(eq_blk, eq_idx);
>> }
>> }
>> @@ -3668,7 +3682,7 @@ static inline bool check_misrouted_ipi(struct cpu_thread *c __unused,
>> }
>> #endif
>>
>> -static int64_t opal_xive_get_xirr(uint32_t *out_xirr, bool just_poll)
>> +static int64_t opal_xive_get_xirr(__be32 *out_xirr, bool just_poll)
>> {
>> struct cpu_thread *c = this_cpu();
>> struct xive_cpu_state *xs = c->xstate;
>> @@ -3752,7 +3766,7 @@ static int64_t opal_xive_get_xirr(uint32_t *out_xirr, bool just_poll)
>> false, false);
>> unlock(&xs->xive->lock);
>> eq = xive_get_eq(xs->xive, xs->eq_idx + XIVE_EMULATION_PRIO);
>> - log_add(xs, LOG_TYPE_EQD, 2, eq->w0, eq->w1);
>> + log_add(xs, LOG_TYPE_EQD, 2, be32_to_cpu(eq->__w0), be32_to_cpu(eq->__w1));
>> }
>> #endif /* XIVE_PERCPU_LOG */
>>
>> @@ -3775,7 +3789,7 @@ static int64_t opal_xive_get_xirr(uint32_t *out_xirr, bool just_poll)
>> if (check_misrouted_ipi(c, val))
>> val = 2;
>>
>> - *out_xirr = (old_cppr << 24) | val;
>> + *out_xirr = cpu_to_be32((old_cppr << 24) | val);
>>
>> /* If we are polling, that's it */
>> if (just_poll)
>> @@ -3812,9 +3826,9 @@ static int64_t opal_xive_get_xirr(uint32_t *out_xirr, bool just_poll)
>> skip:
>>
>> log_add(xs, LOG_TYPE_XIRR2, 5, xs->cppr, xs->pending,
>> - *out_xirr, xs->eqptr, xs->eqgen);
>> + be32_to_cpu(*out_xirr), xs->eqptr, xs->eqgen);
>> xive_cpu_vdbg(c, " returning XIRR=%08x, pending=0x%x\n",
>> - *out_xirr, xs->pending);
>> + be32_to_cpu(*out_xirr), xs->pending);
>>
>> unlock(&xs->lock);
>>
>> @@ -3890,11 +3904,11 @@ static uint64_t xive_convert_irq_flags(uint64_t iflags)
>> }
>>
>> static int64_t opal_xive_get_irq_info(uint32_t girq,
>> - uint64_t *out_flags,
>> - uint64_t *out_eoi_page,
>> - uint64_t *out_trig_page,
>> - uint32_t *out_esb_shift,
>> - uint32_t *out_src_chip)
>> + __be64 *out_flags,
>> + __be64 *out_eoi_page,
>> + __be64 *out_trig_page,
>> + __be32 *out_esb_shift,
>> + __be32 *out_src_chip)
>> {
>> struct irq_source *is = irq_find_source(girq);
>> struct xive_src *s = container_of(is, struct xive_src, is);
>> @@ -3909,7 +3923,7 @@ static int64_t opal_xive_get_irq_info(uint32_t girq,
>> assert(is->ops == &xive_irq_source_ops);
>>
>> if (out_flags)
>> - *out_flags = xive_convert_irq_flags(s->flags);
>> + *out_flags = cpu_to_be64(xive_convert_irq_flags(s->flags));
>>
>> /*
>> * If the orig source has a set_xive callback, then set
>> @@ -3918,15 +3932,15 @@ static int64_t opal_xive_get_irq_info(uint32_t girq,
>> */
>> if (out_flags && s->orig_ops) {
>> if (s->orig_ops->set_xive)
>> - *out_flags |= OPAL_XIVE_IRQ_MASK_VIA_FW;
>> + *out_flags |= cpu_to_be64(OPAL_XIVE_IRQ_MASK_VIA_FW);
>> if (s->orig_ops->eoi)
>> - *out_flags |= OPAL_XIVE_IRQ_EOI_VIA_FW;
>> + *out_flags |= cpu_to_be64(OPAL_XIVE_IRQ_EOI_VIA_FW);
>> }
>>
>> idx = girq - s->esb_base;
>>
>> if (out_esb_shift)
>> - *out_esb_shift = s->esb_shift;
>> + *out_esb_shift = cpu_to_be32(s->esb_shift);
>>
>> mm_base = (uint64_t)s->esb_mmio + (1ull << s->esb_shift) * idx;
>>
>> @@ -3942,27 +3956,31 @@ static int64_t opal_xive_get_irq_info(uint32_t girq,
>> trig_page = mm_base;
>>
>> if (out_eoi_page)
>> - *out_eoi_page = eoi_page;
>> + *out_eoi_page = cpu_to_be64(eoi_page);
>> if (out_trig_page)
>> - *out_trig_page = trig_page;
>> + *out_trig_page = cpu_to_be64(trig_page);
>> if (out_src_chip)
>> - *out_src_chip = GIRQ_TO_CHIP(girq);
>> + *out_src_chip = cpu_to_be32(GIRQ_TO_CHIP(girq));
>>
>> return OPAL_SUCCESS;
>> }
>>
>> static int64_t opal_xive_get_irq_config(uint32_t girq,
>> - uint64_t *out_vp,
>> + __be64 *out_vp,
>> uint8_t *out_prio,
>> - uint32_t *out_lirq)
>> + __be32 *out_lirq)
>> {
>> uint32_t vp;
>> + uint32_t lirq;
>> + uint8_t prio;
>>
>> if (xive_mode != XIVE_MODE_EXPL)
>> return OPAL_WRONG_STATE;
>>
>> - if (xive_get_irq_targetting(girq, &vp, out_prio, out_lirq)) {
>> - *out_vp = vp;
>> + if (xive_get_irq_targetting(girq, &vp, &prio, &lirq)) {
>> + *out_vp = cpu_to_be64(vp);
>> + *out_prio = prio;
>> + *out_lirq = cpu_to_be32(lirq);
>> return OPAL_SUCCESS;
>> } else
>> return OPAL_PARAMETER;
>> @@ -3993,15 +4011,16 @@ static int64_t opal_xive_set_irq_config(uint32_t girq,
>> }
>>
>> static int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
>> - uint64_t *out_qpage,
>> - uint64_t *out_qsize,
>> - uint64_t *out_qeoi_page,
>> - uint32_t *out_escalate_irq,
>> - uint64_t *out_qflags)
>> + __be64 *out_qpage,
>> + __be64 *out_qsize,
>> + __be64 *out_qeoi_page,
>> + __be32 *out_escalate_irq,
>> + __be64 *out_qflags)
>> {
>> uint32_t blk, idx;
>> struct xive *x;
>> struct xive_eq *eq;
>> + uint32_t eq_w0;
>>
>> if (xive_mode != XIVE_MODE_EXPL)
>> return OPAL_WRONG_STATE;
>> @@ -4017,22 +4036,24 @@ static int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
>> if (!eq)
>> return OPAL_PARAMETER;
>>
>> + eq_w0 = be32_to_cpu(eq->__w0);
>> +
>> if (out_escalate_irq) {
>> uint32_t esc_idx = idx;
>>
>> /* If escalations are routed to a single queue, fix up
>> * the escalation interrupt number here.
>> */
>> - if (eq->w0 & EQ_W0_UNCOND_ESCALATE)
>> + if (eq_w0 & EQ_W0_UNCOND_ESCALATE)
>> esc_idx |= 7;
>> *out_escalate_irq =
>> - MAKE_ESCALATION_GIRQ(blk, esc_idx);
>> + cpu_to_be32(MAKE_ESCALATION_GIRQ(blk, esc_idx));
>> }
>>
>> /* If this is a single-escalation gather queue, that's all
>> * there is to return
>> */
>> - if (eq->w0 & EQ_W0_SILENT_ESCALATE) {
>> + if (eq_w0 & EQ_W0_SILENT_ESCALATE) {
>> if (out_qflags)
>> *out_qflags = 0;
>> if (out_qpage)
>> @@ -4045,30 +4066,30 @@ static int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
>> }
>>
>> if (out_qpage) {
>> - if (eq->w0 & EQ_W0_ENQUEUE)
>> + if (eq_w0 & EQ_W0_ENQUEUE)
>> *out_qpage =
>> - (((uint64_t)(eq->w2 & 0x0fffffff)) << 32) | eq->w3;
>> + cpu_to_be64((((uint64_t)(be32_to_cpu(eq->__w2) & 0x0fffffff)) << 32) | be32_to_cpu(eq->__w3));
>> else
>> *out_qpage = 0;
>> }
>> if (out_qsize) {
>> - if (eq->w0 & EQ_W0_ENQUEUE)
>> - *out_qsize = GETFIELD(EQ_W0_QSIZE, eq->w0) + 12;
>> + if (eq_w0 & EQ_W0_ENQUEUE)
>> + *out_qsize = cpu_to_be64(GETFIELD(EQ_W0_QSIZE, eq_w0) + 12);
>> else
>> *out_qsize = 0;
>> }
>> if (out_qeoi_page) {
>> *out_qeoi_page =
>> - (uint64_t)x->eq_mmio + idx * 0x20000;
>> + cpu_to_be64((uint64_t)x->eq_mmio + idx * 0x20000);
>> }
>> if (out_qflags) {
>> *out_qflags = 0;
>> - if (eq->w0 & EQ_W0_VALID)
>> - *out_qflags |= OPAL_XIVE_EQ_ENABLED;
>> - if (eq->w0 & EQ_W0_UCOND_NOTIFY)
>> - *out_qflags |= OPAL_XIVE_EQ_ALWAYS_NOTIFY;
>> - if (eq->w0 & EQ_W0_ESCALATE_CTL)
>> - *out_qflags |= OPAL_XIVE_EQ_ESCALATE;
>> + if (eq_w0 & EQ_W0_VALID)
>> + *out_qflags |= cpu_to_be64(OPAL_XIVE_EQ_ENABLED);
>> + if (eq_w0 & EQ_W0_UCOND_NOTIFY)
>> + *out_qflags |= cpu_to_be64(OPAL_XIVE_EQ_ALWAYS_NOTIFY);
>> + if (eq_w0 & EQ_W0_ESCALATE_CTL)
>> + *out_qflags |= cpu_to_be64(OPAL_XIVE_EQ_ESCALATE);
>> }
>>
>> return OPAL_SUCCESS;
>> @@ -4076,9 +4097,9 @@ static int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
>>
>> static void xive_cleanup_eq(struct xive_eq *eq)
>> {
>> - eq->w0 = eq->w0 & EQ_W0_FIRMWARE;
>> - eq->w1 = EQ_W1_ESe_Q | EQ_W1_ESn_Q;
>> - eq->w2 = eq->w3 = eq->w4 = eq->w5 = eq->w6 = eq->w7 = 0;
>> + eq->__w0 = cpu_to_be32(be32_to_cpu(eq->__w0) & EQ_W0_FIRMWARE);
>> + eq->__w1 = cpu_to_be32(EQ_W1_ESe_Q | EQ_W1_ESn_Q);
>> + eq->__w2 = eq->__w3 = eq->__w4 = eq->__w5 = eq->__w6 = eq->__w7 = 0;
>> }
>>
>> static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
>> @@ -4110,7 +4131,7 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
>> /* If this is a silent escalation queue, it cannot be
>> * configured directly
>> */
>> - if (old_eq->w0 & EQ_W0_SILENT_ESCALATE)
>> + if (be32_to_cpu(old_eq->__w0) & EQ_W0_SILENT_ESCALATE)
>> return OPAL_PARAMETER;
>>
>> /* This shouldn't fail or xive_eq_for_target would have
>> @@ -4132,14 +4153,14 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
>> case 16:
>> case 21:
>> case 24:
>> - eq.w3 = ((uint64_t)qpage) & 0xffffffff;
>> - eq.w2 = (((uint64_t)qpage)) >> 32 & 0x0fffffff;
>> - eq.w0 |= EQ_W0_ENQUEUE;
>> - eq.w0 = SETFIELD(EQ_W0_QSIZE, eq.w0, qsize - 12);
>> + eq.__w3 = cpu_to_be32(((uint64_t)qpage) & 0xffffffff);
>> + eq.__w2 = cpu_to_be32((((uint64_t)qpage)) >> 32 & 0x0fffffff);
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) | EQ_W0_ENQUEUE);
>> + eq.__w0 = cpu_to_be32(SETFIELD(EQ_W0_QSIZE, be32_to_cpu(eq.__w0), qsize - 12));
>> break;
>> case 0:
>> - eq.w2 = eq.w3 = 0;
>> - eq.w0 &= ~EQ_W0_ENQUEUE;
>> + eq.__w2 = eq.__w3 = 0;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) & ~EQ_W0_ENQUEUE);
>> break;
>> default:
>> return OPAL_PARAMETER;
>> @@ -4148,34 +4169,35 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
>> /* Ensure the priority and target are correctly set (they will
>> * not be right after allocation
>> */
>> - eq.w6 = SETFIELD(EQ_W6_NVT_BLOCK, 0ul, vp_blk) |
>> - SETFIELD(EQ_W6_NVT_INDEX, 0ul, vp_idx);
>> - eq.w7 = SETFIELD(EQ_W7_F0_PRIORITY, 0ul, prio);
>> + eq.__w6 = cpu_to_be32(SETFIELD(EQ_W6_NVT_BLOCK, 0ul, vp_blk) |
>> + SETFIELD(EQ_W6_NVT_INDEX, 0ul, vp_idx));
>> + eq.__w7 = cpu_to_be32(SETFIELD(EQ_W7_F0_PRIORITY, 0ul, prio));
>> /* XXX Handle group i bit when needed */
>>
>> /* Always notify flag */
>> if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
>> - eq.w0 |= EQ_W0_UCOND_NOTIFY;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) | EQ_W0_UCOND_NOTIFY);
>> else
>> - eq.w0 &= ~EQ_W0_UCOND_NOTIFY;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) & ~EQ_W0_UCOND_NOTIFY);
>>
>> /* Escalation flag */
>> if (qflags & OPAL_XIVE_EQ_ESCALATE)
>> - eq.w0 |= EQ_W0_ESCALATE_CTL;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) | EQ_W0_ESCALATE_CTL);
>> else
>> - eq.w0 &= ~EQ_W0_ESCALATE_CTL;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) & ~EQ_W0_ESCALATE_CTL);
>>
>> /* Unconditionally clear the current queue pointer, set
>> * generation to 1 and disable escalation interrupts.
>> */
>> - eq.w1 = EQ_W1_GENERATION |
>> - (old_eq->w1 & (EQ_W1_ESe_P | EQ_W1_ESe_Q |
>> - EQ_W1_ESn_P | EQ_W1_ESn_Q));
>> + eq.__w1 = cpu_to_be32(EQ_W1_GENERATION |
>> + (be32_to_cpu(old_eq->__w1) &
>> + (EQ_W1_ESe_P | EQ_W1_ESe_Q |
>> + EQ_W1_ESn_P | EQ_W1_ESn_Q)));
>>
>> /* Enable. We always enable backlog for an enabled queue
>> * otherwise escalations won't work.
>> */
>> - eq.w0 |= EQ_W0_VALID | EQ_W0_BACKLOG;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) | EQ_W0_VALID | EQ_W0_BACKLOG);
>> } else
>> xive_cleanup_eq(&eq);
>>
>> @@ -4188,8 +4210,8 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
>> }
>>
>> static int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
>> - uint32_t *out_qtoggle,
>> - uint32_t *out_qindex)
>> + __be32 *out_qtoggle,
>> + __be32 *out_qindex)
>> {
>> uint32_t blk, idx;
>> struct xive *x;
>> @@ -4219,11 +4241,11 @@ static int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
>> return rc;
>>
>> /* We don't do disable queues */
>> - if (!(eq->w0 & EQ_W0_VALID))
>> + if (!(be32_to_cpu(eq->__w0) & EQ_W0_VALID))
>> return OPAL_WRONG_STATE;
>>
>> - *out_qtoggle = GETFIELD(EQ_W1_GENERATION, eq->w1);
>> - *out_qindex = GETFIELD(EQ_W1_PAGE_OFF, eq->w1);
>> + *out_qtoggle = cpu_to_be32(GETFIELD(EQ_W1_GENERATION, be32_to_cpu(eq->__w1)));
>> + *out_qindex = cpu_to_be32(GETFIELD(EQ_W1_PAGE_OFF, be32_to_cpu(eq->__w1)));
>>
>> return OPAL_SUCCESS;
>> }
>> @@ -4251,13 +4273,13 @@ static int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
>> return OPAL_PARAMETER;
>>
>> /* We don't do disable queues */
>> - if (!(eq->w0 & EQ_W0_VALID))
>> + if (!(be32_to_cpu(eq->__w0) & EQ_W0_VALID))
>> return OPAL_WRONG_STATE;
>>
>> new_eq = *eq;
>>
>> - new_eq.w1 = SETFIELD(EQ_W1_GENERATION, new_eq.w1, qtoggle);
>> - new_eq.w1 = SETFIELD(EQ_W1_PAGE_OFF, new_eq.w1, qindex);
>> + new_eq.__w1 = cpu_to_be32(SETFIELD(EQ_W1_GENERATION, be32_to_cpu(new_eq.__w1), qtoggle));
>> + new_eq.__w1 = cpu_to_be32(SETFIELD(EQ_W1_PAGE_OFF, be32_to_cpu(new_eq.__w1), qindex));
>>
>> lock(&x->lock);
>> rc = xive_eqc_cache_update(x, blk, idx, 0, 4, &new_eq, false, false);
>> @@ -4289,10 +4311,10 @@ static int64_t opal_xive_donate_page(uint32_t chip_id, uint64_t addr)
>> }
>>
>> static int64_t opal_xive_get_vp_info(uint64_t vp_id,
>> - uint64_t *out_flags,
>> - uint64_t *out_cam_value,
>> - uint64_t *out_report_cl_pair,
>> - uint32_t *out_chip_id)
>> + __be64 *out_flags,
>> + __be64 *out_cam_value,
>> + __be64 *out_report_cl_pair,
>> + __be32 *out_chip_id)
>> {
>> struct xive *x;
>> struct xive_vp *vp;
>> @@ -4334,22 +4356,22 @@ static int64_t opal_xive_get_vp_info(uint64_t vp_id,
>> eq = xive_get_eq(x, eq_idx);
>> if (!eq)
>> return OPAL_PARAMETER;
>> - if (vp->w0 & VP_W0_VALID)
>> - *out_flags |= OPAL_XIVE_VP_ENABLED;
>> - if (eq->w0 & EQ_W0_SILENT_ESCALATE)
>> - *out_flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
>> + if (be32_to_cpu(vp->__w0) & VP_W0_VALID)
>> + *out_flags |= cpu_to_be64(OPAL_XIVE_VP_ENABLED);
>> + if (be32_to_cpu(eq->__w0) & EQ_W0_SILENT_ESCALATE)
>> + *out_flags |= cpu_to_be64(OPAL_XIVE_VP_SINGLE_ESCALATION);
>> }
>>
>> if (out_cam_value)
>> - *out_cam_value = (blk << 19) | idx;
>> + *out_cam_value = cpu_to_be64((blk << 19) | idx);
>>
>> if (out_report_cl_pair) {
>> - *out_report_cl_pair = ((uint64_t)(vp->w6 & 0x0fffffff)) << 32;
>> - *out_report_cl_pair |= vp->w7 & 0xffffff00;
>> + *out_report_cl_pair = cpu_to_be64(((uint64_t)(be32_to_cpu(vp->__w6) & 0x0fffffff)) << 32);
>> + *out_report_cl_pair |= cpu_to_be64(be32_to_cpu(vp->__w7) & 0xffffff00);
>> }
>>
>> if (out_chip_id)
>> - *out_chip_id = xive_block_to_chip[blk];
>> + *out_chip_id = cpu_to_be32(xive_block_to_chip[blk]);
>>
>> return OPAL_SUCCESS;
>> }
>> @@ -4377,8 +4399,8 @@ static int64_t xive_setup_silent_gather(uint64_t vp_id, bool enable)
>> /* If trying to enable silent gather, make sure prio 7 is not
>> * already enabled as a normal queue
>> */
>> - if (enable && (eq_orig->w0 & EQ_W0_VALID) &&
>> - !(eq_orig->w0 & EQ_W0_SILENT_ESCALATE)) {
>> + if (enable && (be32_to_cpu(eq_orig->__w0) & EQ_W0_VALID) &&
>> + !(be32_to_cpu(eq_orig->__w0) & EQ_W0_SILENT_ESCALATE)) {
>> xive_dbg(x, "Attempt at enabling silent gather but"
>> " prio 7 queue already in use\n");
>> return OPAL_PARAMETER;
>> @@ -4388,15 +4410,14 @@ static int64_t xive_setup_silent_gather(uint64_t vp_id, bool enable)
>>
>> if (enable) {
>> /* W0: Enabled and "s" set, no other bit */
>> - eq.w0 &= EQ_W0_FIRMWARE;
>> - eq.w0 |= EQ_W0_VALID | EQ_W0_SILENT_ESCALATE |
>> - EQ_W0_ESCALATE_CTL | EQ_W0_BACKLOG;
>> + eq.__w0 = cpu_to_be32((be32_to_cpu(eq.__w0) & EQ_W0_FIRMWARE)
>> + | EQ_W0_VALID | EQ_W0_SILENT_ESCALATE |
>> + EQ_W0_ESCALATE_CTL | EQ_W0_BACKLOG);
>>
>> /* W1: Mark ESn as 01, ESe as 00 */
>> - eq.w1 &= ~EQ_W1_ESn_P;
>> - eq.w1 |= EQ_W1_ESn_Q;
>> - eq.w1 &= ~(EQ_W1_ESe);
>> - } else if (eq.w0 & EQ_W0_SILENT_ESCALATE)
>> + eq.__w1 = cpu_to_be32(((be32_to_cpu(eq.__w1) & ~EQ_W1_ESn_P)
>> + | EQ_W1_ESn_Q) & ~EQ_W1_ESe);
>> + } else if (be32_to_cpu(eq.__w0) & EQ_W0_SILENT_ESCALATE)
>> xive_cleanup_eq(&eq);
>>
>> if (!memcmp(eq_orig, &eq, sizeof(eq)))
>> @@ -4417,19 +4438,19 @@ static int64_t xive_setup_silent_gather(uint64_t vp_id, bool enable)
>> eq = *eq_orig;
>> if (enable) {
>> /* Set new "u" bit */
>> - eq.w0 |= EQ_W0_UNCOND_ESCALATE;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) | EQ_W0_UNCOND_ESCALATE);
>>
>> /* Re-route escalation interrupt (previous
>> * route is lost !) to the gather queue
>> */
>> - eq.w4 = SETFIELD(EQ_W4_ESC_EQ_BLOCK,
>> - eq.w4, blk);
>> - eq.w4 = SETFIELD(EQ_W4_ESC_EQ_INDEX,
>> - eq.w4, idx + 7);
>> - } else if (eq.w0 & EQ_W0_UNCOND_ESCALATE) {
>> + eq.__w4 = cpu_to_be32(SETFIELD(EQ_W4_ESC_EQ_BLOCK,
>> + be32_to_cpu(eq.__w4), blk));
>> + eq.__w4 = cpu_to_be32(SETFIELD(EQ_W4_ESC_EQ_INDEX,
>> + be32_to_cpu(eq.__w4), idx + 7));
>> + } else if (be32_to_cpu(eq.__w0) & EQ_W0_UNCOND_ESCALATE) {
>> /* Clear the "u" bit, disable escalations if it was set */
>> - eq.w0 &= ~EQ_W0_UNCOND_ESCALATE;
>> - eq.w0 &= ~EQ_W0_ESCALATE_CTL;
>> + eq.__w0 = cpu_to_be32(be32_to_cpu(eq.__w0) &
>> + ~(EQ_W0_UNCOND_ESCALATE | EQ_W0_ESCALATE_CTL));
>> }
>> if (!memcmp(eq_orig, &eq, sizeof(eq)))
>> continue;
>> @@ -4470,16 +4491,16 @@ static int64_t opal_xive_set_vp_info(uint64_t vp_id,
>>
>> vp_new = *vp;
>> if (flags & OPAL_XIVE_VP_ENABLED) {
>> - vp_new.w0 |= VP_W0_VALID;
>> - vp_new.w6 = report_cl_pair >> 32;
>> - vp_new.w7 = report_cl_pair & 0xffffffff;
>> + vp_new.__w0 = cpu_to_be32(be32_to_cpu(vp_new.__w0) | VP_W0_VALID);
>> + vp_new.__w6 = cpu_to_be32(report_cl_pair >> 32);
>> + vp_new.__w7 = cpu_to_be32(report_cl_pair & 0xffffffff);
>>
>> if (flags & OPAL_XIVE_VP_SINGLE_ESCALATION)
>> rc = xive_setup_silent_gather(vp_id, true);
>> else
>> rc = xive_setup_silent_gather(vp_id, false);
>> } else {
>> - vp_new.w0 = vp_new.w6 = vp_new.w7 = 0;
>> + vp_new.__w0 = vp_new.__w6 = vp_new.__w7 = 0;
>> rc = xive_setup_silent_gather(vp_id, false);
>> }
>>
>> @@ -4504,7 +4525,7 @@ bail:
>> return rc;
>> }
>>
>> -static int64_t opal_xive_get_vp_state(uint64_t vp_id, uint64_t *out_state)
>> +static int64_t opal_xive_get_vp_state(uint64_t vp_id, __be64 *out_state)
>> {
>> struct xive *x;
>> struct xive_vp *vp;
>> @@ -4530,14 +4551,14 @@ static int64_t opal_xive_get_vp_state(uint64_t vp_id, uint64_t *out_state)
>> if (rc)
>> return rc;
>>
>> - if (!(vp->w0 & VP_W0_VALID))
>> + if (!(be32_to_cpu(vp->__w0) & VP_W0_VALID))
>> return OPAL_WRONG_STATE;
>>
>> /*
>> * Return word4 and word5 which contain the saved HW thread
>> * context. The IPB register is all we care for now on P9.
>> */
>> - *out_state = (((uint64_t)vp->w4) << 32) | vp->w5;
>> + *out_state = cpu_to_be64((((uint64_t)be32_to_cpu(vp->__w4)) << 32) | be32_to_cpu(vp->__w5));
>>
>> return OPAL_SUCCESS;
>> }
>> @@ -4627,7 +4648,7 @@ static void xive_cleanup_vp_ind(struct xive *x)
>>
>> xive_dbg(x, "Cleaning up %d VP ind entries...\n", x->vp_ind_count);
>> for (i = 0; i < x->vp_ind_count; i++) {
>> - if (x->vp_ind_base[i] & VSD_FIRMWARE) {
>> + if (be64_to_cpu(x->vp_ind_base[i]) & VSD_FIRMWARE) {
>> xive_dbg(x, " %04x ... skip (firmware)\n", i);
>> continue;
>> }
>> @@ -4645,7 +4666,7 @@ static void xive_cleanup_eq_ind(struct xive *x)
>>
>> xive_dbg(x, "Cleaning up %d EQ ind entries...\n", x->eq_ind_count);
>> for (i = 0; i < x->eq_ind_count; i++) {
>> - if (x->eq_ind_base[i] & VSD_FIRMWARE) {
>> + if (be64_to_cpu(x->eq_ind_base[i]) & VSD_FIRMWARE) {
>> xive_dbg(x, " %04x ... skip (firmware)\n", i);
>> continue;
>> }
>> @@ -4692,25 +4713,28 @@ static void xive_reset_one(struct xive *x)
>> eq_firmware = false;
>> for (j = 0; j < 8; j++) {
>> uint32_t idx = (i << 3) | j;
>> + uint32_t w0;
>>
>> eq = xive_get_eq(x, idx);
>> if (!eq)
>> continue;
>>
>> + w0 = be32_to_cpu(eq->__w0);
>> +
>> /* We need to preserve the firmware bit, otherwise
>> * we will incorrectly free the EQs that are reserved
>> * for the physical CPUs
>> */
>> - if (eq->w0 & EQ_W0_VALID) {
>> - if (!(eq->w0 & EQ_W0_FIRMWARE))
>> + if (w0 & EQ_W0_VALID) {
>> + if (!(w0 & EQ_W0_FIRMWARE))
>> xive_dbg(x, "EQ 0x%x:0x%x is valid at reset: %08x %08x\n",
>> - x->block_id, idx, eq->w0, eq->w1);
>> + x->block_id, idx, w0, be32_to_cpu(eq->__w1));
>> eq0 = *eq;
>> xive_cleanup_eq(&eq0);
>> xive_eqc_cache_update(x, x->block_id,
>> idx, 0, 4, &eq0, false, true);
>> }
>> - if (eq->w0 & EQ_W0_FIRMWARE)
>> + if (w0 & EQ_W0_FIRMWARE)
>> eq_firmware = true;
>> }
>> if (!eq_firmware)
>> @@ -4747,7 +4771,7 @@ static void xive_reset_one(struct xive *x)
>> #endif
>> /* Is the VP valid ? */
>> vp = xive_get_vp(x, i);
>> - if (!vp || !(vp->w0 & VP_W0_VALID))
>> + if (!vp || !(be32_to_cpu(vp->__w0) & VP_W0_VALID))
>> continue;
>>
>> /* Clear it */
>> @@ -4935,16 +4959,16 @@ static int64_t opal_xive_free_vp_block(uint64_t vp_base)
>> }
>>
>> /* VP must be disabled */
>> - if (vp->w0 & VP_W0_VALID) {
>> + if (be32_to_cpu(vp->__w0) & VP_W0_VALID) {
>> prlog(PR_ERR, "XIVE: freeing active VP %d\n", vp_id);
>> return OPAL_XIVE_FREE_ACTIVE;
>> }
>>
>> /* Not populated */
>> - if (vp->w1 == 0)
>> + if (vp->__w1 == 0)
>> continue;
>> - eq_blk = vp->w1 >> 28;
>> - eq_idx = vp->w1 & 0x0fffffff;
>> + eq_blk = be32_to_cpu(vp->__w1) >> 28;
>> + eq_idx = be32_to_cpu(vp->__w1) & 0x0fffffff;
>>
>> lock(&x->lock);
>>
>> @@ -4955,7 +4979,7 @@ static int64_t opal_xive_free_vp_block(uint64_t vp_base)
>> struct xive *eq_x = xive_from_vc_blk(eq_blk);
>> struct xive_eq eq, *orig_eq = xive_get_eq(eq_x, eq_idx + j);
>>
>> - if (!(orig_eq->w0 & EQ_W0_VALID))
>> + if (!(be32_to_cpu(orig_eq->__w0) & EQ_W0_VALID))
>> continue;
>>
>> prlog(PR_WARNING, "XIVE: freeing VP %d with queue %d active\n",
>> @@ -4966,7 +4990,7 @@ static int64_t opal_xive_free_vp_block(uint64_t vp_base)
>> }
>>
>> /* Mark it not populated so we don't try to free it again */
>> - vp->w1 = 0;
>> + vp->__w1 = 0;
>>
>> if (eq_blk != blk) {
>> prerror("XIVE: Block mismatch trying to free EQs\n");
>> @@ -5043,8 +5067,8 @@ static int64_t opal_xive_alloc_vp_block(uint32_t alloc_order)
>> * it out of the cache.
>> */
>> memset(vp, 0, sizeof(*vp));
>> - vp->w1 = (blk << 28) | eqs;
>> - vp->w5 = 0xff000000;
>> + vp->__w1 = cpu_to_be32((blk << 28) | eqs);
>> + vp->__w5 = cpu_to_be32(0xff000000);
>> }
>> return vp_base;
>> fail:
>> @@ -5081,7 +5105,7 @@ static int64_t xive_try_allocate_irq(struct xive *x)
>> unlock(&x->lock);
>> return OPAL_PARAMETER;
>> }
>> - ive->w = IVE_VALID | IVE_MASKED | SETFIELD(IVE_EQ_DATA, 0ul, girq);
>> + ive->__w = cpu_to_be64(IVE_VALID | IVE_MASKED | SETFIELD(IVE_EQ_DATA, 0ul, girq));
>> unlock(&x->lock);
>>
>> return girq;
>> @@ -5150,7 +5174,7 @@ static int64_t opal_xive_free_irq(uint32_t girq)
>> xive_update_irq_mask(s, girq - s->esb_base, true);
>>
>> /* Mark the IVE masked and invalid */
>> - ive->w = IVE_MASKED | IVE_VALID;
>> + ive->__w = cpu_to_be64(IVE_MASKED | IVE_VALID);
>> xive_ivc_scrub(x, x->block_id, idx);
>>
>> /* Free it */
>> @@ -5290,7 +5314,7 @@ static int64_t __opal_xive_dump_emu(struct xive_cpu_state *xs, uint32_t pir)
>> false, false);
>> eq = xive_get_eq(xs->xive, xs->eq_idx + XIVE_EMULATION_PRIO);
>> prlog(PR_INFO, "CPU[%04x]: EQ @%p W0=%08x W1=%08x qbuf @%p\n",
>> - pir, eq, eq->w0, eq->w1, xs->eqbuf);
>> + pir, eq, be32_to_cpu(eq->__w0), be32_to_cpu(eq->__w1), xs->eqbuf);
>>
>> return OPAL_SUCCESS;
>> }
>> diff --git a/include/xive.h b/include/xive.h
>> index b88cdabea..95113efa8 100644
>> --- a/include/xive.h
>> +++ b/include/xive.h
>> @@ -382,7 +382,7 @@ struct xive_ive {
>> /* Use a single 64-bit definition to make it easier to
>> * perform atomic updates
>> */
>> - uint64_t w;
>> + __be64 __w;
>> #define IVE_VALID PPC_BIT(0)
>> #define IVE_EQ_BLOCK PPC_BITMASK(4,7) /* Destination EQ block# */
>> #define IVE_EQ_INDEX PPC_BITMASK(8,31) /* Destination EQ index */
>> @@ -392,7 +392,7 @@ struct xive_ive {
>>
>> /* EQ */
>> struct xive_eq {
>> - uint32_t w0;
>> + __be32 __w0;
>> #define EQ_W0_VALID PPC_BIT32(0) /* "v" bit */
>> #define EQ_W0_ENQUEUE PPC_BIT32(1) /* "q" bit */
>> #define EQ_W0_UCOND_NOTIFY PPC_BIT32(2) /* "n" bit */
>> @@ -407,7 +407,7 @@ struct xive_eq {
>> #define EQ_QSIZE_4K 0
>> #define EQ_QSIZE_64K 4
>> #define EQ_W0_HWDEP PPC_BITMASK32(24,31)
>> - uint32_t w1;
>> + __be32 __w1;
>> #define EQ_W1_ESn PPC_BITMASK32(0,1)
>> #define EQ_W1_ESn_P PPC_BIT32(0)
>> #define EQ_W1_ESn_Q PPC_BIT32(1)
>> @@ -416,21 +416,21 @@ struct xive_eq {
>> #define EQ_W1_ESe_Q PPC_BIT32(3)
>> #define EQ_W1_GENERATION PPC_BIT32(9)
>> #define EQ_W1_PAGE_OFF PPC_BITMASK32(10,31)
>> - uint32_t w2;
>> + __be32 __w2;
>> #define EQ_W2_MIGRATION_REG PPC_BITMASK32(0,3)
>> #define EQ_W2_OP_DESC_HI PPC_BITMASK32(4,31)
>> - uint32_t w3;
>> + __be32 __w3;
>> #define EQ_W3_OP_DESC_LO PPC_BITMASK32(0,31)
>> - uint32_t w4;
>> + __be32 __w4;
>> #define EQ_W4_ESC_EQ_BLOCK PPC_BITMASK32(4,7)
>> #define EQ_W4_ESC_EQ_INDEX PPC_BITMASK32(8,31)
>> - uint32_t w5;
>> + __be32 __w5;
>> #define EQ_W5_ESC_EQ_DATA PPC_BITMASK32(1,31)
>> - uint32_t w6;
>> + __be32 __w6;
>> #define EQ_W6_FORMAT_BIT PPC_BIT32(8)
>> #define EQ_W6_NVT_BLOCK PPC_BITMASK32(9,12)
>> #define EQ_W6_NVT_INDEX PPC_BITMASK32(13,31)
>> - uint32_t w7;
>> + __be32 __w7;
>> #define EQ_W7_F0_IGNORE PPC_BIT32(0)
>> #define EQ_W7_F0_BLK_GROUPING PPC_BIT32(1)
>> #define EQ_W7_F0_PRIORITY PPC_BITMASK32(8,15)
>> @@ -440,24 +440,24 @@ struct xive_eq {
>>
>> /* VP */
>> struct xive_vp {
>> - uint32_t w0;
>> + __be32 __w0;
>> #define VP_W0_VALID PPC_BIT32(0)
>> - uint32_t w1;
>> - uint32_t w2;
>> - uint32_t w3;
>> - uint32_t w4;
>> - uint32_t w5;
>> - uint32_t w6;
>> - uint32_t w7;
>> - uint32_t w8;
>> + __be32 __w1;
>> + __be32 __w2;
>> + __be32 __w3;
>> + __be32 __w4;
>> + __be32 __w5;
>> + __be32 __w6;
>> + __be32 __w7;
>> + __be32 __w8;
>> #define VP_W8_GRP_VALID PPC_BIT32(0)
>> - uint32_t w9;
>> - uint32_t wa;
>> - uint32_t wb;
>> - uint32_t wc;
>> - uint32_t wd;
>> - uint32_t we;
>> - uint32_t wf;
>> + __be32 __w9;
>> + __be32 __wa;
>> + __be32 __wb;
>> + __be32 __wc;
>> + __be32 __wd;
>> + __be32 __we;
>> + __be32 __wf;
>> };
>> /* Internal APIs to other modules */
>