[Skiboot] [EXTERNAL] [PATCH v6 13/29] xive: make endian-clean

Cédric Le Goater clg at kaod.org
Thu Nov 7 18:38:04 AEDT 2019


On 07/11/2019 07:32, Nicholas Piggin wrote:
> Nicholas Piggin wrote on November 7, 2019 4:26 pm:
>> Cédric Le Goater wrote on November 7, 2019 1:38 am:
>>> On 06/11/2019 13:10, Nicholas Piggin wrote:
>>>> Convert xive opal calls, dt construction, and in-memory hardware tables
>>>> to use explicit endian conversions.
>>>
>>> Thanks for reworking the patch with helper routines. This is a huge 
>>> amount of work. You are setting the standard pretty high for xive2!
>>
>> Well, once I took your nice helpers, the rest was only mindless
>> conversions. I thought I'd better do it rather than pull you away from
>> useful work :)
>>
>>> Did you run a KVM guest?
>>
>> I just pulled the latest QEMU and tried. There were some bugs in the
>> conversion, so here is an updated patch. I won't re-send the series
>> until you get a chance to ack it, at least.
> 
> Oh, one other thing I hit while fixing one of my bugs: what do you
> think of this?
> 
>     xive: improve cache watch type checking
>     
>     The cache update functions take void * pointers and repeat the same
>     parameters at every call site. Make them more specific by adding
>     another function, so the data pointer can be typed and some
>     arguments can be inferred.

This is good. I tripped over the same problem on the QEMU side when
modeling the cache updates.
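
To illustrate the type-safety gain, here is a minimal standalone sketch
of the pattern (hypothetical names and entry sizes, not skiboot's actual
declarations). The generic core still takes a raw pointer and explicit
dword parameters, but callers go through per-type wrappers, so the
counts are fixed in one place and a mismatched pointer becomes a
compile-time error:

#include <stdint.h>

struct eq_entry { uint64_t w[4]; };	/* 4 dwords per EQ entry */
struct vp_entry { uint64_t w[8]; };	/* 8 dwords per VP entry */

/* Generic core, as before: start/count must match the table type. */
static int64_t cache_watch(uint32_t start, uint32_t count,
			   const uint64_t *data)
{
	(void)start; (void)count; (void)data;
	return 0;			/* hardware access elided */
}

/* Typed wrappers: the dword count is inferred from the entry type. */
static int64_t eqc_update(const struct eq_entry *eq)
{
	return cache_watch(0, 4, eq->w);
}

static int64_t vpc_update(const struct vp_entry *vp)
{
	return cache_watch(0, 8, vp->w);
}

A call site then shrinks from cache_watch(0, 4, ...) to eqc_update(&eq),
and passing a struct vp_entry * to eqc_update() is rejected by the
compiler instead of silently corrupting the table.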

But I would prefer it to come as a cleanup of the current code, before
adding the BE helpers.
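
For reference, the BE helpers mentioned here follow a read-modify-write
pattern with explicit endian conversions. A minimal sketch in the spirit
of xive_set_field64() (illustrative only: it uses glibc's
be64toh()/htobe64() and a GCC builtin rather than skiboot's own
be64_to_cpu()/cpu_to_be64() and SETFIELD macros):

#include <stdint.h>
#include <endian.h>

typedef uint64_t beint64_t;	/* stand-in for a sparse-checked __be64 */

/* Update a bitfield of a word that lives in memory in big-endian
 * order: convert to host order, modify, convert back. */
static inline beint64_t set_field64(uint64_t mask, beint64_t word,
				    uint64_t value)
{
	uint64_t v = be64toh(word);

	/* mask must be non-zero; shift value down to the field's LSB */
	v = (v & ~mask) | ((value << __builtin_ctzll(mask)) & mask);
	return htobe64(v);
}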

Thanks,

C. 


> 
> diff --git a/hw/xive.c b/hw/xive.c
> index b52857cc2..84d7275c5 100644
> --- a/hw/xive.c
> +++ b/hw/xive.c
> @@ -1042,7 +1042,7 @@ enum xive_cache_type {
>  static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
>  				  uint64_t block, uint64_t idx,
>  				  uint32_t start_dword, uint32_t dword_count,
> -				  void *new_data, bool light_watch,
> +				  __be64 *new_data, bool light_watch,
>  				  bool synchronous);
>  
>  static void xive_scrub_workaround_vp(struct xive *x, uint32_t block, uint32_t idx __unused)
> @@ -1197,7 +1197,7 @@ static int64_t xive_eqc_scrub(struct xive *x, uint64_t block, uint64_t idx)
>  static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
>  				  uint64_t block, uint64_t idx,
>  				  uint32_t start_dword, uint32_t dword_count,
> -				  void *new_data, bool light_watch,
> +				  __be64 *new_data, bool light_watch,
>  				  bool synchronous)
>  {
>  	uint64_t sreg, sregx, dreg0, dreg0x;
> @@ -1250,7 +1250,7 @@ static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
>  		 * one written.
>  		 */
>  		for (i = start_dword + dword_count - 1; i >= start_dword ;i--) {
> -			uint64_t dw = be64_to_cpu(((__be64 *)new_data)[i - start_dword]);
> +			uint64_t dw = be64_to_cpu(new_data[i - start_dword]);
>  			__xive_regw(x, dreg0 + i * 8, dreg0x + i, dw, NULL);
>  		}
>  
> @@ -1284,24 +1284,28 @@ static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
>  	return __xive_cache_scrub(x, ctype, block, idx, false, false);
>  }
>  
> +static int64_t xive_escalation_ive_cache_update(struct xive *x, uint64_t block,
> +				     uint64_t idx, struct xive_ive *ive,
> +				     bool synchronous)
> +{
> +	return __xive_cache_watch(x, xive_cache_eqc, block, idx,
> +				  2, 1, &ive->w, true, synchronous);
> +}
> +
>  static int64_t xive_eqc_cache_update(struct xive *x, uint64_t block,
> -				     uint64_t idx, uint32_t start_dword,
> -				     uint32_t dword_count, void *new_data,
> -				     bool light_watch, bool synchronous)
> +				     uint64_t idx, struct xive_eq *eq,
> +				     bool synchronous)
>  {
>  	return __xive_cache_watch(x, xive_cache_eqc, block, idx,
> -				  start_dword, dword_count,
> -				  new_data, light_watch, synchronous);
> +				  0, 4, (__be64 *)eq, false, synchronous);
>  }
>  
>  static int64_t xive_vpc_cache_update(struct xive *x, uint64_t block,
> -				     uint64_t idx, uint32_t start_dword,
> -				     uint32_t dword_count, void *new_data,
> -				     bool light_watch, bool synchronous)
> +				     uint64_t idx, struct xive_vp *vp,
> +				     bool synchronous)
>  {
>  	return __xive_cache_watch(x, xive_cache_vpc, block, idx,
> -				  start_dword, dword_count,
> -				  new_data, light_watch, synchronous);
> +				  0, 8, (__be64 *)vp, false, synchronous);
>  }
>  
>  static bool xive_set_vsd(struct xive *x, uint32_t tbl, uint32_t idx, uint64_t v)
> @@ -2126,10 +2130,9 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>  				       bool synchronous)
>  {
>  	struct xive *x;
> -	struct xive_ive *ive;
> +	struct xive_ive *ive, new_ive;
>  	uint32_t eq_blk, eq_idx;
>  	bool is_escalation = GIRQ_IS_ESCALATION(isn);
> -	__be64 new_ive;
>  	int64_t rc;
>  
>  	/* Find XIVE on which the IVE resides */
> @@ -2152,18 +2155,18 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>  		prio = XIVE_EMULATION_PRIO;
>  
>  	/* Read existing IVE */
> -	new_ive = ive->w;
> +	new_ive = *ive;
>  
>  	/* Are we masking ? */
>  	if (prio == 0xff && !is_escalation) {
> -		new_ive = xive_set_field64(IVE_MASKED, new_ive, 1);
> +		new_ive.w = xive_set_field64(IVE_MASKED, new_ive.w, 1);
>  		xive_vdbg(x, "ISN %x masked !\n", isn);
>  
>  		/* Put prio 7 in the EQ */
>  		prio = XIVE_MAX_PRIO;
>  	} else {
>  		/* Unmasking */
> -		new_ive = xive_set_field64(IVE_MASKED, new_ive, 0);
> +		new_ive.w = xive_set_field64(IVE_MASKED, new_ive.w, 0);
>  		xive_vdbg(x, "ISN %x unmasked !\n", isn);
>  
>  		/* For normal interrupt sources, keep track of which ones
> @@ -2187,23 +2190,23 @@ static int64_t xive_set_irq_targetting(uint32_t isn, uint32_t target,
>  		/* Try to update it atomically to avoid an intermediary
>  		 * stale state
>  		 */
> -		new_ive = xive_set_field64(IVE_EQ_BLOCK, new_ive, eq_blk);
> -		new_ive = xive_set_field64(IVE_EQ_INDEX, new_ive, eq_idx);
> +		new_ive.w = xive_set_field64(IVE_EQ_BLOCK, new_ive.w, eq_blk);
> +		new_ive.w = xive_set_field64(IVE_EQ_INDEX, new_ive.w, eq_idx);
>  	}
> -	new_ive = xive_set_field64(IVE_EQ_DATA, new_ive, lirq);
> +	new_ive.w = xive_set_field64(IVE_EQ_DATA, new_ive.w, lirq);
>  
>  	xive_vdbg(x,"ISN %x routed to eq %x/%x lirq=%08x IVE=%016llx !\n",
> -		  isn, eq_blk, eq_idx, lirq, be64_to_cpu(new_ive));
> +		  isn, eq_blk, eq_idx, lirq, be64_to_cpu(new_ive.w));
>  
>  	/* Updating the cache differs between real IVEs and escalation
>  	 * IVEs inside an EQ
>  	 */
>  	if (is_escalation) {
> -		rc = xive_eqc_cache_update(x, x->block_id, GIRQ_TO_IDX(isn),
> -					   2, 1, &new_ive, true, synchronous);
> +		rc = xive_escalation_ive_cache_update(x, x->block_id,
> +				GIRQ_TO_IDX(isn), &new_ive, synchronous);
>  	} else {
>  		sync();
> -		ive->w = new_ive;
> +		*ive = new_ive;
>  		rc = xive_ivc_scrub(x, x->block_id, GIRQ_TO_IDX(isn));
>  	}
>  
> @@ -2831,8 +2834,7 @@ static void xive_special_cache_check(struct xive *x, uint32_t blk, uint32_t idx)
>  		memset(vp_m, (~i) & 0xff, sizeof(*vp_m));
>  		sync();
>  		vp.w1 = cpu_to_be32((i << 16) | i);
> -		xive_vpc_cache_update(x, blk, idx,
> -				      0, 8, &vp, false, true);
> +		xive_vpc_cache_update(x, blk, idx, &vp, true);
>  		if (!xive_check_vpc_update(x, idx, &vp)) {
>  			xive_dbg(x, "Test failed at %d iterations\n", i);
>  			return;
> @@ -2874,9 +2876,7 @@ static void xive_setup_hw_for_emu(struct xive_cpu_state *xs)
>  	/* Use the cache watch to write it out */
>  	lock(&x_eq->lock);
>  
> -	xive_eqc_cache_update(x_eq, xs->eq_blk,
> -			      xs->eq_idx + XIVE_EMULATION_PRIO,
> -			      0, 4, &eq, false, true);
> +	xive_eqc_cache_update(x_eq, xs->eq_blk, xs->eq_idx + XIVE_EMULATION_PRIO, &eq, true);
>  	xive_check_eq_update(x_eq, xs->eq_idx + XIVE_EMULATION_PRIO, &eq);
>  
>  	/* Extra testing of cache watch & scrub facilities */
> @@ -2888,8 +2888,7 @@ static void xive_setup_hw_for_emu(struct xive_cpu_state *xs)
>  
>  	/* Use the cache watch to write it out */
>  	lock(&x_vp->lock);
> -	xive_vpc_cache_update(x_vp, xs->vp_blk, xs->vp_idx,
> -			      0, 8, &vp, false, true);
> +	xive_vpc_cache_update(x_vp, xs->vp_blk, xs->vp_idx, &vp, true);
>  	xive_check_vpc_update(x_vp, xs->vp_idx, &vp);
>  	unlock(&x_vp->lock);
>  }
> @@ -2937,8 +2936,7 @@ static void xive_init_cpu_exploitation(struct xive_cpu_state *xs)
>  
>  	/* Use the cache watch to write it out */
>  	lock(&x_vp->lock);
> -	xive_vpc_cache_update(x_vp, xs->vp_blk, xs->vp_idx,
> -			      0, 8, &vp, false, true);
> +	xive_vpc_cache_update(x_vp, xs->vp_blk, xs->vp_idx, &vp, true);
>  	unlock(&x_vp->lock);
>  
>  	/* Cleanup remaining state */
> @@ -3913,7 +3911,7 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
>  
>  	/* Update EQ, non-synchronous */
>  	lock(&x->lock);
> -	rc = xive_eqc_cache_update(x, blk, idx, 0, 4, &eq, false, false);
> +	rc = xive_eqc_cache_update(x, blk, idx, &eq, false);
>  	unlock(&x->lock);
>  
>  	return rc;
> @@ -3992,7 +3990,7 @@ static int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
>  	new_eq.w1 = xive_set_field32(EQ_W1_PAGE_OFF, new_eq.w1, qindex);
>  
>  	lock(&x->lock);
> -	rc = xive_eqc_cache_update(x, blk, idx, 0, 4, &new_eq, false, false);
> +	rc = xive_eqc_cache_update(x, blk, idx, &new_eq, false);
>  	unlock(&x->lock);
>  
>  	return rc;
> @@ -4137,8 +4135,7 @@ static int64_t xive_setup_silent_gather(uint64_t vp_id, bool enable)
>  	if (!memcmp(eq_orig, &eq, sizeof(eq)))
>  		rc = 0;
>  	else
> -		rc = xive_eqc_cache_update(x, blk, idx + 7, 0, 4, &eq,
> -					   false, false);
> +		rc = xive_eqc_cache_update(x, blk, idx + 7, &eq, false);
>  	if (rc)
>  		return rc;
>  
> @@ -4168,8 +4165,7 @@ static int64_t xive_setup_silent_gather(uint64_t vp_id, bool enable)
>  		}
>  		if (!memcmp(eq_orig, &eq, sizeof(eq)))
>  			continue;
> -		rc = xive_eqc_cache_update(x, blk, idx + i, 0, 4, &eq,
> -					   false, false);
> +		rc = xive_eqc_cache_update(x, blk, idx + i, &eq, false);
>  		if (rc)
>  			break;
>  	}
> @@ -4224,7 +4220,7 @@ static int64_t opal_xive_set_vp_info(uint64_t vp_id,
>  		goto bail;
>  	}
>  
> -	rc = xive_vpc_cache_update(x, blk, idx, 0, 8, &vp_new, false, false);
> +	rc = xive_vpc_cache_update(x, blk, idx, &vp_new, false);
>  	if (rc)
>  		goto bail;
>  
> @@ -4440,8 +4436,7 @@ static void xive_reset_one(struct xive *x)
>  						 x->block_id, idx, be32_to_cpu(eq->w0), be32_to_cpu(eq->w1));
>  				eq0 = *eq;
>  				xive_cleanup_eq(&eq0);
> -				xive_eqc_cache_update(x, x->block_id,
> -						      idx, 0, 4, &eq0, false, true);
> +				xive_eqc_cache_update(x, x->block_id, idx, &eq0, true);
>  			}
>  			if (xive_get_field32(EQ_W0_FIRMWARE, eq->w0))
>  				eq_firmware = true;
> @@ -4479,8 +4474,7 @@ static void xive_reset_one(struct xive *x)
>  
>  		/* Clear it */
>  		xive_dbg(x, "VP 0x%x:0x%x is valid at reset\n", x->block_id, i);
> -		xive_vpc_cache_update(x, x->block_id,
> -				      i, 0, 8, &vp0, false, true);
> +		xive_vpc_cache_update(x, x->block_id, i, &vp0, true);
>  	}
>  
>  	/* Forget about remaining donated pages */
> @@ -4671,7 +4665,7 @@ static int64_t opal_xive_free_vp_block(uint64_t vp_base)
>  			      vp_id, j);
>  			eq = *orig_eq;
>  			xive_cleanup_eq(&eq);
> -			xive_eqc_cache_update(x, eq_blk, eq_idx + j, 0, 4, &eq, false, true);
> +			xive_eqc_cache_update(x, eq_blk, eq_idx + j, &eq, true);
>  		}
>  
>  		/* Mark it not populated so we don't try to free it again */
> 


