[PATCH] powerpc/papr_scm: Make 'perf_stats' invisible if perf-stats unavailable

Aneesh Kumar K.V aneesh.kumar at linux.ibm.com
Thu May 6 15:02:20 AEST 2021


Vaibhav Jain <vaibhav at linux.ibm.com> writes:

> In case performance stats for an nvdimm are not available, reading the
> 'perf_stats' sysfs file returns an -ENOENT error. A better approach is
> to make the 'perf_stats' file entirely invisible to indicate that
> performance stats for an nvdimm are unavailable.
>
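[ For context, the userspace-visible difference being argued for here can be
  illustrated roughly as below. This is not part of the patch; the device name
  "nmem0" is just an example. ]

/*
 * Illustrative userspace check only: with the old behaviour the perf_stats
 * file exists but reading it fails with ENOENT; with this change the file is
 * simply not created, so its absence alone signals that performance stats
 * are unsupported.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	const char *path = "/sys/bus/nd/devices/nmem0/papr/perf_stats";

	if (stat(path, &st) == 0)
		printf("perf_stats exposed: performance stats supported\n");
	else
		printf("perf_stats absent: performance stats unsupported\n");
	return 0;
}
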
> So this patch updates 'papr_nd_attribute_group' to add an 'is_visible'
> callback, implemented as the newly introduced 'papr_nd_attribute_visible()',
> which returns an appropriate mode when performance stats aren't
> supported for a given nvdimm.
>
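[ For anyone not familiar with the sysfs mechanism used here: '.is_visible' is
  called once per attribute while the attribute group is being created;
  returning 0 hides that attribute completely, returning attr->mode keeps it
  with its default permissions. A minimal generic sketch follows; the demo_*
  names are made up and not from this patch. ]

#include <linux/device.h>
#include <linux/sysfs.h>

static bool demo_feature_supported;	/* stands in for p->stat_buffer_len != 0 */

static ssize_t demo_stat_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(demo_stat);

static umode_t demo_attr_visible(struct kobject *kobj,
				 struct attribute *attr, int n)
{
	/* Returning 0 means the file is never created for this device */
	if (attr == &dev_attr_demo_stat.attr && !demo_feature_supported)
		return 0;

	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_stat.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.name		= "demo",
	.is_visible	= demo_attr_visible,
	.attrs		= demo_attrs,
};

[ In papr_scm the group is part of the dimm attribute groups handed to
  nvdimm_create(), which is why 'stat_buffer_len' has to be known by then;
  that is the point of the hunk below moving drc_pmem_query_stats() into
  papr_scm_probe(). ]
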
> Also, the initialization of 'papr_scm_priv.stat_buffer_len' is moved
> from papr_scm_nvdimm_init() to papr_scm_probe() so that its value is
> available when 'papr_nd_attribute_visible()' is called during nvdimm
> initialization.
>
> Fixes: 2d02bf835e57 ("powerpc/papr_scm: Fetch nvdimm performance stats from PHYP")
> Signed-off-by: Vaibhav Jain <vaibhav at linux.ibm.com>
> ---
>  arch/powerpc/platforms/pseries/papr_scm.c | 37 ++++++++++++++++-------
>  1 file changed, 26 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
> index 12f1513f0fca..90f0af8fefe8 100644
> --- a/arch/powerpc/platforms/pseries/papr_scm.c
> +++ b/arch/powerpc/platforms/pseries/papr_scm.c
> @@ -907,6 +907,20 @@ static ssize_t flags_show(struct device *dev,
>  }
>  DEVICE_ATTR_RO(flags);
>  
> +umode_t papr_nd_attribute_visible(struct kobject *kobj, struct attribute *attr,
> +				  int n)
> +{
> +	struct device *dev = container_of(kobj, typeof(*dev), kobj);
> +	struct nvdimm *nvdimm = to_nvdimm(dev);
> +	struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
> +
> +	/* Hide the perf_stats sysfs attribute when perf-stats are unavailable */
> +	if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
> +		return 0;
> +
> +	return attr->mode;
> +}
> +
>  /* papr_scm specific dimm attributes */
>  static struct attribute *papr_nd_attributes[] = {
>  	&dev_attr_flags.attr,
> @@ -916,6 +930,7 @@ static struct attribute *papr_nd_attributes[] = {
>  
>  static struct attribute_group papr_nd_attribute_group = {
>  	.name = "papr",
> +	.is_visible = papr_nd_attribute_visible,
>  	.attrs = papr_nd_attributes,
>  };
>  
> @@ -931,7 +946,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
>  	struct nd_region_desc ndr_desc;
>  	unsigned long dimm_flags;
>  	int target_nid, online_nid;
> -	ssize_t stat_size;
>  
>  	p->bus_desc.ndctl = papr_scm_ndctl;
>  	p->bus_desc.module = THIS_MODULE;
> @@ -1016,16 +1030,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
>  	list_add_tail(&p->region_list, &papr_nd_regions);
>  	mutex_unlock(&papr_ndr_lock);
>  
> -	/* Try retriving the stat buffer and see if its supported */
> -	stat_size = drc_pmem_query_stats(p, NULL, 0);
> -	if (stat_size > 0) {
> -		p->stat_buffer_len = stat_size;
> -		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
> -			p->stat_buffer_len);
> -	} else {
> -		dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
> -	}
> -
>  	return 0;
>  
>  err:	nvdimm_bus_unregister(p->bus);
> @@ -1102,6 +1106,7 @@ static int papr_scm_probe(struct platform_device *pdev)
>  	u64 blocks, block_size;
>  	struct papr_scm_priv *p;
>  	const char *uuid_str;
> +	ssize_t stat_size;
>  	u64 uuid[2];
>  	int rc;
>  
> @@ -1179,6 +1184,16 @@ static int papr_scm_probe(struct platform_device *pdev)
>  	p->res.name  = pdev->name;
>  	p->res.flags = IORESOURCE_MEM;
>  
> +	/* Try retriving the stat buffer and see if its supported */
> +	stat_size = drc_pmem_query_stats(p, NULL, 0);
> +	if (stat_size > 0) {
> +		p->stat_buffer_len = stat_size;
> +		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
> +			p->stat_buffer_len);
> +	} else {
> +		dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
> +	}

With this patch https://lore.kernel.org/linuxppc-dev/20210505191606.51666-1-vaibhav@linux.ibm.com
we are adding details of why the performance stat query hcall failed. Do we
need to print again here? Or are we just being more verbose here?

-aneesh

