[PATCH 2/2] tools/perf: Fix out of bound access to cpu mask array

R Nageswara Sastry rnsastry at linux.ibm.com
Mon Sep 5 16:56:58 AEST 2022



On 05/09/22 10:24 am, Athira Rajeev wrote:
> The cpu mask init code in the "record__mmap_cpu_mask_init"
> function accesses the "bits" array, which is part of
> "struct mmap_cpu_mask". The size of this array is the value
> from cpu__max_cpu().cpu, and it holds the cpumask value for
> each cpu. While setting the bit for each cpu, the function
> calls "set_bit", which accesses an index in the "bits" array.
> If the -C command line option is given a CPU number greater
> than the number of CPUs present in the system, set_bit can
> access an array member beyond the end of the array, because
> there is currently no boundary check for the CPU. This
> results in a segmentation fault:
> 
> <<>>
> ./perf record -C 12341234 ls
> Perf can support 2048 CPUs. Consider raising MAX_NR_CPUS
> Segmentation fault (core dumped)
> <<>>
> 
> Debugging with gdb points to the following call flow:
> 
> <<>>
> set_bit
> record__mmap_cpu_mask_init
> record__init_thread_default_masks
> record__init_thread_masks
> cmd_record
> <<>>
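
For anyone reading along, here is a minimal standalone sketch of the
failure mode, not the perf code itself: the sketch_set_bit() helper,
the SKETCH_BITS_PER_LONG macro and the hard-coded sizes are
illustrative stand-ins for the kernel-style set_bit() and
cpu__max_cpu().cpu. It shows why an unchecked bit index writes far
past the allocation, and the kind of boundary check the patch adds.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Same idea as set_bit(): it performs no boundary check of its own. */
static void sketch_set_bit(unsigned long nr, unsigned long *bits)
{
	bits[nr / SKETCH_BITS_PER_LONG] |= 1UL << (nr % SKETCH_BITS_PER_LONG);
}

int main(void)
{
	unsigned long nbits = 2048;	/* stands in for cpu__max_cpu().cpu */
	unsigned long nlongs = (nbits + SKETCH_BITS_PER_LONG - 1) / SKETCH_BITS_PER_LONG;
	unsigned long *bits = calloc(nlongs, sizeof(*bits));
	unsigned long cpu = 12341234;	/* as given to "-C 12341234" */

	if (!bits)
		return 1;

	/*
	 * This is the kind of check the patch adds. Without it,
	 * sketch_set_bit(cpu, bits) would write roughly 1.5 MB past
	 * the end of the 256-byte allocation.
	 */
	if (cpu > nbits) {
		fprintf(stderr, "cpu %lu does not fit in a %lu-bit mask\n", cpu, nbits);
		free(bits);
		return 1;
	}

	sketch_set_bit(cpu, bits);
	free(bits);
	return 0;
}
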
> 
> Fix this by adding a boundary check for the array.
> 
> After the patch:
> <<>>
> ./perf record -C 12341234 ls
> Perf can support 2048 CPUs. Consider raising MAX_NR_CPUS
> Failed to initialize parallel data streaming masks
> <<>>
> 
> With this fix, if -C is given a non-existent CPU, perf
> record will fail with:
> 
> <<>>
>   ./perf record -C 50 ls
> Failed to initialize parallel data streaming masks
> <<>>
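
The user-visible part of the fix is the error propagation: the init
helper now returns 0 or -ENODEV, and its callers report the failure
and bail out instead of continuing with a partially initialised mask,
which is how "Failed to initialize parallel data streaming masks"
ends up on the console. Below is a trimmed-down sketch of that
contract, with made-up names (sketch_mask, sketch_mask_init,
sketch_init_masks) standing in for the perf structures and functions.

#include <errno.h>
#include <stdio.h>

#define SKETCH_LONG_BITS (8 * sizeof(unsigned long))

struct sketch_mask {
	unsigned long nbits;
	unsigned long *bits;
};

/* Stand-in for record__mmap_cpu_mask_init(): returns 0 on success,
 * -ENODEV when a requested cpu does not fit into the mask. */
static int sketch_mask_init(struct sketch_mask *mask, const int *cpus, int ncpus)
{
	for (int i = 0; i < ncpus; i++) {
		if ((unsigned long)cpus[i] > mask->nbits)
			return -ENODEV;
		mask->bits[cpus[i] / SKETCH_LONG_BITS] |= 1UL << (cpus[i] % SKETCH_LONG_BITS);
	}
	return 0;
}

/* Stand-in for the callers: report the failure and propagate it. */
static int sketch_init_masks(struct sketch_mask *mask, const int *cpus, int ncpus)
{
	int ret = sketch_mask_init(mask, cpus, ncpus);

	if (ret)
		fprintf(stderr, "Failed to initialize parallel data streaming masks\n");
	return ret;
}

int main(void)
{
	unsigned long words[2048 / SKETCH_LONG_BITS] = { 0 };	/* room for 2048 bits */
	struct sketch_mask mask = { .nbits = 2048, .bits = words };
	int cpus[] = { 12341234 };	/* as with "perf record -C 12341234" */

	return sketch_init_masks(&mask, cpus, 1) ? 1 : 0;
}
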
> 
> Reported-by: Nageswara Sastry <rnsastry at linux.ibm.com>

Tested-by: Nageswara Sastry <rnsastry at linux.ibm.com>

> Signed-off-by: Athira Rajeev <atrajeev at linux.vnet.ibm.com>
> ---
>   tools/perf/builtin-record.c | 26 ++++++++++++++++++++------
>   1 file changed, 20 insertions(+), 6 deletions(-)
> 
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index 4713f0f3a6cf..09b68d76bbdc 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -3358,16 +3358,22 @@ static struct option __record_options[] = {
>   
>   struct option *record_options = __record_options;
>   
> -static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
> +static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
>   {
>   	struct perf_cpu cpu;
>   	int idx;
>   
>   	if (cpu_map__is_dummy(cpus))
> -		return;
> +		return 0;
>   
> -	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
> +	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
> +		/* Return ENODEV if input cpu is greater than max cpu */
> +		if ((unsigned long)cpu.cpu > mask->nbits)
> +			return -ENODEV;
>   		set_bit(cpu.cpu, mask->bits);
> +	}
> +
> +	return 0;
>   }
>   
>   static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
> @@ -3379,7 +3385,9 @@ static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const cha
>   		return -ENOMEM;
>   
>   	bitmap_zero(mask->bits, mask->nbits);
> -	record__mmap_cpu_mask_init(mask, cpus);
> +	if (record__mmap_cpu_mask_init(mask, cpus))
> +		return -ENODEV;
> +
>   	perf_cpu_map__put(cpus);
>   
>   	return 0;
> @@ -3461,7 +3469,12 @@ static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_ma
>   		pr_err("Failed to allocate CPUs mask\n");
>   		return ret;
>   	}
> -	record__mmap_cpu_mask_init(&cpus_mask, cpus);
> +
> +	ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
> +	if (ret) {
> +		pr_err("Failed to init cpu mask\n");
> +		goto out_free_cpu_mask;
> +	}
>   
>   	ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
>   	if (ret) {
> @@ -3702,7 +3715,8 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
>   	if (ret)
>   		return ret;
>   
> -	record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
> +	if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
> +		return -ENODEV;
>   
>   	rec->nr_threads = 1;
>   

-- 
Thanks and Regards
R.Nageswara Sastry

