[PATCH 05/14] cpumask: deprecate cpumask_next_wrap()
Bjorn Helgaas
helgaas at kernel.org
Sat Jan 4 04:39:30 AEDT 2025
On Sat, Dec 28, 2024 at 10:49:37AM -0800, Yury Norov wrote:
> The next patche aligns implementation of cpumask_next_wrap() with the
> generic version in find.h which changes function signature.
s/patche/patch/
I guess this is an indirect reference to find_next_bit_wrap()? If so,
I think mentioning the function name would be more useful than
referring to "the generic version in find.h".
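For example, if the new generic version ends up being a thin wrapper
around find_next_bit_wrap(), I'd imagine something like the following
(just a sketch; I'm guessing at the new signature and the use of
small_cpumask_bits from the description above):

	/*
	 * Hypothetical shape of the new helper: the start/wrap arguments
	 * are gone, and the scan simply starts at n + 1 and wraps around
	 * the whole mask, returning >= nr_cpu_ids if no bit is set.
	 */
	static __always_inline
	unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
	{
		return find_next_bit_wrap(cpumask_bits(src),
					  small_cpumask_bits, n + 1);
	}

Spelling out find_next_bit_wrap() in the commit message would make
that intent obvious to readers of the log.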
> To make the transition smooth, this patch deprecates current
> implementation by adding an _old suffix. The following patches switch
> current users to the new implementation one by one.
>
> No functional changes were intended.
>
> Signed-off-by: Yury Norov <yury.norov at gmail.com>
> ---
> arch/s390/kernel/processor.c | 2 +-
> drivers/nvme/host/tcp.c | 2 +-
> drivers/pci/controller/pci-hyperv.c | 2 +-
> drivers/scsi/lpfc/lpfc_init.c | 2 +-
> include/linux/cpumask.h | 4 ++--
> kernel/padata.c | 2 +-
> lib/cpumask.c | 6 +++---
> 7 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
> index 5ce9a795a0fe..42ca61909030 100644
> --- a/arch/s390/kernel/processor.c
> +++ b/arch/s390/kernel/processor.c
> @@ -72,7 +72,7 @@ void notrace stop_machine_yield(const struct cpumask *cpumask)
> this_cpu = smp_processor_id();
> if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
> __this_cpu_write(cpu_relax_retry, 0);
> - cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
> + cpu = cpumask_next_wrap_old(this_cpu, cpumask, this_cpu, false);
> if (cpu >= nr_cpu_ids)
> return;
> if (arch_vcpu_is_preempted(cpu))
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 28c76a3e1bd2..054904376c3c 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -1578,7 +1578,7 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
> if (wq_unbound)
> queue->io_cpu = WORK_CPU_UNBOUND;
> else
> - queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
> + queue->io_cpu = cpumask_next_wrap_old(n - 1, cpu_online_mask, -1, false);
> }
>
> static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
> diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
> index cdd5be16021d..86d1c2be8eb5 100644
> --- a/drivers/pci/controller/pci-hyperv.c
> +++ b/drivers/pci/controller/pci-hyperv.c
> @@ -1757,7 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
>
> spin_lock_irqsave(&multi_msi_cpu_lock, flags);
>
> - cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
> + cpu_next = cpumask_next_wrap_old(cpu_next, cpu_online_mask, nr_cpu_ids,
> false);
> cpu = cpu_next;
>
> diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
> index 7f57397d91a9..31622fb0614a 100644
> --- a/drivers/scsi/lpfc/lpfc_init.c
> +++ b/drivers/scsi/lpfc/lpfc_init.c
> @@ -12876,7 +12876,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
>
> if (offline) {
> /* Find next online CPU on original mask */
> - cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
> + cpu_next = cpumask_next_wrap_old(cpu, orig_mask, cpu, true);
> cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
>
> /* Found a valid CPU */
> diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
> index 30042351f15f..b267a4f6a917 100644
> --- a/include/linux/cpumask.h
> +++ b/include/linux/cpumask.h
> @@ -296,7 +296,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
>
> #if NR_CPUS == 1
> static __always_inline
> -unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
> +unsigned int cpumask_next_wrap_old(int n, const struct cpumask *mask, int start, bool wrap)
> {
> cpumask_check(start);
> if (n != -1)
> @@ -312,7 +312,7 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, boo
> return cpumask_first(mask);
> }
> #else
> -unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
> +unsigned int __pure cpumask_next_wrap_old(int n, const struct cpumask *mask, int start, bool wrap);
> #endif
>
> /**
> diff --git a/kernel/padata.c b/kernel/padata.c
> index d51bbc76b227..454ff2fca40b 100644
> --- a/kernel/padata.c
> +++ b/kernel/padata.c
> @@ -274,7 +274,7 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
> if (remove_object) {
> list_del_init(&padata->list);
> ++pd->processed;
> - pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
> + pd->cpu = cpumask_next_wrap_old(cpu, pd->cpumask.pcpu, -1, false);
> }
>
> spin_unlock(&reorder->lock);
> diff --git a/lib/cpumask.c b/lib/cpumask.c
> index e77ee9d46f71..c9a9b451772a 100644
> --- a/lib/cpumask.c
> +++ b/lib/cpumask.c
> @@ -8,7 +8,7 @@
> #include <linux/numa.h>
>
> /**
> - * cpumask_next_wrap - helper to implement for_each_cpu_wrap
> + * cpumask_next_wrap_old - helper to implement for_each_cpu_wrap
> * @n: the cpu prior to the place to search
> * @mask: the cpumask pointer
> * @start: the start point of the iteration
> @@ -19,7 +19,7 @@
> * Note: the @wrap argument is required for the start condition when
> * we cannot assume @start is set in @mask.
> */
> -unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
> +unsigned int cpumask_next_wrap_old(int n, const struct cpumask *mask, int start, bool wrap)
> {
> unsigned int next;
>
> @@ -37,7 +37,7 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, boo
>
> return next;
> }
> -EXPORT_SYMBOL(cpumask_next_wrap);
> +EXPORT_SYMBOL(cpumask_next_wrap_old);
>
> /* These are not inline because of header tangles. */
> #ifdef CONFIG_CPUMASK_OFFSTACK
> --
> 2.43.0
>