[PATCH v4] erofs: lazily initialize per-CPU workers and CPU hotplug hooks

Hongbo Li lihongbo22 at huawei.com
Thu Apr 24 13:03:03 AEST 2025



On 2025/4/23 14:10, Sandeep Dhavale wrote:
> Currently, when EROFS is built with per-CPU workers, the workers are
> started and CPU hotplug hooks are registered during module initialization.
> This leads to unnecessary worker start/stop cycles during CPU hotplug
> events, particularly on Android devices that frequently suspend and resume.
> 
> This change defers the initialization of per-CPU workers and the
> registration of CPU hotplug hooks until the first EROFS mount. This
> ensures that these resources are only allocated and managed when EROFS is
> actually in use.
> 
> The teardown of per-CPU workers and unregistration of CPU hotplug hooks
> still occur during z_erofs_exit_subsystem(), but only if they were
> initialized.
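
Side note for readers skimming the diff: the gating added here boils
down to a small xchg-based one-shot pattern, roughly this minimal
sketch (generic names, not the patch's actual identifiers):

static atomic_t once = ATOMIC_INIT(0);

static int lazy_init(void)
{
	/* the first caller flips 0 -> 1 and does the real work;
	 * every later caller sees 1 and returns immediately
	 */
	if (atomic_xchg(&once, 1))
		return 0;
	/* ... start per-CPU workers, register hotplug hooks ... */
	return 0;
}

static void lazy_destroy(void)
{
	/* tear down only if lazy_init() actually ran */
	if (!atomic_xchg(&once, 0))
		return;
	/* ... unregister hooks, stop workers ... */
}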
> 
> Signed-off-by: Sandeep Dhavale <dhavale at google.com>
> ---
> v3: https://lore.kernel.org/linux-erofs/20250422234546.2932092-1-dhavale@google.com/
> Changes since v3:
> - fold z_erofs_init_pcpu_workers() in the caller and rename the caller
> 
>   fs/erofs/zdata.c | 61 +++++++++++++++++++++++++++++++++++-------------
>   1 file changed, 45 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 0671184d9cf1..647a8340c9a1 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -291,6 +291,9 @@ static struct workqueue_struct *z_erofs_workqueue __read_mostly;
>   
>   #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
>   static struct kthread_worker __rcu **z_erofs_pcpu_workers;
> +static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);
> +static int erofs_cpu_hotplug_init(void);
> +static void erofs_cpu_hotplug_destroy(void);
>   
>   static void erofs_destroy_percpu_workers(void)
>   {
> @@ -336,9 +339,40 @@ static int erofs_init_percpu_workers(void)
>   	}
>   	return 0;
>   }
> +
> +static int z_erofs_init_pcpu_workers(void)
> +{
> +	int err;
> +
> +	if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
> +		return 0;
> +
> +	err = erofs_init_percpu_workers();
> +	if (err)
> +		goto err_init_percpu_workers;
> +
> +	err = erofs_cpu_hotplug_init();
> +	if (err < 0)
> +		goto err_cpuhp_init;
> +	return err;
> +
> +err_cpuhp_init:
> +	erofs_destroy_percpu_workers();
> +err_init_percpu_workers:
> +	atomic_set(&erofs_percpu_workers_initialized, 0);
> +	return err;
> +}
> +
> +static void z_erofs_destroy_pcpu_workers(void)
> +{
> +	if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
> +		return;
> +	erofs_cpu_hotplug_destroy();
> +	erofs_destroy_percpu_workers();
> +}
>   #else
> -static inline void erofs_destroy_percpu_workers(void) {}
> -static inline int erofs_init_percpu_workers(void) { return 0; }
> +static inline int z_erofs_init_pcpu_workers(void) { return 0; }
> +static inline void z_erofs_destroy_pcpu_workers(void) {}
>   #endif
>   
>   #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
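
Worth noting for anyone worried about a CPU without a worker (offline,
or not started yet): the dispatch side already falls back to the shared
workqueue when a per-CPU slot is empty, so decompression keeps working
either way. Roughly this shape, paraphrased from
z_erofs_decompress_kickoff() in zdata.c (not part of this patch):

	rcu_read_lock();
	worker = rcu_dereference(
			z_erofs_pcpu_workers[raw_smp_processor_id()]);
	if (!worker)	/* no per-CPU worker: fall back */
		queue_work(z_erofs_workqueue, &io->u.work);
	else
		kthread_queue_work(worker, &io->u.kthread_work);
	rcu_read_unlock();
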
> @@ -405,8 +439,7 @@ static inline void erofs_cpu_hotplug_destroy(void) {}
>   
>   void z_erofs_exit_subsystem(void)
>   {
> -	erofs_cpu_hotplug_destroy();
> -	erofs_destroy_percpu_workers();
> +	z_erofs_destroy_pcpu_workers();
>   	destroy_workqueue(z_erofs_workqueue);
>   	z_erofs_destroy_pcluster_pool();
>   	z_erofs_exit_decompressor();
> @@ -430,19 +463,8 @@ int __init z_erofs_init_subsystem(void)
>   		goto err_workqueue_init;
>   	}
>   
> -	err = erofs_init_percpu_workers();
> -	if (err)
> -		goto err_pcpu_worker;
> -
> -	err = erofs_cpu_hotplug_init();
> -	if (err < 0)
> -		goto err_cpuhp_init;
>   	return err;
>   
> -err_cpuhp_init:
> -	erofs_destroy_percpu_workers();
> -err_pcpu_worker:
> -	destroy_workqueue(z_erofs_workqueue);
>   err_workqueue_init:
>   	z_erofs_destroy_pcluster_pool();
>   err_pcluster_pool:
> @@ -644,10 +666,17 @@ static const struct address_space_operations z_erofs_cache_aops = {
>   
>   int z_erofs_init_super(struct super_block *sb)
>   {
> -	struct inode *const inode = new_inode(sb);
> +	struct inode *inode;
> +	int err;
>   
> +	err = z_erofs_init_pcpu_workers();
> +	if (err)
> +		return err;
> +
This will slow down the first mount, but that seems unavoidable for
now. :)
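
For context, with this applied the mount entry point should look
roughly like the following (reconstructed from the hunk above; the
rest of the function is assumed unchanged):

int z_erofs_init_super(struct super_block *sb)
{
	struct inode *inode;
	int err;

	/* the first mount pays the one-time worker/hotplug setup cost */
	err = z_erofs_init_pcpu_workers();
	if (err)
		return err;

	inode = new_inode(sb);
	if (!inode)
		return -ENOMEM;

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &z_erofs_cache_aops;
	/* ... rest unchanged ... */
}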

Reviewed-by: Hongbo Li <lihongbo22 at huawei.com>

Thanks,
Hongbo

> +	inode = new_inode(sb);
>   	if (!inode)
>   		return -ENOMEM;
> +
>   	set_nlink(inode, 1);
>   	inode->i_size = OFFSET_MAX;
>   	inode->i_mapping->a_ops = &z_erofs_cache_aops;

