[PATCH] erofs: free pcluster right after decompression if cached decompression is disabled
Chunhai Guo
guochunhai at vivo.com
Tue Aug 13 20:28:35 AEST 2024
When cached decompression is disabled (EROFS_ZIP_CACHE_DISABLED), all
pages attached to a pcluster are freed right after decompression, so
there is no point in keeping the pcluster itself cached either: free it
immediately instead of leaving it around for the shrinker.

Signed-off-by: Chunhai Guo <guochunhai at vivo.com>
---
 fs/erofs/internal.h |  3 ++-
 fs/erofs/zdata.c    |  4 ++--
 fs/erofs/zutil.c    | 32 +++++++++++++++++++-------------
 3 files changed, 23 insertions(+), 16 deletions(-)
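
Note for readers: erofs_is_cache_disabled() is called by the zutil.c
hunks below but is not defined anywhere in this patch. Assuming it
simply tests the mount-time cache strategy kept in the superblock info
(the field name below follows the existing erofs_mount_opts layout, so
treat this as a sketch rather than the patch's actual helper), it could
look like:

        static inline bool erofs_is_cache_disabled(struct erofs_sb_info *sbi)
        {
                return sbi->opt.cache_strategy == EROFS_ZIP_CACHE_DISABLED;
        }
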
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index e768990bf20f..cc6a61a422d8 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -449,7 +449,8 @@ static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
 void erofs_release_pages(struct page **pagepool);
 
 #ifdef CONFIG_EROFS_FS_ZIP
-void erofs_workgroup_put(struct erofs_workgroup *grp);
+void erofs_workgroup_put(struct erofs_sb_info *sbi,
+                         struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
                                              pgoff_t index);
 struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 428ab617e0e4..03b939dc2943 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -924,7 +924,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
          * any longer if the pcluster isn't hosted by ourselves.
          */
         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
-                erofs_workgroup_put(&pcl->obj);
+                erofs_workgroup_put(EROFS_I_SB(fe->inode), &pcl->obj);
 
         fe->pcl = NULL;
 }
@@ -1355,7 +1355,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                 if (z_erofs_is_inline_pcluster(be.pcl))
                         z_erofs_free_pcluster(be.pcl);
                 else
-                        erofs_workgroup_put(&be.pcl->obj);
+                        erofs_workgroup_put(EROFS_SB(io->sb), &be.pcl->obj);
         }
 }
 
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 9b53883e5caf..4f5783cca5c6 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -284,18 +284,6 @@ static void __erofs_workgroup_free(struct erofs_workgroup *grp)
         erofs_workgroup_free_rcu(grp);
 }
 
-void erofs_workgroup_put(struct erofs_workgroup *grp)
-{
-        if (lockref_put_or_lock(&grp->lockref))
-                return;
-
-        DBG_BUGON(__lockref_is_dead(&grp->lockref));
-        if (grp->lockref.count == 1)
-                atomic_long_inc(&erofs_global_shrink_cnt);
-        --grp->lockref.count;
-        spin_unlock(&grp->lockref.lock);
-}
-
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
                                            struct erofs_workgroup *grp)
 {
@@ -310,7 +298,8 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
          * the XArray. Otherwise some cached pages could be still attached to
          * the orphan old workgroup when the new one is available in the tree.
          */
-        if (erofs_try_to_free_all_cached_folios(sbi, grp))
+        if (!erofs_is_cache_disabled(sbi) &&
+            erofs_try_to_free_all_cached_folios(sbi, grp))
                 goto out;
 
         /*
@@ -329,6 +318,23 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
         return free;
 }
 
+void erofs_workgroup_put(struct erofs_sb_info *sbi, struct erofs_workgroup *grp)
+{
+        if (lockref_put_or_lock(&grp->lockref))
+                return;
+
+        DBG_BUGON(__lockref_is_dead(&grp->lockref));
+        if (--grp->lockref.count == 0)
+                atomic_long_inc(&erofs_global_shrink_cnt);
+        spin_unlock(&grp->lockref.lock);
+
+        if (erofs_is_cache_disabled(sbi)) {
+                xa_lock(&sbi->managed_pslots);
+                erofs_try_to_release_workgroup(sbi, grp);
+                xa_unlock(&sbi->managed_pslots);
+        }
+}
+
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                               unsigned long nr_shrink)
 {
--
2.25.1
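
For context on the new erofs_workgroup_put() above: lockref_put_or_lock()
decrements the reference count locklessly as long as it stays above 1 and
returns nonzero; otherwise it returns with the spinlock held, so the
caller handles the final reference itself. A minimal self-contained
sketch of that pattern (struct obj and obj_mark_reclaimable() are
hypothetical names for illustration, not erofs code):

        #include <linux/lockref.h>
        #include <linux/spinlock.h>

        struct obj {
                struct lockref lockref; /* embedded count + spinlock */
        };

        void obj_mark_reclaimable(struct obj *o);       /* hypothetical reclaim hook */

        void obj_put(struct obj *o)
        {
                if (lockref_put_or_lock(&o->lockref))
                        return;         /* fast path: count was > 1 */
                /* slow path: lock held, we are dropping the last reference */
                if (--o->lockref.count == 0)
                        obj_mark_reclaimable(o);
                spin_unlock(&o->lockref.lock);
        }

With EROFS_ZIP_CACHE_DISABLED, the patch then goes one step further and
immediately calls erofs_try_to_release_workgroup() under the
managed_pslots XArray lock, rather than waiting for the shrinker to find
the now-unreferenced pcluster.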