[PATCH] erofs: add a global page pool for lz4 decompression
Chunhai Guo
guochunhai at vivo.com
Fri Dec 29 00:00:53 AEDT 2023
Using a global page pool for LZ4 decompression significantly reduces the
time spent on page allocation in low-memory scenarios.

The table below shows the reduction in page-allocation time for LZ4
decompression when the global page pool is used. The results were obtained
from multi-app launch benchmarks on ARM64 Android devices running the 5.15
kernel.
+--------------+---------------+--------------+---------+
| | w/o page pool | w/ page pool | diff |
+--------------+---------------+--------------+---------+
| Average (ms) | 3434 | 21 | -99.38% |
+--------------+---------------+--------------+---------+
Based on the benchmark logs, 256 pages appear to be sufficient for most
cases, but the pool size can be adjusted as needed. Enabling
CONFIG_EROFS_FS_DEBUG makes the pool report its low-water mark
(nrpages_min), which simplifies tuning.

This patch currently supports only the LZ4 decompressor; other
decompressors will be supported in a follow-up.
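
To illustrate the approach outside the kernel, here is a minimal userspace
sketch of the same idea: a lock-protected intrusive free list that is
pre-filled once, popped on allocation and pushed back on release, with a
plain allocation as the fallback when the pool runs dry. The names
(pool_page, pool_get, pool_put) and the use of malloc()/pthread mutexes are
illustrative only; the patch itself chains struct page objects through
page->private under a spinlock and falls back to alloc_page().

/* Illustrative userspace model of the global page pool (not kernel code). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_INIT_PAGES	128	/* mirrors PAGE_POOL_INIT_PAGES in the patch */

struct pool_page {
	struct pool_page *next;		/* plays the role of page->private */
	char data[4096];
};

static struct pool_page *pool_head;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pre-fill the pool so later requests rarely hit the allocator. */
static void pool_init(void)
{
	for (int i = 0; i < POOL_INIT_PAGES; i++) {
		struct pool_page *p = malloc(sizeof(*p));

		if (!p)
			break;		/* best effort: keep what we already have */
		p->next = pool_head;
		pool_head = p;
	}
}

/* Pop a page from the pool, or fall back to a fresh allocation. */
static struct pool_page *pool_get(void)
{
	struct pool_page *p;

	pthread_mutex_lock(&pool_lock);
	p = pool_head;
	if (p)
		pool_head = p->next;
	pthread_mutex_unlock(&pool_lock);
	return p ? p : malloc(sizeof(*p));
}

/* Push a page back so the next decompression request can reuse it. */
static void pool_put(struct pool_page *p)
{
	pthread_mutex_lock(&pool_lock);
	p->next = pool_head;
	pool_head = p;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pool_init();
	struct pool_page *p = pool_get();

	printf("got page %p from the pool\n", (void *)p);
	pool_put(p);
	return 0;
}

Keeping the free list intrusive (the link lives inside the page itself) is
what lets the kernel version reuse page->private for chaining and avoid any
extra bookkeeping allocations.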
Signed-off-by: Chunhai Guo <guochunhai at vivo.com>
---
fs/erofs/compress.h | 1 +
fs/erofs/decompressor.c | 42 ++++++++++++--
fs/erofs/internal.h | 5 ++
fs/erofs/super.c | 1 +
fs/erofs/utils.c | 121 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 165 insertions(+), 5 deletions(-)
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 279933e007d2..67202b97d47b 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -31,6 +31,7 @@ struct z_erofs_decompressor {
/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE (-1UL << 2)
#define Z_EROFS_PREALLOCATED_PAGE (-2UL << 2)
+#define Z_EROFS_POOL_PAGE (-3UL << 2)
/*
* For all pages in a pcluster, page->private should be one of
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index d08a6ee23ac5..41b34f01416f 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -54,6 +54,7 @@ static int z_erofs_load_lz4_config(struct super_block *sb,
sbi->lz4.max_distance_pages = distance ?
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
LZ4_MAX_DISTANCE_PAGES;
+ erofs_global_page_pool_init();
return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}
@@ -111,15 +112,42 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
victim = availables[--top];
get_page(victim);
} else {
- victim = erofs_allocpage(pagepool,
+ victim = erofs_allocpage_for_decmpr(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
- set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
}
rq->out[i] = victim;
}
return kaddr ? 1 : 0;
}
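+/* Return pages taken from the global page pool once decompression ends. */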
+static void z_erofs_lz4_post_destpages(struct z_erofs_lz4_decompress_ctx *ctx)
+{
+ struct z_erofs_decompress_req *rq = ctx->rq;
+ unsigned int i, j, nrpage;
+ struct page *page_dedup[LZ4_MAX_DISTANCE_PAGES] = { NULL };
+
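+ /*
+  * prepare_dstpages() may map one pool page into several rq->out slots
+  * (LZ4 distance reuse), so deduplicate and return each distinct pool
+  * page to the global pool exactly once; the extra references taken by
+  * get_page() are dropped here.
+  */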
+ for (i = 0, nrpage = 0; i < ctx->outpages; ++i) {
+ struct page *const page = rq->out[i];
+
+ if (page && page_private(page) == Z_EROFS_POOL_PAGE) {
+ for (j = 0; j < nrpage; ++j) {
+ if (page_dedup[j] == page)
+ break;
+ }
+ if (j == nrpage) {
+ WARN_ON_ONCE(nrpage >= LZ4_MAX_DISTANCE_PAGES);
+ page_dedup[nrpage] = page;
+ nrpage++;
+ } else
+ put_page(page);
+ rq->out[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < nrpage; ++i)
+ erofs_put_page_for_decmpr(page_dedup[i]);
+}
+
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
void *inpage, void *out, unsigned int *inputmargin,
int *maptype, bool may_inplace)
@@ -297,14 +325,16 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
/* general decoding path which can be used for all cases */
ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
if (ret < 0) {
- return ret;
+ goto out;
} else if (ret > 0) {
dst = page_address(*rq->out);
dst_maptype = 1;
} else {
dst = erofs_vm_map_ram(rq->out, ctx.outpages);
- if (!dst)
- return -ENOMEM;
+ if (!dst) {
+ ret = -ENOMEM;
+ goto out;
+ }
dst_maptype = 2;
}
@@ -314,6 +344,8 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
kunmap_local(dst);
else if (dst_maptype == 2)
vm_unmap_ram(dst, ctx.outpages);
+out:
+ z_erofs_lz4_post_destpages(&ctx);
return ret;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index b0409badb017..d8e5ac30bf62 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -534,4 +534,9 @@ static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+int erofs_global_page_pool_init(void);
+void erofs_global_page_pool_exit(void);
+struct page *erofs_allocpage_for_decmpr(struct page **pagepool, gfp_t gfp);
+void erofs_put_page_for_decmpr(struct page *page);
+
#endif /* __EROFS_INTERNAL_H */
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 3789d6224513..8e50f8dbd8fe 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -940,6 +940,7 @@ static void __exit erofs_module_exit(void)
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
erofs_pcpubuf_exit();
+ erofs_global_page_pool_exit();
}
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 5dea308764b4..1bdebe6e682d 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -4,6 +4,7 @@
* https://www.huawei.com/
*/
#include "internal.h"
+#include "compress.h"
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
@@ -284,4 +285,124 @@ void erofs_exit_shrinker(void)
{
shrinker_free(erofs_shrinker_info);
}
+
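+/*
+ * Global pool shared by all erofs mounts: free pages are chained into a
+ * singly linked list through page->private, protected by a spinlock.
+ */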
+struct erofs_page_pool {
+ struct page *pagepool;
+ spinlock_t lock;
+ int nrpages_max;
+#ifdef CONFIG_EROFS_FS_DEBUG
+ int nrpages_cur;
+ int nrpages_min;
+#endif
+};
+
+static struct erofs_page_pool global_page_pool;
+
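+/*
+ * Allocation order: try the caller's short-lived pagepool first, then the
+ * global page pool, and finally fall back to alloc_page().
+ */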
+struct page *erofs_allocpage_for_decmpr(struct page **pagepool, gfp_t gfp)
+{
+ struct page *page = *pagepool;
+
+ if (page) {
+ DBG_BUGON(page_ref_count(page) != 1);
+ *pagepool = (struct page *)page_private(page);
+ set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
+ } else {
+ spin_lock(&global_page_pool.lock);
+ page = global_page_pool.pagepool;
+ if (page) {
+ global_page_pool.pagepool =
+ (struct page *)page_private(page);
+ DBG_BUGON(page_ref_count(page) != 1);
+ set_page_private(page, Z_EROFS_POOL_PAGE);
+#ifdef CONFIG_EROFS_FS_DEBUG
+ global_page_pool.nrpages_cur--;
+ if (global_page_pool.nrpages_min
+ > global_page_pool.nrpages_cur) {
+ global_page_pool.nrpages_min
+ = global_page_pool.nrpages_cur;
+ pr_info("erofs global_page_pool nrpages_min %d\n",
+ global_page_pool.nrpages_min);
+ }
+#endif
+ spin_unlock(&global_page_pool.lock);
+ } else {
+ spin_unlock(&global_page_pool.lock);
+ page = alloc_page(gfp);
+ if (page)
+	set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
+ }
+ }
+
+ return page;
+}
+
+/*
+ * Only release pages that were taken from the global page pool; pages with
+ * other page->private markers are left untouched.
+ */
+void erofs_put_page_for_decmpr(struct page *page)
+{
+ if (!page)
+ return;
+
+ if (page_private(page) == Z_EROFS_POOL_PAGE) {
+ DBG_BUGON(page_ref_count(page) != 1);
+ spin_lock(&global_page_pool.lock);
+ set_page_private(page,
+ (unsigned long)global_page_pool.pagepool);
+ global_page_pool.pagepool = page;
+#ifdef CONFIG_EROFS_FS_DEBUG
+ global_page_pool.nrpages_cur++;
+#endif
+ spin_unlock(&global_page_pool.lock);
+ }
+}
+
+#define PAGE_POOL_INIT_PAGES 128
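+/*
+ * Pre-fill the pool when the first LZ4-compressed filesystem is mounted;
+ * allocation failures are tolerated and simply leave the pool partially
+ * filled.
+ */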
+int erofs_global_page_pool_init(void)
+{
+ int i;
+ struct page *page;
+
+ if (global_page_pool.nrpages_max)
+ return 0;
+
+ spin_lock_init(&global_page_pool.lock);
+ global_page_pool.pagepool = NULL;
+ global_page_pool.nrpages_max = PAGE_POOL_INIT_PAGES;
+#ifdef CONFIG_EROFS_FS_DEBUG
+ global_page_pool.nrpages_min = INT_MAX;
+ global_page_pool.nrpages_cur = 0;
+#endif
+ for (i = 0; i < global_page_pool.nrpages_max; i++) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_err("failed to alloc page for erofs page pool\n");
+ return 0;
+ }
+ set_page_private(page,
+ (unsigned long)global_page_pool.pagepool);
+ global_page_pool.pagepool = page;
+#ifdef CONFIG_EROFS_FS_DEBUG
+ global_page_pool.nrpages_cur++;
+#endif
+ }
+
+ return 0;
+}
+
+void erofs_global_page_pool_exit(void)
+{
+ struct page *pagepool = global_page_pool.pagepool;
+
+ while (pagepool) {
+ struct page *page = pagepool;
+
+ pagepool = (struct page *)page_private(page);
+ put_page(page);
+#ifdef CONFIG_EROFS_FS_DEBUG
+ global_page_pool.nrpages_cur--;
+#endif
+ }
+ global_page_pool.nrpages_max = 0;
+}
+
#endif /* !CONFIG_EROFS_FS_ZIP */
--
2.25.1