[PATCH v7 3/3] erofs-utils: optimize buffer allocation logic
Gao Xiang
hsiangkao at aol.com
Sat Jan 23 04:11:53 AEDT 2021
From: Hu Weiwen <sehuww at mail.scut.edu.cn>

When using EROFS to pack our dataset, which consists of millions of
files, mkfs.erofs is very slow compared with mksquashfs. The
bottlenecks are the `erofs_balloc' and `erofs_mapbh' functions, which
iterate over all previously allocated buffer blocks, making the
complexity of the algorithm O(N^2), where N is the number of files.
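
For reference, the pre-patch allocation boils down to a full list walk
on every call; a simplified sketch of the old scan (distilled from the
removed lines below, not the verbatim source) looks like this:

    /*
     * Every allocation walks the whole buffer block list for a
     * best-fit candidate, so creating N files costs O(N^2) overall.
     */
    list_for_each_entry(cur, &blkh.list, list) {
        unsigned int used_before = cur->buffers.off % EROFS_BLKSIZ;

        /* skip if the buffer block is just full */
        if (!used_before)
            continue;

        /* tentatively attach (dry run) and track the fullest fit */
        ret = __erofs_battach(cur, NULL, size, alignsize,
                              required_ext + inline_ext, true);
        if (ret < 0)
            continue;
        used = ret + required_ext + inline_ext;
        if (used > usedmax) {
            usedmax = used;
            bb = cur;
        }
    }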

With this patch:

* a global `last_mapped_block' is maintained to avoid a full scan in
  the `erofs_mapbh' function;

* a global `mapped_buckets' array maintains a list of already mapped
  buffer blocks for each type and for each possible number of used
  bytes in the last EROFS_BLKSIZ block; it is then used to pick the
  most suitable block for later `erofs_balloc' calls without a full
  scan (see the sketch below). Note that unmapped blocks (and the
  last mapped one) can still be expanded, so they are handled
  separately.
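
To illustrate the new lookup (a minimal sketch distilled from the
hunks below; the real code also special-cases the last mapped block
and requests too large to share a block):

    /* a mapped buffer block is bucketed by its type and by the number
     * of bytes used in its last EROFS_BLKSIZ block */
    bkt = mapped_buckets[bb->type] + bb->buffers.off % EROFS_BLKSIZ;
    list_add_tail(&bb->mapped_list, bkt);

    /* allocation then probes only buckets that still leave enough
     * room, from the fullest viable one downwards */
    used_before = rounddown(EROFS_BLKSIZ -
                            (size + required_ext + inline_ext), alignsize);
    do {
        struct list_head *bt = mapped_buckets[type] + used_before;

        if (list_empty(bt))
            continue;
        cur = list_first_entry(bt, struct erofs_buffer_block,
                               mapped_list);
        /* ... attach into `cur' as in the patch below ... */
    } while (--used_before > 0);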

When I tested it with the ImageNet dataset (1.33M files, 147GiB), it
took about 4 hours; most of the time was spent on I/O.

Cc: Huang Jianan <jnhuang95 at gmail.com>
Signed-off-by: Hu Weiwen <sehuww at mail.scut.edu.cn>
Signed-off-by: Gao Xiang <hsiangkao at aol.com>
---
include/erofs/cache.h | 1 +
lib/cache.c | 105 ++++++++++++++++++++++++++++++++++++------
2 files changed, 93 insertions(+), 13 deletions(-)
diff --git a/include/erofs/cache.h b/include/erofs/cache.h
index f8dff67b9736..611ca5b8432b 100644
--- a/include/erofs/cache.h
+++ b/include/erofs/cache.h
@@ -39,6 +39,7 @@ struct erofs_buffer_head {
struct erofs_buffer_block {
struct list_head list;
+ struct list_head mapped_list;
erofs_blk_t blkaddr;
int type;
diff --git a/lib/cache.c b/lib/cache.c
index f02413d0f887..40d3b1f3f4d5 100644
--- a/lib/cache.c
+++ b/lib/cache.c
@@ -18,6 +18,11 @@ static struct erofs_buffer_block blkh = {
};
static erofs_blk_t tail_blkaddr;
+/* buckets for all mapped buffer blocks to speed up allocation */
+static struct list_head mapped_buckets[META + 1][EROFS_BLKSIZ];
+/* last mapped buffer block to accelerate erofs_mapbh() */
+static struct erofs_buffer_block *last_mapped_block = &blkh;
+
static bool erofs_bh_flush_drop_directly(struct erofs_buffer_head *bh)
{
return erofs_bh_flush_generic_end(bh);
@@ -62,15 +67,32 @@ struct erofs_bhops erofs_buf_write_bhops = {
/* return buffer_head of erofs super block (with size 0) */
struct erofs_buffer_head *erofs_buffer_init(void)
{
+ int i, j;
struct erofs_buffer_head *bh = erofs_balloc(META, 0, 0, 0);
if (IS_ERR(bh))
return bh;
bh->op = &erofs_skip_write_bhops;
+
+ for (i = 0; i < ARRAY_SIZE(mapped_buckets); i++)
+ for (j = 0; j < ARRAY_SIZE(mapped_buckets[0]); j++)
+ init_list_head(&mapped_buckets[i][j]);
return bh;
}
+static void erofs_bupdate_mapped(struct erofs_buffer_block *bb)
+{
+ struct list_head *bkt;
+
+ if (bb->blkaddr == NULL_ADDR)
+ return;
+
+ bkt = mapped_buckets[bb->type] + bb->buffers.off % EROFS_BLKSIZ;
+ list_del(&bb->mapped_list);
+ list_add_tail(&bb->mapped_list, bkt);
+}
+
/* return occupied bytes in specific buffer block if succeed */
static int __erofs_battach(struct erofs_buffer_block *bb,
struct erofs_buffer_head *bh,
@@ -110,6 +132,7 @@ static int __erofs_battach(struct erofs_buffer_block *bb,
/* need to update the tail_blkaddr */
if (tailupdate)
tail_blkaddr = blkaddr + BLK_ROUND_UP(bb->buffers.off);
+ erofs_bupdate_mapped(bb);
}
return (alignedoffset + incr) % EROFS_BLKSIZ;
}
@@ -132,20 +155,62 @@ static int erofs_bfind_for_attach(int type, erofs_off_t size,
struct erofs_buffer_block **bbp)
{
struct erofs_buffer_block *cur, *bb;
- unsigned int used0, usedmax;
+ unsigned int used0, usedmax, used;
+ int used_before, ret;
used0 = (size + required_ext) % EROFS_BLKSIZ + inline_ext;
/* inline data should be in the same fs block */
if (used0 > EROFS_BLKSIZ)
return -ENOSPC;
+ if (!used0 || alignsize == EROFS_BLKSIZ) {
+ *bbp = NULL;
+ return 0;
+ }
+
usedmax = 0;
bb = NULL;
- list_for_each_entry(cur, &blkh.list, list) {
- int ret;
- unsigned int used_before, used;
+ /* try to find the best-fit mapped buffer block first */
+ if (size + required_ext + inline_ext >= EROFS_BLKSIZ)
+ goto skip_mapped;
+
+ used_before = rounddown(EROFS_BLKSIZ -
+ (size + required_ext + inline_ext), alignsize);
+ do {
+ struct list_head *bt = mapped_buckets[type] + used_before;
+ if (list_empty(bt))
+ continue;
+ cur = list_first_entry(bt, struct erofs_buffer_block,
+ mapped_list);
+
+ /* last mapped block can be expanded, don't handle it here */
+ if (cur == last_mapped_block)
+ continue;
+
+ ret = __erofs_battach(cur, NULL, size, alignsize,
+ required_ext + inline_ext, true);
+ if (ret < 0) {
+ DBG_BUGON(1);
+ continue;
+ }
+
+ /* should contain all data in the current block */
+ used = ret + required_ext + inline_ext;
+ DBG_BUGON(used > EROFS_BLKSIZ);
+
+ bb = cur;
+ usedmax = used;
+ break;
+ } while (--used_before > 0);
+
+skip_mapped:
+ /* try to start from the last mapped one, which can be expanded */
+ cur = last_mapped_block;
+ if (cur == &blkh)
+ cur = list_next_entry(cur, list);
+ for (; cur != &blkh; cur = list_next_entry(cur, list)) {
used_before = cur->buffers.off % EROFS_BLKSIZ;
/* skip if buffer block is just full */
@@ -195,6 +260,8 @@ struct erofs_buffer_head *erofs_balloc(int type, erofs_off_t size,
if (ret < 0)
return ERR_PTR(ret);
+
+ DBG_BUGON(type < 0 || type > META);
alignsize = ret;
/* try to find if we could reuse an allocated buffer block */
@@ -218,6 +285,7 @@ struct erofs_buffer_head *erofs_balloc(int type, erofs_off_t size,
bb->buffers.off = 0;
init_list_head(&bb->buffers.list);
list_add_tail(&bb->list, &blkh.list);
+ init_list_head(&bb->mapped_list);
bh = malloc(sizeof(struct erofs_buffer_head));
if (!bh) {
@@ -266,8 +334,11 @@ static erofs_blk_t __erofs_mapbh(struct erofs_buffer_block *bb)
{
erofs_blk_t blkaddr;
- if (bb->blkaddr == NULL_ADDR)
+ if (bb->blkaddr == NULL_ADDR) {
bb->blkaddr = tail_blkaddr;
+ last_mapped_block = bb;
+ erofs_bupdate_mapped(bb);
+ }
blkaddr = bb->blkaddr + BLK_ROUND_UP(bb->buffers.off);
if (blkaddr > tail_blkaddr)
@@ -278,15 +349,18 @@ static erofs_blk_t __erofs_mapbh(struct erofs_buffer_block *bb)
erofs_blk_t erofs_mapbh(struct erofs_buffer_block *bb)
{
- struct erofs_buffer_block *t, *nt;
+ struct erofs_buffer_block *t = last_mapped_block;
- if (!bb || bb->blkaddr == NULL_ADDR) {
- list_for_each_entry_safe(t, nt, &blkh.list, list) {
- (void)__erofs_mapbh(t);
- if (t == bb)
- break;
- }
- }
+ if (bb && bb->blkaddr != NULL_ADDR)
+ return bb->blkaddr;
+ do {
+ t = list_next_entry(t, list);
+ if (t == &blkh)
+ break;
+
+ DBG_BUGON(t->blkaddr != NULL_ADDR);
+ (void)__erofs_mapbh(t);
+ } while (t != bb);
return tail_blkaddr;
}
@@ -328,6 +402,7 @@ bool erofs_bflush(struct erofs_buffer_block *bb)
erofs_dbg("block %u to %u flushed", p->blkaddr, blkaddr - 1);
+ list_del(&p->mapped_list);
list_del(&p->list);
free(p);
}
@@ -351,6 +426,10 @@ void erofs_bdrop(struct erofs_buffer_head *bh, bool tryrevoke)
if (!list_empty(&bb->buffers.list))
return;
+ if (bb == last_mapped_block)
+ last_mapped_block = list_prev_entry(bb, list);
+
+ list_del(&bb->mapped_list);
list_del(&bb->list);
free(bb);
--
2.24.0