[PATCH v11 10/25] mm: Put readahead pages in cache earlier
Matthew Wilcox
willy at infradead.org
Wed Apr 15 01:02:18 AEST 2020
From: "Matthew Wilcox (Oracle)" <willy at infradead.org>
When populating the page cache for readahead, mappings that use
->readpages must populate the page cache themselves, as the pages are
passed on a linked list threaded through page->lru, the field that
would otherwise link the page into the page cache's LRU. For mappings
that use ->readpage or the upcoming ->readahead method, we can put the
pages into the page cache as soon as they're allocated, which closes a
race between readahead and direct IO: a page that has not yet been
added to the page cache is invisible to direct IO's invalidation of
the range. It also lets us remove the gfp argument from read_pages().
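To make the ordering concrete, here is a simplified sketch of the two
population strategies (condensed from the code below, not the patch
itself; index, nr_pages and page_pool are illustrative locals):

	/* ->readpages: the page rides on a private list and stays
	 * invisible to the rest of the VM, including direct IO's
	 * invalidation, until the filesystem inserts it itself.
	 */
	page = __page_cache_alloc(gfp_mask);
	page->index = index;
	list_add(&page->lru, &page_pool);	/* not in the cache yet */
	aops->readpages(file, mapping, &page_pool, nr_pages);

	/* ->readpage / ->readahead: the page enters the page cache
	 * immediately, so readahead and direct IO serialise on the
	 * cache itself.
	 */
	page = __page_cache_alloc(gfp_mask);
	if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0)
		put_page(page);	/* someone else cached this index first */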
Use the new readahead_page() API to implement the repeated calls to
->readpage(), just like most filesystems will.
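As a reference for that pattern, a filesystem-side ->readahead could be
built the same way (hypothetical example, not part of this patch;
myfs_readpage stands in for the filesystem's existing ->readpage):

	static void myfs_readahead(struct readahead_control *rac)
	{
		struct page *page;

		/* readahead_page() hands back each locked page in turn
		 * and advances the iterator, exactly as read_pages()
		 * does below; the caller drops the reference.
		 */
		while ((page = readahead_page(rac))) {
			myfs_readpage(rac->file, page);
			put_page(page);
		}
	}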
Signed-off-by: Matthew Wilcox (Oracle) <willy at infradead.org>
Reviewed-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: William Kucharski <william.kucharski at oracle.com>
---
mm/readahead.c | 46 ++++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index ddc63d3b07b8..e52b3a7b9da5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -114,14 +114,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static void read_pages(struct readahead_control *rac, struct list_head *pages,
-		gfp_t gfp)
+		bool skip_page)
 {
 	const struct address_space_operations *aops = rac->mapping->a_ops;
+	struct page *page;
 	struct blk_plug plug;
-	unsigned page_idx;
 
 	if (!readahead_count(rac))
-		return;
+		goto out;
 
 	blk_start_plug(&plug);
 
@@ -130,23 +130,23 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 				readahead_count(rac));
 		/* Clean up the remaining pages */
 		put_pages_list(pages);
-		goto out;
-	}
-
-	for (page_idx = 0; page_idx < readahead_count(rac); page_idx++) {
-		struct page *page = lru_to_page(pages);
-		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, rac->mapping, page->index,
-				gfp))
+		rac->_index += rac->_nr_pages;
+		rac->_nr_pages = 0;
+	} else {
+		while ((page = readahead_page(rac))) {
 			aops->readpage(rac->file, page);
-		put_page(page);
+			put_page(page);
+		}
 	}
 
-out:
 	blk_finish_plug(&plug);
 
 	BUG_ON(!list_empty(pages));
-	rac->_nr_pages = 0;
+	BUG_ON(readahead_count(rac));
+
+out:
+	if (skip_page)
+		rac->_index++;
 }
 
 /*
@@ -168,6 +168,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	struct readahead_control rac = {
 		.mapping = mapping,
 		.file = filp,
+		._index = index,
 	};
 	unsigned long i;
 
@@ -183,6 +184,8 @@ void __do_page_cache_readahead(struct address_space *mapping,
 		if (index + i > end_index)
 			break;
 
+		BUG_ON(index + i != rac._index + rac._nr_pages);
+
 		page = xa_load(&mapping->i_pages, index + i);
 		if (page && !xa_is_value(page)) {
 			/*
@@ -190,15 +193,22 @@ void __do_page_cache_readahead(struct address_space *mapping,
 			 * contiguous pages before continuing with the next
 			 * batch.
 			 */
-			read_pages(&rac, &page_pool, gfp_mask);
+			read_pages(&rac, &page_pool, true);
 			continue;
 		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
-		page->index = index + i;
-		list_add(&page->lru, &page_pool);
+		if (mapping->a_ops->readpages) {
+			page->index = index + i;
+			list_add(&page->lru, &page_pool);
+		} else if (add_to_page_cache_lru(page, mapping, index + i,
+					gfp_mask) < 0) {
+			put_page(page);
+			read_pages(&rac, &page_pool, true);
+			continue;
+		}
+
 		if (i == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
 		rac._nr_pages++;
@@ -209,7 +219,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	 * uptodate then the caller will launch readpage again, and
 	 * will then handle the error.
 	 */
-	read_pages(&rac, &page_pool, gfp_mask);
+	read_pages(&rac, &page_pool, false);
 }
 
 /*
--
2.25.1