[PATCH 25/49] mm/sparse-vmemmap: support vmemmap-optimizable compound page population
Muchun Song
songmuchun at bytedance.com
Sun Apr 5 22:52:16 AEST 2026
Previously, vmemmap optimization (HVO) was tightly coupled to HugeTLB
and relied on CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP. With the recent
introduction of a compound page order to struct mem_section, the
optimization can now be generalized to operate at the section level
instead of being HugeTLB-specific.
Refactor the vmemmap population logic to use the new section-level
order information: update vmemmap_pte_populate() to dynamically
allocate or reuse the shared tail page when a section contains
optimizable compound pages.
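In essence, the new allocation path in vmemmap_pte_populate() becomes
the following (a simplified sketch of the hunk below; the
pre-resolved-ptpfn case and error handling are elided):

    if (pte_none(ptep_get(pte))) {
        unsigned long pfn = page_to_pfn((struct page *)addr);
        const struct mem_section *ms = __pfn_to_section(pfn);

        if (vmemmap_page_optimizable((struct page *)addr)) {
            /* Map this chunk of vmemmap to the shared tail page. */
            struct page *tail = vmemmap_get_tail(section_order(ms),
                                                 pfn_to_zone(pfn, node));
            ptpfn = page_to_pfn(tail);
        } else {
            /* Otherwise allocate a private vmemmap page as before. */
            void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
            ptpfn = PHYS_PFN(__pa(p));
        }
        /* ... install the pte pointing at ptpfn ... */
    }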
These changes centralize the HVO logic within the core sparse-vmemmap
code, reducing code duplication and paving the way for unifying the vmemmap
optimization paths for both HugeTLB and DAX.
Signed-off-by: Muchun Song <songmuchun at bytedance.com>
---
include/linux/mmzone.h | 8 ++++-
mm/internal.h | 3 ++
mm/sparse-vmemmap.c | 66 +++++++++++++++++++++++++-----------------
mm/sparse.c | 30 +++++++++++++++++--
4 files changed, 78 insertions(+), 29 deletions(-)
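Note on the accounting change in mm/sparse.c: with
section_vmemmap_pages() generalized, boot-time memmap accounting now
distinguishes three cases. A rough worked example, assuming x86-64
defaults (4K base pages, 64-byte struct page, 128M sections, i.e.
PAGES_PER_SECTION = 32768 and PFN_SECTION_SHIFT = 15):

    Unoptimizable section:
        DIV_ROUND_UP(32768 * 64, 4096) = 512 vmemmap pages

    Optimizable section of order-9 compound pages (order < PFN_SECTION_SHIFT):
        vmemmap_pages * 32768 / 512 = 64 * vmemmap_pages,
        where vmemmap_pages is VMEMMAP_RESERVE_NR or
        OPTIMIZED_FOLIO_VMEMMAP_PAGES depending on vmemmap_can_optimize()

    Compound page covering whole sections (order >= PFN_SECTION_SHIFT):
        vmemmap_pages for the head section, 0 for each tail section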
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 620503aa29ba..e4d37492ca63 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1145,7 +1145,7 @@ struct zone {
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct page *vmemmap_tails[NR_OPTIMIZABLE_FOLIO_SIZES];
#endif
} ____cacheline_internodealigned_in_smp;
@@ -2250,6 +2250,12 @@ static inline unsigned int section_order(const struct mem_section *section)
}
#endif
+static inline bool section_vmemmap_optimizable(const struct mem_section *section)
+{
+ return is_power_of_2(sizeof(struct page)) &&
+ section_order(section) >= OPTIMIZABLE_FOLIO_MIN_ORDER;
+}
+
void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
unsigned long flags);
diff --git a/mm/internal.h b/mm/internal.h
index 1060d7c07f5b..c0d0f546864c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -996,6 +996,9 @@ static inline void __section_mark_present(struct mem_section *ms,
ms->section_mem_map |= SECTION_MARKED_PRESENT;
}
+
+int section_vmemmap_pages(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
#else
static inline void sparse_init(void) {}
#endif /* CONFIG_SPARSEMEM */
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 2a6c3c82f9f5..6522c36aac20 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -144,17 +144,47 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
start, end - 1);
}
+static struct zone __meminit *pfn_to_zone(unsigned long pfn, int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ for (enum zone_type zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
+ struct zone *zone = &pgdat->node_zones[zone_type];
+
+ if (zone_spans_pfn(zone, pfn))
+ return zone;
+ }
+
+ return NULL;
+}
+
+static __meminit struct page *vmemmap_get_tail(unsigned int order, struct zone *zone);
+
static pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
struct vmem_altmap *altmap,
unsigned long ptpfn)
{
pte_t *pte = pte_offset_kernel(pmd, addr);
+
if (pte_none(ptep_get(pte))) {
pte_t entry;
- void *p;
+
+ if (vmemmap_page_optimizable((struct page *)addr) &&
+ ptpfn == (unsigned long)-1) {
+ struct page *page;
+ unsigned long pfn = page_to_pfn((struct page *)addr);
+ const struct mem_section *ms = __pfn_to_section(pfn);
+
+ page = vmemmap_get_tail(section_order(ms),
+ pfn_to_zone(pfn, node));
+ if (!page)
+ return NULL;
+ ptpfn = page_to_pfn(page);
+ }
if (ptpfn == (unsigned long)-1) {
- p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
+ void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
+
if (!p)
return NULL;
ptpfn = PHYS_PFN(__pa(p));
@@ -323,7 +353,6 @@ void vmemmap_wrprotect_hvo(unsigned long addr, unsigned long end,
}
}
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
static __meminit struct page *vmemmap_get_tail(unsigned int order, struct zone *zone)
{
struct page *p, *tail;
@@ -352,6 +381,7 @@ static __meminit struct page *vmemmap_get_tail(unsigned int order, struct zone *
return tail;
}
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end,
unsigned int order, struct zone *zone,
unsigned long headsize)
@@ -404,6 +434,9 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
return vmemmap_populate_compound_pages(start, end, node, pgmap);
for (addr = start; addr < end; addr = next) {
+ unsigned long pfn = page_to_pfn((struct page *)addr);
+ const struct mem_section *ms = __pfn_to_section(pfn);
+
next = pmd_addr_end(addr, end);
pgd = vmemmap_pgd_populate(addr, node);
@@ -419,7 +452,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
return -ENOMEM;
pmd = pmd_offset(pud, addr);
- if (pmd_none(pmdp_get(pmd))) {
+ if (pmd_none(pmdp_get(pmd)) && !section_vmemmap_optimizable(ms)) {
void *p;
p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
@@ -437,8 +470,10 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
*/
return -ENOMEM;
}
- } else if (vmemmap_check_pmd(pmd, node, addr, next))
+ } else if (vmemmap_check_pmd(pmd, node, addr, next)) {
+ VM_BUG_ON(section_vmemmap_optimizable(ms));
continue;
+ }
if (vmemmap_populate_basepages(addr, next, node, altmap, pgmap))
return -ENOMEM;
}
@@ -705,27 +740,6 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
return rc;
}
-static int __meminit section_vmemmap_pages(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
-{
- unsigned int order = pgmap ? pgmap->vmemmap_shift : 0;
- unsigned long pages_per_compound = 1L << order;
-
- VM_BUG_ON(!IS_ALIGNED(pfn | nr_pages, min(pages_per_compound, PAGES_PER_SECTION)));
- VM_BUG_ON(pfn_to_section_nr(pfn) != pfn_to_section_nr(pfn + nr_pages - 1));
-
- if (!vmemmap_can_optimize(altmap, pgmap))
- return DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE);
-
- if (order < PFN_SECTION_SHIFT)
- return VMEMMAP_RESERVE_NR * nr_pages / pages_per_compound;
-
- if (IS_ALIGNED(pfn, pages_per_compound))
- return VMEMMAP_RESERVE_NR;
-
- return 0;
-}
-
/*
* To deactivate a memory region, there are 3 cases to handle:
*
diff --git a/mm/sparse.c b/mm/sparse.c
index cfe4ffd89baf..62659752980e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -345,6 +345,32 @@ static void __init sparse_usage_fini(void)
sparse_usagebuf = sparse_usagebuf_end = NULL;
}
+int __meminit section_vmemmap_pages(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
+{
+ const struct mem_section *ms = __pfn_to_section(pfn);
+ unsigned int order = pgmap ? pgmap->vmemmap_shift : section_order(ms);
+ unsigned long pages_per_compound = 1L << order;
+ unsigned int vmemmap_pages = OPTIMIZED_FOLIO_VMEMMAP_PAGES;
+
+ if (vmemmap_can_optimize(altmap, pgmap))
+ vmemmap_pages = VMEMMAP_RESERVE_NR;
+
+ VM_BUG_ON(!IS_ALIGNED(pfn | nr_pages, min(pages_per_compound, PAGES_PER_SECTION)));
+ VM_BUG_ON(pfn_to_section_nr(pfn) != pfn_to_section_nr(pfn + nr_pages - 1));
+
+ if (!vmemmap_can_optimize(altmap, pgmap) && !section_vmemmap_optimizable(ms))
+ return DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE);
+
+ if (order < PFN_SECTION_SHIFT)
+ return vmemmap_pages * nr_pages / pages_per_compound;
+
+ if (IS_ALIGNED(pfn, pages_per_compound))
+ return vmemmap_pages;
+
+ return 0;
+}
+
/*
* Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
* And number of present sections in this node is map_count.
@@ -376,8 +402,8 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
nid, NULL, NULL);
if (!map)
panic("Populate section (%ld) on node[%d] failed\n", pnum, nid);
- memmap_boot_pages_add(DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page),
- PAGE_SIZE));
+ memmap_boot_pages_add(section_vmemmap_pages(pfn, PAGES_PER_SECTION,
+ NULL, NULL));
sparse_init_early_section(nid, map, pnum, 0);
}
}
--
2.20.1