[PATCH 16/49] mm/hugetlb: initialize vmemmap optimization in early stage
Muchun Song
songmuchun at bytedance.com
Sun Apr 5 22:52:07 AEST 2026
Move pfn_to_zone() earlier in the file, above hugetlb_vmemmap_init_early(),
so it is available there without a forward declaration.
Populate vmemmap HVO in hugetlb_vmemmap_init_early() for bootmem allocated
huge pages.
The zone information is already available by the time
hugetlb_vmemmap_init_early() runs, so there is no need to defer the HVO
population of the vmemmap until hugetlb_vmemmap_init_late().
This prepares for the removal of hugetlb_vmemmap_init_late().
Signed-off-by: Muchun Song <songmuchun at bytedance.com>
---
mm/hugetlb_vmemmap.c | 38 ++++++++++++++++++++++++--------------
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 50b7123f3bdd..e25c70453928 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -745,6 +745,20 @@ static bool vmemmap_should_optimize_bootmem_page(struct huge_bootmem_page *m)
return true;
}
+static struct zone *pfn_to_zone(int nid, unsigned long pfn)
+{
+	struct zone *zone;
+	enum zone_type zone_type;
+
+	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
+		zone = &NODE_DATA(nid)->node_zones[zone_type];
+		if (zone_spans_pfn(zone, pfn))
+			return zone;
+	}
+
+	return NULL;
+}
+
/*
* Initialize memmap section for a gigantic page, HVO-style.
*/
@@ -752,6 +766,7 @@ void __init hugetlb_vmemmap_init_early(int nid)
{
unsigned long psize, paddr, section_size;
unsigned long ns, i, pnum, pfn, nr_pages;
+ unsigned long start, end;
struct huge_bootmem_page *m = NULL;
void *map;
@@ -761,6 +776,8 @@ void __init hugetlb_vmemmap_init_early(int nid)
section_size = (1UL << PA_SECTION_SHIFT);
list_for_each_entry(m, &huge_boot_pages[nid], list) {
+ struct zone *zone;
+
if (!vmemmap_should_optimize_bootmem_page(m))
continue;
@@ -769,6 +786,13 @@ void __init hugetlb_vmemmap_init_early(int nid)
paddr = virt_to_phys(m);
pfn = PHYS_PFN(paddr);
map = pfn_to_page(pfn);
+ start = (unsigned long)map;
+ end = start + nr_pages * sizeof(struct page);
+ zone = pfn_to_zone(nid, pfn);
+
+ BUG_ON(vmemmap_populate_hvo(start, end, huge_page_order(m->hstate),
+ zone, HUGETLB_VMEMMAP_RESERVE_SIZE));
+ memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE);
pnum = pfn_to_section_nr(pfn);
ns = psize / section_size;
@@ -784,20 +808,6 @@ void __init hugetlb_vmemmap_init_early(int nid)
}
}
-static struct zone *pfn_to_zone(unsigned nid, unsigned long pfn)
-{
- struct zone *zone;
- enum zone_type zone_type;
-
- for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
- zone = &NODE_DATA(nid)->node_zones[zone_type];
- if (zone_spans_pfn(zone, pfn))
- return zone;
- }
-
- return NULL;
-}
-
void __init hugetlb_vmemmap_init_late(int nid)
{
struct huge_bootmem_page *m, *tm;
--
2.20.1
More information about the Linuxppc-dev
mailing list