[PATCH 31/49] mm/hugetlb: remove HUGE_BOOTMEM_HVO flag and simplify pre-HVO logic
Muchun Song
songmuchun at bytedance.com
Sun Apr 5 22:52:22 AEST 2026
The pre-HVO feature optimizes the vmemmap pages of HugeTLB pages that are
allocated from bootmem. Previously, the HUGE_BOOTMEM_HVO flag was used to
record whether such a bootmem page had already been pre-optimized.
However, whether a huge page is pre-optimized can be determined directly by
checking its section's optimization status with
section_vmemmap_optimizable(). Since the vmemmap pre-initialization
mechanism was removed entirely in previous patches, the HUGE_BOOTMEM_HVO
flag and its related checks are now redundant.
By directly using section_vmemmap_optimizable(), we can safely remove
the HUGE_BOOTMEM_HVO flag, clean up the associated state maintenance in
struct huge_bootmem_page, and simplify the bootmem page optimization
checks in the hugetlb initialization path.
Signed-off-by: Muchun Song <songmuchun at bytedance.com>
---
include/linux/hugetlb.h | 5 ++---
mm/hugetlb.c | 16 ++--------------
mm/hugetlb_vmemmap.c | 5 -----
3 files changed, 4 insertions(+), 22 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 23d95ed6121f..6bedeaee9b79 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -695,9 +695,8 @@ struct huge_bootmem_page {
struct cma *cma;
};
-#define HUGE_BOOTMEM_HVO 0x0001
-#define HUGE_BOOTMEM_ZONES_VALID 0x0002
-#define HUGE_BOOTMEM_CMA 0x0004
+#define HUGE_BOOTMEM_ZONES_VALID BIT(0)
+#define HUGE_BOOTMEM_CMA BIT(1)
int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dff94ab7040a..59728e942384 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3193,11 +3193,6 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
prep_compound_head(&folio->page, huge_page_order(h));
}
-static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
-{
- return m->flags & HUGE_BOOTMEM_HVO;
-}
-
/*
* memblock-allocated pageblocks might not have the migrate type set
* if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
@@ -3284,16 +3279,9 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid)
OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS);
init_new_hugetlb_folio(folio);
- if (hugetlb_bootmem_page_prehvo(m)) {
- /*
- * If pre-HVO was done, just set the
- * flag, the HVO code will then skip
- * this folio.
- */
+ if (section_vmemmap_optimizable(__pfn_to_section(folio_pfn(folio))))
folio_set_hugetlb_vmemmap_optimized(folio);
- section_set_order_pfn_range(folio_pfn(folio),
- pages_per_huge_page(h), 0);
- }
+ section_set_order_pfn_range(folio_pfn(folio), folio_nr_pages(folio), 0);
if (hugetlb_bootmem_page_earlycma(m))
folio_set_hugetlb_cma(folio);
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 8c567b8c67cc..a190b9b94346 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -711,12 +711,7 @@ void __init hugetlb_vmemmap_optimize_bootmem_page(struct huge_bootmem_page *m)
if (!READ_ONCE(vmemmap_optimize_enabled))
return;
- if (!hugetlb_vmemmap_optimizable(h))
- return;
-
section_set_order_pfn_range(pfn, pages_per_huge_page(h), huge_page_order(h));
- if (section_vmemmap_optimizable(__pfn_to_section(pfn)))
- m->flags |= HUGE_BOOTMEM_HVO;
}
static const struct ctl_table hugetlb_vmemmap_sysctls[] = {
--
2.20.1
More information about the Linuxppc-dev
mailing list