[PATCH 33/49] mm: introduce CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION
Muchun Song
songmuchun at bytedance.com
Sun Apr 5 22:52:24 AEST 2026
Previously, the vmemmap optimization logic in mm/sparse-vmemmap.c was
closely tied to HugeTLB via CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP.
With recent refactoring (e.g., introducing compound page order to struct
mem_section), the core vmemmap optimization machinery has become more
generic and can be utilized by other subsystems like DAX.
To reflect this generalization and decouple the core optimization logic
from HugeTLB-specific configuration, introduce a new common Kconfig
option: CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION.
Make the HugeTLB and DAX vmemmap-optimization Kconfig symbols
(HUGETLB_PAGE_OPTIMIZE_VMEMMAP and ARCH_WANT_OPTIMIZE_DAX_VMEMMAP)
select this generic option, so that the shared optimization
infrastructure is enabled whenever either subsystem requires it.
Signed-off-by: Muchun Song <songmuchun at bytedance.com>
---
fs/Kconfig | 1 +
include/linux/mmzone.h | 33 ++++++++++++++++++---------------
include/linux/page-flags.h | 5 +----
mm/Kconfig | 5 +++++
4 files changed, 25 insertions(+), 19 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index e70aa5f0429a..9b56a90e13db 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -278,6 +278,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+ select SPARSEMEM_VMEMMAP_OPTIMIZATION
config HUGETLB_PMD_PAGE_TABLE_SHARING
def_bool HUGETLB_PAGE
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75425407e0c4..6edcb0cc46c4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -102,9 +102,9 @@
*
* HVO which is only active if the size of struct page is a power of 2.
*/
-#define MAX_FOLIO_VMEMMAP_ALIGN \
- (IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && \
- is_power_of_2(sizeof(struct page)) ? \
+#define MAX_FOLIO_VMEMMAP_ALIGN \
+ (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION) && \
+ is_power_of_2(sizeof(struct page)) ? \
MAX_FOLIO_NR_PAGES * sizeof(struct page) : 0)
/* The number of vmemmap pages required by a vmemmap-optimized folio. */
@@ -115,7 +115,8 @@
#define __NR_OPTIMIZABLE_FOLIO_SIZES (MAX_FOLIO_ORDER - OPTIMIZABLE_FOLIO_MIN_ORDER + 1)
#define NR_OPTIMIZABLE_FOLIO_SIZES \
- (__NR_OPTIMIZABLE_FOLIO_SIZES > 0 ? __NR_OPTIMIZABLE_FOLIO_SIZES : 0)
+ ((__NR_OPTIMIZABLE_FOLIO_SIZES > 0 && \
+ IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION)) ? __NR_OPTIMIZABLE_FOLIO_SIZES : 0)
enum migratetype {
MIGRATE_UNMOVABLE,
@@ -2014,7 +2015,7 @@ struct mem_section {
*/
struct page_ext *page_ext;
#endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION
/*
* The order of compound pages in this section. Typically, the section
* holds compound pages of this order; a larger compound page will span
@@ -2194,7 +2195,19 @@ static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long
*pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
return true;
}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ return 1;
+}
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+ return true;
+}
+#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION
static inline void section_set_order(struct mem_section *section, unsigned int order)
{
VM_BUG_ON(section->order && order && section->order != order);
@@ -2206,16 +2219,6 @@ static inline unsigned int section_order(const struct mem_section *section)
return section->order;
}
#else
-static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
-{
- return 1;
-}
-
-static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
-{
- return true;
-}
-
static inline void section_set_order(struct mem_section *section, unsigned int order)
{
}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0e03d816e8b9..12665b34586c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -208,14 +208,11 @@ enum pageflags {
static __always_inline bool compound_info_has_mask(void)
{
/*
- * Limit mask usage to HugeTLB vmemmap optimization (HVO) where it
- * makes a difference.
- *
* The approach with mask would work in the wider set of conditions,
* but it requires validating that struct pages are naturally aligned
* for all orders up to the MAX_FOLIO_ORDER, which can be tricky.
*/
- if (!IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP))
+ if (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION))
return false;
return is_power_of_2(sizeof(struct page));
diff --git a/mm/Kconfig b/mm/Kconfig
index 3cce862088f1..e81aa77182b2 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -410,12 +410,17 @@ config SPARSEMEM_VMEMMAP
pfn_to_page and page_to_pfn operations. This is the most
efficient option when sufficient kernel resources are available.
+config SPARSEMEM_VMEMMAP_OPTIMIZATION
+ bool
+ depends on SPARSEMEM_VMEMMAP
+
#
# Select this config option from the architecture Kconfig, if it is preferred
# to enable the feature of HugeTLB/dev_dax vmemmap optimization.
#
config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
bool
+ select SPARSEMEM_VMEMMAP_OPTIMIZATION if SPARSEMEM_VMEMMAP
config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
bool
--
2.20.1
More information about the Linuxppc-dev
mailing list