[PATCH 30/49] mm/sparse-vmemmap: remove unused SPARSEMEM_VMEMMAP_PREINIT feature
Muchun Song
songmuchun at bytedance.com
Sun Apr 5 22:52:21 AEST 2026
Since the bootmem vmemmap optimization has been reimplemented to use
the new early compound vmemmap infrastructure, the old
SPARSEMEM_VMEMMAP_PREINIT feature and its related code (e.g.,
sparse_vmemmap_init_nid_early() and preinited_vmemmap_section()) are
no longer used.
Remove them to clean up the code.
Signed-off-by: Muchun Song <songmuchun at bytedance.com>
---
arch/x86/Kconfig | 1 -
fs/Kconfig | 1 -
include/linux/mmzone.h | 25 -------------------------
mm/Kconfig | 5 -----
mm/sparse-vmemmap.c | 13 -------------
mm/sparse.c | 23 ++++++++---------------
6 files changed, 8 insertions(+), 60 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 99bb5217649a..f19625648f0f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -148,7 +148,6 @@ config X86
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64
- select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64
select ARCH_WANTS_THP_SWAP if X86_64
select ARCH_HAS_PARANOID_L1D_FLUSH
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
diff --git a/fs/Kconfig b/fs/Kconfig
index 43cb06de297f..e70aa5f0429a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -278,7 +278,6 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
- select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
config HUGETLB_PMD_PAGE_TABLE_SHARING
def_bool HUGETLB_PAGE
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0bd20efac427..75425407e0c4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2078,9 +2078,6 @@ enum {
SECTION_IS_EARLY_BIT,
#ifdef CONFIG_ZONE_DEVICE
SECTION_TAINT_ZONE_DEVICE_BIT,
-#endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
- SECTION_IS_VMEMMAP_PREINIT_BIT,
#endif
SECTION_MAP_LAST_BIT,
};
@@ -2092,9 +2089,6 @@ enum {
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT)
-#endif
#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
@@ -2149,24 +2143,6 @@ static inline int online_device_section(const struct mem_section *section)
}
#endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-static inline int preinited_vmemmap_section(const struct mem_section *section)
-{
- return (section &&
- (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
-}
-
-void sparse_vmemmap_init_nid_early(int nid);
-#else
-static inline int preinited_vmemmap_section(const struct mem_section *section)
-{
- return 0;
-}
-static inline void sparse_vmemmap_init_nid_early(int nid)
-{
-}
-#endif
-
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
@@ -2407,7 +2383,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#endif
#else
-#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#endif /* CONFIG_SPARSEMEM */
diff --git a/mm/Kconfig b/mm/Kconfig
index e8bf1e9e6ad9..3cce862088f1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -410,8 +410,6 @@ config SPARSEMEM_VMEMMAP
pfn_to_page and page_to_pfn operations. This is the most
efficient option when sufficient kernel resources are available.
-config SPARSEMEM_VMEMMAP_PREINIT
- bool
#
# Select this config option from the architecture Kconfig, if it is preferred
# to enable the feature of HugeTLB/dev_dax vmemmap optimization.
@@ -422,9 +420,6 @@ config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
bool
-config ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
- bool
-
config HAVE_MEMBLOCK_PHYS_MAP
bool
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 9da49b0d03f0..c35d912a1fef 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -543,19 +543,6 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn,
return pfn_to_page(pfn);
}
-#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-/*
- * This is called just before initializing sections for a NUMA node.
- * Any special initialization that needs to be done before the
- * generic initialization can be done from here. Sections that
- * are initialized in hooks called from here will be skipped by
- * the generic initialization.
- */
-void __init sparse_vmemmap_init_nid_early(int nid)
-{
-}
-#endif
-
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
unsigned long nr_pages)
{
diff --git a/mm/sparse.c b/mm/sparse.c
index 7779554c5a0c..04c641b97325 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -385,27 +385,20 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
panic("The node[%d] usemap allocation failed\n", nid);
sparse_buffer_init(map_count * section_map_size(), nid);
- sparse_vmemmap_init_nid_early(nid);
-
for_each_present_section_nr(pnum_begin, pnum) {
- struct mem_section *ms;
unsigned long pfn = section_nr_to_pfn(pnum);
+ struct page *map;
if (pnum >= pnum_end)
break;
- ms = __nr_to_section(pnum);
- if (!preinited_vmemmap_section(ms)) {
- struct page *map;
-
- map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
- nid, NULL, NULL);
- if (!map)
- panic("Populate section (%ld) on node[%d] failed\n", pnum, nid);
- memmap_boot_pages_add(section_vmemmap_pages(pfn, PAGES_PER_SECTION,
- NULL, NULL));
- sparse_init_early_section(nid, map, pnum, 0);
- }
+ map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+ nid, NULL, NULL);
+ if (!map)
+ panic("Populate section (%ld) on node[%d] failed\n", pnum, nid);
+ memmap_boot_pages_add(section_vmemmap_pages(pfn, PAGES_PER_SECTION,
+ NULL, NULL));
+ sparse_init_early_section(nid, map, pnum, 0);
}
sparse_usage_fini();
sparse_buffer_fini();
--
2.20.1
More information about the Linuxppc-dev
mailing list