[PATCH 46/49] mm/sparse: replace pgmap with order and zone in sparse_add_section()

Muchun Song songmuchun at bytedance.com
Sun Apr 5 22:52:37 AEST 2026


The sparse_add_section() function used its struct dev_pagemap
argument only to extract the vmemmap_shift value (to set the compound
page order for vmemmap optimization) and to set the zone to ZONE_DEVICE.

Since the full struct dev_pagemap is not needed here and is being
removed from the rest of the vmemmap APIs, replace the pgmap parameter
with direct order and zone parameters in sparse_add_section().

This cleanly decouples the sparse memory infrastructure from the
ZONE_DEVICE struct dev_pagemap. The main motivation behind this
decoupling is to make sparse_add_section() a more generic memory
population interface that can be easily reused for other
non-ZONE_DEVICE population use cases in the future.

Signed-off-by: Muchun Song <songmuchun at bytedance.com>
---
 include/linux/memory_hotplug.h |  2 +-
 mm/memory_hotplug.c            | 10 ++++++++--
 mm/sparse-vmemmap.c            | 14 +++++++-------
 3 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 815e908c4135..089052d64b01 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -305,7 +305,7 @@ extern void remove_pfn_range_from_zone(struct zone *zone,
 				       unsigned long nr_pages);
 extern int sparse_add_section(int nid, unsigned long pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap,
-		struct dev_pagemap *pgmap);
+		unsigned int order, enum zone_type zone);
 extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
 				  struct vmem_altmap *altmap);
 extern struct zone *zone_for_pfn_range(enum mmop online_type,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 68dd56dd9f74..0f7707f3d4bb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -385,10 +385,17 @@ int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 	unsigned long cur_nr_pages;
 	int err;
 	struct vmem_altmap *altmap = params->altmap;
+	unsigned int order = params->pgmap ? params->pgmap->vmemmap_shift : 0;
+	enum zone_type zid = 0;
 
 	if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
 		return -EINVAL;
 
+#ifdef CONFIG_ZONE_DEVICE
+	if (params->pgmap)
+		zid = ZONE_DEVICE;
+#endif
+
 	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
 
 	if (altmap) {
@@ -412,8 +419,7 @@ int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 		/* Select all remaining pages up to the next section boundary */
 		cur_nr_pages = min(end_pfn - pfn,
 				   SECTION_ALIGN_UP(pfn + 1) - pfn);
-		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
-					 params->pgmap);
+		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap, order, zid);
 		if (err)
 			break;
 		cond_resched();
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 68dcc52591d5..894352cb8957 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -666,7 +666,8 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
  * @start_pfn: start pfn of the memory range
  * @nr_pages: number of pfns to add in the section
  * @altmap: alternate pfns to allocate the memmap backing store
- * @pgmap: alternate compound page geometry for devmap mappings
+ * @order: section order
+ * @zone: section zone. Note that it is ignored when @order is 0.
  *
  * This is only intended for hotplug.
  *
@@ -681,7 +682,7 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
  */
 int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap,
-		struct dev_pagemap *pgmap)
+		unsigned int order, enum zone_type zone)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section *ms;
@@ -693,11 +694,10 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 		return ret;
 
 	ms = __nr_to_section(section_nr);
-	if (!altmap && pgmap && nr_pages == PAGES_PER_SECTION) {
-		section_set_order(ms, pgmap->vmemmap_shift);
-#ifdef CONFIG_ZONE_DEVICE
-		section_set_zone(ms, ZONE_DEVICE);
-#endif
+	/* HVO is not supported when memmap pages are backed by an altmap. */
+	if (!altmap && nr_pages == PAGES_PER_SECTION && order) {
+		section_set_order(ms, order);
+		section_set_zone(ms, zone);
 	}
 	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
 	if (IS_ERR(memmap))
-- 
2.20.1



More information about the Linuxppc-dev mailing list