[PATCH V6 1/2] mm/page_alloc: Make alloc_gigantic_page() available for general use

Anshuman Khandual anshuman.khandual at arm.com
Tue Oct 15 20:21:41 AEDT 2019


alloc_gigantic_page() implements an allocation method that scans over
various zones, looking for a large contiguous memory block that could not
be allocated through the buddy allocator. A subsequent patch which tests
arch page table helpers needs such a method to allocate a PUD_SIZE sized
memory block. In the future such methods might have other use cases as
well. So the actual memory allocation part of alloc_gigantic_page() has
been carved out into a new alloc_gigantic_page_order(), which is made
available under CONFIG_CONTIG_ALLOC.
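
For illustration, a prospective caller could use the new helper roughly
as below. This is a minimal sketch only; the PUD level test caller and
its error handling are hypothetical, while alloc_gigantic_page_order()
and free_contig_range() are the interfaces touched by this patch:

	struct page *page;
	unsigned int order = get_order(PUD_SIZE);

	/* Try to grab a PUD_SIZE aligned contiguous block */
	page = alloc_gigantic_page_order(order, GFP_KERNEL,
					 first_online_node,
					 &node_states[N_ONLINE]);
	if (!page)
		return;		/* no contiguous block, skip the test */

	/* ... exercise PUD level page table helpers on the block ... */

	/* Blocks from alloc_contig_range() must be freed this way */
	free_contig_range(page_to_pfn(page), 1 << order);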

Cc: Andrew Morton <akpm at linux-foundation.org>
Cc: Vlastimil Babka <vbabka at suse.cz>
Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
Cc: Thomas Gleixner <tglx at linutronix.de>
Cc: Mike Rapoport <rppt at linux.vnet.ibm.com>
Cc: Mike Kravetz <mike.kravetz at oracle.com>
Cc: Jason Gunthorpe <jgg at ziepe.ca>
Cc: Dan Williams <dan.j.williams at intel.com>
Cc: Peter Zijlstra <peterz at infradead.org>
Cc: Michal Hocko <mhocko at kernel.org>
Cc: Mark Rutland <mark.rutland at arm.com>
Cc: Mark Brown <broonie at kernel.org>
Cc: Steven Price <Steven.Price at arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel at linaro.org>
Cc: Masahiro Yamada <yamada.masahiro at socionext.com>
Cc: Kees Cook <keescook at chromium.org>
Cc: Tetsuo Handa <penguin-kernel at i-love.sakura.ne.jp>
Cc: Matthew Wilcox <willy at infradead.org>
Cc: Sri Krishna chowdary <schowdary at nvidia.com>
Cc: Dave Hansen <dave.hansen at intel.com>
Cc: Russell King - ARM Linux <linux at armlinux.org.uk>
Cc: Michael Ellerman <mpe at ellerman.id.au>
Cc: Paul Mackerras <paulus at samba.org>
Cc: Martin Schwidefsky <schwidefsky at de.ibm.com>
Cc: Heiko Carstens <heiko.carstens at de.ibm.com>
Cc: "David S. Miller" <davem at davemloft.net>
Cc: Vineet Gupta <vgupta at synopsys.com>
Cc: James Hogan <jhogan at kernel.org>
Cc: Paul Burton <paul.burton at mips.com>
Cc: Ralf Baechle <ralf at linux-mips.org>
Cc: Kirill A. Shutemov <kirill at shutemov.name>
Cc: Gerald Schaefer <gerald.schaefer at de.ibm.com>
Cc: Christophe Leroy <christophe.leroy at c-s.fr>
Cc: David Rientjes <rientjes at google.com>
Cc: Andrea Arcangeli <aarcange at redhat.com>
Cc: Oscar Salvador <osalvador at suse.de>
Cc: Mel Gorman <mgorman at techsingularity.net>
Cc: linux-snps-arc at lists.infradead.org
Cc: linux-mips at vger.kernel.org
Cc: linux-arm-kernel at lists.infradead.org
Cc: linux-ia64 at vger.kernel.org
Cc: linuxppc-dev at lists.ozlabs.org
Cc: linux-s390 at vger.kernel.org
Cc: linux-sh at vger.kernel.org
Cc: sparclinux at vger.kernel.org
Cc: x86 at kernel.org
Cc: linux-kernel at vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual at arm.com>
---
 include/linux/gfp.h |  3 ++
 mm/hugetlb.c        | 76 +----------------------------------
 mm/page_alloc.c     | 98 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 102 insertions(+), 75 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..379ad23437d1 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -589,6 +589,9 @@ static inline bool pm_suspended_storage(void)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype, gfp_t gfp_mask);
+extern struct page *alloc_gigantic_page_order(unsigned int order,
+					      gfp_t gfp_mask, int nid,
+					      nodemask_t *nodemask);
 #endif
 void free_contig_range(unsigned long pfn, unsigned int nr_pages);
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 977f9a323a7a..d199556a4a2c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1023,86 +1023,12 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
-static int __alloc_gigantic_page(unsigned long start_pfn,
-				unsigned long nr_pages, gfp_t gfp_mask)
-{
-	unsigned long end_pfn = start_pfn + nr_pages;
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
-				  gfp_mask);
-}
-
-static bool pfn_range_valid_gigantic(struct zone *z,
-			unsigned long start_pfn, unsigned long nr_pages)
-{
-	unsigned long i, end_pfn = start_pfn + nr_pages;
-	struct page *page;
-
-	for (i = start_pfn; i < end_pfn; i++) {
-		if (!pfn_valid(i))
-			return false;
-
-		page = pfn_to_page(i);
-
-		if (page_zone(page) != z)
-			return false;
-
-		if (PageReserved(page))
-			return false;
-
-		if (page_count(page) > 0)
-			return false;
-
-		if (PageHuge(page))
-			return false;
-	}
-
-	return true;
-}
-
-static bool zone_spans_last_pfn(const struct zone *zone,
-			unsigned long start_pfn, unsigned long nr_pages)
-{
-	unsigned long last_pfn = start_pfn + nr_pages - 1;
-	return zone_spans_pfn(zone, last_pfn);
-}
-
 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	unsigned int order = huge_page_order(h);
-	unsigned long nr_pages = 1 << order;
-	unsigned long ret, pfn, flags;
-	struct zonelist *zonelist;
-	struct zone *zone;
-	struct zoneref *z;
-
-	zonelist = node_zonelist(nid, gfp_mask);
-	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
-		spin_lock_irqsave(&zone->lock, flags);
 
-		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
-		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
-			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
-				/*
-				 * We release the zone lock here because
-				 * alloc_contig_range() will also lock the zone
-				 * at some point. If there's an allocation
-				 * spinning on this lock, it may win the race
-				 * and cause alloc_contig_range() to fail...
-				 */
-				spin_unlock_irqrestore(&zone->lock, flags);
-				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
-				if (!ret)
-					return pfn_to_page(pfn);
-				spin_lock_irqsave(&zone->lock, flags);
-			}
-			pfn += nr_pages;
-		}
-
-		spin_unlock_irqrestore(&zone->lock, flags);
-	}
-
-	return NULL;
+	return alloc_gigantic_page_order(order, gfp_mask, nid, nodemask);
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6ab8eb670fd3..0f67367213c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8497,6 +8497,104 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 				pfn_max_align_up(end), migratetype);
 	return ret;
 }
+
+static int __alloc_gigantic_page(unsigned long start_pfn,
+				 unsigned long nr_pages, gfp_t gfp_mask)
+{
+	unsigned long end_pfn = start_pfn + nr_pages;
+
+	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+				  gfp_mask);
+}
+
+static bool pfn_range_valid_gigantic(struct zone *z, unsigned long start_pfn,
+				     unsigned long nr_pages)
+{
+	unsigned long i, end_pfn = start_pfn + nr_pages;
+	struct page *page;
+
+	for (i = start_pfn; i < end_pfn; i++) {
+		if (!pfn_valid(i))
+			return false;
+
+		page = pfn_to_page(i);
+
+		if (page_zone(page) != z)
+			return false;
+
+		if (PageReserved(page))
+			return false;
+
+		if (page_count(page) > 0)
+			return false;
+
+		if (PageHuge(page))
+			return false;
+	}
+	return true;
+}
+
+static bool zone_spans_last_pfn(const struct zone *zone,
+				unsigned long start_pfn, unsigned long nr_pages)
+{
+	unsigned long last_pfn = start_pfn + nr_pages - 1;
+
+	return zone_spans_pfn(zone, last_pfn);
+}
+
+/**
+ * alloc_gigantic_page_order() - tries to allocate a given order of pages
+ * @order:	allocation order (greater than MAX_ORDER)
+ * @gfp_mask:	GFP mask to limit zonelist search and use during compaction
+ * @nid:	allocation node
+ * @nodemask:	allocation nodemask
+ *
+ * This routine is a wrapper around alloc_contig_range() which scans over
+ * all zones on an applicable zonelist to find a contiguous pfn range that
+ * can then be allocated with alloc_contig_range(). This routine is intended
+ * for allocations of order greater than MAX_ORDER.
+ *
+ * Return: page on success or NULL on failure. On success, a memory block
+ * of 'order' pages starting at 'page' has been allocated. Memory allocated
+ * here needs to be freed with free_contig_range().
+ */
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+				       int nid, nodemask_t *nodemask)
+{
+	unsigned long nr_pages = 1 << order;
+	unsigned long ret, pfn, flags;
+	struct zonelist *zonelist;
+	struct zone *zone;
+	struct zoneref *z;
+
+	zonelist = node_zonelist(nid, gfp_mask);
+	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+					gfp_zone(gfp_mask), nodemask) {
+		spin_lock_irqsave(&zone->lock, flags);
+
+		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
+		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
+			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
+				/*
+				 * We release the zone lock here because
+				 * alloc_contig_range() will also lock the zone
+				 * at some point. If there's an allocation
+				 * spinning on this lock, it may win the race
+				 * and cause alloc_contig_range() to fail...
+				 */
+				spin_unlock_irqrestore(&zone->lock, flags);
+				ret = __alloc_gigantic_page(pfn, nr_pages,
+							gfp_mask);
+				if (!ret)
+					return pfn_to_page(pfn);
+				spin_lock_irqsave(&zone->lock, flags);
+			}
+			pfn += nr_pages;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+	return NULL;
+}
 #endif /* CONFIG_CONTIG_ALLOC */
 
 void free_contig_range(unsigned long pfn, unsigned int nr_pages)
-- 
2.20.1


