[PATCH 3/5] powerpc: Always panic if lmb_alloc() fails

Michael Ellerman <michael at ellerman.id.au>
Wed Jan 25 19:31:28 EST 2006


Currently most callers of lmb_alloc() don't check whether it succeeded. If it
ever fails, weird bad things will probably happen. The few callers that do
check just panic or BUG_ON.

So make lmb_alloc() panic internally, to catch bugs at the source. The few
callers that did check the result no longer need to.

The only caller that did anything interesting with the return value was
careful_allocation(). For it we add __lmb_alloc_base(), which does _not_ panic
automatically; a little messy, but passable.

Signed-off-by: Michael Ellerman <michael at ellerman.id.au>
---

 arch/powerpc/kernel/prom.c       |    4 ----
 arch/powerpc/mm/hash_utils_64.c  |    1 -
 arch/powerpc/mm/lmb.c            |   14 ++++++++++++++
 arch/powerpc/mm/mem.c            |    1 -
 arch/powerpc/mm/numa.c           |    4 ++--
 arch/powerpc/mm/stab.c           |    4 ----
 arch/powerpc/sysdev/dart_iommu.c |    2 --
 include/asm-powerpc/lmb.h        |    2 ++
 8 files changed, 18 insertions(+), 14 deletions(-)
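
For illustration, here is a minimal standalone sketch of the calling
convention this patch sets up. The toy placement logic, the panic() stub and
the example numbers are made up; only the function names and the
"failure returns 0" convention mirror the patch:

	/*
	 * Standalone model of the pattern introduced by this patch:
	 * __lmb_alloc_base() returns 0 on failure and lets the caller decide,
	 * lmb_alloc_base() wraps it and panics, so ordinary callers need no
	 * error check.  Simplified illustration, not the kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	/* stand-in for the kernel's panic() */
	static void panic(const char *msg)
	{
		fprintf(stderr, "panic: %s\n", msg);
		exit(1);
	}

	/* model of __lmb_alloc_base(): returns 0 if nothing fits below max_addr */
	static unsigned long __lmb_alloc_base(unsigned long size,
					      unsigned long align,
					      unsigned long max_addr)
	{
		(void)align;
		return size < max_addr ? max_addr - size : 0;	/* toy placement */
	}

	/* model of lmb_alloc_base(): failure is fatal, callers needn't check */
	static unsigned long lmb_alloc_base(unsigned long size,
					    unsigned long align,
					    unsigned long max_addr)
	{
		unsigned long alloc = __lmb_alloc_base(size, align, max_addr);

		if (alloc == 0)
			panic("failed to allocate below max_addr");
		return alloc;
	}

	int main(void)
	{
		/* typical caller after this patch: no error check needed */
		unsigned long table = lmb_alloc_base(0x1000, 0x1000, 0x100000);

		/* careful_allocation() style: try low first, then retry anywhere */
		unsigned long ret = __lmb_alloc_base(0x1000, 0x1000, 0x800);
		if (!ret)
			ret = __lmb_alloc_base(0x1000, 0x1000, 0x100000);
		if (!ret)
			panic("cannot allocate anywhere");

		printf("table=%#lx ret=%#lx\n", table, ret);
		return 0;
	}

careful_allocation() keeps calling the non-panicking __lmb_alloc_base()
because it implements its own fallback (retry over all of memory) before
giving up, as the numa.c hunk below shows.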

Index: linux/arch/powerpc/mm/lmb.c
===================================================================
--- linux.orig/arch/powerpc/mm/lmb.c
+++ linux/arch/powerpc/mm/lmb.c
@@ -226,6 +226,20 @@ unsigned long __init lmb_alloc(unsigned 
 unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
 				    unsigned long max_addr)
 {
+	unsigned long alloc;
+
+	alloc = __lmb_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+				size, max_addr);
+
+	return alloc;
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+				    unsigned long max_addr)
+{
 	long i, j;
 	unsigned long base = 0;
 
Index: linux/arch/powerpc/kernel/prom.c
===================================================================
--- linux.orig/arch/powerpc/kernel/prom.c
+++ linux/arch/powerpc/kernel/prom.c
@@ -831,10 +831,6 @@ void __init unflatten_device_tree(void)
 
 	/* Allocate memory for the expanded device tree */
 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-	if (!mem) {
-		DBG("Couldn't allocate memory with lmb_alloc()!\n");
-		panic("Couldn't allocate memory with lmb_alloc()!\n");
-	}
 	mem = (unsigned long) __va(mem);
 
 	((u32 *)mem)[size / 4] = 0xdeadbeef;
Index: linux/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- linux.orig/arch/powerpc/mm/hash_utils_64.c
+++ linux/arch/powerpc/mm/hash_utils_64.c
@@ -430,7 +430,6 @@ void __init htab_initialize(void)
 		 * the absolute address space.
 		 */
 		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
-		BUG_ON(table == 0);
 
 		DBG("Hash table allocated at %lx, size: %lx\n", table,
 		    htab_size_bytes);
Index: linux/arch/powerpc/mm/mem.c
===================================================================
--- linux.orig/arch/powerpc/mm/mem.c
+++ linux/arch/powerpc/mm/mem.c
@@ -249,7 +249,6 @@ void __init do_init_bootmem(void)
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
 	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-	BUG_ON(!start);
 
 	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
 
Index: linux/arch/powerpc/mm/numa.c
===================================================================
--- linux.orig/arch/powerpc/mm/numa.c
+++ linux/arch/powerpc/mm/numa.c
@@ -570,11 +570,11 @@ static void __init *careful_allocation(i
 				       unsigned long end_pfn)
 {
 	int new_nid;
-	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret)
-		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
 	if (!ret)
 		panic("numa.c: cannot allocate %lu bytes on node %d",
Index: linux/arch/powerpc/mm/stab.c
===================================================================
--- linux.orig/arch/powerpc/mm/stab.c
+++ linux/arch/powerpc/mm/stab.c
@@ -247,10 +247,6 @@ void stabs_alloc(void)
 
 		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
-		if (! newstab)
-			panic("Unable to allocate segment table for CPU %d.\n",
-			      cpu);
-
 		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
Index: linux/arch/powerpc/sysdev/dart_iommu.c
===================================================================
--- linux.orig/arch/powerpc/sysdev/dart_iommu.c
+++ linux/arch/powerpc/sysdev/dart_iommu.c
@@ -194,8 +194,6 @@ static int dart_init(struct device_node 
 	 * prefetching into invalid pages and corrupting data
 	 */
 	tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
-	if (!tmp)
-		panic("DART: Cannot allocate spare page!");
 	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
 					 DARTMAP_RPNMASK);
 
Index: linux/include/asm-powerpc/lmb.h
===================================================================
--- linux.orig/include/asm-powerpc/lmb.h
+++ linux/include/asm-powerpc/lmb.h
@@ -48,6 +48,8 @@ extern long __init lmb_reserve(unsigned 
 extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
 extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
 					   unsigned long);
+extern unsigned long __init __lmb_alloc_base(unsigned long, unsigned long,
+					   unsigned long);
 extern unsigned long __init lmb_phys_mem_size(void);
 extern unsigned long __init lmb_end_of_DRAM(void);
 extern unsigned long __init lmb_abs_to_phys(unsigned long);
