[PATCH 2/2] Cleanup loops using lmb regions

Gustavo Sverzut Barbieri gsbarb at br.ibm.com
Thu Sep 29 04:25:20 EST 2005


This is the major change: it converts the loops that walk the lmb region
arrays (lmb.memory and lmb.reserved) to use the new macros (a usage sketch
follows the list):

	for_each_lmb_region()
	for_each_lmb_region_reversed()
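
As a minimal sketch of the conversion, this is how the lmb_analyze() loop
ends up (the "before" form is the current code, the "after" form is what the
patch below produces):

	/* before */
	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;

	/* after */
	struct lmb_property *prop;

	for_each_lmb_region(lmb.memory, prop)
		lmb.memory.size += prop->size;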

As Dave already told me in private, although this is a cleanup it actually
adds code.  I couldn't avoid that, since some places, such as
lmb_coalesce_regions(), start counting from a position != 0 (see the sketch
below).
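
Roughly, a loop that used to start at r2 now walks the whole region and skips
the leading entries; simplified from the lmb_coalesce_regions() hunk below
(p2 points at element r2):

	struct lmb_property *p2 = &rgn->region[r2];
	struct lmb_property *prop;

	for_each_lmb_region(*rgn, prop) {
		if (prop < p2)
			continue;	/* skip entries before r2 */
		/* ... shift the remaining entries down by one ... */
	}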

Any ideas are welcome.

PS: It was only tested on an OpenPower 720.

-- 
Gustavo Sverzut Barbieri
------------------------
LTC - Brazil
-------------- next part --------------
Subject: [PATCH 2/2] Cleanup loops using lmb regions

---

 arch/ppc64/kernel/lmb.c    |  154 ++++++++++++++++++++++++++------------------
 arch/ppc64/mm/hash_utils.c |    9 +--
 arch/ppc64/mm/init.c       |   63 +++++++-----------
 arch/ppc64/mm/numa.c       |    8 +-
 include/asm-ppc64/lmb.h    |   33 +++++++--
 5 files changed, 148 insertions(+), 119 deletions(-)

97f36e75ea988eaa6d4b6cfdaf568799fdcf2672
diff --git a/arch/ppc64/kernel/lmb.c b/arch/ppc64/kernel/lmb.c
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/ppc64/kernel/lmb.c
@@ -22,34 +22,45 @@
 
 struct lmb lmb;
 
+static inline int
+lmb_region_valid(struct lmb_region *region, struct lmb_property *property)
+{
+	return ((property >= region->region) &&
+		(lmb_region_index(region, property) < region->cnt));
+}
+
+
 #undef DEBUG
 
 void lmb_dump_all(void)
 {
 #ifdef DEBUG
-	unsigned long i;
+	struct lmb_property *prop;
 
 	udbg_printf("lmb_dump_all:\n");
 	udbg_printf("    memory.cnt		  = 0x%lx\n",
 		    lmb.memory.cnt);
 	udbg_printf("    memory.size		  = 0x%lx\n",
 		    lmb.memory.size);
-	for (i=0; i < lmb.memory.cnt ;i++) {
+
+	for_each_lmb_region(lmb.memory, prop) {
+		unsigned long i = lmb_region_index(&(lmb.memory), prop);
 		udbg_printf("    memory.region[0x%x].base       = 0x%lx\n",
-			    i, lmb.memory.region[i].base);
+			    i, prop->base);
 		udbg_printf("		      .size     = 0x%lx\n",
-			    lmb.memory.region[i].size);
+			    prop->size);
 	}
 
 	udbg_printf("\n    reserved.cnt	  = 0x%lx\n",
 		    lmb.reserved.cnt);
 	udbg_printf("    reserved.size	  = 0x%lx\n",
 		    lmb.reserved.size);
-	for (i=0; i < lmb.reserved.cnt ;i++) {
+	for_each_lmb_region(lmb.reserved, prop) {
+		unsigned long i = lmb_region_index(&(lmb.reserved), prop);
 		udbg_printf("    reserved.region[0x%x].base       = 0x%lx\n",
-			    i, lmb.reserved.region[i].base);
+			    i, prop->base);
 		udbg_printf("		      .size     = 0x%lx\n",
-			    lmb.reserved.region[i].size);
+			    prop->size);
 	}
 #endif /* DEBUG */
 }
@@ -88,12 +99,24 @@ lmb_regions_adjacent(struct lmb_region *
 static void __init
 lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
 {
-	unsigned long i;
-
-	rgn->region[r1].size += rgn->region[r2].size;
-	for (i=r2; i < rgn->cnt-1; i++) {
-		rgn->region[i].base = rgn->region[i+1].base;
-		rgn->region[i].size = rgn->region[i+1].size;
+	struct lmb_property *prop;
+	struct lmb_property *p1 = &rgn->region[r1];
+	struct lmb_property *p2 = &rgn->region[r2];
+
+	BUG_ON(p1->base >= p2->base);
+
+	p1->size += p2->size;
+
+	for_each_lmb_region(*rgn, prop) {
+		if (prop < p2)
+			continue; /* only act from r2 to the end */
+		else {
+			struct lmb_property *next_prop = prop + 1;
+			if (lmb_region_valid(rgn, next_prop)) {
+				prop->base = next_prop->base;
+				prop->size = next_prop->size;
+			}
+		}
 	}
 	rgn->cnt--;
 }
@@ -119,12 +142,12 @@ lmb_init(void)
 void __init
 lmb_analyze(void)
 {
-	int i;
+	struct lmb_property *prop;
 
 	lmb.memory.size = 0;
 
-	for (i = 0; i < lmb.memory.cnt; i++)
-		lmb.memory.size += lmb.memory.region[i].size;
+	for_each_lmb_region(lmb.memory, prop)
+		lmb.memory.size += prop->size;
 }
 
 /* This routine called with relocation disabled. */
@@ -133,26 +156,26 @@ lmb_add_region(struct lmb_region *rgn, u
 {
 	unsigned long i, coalesced = 0;
 	long adjacent;
+	struct lmb_property *prop;
 
 	/* First try and coalesce this LMB with another. */
-	for (i=0; i < rgn->cnt; i++) {
-		unsigned long rgnbase = rgn->region[i].base;
-		unsigned long rgnsize = rgn->region[i].size;
-
-		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
+	for_each_lmb_region(*rgn, prop) {
+		adjacent = lmb_addrs_adjacent(base, size,
+					      prop->base, prop->size);
 		if ( adjacent > 0 ) {
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
+			prop->base -= size;
+			prop->size += size;
 			coalesced++;
 			break;
 		}
 		else if ( adjacent < 0 ) {
-			rgn->region[i].size += size;
+			prop->size += size;
 			coalesced++;
 			break;
 		}
 	}
 
+	i = lmb_region_index(rgn, prop);
 	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
 		lmb_coalesce_regions(rgn, i, i+1);
 		coalesced++;
@@ -165,13 +188,14 @@ lmb_add_region(struct lmb_region *rgn, u
 	}
 
 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
-	for (i=rgn->cnt-1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i+1].base = rgn->region[i].base;
-			rgn->region[i+1].size = rgn->region[i].size;
+	for_each_lmb_region_reversed(*rgn, prop) {
+		struct lmb_property *next_prop = prop + 1;
+		if (base < prop->base) {
+			next_prop->base = prop->base;
+			next_prop->size = prop->size;
 		}  else {
-			rgn->region[i+1].base = base;
-			rgn->region[i+1].size = size;
+			next_prop->base = base;
+			next_prop->size = size;
 			break;
 		}
 	}
@@ -205,17 +229,14 @@ lmb_reserve(unsigned long base, unsigned
 long __init
 lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
 {
-	unsigned long i;
+	struct lmb_property *prop;
 
-	for (i=0; i < rgn->cnt; i++) {
-		unsigned long rgnbase = rgn->region[i].base;
-		unsigned long rgnsize = rgn->region[i].size;
-		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
-			break;
-		}
+	for_each_lmb_region(*rgn, prop) {
+		if (lmb_addrs_overlap(base, size, prop->base, prop->size))
+			return lmb_region_index(rgn, prop);
 	}
 
-	return (i < rgn->cnt) ? i : -1;
+	return -1;
 }
 
 unsigned long __init
@@ -227,35 +248,39 @@ lmb_alloc(unsigned long size, unsigned l
 unsigned long __init
 lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
 {
-	long i, j;
 	unsigned long base = 0;
+	struct lmb_property *prop;
 
-	for (i=lmb.memory.cnt-1; i >= 0; i--) {
-		unsigned long lmbbase = lmb.memory.region[i].base;
-		unsigned long lmbsize = lmb.memory.region[i].size;
+	for_each_lmb_region_reversed(lmb.memory, prop) {
+		unsigned long addr;
+		unsigned long end = prop->base + prop->size;
 
 		if ( max_addr == LMB_ALLOC_ANYWHERE )
-			base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
-		else if ( lmbbase < max_addr )
-			base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
+			addr = end;
+		else if (prop->base < max_addr)
+			addr = min(end, max_addr);
 		else
 			continue;
 
-		while ( (lmbbase <= base) &&
-			((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
-			base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
-		}
+		base = _ALIGN_DOWN(addr - size, align);
 
-		if ( (base != 0) && (lmbbase <= base) )
-			break;
-	}
+		while (base >= prop->base) {
+			long j = lmb_overlaps_region(&lmb.reserved, base, size);
 
-	if ( i < 0 )
-		return 0;
+			if (j < 0)
+				break;
+
+			addr = lmb.reserved.region[j].base;
+			base = _ALIGN_DOWN(addr - size, align);
+		}
 
-	lmb_add_region(&lmb.reserved, base, size);
+		if ((base != 0) && (base >= prop->base)) {
+			lmb_add_region(&lmb.reserved, base, size);
+			return base;
+		}
+	}
 
-	return base;
+	return 0;
 }
 
 /* You must call lmb_analyze() before this. */
@@ -280,20 +305,21 @@ lmb_end_of_DRAM(void)
 void __init lmb_enforce_memory_limit(void)
 {
 	extern unsigned long memory_limit;
-	unsigned long i, limit;
+	unsigned long limit;
+	struct lmb_property *prop;
 
 	if (! memory_limit)
 		return;
 
 	limit = memory_limit;
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		if (limit > lmb.memory.region[i].size) {
-			limit -= lmb.memory.region[i].size;
-			continue;
+	for_each_lmb_region(lmb.memory, prop) {
+		if (limit <= prop->size) {
+			long i = lmb_region_index(&(lmb.memory), prop);
+			prop->size = limit;
+			lmb.memory.cnt = i + 1;
+			break;
 		}
 
-		lmb.memory.region[i].size = limit;
-		lmb.memory.cnt = i + 1;
-		break;
+		limit -= prop->size;
 	}
 }
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -152,9 +152,10 @@ void __init htab_initialize(void)
 	unsigned long table, htab_size_bytes;
 	unsigned long pteg_count;
 	unsigned long mode_rw;
-	int i, use_largepages = 0;
+	int use_largepages = 0;
 	unsigned long base = 0, size = 0;
 	extern unsigned long tce_alloc_start, tce_alloc_end;
+	struct lmb_property *prop;
 
 	DBG(" -> htab_initialize()\n");
 
@@ -209,9 +210,9 @@ void __init htab_initialize(void)
 		use_largepages = 1;
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
-		size = lmb.memory.region[i].size;
+	for_each_lmb_region(lmb.memory, prop) {
+		base = prop->base + KERNELBASE;
+		size = prop->size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
 
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -462,7 +462,7 @@ void destroy_context(struct mm_struct *m
 void __init mm_init_ppc64(void)
 {
 #ifndef CONFIG_PPC_ISERIES
-	unsigned long i;
+	struct lmb_property *prop, *prev_prop = NULL;
 #endif
 
 	ppc64_boot_msg(0x100, "MM Init");
@@ -480,17 +480,16 @@ void __init mm_init_ppc64(void)
 	 */
 
 #ifndef CONFIG_PPC_ISERIES
-	for (i = 1; i < lmb.memory.cnt; i++) {
-		unsigned long base, prevbase, prevsize;
-
-		prevbase = lmb.memory.region[i-1].base;
-		prevsize = lmb.memory.region[i-1].size;
-		base = lmb.memory.region[i].base;
-		if (base > (prevbase + prevsize)) {
-			io_hole_start = prevbase + prevsize;
-			io_hole_size = base  - (prevbase + prevsize);
-			break;
+	for_each_lmb_region(lmb.memory, prop) {
+		if (prev_prop != NULL) {
+			unsigned long end = prev_prop->base + prev_prop->size;
+			if (prop->base > end) {
+				io_hole_start = end;
+				io_hole_size = prop->base - io_hole_start;
+				break;
+			}
 		}
+		prev_prop = prop;
 	}
 #endif /* CONFIG_PPC_ISERIES */
 	if (io_hole_start)
@@ -506,18 +505,13 @@ void __init mm_init_ppc64(void)
  */
 int page_is_ram(unsigned long pfn)
 {
-	int i;
 	unsigned long paddr = (pfn << PAGE_SHIFT);
+	struct lmb_property *prop;
 
-	for (i=0; i < lmb.memory.cnt; i++) {
-		unsigned long base;
-
-		base = lmb.memory.region[i].base;
-
-		if ((paddr >= base) &&
-			(paddr < (base + lmb.memory.region[i].size))) {
+	for_each_lmb_region(lmb.memory, prop) {
+		if ((paddr >= prop->base) &&
+		    (paddr < (prop->base + prop->size)))
 			return 1;
-		}
 	}
 
 	return 0;
@@ -531,10 +525,10 @@ EXPORT_SYMBOL(page_is_ram);
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-	unsigned long i;
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
 	int boot_mapsize;
+	struct lmb_property *prop;
 
 	/*
 	 * Find an area to use for the bootmem bitmap.  Calculate the size of
@@ -553,18 +547,15 @@ void __init do_init_bootmem(void)
 	/* Add all physical memory to the bootmem map, mark each area
 	 * present.
 	 */
-	for (i=0; i < lmb.memory.cnt; i++)
-		free_bootmem(lmb.memory.region[i].base,
-			     lmb_size_bytes(&lmb.memory, i));
+	for_each_lmb_region(lmb.memory, prop)
+		free_bootmem(prop->base, lmb_size_bytes(prop));
 
 	/* reserve the sections we're already using */
-	for (i=0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i));
-
-	for (i=0; i < lmb.memory.cnt; i++)
-		memory_present(0, lmb_start_pfn(&lmb.memory, i),
-			       lmb_end_pfn(&lmb.memory, i));
+	for_each_lmb_region(lmb.reserved, prop)
+		reserve_bootmem(prop->base, lmb_size_bytes(prop));
+
+	for_each_lmb_region(lmb.memory, prop)
+		memory_present(0, lmb_start_pfn(prop), lmb_end_pfn(prop));
 }
 
 /*
@@ -599,21 +590,17 @@ static struct kcore_list kcore_vmem;
 
 static int __init setup_kcore(void)
 {
-	int i;
+	struct lmb_property *prop;
 
-	for (i=0; i < lmb.memory.cnt; i++) {
-		unsigned long base, size;
+	for_each_lmb_region(lmb.memory, prop) {
 		struct kcore_list *kcore_mem;
 
-		base = lmb.memory.region[i].base;
-		size = lmb.memory.region[i].size;
-
 		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
 		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
 		if (!kcore_mem)
 			panic("mem_init: kmalloc failed\n");
 
-		kclist_add(kcore_mem, __va(base), size);
+		kclist_add(kcore_mem, __va(prop->base), prop->size);
 	}
 
 	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -581,9 +581,9 @@ void __init do_init_bootmem(void)
 
 	for_each_online_node(nid) {
 		unsigned long start_paddr, end_paddr;
-		int i;
 		unsigned long bootmem_paddr;
 		unsigned long bootmap_pages;
+		struct lmb_property *prop;
 
 		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
 		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;
@@ -667,9 +667,9 @@ new_range:
 		/*
 		 * Mark reserved regions on this node
 		 */
-		for (i = 0; i < lmb.reserved.cnt; i++) {
-			unsigned long physbase = lmb.reserved.region[i].base;
-			unsigned long size = lmb.reserved.region[i].size;
+		for_each_lmb_region(lmb.reserved, prop) {
+			unsigned long physbase = prop->base;
+			unsigned long size = prop->size;
 
 			if (pa_to_nid(physbase) != nid &&
 			    pa_to_nid(physbase+size-1) != nid)
diff --git a/include/asm-ppc64/lmb.h b/include/asm-ppc64/lmb.h
--- a/include/asm-ppc64/lmb.h
+++ b/include/asm-ppc64/lmb.h
@@ -57,25 +57,40 @@ extern void lmb_dump_all(void);
 extern unsigned long io_hole_start;
 
 static inline unsigned long
-lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
+lmb_size_bytes(struct lmb_property *prop)
 {
-	return type->region[region_nr].size;
+	return prop->size;
 }
 static inline unsigned long
-lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
+lmb_size_pages(struct lmb_property *prop)
 {
-	return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
+	return lmb_size_bytes(prop) >> PAGE_SHIFT;
 }
 static inline unsigned long
-lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
+lmb_start_pfn(struct lmb_property *prop)
 {
-	return type->region[region_nr].base >> PAGE_SHIFT;
+	return prop->base >> PAGE_SHIFT;
 }
 static inline unsigned long
-lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
+lmb_end_pfn(struct lmb_property *prop)
 {
-	return lmb_start_pfn(type, region_nr) +
-	       lmb_size_pages(type, region_nr);
+	return lmb_start_pfn(prop) + lmb_size_pages(prop);
 }
 
+static inline long
+lmb_region_index(struct lmb_region *region, struct lmb_property *prop)
+{
+	return (prop - region->region);
+}
+
+#define for_each_lmb_region(lmb_region, itr)                                  \
+       for ((itr) = (lmb_region).region;                                      \
+            (itr) < (lmb_region).region + (lmb_region).cnt;                   \
+            (itr)++)
+
+#define for_each_lmb_region_reversed(lmb_region, itr)                         \
+       for ((itr) = (lmb_region).region + ((lmb_region).cnt - 1);             \
+            (itr) >= (lmb_region).region;                                     \
+            (itr)--)
+
 #endif /* _PPC64_LMB_H */