[PATCH] [POWERPC] ppc405 Fix arithmetic rollover bug when memory size under 16M

Grant Likely grant.likely at secretlab.ca
Wed Oct 31 17:41:20 EST 2007


From: Grant Likely <grant.likely at secretlab.ca>

mmu_mapin_ram() loops over total_lowmem to set up page tables.  However, if
total_lowmem is less than 16M, the subtraction in the loop bound rolls over
and results in a number just under 4G (because total_lowmem is an unsigned
value).
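For illustration only (not part of the patch): a minimal stand-alone C sketch
of the wraparound under the assumption of a 32-bit unsigned long.  The 16M
constant mirrors the kernel's LARGE_PAGE_SIZE_16M; the program and its values
are hypothetical.

#include <stdio.h>

#define LARGE_PAGE_SIZE_16M (16UL << 20)

int main(void)
{
	unsigned long total_lowmem = 8UL << 20;	/* pretend only 8M of lowmem */
	unsigned long s = 0;

	/* Old condition: the subtraction underflows, so the bound becomes a
	 * huge value (just under 4G on 32-bit), the test stays true, and the
	 * loop would map far past the end of RAM. */
	printf("old bound: %lu\n", total_lowmem - LARGE_PAGE_SIZE_16M);
	printf("old test (s <= bound): %d\n",
	       s <= (total_lowmem - LARGE_PAGE_SIZE_16M));

	/* New scheme: count s down from total_lowmem; the test is simply
	 * false once less than one 16M page remains. */
	s = total_lowmem;
	printf("new test (s >= 16M): %d\n", s >= LARGE_PAGE_SIZE_16M);

	return 0;
}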

This patch rejigs the loops to count down instead of up, which eliminates the
bug.

Special thanks to Magnus Hjorth, who wrote the original patch to fix this
bug.  This patch improves on his by making the loop code simpler (which
also eliminates the possibility of another rollover at the high end) and by
applying the change to arch/powerpc as well.

Signed-off-by: Grant Likely <grant.likely at secretlab.ca>
---

Josh, I've tested this change on arch/ppc, but not arch/powerpc.  The
ppc and powerpc code are darn near identical so it should be correct,
but you might want to boot a kernel before you push it to Paulus
anyway.

Cheers,
g.

 arch/powerpc/mm/40x_mmu.c |   17 ++++++++---------
 arch/ppc/mm/4xx_mmu.c     |   17 ++++++++---------
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index e067df8..3899ea9 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -98,13 +98,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -116,10 +115,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -128,8 +127,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }
diff --git a/arch/ppc/mm/4xx_mmu.c b/arch/ppc/mm/4xx_mmu.c
index 838e09d..ea785db 100644
--- a/arch/ppc/mm/4xx_mmu.c
+++ b/arch/ppc/mm/4xx_mmu.c
@@ -99,13 +99,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -117,10 +116,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -129,8 +128,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }



