CONFIG_PAGEALLOC_DEBUG

Anton Blanchard anton at samba.org
Tue Feb 24 08:56:11 EST 2004


Hi,

Here's a first stab at CONFIG_PAGEALLOC_DEBUG. It's a useful debug feature
where you unmap unused pages, catching use-after-free bugs etc.

It only works on pseries SMP at the moment; we really need to rework how
we do it. The current updateboltedpp hooks aren't good enough because
they only write-protect but still allow reading.

At the moment I just turn off the valid bit and leave the entry there,
but that won't work on LPAR. I think we will have to remove the bolted
entry completely and reinsert it.

You might have to tune how the slab cache interacts (for maximum
coverage you pretty much want all allocations, even small ones, to end up
on their own page, and you don't want any of the slab caches to be
operating)

Anton

---

 foobar-anton/arch/ppc64/Kconfig               |    8 ++++++++
 foobar-anton/arch/ppc64/kernel/iSeries_htab.c |    7 ++++---
 foobar-anton/arch/ppc64/kernel/idle.c         |   11 +++++++++++
 foobar-anton/arch/ppc64/kernel/pSeries_htab.c |   25 +++++++++++--------------
 foobar-anton/arch/ppc64/kernel/pSeries_lpar.c |   11 ++++-------
 foobar-anton/arch/ppc64/mm/hash_utils.c       |   11 +++++++++++
 foobar-anton/arch/ppc64/mm/init.c             |    2 +-
 foobar-anton/include/asm-ppc64/cacheflush.h   |    5 +++++
 foobar-anton/include/asm-ppc64/cputable.h     |    7 +++++--
 foobar-anton/include/asm-ppc64/machdep.h      |    4 ++--
 mm/slab.c                                     |    0
 11 files changed, 62 insertions(+), 29 deletions(-)

diff -puN arch/ppc64/Kconfig~ppc64-config_pagealloc_debug arch/ppc64/Kconfig
--- foobar/arch/ppc64/Kconfig~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.922539797 +1100
+++ foobar-anton/arch/ppc64/Kconfig	2004-02-21 13:58:15.996534209 +1100
@@ -401,6 +401,14 @@ config DEBUG_SPINLOCK_SLEEP
 	  If you say Y here, various routines which may sleep will become very
 	  noisy if they are called with a spinlock held.

+config DEBUG_PAGEALLOC
+	bool "Page alloc debugging"
+	depends on DEBUG_KERNEL
+	help
+	  Unmap pages from the kernel linear mapping after free_pages().
+	  This results in a large slowdown, but helps to find certain types
+	  of memory corruptions.
+
 endmenu

 source "security/Kconfig"
diff -puN arch/ppc64/kernel/iSeries_htab.c~ppc64-config_pagealloc_debug arch/ppc64/kernel/iSeries_htab.c
--- foobar/arch/ppc64/kernel/iSeries_htab.c~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.928539344 +1100
+++ foobar-anton/arch/ppc64/kernel/iSeries_htab.c	2004-02-21 13:58:15.997534133 +1100
@@ -167,7 +167,7 @@ static long iSeries_hpte_find(unsigned l
  *
  * No need to lock here because we should be the only user.
  */
-static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
+static void iSeries_hpte_updatevalid(unsigned long valid, unsigned long ea)
 {
 	unsigned long vsid,va,vpn;
 	long slot;
@@ -176,8 +176,9 @@ static void iSeries_hpte_updateboltedpp(
 	va = (vsid << 28) | (ea & 0x0fffffff);
 	vpn = va >> PAGE_SHIFT;
 	slot = iSeries_hpte_find(vpn);
-	if (slot == -1)
-		panic("updateboltedpp: Could not find page to bolt\n");
+	BUG_ON(slot == -1);
+
+	/* XXX FIXME */
 	HvCallHpt_setPp(slot, newpp);
 }

diff -puN arch/ppc64/kernel/idle.c~ppc64-config_pagealloc_debug arch/ppc64/kernel/idle.c
--- foobar/arch/ppc64/kernel/idle.c~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.934538891 +1100
+++ foobar-anton/arch/ppc64/kernel/idle.c	2004-02-21 14:49:51.761292296 +1100
@@ -132,6 +132,17 @@ int default_idle(void)
 {
 	long oldval;

+#if 0
+	struct page *tmp = alloc_pages(GFP_KERNEL, 0);
+	unsigned char *foo =  __va(page_to_pfn(tmp) << PAGE_SHIFT);
+	foo[0] = '1';
+	free_pages(foo, 0);
+
+	printk("use after free: %p\n", foo);
+	printk("%c\n", foo[0]);
+	printk("passed\n");
+#endif
+
 	while (1) {
 		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

diff -puN arch/ppc64/kernel/pSeries_htab.c~ppc64-config_pagealloc_debug arch/ppc64/kernel/pSeries_htab.c
--- foobar/arch/ppc64/kernel/pSeries_htab.c~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.939538513 +1100
+++ foobar-anton/arch/ppc64/kernel/pSeries_htab.c	2004-02-21 15:07:47.212994956 +1100
@@ -60,11 +60,11 @@ long pSeries_hpte_insert(unsigned long h
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		dw0 = hptep->dw0.dw0;

-		if (!dw0.v) {
+		if (!dw0.v && !dw0.bolted) {
 			/* retry with lock held */
 			pSeries_lock_hpte(hptep);
 			dw0 = hptep->dw0.dw0;
-			if (!dw0.v)
+			if (!dw0.v && !dw0.bolted)
 				break;
 			pSeries_unlock_hpte(hptep);
 		}
@@ -177,7 +177,7 @@ static long pSeries_hpte_find(unsigned l
 			hptep = htab_data.htab + slot;
 			dw0 = hptep->dw0.dw0;

-			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
+			if ((dw0.avpn == (vpn >> 11)) && dw0.bolted &&
 			    (dw0.h == j)) {
 				/* HPTE matches */
 				if (j)
@@ -230,14 +230,12 @@ static long pSeries_hpte_updatepp(unsign
 }

 /*
- * Update the page protection bits. Intended to be used to create
- * guard pages for kernel data structures on pages which are bolted
- * in the HPT. Assumes pages being operated on will not be stolen.
- * Does not work on large pages.
+ * Change the valid bit on bolted pages. Used by debugging code such
+ * as CONFIG_PAGEALLOC_DEBUG to cause accesses on certain pages to fault.
  *
- * No need to lock here because we should be the only user.
+ * We assume the caller provides any locking.
  */
-static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
+static void pSeries_hpte_updatevalid(unsigned long valid, unsigned long ea)
 {
 	unsigned long vsid, va, vpn, flags;
 	long slot;
@@ -248,11 +246,10 @@ static void pSeries_hpte_updateboltedpp(
 	vpn = va >> PAGE_SHIFT;

 	slot = pSeries_hpte_find(vpn);
-	if (slot == -1)
-		panic("could not find page to bolt\n");
-	hptep = htab_data.htab + slot;
+	BUG_ON(slot == -1);

-	set_pp_bit(newpp, hptep);
+	hptep = htab_data.htab + slot;
+	hptep->dw0.dw0.v = valid;

 	/* Ensure it is out of the tlb too */
 	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
@@ -376,7 +373,7 @@ void hpte_init_pSeries(void)

 	ppc_md.hpte_invalidate	= pSeries_hpte_invalidate;
 	ppc_md.hpte_updatepp	= pSeries_hpte_updatepp;
-	ppc_md.hpte_updateboltedpp = pSeries_hpte_updateboltedpp;
+	ppc_md.hpte_updatevalid = pSeries_hpte_updatevalid;
 	ppc_md.hpte_insert	= pSeries_hpte_insert;
 	ppc_md.hpte_remove     	= pSeries_hpte_remove;

diff -puN arch/ppc64/kernel/pSeries_lpar.c~ppc64-config_pagealloc_debug arch/ppc64/kernel/pSeries_lpar.c
--- foobar/arch/ppc64/kernel/pSeries_lpar.c~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.945538060 +1100
+++ foobar-anton/arch/ppc64/kernel/pSeries_lpar.c	2004-02-21 13:58:16.003533680 +1100
@@ -487,8 +487,7 @@ static long pSeries_lpar_hpte_find(unsig
 	return -1;
 }

-static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
-					     unsigned long ea)
+static void pSeries_lpar_hpte_updatevalid(unsigned long valid, unsigned long ea)
 {
 	unsigned long lpar_rc;
 	unsigned long vsid, va, vpn, flags;
@@ -499,11 +498,9 @@ static void pSeries_lpar_hpte_updatebolt
 	vpn = va >> PAGE_SHIFT;

 	slot = pSeries_lpar_hpte_find(vpn);
-	if (slot == -1)
-		panic("updateboltedpp: Could not find page to bolt\n");
+	BUG_ON(slot == -1);

-	flags = newpp & 3;
-	lpar_rc = plpar_pte_protect(flags, slot, 0);
+	/* XXX FIXME */

 	if (lpar_rc != H_Success)
 		panic("Bad return code from pte bolted protect rc = %lx\n",
@@ -555,7 +552,7 @@ void pSeries_lpar_mm_init(void)
 {
 	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
 	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
-	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
+	ppc_md.hpte_updatevalid = pSeries_lpar_hpte_updatevalid;
 	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
 	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
 	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
diff -puN arch/ppc64/mm/hash_utils.c~ppc64-config_pagealloc_debug arch/ppc64/mm/hash_utils.c
--- foobar/arch/ppc64/mm/hash_utils.c~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.951537607 +1100
+++ foobar-anton/arch/ppc64/mm/hash_utils.c	2004-02-21 13:58:16.005533529 +1100
@@ -357,3 +357,14 @@ void __init htab_finish_init(void)
 	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
 	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
 }
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	int i;
+
+	for (i = 0; i < numpages; i++)
+		ppc_md.hpte_updatevalid(enable,
+			(unsigned long)page_address(page) + PAGE_SIZE * i);
+}
+#endif
diff -puN arch/ppc64/mm/init.c~ppc64-config_pagealloc_debug arch/ppc64/mm/init.c
--- foobar/arch/ppc64/mm/init.c~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.958537079 +1100
+++ foobar-anton/arch/ppc64/mm/init.c	2004-02-21 14:51:33.299823789 +1100
@@ -666,7 +666,7 @@ void __init mm_init_ppc64(void)
 	for (index = 0; index < NR_CPUS; index++) {
 		lpaca = &paca[index];
 		guard_page = ((unsigned long)lpaca) + 0x1000;
-		ppc_md.hpte_updateboltedpp(PP_RXRX, guard_page);
+		ppc_md.hpte_updatevalid(0, guard_page);
 	}

 	ppc64_boot_msg(0x100, "MM Init Done");
diff -puN include/asm-ppc64/cacheflush.h~ppc64-config_pagealloc_debug include/asm-ppc64/cacheflush.h
--- foobar/include/asm-ppc64/cacheflush.h~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.963536701 +1100
+++ foobar-anton/include/asm-ppc64/cacheflush.h	2004-02-21 13:58:16.009533227 +1100
@@ -32,4 +32,9 @@ do { memcpy(dst, src, len); \

 extern void __flush_dcache_icache(void *page_va);

+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
 #endif /* _PPC64_CACHEFLUSH_H */
diff -puN include/asm-ppc64/cputable.h~ppc64-config_pagealloc_debug include/asm-ppc64/cputable.h
--- foobar/include/asm-ppc64/cputable.h~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.969536248 +1100
+++ foobar-anton/include/asm-ppc64/cputable.h	2004-02-21 13:58:16.010533152 +1100
@@ -139,8 +139,11 @@ extern firmware_feature_t firmware_featu
                                  CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
                                  CPU_FTR_NODSISRALIGN)

-/* iSeries doesn't support large pages */
-#ifdef CONFIG_PPC_ISERIES
+/*
+ * iSeries doesn't support large pages and we cant use large pages when
+ * page alloc debug is enabled
+ */
+#if defined(CONFIG_PPC_ISERIES) || defined(CONFIG_DEBUG_PAGEALLOC)
 #define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE)
 #else
 #define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
diff -puN include/asm-ppc64/machdep.h~ppc64-config_pagealloc_debug include/asm-ppc64/machdep.h
--- foobar/include/asm-ppc64/machdep.h~ppc64-config_pagealloc_debug	2004-02-21 13:58:15.975535795 +1100
+++ foobar-anton/include/asm-ppc64/machdep.h	2004-02-21 13:58:16.011533076 +1100
@@ -40,8 +40,8 @@ struct machdep_calls {
 					 unsigned long va,
 					 int large,
 					 int local);
-	void            (*hpte_updateboltedpp)(unsigned long newpp,
-					       unsigned long ea);
+	void            (*hpte_updatevalid)(unsigned long valid,
+					    unsigned long ea);
 	long		(*hpte_insert)(unsigned long hpte_group,
 				       unsigned long va,
 				       unsigned long prpn,
diff -puN mm/slab.c~ppc64-config_pagealloc_debug mm/slab.c
diff -puN -L arch/ppc64/mm/ash_utils.c /dev/null /dev/null

_

** Sent via the linuxppc64-dev mail list. See http://lists.linuxppc.org/





More information about the Linuxppc64-dev mailing list