[PATCH 3/7] powerpc/mm: Fix kernel RAM protection after freeing unused memory on PPC32
Christophe Leroy
christophe.leroy at c-s.fr
Tue May 30 01:32:00 AEST 2017
As seen below, although the init sections have been freed, the
associated memory area is still marked as executable in the
page tables.
~ dmesg
[ 5.860093] Freeing unused kernel memory: 592K (c0570000 - c0604000)
~ cat /sys/kernel/debug/kernel_page_tables
---[ Start of kernel VM ]---
0xc0000000-0xc0497fff 4704K rw X present dirty accessed shared
0xc0498000-0xc056ffff 864K rw present dirty accessed shared
0xc0570000-0xc059ffff 192K rw X present dirty accessed shared
0xc05a0000-0xc7ffffff 125312K rw present dirty accessed shared
---[ vmalloc() Area ]---
Part of the freed range (0xc0570000-0xc059ffff) is still mapped rw X.
This patch fixes that by reusing the change_page_attr() function
implemented for CONFIG_DEBUG_PAGEALLOC: it is now built unconditionally
on PPC32, and free_initmem() uses it to remap the freed init text with
PAGE_KERNEL, dropping the execute permission.
Signed-off-by: Christophe Leroy <christophe.leroy at c-s.fr>
---
arch/powerpc/mm/mem.c | 1 +
arch/powerpc/mm/mmu_decl.h | 3 +++
arch/powerpc/mm/pgtable_32.c | 13 ++++++++++---
3 files changed, 14 insertions(+), 3 deletions(-)
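Note for reviewers who don't have pgtable_32.c open: change_page_attr()
walks the given pages of the linear mapping, rewrites each PTE with the
requested protection through __change_page_attr_noflush(), then flushes
the TLB for the range. A simplified sketch of that shape (for
illustration only, not the exact kernel code; the real helper also
takes care of interrupts and leaves block-mapped (BAT/TLBCAM) addresses
alone, which is omitted here):

/*
 * Simplified sketch of the reused helper, for illustration only;
 * the real pgtable_32.c version additionally disables interrupts
 * and does nothing for block-mapped (BAT/TLBCAM) addresses.
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long start = (unsigned long)page_address(page);
	int i, err = 0;

	for (i = 0; i < numpages && !err; i++, page++)
		err = __change_page_attr_noflush(page, prot);	/* rewrite one PTE */

	flush_tlb_kernel_range(start, start + numpages * PAGE_SIZE);
	return err;
}

With that helper built unconditionally, remap_init_ram() below only has
to compute the _sinittext.._einittext page range and pass PAGE_KERNEL,
which clears the execute permission on the freed init pages.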
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8e9bef964dbf..250601ccc907 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -393,6 +393,7 @@ void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
free_initmem_default(POISON_FREE_INITMEM);
+	remap_init_ram();
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d46128b22150..207af7ad3bda 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -94,6 +94,7 @@ extern void _tlbia(void);
#ifdef CONFIG_PPC32
extern void mapin_ram(void);
+void remap_init_ram(void);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot);
@@ -105,6 +106,8 @@ struct hash_pte;
extern struct hash_pte *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
+#else
+static inline void remap_init_ram(void) {}
#endif /* CONFIG_PPC32 */
extern unsigned long ioremap_bot;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bdfee8e62a6a..0dc9c9d8fafb 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -323,8 +323,6 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
return(retval);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
pte_t *kpte;
@@ -347,7 +345,7 @@ static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
/*
* Change the page attributes of an page in the linear mapping.
*
- * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
+ * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
*/
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
@@ -368,7 +366,16 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
return err;
}
+void remap_init_ram(void)
+{
+	struct page *page = virt_to_page(_sinittext);
+	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
+				 PFN_DOWN((unsigned long)_sinittext);
+
+	change_page_attr(page, numpages, PAGE_KERNEL);
+}
+#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
--
2.12.0