[PATCH v2] powerpc: Track backing pages allocated by vmemmap_populate()

Mark Nelson markn at au1.ibm.com
Tue Apr 13 14:16:23 EST 2010


We need to keep track of the backing pages that get allocated by
vmemmap_populate() so that when we use kdump, the dump-capture kernel knows
where these pages are.

To track the backing pages we use a linked list of structures, each holding
the physical address of a backing page and the corresponding virtual address.
We can use an hlist to save space (an hlist_head is a single pointer, half the
size of a list_head), because we never remove nodes.
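
To illustrate how the list would be consumed, here is a minimal sketch of
walking it; the helper name vmemmap_list_dump() is made up for illustration,
while hlist_for_each_entry() and pr_info() are the existing kernel helpers:

	/* Illustrative only, not part of this patch: print every tracked
	 * vmemmap backing page recorded in vmemmap_list.
	 */
	static void vmemmap_list_dump(void)
	{
		struct vmemmap_backing *vmem_back;
		struct hlist_node *node;

		hlist_for_each_entry(vmem_back, node, &vmemmap_list, hlist)
			pr_info("vmemmap %016lx backed by phys %016lx\n",
				vmem_back->virt_addr, vmem_back->phys);
	}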

vmemmap_populate() is called either at boot time or during a memory hotplug
operation. We don't have to worry about the boot-time calls because they are
inherently single-threaded. For a memory hotplug operation vmemmap_populate()
is called through:
sparse_add_one_section()
            |
            V
kmalloc_section_memmap()
            |
            V
sparse_mem_map_populate()
            |
            V
vmemmap_populate()
and in sparse_add_one_section() we're protected by pgdat_resize_lock().
So, we don't need a spinlock to protect the vmemmap_list.
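
For context, the dump side still needs a way to find the head of this list.
One possible follow-up (not part of this patch) would be to export it via
vmcoreinfo using the existing VMCOREINFO_* helpers from <linux/kexec.h>, for
example:

	/* Sketch of a possible follow-up, not included in this patch. */
	void arch_crash_save_vmcoreinfo(void)
	{
		VMCOREINFO_SYMBOL(vmemmap_list);
		VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
		VMCOREINFO_OFFSET(vmemmap_backing, hlist);
		VMCOREINFO_OFFSET(vmemmap_backing, phys);
		VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
	}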

Signed-off-by: Mark Nelson <markn at au1.ibm.com>
---

changes since v1:
 - use an hlist to save space in the structure
 - remove the spinlock because it's not needed

 arch/powerpc/include/asm/pgalloc-64.h |    7 +++++++
 arch/powerpc/mm/init_64.c             |   24 ++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

Index: upstream/arch/powerpc/include/asm/pgalloc-64.h
===================================================================
--- upstream.orig/arch/powerpc/include/asm/pgalloc-64.h
+++ upstream/arch/powerpc/include/asm/pgalloc-64.h
@@ -10,6 +10,13 @@
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
+#include <linux/list.h>
+
+struct vmemmap_backing {
+	unsigned long phys;
+	unsigned long virt_addr;
+	struct hlist_node hlist;
+};
 
 /*
  * Functions that deal with pagetables that could be at any level of
Index: upstream/arch/powerpc/mm/init_64.c
===================================================================
--- upstream.orig/arch/powerpc/mm/init_64.c
+++ upstream/arch/powerpc/mm/init_64.c
@@ -43,6 +43,7 @@
 #include <linux/lmb.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -252,6 +253,27 @@ static void __meminit vmemmap_create_map
 }
 #endif /* CONFIG_PPC_BOOK3E */
 
+HLIST_HEAD(vmemmap_list);
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+					    unsigned long start,
+					    int node)
+{
+	struct vmemmap_backing *vmem_back;
+
+	vmem_back = vmemmap_alloc_block(sizeof(struct vmemmap_backing), node);
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return;
+	}
+
+	vmem_back->phys = phys;
+	vmem_back->virt_addr = start;
+	INIT_HLIST_NODE(&vmem_back->hlist);
+
+	hlist_add_head(&vmem_back->hlist, &vmemmap_list);
+}
+
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
@@ -276,6 +298,8 @@ int __meminit vmemmap_populate(struct pa
 		if (!p)
 			return -ENOMEM;
 
+		vmemmap_list_populate(__pa(p), start, node);
+
 		pr_debug("      * %016lx..%016lx allocated at %p\n",
 			 start, start + page_size, p);
 
