[RFC] hugetlb: Allow update_mmu_cache to handle hugetlb pages

Adam Litke agl at us.ibm.com
Thu Oct 20 05:46:46 EST 2005


Hugetlb demand faulting is now in -mm and copy-on-write support will be
coming soon.  When updating huge ptes, we'll need to call
update_mmu_cache(), which currently can't handle huge pages.  This patch
fixes the problem.  Is this acceptable, or is there a more elegant way
to solve it?

 hash_utils.c  |    6 ++++--
 hugetlbpage.c |    4 ----
 init.c        |   17 ++++++++++++-----
 3 files changed, 16 insertions(+), 11 deletions(-)
diff -upN reference/arch/ppc64/mm/hash_utils.c current/arch/ppc64/mm/hash_utils.c
--- reference/arch/ppc64/mm/hash_utils.c
+++ current/arch/ppc64/mm/hash_utils.c
@@ -343,9 +343,11 @@ int hash_page(unsigned long ea, unsigned
 		local = 1;
 
 	/* Is this a huge page ? */
-	if (unlikely(in_hugepage_area(mm->context, ea)))
+	if (unlikely(in_hugepage_area(mm->context, ea))) {
+		spin_lock(&mm->page_table_lock);
 		ret = hash_huge_page(mm, access, ea, vsid, local);
-	else {
+		spin_unlock(&mm->page_table_lock);
+	} else {
 		ptep = find_linux_pte(pgdir, ea);
 		if (ptep == NULL)
 			return 1;
diff -upN reference/arch/ppc64/mm/hugetlbpage.c current/arch/ppc64/mm/hugetlbpage.c
--- reference/arch/ppc64/mm/hugetlbpage.c
+++ current/arch/ppc64/mm/hugetlbpage.c
@@ -625,8 +625,6 @@ int hash_huge_page(struct mm_struct *mm,
 	long slot;
 	int err = 1;
 
-	spin_lock(&mm->page_table_lock);
-
 	ptep = huge_pte_offset(mm, ea);
 
 	/* Search the Linux page table for a match with va */
@@ -739,7 +737,5 @@ repeat:
 	err = 0;
 
  out:
-	spin_unlock(&mm->page_table_lock);
-
 	return err;
 }
diff -upN reference/arch/ppc64/mm/init.c current/arch/ppc64/mm/init.c
--- reference/arch/ppc64/mm/init.c
+++ current/arch/ppc64/mm/init.c
@@ -769,6 +769,7 @@ void update_mmu_cache(struct vm_area_str
 	int local = 0;
 	cpumask_t tmp;
 	unsigned long flags;
+	int huge = in_hugepage_area(vma->vm_mm->context, ea);
 
 	/* handle i-cache coherency */
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
@@ -792,9 +793,11 @@ void update_mmu_cache(struct vm_area_str
 	if (pgdir == NULL)
 		return;
 
-	ptep = find_linux_pte(pgdir, ea);
-	if (!ptep)
-		return;
+	if (likely(!huge)) {
+		ptep = find_linux_pte(pgdir, ea);
+		if (!ptep)
+			return;
+	}
 
 	vsid = get_vsid(vma->vm_mm->context.id, ea);
 
@@ -803,8 +806,12 @@ void update_mmu_cache(struct vm_area_str
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
-	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-		    0x300, local);
+	if (likely(!huge))
+		__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid,
+				ptep, 0x300, local);
+	else
+		hash_huge_page(vma->vm_mm, pte_val(pte), ea, vsid, local);
+		
 	local_irq_restore(flags);
 }
 

-- 
Adam Litke - (agl at us.ibm.com)
IBM Linux Technology Center




More information about the Linuxppc64-dev mailing list