[PATCH 1/1] Fix up TLB write permission on the powerpc e500 core

Shan Hai <haishan.bai@gmail.com>
Fri Jul 15 18:07:18 EST 2011


On the e500 core the kernel has no write permission on COW pages by
default.  This causes an endless loop in futex_lock_pi(), because the
futex code assumes the kernel can write to COW pages.  Grant the kernel
write permission on COW pages when an access-violation page fault
occurs.

Signed-off-by: Shan Hai <haishan.bai@gmail.com>
---
 arch/powerpc/include/asm/futex.h |   11 ++++++++++-
 arch/powerpc/include/asm/tlb.h   |   25 +++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletions(-)
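
The loop in question looks roughly like this (an illustrative sketch
only, simplified from the futex_lock_pi() path in kernel/futex.c;
fault_in_user_writeable() is futex's own helper that faults the page
in for writing):

static int lock_pi_retry_sketch(u32 __user *uaddr, u32 newval)
{
	u32 curval;
	int ret;

	for (;;) {
		/* Atomically install newval if the futex word is still 0. */
		ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, 0, newval);
		if (ret != -EFAULT)
			return ret;

		/*
		 * -EFAULT: fault the COW page in for writing and retry.
		 * On an unpatched e500 kernel the refilled TLB entry
		 * still lacks supervisor write permission, so the
		 * cmpxchg faults again and the loop never terminates.
		 */
		if (fault_in_user_writeable(uaddr))
			return -EFAULT;
	}
}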

diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3..54c3e74 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,6 +8,7 @@
 #include <asm/errno.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
+#include <asm/tlb.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
@@ -113,7 +114,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         : "cc", "memory");
 
 	*uval = prev;
-        return ret;
+
+	/* The futex code assumes the kernel can write to COW
+	 * pages; it has no such permission by default, so grant
+	 * it here after the failed atomic access.
+	 */
+	if (ret == -EFAULT)
+		__tlb_fixup_write_permission(current->mm, (unsigned long)uaddr);
+
+	return ret;
 }
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e2b428b..3863c6a 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -45,5 +45,30 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 #endif
 }
 
+/* Grant the kernel write permission on a user page. */
+static inline void __tlb_fixup_write_permission(struct mm_struct *mm,
+						unsigned long address)
+{
+#if defined(CONFIG_FSL_BOOKE)
+	/* Grant the kernel write permission on the page by setting
+	 * the TLB.SW bit.  Setting the bit directly is tricky, so
+	 * instead call handle_mm_fault() with FAULT_FLAG_WRITE, which
+	 * sets the _PAGE_DIRTY bit of the pte; _PAGE_DIRTY is
+	 * translated into TLB.SW on the powerpc e500 core.
+	 */
+
+	struct vm_area_struct *vma;
+
+	vma = find_vma(mm, address);
+	if (likely(vma)) {
+		/* Only fix up pages that are already present. */
+		if (follow_page(vma, address, FOLL_WRITE)) {
+			handle_mm_fault(mm, vma, address, FAULT_FLAG_WRITE);
+			flush_tlb_page(vma, address);
+		}
+	}
+#endif
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
-- 
1.7.1
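
For background on the mechanism described in the tlb.h comment: the
e500 TLB refill derives its supervisor-write permission from the pte,
which is why dirtying the pte via handle_mm_fault() is enough.  A
conceptual sketch of that translation (not the kernel's actual TLB
miss code; MAS3_SR/MAS3_SW are the e500 MAS3 permission bit names, and
read permission is shown unconditionally only for brevity):

static inline unsigned long sketch_supervisor_perms(unsigned long pte_flags)
{
	unsigned long mas3 = MAS3_SR;	/* supervisor (kernel) read */

	/* A dirty pte is what becomes supervisor write, i.e. TLB.SW. */
	if (pte_flags & _PAGE_DIRTY)
		mas3 |= MAS3_SW;

	return mas3;
}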


