[PATCH 3/7] powerpc/mm/radix: Add tlb flush of THP ptes
Aneesh Kumar K.V
aneesh.kumar at linux.vnet.ibm.com
Thu May 19 17:58:46 AEST 2016
Instead of flushing the entire mm when a transparent-huge-page PMD entry is
updated, implement flush_pmd_tlb_range() so that only the 2M range mapped by
the huge PMD is invalidated. On radix this is done via a new
radix__flush_tlb_range_psize() called with MMU_PAGE_2M; the hash case keeps
using hash__flush_tlb_range().
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar at linux.vnet.ibm.com>
---
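Not part of the patch, just a review aid: a minimal userspace sketch of the
range-vs-full-PID heuristic that radix__flush_tlb_range_psize() below applies.
The ceiling of 33 pages and the 2M page size are taken from the patch; the
want_full_flush() helper, TLB_FLUSH_ALL duplicate and the sample ranges are
made up purely for illustration.

/*
 * Sketch of the flush-size decision in radix__flush_tlb_range_psize():
 * ranges larger than tlb_single_page_flush_ceiling pages fall back to a
 * full-PID flush, smaller ranges are invalidated page by page.
 */
#include <stdbool.h>
#include <stdio.h>

#define TLB_FLUSH_ALL (-1UL)

static unsigned long tlb_single_page_flush_ceiling = 33; /* value from the patch */

/* Return true when a full-PID flush is chosen over per-page invalidation. */
static bool want_full_flush(unsigned long start, unsigned long end,
			    unsigned long page_size)
{
	return end == TLB_FLUSH_ALL ||
	       (end - start) > tlb_single_page_flush_ceiling * page_size;
}

int main(void)
{
	unsigned long psize_2m = 1UL << 21;	/* 2M, i.e. MMU_PAGE_2M */

	/* One THP (a single 2M page), as flushed by pmdp_invalidate(). */
	printf("one 2M page -> %s\n",
	       want_full_flush(0, psize_2m, psize_2m) ? "full PID" : "per page");

	/* A 128M range: 64 huge pages, above the ceiling of 33. */
	printf("128M range  -> %s\n",
	       want_full_flush(0, 64 * psize_2m, psize_2m) ? "full PID" : "per page");
	return 0;
}

If I read the generic THP code correctly, defining
__HAVE_ARCH_FLUSH_PMD_TLB_RANGE is what stops the generic fallback (which maps
flush_pmd_tlb_range to flush_tlb_range) from kicking in, so on radix a THP
update no longer degenerates into a flush of the whole mm.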
.../powerpc/include/asm/book3s/64/tlbflush-radix.h | 4 ++
arch/powerpc/include/asm/book3s/64/tlbflush.h | 9 ++++
arch/powerpc/mm/pgtable-book3s64.c | 4 +-
arch/powerpc/mm/tlb-radix.c | 54 ++++++++++++++++++++++
4 files changed, 69 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 13ef38828dfe..823528d34688 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -10,6 +10,10 @@ static inline int mmu_get_ap(int psize)
return mmu_psize_defs[psize].ap;
}
+extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize);
+extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d98424ae356c..f0d6c9d38916 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -7,6 +7,15 @@
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_pmd_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 670318766545..7bb8acffe876 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -33,7 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
changed = !pmd_same(*(pmdp), entry);
if (changed) {
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
@@ -66,7 +66,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/*
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 5807f5d72e1b..fe2fc58d2e00 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -243,3 +243,57 @@ void radix__tlb_flush(struct mmu_gather *tlb)
struct mm_struct *mm = tlb->mm;
radix__flush_tlb_mm(mm);
}
+
+#define TLB_FLUSH_ALL -1UL
+/*
+ * Number of pages above which we do a broadcast (full PID) tlbie instead of
+ * flushing page by page; the value is copied from x86 as a starting point.
+ */
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+
+void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize)
+{
+ unsigned int pid;
+ unsigned long addr;
+ int local = mm_is_core_local(mm);
+ unsigned long ap = mmu_get_ap(psize);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+ unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+
+
+ preempt_disable();
+ pid = mm ? mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto err_out;
+
+ if (end == TLB_FLUSH_ALL ||
+ (end - start) > tlb_single_page_flush_ceiling * page_size) {
+ if (local)
+ _tlbiel_pid(pid);
+ else
+ _tlbie_pid(pid);
+ goto err_out;
+ }
+ for (addr = start; addr < end; addr += page_size) {
+
+ if (local)
+ _tlbiel_va(addr, pid, ap);
+ else {
+ if (lock_tlbie)
+ raw_spin_lock(&native_tlbie_lock);
+ _tlbie_va(addr, pid, ap);
+ if (lock_tlbie)
+ raw_spin_unlock(&native_tlbie_lock);
+ }
+ }
+err_out:
+ preempt_enable();
+}
+
+void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
+}
+EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
--
2.7.4