ppc64 PTE hacks

Anton Blanchard anton at samba.org
Fri Jan 9 17:18:06 EST 2004


> I just remembered we never merged this patch from Paul. It would be
> great to get rid of the flush_tlb_* functions.

Here it is, updated for 2.6, using per-cpu data etc. It's currently getting
some stress testing; if that passes and there are no concerns I'll
merge it in. As Ben mentioned, we need it for page aging to work.
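For anyone skimming the diff, here is a minimal standalone sketch of the
batching scheme the patch implements (plain C, not kernel code; the names
queue_invalidate and flush_hash_range_stub are made up for illustration,
and the real code keeps one batch per CPU via DEFINE_PER_CPU): PTE updates
queue (address, pte) pairs into a batch through hpte_update(), and
flush_tlb_pending() drains it via flush_hash_range(), either when the
batch fills, when the context changes mid-batch, or at an explicit flush
point such as a context switch.

/*
 * Standalone sketch of the batched hash-table/TLB flush idea.
 * Not kernel code; single batch instead of per-CPU for brevity.
 */
#include <stdio.h>

#define TLB_BATCH_NR 192	/* same size as PPC64_TLB_BATCH_NR */

struct tlb_batch {
	unsigned long index;
	unsigned long context;
	unsigned long addr[TLB_BATCH_NR];
	unsigned long pte[TLB_BATCH_NR];
};

static struct tlb_batch batch;

/* stand-in for flush_hash_range(): invalidate 'count' queued entries */
static void flush_hash_range_stub(unsigned long context, unsigned long count)
{
	printf("flushing %lu entries for context %lu\n", count, context);
}

/* analogous to flush_tlb_pending(): drain whatever has been queued */
static void flush_tlb_pending_stub(void)
{
	if (batch.index) {
		flush_hash_range_stub(batch.context, batch.index);
		batch.index = 0;
	}
}

/* analogous to hpte_update(): queue one invalidation, flushing first if
 * the batch is for a different context, and when the batch fills up */
static void queue_invalidate(unsigned long context, unsigned long addr,
			     unsigned long pte)
{
	if (batch.index && batch.context != context)
		flush_tlb_pending_stub();
	batch.context = context;
	batch.addr[batch.index] = addr;
	batch.pte[batch.index] = pte;
	if (++batch.index >= TLB_BATCH_NR)
		flush_tlb_pending_stub();
}

int main(void)
{
	unsigned long addr;

	/* e.g. munmap() tearing down 300 pages of one address space */
	for (addr = 0; addr < 300 * 4096UL; addr += 4096)
		queue_invalidate(1, addr, 0);
	flush_tlb_pending_stub();	/* drain the remainder */
	return 0;
}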

Anton

===== arch/ppc64/kernel/pSeries_htab.c 1.13 vs edited =====
--- 1.13/arch/ppc64/kernel/pSeries_htab.c	Fri Dec  5 10:00:40 2003
+++ edited/arch/ppc64/kernel/pSeries_htab.c	Fri Jan  9 14:39:15 2004
@@ -300,7 +300,7 @@
 	int i, j;
 	HPTE *hptep;
 	Hpte_dword0 dw0;
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

 	/* XXX fix for large ptes */
 	unsigned long large = 0;
===== arch/ppc64/kernel/pSeries_lpar.c 1.35 vs edited =====
--- 1.35/arch/ppc64/kernel/pSeries_lpar.c	Thu Nov 13 10:23:27 2003
+++ edited/arch/ppc64/kernel/pSeries_lpar.c	Fri Jan  9 14:39:15 2004
@@ -602,7 +602,7 @@
 {
 	int i;
 	unsigned long flags;
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

 	spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

===== arch/ppc64/kernel/process.c 1.44 vs edited =====
--- 1.44/arch/ppc64/kernel/process.c	Wed Dec 17 15:27:52 2003
+++ edited/arch/ppc64/kernel/process.c	Fri Jan  9 14:39:15 2004
@@ -49,6 +49,7 @@
 #include <asm/hardirq.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
+#include <asm/tlbflush.h>

 #ifndef CONFIG_SMP
 struct task_struct *last_task_used_math = NULL;
@@ -145,6 +146,8 @@
 	if (new->thread.regs && last_task_used_altivec == new)
 		new->thread.regs->msr |= MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
+
+	flush_tlb_pending();

 	new_thread = &new->thread;
 	old_thread = &current->thread;
===== arch/ppc64/mm/Makefile 1.9 vs edited =====
--- 1.9/arch/ppc64/mm/Makefile	Wed Dec 17 16:08:23 2003
+++ edited/arch/ppc64/mm/Makefile	Fri Jan  9 14:39:18 2004
@@ -4,6 +4,6 @@

 EXTRA_CFLAGS += -mno-minimal-toc

-obj-y := fault.o init.o extable.o imalloc.o hash_utils.o hash_low.o
+obj-y := fault.o init.o extable.o imalloc.o hash_utils.o hash_low.o tlb.o
 obj-$(CONFIG_DISCONTIGMEM) += numa.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
===== arch/ppc64/mm/hash_utils.c 1.44 vs edited =====
--- 1.44/arch/ppc64/mm/hash_utils.c	Sun Jan  4 21:47:33 2004
+++ edited/arch/ppc64/mm/hash_utils.c	Fri Jan  9 14:39:18 2004
@@ -325,8 +325,7 @@
 		ppc_md.flush_hash_range(context, number, local);
 	} else {
 		int i;
-		struct ppc64_tlb_batch *batch =
-			&ppc64_tlb_batch[smp_processor_id()];
+		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

 		for (i = 0; i < number; i++)
 			flush_hash_page(context, batch->addr[i], batch->pte[i],
===== arch/ppc64/mm/init.c 1.54 vs edited =====
--- 1.54/arch/ppc64/mm/init.c	Sun Jan  4 21:47:33 2004
+++ edited/arch/ppc64/mm/init.c	Fri Jan  9 15:38:03 2004
@@ -170,17 +170,27 @@
 	printk("%d pages swap cached\n",cached);
 }

-void *
-ioremap(unsigned long addr, unsigned long size)
-{
 #ifdef CONFIG_PPC_ISERIES
+
+void *ioremap(unsigned long addr, unsigned long size)
+{
 	return (void*)addr;
+}
+
+void iounmap(void *addr)
+{
+	return;
+}
+
 #else
+
+void *
+ioremap(unsigned long addr, unsigned long size)
+{
 	void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
 	if(mem_init_done)
 		return eeh_ioremap(addr, ret);	/* may remap the addr */
 	return ret;
-#endif
 }

 void *
@@ -326,7 +336,7 @@
  *
  * XXX	what about calls before mem_init_done (ie python_countermeasures())
  */
-void pSeries_iounmap(void *addr)
+void iounmap(void *addr)
 {
 	unsigned long address, start, end, size;
 	struct mm_struct *mm;
@@ -352,29 +362,18 @@
 	spin_lock(&mm->page_table_lock);

 	dir = pgd_offset_i(address);
-	flush_cache_all();
+	flush_cache_vunmap(address, end);
 	do {
 		unmap_im_area_pmd(dir, address, end - address);
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	__flush_tlb_range(mm, start, end);
+	flush_tlb_kernel_range(start, end);

 	spin_unlock(&mm->page_table_lock);
 	return;
 }

-void iounmap(void *addr)
-{
-#ifdef CONFIG_PPC_ISERIES
-	/* iSeries I/O Remap is a noop              */
-	return;
-#else
-	/* DRENG / PPPBBB todo */
-	return pSeries_iounmap(addr);
-#endif
-}
-
 int iounmap_explicit(void *addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -463,152 +462,7 @@
 	}
 }

-void
-flush_tlb_mm(struct mm_struct *mm)
-{
-	struct vm_area_struct *mp;
-
-	spin_lock(&mm->page_table_lock);
-
-	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
-		__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
-
-	/* XXX are there races with checking cpu_vm_mask? - Anton */
-	cpus_clear(mm->cpu_vm_mask);
-
-	spin_unlock(&mm->page_table_lock);
-}
-
-/*
- * Callers should hold the mm->page_table_lock
- */
-void
-flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-	unsigned long context = 0;
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep;
-	pte_t pte;
-	int local = 0;
-	cpumask_t tmp;
-
-	switch( REGION_ID(vmaddr) ) {
-	case VMALLOC_REGION_ID:
-		pgd = pgd_offset_k( vmaddr );
-		break;
-	case IO_REGION_ID:
-		pgd = pgd_offset_i( vmaddr );
-		break;
-	case USER_REGION_ID:
-		pgd = pgd_offset( vma->vm_mm, vmaddr );
-		context = vma->vm_mm->context;
-
-		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		tmp = cpumask_of_cpu(smp_processor_id());
-		if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
-			local = 1;
-
-		break;
-	default:
-		panic("flush_tlb_page: invalid region 0x%016lx", vmaddr);
-
-	}
-
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, vmaddr);
-		if (pmd_present(*pmd)) {
-			ptep = pte_offset_kernel(pmd, vmaddr);
-			/* Check if HPTE might exist and flush it if so */
-			pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-			if ( pte_val(pte) & _PAGE_HASHPTE ) {
-				flush_hash_page(context, vmaddr, pte, local);
-			}
-		}
-		WARN_ON(pmd_hugepage(*pmd));
-	}
-}
-
-struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
-
-void
-__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep;
-	pte_t pte;
-	unsigned long pgd_end, pmd_end;
-	unsigned long context = 0;
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
-	unsigned long i = 0;
-	int local = 0;
-	cpumask_t tmp;
-
-	switch(REGION_ID(start)) {
-	case VMALLOC_REGION_ID:
-		pgd = pgd_offset_k(start);
-		break;
-	case IO_REGION_ID:
-		pgd = pgd_offset_i(start);
-		break;
-	case USER_REGION_ID:
-		pgd = pgd_offset(mm, start);
-		context = mm->context;
-
-		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		tmp = cpumask_of_cpu(smp_processor_id());
-		if (cpus_equal(mm->cpu_vm_mask, tmp))
-			local = 1;
-
-		break;
-	default:
-		panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
-	}
-
-	do {
-		pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
-		if (pgd_end > end)
-			pgd_end = end;
-		if (!pgd_none(*pgd)) {
-			pmd = pmd_offset(pgd, start);
-			do {
-				pmd_end = (start + PMD_SIZE) & PMD_MASK;
-				if (pmd_end > end)
-					pmd_end = end;
-				if (pmd_present(*pmd)) {
-					ptep = pte_offset_kernel(pmd, start);
-					do {
-						if (pte_val(*ptep) & _PAGE_HASHPTE) {
-							pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-							if (pte_val(pte) & _PAGE_HASHPTE) {
-								batch->pte[i] = pte;
-								batch->addr[i] = start;
-								i++;
-								if (i == PPC64_TLB_BATCH_NR) {
-									flush_hash_range(context, i, local);
-									i = 0;
-								}
-							}
-						}
-						start += PAGE_SIZE;
-						++ptep;
-					} while (start < pmd_end);
-				} else {
-					WARN_ON(pmd_hugepage(*pmd));
-					start = pmd_end;
-				}
-				++pmd;
-			} while (start < pgd_end);
-		} else {
-			start = pgd_end;
-		}
-		++pgd;
-	} while (start < end);
-
-	if (i)
-		flush_hash_range(context, i, local);
-}
+#endif

 void free_initmem(void)
 {
===== arch/ppc64/mm/tlb.c 1.54 vs edited =====
--- /dev/null	2004-01-07 16:07:03.000000000 +1100
+++ edited/arch/ppc64/mm/tlb.c	2003-12-28 16:19:25.000000000 +1100
@@ -0,0 +1,84 @@
+/*
+ * This file contains the routines for flushing entries from the
+ * TLB and MMU hash table.
+ *
+ *  Derived from arch/ppc64/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt at linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus at cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort at cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov at cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret at us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+/*
+ * Update the MMU hash table to correspond with a change to
+ * a Linux PTE.  If wrprot is true, it is permissible to
+ * change the existing HPTE to read-only rather than removing it
+ * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
+ */
+void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
+{
+	struct page *ptepage;
+	struct mm_struct *mm;
+	unsigned long addr;
+	int i;
+	unsigned long context = 0;
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	ptepage = virt_to_page(ptep);
+	mm = (struct mm_struct *) ptepage->mapping;
+	addr = ptepage->index + (((unsigned long)ptep & ~PAGE_MASK) << 9);
+	if (REGION_ID(addr) == USER_REGION_ID)
+		context = mm->context;
+	i = batch->index;
+	if (unlikely(i != 0 && context != batch->context)) {
+		flush_tlb_pending();
+		i = 0;
+	}
+	if (i == 0) {
+		batch->context = context;
+		batch->mm = mm;
+	}
+	batch->pte[i] = __pte(pte);
+	batch->addr[i] = addr;
+	batch->index = ++i;
+	if (i >= PPC64_TLB_BATCH_NR)
+		flush_tlb_pending();
+}
+
+void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
+{
+	int i;
+	int local = 0;
+
+	i = batch->index;
+	if (batch->mm->cpu_vm_mask == (1 << smp_processor_id()))
+		local = 1;
+	if (i == 1)
+		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
+				local);
+	else
+		flush_hash_range(batch->context, i, local);
+	batch->index = 0;
+}
===== include/asm-ppc64/pgtable.h 1.30 vs edited =====
--- 1.30/include/asm-ppc64/pgtable.h	Wed Dec 17 16:08:23 2003
+++ edited/include/asm-ppc64/pgtable.h	Fri Jan  9 14:39:18 2004
@@ -11,6 +11,7 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+#include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */

 /* PMD_SHIFT determines what a second-level page table entry can map */
@@ -288,71 +289,93 @@

 /* Atomic PTE updates */

-static inline unsigned long pte_update( pte_t *p, unsigned long clr,
-					unsigned long set )
+static inline unsigned long pte_update(pte_t *p, unsigned long clr)
 {
 	unsigned long old, tmp;
-
+
 	__asm__ __volatile__(
 	"1:	ldarx	%0,0,%3		# pte_update\n\
-	andi.	%1,%0,%7\n\
+	andi.	%1,%0,%6\n\
 	bne-	1b \n\
 	andc	%1,%0,%4 \n\
-	or	%1,%1,%5 \n\
 	stdcx.	%1,0,%3 \n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "r" (set), "m" (*p), "i" (_PAGE_BUSY)
+	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
 	: "cc" );
 	return old;
 }

+/* PTE updating functions */
+extern void hpte_update(pte_t *ptep, unsigned long pte, int wrprot);
+
 static inline int ptep_test_and_clear_young(pte_t *ptep)
 {
-	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_ACCESSED | _PAGE_HPTEFLAGS);
+	if (old & _PAGE_HASHPTE) {
+		hpte_update(ptep, old, 0);
+		flush_tlb_pending();	/* XXX generic code doesn't flush */
+	}
+	return (old & _PAGE_ACCESSED) != 0;
 }

 static inline int ptep_test_and_clear_dirty(pte_t *ptep)
 {
-	return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
-}
+	unsigned long old;

-static inline pte_t ptep_get_and_clear(pte_t *ptep)
-{
-	return __pte(pte_update(ptep, ~_PAGE_HPTEFLAGS, 0));
+	old = pte_update(ptep, _PAGE_DIRTY);
+	if ((~old & (_PAGE_HASHPTE | _PAGE_RW | _PAGE_DIRTY)) == 0)
+		hpte_update(ptep, old, 1);
+	return (old & _PAGE_DIRTY) != 0;
 }

 static inline void ptep_set_wrprotect(pte_t *ptep)
 {
-	pte_update(ptep, _PAGE_RW, 0);
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_RW);
+	if ((~old & (_PAGE_HASHPTE | _PAGE_RW | _PAGE_DIRTY)) == 0)
+		hpte_update(ptep, old, 1);
 }

-static inline void ptep_mkdirty(pte_t *ptep)
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
 {
-	pte_update(ptep, 0, _PAGE_DIRTY);
+	unsigned long old = pte_update(ptep, ~0UL);
+
+	if (old & _PAGE_HASHPTE)
+		hpte_update(ptep, old, 0);
+	return __pte(old);
 }

-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+static inline void pte_clear(pte_t * ptep)
+{
+	unsigned long old = pte_update(ptep, ~0UL);

-#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+	if (old & _PAGE_HASHPTE)
+		hpte_update(ptep, old, 0);
+}

 /*
  * set_pte stores a linux PTE into the linux page table.
- * On machines which use an MMU hash table we avoid changing the
- * _PAGE_HASHPTE bit.
  */
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	pte_update(ptep, ~_PAGE_HPTEFLAGS, pte_val(pte) & ~_PAGE_HPTEFLAGS);
+	/* XXX is there a better way to handle this? */
+	if (pte_present(*ptep))
+		pte_clear(ptep);
+	if (pte_present(pte))
+		flush_tlb_pending();
+	*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
 }

-static inline void pte_clear(pte_t * ptep)
-{
-	pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
-}
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

 extern unsigned long ioremap_bot, ioremap_base;

===== include/asm-ppc64/tlb.h 1.10 vs edited =====
--- 1.10/include/asm-ppc64/tlb.h	Wed Dec 17 15:51:16 2003
+++ edited/include/asm-ppc64/tlb.h	Fri Jan  9 14:39:18 2004
@@ -12,11 +12,9 @@
 #ifndef _PPC64_TLB_H
 #define _PPC64_TLB_H

-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
-#include <asm/page.h>
-#include <asm/mmu.h>

+struct mmu_gather;
 static inline void tlb_flush(struct mmu_gather *tlb);

 /* Avoid pulling in another include just for this */
@@ -29,66 +27,13 @@
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)

-/* Should make this at least as large as the generic batch size, but it
- * takes up too much space */
-#define PPC64_TLB_BATCH_NR 192
-
-struct ppc64_tlb_batch {
-	unsigned long index;
-	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long addr[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-};
-
-extern struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-					unsigned long address)
-{
-	int cpu = smp_processor_id();
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
-	unsigned long i = batch->index;
-	pte_t pte;
-	cpumask_t local_cpumask = cpumask_of_cpu(cpu);
-
-	if (pte_val(*ptep) & _PAGE_HASHPTE) {
-		pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-		if (pte_val(pte) & _PAGE_HASHPTE) {
-
-			batch->pte[i] = pte;
-			batch->addr[i] = address;
-			i++;
-
-			if (i == PPC64_TLB_BATCH_NR) {
-				int local = 0;
-
-				if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
-					local = 1;
-
-				flush_hash_range(tlb->mm->context, i, local);
-				i = 0;
-			}
-		}
-	}
-
-	batch->index = i;
-}
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)

 extern void pte_free_finish(void);

 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	int cpu = smp_processor_id();
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
-	int local = 0;
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-
-	if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
-		local = 1;
-
-	flush_hash_range(tlb->mm->context, batch->index, local);
-	batch->index = 0;
-
+	flush_tlb_pending();
 	pte_free_finish();
 }

===== include/asm-ppc64/tlbflush.h 1.4 vs edited =====
--- 1.4/include/asm-ppc64/tlbflush.h	Fri Jun  7 18:21:41 2002
+++ edited/include/asm-ppc64/tlbflush.h	Fri Jan  9 14:39:18 2004
@@ -1,10 +1,6 @@
 #ifndef _PPC64_TLBFLUSH_H
 #define _PPC64_TLBFLUSH_H

-#include <linux/threads.h>
-#include <linux/mm.h>
-#include <asm/page.h>
-
 /*
  * TLB flushing:
  *
@@ -15,21 +11,37 @@
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */

-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void __flush_tlb_range(struct mm_struct *mm,
-			    unsigned long start, unsigned long end);
-#define flush_tlb_range(vma, start, end) \
-	__flush_tlb_range(vma->vm_mm, start, end)
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192

-#define flush_tlb_kernel_range(start, end) \
-	__flush_tlb_range(&init_mm, (start), (end))
+struct mm_struct;
+struct ppc64_tlb_batch {
+	unsigned long index;
+	unsigned long context;
+	struct mm_struct *mm;
+	pte_t pte[PPC64_TLB_BATCH_NR];
+	unsigned long addr[PPC64_TLB_BATCH_NR];
+	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
 {
-	/* PPC has hw page tables. */
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	if (batch->index)
+		__flush_tlb_pending(batch);
 }
+
+#define flush_tlb_mm(mm)			flush_tlb_pending()
+#define flush_tlb_page(vma, addr)		flush_tlb_pending()
+#define flush_tlb_range(vma, start, end)	flush_tlb_pending()
+#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
+#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

 extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 			    int local);
