ppc64 PTE hacks

Anton Blanchard anton at samba.org
Sat Jan 24 20:35:09 EST 2004


> Here it is updated for 2.6, using percpu data etc. It's currently getting
> some stress testing, and if that passes and there are no concerns I'll
> merge it in. As Ben mentioned, we need it for page aging to work.

It turns out there were some nasty bugs (the rmap code wasn't working on
vmalloc regions). We were also doing spurious flushes on PTEs that
previously had the DIRTY/RW bits changed.

I'm stressing this for a while; if things look good and there are no
complaints I'll check it in.
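For anyone skimming the patch, the central change is to drop the open-coded
ppc64_tlb_batch[NR_CPUS] array and the page-table walkers in init.c in favour
of a per-CPU batch (via __get_cpu_var) that collects stale hash PTEs and is
drained by flush_tlb_pending() at context switch and from tlb_flush(). The
stand-alone C sketch below models just that batching pattern; the batch size
mirrors PPC64_TLB_BATCH_NR, but queue_update(), __flush_pending() and main()
are illustrative stand-ins, not kernel code.

/*
 * Stand-alone sketch of the deferred-flush batching pattern used by the
 * patch below.  NOT the kernel implementation: queue_update(), the printf
 * in __flush_pending() and main() are illustrative stand-ins only.
 */
#include <stdio.h>

#define TLB_BATCH_NR 192	/* mirrors PPC64_TLB_BATCH_NR */

struct tlb_batch {
	unsigned long index;
	unsigned long pte[TLB_BATCH_NR];
	unsigned long addr[TLB_BATCH_NR];
};

/* The kernel keeps one of these per CPU (__get_cpu_var); one will do here. */
static struct tlb_batch batch;

/* Stand-in for flush_hash_range(): invalidate everything queued so far. */
static void __flush_pending(struct tlb_batch *b)
{
	printf("flushing %lu hash PTEs\n", b->index);
	b->index = 0;
}

/* Rough equivalent of flush_tlb_pending(): drain the batch if non-empty. */
static void flush_pending(void)
{
	if (batch.index)
		__flush_pending(&batch);
}

/*
 * Rough equivalent of hpte_update(): rather than flushing the hash PTE
 * immediately, queue the old PTE value and its address, and flush in bulk
 * once the batch fills up.
 */
static void queue_update(unsigned long pte, unsigned long addr)
{
	batch.pte[batch.index] = pte;
	batch.addr[batch.index] = addr;
	if (++batch.index == TLB_BATCH_NR)
		__flush_pending(&batch);
}

int main(void)
{
	unsigned long addr;

	for (addr = 0; addr < 1000UL * 4096; addr += 4096)
		queue_update(0x1234, addr);	/* fake PTE values */

	flush_pending();	/* what the context-switch path does */
	return 0;
}
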

Anton

===== arch/ppc64/kernel/pSeries_htab.c 1.14 vs edited =====
--- 1.14/arch/ppc64/kernel/pSeries_htab.c	Tue Jan 20 13:07:05 2004
+++ edited/arch/ppc64/kernel/pSeries_htab.c	Sat Jan 24 17:04:43 2004
@@ -300,7 +300,7 @@
 	int i, j;
 	HPTE *hptep;
 	Hpte_dword0 dw0;
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

 	/* XXX fix for large ptes */
 	unsigned long large = 0;
===== arch/ppc64/kernel/pSeries_lpar.c 1.37 vs edited =====
--- 1.37/arch/ppc64/kernel/pSeries_lpar.c	Fri Jan 23 11:18:06 2004
+++ edited/arch/ppc64/kernel/pSeries_lpar.c	Sat Jan 24 17:04:43 2004
@@ -420,10 +420,8 @@

 	lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7));

-	if (lpar_rc == H_Not_Found) {
-		udbg_printf("updatepp missed\n");
+	if (lpar_rc == H_Not_Found)
 		return -1;
-	}

 	if (lpar_rc != H_Success)
 		panic("bad return code from pte protect rc = %lx\n", lpar_rc);
@@ -521,10 +519,8 @@
 	lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
 				   &dummy2);

-	if (lpar_rc == H_Not_Found) {
-		udbg_printf("invalidate missed\n");
+	if (lpar_rc == H_Not_Found)
 		return;
-	}

 	if (lpar_rc != H_Success)
 		panic("Bad return code from invalidate rc = %lx\n", lpar_rc);
@@ -539,7 +535,7 @@
 {
 	int i;
 	unsigned long flags;
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

 	spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

===== arch/ppc64/kernel/process.c 1.46 vs edited =====
--- 1.46/arch/ppc64/kernel/process.c	Thu Jan 22 18:37:14 2004
+++ edited/arch/ppc64/kernel/process.c	Sat Jan 24 17:04:44 2004
@@ -49,14 +49,20 @@
 #include <asm/hardirq.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
+#include <asm/tlbflush.h>

 #ifndef CONFIG_SMP
 struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
 #endif

-struct mm_struct ioremap_mm = { pgd             : ioremap_dir
-                               ,page_table_lock : SPIN_LOCK_UNLOCKED };
+struct mm_struct ioremap_mm = {
+	.pgd		= ioremap_dir,
+	.mm_users	= ATOMIC_INIT(2),
+	.mm_count	= ATOMIC_INIT(1),
+	.cpu_vm_mask	= CPU_MASK_ALL,
+	.page_table_lock = SPIN_LOCK_UNLOCKED,
+};

 char *sysmap = NULL;
 unsigned long sysmap_size = 0;
@@ -145,6 +151,8 @@
 	if (new->thread.regs && last_task_used_altivec == new)
 		new->thread.regs->msr |= MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
+
+	flush_tlb_pending();

 	new_thread = &new->thread;
 	old_thread = &current->thread;
===== arch/ppc64/mm/Makefile 1.12 vs edited =====
--- 1.12/arch/ppc64/mm/Makefile	Thu Jan 22 16:29:08 2004
+++ edited/arch/ppc64/mm/Makefile	Sat Jan 24 17:04:44 2004
@@ -4,6 +4,6 @@

 EXTRA_CFLAGS += -mno-minimal-toc

-obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o
+obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o
 obj-$(CONFIG_DISCONTIGMEM) += numa.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
===== arch/ppc64/mm/hash_utils.c 1.45 vs edited =====
--- 1.45/arch/ppc64/mm/hash_utils.c	Tue Jan 20 13:07:09 2004
+++ edited/arch/ppc64/mm/hash_utils.c	Sat Jan 24 17:04:44 2004
@@ -325,8 +325,7 @@
 		ppc_md.flush_hash_range(context, number, local);
 	} else {
 		int i;
-		struct ppc64_tlb_batch *batch =
-			&ppc64_tlb_batch[smp_processor_id()];
+		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

 		for (i = 0; i < number; i++)
 			flush_hash_page(context, batch->addr[i], batch->pte[i],
===== arch/ppc64/mm/init.c 1.55 vs edited =====
--- 1.55/arch/ppc64/mm/init.c	Tue Jan 20 13:07:09 2004
+++ edited/arch/ppc64/mm/init.c	Sat Jan 24 17:04:44 2004
@@ -90,57 +90,6 @@
 /* max amount of RAM to use */
 unsigned long __max_memory;

-/* This is declared as we are using the more or less generic
- * include/asm-ppc64/tlb.h file -- tgall
- */
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-unsigned long pte_freelist_forced_free;
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-void pte_free_now(struct page *ptepage)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 0, 1);
-
-	pte_free(ptepage);
-}
-
-static void pte_free_rcu_callback(void *arg)
-{
-	struct pte_freelist_batch *batch = arg;
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pte_free(batch->pages[i]);
-	free_page((unsigned long)batch);
-}
-
-void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback, batch);
-}
-
-void pte_free_finish(void)
-{
-	/* This is safe as we are holding page_table_lock */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
-
 void show_mem(void)
 {
 	int total = 0, reserved = 0;
@@ -170,17 +119,27 @@
 	printk("%d pages swap cached\n",cached);
 }

-void *
-ioremap(unsigned long addr, unsigned long size)
-{
 #ifdef CONFIG_PPC_ISERIES
+
+void *ioremap(unsigned long addr, unsigned long size)
+{
 	return (void*)addr;
+}
+
+void iounmap(void *addr)
+{
+	return;
+}
+
 #else
+
+void *
+ioremap(unsigned long addr, unsigned long size)
+{
 	void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
 	if(mem_init_done)
 		return eeh_ioremap(addr, ret);	/* may remap the addr */
 	return ret;
-#endif
 }

 void *
@@ -326,7 +285,7 @@
  *
  * XXX	what about calls before mem_init_done (ie python_countermeasures())
  */
-void pSeries_iounmap(void *addr)
+void iounmap(void *addr)
 {
 	unsigned long address, start, end, size;
 	struct mm_struct *mm;
@@ -352,29 +311,18 @@
 	spin_lock(&mm->page_table_lock);

 	dir = pgd_offset_i(address);
-	flush_cache_all();
+	flush_cache_vunmap(address, end);
 	do {
 		unmap_im_area_pmd(dir, address, end - address);
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	__flush_tlb_range(mm, start, end);
+	flush_tlb_kernel_range(start, end);

 	spin_unlock(&mm->page_table_lock);
 	return;
 }

-void iounmap(void *addr)
-{
-#ifdef CONFIG_PPC_ISERIES
-	/* iSeries I/O Remap is a noop              */
-	return;
-#else
-	/* DRENG / PPPBBB todo */
-	return pSeries_iounmap(addr);
-#endif
-}
-
 int iounmap_explicit(void *addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -463,152 +411,7 @@
 	}
 }

-void
-flush_tlb_mm(struct mm_struct *mm)
-{
-	struct vm_area_struct *mp;
-
-	spin_lock(&mm->page_table_lock);
-
-	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
-		__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
-
-	/* XXX are there races with checking cpu_vm_mask? - Anton */
-	cpus_clear(mm->cpu_vm_mask);
-
-	spin_unlock(&mm->page_table_lock);
-}
-
-/*
- * Callers should hold the mm->page_table_lock
- */
-void
-flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-	unsigned long context = 0;
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep;
-	pte_t pte;
-	int local = 0;
-	cpumask_t tmp;
-
-	switch( REGION_ID(vmaddr) ) {
-	case VMALLOC_REGION_ID:
-		pgd = pgd_offset_k( vmaddr );
-		break;
-	case IO_REGION_ID:
-		pgd = pgd_offset_i( vmaddr );
-		break;
-	case USER_REGION_ID:
-		pgd = pgd_offset( vma->vm_mm, vmaddr );
-		context = vma->vm_mm->context;
-
-		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		tmp = cpumask_of_cpu(smp_processor_id());
-		if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
-			local = 1;
-
-		break;
-	default:
-		panic("flush_tlb_page: invalid region 0x%016lx", vmaddr);
-
-	}
-
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, vmaddr);
-		if (pmd_present(*pmd)) {
-			ptep = pte_offset_kernel(pmd, vmaddr);
-			/* Check if HPTE might exist and flush it if so */
-			pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-			if ( pte_val(pte) & _PAGE_HASHPTE ) {
-				flush_hash_page(context, vmaddr, pte, local);
-			}
-		}
-		WARN_ON(pmd_hugepage(*pmd));
-	}
-}
-
-struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
-
-void
-__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep;
-	pte_t pte;
-	unsigned long pgd_end, pmd_end;
-	unsigned long context = 0;
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
-	unsigned long i = 0;
-	int local = 0;
-	cpumask_t tmp;
-
-	switch(REGION_ID(start)) {
-	case VMALLOC_REGION_ID:
-		pgd = pgd_offset_k(start);
-		break;
-	case IO_REGION_ID:
-		pgd = pgd_offset_i(start);
-		break;
-	case USER_REGION_ID:
-		pgd = pgd_offset(mm, start);
-		context = mm->context;
-
-		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		tmp = cpumask_of_cpu(smp_processor_id());
-		if (cpus_equal(mm->cpu_vm_mask, tmp))
-			local = 1;
-
-		break;
-	default:
-		panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
-	}
-
-	do {
-		pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
-		if (pgd_end > end)
-			pgd_end = end;
-		if (!pgd_none(*pgd)) {
-			pmd = pmd_offset(pgd, start);
-			do {
-				pmd_end = (start + PMD_SIZE) & PMD_MASK;
-				if (pmd_end > end)
-					pmd_end = end;
-				if (pmd_present(*pmd)) {
-					ptep = pte_offset_kernel(pmd, start);
-					do {
-						if (pte_val(*ptep) & _PAGE_HASHPTE) {
-							pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-							if (pte_val(pte) & _PAGE_HASHPTE) {
-								batch->pte[i] = pte;
-								batch->addr[i] = start;
-								i++;
-								if (i == PPC64_TLB_BATCH_NR) {
-									flush_hash_range(context, i, local);
-									i = 0;
-								}
-							}
-						}
-						start += PAGE_SIZE;
-						++ptep;
-					} while (start < pmd_end);
-				} else {
-					WARN_ON(pmd_hugepage(*pmd));
-					start = pmd_end;
-				}
-				++pmd;
-			} while (start < pgd_end);
-		} else {
-			start = pgd_end;
-		}
-		++pgd;
-	} while (start < end);
-
-	if (i)
-		flush_hash_range(context, i, local);
-}
+#endif

 void free_initmem(void)
 {
===== include/asm-ppc64/pgtable.h 1.33 vs edited =====
--- 1.33/include/asm-ppc64/pgtable.h	Thu Jan 22 17:11:59 2004
+++ edited/include/asm-ppc64/pgtable.h	Sat Jan 24 17:04:45 2004
@@ -12,6 +12,7 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+#include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */

 /* PMD_SHIFT determines what a second-level page table entry can map */
@@ -289,71 +290,115 @@

 /* Atomic PTE updates */

-static inline unsigned long pte_update( pte_t *p, unsigned long clr,
-					unsigned long set )
+static inline unsigned long pte_update(pte_t *p, unsigned long clr)
 {
 	unsigned long old, tmp;
-
+
 	__asm__ __volatile__(
 	"1:	ldarx	%0,0,%3		# pte_update\n\
-	andi.	%1,%0,%7\n\
+	andi.	%1,%0,%6\n\
 	bne-	1b \n\
 	andc	%1,%0,%4 \n\
-	or	%1,%1,%5 \n\
 	stdcx.	%1,0,%3 \n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "r" (set), "m" (*p), "i" (_PAGE_BUSY)
+	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
 	: "cc" );
 	return old;
 }

+/* PTE updating functions */
+extern void hpte_update(pte_t *ptep, unsigned long pte, int wrprot);
+
 static inline int ptep_test_and_clear_young(pte_t *ptep)
 {
-	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_ACCESSED | _PAGE_HPTEFLAGS);
+	if (old & _PAGE_HASHPTE) {
+		hpte_update(ptep, old, 0);
+		flush_tlb_pending();	/* XXX generic code doesn't flush */
+	}
+	return (old & _PAGE_ACCESSED) != 0;
 }

+/*
+ * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
+ * moment we do it but we need to test if the optimisation is worth it.
+ */
+#if 1
 static inline int ptep_test_and_clear_dirty(pte_t *ptep)
 {
-	return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_DIRTY | _PAGE_HPTEFLAGS);
+	if (old & _PAGE_HASHPTE)
+		hpte_update(ptep, old, 0);
+	return (old & _PAGE_DIRTY) != 0;
 }

-static inline pte_t ptep_get_and_clear(pte_t *ptep)
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_RW | _PAGE_HPTEFLAGS);
+	if (old & _PAGE_HASHPTE)
+		hpte_update(ptep, old, 0);
+}
+#else
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
 {
-	return __pte(pte_update(ptep, ~_PAGE_HPTEFLAGS, 0));
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_DIRTY);
+	if ((~old & (_PAGE_HASHPTE | _PAGE_RW | _PAGE_DIRTY)) == 0)
+		hpte_update(ptep, old, 1);
+	return (old & _PAGE_DIRTY) != 0;
 }

 static inline void ptep_set_wrprotect(pte_t *ptep)
 {
-	pte_update(ptep, _PAGE_RW, 0);
+	unsigned long old;
+
+	old = pte_update(ptep, _PAGE_RW);
+	if ((~old & (_PAGE_HASHPTE | _PAGE_RW | _PAGE_DIRTY)) == 0)
+		hpte_update(ptep, old, 1);
 }
+#endif

-static inline void ptep_mkdirty(pte_t *ptep)
+static inline pte_t ptep_get_and_clear(pte_t *ptep)
 {
-	pte_update(ptep, 0, _PAGE_DIRTY);
+	unsigned long old = pte_update(ptep, ~0UL);
+
+	if (old & _PAGE_HASHPTE)
+		hpte_update(ptep, old, 0);
+	return __pte(old);
 }

-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+static inline void pte_clear(pte_t * ptep)
+{
+	unsigned long old = pte_update(ptep, ~0UL);

-#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+	if (old & _PAGE_HASHPTE)
+		hpte_update(ptep, old, 0);
+}

 /*
  * set_pte stores a linux PTE into the linux page table.
- * On machines which use an MMU hash table we avoid changing the
- * _PAGE_HASHPTE bit.
  */
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	pte_update(ptep, ~_PAGE_HPTEFLAGS, pte_val(pte) & ~_PAGE_HPTEFLAGS);
+	if (pte_present(*ptep))
+		pte_clear(ptep);
+	*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
 }

-static inline void pte_clear(pte_t * ptep)
-{
-	pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
-}
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

 extern unsigned long ioremap_bot, ioremap_base;

===== include/asm-ppc64/tlb.h 1.11 vs edited =====
--- 1.11/include/asm-ppc64/tlb.h	Tue Jan 20 13:08:24 2004
+++ edited/include/asm-ppc64/tlb.h	Sat Jan 24 17:04:45 2004
@@ -12,11 +12,9 @@
 #ifndef _PPC64_TLB_H
 #define _PPC64_TLB_H

-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
-#include <asm/page.h>
-#include <asm/mmu.h>

+struct mmu_gather;
 static inline void tlb_flush(struct mmu_gather *tlb);

 /* Avoid pulling in another include just for this */
@@ -29,66 +27,13 @@
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)

-/* Should make this at least as large as the generic batch size, but it
- * takes up too much space */
-#define PPC64_TLB_BATCH_NR 192
-
-struct ppc64_tlb_batch {
-	unsigned long index;
-	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long addr[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-};
-
-extern struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-					unsigned long address)
-{
-	int cpu = smp_processor_id();
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
-	unsigned long i = batch->index;
-	pte_t pte;
-	cpumask_t local_cpumask = cpumask_of_cpu(cpu);
-
-	if (pte_val(*ptep) & _PAGE_HASHPTE) {
-		pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-		if (pte_val(pte) & _PAGE_HASHPTE) {
-
-			batch->pte[i] = pte;
-			batch->addr[i] = address;
-			i++;
-
-			if (i == PPC64_TLB_BATCH_NR) {
-				int local = 0;
-
-				if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
-					local = 1;
-
-				flush_hash_range(tlb->mm->context, i, local);
-				i = 0;
-			}
-		}
-	}
-
-	batch->index = i;
-}
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)

 extern void pte_free_finish(void);

 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	int cpu = smp_processor_id();
-	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
-	int local = 0;
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-
-	if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
-		local = 1;
-
-	flush_hash_range(tlb->mm->context, batch->index, local);
-	batch->index = 0;
-
+	flush_tlb_pending();
 	pte_free_finish();
 }

===== include/asm-ppc64/tlbflush.h 1.4 vs edited =====
--- 1.4/include/asm-ppc64/tlbflush.h	Fri Jun  7 18:21:41 2002
+++ edited/include/asm-ppc64/tlbflush.h	Sat Jan 24 17:04:45 2004
@@ -1,10 +1,6 @@
 #ifndef _PPC64_TLBFLUSH_H
 #define _PPC64_TLBFLUSH_H

-#include <linux/threads.h>
-#include <linux/mm.h>
-#include <asm/page.h>
-
 /*
  * TLB flushing:
  *
@@ -15,21 +11,37 @@
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */

-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void __flush_tlb_range(struct mm_struct *mm,
-			    unsigned long start, unsigned long end);
-#define flush_tlb_range(vma, start, end) \
-	__flush_tlb_range(vma->vm_mm, start, end)
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192

-#define flush_tlb_kernel_range(start, end) \
-	__flush_tlb_range(&init_mm, (start), (end))
+struct mm_struct;
+struct ppc64_tlb_batch {
+	unsigned long index;
+	unsigned long context;
+	struct mm_struct *mm;
+	pte_t pte[PPC64_TLB_BATCH_NR];
+	unsigned long addr[PPC64_TLB_BATCH_NR];
+	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
 {
-	/* PPC has hw page tables. */
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	if (batch->index)
+		__flush_tlb_pending(batch);
 }
+
+#define flush_tlb_mm(mm)			flush_tlb_pending()
+#define flush_tlb_page(vma, addr)		flush_tlb_pending()
+#define flush_tlb_range(vma, start, end)	flush_tlb_pending()
+#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
+#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

 extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 			    int local);
===== include/linux/init_task.h 1.27 vs edited =====
--- 1.27/include/linux/init_task.h	Tue Aug 19 12:46:23 2003
+++ edited/include/linux/init_task.h	Sat Jan 24 17:04:46 2004
@@ -40,6 +40,7 @@
 	.mmap_sem	= __RWSEM_INITIALIZER(name.mmap_sem),	\
 	.page_table_lock =  SPIN_LOCK_UNLOCKED, 		\
 	.mmlist		= LIST_HEAD_INIT(name.mmlist),		\
+	.cpu_vm_mask	= CPU_MASK_ALL,				\
 	.default_kioctx = INIT_KIOCTX(name.default_kioctx, name),	\
 }

===== mm/vmalloc.c 1.29 vs edited =====
--- 1.29/mm/vmalloc.c	Wed Oct  8 12:53:44 2003
+++ edited/mm/vmalloc.c	Sat Jan 24 17:04:46 2004
@@ -114,15 +114,16 @@
 			       unsigned long size, pgprot_t prot,
 			       struct page ***pages)
 {
-	unsigned long end;
+	unsigned long base, end;

+	base = address & PGDIR_MASK;
 	address &= ~PGDIR_MASK;
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;

 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
 		if (!pte)
 			return -ENOMEM;
 		if (map_area_pte(pte, address, end - address, prot, pages))

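A note on the pgtable.h hunks: pte_update() loses its "set" argument and now
only clears bits atomically, spinning while _PAGE_BUSY is held by the
low-level hash code; callers that see _PAGE_HASHPTE in the returned old value
hand it to hpte_update(), which is what feeds the per-CPU batch sketched
above (see also the #if 1/#else comment about skipping the flush on RW/DIRTY
transitions). A rough user-space model of that clear-and-return-old
primitive, using a C11 compare-and-swap loop instead of ldarx/stdcx., might
look like this; PAGE_BUSY and the function name are placeholders, not the
real kernel definitions:

/*
 * Rough model of the new two-argument pte_update(): atomically clear the
 * 'clr' bits and return the previous value, retrying while the PTE is
 * marked busy.  The kernel does this with ldarx/stdcx. on the PTE itself;
 * a compare-and-swap loop stands in here.  PAGE_BUSY is a placeholder bit,
 * not the real _PAGE_BUSY value.
 */
#include <stdatomic.h>

#define PAGE_BUSY	0x0800UL	/* placeholder */

static unsigned long pte_update_model(_Atomic unsigned long *p,
				      unsigned long clr)
{
	unsigned long old, val;

	for (;;) {
		old = atomic_load(p);
		if (old & PAGE_BUSY)
			continue;	/* hash code owns the PTE, retry */
		val = old & ~clr;
		if (atomic_compare_exchange_weak(p, &old, val))
			return old;	/* caller checks old for _PAGE_HASHPTE */
	}
}

In the patch itself, ptep_get_and_clear() for example calls
pte_update(ptep, ~0UL) and then hpte_update() when the old value had
_PAGE_HASHPTE set, so the stale hash entry ends up in the per-CPU batch
rather than being flushed on the spot.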
** Sent via the linuxppc64-dev mail list. See http://lists.linuxppc.org/