Linuxppc-dev Digest, Vol 49, Issue 89

limitjiang 03212009 at bjtu.edu.cn
Thu Sep 18 12:57:12 EST 2008




2008-09-18 



limitjiang 



From: linuxppc-dev-request at ozlabs.org 
Sent: 2008-09-18 09:55:11 
To: linuxppc-dev at ozlabs.org 
Cc: 
Subject: Linuxppc-dev Digest, Vol 49, Issue 89 
 
Send Linuxppc-dev mailing list submissions to
linuxppc-dev at ozlabs.org

To subscribe or unsubscribe via the World Wide Web, visit
https://ozlabs.org/mailman/listinfo/linuxppc-dev
or, via email, send a message with subject or body 'help' to
linuxppc-dev-request at ozlabs.org

You can reach the person managing the list at
linuxppc-dev-owner at ozlabs.org

When replying, please edit your Subject line so it is more specific
than "Re: Contents of Linuxppc-dev digest..."


Today's Topics:

   1. [PATCH v7 2/4] powerpc: Fixes for CONFIG_PTE_64BIT for SMP
      support (Kumar Gala)
   2. [PATCH v7 3/4] powerpc/fsl-booke: Fixup 64-bit PTE reading
      for SMP support (Kumar Gala)
   3. [PATCH v7 4/4] powerpc/mm: Implement _PAGE_SPECIAL &
      pte_special() for 32-bit (Kumar Gala)


----------------------------------------------------------------------

Message: 1
Date: Wed, 17 Sep 2008 18:00:03 -0500
From: Kumar Gala <galak at kernel.crashing.org>
Subject: [PATCH v7 2/4] powerpc: Fixes for CONFIG_PTE_64BIT for SMP
support
To: linuxppc-dev at ozlabs.org
Message-ID:
<1221692405-19880-2-git-send-email-galak at kernel.crashing.org >

There are some minor issues with supporting 64-bit PTEs on a 32-bit processor
when dealing with SMP.

* We need to order the stores in set_pte_at() to make sure the flag word
  is set second.
* Change pte_clear() to use pte_update() so only the flag word is cleared.
* Add a check to set_pte_at() to clear the pte first if it was already set.

Signed-off-by: Kumar Gala <galak at kernel.crashing.org>
---

Changed the set_pte_at() check to handle the case where it is called with a
pte that is already present, clearing it with proper ordering.

- k

 arch/powerpc/include/asm/highmem.h       |    2 +-
 arch/powerpc/include/asm/pgtable-ppc32.h |   28 +++++++++++++++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 5d99b64..91c5895 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -84,7 +84,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 #ifdef CONFIG_DEBUG_HIGHMEM
  BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
- set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+ __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
  flush_tlb_page(NULL, vaddr);

  return (void*) vaddr;
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6fe39e3..d1d23b9 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -517,7 +517,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

 #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+#define pte_clear(mm, addr, ptep) \
+ do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -612,9 +613,6 @@ static inline unsigned long pte_update(pte_t *p,
  return old;
 }
 #else /* CONFIG_PTE_64BIT */
-/* TODO: Change that to only modify the low word and move set_pte_at()
- * out of line
- */
 static inline unsigned long long pte_update(pte_t *p,
      unsigned long clr,
      unsigned long set)
@@ -652,16 +650,36 @@ static inline unsigned long long pte_update(pte_t *p,
  * On machines which use an MMU hash table we avoid changing the
  * _PAGE_HASHPTE bit.
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
        pte_t *ptep, pte_t pte)
 {
 #if _PAGE_HASHPTE != 0
  pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+ __asm__ __volatile__("\
+ stw%U0%X0 %2,%0\n\
+ eieio\n\
+ stw%U0%X0 %L2,%1"
+ : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+ : "r" (pte) : "memory");
 #else
  *ptep = pte;
 #endif
 }

+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+       pte_t *ptep, pte_t pte)
+{
+#if defined(CONFIG_PTE_64BIT)
+ if (unlikely(pte_present(*ptep))) {
+ pte_clear(mm, addr, ptep);
+ smp_wmb();
+ }
+#endif
+ __set_pte_at(mm, addr, ptep, pte);
+}
+
 /*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, we fix that up here.
-- 
1.5.5.1
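
The ordering constraint above is worth spelling out: with CONFIG_PTE_64BIT a
PTE is two 32-bit words, and a 32-bit CPU cannot store both atomically, so a
concurrent reader (the TLB miss handler fixed in the next patch) could
otherwise see the _PAGE_PRESENT flag paired with stale upper bits. The patch
stores the upper word first, then an eieio, then the flag word. Below is a
minimal userspace C sketch of the same idea; the struct layout, names and the
GCC fence builtin are illustrative assumptions standing in for the kernel
code, not the implementation in the patch itself.

#include <stdint.h>

/* Two-word "PTE": on a 32-bit CPU the halves cannot be stored atomically. */
struct pte64 {
	uint32_t hi;     /* upper physical-address bits           */
	uint32_t flags;  /* low word, where the present bit lives */
};

#define PRESENT 0x1u

void pte_set_ordered(volatile struct pte64 *p, uint32_t hi, uint32_t flags)
{
	p->hi = hi;                              /* 1: upper/data word first     */
	__atomic_thread_fence(__ATOMIC_RELEASE); /* stands in for eieio          */
	p->flags = flags | PRESENT;              /* 2: word with "present" last  */
}

A reader that loads the flag word first and only then the upper word (see the
next patch) can therefore never attribute stale upper bits to a present PTE.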



------------------------------

Message: 2
Date: Wed, 17 Sep 2008 18:00:04 -0500
From: Kumar Gala <galak at kernel.crashing.org>
Subject: [PATCH v7 3/4] powerpc/fsl-booke: Fixup 64-bit PTE reading
for SMP support
To: linuxppc-dev at ozlabs.org
Message-ID:
<1221692405-19880-3-git-send-email-galak at kernel.crashing.org >

We need to create a false data dependency to ensure the loads of
the pte are done in the right order.

Signed-off-by: Kumar Gala <galak at kernel.crashing.org>
---
 arch/powerpc/kernel/head_fsl_booke.S |   26 +++++++++++++++++++++-----
 1 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3cb52fa..377e0c1 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -579,13 +579,19 @@ interrupt_base:

  FIND_PTE
  andc. r13,r13,r11 /* Check permission */
- bne 2f /* Bail if permission mismach */

 #ifdef CONFIG_PTE_64BIT
- lwz r13, 0(r12)
+#ifdef CONFIG_SMP
+ subf r10,r11,r12 /* create false data dep */
+ lwzx r13,r11,r10 /* Get upper pte bits */
+#else
+ lwz r13,0(r12) /* Get upper pte bits */
+#endif
 #endif

-  /* Jump to common tlb load */
+ bne 2f /* Bail if permission/valid mismach */
+
+ /* Jump to common tlb load */
  b finish_tlb_load
 2:
  /* The bailout.  Restore registers to pre-exception conditions
@@ -640,10 +646,20 @@ interrupt_base:

  FIND_PTE
  andc. r13,r13,r11 /* Check permission */
+
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+ subf r10,r11,r12 /* create false data dep */
+ lwzx r13,r11,r10 /* Get upper pte bits */
+#else
+ lwz r13,0(r12) /* Get upper pte bits */
+#endif
+#endif
+
  bne 2f /* Bail if permission mismach */

 #ifdef CONFIG_PTE_64BIT
- lwz r13, 0(r12)
+ lwz r13,0(r12)
 #endif

  /* Jump to common TLB load point */
@@ -702,7 +718,7 @@ interrupt_base:
 /*
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
- * r10 - EA of fault
+ * r10 - available to use
  * r11 - TLB (info from Linux PTE)
  * r12 - available to use
  * r13 - upper bits of PTE (if PTE_64BIT) or available to use
-- 
1.5.5.1
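
This pairs with the writer ordering from the previous patch: the reader must
not see upper PTE bits that are older than the flag word it just checked.
Rather than adding a barrier to the TLB miss fast path, the patch makes the
address of the second load depend on the value of the first one: subf computes
r10 = r12 - r11, and lwzx then loads from r11 + r10 = r12, so the CPU cannot
issue the upper-word load before the flag word in r11 is available. A portable
C analogue is an acquire load on the flag word; the sketch below uses that
(C11 atomics, hypothetical parameter names) and is not the assembly technique
itself.

#include <stdatomic.h>
#include <stdint.h>

/* Read a two-word PTE so that the upper word observed is at least as new as
 * the flag word: load the flag word with acquire semantics first. (The patch
 * gets the same ordering more cheaply with an address dependency instead of
 * a barrier.) */
void pte_read_ordered(const _Atomic uint32_t *flags_word,
                      const uint32_t *hi_word,
                      uint32_t *flags_out, uint32_t *hi_out)
{
	*flags_out = atomic_load_explicit(flags_word, memory_order_acquire);
	*hi_out = *hi_word;  /* may not be reordered before the acquire load */
}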



------------------------------

Message: 3
Date: Wed, 17 Sep 2008 18:00:05 -0500
From: Kumar Gala <galak at kernel.crashing.org>
Subject: [PATCH v7 4/4] powerpc/mm: Implement _PAGE_SPECIAL &
pte_special() for 32-bit
To: linuxppc-dev at ozlabs.org
Message-ID:
<1221692405-19880-4-git-send-email-galak at kernel.crashing.org >

Implement _PAGE_SPECIAL and pte_special() for 32-bit powerpc. This bit will
be used by the fast get_user_pages() to differentiate PTEs that correspond
to a valid struct page from special mappings that don't, such as IO mappings
obtained via io_remap_pfn_range().

We currently only implement this on the sub-arches that support SMP or will
do so in the future (6xx, 44x, FSL-BookE), and not on 8xx and 40x.

Signed-off-by: Kumar Gala <galak at kernel.crashing.org>
Acked-by: Benjamin Herrenschmidt <benh at kernel.crashing.org>
---
 arch/powerpc/include/asm/pgtable-ppc32.h |   15 +++++++++++++--
 1 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index d1d23b9..e8f31a5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -261,6 +261,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
 #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
 #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
+#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
 #define _PAGE_USER 0x00000040 /* S: User page */
 #define _PAGE_ENDIAN 0x00000080 /* H: E bit */
 #define _PAGE_GUARDED 0x00000100 /* H: G bit */
@@ -276,6 +277,7 @@ extern int icache_44x_need_flush;
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK 0xffffffff00000000ULL

+#define __HAVE_ARCH_PTE_SPECIAL

 #elif defined(CONFIG_FSL_BOOKE)
 /*
@@ -305,6 +307,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_COHERENT 0x00100 /* H: M bit */
 #define _PAGE_NO_CACHE 0x00200 /* H: I bit */
 #define _PAGE_WRITETHRU 0x00400 /* H: W bit */
+#define _PAGE_SPECIAL 0x00800 /* S: Special page */

 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
@@ -315,6 +318,8 @@ extern int icache_44x_need_flush;
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD (~PAGE_MASK)

+#define __HAVE_ARCH_PTE_SPECIAL
+
 #elif defined(CONFIG_8xx)
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT 0x0001 /* Page is valid */
@@ -362,6 +367,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_ACCESSED 0x100 /* R: page referenced */
 #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
 #define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */

 #define _PTE_NONE_MASK _PAGE_HASHPTE

@@ -372,6 +378,8 @@ extern int icache_44x_need_flush;
 /* Hash table based platforms need atomic updates of the linux PTE */
 #define PTE_ATOMIC_UPDATES 1

+#define __HAVE_ARCH_PTE_SPECIAL
+
 #endif

 /*
@@ -404,6 +412,9 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_WRITETHRU
 #define _PAGE_WRITETHRU 0
 #endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL 0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #endif
@@ -534,7 +545,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte) { return 0; }
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

 static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -553,7 +564,7 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
  pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
- return pte; }
+ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 static inline unsigned long pte_pgprot(pte_t pte)
 {
  return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
-- 
1.5.5.1
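
For context on how the new bit is meant to be consumed: a fast
get_user_pages() walks the page tables without taking locks and must refuse
to take a reference on anything that has no backing struct page, which is
exactly what pte_special() now reports. Below is a small self-contained model
of the helpers; the bit value follows the FSL BookE case above, while the
types and the fast_gup_ok() name are simplified assumptions rather than
kernel code.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pte_t;

#define _PAGE_PRESENT  0x00001ULL
#define _PAGE_SPECIAL  0x00800ULL   /* S: Special page (FSL BookE value) */

static int   pte_present(pte_t p)   { return (p.val & _PAGE_PRESENT) != 0; }
static int   pte_special(pte_t p)   { return (p.val & _PAGE_SPECIAL) != 0; }
static pte_t pte_mkspecial(pte_t p) { p.val |= _PAGE_SPECIAL; return p; }

/* A fast-GUP-style check: only a present, non-special PTE may be turned
 * straight into a refcounted struct page; anything else falls back to the
 * slow path. */
static int fast_gup_ok(pte_t p)
{
	return pte_present(p) && !pte_special(p);
}

int main(void)
{
	pte_t normal = { _PAGE_PRESENT };
	pte_t mmio   = pte_mkspecial(normal);  /* e.g. an io_remap_pfn_range() mapping */

	printf("normal ok: %d, special ok: %d\n", fast_gup_ok(normal), fast_gup_ok(mmio));
	return 0;
}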



------------------------------

_______________________________________________
Linuxppc-dev mailing list
Linuxppc-dev at ozlabs.org
https://ozlabs.org/mailman/listinfo/linuxppc-dev

End of Linuxppc-dev Digest, Vol 49, Issue 89
********************************************