[PATCH v3 3/3] powerpc: mm: support page table check
Christophe Leroy
christophe.leroy at csgroup.eu
Thu Nov 3 19:43:07 AEDT 2022
Le 24/10/2022 à 02:35, Rohan McLure a écrit :
> On creation and clearing of a page table mapping, instrument such calls
> by invoking page_table_check_pte_set and page_table_check_pte_clear
> respectively. These calls serve as a sanity check against illegal
> mappings.
>
> Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit
> platforms implementing Book3S.
>
> Change pud_pfn to trigger a runtime bug rather than a build bug, as it
> is consumed by page_table_check_pud_{clear,set}, which are not called.
>
> See also:
>
> riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> arm64 in commit 42b2547137f5 ("arm64/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
> check")
>
> Signed-off-by: Rohan McLure <rmclure at linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy at csgroup.eu>
> ---
> V2: Update spacing and types assigned to pte_update calls.
> V3: Update one last pte_update call to remove __pte invocation.
> ---
> arch/powerpc/Kconfig | 1 +
> arch/powerpc/include/asm/book3s/32/pgtable.h | 9 ++++++++-
> arch/powerpc/include/asm/book3s/64/pgtable.h | 18 +++++++++++++++---
> arch/powerpc/include/asm/nohash/32/pgtable.h | 7 ++++++-
> arch/powerpc/include/asm/nohash/64/pgtable.h | 8 ++++++--
> arch/powerpc/include/asm/nohash/pgtable.h | 1 +
> 6 files changed, 37 insertions(+), 7 deletions(-)
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 4c466acdc70d..6c213ac46a92 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -149,6 +149,7 @@ config PPC
> select ARCH_STACKWALK
> select ARCH_SUPPORTS_ATOMIC_RMW
> select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
> + select ARCH_SUPPORTS_PAGE_TABLE_CHECK
> select ARCH_USE_BUILTIN_BSWAP
> select ARCH_USE_CMPXCHG_LOCKREF if PPC64
> select ARCH_USE_MEMTEST
> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
> index 8bf1c538839a..6a592426b935 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
> @@ -53,6 +53,8 @@
>
> #ifndef __ASSEMBLY__
>
> +#include <linux/page_table_check.h>
> +
> static inline bool pte_user(pte_t pte)
> {
> return pte_val(pte) & _PAGE_USER;
> @@ -353,7 +355,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
> static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep)
> {
> - return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
> + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
> +
> + page_table_check_pte_clear(mm, addr, old_pte);
> +
> + return old_pte;
> }
>
> #define __HAVE_ARCH_PTEP_SET_WRPROTECT
> @@ -545,6 +551,7 @@ static inline bool pmd_user(pmd_t pmd)
> static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep, pte_t pte, int percpu)
> {
> + page_table_check_pte_set(mm, addr, ptep, pte);
> #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
> /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
> * helper pte_update() which does an atomic update. We need to do that
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 3083111f9d0a..b5c5718d9b90 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -181,6 +181,8 @@
> #define PAGE_AGP (PAGE_KERNEL_NC)
>
> #ifndef __ASSEMBLY__
> +#include <linux/page_table_check.h>
> +
> /*
> * page table defines
> */
> @@ -484,8 +486,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
> static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep)
> {
> - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
> - return __pte(old);
> + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
> +
> + page_table_check_pte_clear(mm, addr, old_pte);
> +
> + return old_pte;
> }
>
> #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
> @@ -494,11 +499,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
> pte_t *ptep, int full)
> {
> if (full && radix_enabled()) {
> + pte_t old_pte;
> +
> /*
> * We know that this is a full mm pte clear and
> * hence can be sure there is no parallel set_pte.
> */
> - return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
> + old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
> + page_table_check_pte_clear(mm, addr, old_pte);
> +
> + return old_pte;
> }
> return ptep_get_and_clear(mm, addr, ptep);
> }
> @@ -884,6 +894,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
> */
> pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
>
> + page_table_check_pte_set(mm, addr, ptep, pte);
> +
> if (radix_enabled())
> return radix__set_pte_at(mm, addr, ptep, pte, percpu);
> return hash__set_pte_at(mm, addr, ptep, pte, percpu);
> diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
> index b92044d9d778..61e96f82044a 100644
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@ -166,6 +166,7 @@ void unmap_kernel_page(unsigned long va);
> #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
>
> #ifndef __ASSEMBLY__
> +#include <linux/page_table_check.h>
>
> #define pte_clear(mm, addr, ptep) \
> do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
> @@ -305,7 +306,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
> static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep)
> {
> - return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
> + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
> +
> + page_table_check_pte_clear(mm, addr, old_pte);
> +
> + return old_pte;
> }
>
> #define __HAVE_ARCH_PTEP_SET_WRPROTECT
> diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
> index 23c5135178d1..fedcdf2a959d 100644
> --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
> @@ -83,6 +83,7 @@
> #define H_PAGE_4K_PFN 0
>
> #ifndef __ASSEMBLY__
> +#include <linux/page_table_check.h>
> /* pte_clear moved to later in this file */
>
> static inline pte_t pte_mkwrite(pte_t pte)
> @@ -253,8 +254,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
> static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep)
> {
> - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
> - return __pte(old);
> + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
> +
> + page_table_check_pte_clear(mm, addr, old_pte);
> +
> + return old_pte;
> }
>
> static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
> index b499da6c1a99..62b221b7cccf 100644
> --- a/arch/powerpc/include/asm/nohash/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
> @@ -185,6 +185,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
> static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep, pte_t pte, int percpu)
> {
> + page_table_check_pte_set(mm, addr, ptep, pte);
> /* Second case is 32-bit with 64-bit PTE. In this case, we
> * can just store as long as we do the two halves in the right order
> * with a barrier in between.
More information about the Linuxppc-dev
mailing list