[PATCH v5 5/5] powerpc: mm: support page table check
Rohan McLure
rmclure at linux.ibm.com
Fri Nov 18 11:21:46 AEDT 2022
Instrument the creation and clearing of page table mappings by invoking
page_table_check_pte_set and page_table_check_pte_clear respectively.
These calls serve as a sanity check against illegal mappings, for
example a physical page being erroneously double-mapped into userspace.
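The same pattern is applied throughout the pgtable headers; a minimal
sketch of the clear side (the pte_update() clear mask varies per
platform, ~0UL is used here only for illustration):

    static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
    {
            /* Atomically tear down the mapping and capture the old PTE. */
            pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));

            /* Report the cleared entry to the page table checker. */
            page_table_check_pte_clear(mm, addr, old_pte);

            return old_pte;
    }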
Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64 and for 32-bit
platforms implementing Book3S.
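For testing, the checker itself is then enabled through the generic
knobs (not part of this patch):

    CONFIG_PAGE_TABLE_CHECK=y
    # and either:
    CONFIG_PAGE_TABLE_CHECK_ENFORCED=y
    # or booting with:
    page_table_check=on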
Change pud_pfn to raise a runtime bug rather than a build bug, as it is
now referenced by page_table_check_pud_{clear,set}, even though those
helpers are never called on these platforms.
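The pud_pfn hunk is not reproduced in this excerpt; one plausible shape
for the runtime failure (a sketch only, not the literal hunk) is:

    static inline unsigned long pud_pfn(pud_t pud)
    {
            /*
             * Referenced so that page_table_check_pud_{clear,set} compile;
             * powerpc does not use pud entries directly, so reaching this
             * at runtime is a bug.
             */
            BUG();
            return 0;
    }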
See also:
  riscv support in commit 3fee229a8eb9 ("riscv/mm: enable ARCH_SUPPORTS_PAGE_TABLE_CHECK")
  arm64 in commit 42b2547137f5 ("arm64/mm: enable ARCH_SUPPORTS_PAGE_TABLE_CHECK")
  x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table check")
Reviewed-by: Russell Currey <ruscur at russell.cc>
Reviewed-by: Christophe Leroy <christophe.leroy at csgroup.eu>
Signed-off-by: Rohan McLure <rmclure at linux.ibm.com>
---
V2: Update spacing and types assigned to pte_update calls.
V3: Update one last pte_update call to remove __pte invocation.
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/book3s/32/pgtable.h | 9 ++++++++-
arch/powerpc/include/asm/book3s/64/pgtable.h | 18 +++++++++++++++---
arch/powerpc/include/asm/nohash/32/pgtable.h | 7 ++++++-
arch/powerpc/include/asm/nohash/64/pgtable.h | 8 ++++++--
arch/powerpc/include/asm/nohash/pgtable.h | 1 +
6 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 699df27b0e2f..1d943a13a204 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -150,6 +150,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index b2fdd3cc81de..44703c8c590c 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -53,6 +53,8 @@
#ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
static inline bool pte_user(pte_t pte)
{
return pte_val(pte) & _PAGE_USER;
@@ -337,7 +339,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -529,6 +535,7 @@ static inline bool pmd_user(pmd_t pmd)
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
+ page_table_check_pte_set(mm, addr, ptep, pte);
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
* helper pte_update() which does an atomic update. We need to do that
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index e04e3cd378a6..436632d04304 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -162,6 +162,8 @@
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
#ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
/*
* page table defines
*/
@@ -465,8 +467,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
- return __pte(old);
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
@@ -475,11 +480,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
pte_t *ptep, int full)
{
if (full && radix_enabled()) {
+ pte_t old_pte;
+
/*
* We know that this is a full mm pte clear and
* hence can be sure there is no parallel set_pte.
*/
- return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+ old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
return ptep_get_and_clear(mm, addr, ptep);
}
@@ -865,6 +875,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
+ page_table_check_pte_set(mm, addr, ptep, pte);
+
if (radix_enabled())
return radix__set_pte_at(mm, addr, ptep, pte, percpu);
return hash__set_pte_at(mm, addr, ptep, pte, percpu);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 94b2a53f73d5..b3b8a843a1bd 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -166,6 +166,7 @@ void unmap_kernel_page(unsigned long va);
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
#ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
#define pte_clear(mm, addr, ptep) \
do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
@@ -310,7 +311,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 69304159aafc..2488da8f0deb 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -83,6 +83,7 @@
#define H_PAGE_4K_PFN 0
#ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
/* pte_clear moved to later in this file */
static inline pte_t pte_mkwrite(pte_t pte)
@@ -258,8 +259,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
- return __pte(old);
+ pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+ page_table_check_pte_clear(mm, addr, old_pte);
+
+ return old_pte;
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 455ae13822ee..0454ed762d39 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -170,6 +170,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
+ page_table_check_pte_set(mm, addr, ptep, pte);
/* Second case is 32-bit with 64-bit PTE. In this case, we
* can just store as long as we do the two halves in the right order
* with a barrier in between.
--
2.37.2