[PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64
Jordan Niethe
jniethe5 at gmail.com
Mon May 17 16:16:58 AEST 2021
From: Christophe Leroy <christophe.leroy at csgroup.eu>
This reuses the DEBUG_PAGEALLOC logic: on 64-bit, kfence_protect_page()
is implemented on top of __kernel_map_pages(), which already has both
hash and radix backends.
Tested with CONFIG_KFENCE + CONFIG_KUNIT + CONFIG_KFENCE_KUNIT_TEST on
radix and hash.
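
For anyone reproducing the testing, that amounts to roughly this config
fragment (a sketch; exact KUnit option dependencies may differ by tree):

    CONFIG_KFENCE=y
    CONFIG_KUNIT=y
    CONFIG_KFENCE_KUNIT_TEST=y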
Signed-off-by: Christophe Leroy <christophe.leroy at csgroup.eu>
[jpn: Handle radix]
Signed-off-by: Jordan Niethe <jniethe5 at gmail.com>
---
 arch/powerpc/Kconfig                         |  2 +-
 arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
 arch/powerpc/include/asm/kfence.h            | 19 +++++++++++++++++++
 arch/powerpc/mm/book3s64/hash_utils.c        | 12 ++++++------
 arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
 5 files changed, 32 insertions(+), 11 deletions(-)
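
A note on the mechanism (context for review, not part of the patch): on
BOOK3S/64, kfence_protect_page() simply maps or unmaps the page via
__kernel_map_pages(), whose existing DEBUG_PAGEALLOC implementation in
book3s/64/pgtable.h already dispatches on the MMU type. Paraphrased
sketch, not a verbatim quote of the tree:

    static inline void __kernel_map_pages(struct page *page, int numpages,
                                          int enable)
    {
            if (radix_enabled())
                    radix__kernel_map_pages(page, numpages, enable);
            else
                    hash__kernel_map_pages(page, numpages, enable);
    }

The ARCH_FUNC_PREFIX definition covers the big endian ELFv1 ABI, where
text symbols carry a leading dot; KFENCE's report code prepends
ARCH_FUNC_PREFIX when matching kfence_* symbol names in stack traces, so
without it the KFENCE-internal frames would not be skipped on ELFv1.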
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6df64d6815df..1743364d7370 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -196,7 +196,7 @@ config PPC
 	select HAVE_ARCH_KASAN			if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KASAN_VMALLOC		if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KGDB
-	select HAVE_ARCH_KFENCE			if PPC32
+	select HAVE_ARCH_KFENCE			if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
 	select HAVE_ARCH_NVRAM_OPS
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index b89482aed82a..35300f2ee5d0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -822,7 +822,7 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
 /*
  * Generic functions with hash/radix callbacks
  */
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (radix_enabled())
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..9d388df7c1a8 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,29 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
+#if defined(CONFIG_PPC64) && !defined(PPC64_ELF_ABI_v2)
+#define ARCH_FUNC_PREFIX "."
+#endif
+
 static inline bool arch_kfence_init_pool(void)
 {
 	return true;
 }
 
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	struct page *page;
+
+	page = virt_to_page(addr);
+	if (protect)
+		__kernel_map_pages(page, 1, 0);
+	else
+		__kernel_map_pages(page, 1, 1);
+
+	return true;
+}
+#else
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +47,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	return true;
 }
+#endif
 
 #endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fe5cf1cf4dd5..fecb379426e7 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -323,8 +323,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled() &&
-		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
+		if (debug_pagealloc_enabled_or_kfence() &&
+			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
 	return ret < 0 ? ret : 0;
@@ -672,7 +672,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled()) {
+	if (!debug_pagealloc_enabled_or_kfence()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -960,7 +960,7 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled()) {
+	if (debug_pagealloc_enabled_or_kfence()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1936,7 +1936,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 	return slot;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static DEFINE_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2009,7 +2009,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	}
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				      phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 2aa81b9e354a..b984876ff1ca 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -33,6 +33,8 @@
 #include <trace/events/thp.h>
 
+#include <mm/mmu_decl.h>
+
 unsigned int mmu_pid_bits;
 unsigned int mmu_base_pid;
 unsigned long radix_mem_block_size __ro_after_init;
@@ -335,7 +337,7 @@ static void __init radix_init_pgtable(void)
 	phys_addr_t start, end;
 	u64 i;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		size = PAGE_SIZE;
 
 	/* We don't support slb for radix */
@@ -879,7 +881,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
 {
 	unsigned long size = radix_mem_block_size;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		size = PAGE_SIZE;
 
 	if (end >= RADIX_VMALLOC_START) {
@@ -1176,7 +1178,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 	return 1;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long addr = (unsigned long)page_address(page);
--
2.25.1