[PATCH 1/4] kasan: allow arches to provide their own early shadow setup
Daniel Axtens
dja at axtens.net
Wed Aug 7 09:38:24 AEST 2019
powerpc supports several different MMUs. In particular, book3s
machines support both a hash-table based MMU and a radix MMU.
These MMUs support different numbers of entries per directory
level: the PTRS_PER_* defines evaluate to variables, not constants.
This leads to compiler errors, as global arrays must have
compile-time constant sizes.
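
To make the failure concrete, here is a rough, simplified sketch (not
taken from this patch) of the book3s64 situation, where the page table
geometry is chosen at boot and the "constants" are really runtime
variables:

  /* simplified illustration, assuming the usual book3s64 scheme */
  extern unsigned long __pte_index_size;         /* set during MMU setup */
  #define PTE_INDEX_SIZE  __pte_index_size
  #define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)  /* not a compile-time constant */

  /* a file-scope array sized by it then fails to build, e.g.: */
  pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
  /* error: variably modified 'kasan_early_shadow_pte' at file scope */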
Allow architectures to manage their own early shadow variables so we
can work around this on powerpc.
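
As a rough sketch (not part of this patch), an architecture selecting
the new option would then declare and define the early shadow itself,
sized by whatever compile-time maximum it knows about; the
MAX_PTRS_PER_* names below are hypothetical:

  /* e.g. in an arch file, assuming arch-provided MAX_PTRS_PER_* limits */
  unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;
  pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;
  pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
  pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;

with matching extern declarations in the arch's <asm/kasan.h>.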
Signed-off-by: Daniel Axtens <dja at axtens.net>
---
Changes from RFC:
- To make checkpatch happy, move ARCH_HAS_KASAN_EARLY_SHADOW from
a random #define to a config option selected when building for
ppc64 book3s
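
  For reference, a sketch (an assumption, not a hunk from this patch)
  of how that selection might look on the powerpc side:

    config PPC_BOOK3S_64
    	select ARCH_HAS_KASAN_EARLY_SHADOW if KASAN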
---
include/linux/kasan.h | 2 ++
lib/Kconfig.kasan | 3 +++
mm/kasan/init.c | 10 ++++++++++
3 files changed, 15 insertions(+)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ec81113fcee4..15933da52a3e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -14,11 +14,13 @@ struct task_struct;
#include <asm/kasan.h>
#include <asm/pgtable.h>
+#ifndef CONFIG_ARCH_HAS_KASAN_EARLY_SHADOW
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+#endif
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index a320dc2e9317..0621a0129c04 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -9,6 +9,9 @@ config HAVE_ARCH_KASAN_SW_TAGS
config HAVE_ARCH_KASAN_VMALLOC
bool
+config ARCH_HAS_KASAN_EARLY_SHADOW
+ bool
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ce45c491ebcd..7ef2b87a7988 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -31,10 +31,14 @@
* - Latter it reused it as zero shadow to cover large ranges of memory
* that allowed to access, but not handled by kasan (vmalloc/vmemmap ...).
*/
+#ifndef CONFIG_ARCH_HAS_KASAN_EARLY_SHADOW
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;
+#endif
#if CONFIG_PGTABLE_LEVELS > 4
+#ifndef CONFIG_ARCH_HAS_KASAN_EARLY_SHADOW
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
+#endif
static inline bool kasan_p4d_table(pgd_t pgd)
{
return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
@@ -46,7 +50,9 @@ static inline bool kasan_p4d_table(pgd_t pgd)
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
+#ifndef CONFIG_ARCH_HAS_KASAN_EARLY_SHADOW
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
static inline bool kasan_pud_table(p4d_t p4d)
{
return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
@@ -58,7 +64,9 @@ static inline bool kasan_pud_table(p4d_t p4d)
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
+#ifndef CONFIG_ARCH_HAS_KASAN_EARLY_SHADOW
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
static inline bool kasan_pmd_table(pud_t pud)
{
return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
@@ -69,7 +77,9 @@ static inline bool kasan_pmd_table(pud_t pud)
return false;
}
#endif
+#ifndef CONFIG_ARCH_HAS_KASAN_EARLY_SHADOW
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
static inline bool kasan_pte_table(pmd_t pmd)
{
--
2.20.1