[PATCH v9 11/11] powerpc/32s: set up an early static hash table for KASAN.
Christophe Leroy
christophe.leroy at c-s.fr
Fri Mar 1 23:33:49 AEDT 2019
KASAN requires early activation of the hash table, before memblock()
functions are available.
This patch implements an early hash table statically defined in
__initdata.
During early boot, a single page table is used.
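For reference, the sizing works out as follows (the array declaration below is the one added by this patch; the SDR1 line is only a worked example, not patch code): the hash table must be aligned to its size, and with the minimum table size being 64 kB, a 256 kB table gives HTABMASK = (256 kB / 64 kB) - 1 = 3, which is the value or'ed into SDR1 by the new early_hash_table helper.

	u8 __initdata early_hash[256 << 10] __aligned(256 << 10);

	/* SDR1 = HTABORG (high bits of the physical base) | HTABMASK */
	u32 sdr1 = __pa(early_hash) | 3;	/* 3 => 256 kB table */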
For hash32, when doing the final init, one page table is allocated
for each PGD entry because of the _PAGE_HASHPTE flag, which can't be
shared between several virtual pages. This is done after memblock
becomes available but before switching to the final hash table,
otherwise there are issues with TLB flushing due to the shared entries.
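A simplified sketch of that allocation step, assuming it runs before slab is up so memblock_alloc() is the allocator (the real code is kasan_init_shadow_page_tables() in the diff below; the helper name here is made up for illustration):

	static int __init kasan_own_pte_pages(unsigned long k_start, unsigned long k_end)
	{
		unsigned long k_cur, k_next;
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

		for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
			pte_t *new;

			k_next = pgd_addr_end(k_cur, k_end);
			/* skip entries that already have their own PTE page */
			if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
				continue;

			/* one PTE page per PGD entry, so _PAGE_HASHPTE is never shared */
			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
			if (!new)
				return -ENOMEM;
			kasan_populate_pte(new, PAGE_READONLY);
			pmd_populate_kernel(&init_mm, pmd, new);
		}
		return 0;
	}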
For hash32, the zero shadow page gets mapped with PAGE_READONLY instead
of PAGE_KERNEL_RO, because the PP bits don't provide a kernel-only
read-only mode, so PAGE_KERNEL_RO is equivalent to PAGE_KERNEL. With
PAGE_READONLY, the page is read-only for both kernel and user, but this
is not a security issue as it only contains zeroes.
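The resulting protection choice can be summarised by a small helper like this (illustrative only; the patch open-codes the early_mmu_has_feature(MMU_FTR_HPTE_TABLE) test at each call site):

	static pgprot_t kasan_prot_ro(void)
	{
		/* hash32: no kernel-only RO in the PP bits, use RO for kernel and user */
		if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
			return PAGE_READONLY;
		return PAGE_KERNEL_RO;
	}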
Signed-off-by: Christophe Leroy <christophe.leroy at c-s.fr>
---
arch/powerpc/include/asm/kasan.h | 1 +
arch/powerpc/kernel/head_32.S | 43 ++++++++++++++++++++++-------
arch/powerpc/mm/kasan/kasan_init_32.c | 51 ++++++++++++++++++++++++++++++-----
arch/powerpc/mm/mmu_decl.h | 1 +
4 files changed, 79 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 74a4ba9fb8a3..c9fe0369a8fc 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -29,6 +29,7 @@
#ifdef CONFIG_KASAN
void kasan_early_init(void);
+void kasan_mmu_init(void);
void kasan_init(void);
#else
static inline void kasan_init(void) { }
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e644aab2cf5b..7f7fbdd73b79 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -160,6 +160,10 @@ __after_mmu_off:
bl flush_tlbs
bl initial_bats
+ bl load_segment_registers
+#ifdef CONFIG_KASAN
+ bl early_hash_table
+#endif
#if defined(CONFIG_BOOTX_TEXT)
bl setup_disp_bat
#endif
@@ -205,7 +209,7 @@ __after_mmu_off:
*/
turn_on_mmu:
mfmsr r0
- ori r0,r0,MSR_DR|MSR_IR
+ ori r0,r0,MSR_DR|MSR_IR|MSR_RI
mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
@@ -881,11 +885,24 @@ _ENTRY(__restore_cpu_setup)
blr
#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
-
/*
* Load stuff into the MMU. Intended to be called with
* IR=0 and DR=0.
*/
+#ifdef CONFIG_KASAN
+early_hash_table:
+ sync /* Force all PTE updates to finish */
+ isync
+ tlbia /* Clear all TLB entries */
+ sync /* wait for tlbia/tlbie to finish */
+ TLBSYNC /* ... on all CPUs */
+ /* Load the SDR1 register (hash table base & size) */
+ lis r6, early_hash - PAGE_OFFSET@h
+ ori r6, r6, 3 /* 256kB table */
+ mtspr SPRN_SDR1, r6
+ blr
+#endif
+
load_up_mmu:
sync /* Force all PTE updates to finish */
isync
@@ -897,14 +914,6 @@ load_up_mmu:
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
mtspr SPRN_SDR1,r6
- li r0,16 /* load up segment register values */
- mtctr r0 /* for context 0 */
- lis r3,0x2000 /* Ku = 1, VSID = 0 */
- li r4,0
-3: mtsrin r3,r4
- addi r3,r3,0x111 /* increment VSID */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
/* Load the BAT registers with the values set up by MMU_init.
MMU_init takes care of whether we're on a 601 or not. */
@@ -926,6 +935,17 @@ BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
+load_segment_registers:
+ li r0, 16 /* load up segment register values */
+ mtctr r0 /* for context 0 */
+ lis r3, 0x2000 /* Ku = 1, VSID = 0 */
+ li r4, 0
+3: mtsrin r3, r4
+ addi r3, r3, 0x111 /* increment VSID */
+ addis r4, r4, 0x1000 /* address of next segment */
+ bdnz 3b
+ blr
+
/*
* This is where the main kernel code starts.
*/
@@ -961,6 +981,9 @@ start_here:
bl __save_cpu_setup
bl MMU_init
BEGIN_MMU_FTR_SECTION
+#ifdef CONFIG_KASAN
+ bl kasan_mmu_init
+#endif
bl MMU_init_hw_patch
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index cc788917ce38..f6dbc537c051 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -39,7 +39,10 @@ static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
if (!new)
return -ENOMEM;
- kasan_populate_pte(new, PAGE_KERNEL_RO);
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ kasan_populate_pte(new, PAGE_READONLY);
+ else
+ kasan_populate_pte(new, PAGE_KERNEL_RO);
pmd_populate_kernel(&init_mm, pmd, new);
}
return 0;
@@ -60,10 +63,13 @@ static int __ref kasan_init_region(void *start, size_t size)
unsigned long k_cur;
pmd_t *pmd;
void *block = NULL;
- int ret = kasan_init_shadow_page_tables(k_start, k_end);
- if (ret)
- return ret;
+ if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+ if (ret)
+ return ret;
+ }
if (!slab_is_available())
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
@@ -84,15 +90,26 @@ static int __ref kasan_init_region(void *start, size_t size)
static void __init kasan_remap_early_shadow_ro(void)
{
- kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
+ else
+ kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
+
flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}
-void __init kasan_init(void)
+void __init kasan_mmu_init(void)
{
int ret;
struct memblock_region *reg;
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ if (ret)
+ panic("kasan: kasan_init_shadow_page_tables() failed");
+ }
+
for_each_memblock(memory, reg) {
phys_addr_t base = reg->base;
phys_addr_t top = min(base + reg->size, total_lowmem);
@@ -104,6 +121,12 @@ void __init kasan_init(void)
if (ret)
panic("kasan: kasan_init_region() failed");
}
+}
+
+void __init kasan_init(void)
+{
+ if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ kasan_mmu_init();
kasan_remap_early_shadow_ro();
@@ -131,6 +154,20 @@ void *module_alloc(unsigned long size)
}
#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
+
+static void __init kasan_early_hash_table(void)
+{
+ modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
+ modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+
+ Hash = (struct hash_pte *)early_hash;
+}
+#else
+static void __init kasan_early_hash_table(void) {}
+#endif
+
void __init kasan_early_init(void)
{
unsigned long addr = KASAN_SHADOW_START;
@@ -148,5 +185,5 @@ void __init kasan_early_init(void)
} while (pmd++, addr = next, addr != end);
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
- WARN(1, "KASAN not supported on hash 6xx");
+ kasan_early_hash_table();
}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d726ff776054..31fce3914ddc 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -106,6 +106,7 @@ extern unsigned int rtas_data, rtas_size;
struct hash_pte;
extern struct hash_pte *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
+extern u8 early_hash[];
#endif /* CONFIG_PPC32 */
--
2.13.3