[PATCH V2 10/10] powerpc/mm: kasan: Add kasan support for ppc64

Andrey Ryabinin ryabinin.a.a at gmail.com
Thu Aug 27 22:16:28 AEST 2015


2015-08-26 11:26 GMT+03:00 Aneesh Kumar K.V <aneesh.kumar at linux.vnet.ibm.com>:
> +               k_start = (unsigned long)kasan_mem_to_shadow(start);
> +               k_end = (unsigned long)kasan_mem_to_shadow(end);
> +               for (; k_start < k_end; k_start += page_size) {
> +                       p = vmemmap_alloc_block(page_size, node);
> +                       if (!p) {
> +                               pr_info("Disabled Kasan, for lack of free mem\n");
> +                               /* Free the stuff or panic ? */

vmemmap_alloc_block() panics on allocation failure, so you don't need
this if block.
You could replace it with memblock_virt_alloc_try_nid_nopanic(), but
note that if/when
we have a working asan-stack=1, there will be no way to fall back.

> +                               return;
> +                       }
> +                       htab_bolt_mapping(k_start, k_start + page_size,
> +                                         __pa(p), pgprot_val(PAGE_KERNEL),
> +                                         mmu_vmemmap_psize, mmu_kernel_ssize);
> +               }
> +       }
> +       /*
> +        * At this point kasan is fully initialized. Enable error messages
> +        */
> +       init_task.kasan_depth = 0;
> +       __kasan_enabled = true;
> +       pr_info("Kernel address sanitizer initialized\n");
> +}
> diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
> index 736d18b3cefd..154bd8a0b437 100644
> --- a/arch/powerpc/mm/slb_low.S
> +++ b/arch/powerpc/mm/slb_low.S
> @@ -80,11 +80,15 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
>         /* Check virtual memmap region. To be patches at kernel boot */
>         cmpldi  cr0,r9,0xf
>         bne     1f
> +2:
>  .globl slb_miss_kernel_load_vmemmap
>  slb_miss_kernel_load_vmemmap:
>         li      r11,0
>         b       6f
>  1:
> +       /* Kasan region same as vmemmap mapping */
> +       cmpldi  cr0,r9,0xe
> +       beq     2b
>  #endif /* CONFIG_SPARSEMEM_VMEMMAP */
>
>         /* vmalloc mapping gets the encoding from the PACA as the mapping
> diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
> index c140e94c7c72..7a7c9d54f80e 100644
> --- a/arch/powerpc/platforms/Kconfig.cputype
> +++ b/arch/powerpc/platforms/Kconfig.cputype
> @@ -75,6 +75,7 @@ config PPC_BOOK3S_64
>         select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES
>         select ARCH_SUPPORTS_NUMA_BALANCING
>         select IRQ_WORK
> +       select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
>
>  config PPC_BOOK3E_64
>         bool "Embedded processors"
> --
> 2.5.0
>


More information about the Linuxppc-dev mailing list