[PATCH v8 1/5] kasan: support backing vmalloc space with real shadow memory
Andrey Ryabinin
aryabinin at virtuozzo.com
Sat Oct 12 06:57:28 AEDT 2019
On 10/1/19 9:58 AM, Daniel Axtens wrote:
> core_initcall(kasan_memhotplug_init);
> #endif
> +
> +#ifdef CONFIG_KASAN_VMALLOC
> +static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
> +                                      void *unused)
> +{
> +        unsigned long page;
> +        pte_t pte;
> +
> +        if (likely(!pte_none(*ptep)))
> +                return 0;
> +
> +        page = __get_free_page(GFP_KERNEL);
> +        if (!page)
> +                return -ENOMEM;
> +
> +        memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
> +        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
> +
> +        /*
> +         * Ensure poisoning is visible before the shadow is made visible
> +         * to other CPUs.
> +         */
> +        smp_wmb();
I don't quite understand what this barrier does or why it's needed.
And if it's really needed, there should be a pairing barrier
on the other side, which I don't see.
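If it is needed, I'd expect the reader side to look roughly like the
sketch below. To be clear, this is only an illustration of the pairing
I have in mind, not code from the patch; shadow_pte_accessible() and its
arguments are made up here:

        /* Hypothetical reader side, for illustration only. */
        static bool shadow_pte_accessible(pte_t *ptep, const u8 *shadow_byte)
        {
                pte_t pte = READ_ONCE(*ptep);

                if (pte_none(pte))
                        return false;

                /*
                 * Pairs with the smp_wmb() after the memset() above: the
                 * poison bytes must be visible before we read through the
                 * newly installed mapping.
                 */
                smp_rmb();

                return *shadow_byte != KASAN_VMALLOC_INVALID;
        }

(It's possible the intended pairing is the reader's dependency on the new
pte when it accesses the shadow through the fresh mapping, but if so that
deserves a comment.)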
> +
> +        spin_lock(&init_mm.page_table_lock);
> +        if (likely(pte_none(*ptep))) {
> +                set_pte_at(&init_mm, addr, ptep, pte);
> +                page = 0;
> +        }
> +        spin_unlock(&init_mm.page_table_lock);
> +        if (page)
> +                free_page(page);
> +        return 0;
> +}
> +
...
> @@ -754,6 +769,8 @@ merge_or_add_vmap_area(struct vmap_area *va,
>         }
> 
> insert:
> +        kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end);
> +
>         if (!merged) {
>                 link_va(va, root, parent, link, head);
>                 augment_tree_propagate_from(va);
> @@ -2068,6 +2085,22 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
> 
>         setup_vmalloc_vm(area, va, flags, caller);
> 
> +        /*
> +         * For KASAN, if we are in vmalloc space, we need to cover the shadow
> +         * area with real memory. If we come here through VM_ALLOC, this is
> +         * done by a higher level function that has access to the true size,
> +         * which might not be a full page.
> +         *
> +         * We assume module space comes via VM_ALLOC path.
> +         */
> +        if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
> +                if (kasan_populate_vmalloc(area->size, area)) {
> +                        unmap_vmap_area(va);
> +                        kfree(area);
> +                        return NULL;
> +                }
> +        }
> +
>         return area;
> }
>
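As for the "module space comes via VM_ALLOC path" assumption above: on the
generic path that does hold, since __vmalloc_node_range() always ORs
VM_ALLOC into the area flags. Roughly (from memory, details vary by kernel
version and arch; arch overrides of module_alloc() would need the same
property):

        /* call chain, approximate: */
        module_alloc(size)
          -> vmalloc_exec(size)   /* generic __weak version */
            -> __vmalloc_node_range(..., VMALLOC_START, VMALLOC_END, ...)
              -> __get_vm_area_node(..., VM_ALLOC | VM_UNINITIALIZED | vm_flags, ...)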
> @@ -2245,6 +2278,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
>         debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
>         debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
> 
> +        if (area->flags & VM_KASAN)
> +                kasan_poison_vmalloc(area->addr, area->size);
> +
>         vm_remove_mappings(area, deallocate_pages);
> 
>         if (deallocate_pages) {
> @@ -2497,6 +2533,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
>         if (!addr)
>                 return NULL;
> 
> +        if (kasan_populate_vmalloc(real_size, area))
> +                return NULL;
> +
KASAN itself uses __vmalloc_node_range() to allocate and map the shadow in
its memory online callback. So we should either skip non-vmalloc and
non-module addresses here, or teach KASAN's memory online/offline callbacks
to not use __vmalloc_node_range() (perhaps do something similar to
kasan_populate_vmalloc()?).
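The first option could look something like the untested sketch below,
assuming is_vmalloc_or_module_addr() returns false for the shadow range
that the memory online callback allocates:

        /*
         * Untested sketch: only populate shadow for vmalloc/module areas,
         * so KASAN's own memory online allocation is skipped.
         */
        if (is_vmalloc_or_module_addr(area->addr)) {
                if (kasan_populate_vmalloc(real_size, area))
                        return NULL;
        }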