[PATCH v10 01/10] powerpc/mm: Implement set_memory() routines
Aneesh Kumar K.V
aneesh.kumar at linux.ibm.com
Thu Apr 1 15:37:08 AEDT 2021
Jordan Niethe <jniethe5 at gmail.com> writes:
> From: Russell Currey <ruscur at russell.cc>
>
> The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
> and are generally useful primitives to have. This implementation is
> designed to be completely generic across powerpc's many MMUs.
>
> It's possible that this could be optimised to be faster for specific
> MMUs, but the focus is on having a generic and safe implementation for
> now.
>
> This implementation does not handle cases where the caller is attempting
> to change the mapping of the page it is executing from, or if another
> CPU is concurrently using the page being altered. These cases likely
> shouldn't happen, but a more complex implementation with MMU-specific code
> could safely handle them, so that is left as a TODO for now.
>
> On hash, the linear mapping is not kept in the Linux page table, so this
> will not change the protection if used on that range. Currently these
> functions are not used on the linear map, so just WARN for now.
>
> These functions do nothing if STRICT_KERNEL_RWX is not enabled.
>
> Reviewed-by: Daniel Axtens <dja at axtens.net>
> Signed-off-by: Russell Currey <ruscur at russell.cc>
> Signed-off-by: Christophe Leroy <christophe.leroy at csgroup.eu>
> [jpn: -rebase on next plus "powerpc/mm/64s: Allow STRICT_KERNEL_RWX again"
> - WARN on hash linear map]
> Signed-off-by: Jordan Niethe <jniethe5 at gmail.com>
> ---
> v10: WARN if trying to change the hash linear map
> ---
> arch/powerpc/Kconfig | 1 +
> arch/powerpc/include/asm/set_memory.h | 32 ++++++++++
> arch/powerpc/mm/Makefile | 2 +-
> arch/powerpc/mm/pageattr.c | 88 +++++++++++++++++++++++++++
> 4 files changed, 122 insertions(+), 1 deletion(-)
> create mode 100644 arch/powerpc/include/asm/set_memory.h
> create mode 100644 arch/powerpc/mm/pageattr.c
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index fc7f5c5933e6..4498a27ac9db 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -135,6 +135,7 @@ config PPC
> select ARCH_HAS_MEMBARRIER_CALLBACKS
> select ARCH_HAS_MEMBARRIER_SYNC_CORE
> select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
> + select ARCH_HAS_SET_MEMORY
> select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
> select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
> select ARCH_HAS_UACCESS_FLUSHCACHE
> diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
> new file mode 100644
> index 000000000000..64011ea444b4
> --- /dev/null
> +++ b/arch/powerpc/include/asm/set_memory.h
> @@ -0,0 +1,32 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_POWERPC_SET_MEMORY_H
> +#define _ASM_POWERPC_SET_MEMORY_H
> +
> +#define SET_MEMORY_RO 0
> +#define SET_MEMORY_RW 1
> +#define SET_MEMORY_NX 2
> +#define SET_MEMORY_X 3
> +
> +int change_memory_attr(unsigned long addr, int numpages, long action);
> +
> +static inline int set_memory_ro(unsigned long addr, int numpages)
> +{
> + return change_memory_attr(addr, numpages, SET_MEMORY_RO);
> +}
> +
> +static inline int set_memory_rw(unsigned long addr, int numpages)
> +{
> + return change_memory_attr(addr, numpages, SET_MEMORY_RW);
> +}
> +
> +static inline int set_memory_nx(unsigned long addr, int numpages)
> +{
> + return change_memory_attr(addr, numpages, SET_MEMORY_NX);
> +}
> +
> +static inline int set_memory_x(unsigned long addr, int numpages)
> +{
> + return change_memory_attr(addr, numpages, SET_MEMORY_X);
> +}
> +
> +#endif
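
(For context, not part of the patch: a typical caller of these helpers would
look something like the sketch below. The module buffer and error handling
are hypothetical, but vmalloc() does return the page-aligned, page-sized
allocation that change_memory_attr() expects.)

#include <linux/vmalloc.h>
#include <linux/set_memory.h>

static void *patch_buf;

/* Hypothetical: make a vmalloc'ed code buffer read-only and executable. */
static int make_buf_rox(void)
{
	int err;

	patch_buf = vmalloc(PAGE_SIZE);
	if (!patch_buf)
		return -ENOMEM;

	/* ... write code into patch_buf ... */

	err = set_memory_ro((unsigned long)patch_buf, 1);
	if (!err)
		err = set_memory_x((unsigned long)patch_buf, 1);
	return err;
}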
> diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
> index 3b4e9e4e25ea..d8a08abde1ae 100644
> --- a/arch/powerpc/mm/Makefile
> +++ b/arch/powerpc/mm/Makefile
> @@ -5,7 +5,7 @@
>
> ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
>
> -obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
> +obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
> init_$(BITS).o pgtable_$(BITS).o \
> pgtable-frag.o ioremap.o ioremap_$(BITS).o \
> init-common.o mmu_context.o drmem.o
> diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
> new file mode 100644
> index 000000000000..9efcb01088da
> --- /dev/null
> +++ b/arch/powerpc/mm/pageattr.c
> @@ -0,0 +1,88 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * MMU-generic set_memory implementation for powerpc
> + *
> + * Copyright 2019, IBM Corporation.
> + */
> +
> +#include <linux/mm.h>
> +#include <linux/set_memory.h>
> +
> +#include <asm/mmu.h>
> +#include <asm/page.h>
> +#include <asm/pgtable.h>
> +
> +
> +/*
> + * Updates the attributes of a page in three steps:
> + *
> + * 1. invalidate the page table entry
> + * 2. flush the TLB
> + * 3. install the new entry with the updated attributes
> + *
> + * This is unsafe if the caller is attempting to change the mapping of the
> + * page it is executing from, or if another CPU is concurrently using the
> + * page being altered.
> + *
> + * TODO make the implementation resistant to this.
> + *
> + * NOTE: can be dangerous to call without STRICT_KERNEL_RWX
> + */
> +static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
> +{
> + long action = (long)data;
> + pte_t pte;
> +
> + spin_lock(&init_mm.page_table_lock);
> +
> + /* invalidate the PTE so it's safe to modify */
> + pte = ptep_get_and_clear(&init_mm, addr, ptep);
> + flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> +
> + /* modify the PTE bits as desired, then apply */
> + switch (action) {
> + case SET_MEMORY_RO:
> + pte = pte_wrprotect(pte);
> + break;
> + case SET_MEMORY_RW:
> + pte = pte_mkwrite(pte);
> + break;
> + case SET_MEMORY_NX:
> + pte = pte_exprotect(pte);
> + break;
> + case SET_MEMORY_X:
> + pte = pte_mkexec(pte);
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + break;
> + }
> +
> + set_pte_at(&init_mm, addr, ptep, pte);
> + spin_unlock(&init_mm.page_table_lock);
> +
> + return 0;
> +}
> +
> +int change_memory_attr(unsigned long addr, int numpages, long action)
> +{
> + unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> + unsigned long sz = numpages * PAGE_SIZE;
> +
> + if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
> + return 0;
Which restrictions imposed by that config option are we depending on here?
> +
> + if (numpages <= 0)
> + return 0;
> +
> +#ifdef CONFIG_PPC_BOOK3S_64
> + if (WARN_ON_ONCE(!radix_enabled() &&
> + get_region_id(addr) == LINEAR_MAP_REGION_ID)) {
> + return -1;
> + }
> +#endif
What about VMEMMAP_REGION_ID?
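i.e. on hash, vmemmap is also not mapped via the Linux page table, so
should the check cover that region too? An untested sketch of what I mean
(returning -EINVAL rather than -1, just as a suggestion):

#ifdef CONFIG_PPC_BOOK3S_64
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		/* hash: linear map and vmemmap are not in the Linux page table */
		if (WARN_ON_ONCE(region == LINEAR_MAP_REGION_ID ||
				 region == VMEMMAP_REGION_ID))
			return -EINVAL;
	}
#endif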
> +
> + return apply_to_existing_page_range(&init_mm, start, sz,
> + change_page_attr, (void *)action);
That only handles 64K (base page) mappings. What about the linear map? Also,
there is a patchset implementing huge pages for vmalloc mappings.
> +}
> --
> 2.25.1