[PATCH v4 1/4] powerpc/mm: refactor radix physical page mapping
Aneesh Kumar K.V
aneesh.kumar at linux.vnet.ibm.com
Wed Jan 4 16:04:25 AEDT 2017
Reza Arbab <arbab at linux.vnet.ibm.com> writes:
> Move the page mapping code in radix_init_pgtable() into a separate
> function that will also be used for memory hotplug.
>
> The current goto loop progressively decreases its mapping size as it
> covers the tail of a range whose end is unaligned. Change this to a for
> loop which can do the same for both ends of the range.
>
We lost the below pr_info() in this change:
pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
(unsigned long)base, (unsigned long)end,
linear_page_size);
Is there a way to dump the range and the size with which we mapped that
range?
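One possibility (an untested sketch of mine, not part of the patch): track
the previous mapping size inside create_physical_mapping() and print only
when it changes, so each run of same-sized mappings is reported once, much
like the old per-range pr_info():

static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long mapping_size, prev_size = 0;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (; start < end; start += mapping_size) {
		unsigned long gap = end - start;
		int rc;

		if (IS_ALIGNED(start, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(start, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		/* report each contiguous run of same-sized mappings once */
		if (mapping_size != prev_size) {
			pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
				start, end, mapping_size);
			prev_size = mapping_size;
		}

		rc = radix__map_kernel_page((unsigned long)__va(start), start,
					    PAGE_KERNEL_X, mapping_size);
		if (rc)
			return rc;
	}

	return 0;
}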
> Signed-off-by: Reza Arbab <arbab at linux.vnet.ibm.com>
> ---
> arch/powerpc/mm/pgtable-radix.c | 69 ++++++++++++++++++-----------------------
> 1 file changed, 31 insertions(+), 38 deletions(-)
>
> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
> index 623a0dc..5cee6d1 100644
> --- a/arch/powerpc/mm/pgtable-radix.c
> +++ b/arch/powerpc/mm/pgtable-radix.c
> @@ -107,54 +107,47 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
> return 0;
> }
>
> +static int __meminit create_physical_mapping(unsigned long start,
> + unsigned long end)
> +{
> + unsigned long mapping_size;
> +
> + start = _ALIGN_UP(start, PAGE_SIZE);
> + for (; start < end; start += mapping_size) {
> + unsigned long gap = end - start;
> + int rc;
> +
> + if (IS_ALIGNED(start, PUD_SIZE) && gap >= PUD_SIZE &&
> + mmu_psize_defs[MMU_PAGE_1G].shift)
> + mapping_size = PUD_SIZE;
> + else if (IS_ALIGNED(start, PMD_SIZE) && gap >= PMD_SIZE &&
> + mmu_psize_defs[MMU_PAGE_2M].shift)
> + mapping_size = PMD_SIZE;
> + else
> + mapping_size = PAGE_SIZE;
> +
> + rc = radix__map_kernel_page((unsigned long)__va(start), start,
> + PAGE_KERNEL_X, mapping_size);
> + if (rc)
> + return rc;
> + }
> +
> + return 0;
> +}
> +
> static void __init radix_init_pgtable(void)
> {
> - int loop_count;
> - u64 base, end, start_addr;
> unsigned long rts_field;
> struct memblock_region *reg;
> - unsigned long linear_page_size;
>
> /* We don't support slb for radix */
> mmu_slb_size = 0;
> /*
> * Create the linear mapping, using standard page size for now
> */
> - loop_count = 0;
> - for_each_memblock(memory, reg) {
> -
> - start_addr = reg->base;
> -
> -redo:
> - if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
> - linear_page_size = PUD_SIZE;
> - else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
> - linear_page_size = PMD_SIZE;
> - else
> - linear_page_size = PAGE_SIZE;
> -
> - base = _ALIGN_UP(start_addr, linear_page_size);
> - end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);
> -
> - pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
> - (unsigned long)base, (unsigned long)end,
> - linear_page_size);
> -
> - while (base < end) {
> - radix__map_kernel_page((unsigned long)__va(base),
> - base, PAGE_KERNEL_X,
> - linear_page_size);
> - base += linear_page_size;
> - }
> - /*
> - * map the rest using lower page size
> - */
> - if (end < reg->base + reg->size) {
> - start_addr = end;
> - loop_count++;
> - goto redo;
> - }
> - }
> + for_each_memblock(memory, reg)
> + WARN_ON(create_physical_mapping(reg->base,
> + reg->base + reg->size));
> /*
> * Allocate Partition table and process table for the
> * host.
> --
> 1.8.3.1