[PATCH -V8 0/11] arch/powerpc: Add 64TB support to ppc64
Benjamin Herrenschmidt
benh at kernel.crashing.org
Fri Sep 7 17:53:37 EST 2012
On Fri, 2012-09-07 at 11:12 +0530, Aneesh Kumar K.V wrote:
>
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index 428f23e..057a12a 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -14,6 +14,7 @@
>
> #include <asm/asm-compat.h>
> #include <asm/page.h>
> +#include <asm/pgtable-ppc64-range.h>
Nah, that's all too gross... I think the right thing to do is to move
the slice stuff out of page_64.h
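Something like a separate slice header that both page_64.h and mmu-hash64.h pull in, so the PGTABLE_RANGE-derived sizes live in one place. Rough sketch only (the asm/slice.h name, the exact set of definitions that move, and whether the include order actually works out are all hand-waved here):

/*
 * asm/slice.h (name is hypothetical, sketch only): gather the slice
 * layout in one place instead of spreading it across page_64.h and
 * mmu-hash64.h.  PGTABLE_RANGE must already be visible wherever this
 * gets included from; sorting that include order out is the real work.
 */
#ifndef _ASM_POWERPC_SLICE_H
#define _ASM_POWERPC_SLICE_H

#define SLICE_LOW_SHIFT		28	/* 256MB low slices */
#define SLICE_HIGH_SHIFT	40	/* 1TB high slices */

#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)

/* 4 bits per 1TB slice */
#define SLICE_ARRAY_SIZE	(PGTABLE_RANGE >> 41)
/* 1 bit per 1TB slice */
#define SLICE_MASK_SIZE		(PGTABLE_RANGE >> 43)

#endif /* _ASM_POWERPC_SLICE_H */

page_64.h and mmu-hash64.h would then just pick this up instead of each carrying its own copy of the sizes.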
> /*
> * Segment table
> @@ -415,12 +416,7 @@ extern void slb_set_size(u16 size);
> add rt,rt,rx
>
> /* 4 bits per slice and we have one slice per 1TB */
> -#if 0 /* We can't directly include pgtable.h hence this hack */
> #define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
> -#else
> -/* Right now we only support 64TB */
> -#define SLICE_ARRAY_SIZE 32
> -#endif
>
> #ifndef __ASSEMBLY__
>
> diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
> index b55beb4..01ab518 100644
> --- a/arch/powerpc/include/asm/page_64.h
> +++ b/arch/powerpc/include/asm/page_64.h
> @@ -78,16 +78,14 @@ extern u64 ppc64_pft_size;
> #define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
> #define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
>
> -/* 1 bit per slice and we have one slice per 1TB */
> -#if 0 /* We can't directly include pgtable.h hence this hack */
> -#define SLICE_MASK_SIZE (PGTABLE_RANGE >> 43)
> -#else
> -/*
> +/* 1 bit per slice and we have one slice per 1TB
> * Right now we support only 64TB.
> * IF we change this we will have to change the type
> * of high_slices
> */
> #define SLICE_MASK_SIZE 8
> +#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
> +#error PGTABLE_RANGE exceeds slice_mask high_slices size
> #endif
>
> #ifndef __ASSEMBLY__
> diff --git a/arch/powerpc/include/asm/pgtable-ppc64-range.h b/arch/powerpc/include/asm/pgtable-ppc64-range.h
> new file mode 100644
> index 0000000..04a825c
> --- /dev/null
> +++ b/arch/powerpc/include/asm/pgtable-ppc64-range.h
> @@ -0,0 +1,16 @@
> +#ifndef _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
> +#define _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
> +
> +#ifdef CONFIG_PPC_64K_PAGES
> +#include <asm/pgtable-ppc64-64k.h>
> +#else
> +#include <asm/pgtable-ppc64-4k.h>
> +#endif
> +
> +/*
> + * Size of EA range mapped by our pagetables.
> + */
> +#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
> + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
> +#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
> +#endif
> diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
> index dea953f..ee783b4 100644
> --- a/arch/powerpc/include/asm/pgtable-ppc64.h
> +++ b/arch/powerpc/include/asm/pgtable-ppc64.h
> @@ -13,13 +13,7 @@
>
> #define FIRST_USER_ADDRESS 0
>
> -/*
> - * Size of EA range mapped by our pagetables.
> - */
> -#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
> - PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
> -#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
> -
> +#include <asm/pgtable-ppc64-range.h>
>
> /* Some sanity checking */
> #if TASK_SIZE_USER64 > PGTABLE_RANGE
> @@ -32,14 +26,6 @@
> #endif
> #endif
>
> -#if (PGTABLE_RANGE >> 41) > SLICE_ARRAY_SIZE
> -#error PGTABLE_RANGE exceeds SLICE_ARRAY_SIZE
> -#endif
> -
> -#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
> -#error PGTABLE_RANGE exceeds slice_mask high_slices size
> -#endif
> -
> /*
> * Define the address range of the kernel non-linear virtual area
> */
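FWIW, the arithmetic behind the two guards in the quoted hunks, as a quick sanity check (assuming the current 64TB configuration, i.e. a 46-bit EA range):

	PGTABLE_RANGE     = 1UL << 46                  (64TB)
	1TB slices        = PGTABLE_RANGE >> 40  = 64
	SLICE_ARRAY_SIZE  = PGTABLE_RANGE >> 41  = 32  (4 bits per slice)
	SLICE_MASK_SIZE   = PGTABLE_RANGE >> 43  = 8   (1 bit per slice, fits a 64-bit high_slices)

so the #error only fires once the range grows past 64TB and high_slices needs a wider type.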
Ben.