[PATCH v1 3/4] powerpc: Add support for GENERIC_EARLY_IOREMAP

Christophe Leroy christophe.leroy at c-s.fr
Fri Sep 13 02:01:31 AEST 2019



Le 12/09/2019 à 17:50, Aneesh Kumar K.V a écrit :
> Christophe Leroy <christophe.leroy at c-s.fr> writes:
> 
>> Le 12/09/2019 à 17:37, Aneesh Kumar K.V a écrit :
>>> Christophe Leroy <christophe.leroy at c-s.fr> writes:
>>>
>>>> Add support for GENERIC_EARLY_IOREMAP.
>>>>
>>>> Let's define 16 slots of 256Kbytes each for early ioremap.
>>>>
>>>> Signed-off-by: Christophe Leroy <christophe.leroy at c-s.fr>
>>>> ---
>>>>    arch/powerpc/Kconfig              |  1 +
>>>>    arch/powerpc/include/asm/Kbuild   |  1 +
>>>>    arch/powerpc/include/asm/fixmap.h | 12 ++++++++++++
>>>>    arch/powerpc/kernel/setup_32.c    |  3 +++
>>>>    arch/powerpc/kernel/setup_64.c    |  3 +++
>>>>    5 files changed, 20 insertions(+)
>>>>
>>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>>> index 6a7c797fa9d2..8fe252962518 100644
>>>> --- a/arch/powerpc/Kconfig
>>>> +++ b/arch/powerpc/Kconfig
>>>> @@ -161,6 +161,7 @@ config PPC
>>>>    	select GENERIC_CMOS_UPDATE
>>>>    	select GENERIC_CPU_AUTOPROBE
>>>>    	select GENERIC_CPU_VULNERABILITIES	if PPC_BARRIER_NOSPEC
>>>> +	select GENERIC_EARLY_IOREMAP
>>>>    	select GENERIC_IRQ_SHOW
>>>>    	select GENERIC_IRQ_SHOW_LEVEL
>>>>    	select GENERIC_PCI_IOMAP		if PCI
>>>> diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
>>>> index 9a1d2fc6ceb7..30829120659c 100644
>>>> --- a/arch/powerpc/include/asm/Kbuild
>>>> +++ b/arch/powerpc/include/asm/Kbuild
>>>> @@ -12,3 +12,4 @@ generic-y += preempt.h
>>>>    generic-y += vtime.h
>>>>    generic-y += msi.h
>>>>    generic-y += simd.h
>>>> +generic-y += early_ioremap.h
>>>> diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
>>>> index 722289a1d000..d5c4d357bd33 100644
>>>> --- a/arch/powerpc/include/asm/fixmap.h
>>>> +++ b/arch/powerpc/include/asm/fixmap.h
>>>> @@ -15,6 +15,7 @@
>>>>    #define _ASM_FIXMAP_H
>>>>    
>>>>    #ifndef __ASSEMBLY__
>>>> +#include <linux/sizes.h>
>>>>    #include <asm/page.h>
>>>>    #include <asm/pgtable.h>
>>>>    #ifdef CONFIG_HIGHMEM
>>>> @@ -64,6 +65,14 @@ enum fixed_addresses {
>>>>    		       FIX_IMMR_SIZE,
>>>>    #endif
>>>>    	/* FIX_PCIE_MCFG, */
>>>> +	__end_of_permanent_fixed_addresses,
>>>> +
>>>> +#define NR_FIX_BTMAPS		(SZ_256K / PAGE_SIZE)
>>>> +#define FIX_BTMAPS_SLOTS	16
>>>> +#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
>>>> +
>>>> +	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
>>>> +	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
>>>>    	__end_of_fixed_addresses
>>>>    };
>>>>    
>>>> @@ -71,6 +80,7 @@ enum fixed_addresses {
>>>>    #define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
>>>>    
>>>>    #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
>>>> +#define FIXMAP_PAGE_IO	PAGE_KERNEL_NCG
>>>>    
>>>>    #include <asm-generic/fixmap.h>
>>>>    
>>>> @@ -85,5 +95,7 @@ static inline void __set_fixmap(enum fixed_addresses idx,
>>>>    	map_kernel_page(__fix_to_virt(idx), phys, flags);
>>>>    }
>>>>    
>>>> +#define __early_set_fixmap	__set_fixmap
>>>> +
>>>>    #endif /* !__ASSEMBLY__ */
>>>>    #endif
>>>> diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
>>>> index a7541edf0cdb..dcffe927f5b9 100644
>>>> --- a/arch/powerpc/kernel/setup_32.c
>>>> +++ b/arch/powerpc/kernel/setup_32.c
>>>> @@ -44,6 +44,7 @@
>>>>    #include <asm/asm-prototypes.h>
>>>>    #include <asm/kdump.h>
>>>>    #include <asm/feature-fixups.h>
>>>> +#include <asm/early_ioremap.h>
>>>>    
>>>>    #include "setup.h"
>>>>    
>>>> @@ -80,6 +81,8 @@ notrace void __init machine_init(u64 dt_ptr)
>>>>    	/* Configure static keys first, now that we're relocated. */
>>>>    	setup_feature_keys();
>>>>    
>>>> +	early_ioremap_setup();
>>>> +
>>>>    	/* Enable early debugging if any specified (see udbg.h) */
>>>>    	udbg_early_init();
>>>>    
>>>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>>>> index 44b4c432a273..b85f6a1cc3a1 100644
>>>> --- a/arch/powerpc/kernel/setup_64.c
>>>> +++ b/arch/powerpc/kernel/setup_64.c
>>>> @@ -65,6 +65,7 @@
>>>>    #include <asm/hw_irq.h>
>>>>    #include <asm/feature-fixups.h>
>>>>    #include <asm/kup.h>
>>>> +#include <asm/early_ioremap.h>
>>>>    
>>>>    #include "setup.h"
>>>>    
>>>> @@ -338,6 +339,8 @@ void __init early_setup(unsigned long dt_ptr)
>>>>    	apply_feature_fixups();
>>>>    	setup_feature_keys();
>>>>    
>>>> +	early_ioremap_setup();
>>>> +
>>>>    	/* Initialize the hash table or TLB handling */
>>>>    	early_init_mmu();
>>>>    
>>>
>>> Can we remove early_ioremap_range() after this?
>>>
>>
>> Yes, once all early callers of ioremap functions are converted to using
>> early_ioremap()
> 
> Why can't we switch the early callers to early_ioremap and print a
> warning?
> 
> ie,
> if (!slab_available()) {
>     pr_warn("switch to early_ioremap");
>     early_ioremap();
> }
> 

Because:
- early_iounmap() requires the size of the area to be freed, unlike iounmap()
- early_ioremap() is for ephemeral mappings. All early mappings must be 
gone by the end of init (this is verified by 
late_initcall(check_early_ioremap_leak))

The second point means that another approach has to be taken for installing 
early permanent mappings, for instance by using fixmaps.

Christophe


More information about the Linuxppc-dev mailing list