[PATCH 17/21] mm: free_area_init: allow defining max_zone_pfn in descending order
Baoquan He
bhe at redhat.com
Thu Apr 23 12:57:20 AEST 2020
On 04/23/20 at 10:53am, Baoquan He wrote:
> On 04/12/20 at 10:48pm, Mike Rapoport wrote:
> > From: Mike Rapoport <rppt at linux.ibm.com>
> >
> > Some architectures (e.g. ARC) have the ZONE_HIGHMEM zone below
> > ZONE_NORMAL. Allowing free_area_init() to parse the max_zone_pfn array
> > even when it is sorted in descending order makes it possible to use
> > free_area_init() on such architectures.
> >
> > Add a top -> down traversal of the max_zone_pfn array in free_area_init()
> > and use the latter in ARC node/zone initialization.
>
> Or maybe leave ARC as is. The change in this patchset doesn't impact
> ARC's handling of zone initialization, and leaving it as is would avoid
> complicating the implementation of free_area_init(), which is a common
> function. So I personally don't see a strong motivation for this patch.
OK, it seems this patch is preparation for simplifying free_area_init_node(),
so I take back what I said above.
Then this looks necessary, even though it introduces a special case into
the common function free_area_init(). (A small standalone sketch of the
descending-order handling follows the quoted patch below.)
Reviewed-by: Baoquan He <bhe at redhat.com>
>
> >
> > Signed-off-by: Mike Rapoport <rppt at linux.ibm.com>
> > ---
> > arch/arc/mm/init.c | 36 +++++++-----------------------------
> > mm/page_alloc.c | 24 +++++++++++++++++++-----
> > 2 files changed, 26 insertions(+), 34 deletions(-)
> >
> > diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
> > index 0920c969c466..41eb9be1653c 100644
> > --- a/arch/arc/mm/init.c
> > +++ b/arch/arc/mm/init.c
> > @@ -63,11 +63,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
> >
> > low_mem_sz = size;
> > in_use = 1;
> > + memblock_add_node(base, size, 0);
> > } else {
> > #ifdef CONFIG_HIGHMEM
> > high_mem_start = base;
> > high_mem_sz = size;
> > in_use = 1;
> > + memblock_add_node(base, size, 1);
> > #endif
> > }
> >
> > @@ -83,8 +85,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
> > */
> > void __init setup_arch_memory(void)
> > {
> > - unsigned long zones_size[MAX_NR_ZONES];
> > - unsigned long zones_holes[MAX_NR_ZONES];
> > + unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
> >
> > init_mm.start_code = (unsigned long)_text;
> > init_mm.end_code = (unsigned long)_etext;
> > @@ -115,7 +116,6 @@ void __init setup_arch_memory(void)
> > * the crash
> > */
> >
> > - memblock_add_node(low_mem_start, low_mem_sz, 0);
> > memblock_reserve(CONFIG_LINUX_LINK_BASE,
> > __pa(_end) - CONFIG_LINUX_LINK_BASE);
> >
> > @@ -133,22 +133,7 @@ void __init setup_arch_memory(void)
> > memblock_dump_all();
> >
> > /*----------------- node/zones setup --------------------------*/
> > - memset(zones_size, 0, sizeof(zones_size));
> > - memset(zones_holes, 0, sizeof(zones_holes));
> > -
> > - zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
> > - zones_holes[ZONE_NORMAL] = 0;
> > -
> > - /*
> > - * We can't use the helper free_area_init(zones[]) because it uses
> > - * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
> > - * when our kernel doesn't start at PAGE_OFFSET, i.e.
> > - * PAGE_OFFSET != CONFIG_LINUX_RAM_BASE
> > - */
> > - free_area_init_node(0, /* node-id */
> > - zones_size, /* num pages per zone */
> > - min_low_pfn, /* first pfn of node */
> > - zones_holes); /* holes */
> > + max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
> >
> > #ifdef CONFIG_HIGHMEM
> > /*
> > @@ -168,20 +153,13 @@ void __init setup_arch_memory(void)
> > min_high_pfn = PFN_DOWN(high_mem_start);
> > max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
> >
> > - zones_size[ZONE_NORMAL] = 0;
> > - zones_holes[ZONE_NORMAL] = 0;
> > -
> > - zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
> > - zones_holes[ZONE_HIGHMEM] = 0;
> > -
> > - free_area_init_node(1, /* node-id */
> > - zones_size, /* num pages per zone */
> > - min_high_pfn, /* first pfn of node */
> > - zones_holes); /* holes */
> > + max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
> >
> > high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
> > kmap_init();
> > #endif
> > +
> > + free_area_init(max_zone_pfn);
> > }
> >
> > /*
> > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > index 343d87b8697d..376434c7a78b 100644
> > --- a/mm/page_alloc.c
> > +++ b/mm/page_alloc.c
> > @@ -7429,7 +7429,8 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
> > void __init free_area_init(unsigned long *max_zone_pfn)
> > {
> > unsigned long start_pfn, end_pfn;
> > - int i, nid;
> > + int i, nid, zone;
> > + bool descending = false;
> >
> > /* Record where the zone boundaries are */
> > memset(arch_zone_lowest_possible_pfn, 0,
> > @@ -7439,13 +7440,26 @@ void __init free_area_init(unsigned long *max_zone_pfn)
> >
> > start_pfn = find_min_pfn_with_active_regions();
> >
> > + /*
> > +	 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below
> > + * ZONE_NORMAL. For such cases we allow max_zone_pfn sorted in the
> > + * descending order
> > + */
> > + if (MAX_NR_ZONES > 1 && max_zone_pfn[0] > max_zone_pfn[1])
> > + descending = true;
> > +
> > for (i = 0; i < MAX_NR_ZONES; i++) {
> > - if (i == ZONE_MOVABLE)
> > + if (descending)
> > + zone = MAX_NR_ZONES - i - 1;
> > + else
> > + zone = i;
> > +
> > + if (zone == ZONE_MOVABLE)
> > continue;
> >
> > - end_pfn = max(max_zone_pfn[i], start_pfn);
> > - arch_zone_lowest_possible_pfn[i] = start_pfn;
> > - arch_zone_highest_possible_pfn[i] = end_pfn;
> > + end_pfn = max(max_zone_pfn[zone], start_pfn);
> > + arch_zone_lowest_possible_pfn[zone] = start_pfn;
> > + arch_zone_highest_possible_pfn[zone] = end_pfn;
> >
> > start_pfn = end_pfn;
> > }
> > --
> > 2.25.1
> >
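For reference, below is a minimal userspace sketch of the descending-order
handling. It is written for illustration only: it is not the kernel function
itself, and the zone layout and pfn values are made up to mimic an ARC-like
setup where high memory sits at lower physical addresses than normal memory.

/*
 * Minimal userspace sketch (not the kernel code itself) of the
 * zone-boundary assignment in free_area_init() with the descending
 * max_zone_pfn handling.  The example pfn values are invented.
 */
#include <stdbool.h>
#include <stdio.h>

enum zone_type { ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE, MAX_NR_ZONES };

static const char *zone_names[MAX_NR_ZONES] = {
	"Normal", "HighMem", "Movable",
};

int main(void)
{
	/*
	 * ARC-like layout: high memory sits at lower physical addresses
	 * than normal memory, so max_zone_pfn[] is in descending order.
	 */
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {
		[ZONE_NORMAL]  = 0x90000,	/* normal memory ends higher ... */
		[ZONE_HIGHMEM] = 0x20000,	/* ... than high memory */
	};
	unsigned long lowest[MAX_NR_ZONES] = { 0 };
	unsigned long highest[MAX_NR_ZONES] = { 0 };
	unsigned long start_pfn = 0x100;	/* first pfn with memory */
	unsigned long end_pfn;
	bool descending = false;
	int i, zone;

	/* Same check as the patch: a descending array means the first zone ends higher */
	if (MAX_NR_ZONES > 1 && max_zone_pfn[0] > max_zone_pfn[1])
		descending = true;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		/* Walk zone indices backwards when the array is descending */
		zone = descending ? MAX_NR_ZONES - i - 1 : i;

		if (zone == ZONE_MOVABLE)
			continue;

		end_pfn = max_zone_pfn[zone] > start_pfn ?
			  max_zone_pfn[zone] : start_pfn;
		lowest[zone] = start_pfn;
		highest[zone] = end_pfn;

		start_pfn = end_pfn;
	}

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		printf("%-8s pfn [%#lx, %#lx)\n",
		       zone_names[zone], lowest[zone], highest[zone]);

	return 0;
}

With these made-up values the reversed walk assigns ZONE_HIGHMEM the lower
pfn range [0x100, 0x20000) and ZONE_NORMAL the upper range [0x20000, 0x90000),
still producing non-overlapping, ascending ranges; that is what the
top -> down traversal in the patch is for.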