<div dir="auto"><div><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Tue, 28 Jul 2020, 07:16 Mike Rapoport, <<a href="mailto:rppt@kernel.org" target="_blank" rel="noreferrer">rppt@kernel.org</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">From: Mike Rapoport <<a href="mailto:rppt@linux.ibm.com" rel="noreferrer noreferrer" target="_blank">rppt@linux.ibm.com</a>><br>
<br>
There are several occurrences of the following pattern:<br>
<br>
for_each_memblock(memory, reg) {<br>
start = __pfn_to_phys(memblock_region_memory_base_pfn(reg));<br>
end = __pfn_to_phys(memblock_region_memory_end_pfn(reg));<br>
<br>
/* do something with start and end */<br>
}<br>
<br>
Using the for_each_mem_range() iterator is more appropriate in such cases and<br>
allows simpler and cleaner code.<br>
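<br>
For illustration, the converted form of that pattern looks roughly like<br>
this (a sketch using the start/end/i naming adopted throughout the series):<br>
<br>
phys_addr_t start, end;<br>
u64 i;<br>
<br>
for_each_mem_range(i, &start, &end) {<br>
<br>
/* do something with start and end */<br>
}<br>
<br>
Since for_each_mem_range() already skips MEMBLOCK_NOMAP regions, the<br>
explicit memblock_is_nomap() checks in the open-coded loops can be<br>
dropped as well.<br>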
<br>
Signed-off-by: Mike Rapoport <<a href="mailto:rppt@linux.ibm.com" rel="noreferrer noreferrer" target="_blank">rppt@linux.ibm.com</a>><br>
---<br>
arch/arm/kernel/setup.c | 18 +++++++----<br>
arch/arm/mm/mmu.c | 39 ++++++++----------------<br>
arch/arm/mm/pmsa-v7.c | 20 ++++++------<br>
arch/arm/mm/pmsa-v8.c | 17 +++++------<br>
arch/arm/xen/mm.c | 7 +++--<br>
arch/arm64/mm/kasan_init.c | 8 ++---<br>
arch/arm64/mm/mmu.c | 11 ++-----<br>
arch/c6x/kernel/setup.c | 9 +++---<br>
arch/microblaze/mm/init.c | 9 +++---<br>
arch/mips/cavium-octeon/dma-octeon.c | 12 ++++----<br>
arch/mips/kernel/setup.c | 31 +++++++++----------<br>
arch/openrisc/mm/init.c | 8 +++--<br>
arch/powerpc/kernel/fadump.c | 27 +++++++---------<br>
arch/powerpc/mm/book3s64/hash_utils.c | 16 +++++-----<br>
arch/powerpc/mm/book3s64/radix_pgtable.c | 11 +++----<br>
arch/powerpc/mm/kasan/kasan_init_32.c | 8 ++---<br>
arch/powerpc/mm/mem.c | 16 ++++++----<br>
arch/powerpc/mm/pgtable_32.c | 8 ++---<br>
arch/riscv/mm/init.c | 24 ++++++---------<br>
arch/riscv/mm/kasan_init.c | 10 +++---<br>
arch/s390/kernel/setup.c | 27 ++++++++++------<br>
arch/s390/mm/vmem.c | 16 +++++-----<br>
arch/sparc/mm/init_64.c | 12 +++-----<br>
drivers/bus/mvebu-mbus.c | 12 ++++----<br>
drivers/s390/char/zcore.c | 9 +++---<br>
25 files changed, 187 insertions(+), 198 deletions(-)<br>
<br>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c<br>
index d8e18cdd96d3..3f65d0ac9f63 100644<br>
--- a/arch/arm/kernel/setup.c<br>
+++ b/arch/arm/kernel/setup.c<br>
@@ -843,19 +843,25 @@ early_param("mem", early_mem);<br>
<br>
static void __init request_standard_resources(const struct machine_desc *mdesc)<br>
{<br>
- struct memblock_region *region;<br>
+ phys_addr_t start, end, res_end;<br>
struct resource *res;<br>
+ u64 i;<br>
<br>
kernel_code.start = virt_to_phys(_text);<br>
kernel_code.end = virt_to_phys(__init_begin - 1);<br>
kernel_data.start = virt_to_phys(_sdata);<br>
kernel_data.end = virt_to_phys(_end - 1);<br>
<br>
- for_each_memblock(memory, region) {<br>
- phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));<br>
- phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;<br>
+ for_each_mem_range(i, &start, &end) {<br>
unsigned long boot_alias_start;<br>
<br>
+ /*<br>
+ * In memblock, end points to the first byte after the<br>
+ * range while in resources, end points to the last byte in<br>
+ * the range.<br>
+ */<br>
+ res_end = end - 1;<br>
+<br>
/*<br>
* Some systems have a special memory alias which is only<br>
* used for booting. We need to advertise this region to<br>
@@ -869,7 +875,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)<br>
__func__, sizeof(*res));<br>
res->name = "System RAM (boot alias)";<br>
res->start = boot_alias_start;<br>
- res->end = phys_to_idmap(end);<br>
+ res->end = phys_to_idmap(res_end);<br>
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;<br>
request_resource(&iomem_resource, res);<br>
}<br>
@@ -880,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)<br>
sizeof(*res));<br>
res->name = "System RAM";<br>
res->start = start;<br>
- res->end = end;<br>
+ res->end = res_end;<br>
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;<br>
<br>
request_resource(&iomem_resource, res);<br>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c<br>
index 628028bfbb92..a149d9cb4fdb 100644<br>
--- a/arch/arm/mm/mmu.c<br>
+++ b/arch/arm/mm/mmu.c<br>
@@ -1155,9 +1155,8 @@ phys_addr_t arm_lowmem_limit __initdata = 0;<br>
<br>
void __init adjust_lowmem_bounds(void)<br>
{<br>
- phys_addr_t memblock_limit = 0;<br>
- u64 vmalloc_limit;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t block_start, block_end, memblock_limit = 0;<br>
+ u64 vmalloc_limit, i;<br>
phys_addr_t lowmem_limit = 0;<br>
<br>
/*<br>
@@ -1173,26 +1172,18 @@ void __init adjust_lowmem_bounds(void)<br>
* The first usable region must be PMD aligned. Mark its start<br>
* as MEMBLOCK_NOMAP if it isn't<br>
*/<br>
- for_each_memblock(memory, reg) {<br>
- if (!memblock_is_nomap(reg)) {<br>
- if (!IS_ALIGNED(reg->base, PMD_SIZE)) {<br>
- phys_addr_t len;<br>
+ for_each_mem_range(i, &block_start, &block_end) {<br>
+ if (!IS_ALIGNED(block_start, PMD_SIZE)) {<br>
+ phys_addr_t len;<br>
<br>
- len = round_up(reg->base, PMD_SIZE) - reg->base;<br>
- memblock_mark_nomap(reg->base, len);<br>
- }<br>
- break;<br>
+ len = round_up(block_start, PMD_SIZE) - block_start;<br>
+ memblock_mark_nomap(block_start, len);<br>
}<br>
+ break;<br>
}<br>
<br>
- for_each_memblock(memory, reg) {<br>
- phys_addr_t block_start = reg->base;<br>
- phys_addr_t block_end = reg->base + reg->size;<br>
-<br>
- if (memblock_is_nomap(reg))<br>
- continue;<br>
-<br>
- if (reg->base < vmalloc_limit) {<br>
+ for_each_mem_range(i, &block_start, &block_end) {<br>
+ if (block_start < vmalloc_limit) {<br>
if (block_end > lowmem_limit)<br>
/*<br>
* Compare as u64 to ensure vmalloc_limit does<br>
@@ -1441,19 +1432,15 @@ static void __init kmap_init(void)<br>
<br>
static void __init map_lowmem(void)<br>
{<br>
- struct memblock_region *reg;<br>
phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);<br>
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
/* Map all the lowmem memory banks. */<br>
- for_each_memblock(memory, reg) {<br>
- phys_addr_t start = reg->base;<br>
- phys_addr_t end = start + reg->size;<br>
+ for_each_mem_range(i, &start, &end) {<br>
struct map_desc map;<br>
<br>
- if (memblock_is_nomap(reg))<br>
- continue;<br>
-<br>
if (end > arm_lowmem_limit)<br>
end = arm_lowmem_limit;<br>
if (start >= end)<br>
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c<br>
index 699fa2e88725..44b7644a4237 100644<br>
--- a/arch/arm/mm/pmsa-v7.c<br>
+++ b/arch/arm/mm/pmsa-v7.c<br>
@@ -231,10 +231,9 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,<br>
void __init pmsav7_adjust_lowmem_bounds(void)<br>
{<br>
phys_addr_t specified_mem_size = 0, total_mem_size = 0;<br>
- struct memblock_region *reg;<br>
- bool first = true;<br>
phys_addr_t mem_start;<br>
phys_addr_t mem_end;<br>
+ phys_addr_t reg_start, reg_end;<br>
unsigned int mem_max_regions;<br>
int num, i;<br>
<br>
@@ -262,20 +261,19 @@ void __init pmsav7_adjust_lowmem_bounds(void)<br>
mem_max_regions -= num;<br>
#endif<br>
<br>
- for_each_memblock(memory, reg) {<br>
- if (first) {<br>
+ for_each_mem_range(i, &reg_start, &reg_end) {<br>
+ if (i == 0) {<br>
phys_addr_t phys_offset = PHYS_OFFSET;<br>
<br>
/*<br>
* Initially only use memory continuous from<br>
* PHYS_OFFSET */<br>
- if (reg->base != phys_offset)<br>
+ if (reg_start != phys_offset)<br>
panic("First memory bank must be contiguous from PHYS_OFFSET");<br>
<br>
- mem_start = reg->base;<br>
- mem_end = reg->base + reg->size;<br>
- specified_mem_size = reg->size;<br>
- first = false;<br>
+ mem_start = reg_start;<br>
+ mem_end = reg_end;<br>
+ specified_mem_size = mem_end - mem_start;<br>
} else {<br>
/*<br>
* memblock auto merges contiguous blocks, remove<br>
@@ -283,8 +281,8 @@ void __init pmsav7_adjust_lowmem_bounds(void)<br>
* blocks separately while iterating)<br>
*/<br>
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",<br>
- &mem_end, &reg->base);<br>
- memblock_remove(reg->base, 0 - reg->base);<br>
+ &mem_end, &reg_start);<br>
+ memblock_remove(reg_start, 0 - reg_start);<br>
break;<br>
}<br>
}<br>
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c<br>
index 0d7d5fb59247..b39e74b48437 100644<br>
--- a/arch/arm/mm/pmsa-v8.c<br>
+++ b/arch/arm/mm/pmsa-v8.c<br>
@@ -94,20 +94,19 @@ static __init bool is_region_fixed(int number)<br>
void __init pmsav8_adjust_lowmem_bounds(void)<br>
{<br>
phys_addr_t mem_end;<br>
- struct memblock_region *reg;<br>
- bool first = true;<br>
+ phys_addr_t reg_start, reg_end;<br>
+ int i;<br>
<br>
- for_each_memblock(memory, reg) {<br>
- if (first) {<br>
+ for_each_mem_range(i, &reg_start, &reg_end) {<br>
+ if (i == 0) {<br>
phys_addr_t phys_offset = PHYS_OFFSET;<br>
<br>
/*<br>
* Initially only use memory continuous from<br>
* PHYS_OFFSET */<br>
- if (reg->base != phys_offset)<br>
+ if (reg_start != phys_offset)<br>
panic("First memory bank must be contiguous from PHYS_OFFSET");<br>
- mem_end = reg->base + reg->size;<br>
- first = false;<br>
+ mem_end = reg_end;<br>
} else {<br>
/*<br>
* memblock auto merges contiguous blocks, remove<br>
@@ -115,8 +114,8 @@ void __init pmsav8_adjust_lowmem_bounds(void)<br>
* blocks separately while iterating)<br>
*/<br>
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",<br>
- &mem_end, &reg->base);<br>
- memblock_remove(reg->base, 0 - reg->base);<br>
+ &mem_end, &reg_start);<br>
+ memblock_remove(reg_start, 0 - reg_start);<br>
break;<br>
}<br>
}<br>
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c<br>
index d40e9e5fc52b..05f24ff41e36 100644<br>
--- a/arch/arm/xen/mm.c<br>
+++ b/arch/arm/xen/mm.c<br>
@@ -24,11 +24,12 @@<br>
<br>
unsigned long xen_get_swiotlb_free_pages(unsigned int order)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t base;<br>
gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;<br>
+ u64 i;<br>
<br>
- for_each_memblock(memory, reg) {<br>
- if (reg->base < (phys_addr_t)0xffffffff) {<br>
+ for_each_mem_range(i, &base, NULL) {<br>
+ if (base < (phys_addr_t)0xffffffff) {<br>
if (IS_ENABLED(CONFIG_ZONE_DMA32))<br>
flags |= __GFP_DMA32;<br>
else<br>
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c<br>
index 7291b26ce788..1faa086f9193 100644<br>
--- a/arch/arm64/mm/kasan_init.c<br>
+++ b/arch/arm64/mm/kasan_init.c<br>
@@ -212,7 +212,7 @@ void __init kasan_init(void)<br>
{<br>
u64 kimg_shadow_start, kimg_shadow_end;<br>
u64 mod_shadow_start, mod_shadow_end;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t _start, _end;<br>
int i;<br>
<br>
kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;<br>
@@ -246,9 +246,9 @@ void __init kasan_init(void)<br>
kasan_populate_early_shadow((void *)mod_shadow_end,<br>
(void *)kimg_shadow_start);<br>
<br>
- for_each_memblock(memory, reg) {<br>
- void *start = (void *)__phys_to_virt(reg->base);<br>
- void *end = (void *)__phys_to_virt(reg->base + reg->size);<br>
+ for_each_mem_range(i, &_start, &_end) {<br>
+ void *start = (void *)__phys_to_virt(_start);<br>
+ void *end = (void *)__phys_to_virt(_end);<br>
<br>
if (start >= end)<br>
break;<br>
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c<br>
index 1df25f26571d..327264fb83fb 100644<br>
--- a/arch/arm64/mm/mmu.c<br>
+++ b/arch/arm64/mm/mmu.c<br>
@@ -461,8 +461,9 @@ static void __init map_mem(pgd_t *pgdp)<br>
{<br>
phys_addr_t kernel_start = __pa_symbol(_text);<br>
phys_addr_t kernel_end = __pa_symbol(__init_begin);<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
int flags = 0;<br>
+ u64 i;<br>
<br>
if (rodata_full || debug_pagealloc_enabled())<br>
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;<br>
@@ -481,15 +482,9 @@ static void __init map_mem(pgd_t *pgdp)<br>
#endif<br>
<br>
/* map all the memory banks */<br>
- for_each_memblock(memory, reg) {<br>
- phys_addr_t start = reg->base;<br>
- phys_addr_t end = start + reg->size;<br>
-<br>
+ for_each_mem_range(i, &start, &end) {<br>
if (start >= end)<br>
break;<br>
- if (memblock_is_nomap(reg))<br>
- continue;<br>
-<br>
__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);<br>
}<br>
<br>
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c<br>
index 8ef35131f999..9254c3b794a5 100644<br>
--- a/arch/c6x/kernel/setup.c<br>
+++ b/arch/c6x/kernel/setup.c<br>
@@ -287,7 +287,8 @@ notrace void __init machine_init(unsigned long dt_ptr)<br>
<br>
void __init setup_arch(char **cmdline_p)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
printk(KERN_INFO "Initializing kernel\n");<br>
<br>
@@ -351,9 +352,9 @@ void __init setup_arch(char **cmdline_p)<br>
disable_caching(ram_start, ram_end - 1);<br>
<br>
/* Set caching of external RAM used by Linux */<br>
- for_each_memblock(memory, reg)<br>
- enable_caching(CACHE_REGION_START(reg->base),<br>
- CACHE_REGION_START(reg->base + reg->size - 1));<br>
+ for_each_mem_range(i, &start, &end)<br>
+ enable_caching(CACHE_REGION_START(start),<br>
+ CACHE_REGION_START(end - 1));<br>
<br>
#ifdef CONFIG_BLK_DEV_INITRD<br>
/*<br>
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c<br>
index 49e0c241f9b1..15403b5adfcf 100644<br>
--- a/arch/microblaze/mm/init.c<br>
+++ b/arch/microblaze/mm/init.c<br>
@@ -106,13 +106,14 @@ static void __init paging_init(void)<br>
void __init setup_memory(void)<br>
{<br>
#ifndef CONFIG_MMU<br>
- struct memblock_region *reg;<br>
u32 kernel_align_start, kernel_align_size;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
/* Find main memory where is the kernel */<br>
- for_each_memblock(memory, reg) {<br>
- memory_start = (u32)reg->base;<br>
- lowmem_size = reg->size;<br>
+ for_each_mem_range(i, &start, &end) {<br>
+ memory_start = start;<br>
+ lowmem_size = end - start;<br>
if ((memory_start <= (u32)_text) &&<br>
((u32)_text <= (memory_start + lowmem_size - 1))) {<br>
memory_size = lowmem_size;<br>
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c<br>
index 14ea680d180e..d938c1f7c1e1 100644<br>
--- a/arch/mips/cavium-octeon/dma-octeon.c<br>
+++ b/arch/mips/cavium-octeon/dma-octeon.c<br>
@@ -190,25 +190,25 @@ char *octeon_swiotlb;<br>
<br>
void __init plat_swiotlb_setup(void)<br>
{<br>
- struct memblock_region *mem;<br>
+ phys_addr_t start, end;<br>
phys_addr_t max_addr;<br>
phys_addr_t addr_size;<br>
size_t swiotlbsize;<br>
unsigned long swiotlb_nslabs;<br>
+ u64 i;<br>
<br>
max_addr = 0;<br>
addr_size = 0;<br>
<br>
- for_each_memblock(memory, mem) {<br>
+ for_each_mem_range(i, &start, &end) {<br>
/* These addresses map low for PCI. */<br>
if (mem->base > 0x410000000ull && !OCTEON_IS_OCTEON2())<br>
continue;<br>
<br>
- addr_size += mem->size;<br>
-<br>
- if (max_addr < mem->base + mem->size)<br>
- max_addr = mem->base + mem->size;<br>
+ addr_size += (end - start);<br>
<br>
+ if (max_addr < end)<br>
+ max_addr = end;<br>
}<br>
<br>
swiotlbsize = PAGE_SIZE;<br>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c<br>
index 7b537fa2035d..eaac1b66026d 100644<br>
--- a/arch/mips/kernel/setup.c<br>
+++ b/arch/mips/kernel/setup.c<br>
@@ -300,8 +300,9 @@ static void __init bootmem_init(void)<br>
<br>
static void __init bootmem_init(void)<br>
{<br>
- struct memblock_region *mem;<br>
phys_addr_t ramstart, ramend;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
ramstart = memblock_start_of_DRAM();<br>
ramend = memblock_end_of_DRAM();<br>
@@ -338,18 +339,13 @@ static void __init bootmem_init(void)<br>
<br>
min_low_pfn = ARCH_PFN_OFFSET;<br>
max_pfn = PFN_DOWN(ramend);<br>
- for_each_memblock(memory, mem) {<br>
- unsigned long start = memblock_region_memory_base_pfn(mem);<br>
- unsigned long end = memblock_region_memory_end_pfn(mem);<br>
-<br>
+ for_each_mem_range(i, &start, &end) {<br>
/*<br>
* Skip highmem here so we get an accurate max_low_pfn if low<br>
* memory stops short of high memory.<br>
* If the region overlaps HIGHMEM_START, end is clipped so<br>
* max_pfn excludes the highmem portion.<br>
*/<br>
- if (memblock_is_nomap(mem))<br>
- continue;<br>
if (start >= PFN_DOWN(HIGHMEM_START))<br>
continue;<br>
if (end > PFN_DOWN(HIGHMEM_START))<br>
@@ -458,13 +454,12 @@ early_param("memmap", early_parse_memmap);<br>
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;<br>
static int __init early_parse_elfcorehdr(char *p)<br>
{<br>
- struct memblock_region *mem;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
setup_elfcorehdr = memparse(p, &p);<br>
<br>
- for_each_memblock(memory, mem) {<br>
- unsigned long start = mem->base;<br>
- unsigned long end = start + mem->size;<br>
+ for_each_mem_range(i, &start, &end) {<br>
if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {<br>
/*<br>
* Reserve from the elf core header to the end of<br>
@@ -728,7 +723,8 @@ static void __init arch_mem_init(char **cmdline_p)<br>
<br>
static void __init resource_init(void)<br>
{<br>
- struct memblock_region *region;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
if (UNCAC_BASE != IO_BASE)<br>
return;<br>
@@ -740,9 +736,7 @@ static void __init resource_init(void)<br>
bss_resource.start = __pa_symbol(&__bss_start);<br>
bss_resource.end = __pa_symbol(&__bss_stop) - 1;<br>
<br>
- for_each_memblock(memory, region) {<br>
- phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));<br>
- phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;<br>
+ for_each_mem_range(i, &start, &end) {<br>
struct resource *res;<br>
<br>
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);<br>
@@ -751,7 +745,12 @@ static void __init resource_init(void)<br>
sizeof(struct resource));<br>
<br>
res->start = start;<br>
- res->end = end;<br>
+ /*<br>
+ * In memblock, end points to the first byte after the<br>
+ * range while in resources, end points to the last byte in<br>
+ * the range.<br>
+ */<br>
+ res->end = end - 1;<br>
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;<br>
res->name = "System RAM";<br>
<br>
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c<br>
index 3d7c79c7745d..8348feaaf46e 100644<br>
--- a/arch/openrisc/mm/init.c<br>
+++ b/arch/openrisc/mm/init.c<br>
@@ -64,6 +64,7 @@ extern const char _s_kernel_ro[], _e_kernel_ro[];<br>
*/<br>
static void __init map_ram(void)<br>
{<br>
+ phys_addr_t start, end;<br>
unsigned long v, p, e;<br>
pgprot_t prot;<br>
pgd_t *pge;<br>
@@ -71,6 +72,7 @@ static void __init map_ram(void)<br>
pud_t *pue;<br>
pmd_t *pme;<br>
pte_t *pte;<br>
+ u64 i;<br>
/* These mark extents of read-only kernel pages...<br>
* ...from vmlinux.lds.S<br>
*/<br>
@@ -78,9 +80,9 @@ static void __init map_ram(void)<br>
<br>
v = PAGE_OFFSET;<br>
<br>
- for_each_memblock(memory, region) {<br>
- p = (u32) region->base & PAGE_MASK;<br>
- e = p + (u32) region->size;<br>
+ for_each_mem_range(i, &start, &end) {<br>
+ p = (u32) start & PAGE_MASK;<br>
+ e = (u32) end;<br>
<br>
v = (u32) __va(p);<br>
pge = pgd_offset_k(v);<br>
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c<br>
index fdbafe417139..435b98d069eb 100644<br>
--- a/arch/powerpc/kernel/fadump.c<br>
+++ b/arch/powerpc/kernel/fadump.c<br>
@@ -180,13 +180,13 @@ int is_fadump_active(void)<br>
*/<br>
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t reg_start, reg_end;<br>
bool ret = false;<br>
- u64 start, end;<br>
+ u64 i, start, end;<br>
<br>
- for_each_memblock(memory, reg) {<br>
- start = max_t(u64, d_start, reg->base);<br>
- end = min_t(u64, d_end, (reg->base + reg->size));<br>
+ for_each_mem_range(i, &reg_start, &reg_end) {<br>
+ start = max_t(u64, d_start, reg_start);<br>
+ end = min_t(u64, d_end, reg_end);<br>
if (d_start < end) {<br>
/* Memory hole from d_start to start */<br>
if (start > d_start)<br>
@@ -413,7 +413,7 @@ static int __init fadump_get_boot_mem_regions(void)<br>
{<br>
unsigned long base, size, cur_size, hole_size, last_end;<br>
unsigned long mem_size = fw_dump.boot_memory_size;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t reg_start, reg_end;<br>
int ret = 1;<br>
<br>
fw_dump.boot_mem_regs_cnt = 0;<br>
@@ -421,9 +421,8 @@ static int __init fadump_get_boot_mem_regions(void)<br>
last_end = 0;<br>
hole_size = 0;<br>
cur_size = 0;<br>
- for_each_memblock(memory, reg) {<br>
- base = reg->base;<br>
- size = reg->size;<br>
+ for_each_mem_range(i, &reg_start, &reg_end) {<br>
+ size = reg_end - reg_start;<br>
hole_size += (base - last_end);<br>
<br>
if ((cur_size + size) >= mem_size) {<br>
@@ -959,9 +958,8 @@ static int fadump_init_elfcore_header(char *bufp)<br>
*/<br>
static int fadump_setup_crash_memory_ranges(void)<br>
{<br>
- struct memblock_region *reg;<br>
- u64 start, end;<br>
- int i, ret;<br>
+ u64 i, start, end;<br>
+ int ret;<br>
<br>
pr_debug("Setup crash memory ranges.\n");<br>
crash_mrange_info.mem_range_cnt = 0;<br>
@@ -979,10 +977,7 @@ static int fadump_setup_crash_memory_ranges(void)<br>
return ret;<br>
}<br>
<br>
- for_each_memblock(memory, reg) {<br>
- start = (u64)reg->base;<br>
- end = start + (u64)reg->size;<br>
-<br>
+ for_each_mem_range(i, &start, end) {<br></blockquote></div></div><div dir="auto"><br></div><div dir="auto">I don't know anything about this code, but from pure pattern matching it looks like you missed a & here.</div><div dir="auto"><br></div><div dir="auto"><div class="gmail_quote"><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
/*<br>
* skip the memory chunk that is already added<br>
* (0 through boot_memory_top).<br>
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c<br>
index 468169e33c86..9ba76b075b11 100644<br>
--- a/arch/powerpc/mm/book3s64/hash_utils.c<br>
+++ b/arch/powerpc/mm/book3s64/hash_utils.c<br>
@@ -7,7 +7,7 @@<br>
*<br>
* SMP scalability work:<br>
* Copyright (C) 2001 Anton Blanchard <<a href="mailto:anton@au.ibm.com" rel="noreferrer noreferrer" target="_blank">anton@au.ibm.com</a>>, IBM<br>
- * <br>
+ *<br>
* Module name: htab.c<br>
*<br>
* Description:<br>
@@ -862,8 +862,8 @@ static void __init htab_initialize(void)<br>
unsigned long table;<br>
unsigned long pteg_count;<br>
unsigned long prot;<br>
- unsigned long base = 0, size = 0;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t base = 0, size = 0, end;<br>
+ u64 i;<br>
<br>
DBG(" -> htab_initialize()\n");<br>
<br>
@@ -879,7 +879,7 @@ static void __init htab_initialize(void)<br>
/*<br>
* Calculate the required size of the htab. We want the number of<br>
* PTEGs to equal one half the number of real pages.<br>
- */ <br>
+ */<br>
htab_size_bytes = htab_get_table_size();<br>
pteg_count = htab_size_bytes >> 7;<br>
<br>
@@ -889,7 +889,7 @@ static void __init htab_initialize(void)<br>
firmware_has_feature(FW_FEATURE_PS3_LV1)) {<br>
/* Using a hypervisor which owns the htab */<br>
htab_address = NULL;<br>
- _SDR1 = 0; <br>
+ _SDR1 = 0;<br>
#ifdef CONFIG_FA_DUMP<br>
/*<br>
* If firmware assisted dump is active firmware preserves<br>
@@ -955,9 +955,9 @@ static void __init htab_initialize(void)<br>
#endif /* CONFIG_DEBUG_PAGEALLOC */<br>
<br>
/* create bolted the linear mapping in the hash table */<br>
- for_each_memblock(memory, reg) {<br>
- base = (unsigned long)__va(reg->base);<br>
- size = reg->size;<br>
+ for_each_mem_range(i, &base, &end) {<br>
+ size = end - base;<br>
+ base = (unsigned long)__va(base);<br>
<br>
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",<br>
base, size, prot);<br>
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c<br>
index bb00e0cba119..65657b920847 100644<br>
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c<br>
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c<br>
@@ -318,28 +318,27 @@ static int __meminit create_physical_mapping(unsigned long start,<br>
static void __init radix_init_pgtable(void)<br>
{<br>
unsigned long rts_field;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
/* We don't support slb for radix */<br>
mmu_slb_size = 0;<br>
/*<br>
* Create the linear mapping, using standard page size for now<br>
*/<br>
- for_each_memblock(memory, reg) {<br>
+ for_each_mem_range(i, &start, &end) {<br>
/*<br>
* The memblock allocator is up at this point, so the<br>
* page tables will be allocated within the range. No<br>
* need or a node (which we don't have yet).<br>
*/<br>
<br>
- if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {<br>
+ if (end >= RADIX_VMALLOC_START) {<br>
pr_warn("Outside the supported range\n");<br>
continue;<br>
}<br>
<br>
- WARN_ON(create_physical_mapping(reg->base,<br>
- reg->base + reg->size,<br>
- -1, PAGE_KERNEL));<br>
+ WARN_ON(create_physical_mapping(start, end, -1, PAGE_KERNEL));<br>
}<br>
<br>
/* Find out how many PID bits are supported */<br>
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c<br>
index 0760e1e754e4..6e73434e4e41 100644<br>
--- a/arch/powerpc/mm/kasan/kasan_init_32.c<br>
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c<br>
@@ -120,11 +120,11 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)<br>
static void __init kasan_mmu_init(void)<br>
{<br>
int ret;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t base, end;<br>
+ u64 i;<br>
<br>
- for_each_memblock(memory, reg) {<br>
- phys_addr_t base = reg->base;<br>
- phys_addr_t top = min(base + reg->size, total_lowmem);<br>
+ for_each_mem_range(i, &base, &end) {<br>
+ phys_addr_t top = min(end, total_lowmem);<br>
<br>
if (base >= top)<br>
continue;<br>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c<br>
index 38d1acd7c8ef..0248b6d58fcd 100644<br>
--- a/arch/powerpc/mm/mem.c<br>
+++ b/arch/powerpc/mm/mem.c<br>
@@ -593,20 +593,24 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,<br>
*/<br>
static int __init add_system_ram_resources(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
- for_each_memblock(memory, reg) {<br>
+ for_each_mem_range(i, &start, &end) {<br>
struct resource *res;<br>
- unsigned long base = reg->base;<br>
- unsigned long size = reg->size;<br>
<br>
res = kzalloc(sizeof(struct resource), GFP_KERNEL);<br>
WARN_ON(!res);<br>
<br>
if (res) {<br>
res->name = "System RAM";<br>
- res->start = base;<br>
- res->end = base + size - 1;<br>
+ res->start = start;<br>
+ /*<br>
+ * In memblock, end points to the first byte after<br>
+ * the range while in resources, end points to the<br>
+ * last byte in the range.<br>
+ */<br>
+ res->end = end - 1;<br>
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;<br>
WARN_ON(request_resource(&iomem_resource, res) < 0);<br>
}<br>
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c<br>
index 6eb4eab79385..079159e97bca 100644<br>
--- a/arch/powerpc/mm/pgtable_32.c<br>
+++ b/arch/powerpc/mm/pgtable_32.c<br>
@@ -123,11 +123,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)<br>
<br>
void __init mapin_ram(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t base, end;<br>
+ u64 i;<br>
<br>
- for_each_memblock(memory, reg) {<br>
- phys_addr_t base = reg->base;<br>
- phys_addr_t top = min(base + reg->size, total_lowmem);<br>
+ for_each_mem_range(i, &base, &end) {<br>
+ phys_addr_t top = min(end, total_lowmem);<br>
<br>
if (base >= top)<br>
continue;<br>
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c<br>
index 7440ba2cdaaa..2abe1165fe56 100644<br>
--- a/arch/riscv/mm/init.c<br>
+++ b/arch/riscv/mm/init.c<br>
@@ -145,21 +145,22 @@ static phys_addr_t dtb_early_pa __initdata;<br>
<br>
void __init setup_bootmem(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start;<br>
phys_addr_t mem_size = 0;<br>
phys_addr_t total_mem = 0;<br>
phys_addr_t mem_start, end = 0;<br>
phys_addr_t vmlinux_end = __pa_symbol(&_end);<br>
phys_addr_t vmlinux_start = __pa_symbol(&_start);<br>
+ u64 i;<br>
<br>
/* Find the memory region containing the kernel */<br>
- for_each_memblock(memory, reg) {<br>
- end = reg->base + reg->size;<br>
+ for_each_mem_range(i, &start, &end) {<br>
+ phys_addr_t size = end - start;<br>
if (!total_mem)<br>
- mem_start = reg->base;<br>
- if (reg->base <= vmlinux_start && vmlinux_end <= end)<br>
- BUG_ON(reg->size == 0);<br>
- total_mem = total_mem + reg->size;<br>
+ mem_start = start;<br>
+ if (start <= vmlinux_start && vmlinux_end <= end)<br>
+ BUG_ON(size == 0);<br>
+ total_mem = total_mem + size;<br>
}<br>
<br>
/*<br>
@@ -456,7 +457,7 @@ static void __init setup_vm_final(void)<br>
{<br>
uintptr_t va, map_size;<br>
phys_addr_t pa, start, end;<br>
- struct memblock_region *reg;<br>
+ u64 i;<br>
<br>
/* Set mmu_enabled flag */<br>
mmu_enabled = true;<br>
@@ -467,14 +468,9 @@ static void __init setup_vm_final(void)<br>
PGDIR_SIZE, PAGE_TABLE);<br>
<br>
/* Map all memory banks */<br>
- for_each_memblock(memory, reg) {<br>
- start = reg->base;<br>
- end = start + reg->size;<br>
-<br>
+ for_each_mem_range(i, &start, &end) {<br>
if (start >= end)<br>
break;<br>
- if (memblock_is_nomap(reg))<br>
- continue;<br>
if (start <= __pa(PAGE_OFFSET) &&<br>
__pa(PAGE_OFFSET) < end)<br>
start = __pa(PAGE_OFFSET);<br>
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c<br>
index 87b4ab3d3c77..12ddd1f6bf70 100644<br>
--- a/arch/riscv/mm/kasan_init.c<br>
+++ b/arch/riscv/mm/kasan_init.c<br>
@@ -85,16 +85,16 @@ static void __init populate(void *start, void *end)<br>
<br>
void __init kasan_init(void)<br>
{<br>
- struct memblock_region *reg;<br>
- unsigned long i;<br>
+ phys_addr_t _start, _end;<br>
+ u64 i;<br>
<br>
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,<br>
(void *)kasan_mem_to_shadow((void *)<br>
VMALLOC_END));<br>
<br>
- for_each_memblock(memory, reg) {<br>
- void *start = (void *)__va(reg->base);<br>
- void *end = (void *)__va(reg->base + reg->size);<br>
+ for_each_mem_range(i, &_start, &_end) {<br>
+ void *start = (void *)_start;<br>
+ void *end = (void *)_end;<br>
<br>
if (start >= end)<br>
break;<br>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c<br>
index 8b284cf6e199..b6c4a0c5ff86 100644<br>
--- a/arch/s390/kernel/setup.c<br>
+++ b/arch/s390/kernel/setup.c<br>
@@ -198,7 +198,7 @@ static void __init conmode_default(void)<br>
cpcmd("QUERY TERM", query_buffer, 1024, NULL);<br>
ptr = strstr(query_buffer, "CONMODE");<br>
/*<br>
- * Set the conmode to 3215 so that the device recognition <br>
+ * Set the conmode to 3215 so that the device recognition<br>
* will set the cu_type of the console to 3215. If the<br>
* conmode is 3270 and we don't set it back then both<br>
* 3215 and the 3270 driver will try to access the console<br>
@@ -258,7 +258,7 @@ static inline void setup_zfcpdump(void) {}<br>
<br>
/*<br>
* Reboot, halt and power_off stubs. They just call _machine_restart,<br>
- * _machine_halt or _machine_power_off. <br>
+ * _machine_halt or _machine_power_off.<br>
*/<br>
<br>
void machine_restart(char *command)<br>
@@ -484,8 +484,9 @@ static struct resource __initdata *standard_resources[] = {<br>
static void __init setup_resources(void)<br>
{<br>
struct resource *res, *std_res, *sub_res;<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
int j;<br>
+ u64 i;<br>
<br>
code_resource.start = (unsigned long) _text;<br>
code_resource.end = (unsigned long) _etext - 1;<br>
@@ -494,7 +495,7 @@ static void __init setup_resources(void)<br>
bss_resource.start = (unsigned long) __bss_start;<br>
bss_resource.end = (unsigned long) __bss_stop - 1;<br>
<br>
- for_each_memblock(memory, reg) {<br>
+ for_each_mem_range(i, &start, &end) {<br>
res = memblock_alloc(sizeof(*res), 8);<br>
if (!res)<br>
panic("%s: Failed to allocate %zu bytes align=0x%x\n",<br>
@@ -502,8 +503,13 @@ static void __init setup_resources(void)<br>
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;<br>
<br>
res->name = "System RAM";<br>
- res->start = reg->base;<br>
- res->end = reg->base + reg->size - 1;<br>
+ res->start = start;<br>
+ /*<br>
+ * In memblock, end points to the first byte after the<br>
+ * range while in resources, end points to the last byte in<br>
+ * the range.<br>
+ */<br>
+ res->end = end - 1;<br>
request_resource(&iomem_resource, res);<br>
<br>
for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {<br>
@@ -819,14 +825,15 @@ static void __init reserve_kernel(void)<br>
<br>
static void __init setup_memory(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
/*<br>
* Init storage key for present memory<br>
*/<br>
- for_each_memblock(memory, reg) {<br>
- storage_key_init_range(reg->base, reg->base + reg->size);<br>
- }<br>
+ for_each_mem_range(i, &start, &end)<br>
+ storage_key_init_range(start, end);<br>
+<br>
psw_set_key(PAGE_DEFAULT_KEY);<br>
<br>
/* Only cosmetics */<br>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c<br>
index 8b6282cf7d13..30076ecc3eb7 100644<br>
--- a/arch/s390/mm/vmem.c<br>
+++ b/arch/s390/mm/vmem.c<br>
@@ -399,10 +399,11 @@ int vmem_add_mapping(unsigned long start, unsigned long size)<br>
*/<br>
void __init vmem_map_init(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
+ u64 i;<br>
<br>
- for_each_memblock(memory, reg)<br>
- vmem_add_mem(reg->base, reg->size);<br>
+ for_each_mem_range(i, &start, &end)<br>
+ vmem_add_mem(start, end - start);<br>
__set_memory((unsigned long)_stext,<br>
(unsigned long)(_etext - _stext) >> PAGE_SHIFT,<br>
SET_MEMORY_RO | SET_MEMORY_X);<br>
@@ -428,16 +429,17 @@ void __init vmem_map_init(void)<br>
*/<br>
static int __init vmem_convert_memory_chunk(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
struct memory_segment *seg;<br>
+ u64 i;<br>
<br>
mutex_lock(&vmem_mutex);<br>
- for_each_memblock(memory, reg) {<br>
+ for_each_mem_range(i, &start, &end) {<br>
seg = kzalloc(sizeof(*seg), GFP_KERNEL);<br>
if (!seg)<br>
panic("Out of memory...\n");<br>
- seg->start = reg->base;<br>
- seg->size = reg->size;<br>
+ seg->start = start;<br>
+ seg->size = end - start;<br>
insert_memory_segment(seg);<br>
}<br>
mutex_unlock(&vmem_mutex);<br>
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c<br>
index 02e6e5e0f106..de63c002638e 100644<br>
--- a/arch/sparc/mm/init_64.c<br>
+++ b/arch/sparc/mm/init_64.c<br>
@@ -1192,18 +1192,14 @@ int of_node_to_nid(struct device_node *dp)<br>
<br>
static void __init add_node_ranges(void)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
unsigned long prev_max;<br>
+ u64 i;<br>
<br>
memblock_resized:<br>
prev_max = memblock.memory.max;<br>
<br>
- for_each_memblock(memory, reg) {<br>
- unsigned long size = reg->size;<br>
- unsigned long start, end;<br>
-<br>
- start = reg->base;<br>
- end = start + size;<br>
+ for_each_mem_range(i, &start, &end) {<br>
while (start < end) {<br>
unsigned long this_end;<br>
int nid;<br>
@@ -1211,7 +1207,7 @@ static void __init add_node_ranges(void)<br>
this_end = memblock_nid_range(start, end, &nid);<br>
<br>
numadbg("Setting memblock NUMA node nid[%d] "<br>
- "start[%lx] end[%lx]\n",<br>
+ "start[%llx] end[%lx]\n",<br>
nid, start, this_end);<br>
<br>
memblock_set_node(start, this_end - start,<br>
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c<br>
index 5b2a11a88951..2519ceede64b 100644<br>
--- a/drivers/bus/mvebu-mbus.c<br>
+++ b/drivers/bus/mvebu-mbus.c<br>
@@ -610,23 +610,23 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)<br>
static void __init<br>
mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)<br>
{<br>
- struct memblock_region *r;<br>
- uint64_t s = 0;<br>
+ phys_addr_t reg_start, reg_end;<br>
+ uint64_t i, s = 0;<br>
<br>
- for_each_memblock(memory, r) {<br>
+ for_each_mem_range(i, &reg_start, &reg_end) {<br>
/*<br>
* This part of the memory is above 4 GB, so we don't<br>
* care for the MBus bridge hole.<br>
*/<br>
- if (r->base >= 0x100000000ULL)<br>
+ if (reg_start >= 0x100000000ULL)<br>
continue;<br>
<br>
/*<br>
* The MBus bridge hole is at the end of the RAM under<br>
* the 4 GB limit.<br>
*/<br>
- if (r->base + r->size > s)<br>
- s = r->base + r->size;<br>
+ if (reg_end > s)<br>
+ s = reg_end;<br>
}<br>
<br>
*start = s;<br>
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c<br>
index 08f812475f5e..484b1ec9a1bc 100644<br>
--- a/drivers/s390/char/zcore.c<br>
+++ b/drivers/s390/char/zcore.c<br>
@@ -148,18 +148,19 @@ static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,<br>
<br>
static int zcore_memmap_open(struct inode *inode, struct file *filp)<br>
{<br>
- struct memblock_region *reg;<br>
+ phys_addr_t start, end;<br>
char *buf;<br>
int i = 0;<br>
+ u64 r;<br>
<br>
buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);<br>
if (!buf) {<br>
return -ENOMEM;<br>
}<br>
- for_each_memblock(memory, reg) {<br>
+ for_each_mem_range(r, &start, &end) {<br>
sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",<br>
- (unsigned long long) reg->base,<br>
- (unsigned long long) reg->size);<br>
+ (unsigned long long) start,<br>
+ (unsigned long long) (end - start));<br>
}<br>
filp->private_data = buf;<br>
return nonseekable_open(inode, filp);<br>
-- <br>
2.26.2<br>
<br>
<br>
_______________________________________________<br>
linux-riscv mailing list<br>
<a href="mailto:linux-riscv@lists.infradead.org" rel="noreferrer noreferrer" target="_blank">linux-riscv@lists.infradead.org</a><br>
<a href="http://lists.infradead.org/mailman/listinfo/linux-riscv" rel="noreferrer noreferrer noreferrer" target="_blank">http://lists.infradead.org/mailman/listinfo/linux-riscv</a><br>
</blockquote></div></div></div>