[PATCH v3] powerpc/kdump: Add support for crashkernel CMA reservation
Sourabh Jain
sourabhjain at linux.ibm.com
Sat Oct 25 04:01:18 AEDT 2025
Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
crashkernel= command line option") and commit ab475510e042 ("kdump:
implement reserve_crashkernel_cma") added CMA support for kdump
crashkernel reservation.

Extend crashkernel CMA reservation support to powerpc.
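
For reference, the CMA portion is requested with the ",cma" suffix added
by the commits above. A purely illustrative command line (sizes made up;
the exact syntax is described in
Documentation/admin-guide/kernel-parameters.txt) might look like:

    crashkernel=256M crashkernel=512M,cma
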
The following changes are made to enable CMA reservation on powerpc:
- Parse and obtain the CMA reservation size along with the other
  crashkernel parameters.
- Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
- Include the CMA-reserved ranges (crashk_cma_ranges, sketched after this
  list) in the usable memory ranges for the kdump kernel to use.
- Exclude the CMA-reserved ranges from the crash kernel memory to
  prevent them from being exported through /proc/vmcore.
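
The crashk_cma_ranges/crashk_cma_cnt symbols referred to above are the
ones published by the generic reserve_crashkernel_cma() implementation.
A rough sketch of the shape this patch assumes (declarations paraphrased,
not copied from the generic header):

	/* Filled in by reserve_crashkernel_cma(); .end is assumed to be
	 * inclusive, matching how the loops below pass .start/.end around.
	 */
	extern struct range crashk_cma_ranges[];	/* kdump CMA areas */
	extern int crashk_cma_cnt;			/* number of valid entries */
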
With the introduction of the CMA crashkernel regions,
crash_exclude_mem_range() needs to be called multiple times to exclude
both crashk_res and the crashk_cma_ranges from the crash memory ranges.
To avoid repeating the logic that validates the mem_ranges size and
reallocates it when required, that logic is moved into a new wrapper
function, crash_exclude_mem_range_guarded().

To ensure a proper CMA reservation, reserve_crashkernel_cma() is called
after pageblock_order is initialized.
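
For context, the ordering requirement comes from CMA's minimum alignment,
which is derived from pageblock_order (as defined in include/linux/cma.h
in current mainline):

	/* A CMA area must be aligned to at least one pageblock, so
	 * pageblock_order has to be known before the region is reserved.
	 */
	#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
	#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
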
Cc: Jiri Bohac <jbohac at suse.cz>
Cc: Hari Bathini <hbathini at linux.ibm.com>
Cc: Madhavan Srinivasan <maddy at linux.ibm.com>
Cc: Mahesh Salgaonkar <mahesh at linux.ibm.com>
Cc: Michael Ellerman <mpe at ellerman.id.au>
Cc: Ritesh Harjani (IBM) <ritesh.list at gmail.com>
Cc: Shivang Upadhyay <shivangu at linux.ibm.com>
Signed-off-by: Sourabh Jain <sourabhjain at linux.ibm.com>
---
Changelog:
v1 -> v2:
- Fix build warnings; ensure the kernel builds when CONFIG_CMA is
  disabled
v2 -> v3:
- While excluding crashk_cma_ranges, reallocate mem_ranges if there is
  insufficient space to split a crash memory range
- Added a wrapper, crash_exclude_mem_range_guarded(), to simplify the
  logic for checking the mem_ranges size and handling reallocation
---
arch/powerpc/include/asm/kexec.h | 2 ++
arch/powerpc/kernel/setup-common.c | 4 ++-
arch/powerpc/kexec/core.c | 10 ++++++-
arch/powerpc/kexec/ranges.c | 44 ++++++++++++++++++++++--------
4 files changed, 47 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 4bbf9f699aaa..bd4a6c42a5f3 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -115,9 +115,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem
#ifdef CONFIG_CRASH_RESERVE
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void arch_reserve_crashkernel(void);
+extern void kdump_cma_reserve(void);
#else
static inline void arch_reserve_crashkernel(void) {}
static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
+static inline void kdump_cma_reserve(void) { }
#endif
#if defined(CONFIG_CRASH_DUMP)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 68d47c53876c..c8c42b419742 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -35,6 +35,7 @@
#include <linux/of_irq.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
+#include <asm/kexec.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/processor.h>
@@ -995,11 +996,12 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
/*
- * Reserve large chunks of memory for use by CMA for fadump, KVM and
+ * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM and
* hugetlb. These must be called after initmem_init(), so that
* pageblock_order is initialised.
*/
fadump_cma_init();
+ kdump_cma_reserve();
kvm_cma_reserve();
gigantic_hugetlb_cma_reserve();
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index d1a2d755381c..25744737eff5 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -33,6 +33,8 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
+static unsigned long long cma_size;
+
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
@@ -110,7 +112,7 @@ void __init arch_reserve_crashkernel(void)
/* use common parsing */
ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size,
- &crash_base, NULL, NULL, NULL);
+ &crash_base, NULL, &cma_size, NULL);
if (ret)
return;
@@ -130,6 +132,12 @@ void __init arch_reserve_crashkernel(void)
reserve_crashkernel_generic(crash_size, crash_base, 0, false);
}
+void __init kdump_cma_reserve(void)
+{
+ if (cma_size)
+ reserve_crashkernel_cma(cma_size);
+}
+
int __init overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
index 3702b0bdab14..c61aa96f0942 100644
--- a/arch/powerpc/kexec/ranges.c
+++ b/arch/powerpc/kexec/ranges.c
@@ -515,7 +515,7 @@ int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
*/
int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
- int ret;
+ int ret, i;
/*
* Early boot failure observed on guests when low memory (first memory
@@ -528,6 +528,13 @@ int get_usable_memory_ranges(struct crash_mem **mem_ranges)
if (ret)
goto out;
+ for (i = 0; i < crashk_cma_cnt; i++) {
+ ret = add_mem_range(mem_ranges, crashk_cma_ranges[i].start,
+ crashk_cma_ranges[i].end - crashk_cma_ranges[i].start + 1);
+ if (ret)
+ goto out;
+ }
+
ret = add_rtas_mem_range(mem_ranges);
if (ret)
goto out;
@@ -546,6 +553,23 @@ int get_usable_memory_ranges(struct crash_mem **mem_ranges)
#endif /* CONFIG_KEXEC_FILE */
#ifdef CONFIG_CRASH_DUMP
+static int crash_exclude_mem_range_guarded(struct crash_mem **mem_ranges,
+ unsigned long long mstart,
+ unsigned long long mend)
+{
+ struct crash_mem *tmem;
+
+ /* Reallocate memory ranges if there is no space to split ranges */
+ tmem = *mem_ranges;
+ if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
+ tmem = realloc_mem_ranges(mem_ranges);
+ if (!tmem)
+ return -ENOMEM;
+ }
+
+ return crash_exclude_mem_range(tmem, mstart, mend);
+}
+
/**
* get_crash_memory_ranges - Get crash memory ranges. This list includes
* first/crashing kernel's memory regions that
@@ -557,7 +581,6 @@ int get_usable_memory_ranges(struct crash_mem **mem_ranges)
int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
phys_addr_t base, end;
- struct crash_mem *tmem;
u64 i;
int ret;
@@ -582,19 +605,18 @@ int get_crash_memory_ranges(struct crash_mem **mem_ranges)
sort_memory_ranges(*mem_ranges, true);
}
- /* Reallocate memory ranges if there is no space to split ranges */
- tmem = *mem_ranges;
- if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
- tmem = realloc_mem_ranges(mem_ranges);
- if (!tmem)
- goto out;
- }
-
/* Exclude crashkernel region */
- ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
+ ret = crash_exclude_mem_range_guarded(mem_ranges, crashk_res.start, crashk_res.end);
if (ret)
goto out;
+ for (i = 0; i < crashk_cma_cnt; ++i) {
+ ret = crash_exclude_mem_range_guarded(mem_ranges, crashk_cma_ranges[i].start,
+ crashk_cma_ranges[i].end);
+ if (ret)
+ goto out;
+ }
+
/*
* FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
* regions are exported to save their context at the time of
--
2.51.0