[PATCHv8 3/5] powerpc/setup: Handle the case when boot_cpuid greater than nr_cpus
Pingfan Liu
piliu at redhat.com
Mon Oct 9 22:30:34 AEDT 2023
If the boot_cpuid is greater than or equal to nr_cpus, extra effort is
required to ensure the boot cpu is in cpu_present_mask. This can be
achieved by reserving the last slot for the boot cpu.
Note: the restriction on nr_cpus will be lifted with more effort in
subsequent patches.
Signed-off-by: Pingfan Liu <piliu at redhat.com>
Cc: Michael Ellerman <mpe at ellerman.id.au>
Cc: Nicholas Piggin <npiggin at gmail.com>
Cc: Christophe Leroy <christophe.leroy at csgroup.eu>
Cc: Mahesh Salgaonkar <mahesh at linux.ibm.com>
Cc: Wen Xiong <wenxiong at linux.ibm.com>
Cc: Baoquan He <bhe at redhat.com>
Cc: Ming Lei <ming.lei at redhat.com>
Cc: kexec at lists.infradead.org
To: linuxppc-dev at lists.ozlabs.org
---
arch/powerpc/kernel/setup-common.c | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 81291e13dec0..f9ef0a2666b0 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -454,8 +454,8 @@ struct interrupt_server_node {
void __init smp_setup_cpu_maps(void)
{
struct device_node *dn;
- int shift = 0, cpu = 0;
- int j, nthreads = 1;
+ int terminate, shift = 0, cpu = 0;
+ int j, bt_thread = 0, nthreads = 1;
int len;
struct interrupt_server_node *intserv_node, *n;
struct list_head *bt_node, head;
@@ -518,6 +518,7 @@ void __init smp_setup_cpu_maps(void)
for (j = 0 ; j < nthreads; j++) {
if (be32_to_cpu(intserv[j]) == boot_cpu_hwid) {
bt_node = &intserv_node->node;
+ bt_thread = j;
found_boot_cpu = true;
/*
* Record the round-shift between dt
@@ -537,11 +538,21 @@ void __init smp_setup_cpu_maps(void)
/* Select the primary thread, the boot cpu's slibing, as the logic 0 */
list_add_tail(&head, bt_node);
pr_info("the round shift between dt seq and the cpu logic number: %d\n", shift);
+ terminate = nr_cpu_ids;
list_for_each_entry(intserv_node, &head, node) {
+ j = 0;
+ /* Choose a start point to cover the boot cpu */
+ if (nr_cpu_ids - 1 < bt_thread) {
+ /*
+ * The processor core puts assumption on the thread id,
+ * not to breach the assumption.
+ */
+ terminate = nr_cpu_ids - 1;
+ }
avail = intserv_node->avail;
nthreads = intserv_node->len / sizeof(int);
- for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+ for (; j < nthreads && cpu < terminate; j++) {
set_cpu_present(cpu, avail);
set_cpu_possible(cpu, true);
cpu_to_phys_id[cpu] = be32_to_cpu(intserv_node->intserv[j]);
@@ -549,6 +560,14 @@ void __init smp_setup_cpu_maps(void)
j, cpu, be32_to_cpu(intserv_node->intserv[j]));
cpu++;
}
+ /* Online the boot cpu */
+ if (nr_cpu_ids - 1 < bt_thread) {
+ set_cpu_present(bt_thread, avail);
+ set_cpu_possible(bt_thread, true);
+ cpu_to_phys_id[bt_thread] = be32_to_cpu(intserv_node->intserv[bt_thread]);
+ DBG(" thread %d -> cpu %d (hard id %d)\n",
+ bt_thread, bt_thread, be32_to_cpu(intserv_node->intserv[bt_thread]));
+ }
}
list_for_each_entry_safe(intserv_node, n, &head, node) {
--
2.31.1
More information about the Linuxppc-dev
mailing list