[PATCH v2] powerpc: Merge hardirq stack and softirq stack

Christophe Leroy christophe.leroy at csgroup.eu
Fri Jul 8 00:23:03 AEST 2022


__do_IRQ() doesn't switch to the hardirq stack if we are already on the softirq stack.

do_softirq() bails out early, without doing anything, when we are already
in interrupt context.
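
For reference, a simplified sketch of the generic do_softirq() from
kernel/softirq.c (version-specific details such as the ksoftirqd checks
are elided here):

	void do_softirq(void)
	{
		__u32 pending;
		unsigned long flags;

		/* Already in hardirq or softirq context: do nothing,
		 * and in particular do not switch stacks. */
		if (in_interrupt())
			return;

		local_irq_save(flags);

		pending = local_softirq_pending();
		if (pending)
			do_softirq_own_stack();	/* switch to the softirq stack */

		local_irq_restore(flags);
	}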

invoke_softirq() runs on the task stack when it calls do_softirq_own_stack().
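
Likewise, a simplified sketch of invoke_softirq() from kernel/softirq.c
(again eliding version-specific details): on architectures without
CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK, such as powerpc, it is still on the
task stack and therefore defers to do_softirq_own_stack():

	static inline void invoke_softirq(void)
	{
		if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
	#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
			/* Already on the irq stack, which is nearly empty here. */
			__do_softirq();
	#else
			/* Called on the task stack, which may already be deep:
			 * switch to the dedicated softirq stack. */
			do_softirq_own_stack();
	#endif
		} else {
			wakeup_softirqd();
		}
	}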

So there is no situation where we switch from the hardirq stack to the
softirq stack, nor from the softirq stack to the hardirq stack.

It is therefore not necessary to have two stacks because they are
never used at the same time.

Merge both stacks into a new one called normirq_ctx.

Signed-off-by: Christophe Leroy <christophe.leroy at csgroup.eu>
---
 arch/powerpc/include/asm/irq.h |  3 +--
 arch/powerpc/kernel/irq.c      | 18 +++++++-----------
 arch/powerpc/kernel/process.c  |  6 +-----
 arch/powerpc/kernel/setup_32.c |  6 ++----
 arch/powerpc/kernel/setup_64.c |  6 ++----
 5 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 5c1516a5ba8f..137909a6e0c1 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -49,8 +49,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
  */
-extern void *hardirq_ctx[NR_CPUS];
-extern void *softirq_ctx[NR_CPUS];
+extern void *normirq_ctx[NR_CPUS];
 
 void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d50a18888bd9..0ac0e7ddf8ac 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -274,15 +274,14 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
 void __do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	void *cursp, *irqsp, *sirqsp;
+	void *cursp, *irqsp;
 
 	/* Switch to the irq stack to handle this */
 	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
-	irqsp = hardirq_ctx[raw_smp_processor_id()];
-	sirqsp = softirq_ctx[raw_smp_processor_id()];
+	irqsp = normirq_ctx[raw_smp_processor_id()];
 
 	/* Already there ? If not switch stack and call */
-	if (unlikely(cursp == irqsp || cursp == sirqsp))
+	if (unlikely(cursp == irqsp))
 		__do_irq(regs, current_stack_pointer);
 	else
 		call_do_irq(regs, irqsp);
@@ -305,10 +304,8 @@ static void __init vmap_irqstack_init(void)
 {
 	int i;
 
-	for_each_possible_cpu(i) {
-		softirq_ctx[i] = alloc_vm_stack();
-		hardirq_ctx[i] = alloc_vm_stack();
-	}
+	for_each_possible_cpu(i)
+		normirq_ctx[i] = alloc_vm_stack();
 }
 
 
@@ -330,12 +327,11 @@ void    *dbgirq_ctx[NR_CPUS] __read_mostly;
 void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
-void *softirq_ctx[NR_CPUS] __read_mostly;
-void *hardirq_ctx[NR_CPUS] __read_mostly;
+void *normirq_ctx[NR_CPUS] __read_mostly;
 
 void do_softirq_own_stack(void)
 {
-	call_do_softirq(softirq_ctx[smp_processor_id()]);
+	call_do_softirq(normirq_ctx[smp_processor_id()]);
 }
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0fbda89cd1bb..c17c974e5723 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2089,11 +2089,7 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 	unsigned long stack_page;
 	unsigned long cpu = task_cpu(p);
 
-	stack_page = (unsigned long)hardirq_ctx[cpu];
-	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
-		return 1;
-
-	stack_page = (unsigned long)softirq_ctx[cpu];
+	stack_page = (unsigned long)normirq_ctx[cpu];
 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
 		return 1;
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 813261789303..cad0e4fbdd4b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -158,10 +158,8 @@ void __init irqstack_early_init(void)
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
 	 * as the memblock is limited to lowmem by default */
-	for_each_possible_cpu(i) {
-		softirq_ctx[i] = alloc_stack();
-		hardirq_ctx[i] = alloc_stack();
-	}
+	for_each_possible_cpu(i)
+		normirq_ctx[i] = alloc_stack();
 }
 
 #ifdef CONFIG_VMAP_STACK
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2b2d0b0fbb30..2fe727e01937 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -717,10 +717,8 @@ void __init irqstack_early_init(void)
 	 * cannot afford to take SLB misses on them. They are not
 	 * accessed in realmode.
 	 */
-	for_each_possible_cpu(i) {
-		softirq_ctx[i] = alloc_stack(limit, i);
-		hardirq_ctx[i] = alloc_stack(limit, i);
-	}
+	for_each_possible_cpu(i)
+		normirq_ctx[i] = alloc_stack(limit, i);
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-- 
2.36.1