[PATCH][2.6] Nested Interrupt support

Jake Moilanen moilanen at austin.ibm.com
Thu Jan 15 02:32:26 EST 2004


The xics code is not behaving completely correctly.  When a hardware
interrupt is taken, the CPPR is changed to 0x5.  If, while this
interrupt is being processed, the CPU is interrupted by a higher
priority interrupt (e.g. an IPI), the IPI's EOI will write the CPPR
back to 0xff instead of the value it was at when it interrupted the
hardware interrupt (0x5).
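
A minimal user-space sketch of the bookkeeping the patch adds (my own
illustration, not the kernel code; the helper names accept_irq and
eoi_priority are made up, and the 0x5/0xff values follow the
description above): remember the CPPR that was in effect when each
interrupt was accepted, and restore that value on EOI instead of
unconditionally writing 0xff.

#include <stdio.h>

#define IRQ_DEPTH 2	/* nesting levels tracked, as in the patch */

struct cpu_irq_stack {
	int depth;
	int priority[IRQ_DEPTH];
};

static struct cpu_irq_stack stack;

/* The XIRR read returns the previous CPPR in its top byte;
 * save it so the matching EOI can restore it. */
static void accept_irq(unsigned int xirr)
{
	stack.priority[stack.depth++] = xirr >> 24;
}

/* On EOI, pop the saved priority instead of writing 0xff. */
static unsigned int eoi_priority(void)
{
	return stack.priority[--stack.depth];
}

int main(void)
{
	accept_irq(0xff000010);	/* hw irq taken, old CPPR was 0xff */
	accept_irq(0x05000002);	/* IPI preempts, old CPPR was 0x05 */
	printf("IPI EOI restores CPPR 0x%02x\n", eoi_priority());
	printf("hw irq EOI restores CPPR 0x%02x\n", eoi_priority());
	return 0;
}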

One concern I have is that at the end of ppc_irq_dispatch_handler()
there is a check to see whether desc->handler went away because the
interrupt was disabled.  If the handler does go away,
desc->handler->end will not be called and the irq_stack will get out
of sync.  I could not find anywhere where this handler would actually
be removed (e.g. the function pointer set to zero).  Why is this code
still here?
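
If that check can really fire, one hypothetical guard (not part of the
patch, and the helper name irq_stack_push is made up) would at least
keep a missed ->end() from writing past the array; it only contains
the damage, it does not recover the lost pop.  It assumes struct
cpu_irq_stack exactly as defined in the patch below:

/* Hypothetical guard, not in the patch: if a skipped
 * desc->handler->end leaves pushes unmatched, refuse to write
 * past priority[IRQ_DEPTH-1] rather than corrupting memory. */
static void irq_stack_push(struct cpu_irq_stack *s, int irq, int prio)
{
	if (s->depth >= IRQ_DEPTH)
		return;		/* stack already out of sync; drop it */
	s->priority[s->depth] = prio;
	s->irq[s->depth] = irq;
	s->depth++;
}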

Thanks,
Jake




# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.1344  -> 1.1345 
#	arch/ppc64/kernel/xics.c	1.36    -> 1.37   
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 04/01/14	moilanen at threadlp13.austin.ibm.com	1.1345
# Nested interrupt support.
# --------------------------------------------
#
diff -Nru a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
--- a/arch/ppc64/kernel/xics.c	Wed Jan 14 09:07:59 2004
+++ b/arch/ppc64/kernel/xics.c	Wed Jan 14 09:07:59 2004
@@ -92,6 +92,21 @@
 static unsigned int default_server = 0xFF;
 static unsigned int default_distrib_server = 0;
 
+/* Number of nested IRQs we can store */
+#define IRQ_DEPTH 2
+
+struct cpu_irq_stack
+{
+	int depth;
+	int priority[IRQ_DEPTH];
+	int irq[IRQ_DEPTH];
+};
+
+struct cpu_irq_stack _irq_stack[NR_CPUS];
+
+#define irq_stack _irq_stack[smp_processor_id()]
+#define irq_stack_depth (irq_stack).depth
+
 /*
  * XICS only has a single IPI, so encode the messages per CPU
  */
@@ -293,20 +308,36 @@
 void xics_end_irq(unsigned int irq)
 {
 	int cpu = smp_processor_id();
+	unsigned int priority;
+
+	if (irq >= 0 && irq != irq_offset_up(xics_irq_8259_cascade)) {
+		irq_stack_depth--;
+		priority = irq_stack.priority[irq_stack_depth];
+	} else {
+		priority = 0xff;
+	}
 
 	iosync();
-	ops->xirr_info_set(cpu, ((0xff<<24) | (irq_offset_down(irq))));
+	ops->xirr_info_set(cpu, (priority<<24) | (irq_offset_down(irq)));
 
 }
 
 void xics_mask_and_ack_irq(u_int irq)
 {
 	int cpu = smp_processor_id();
+	unsigned int priority;
 
 	if (irq < irq_offset_value()) {
+		if (irq >= 0) {
+			irq_stack_depth--;
+			priority = irq_stack.priority[irq_stack_depth];
+		} else {
+			priority = 0xff;
+		}
+
 		i8259_pic.ack(irq);
 		iosync();
-		ops->xirr_info_set(cpu, ((0xff<<24) |
+		ops->xirr_info_set(cpu, ((priority<<24) |
 					 xics_irq_8259_cascade_real));
 		iosync();
 	}
@@ -316,10 +347,12 @@
 {
 	u_int cpu = smp_processor_id();
 	u_int vec;
+	u_int priority;
 	int irq;
 
 	vec = ops->xirr_info_get(cpu);
-	/*  (vec >> 24) == old priority */
+
+	priority = vec >> 24;
 	vec &= 0x00ffffff;
 
 	/* for sanity, this had better be < NR_IRQS - 16 */
@@ -336,6 +369,13 @@
 	} else {
 		irq = irq_offset_up(vec);
 	}
+
+	if (irq >= 0) {
+		irq_stack.priority[irq_stack_depth] = priority;
+		irq_stack.irq[irq_stack_depth] = irq;
+		irq_stack_depth++;
+	}
+
 	return irq;
 }
 
@@ -404,7 +444,7 @@
 
 void xics_init_IRQ(void)
 {
-	int i;
+	int i, j;
 	unsigned long intr_size = 0;
 	struct device_node *np;
 	uint *ireg, ilen, indx = 0;
@@ -522,6 +562,14 @@
 	xics_8259_pic.disable = i8259_pic.disable;
 	for (i = 0; i < 16; ++i)
 		get_real_irq_desc(i)->handler = &xics_8259_pic;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		_irq_stack[i].depth = 0;
+		for (j = 0; j < IRQ_DEPTH; j++) {
+			_irq_stack[i].priority[j] = 0xff;
+			_irq_stack[i].irq[j] = -1;
+		}
+	}
 
 	ops->cppr_info(boot_cpuid, 0xff);
 	iosync();

