[RFC/PATCH 2/2] handle cpu device node addition and removal

Nathan Lynch nathanl at austin.ibm.com
Wed Jan 26 16:11:05 EST 2005


Using the notifier chain introduced in the previous patch, handle
addition and removal of processors on pSeries LPAR.  The new notifier
call updates cpu_present_map and sets hw_cpu_id in the paca
appropriately.  Note that a single cpu device node can represent more
than one logical cpu being added or removed, to account for SMT
processors.
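
For reviewers who don't want to page through cpumask.h, here is a
rough standalone sketch of the window search pSeries_add_processor
does below when picking logical ids for the incoming threads.  It
models cpu_possible_map and cpu_present_map as plain 64-bit masks
instead of cpumask_t, and the function and variable names are mine,
so treat it as an illustration of the idea rather than the kernel
code:

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of the loop in pSeries_add_processor(): start with a mask
 * covering logical cpus 0..nthreads-1 and keep shifting it left by
 * nthreads until every bit in the window is possible-but-not-present,
 * or the window slides off the end of the map.
 */
static int find_thread_window(uint64_t possible, uint64_t present,
			      int nthreads)
{
	uint64_t candidate = possible & ~present;	/* free logical ids */
	uint64_t window = (1ULL << nthreads) - 1;	/* ids 0..nthreads-1 */
	int base = 0;

	while (window) {
		if ((window & candidate) == window)
			return base;	/* whole window is free */
		window <<= nthreads;	/* try the next aligned window */
		base += nthreads;
	}
	return -1;			/* no room left in the map */
}

int main(void)
{
	/* 8 possible cpus, 0-3 already present, hot-adding a 2-thread cpu */
	printf("new threads start at logical cpu %d\n",
	       find_thread_window(0xff, 0x0f, 2));	/* prints 4 */
	return 0;
}

The real code additionally bails out early when no possible-but-not-
present cpus remain, and BUG()s if a chosen id is somehow already
present.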

This allows us to stop abusing cpu_present_map, and lets us get rid of
find_physical_cpu_to_start, which has always been a bit dodgy.

I plan to move the code which updates cpu_present_map into the
generic cpu hotplug code someday, but I think this is a good
intermediate step for now.

Tested on Power5.

Signed-off-by: Nathan Lynch <nathanl at austin.ibm.com>


---


diff -puN arch/ppc64/kernel/pSeries_smp.c~cpu-dlpar-notifier arch/ppc64/kernel/pSeries_smp.c
--- linux-2.6.11-rc2-mm1/arch/ppc64/kernel/pSeries_smp.c~cpu-dlpar-notifier	2005-01-25 22:57:15.000000000 -0600
+++ linux-2.6.11-rc2-mm1-nathanl/arch/ppc64/kernel/pSeries_smp.c	2005-01-25 22:57:15.000000000 -0600
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
+#include <linux/notifier.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
@@ -125,54 +126,6 @@ void pSeries_cpu_die(unsigned int cpu)
 	paca[cpu].cpu_start = 0;
 }
 
-/* Search all cpu device nodes for an offline logical cpu.  If a
- * device node has a "ibm,my-drc-index" property (meaning this is an
- * LPAR), paranoid-check whether we own the cpu.  For each "thread"
- * of a cpu, if it is offline and has the same hw index as before,
- * grab that in preference.
- */
-static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
-{
-	struct device_node *np = NULL;
-	unsigned int best = -1U;
-
-	while ((np = of_find_node_by_type(np, "cpu"))) {
-		int nr_threads, len;
-		u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
-		u32 *tid = (u32 *)
-			get_property(np, "ibm,ppc-interrupt-server#s", &len);
-
-		if (!tid)
-			tid = (u32 *)get_property(np, "reg", &len);
-
-		if (!tid)
-			continue;
-
-		/* If there is a drc-index, make sure that we own
-		 * the cpu.
-		 */
-		if (index) {
-			int state;
-			int rc = rtas_get_sensor(9003, *index, &state);
-			if (rc != 0 || state != 1)
-				continue;
-		}
-
-		nr_threads = len / sizeof(u32);
-
-		while (nr_threads--) {
-			if (0 == query_cpu_stopped(tid[nr_threads])) {
-				best = tid[nr_threads];
-				if (best == old_hwindex)
-					goto out;
-			}
-		}
-	}
-out:
-	of_node_put(np);
-	return best;
-}
-
 /**
  * smp_startup_cpu() - start the given cpu
  *
@@ -189,25 +142,16 @@ static inline int __devinit smp_startup_
 	int status;
 	unsigned long start_here = __pa((u32)*((unsigned long *)
 					       pSeries_secondary_smp_init));
-	unsigned int pcpu;
+	unsigned int pcpu = get_hard_smp_processor_id(lcpu);
 
 	/* At boot time the cpus are already spinning in hold
 	 * loops, so nothing to do. */
  	if (system_state < SYSTEM_RUNNING)
 		return 1;
 
-	pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
-	if (pcpu == -1U) {
-		printk(KERN_INFO "No more cpus available, failing\n");
-		return 0;
-	}
-
 	/* Fixup atomic count: it exited inside IRQ handler. */
 	paca[lcpu].__current->thread_info->preempt_count	= 0;
 
-	/* At boot this is done in prom.c. */
-	paca[lcpu].hw_cpu_id = pcpu;
-
 	status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
 			   pcpu, start_here, lcpu);
 	if (status != 0) {
@@ -324,6 +268,117 @@ static struct smp_ops_t pSeries_xics_smp
 	.setup_cpu	= smp_xics_setup_cpu,
 };
 
+/*
+ * Update cpu_present_map and paca for a new cpu node.  Would like to
+ * move parts of this to generic code so that hotplug events are
+ * generated for each new cpu, but this is needed for now.
+ */
+static int pSeries_add_processor(struct device_node *node)
+{
+	unsigned int cpu;
+	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+	int err = 0, len, nthreads, i;
+	u32 *intserv;
+
+	intserv = (u32 *)get_property(node, "ibm,ppc-interrupt-server#s",
+								&len);
+	if (!intserv)
+		goto out;
+	nthreads = len / sizeof(u32);
+	for (i = 0; i < nthreads; i++)
+		cpu_set(i, tmp);
+
+	lock_cpu_hotplug();
+
+	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
+	err = -EINVAL;
+	if (cpus_empty(candidate_map))
+		goto out_unlock;
+
+	while (!cpus_empty(tmp))
+		if (cpus_subset(tmp, candidate_map))
+			/* Found a range where we can insert the new cpu(s) */
+			break;
+		else
+			cpus_shift_left(tmp, tmp, nthreads);
+
+	if (cpus_empty(tmp)) {
+		printk(KERN_INFO "Unable to find space in cpu_present_map for"
+		       " processor %s with %d thread(s)\n", node->name,
+		       nthreads);
+		goto out_unlock;
+	}
+
+	for_each_cpu_mask(cpu, tmp) {
+		BUG_ON(cpu_isset(cpu, cpu_present_map));
+		cpu_set(cpu, cpu_present_map);
+		set_hard_smp_processor_id(cpu, *intserv++);
+	}
+	err = 0;
+out_unlock:
+	unlock_cpu_hotplug();
+out:
+	return err;
+}
+
+/*
+ * Update present map for a cpu node which is going away, and set the
+ * "hard" id in the paca(s) to -1 to be consistent with boot time
+ * convention for non-present cpus.
+ */
+static int pSeries_remove_processor(struct device_node *node)
+{
+	unsigned int cpu;
+	int len, nthreads, i;
+	u32 *intserv = (u32 *)get_property(node, "ibm,ppc-interrupt-server#s",
+								&len);
+	if (!intserv)
+		return 0;
+
+	nthreads = len / sizeof(u32);
+
+	lock_cpu_hotplug();
+	for (i = 0; i < nthreads; i++) {
+		for_each_present_cpu(cpu) {
+			if (get_hard_smp_processor_id(cpu) == intserv[i]) {
+				BUG_ON(cpu_online(cpu));
+				cpu_clear(cpu, cpu_present_map);
+				set_hard_smp_processor_id(cpu, -1);
+				break;
+			}
+		}
+		if (cpu == NR_CPUS)
+			printk(KERN_WARNING "Could not find cpu to remove "
+			       "with physical id 0x%x\n", intserv[i]);
+	}
+	unlock_cpu_hotplug();
+	return 0;
+}
+
+static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *_node)
+{
+	struct device_node *node = _node;
+	int err = NOTIFY_OK;
+
+	switch (action) {
+	case OF_RECONFIG_ADD:
+		if (pSeries_add_processor(node))
+			err = NOTIFY_BAD;
+		break;
+	case OF_RECONFIG_REMOVE:
+		if (pSeries_remove_processor(node))
+			err = NOTIFY_BAD;
+		break;
+	default:
+		err = NOTIFY_DONE;
+	}
+	return err;
+}
+
+static struct notifier_block pSeries_smp_nb = {
+	.notifier_call = pSeries_smp_notifier,
+};
+
 /* This is called very early */
 void __init smp_init_pSeries(void)
 {
@@ -362,6 +416,9 @@ void __init smp_init_pSeries(void)
 		smp_ops->take_timebase = pSeries_take_timebase;
 	}
 
+	if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
+		register_of_reconfig_notifier(&pSeries_smp_nb);
+
 	DBG(" <- smp_init_pSeries()\n");
 }
 
diff -puN arch/ppc64/kernel/smp.c~cpu-dlpar-notifier arch/ppc64/kernel/smp.c
--- linux-2.6.11-rc2-mm1/arch/ppc64/kernel/smp.c~cpu-dlpar-notifier	2005-01-25 22:57:15.000000000 -0600
+++ linux-2.6.11-rc2-mm1-nathanl/arch/ppc64/kernel/smp.c	2005-01-25 22:57:15.000000000 -0600
@@ -526,14 +526,6 @@ void __init smp_cpus_done(unsigned int m
 	smp_ops->setup_cpu(boot_cpuid);
 
 	set_cpus_allowed(current, old_mask);
-
-	/*
-	 * We know at boot the maximum number of cpus we can add to
-	 * a partition and set cpu_possible_map accordingly. cpu_present_map
-	 * needs to match for the hotplug code to allow us to hot add
-	 * any offline cpus.
-	 */
-	cpu_present_map = cpu_possible_map;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU

_




