[Skiboot] [PATCH] fast-reboot: parallel memory clearing

Nicholas Piggin npiggin at gmail.com
Wed Apr 18 13:55:13 AEST 2018


On Tue, 17 Apr 2018 20:11:41 +1000
Stewart Smith <stewart at linux.ibm.com> wrote:

> Nicholas Piggin <nicholas.piggin at gmail.com> writes:

> > Bonus points for scheduling memory clearing on to node local cores.
> > That looks like it will take a bit of work in the job scheduler code,
> > so probably best left to patch 2.  
> 
> Yeah, I initially started down that route and quickly realised that it
> needed multiple patches :)
> 

I made a start on the job scheduler side when I did the clearing patch,
before giving up on the bit you just did :)

Here's the patch if you want (not really tested). It'd be nice to put the
chip id into memory regions, then it seems like it should be pretty easy
to add on top of your patch.

I made it strict (run on this chip_id or return NULL) because I figured
firmware might get some particular requirements like that. But it's
easy to make it a fallback, just test if (!cpu) cpu = cpu_find_job_target(-1);
-- you'd have to put a fallback into the caller otherwise.

Thanks,
Nick

---
 core/cpu.c    | 77 ++++++++++++++++++++++++++++++++++++++++-----------
 include/cpu.h |  4 +++
 2 files changed, 65 insertions(+), 16 deletions(-)

diff --git a/core/cpu.c b/core/cpu.c
index 6826fee0..04de8c34 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -103,7 +103,11 @@ static void cpu_wake(struct cpu_thread *cpu)
 	}
 }
 
-static struct cpu_thread *cpu_find_job_target(void)
+/*
+ * If chip_id is >= 0, schedule the job on that node.
+ * Otherwise schedule the job anywhere.
+ */
+static struct cpu_thread *cpu_find_job_target(int32_t chip_id)
 {
 	struct cpu_thread *cpu, *best, *me = this_cpu();
 	uint32_t best_count;
@@ -123,6 +127,8 @@ static struct cpu_thread *cpu_find_job_target(void)
 	for_each_available_cpu(cpu) {
 		if (cpu == me || !cpu_is_thread0(cpu) || cpu->job_has_no_return)
 			continue;
+		if (chip_id >= 0 && cpu->chip_id != chip_id)
+			continue;
 		if (cpu->job_count)
 			continue;
 		lock(&cpu->job_lock);
@@ -142,6 +148,8 @@ static struct cpu_thread *cpu_find_job_target(void)
 	for_each_available_cpu(cpu) {
 		if (cpu == me || cpu->job_has_no_return)
 			continue;
+		if (chip_id >= 0 && cpu->chip_id != chip_id)
+			continue;
 		if (!best || cpu->job_count < best_count) {
 			best = cpu;
 			best_count = cpu->job_count;
@@ -164,6 +172,26 @@ static struct cpu_thread *cpu_find_job_target(void)
 	return NULL;
 }
 
+/* job_lock is held, returns with it released */
+static void queue_job_on_cpu(struct cpu_thread *cpu, struct cpu_job *job)
+{
+	/* That's bad, the job will never run */
+	if (cpu->job_has_no_return) {
+		prlog(PR_WARNING, "WARNING ! Job %s scheduled on CPU 0x%x"
+		      " which has a no-return job on its queue !\n",
+		      job->name, cpu->pir);
+		backtrace();
+	}
+	list_add_tail(&cpu->job_queue, &job->link);
+	if (job->no_return)
+		cpu->job_has_no_return = true;
+	else
+		cpu->job_count++;
+	if (pm_enabled)
+		cpu_wake(cpu);
+	unlock(&cpu->job_lock);
+}
+
 struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
 				const char *name,
 				void (*func)(void *data), void *data,
@@ -193,7 +221,7 @@ struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
 
 	/* Pick a candidate. Returns with target queue locked */
 	if (cpu == NULL)
-		cpu = cpu_find_job_target();
+		cpu = cpu_find_job_target(-1);
 	else if (cpu != this_cpu())
 		lock(&cpu->job_lock);
 	else
@@ -206,21 +234,38 @@ struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
 		return job;
 	}
 
-	/* That's bad, the job will never run */
-	if (cpu->job_has_no_return) {
-		prlog(PR_WARNING, "WARNING ! Job %s scheduled on CPU 0x%x"
-		      " which has a no-return job on its queue !\n",
-		      job->name, cpu->pir);
-		backtrace();
+	queue_job_on_cpu(cpu, job);
+
+	return job;
+}
+
+struct cpu_job *cpu_queue_job_on_node(uint32_t chip_id,
+				const char *name,
+				void (*func)(void *data), void *data)
+{
+	struct cpu_thread *cpu;
+	struct cpu_job *job;
+
+	job = zalloc(sizeof(struct cpu_job));
+	if (!job)
+		return NULL;
+	job->func = func;
+	job->data = data;
+	job->name = name;
+	job->complete = false;
+	job->no_return = false;
+
+	/* Pick a candidate. Returns with target queue locked */
+	cpu = cpu_find_job_target(chip_id);
+
+	/* No CPU available on that node; no lock is held here */
+	if (cpu == NULL) {
+		/* cpu is NULL: nothing to unlock. Caller must fall back. */
+		free(job);
+		return NULL;
 	}
-	list_add_tail(&cpu->job_queue, &job->link);
-	if (no_return)
-		cpu->job_has_no_return = true;
-	else
-		cpu->job_count++;
-	if (pm_enabled)
-		cpu_wake(cpu);
-	unlock(&cpu->job_lock);
+
+	queue_job_on_cpu(cpu, job);
 
 	return job;
 }
diff --git a/include/cpu.h b/include/cpu.h
index b7cd588d..175b7c28 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -281,6 +281,10 @@ static inline struct cpu_job *cpu_queue_job(struct cpu_thread *cpu,
 	return __cpu_queue_job(cpu, name, func, data, false);
 }
 
+extern struct cpu_job *cpu_queue_job_on_node(uint32_t chip_id,
+				       const char *name,
+				       void (*func)(void *data), void *data);
+
 
 /* Poll job status, returns true if completed */
 extern bool cpu_poll_job(struct cpu_job *job);
-- 
2.17.0



More information about the Skiboot mailing list