[Skiboot] [RFC PATCH 2/2] New LE OPAL calling convention

Nicholas Piggin npiggin at gmail.com
Mon Dec 2 18:43:16 AEDT 2019


This is a new LE calling convention for LE skiboot builds. OPAL is simply
called as an indirect function, with the first argument selecting the
OPAL call token and the subsequent arguments being the OPAL call arguments.

The caller's stack is used by OPAL.
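
To make the convention concrete, here is a rough caller-side sketch (not
part of this patch; the opal_v4_fn typedef and function names are made up,
and it glosses over the MSR state the entry currently asserts, i.e.
IR/DR/EE off):

#include <stdint.h>

/* Sketch: the v4 LE entry point as seen by a caller */
typedef int64_t (*opal_v4_fn)(uint64_t token,
			      uint64_t a1, uint64_t a2, uint64_t a3,
			      uint64_t a4, uint64_t a5, uint64_t a6,
			      uint64_t a7);

/* Assumed to be filled in from the "opal-v4-le-entry-address" property */
static opal_v4_fn opal_v4_entry;

static int64_t opal_v4_call(uint64_t token, uint64_t a1, uint64_t a2,
			    uint64_t a3, uint64_t a4, uint64_t a5,
			    uint64_t a6, uint64_t a7)
{
	/* A plain indirect call; OPAL runs on this caller's stack */
	return opal_v4_entry(token, a1, a2, a3, a4, a5, a6, a7);
}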

Linux tries very hard to ensure its stack is re-entrant across interrupts,
including non-maskable ones. We have currently hacked around this in skiboot
by using a different part of the skiboot stack if OPAL is re-entered, but
that is fragile and error-prone.

Currently relocation must be disabled before calling OPAL. This convention
can support relocation-on, provided that the kernel provides virtual
memory mappings to OPAL (which comes later). Altogether that would bring
the cost of an OPAL call down to something similar to a kernel module
function call.
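
For context only (not part of this patch), a consumer could discover the
entry point from the properties added below, roughly along these lines
against the Linux OF API; the probe function name is made up and error
handling is minimal:

#include <linux/of.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>

typedef s64 (*opal_v4_fn)(u64 token, u64 a1, u64 a2, u64 a3,
			  u64 a4, u64 a5, u64 a6, u64 a7);

static opal_v4_fn opal_v4_entry;

static int __init opal_v4_probe(void)
{
	struct device_node *opal;
	u64 addr;
	int rc = -ENODEV;

	opal = of_find_node_by_path("/ibm,opal");
	if (!opal)
		return rc;

	/* The v4 LE entry is only advertised by LE skiboot builds */
	if (of_device_is_compatible(opal, "ibm,opal-v4") &&
	    !of_property_read_u64(opal, "opal-v4-le-entry-address", &addr)) {
		opal_v4_entry = (opal_v4_fn)(unsigned long)addr;
		rc = 0;
	}

	of_node_put(opal);
	return rc;
}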
---
 core/cpu.c    |  9 +--------
 core/opal.c   | 39 +++++++++++++++++++++++++++++++++++++--
 include/cpu.h |  9 +++++++++
 3 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/core/cpu.c b/core/cpu.c
index d5b7d623b..0c13f29de 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -23,14 +23,7 @@
 /* The cpu_threads array is static and indexed by PIR in
  * order to speed up lookup from asm entry points
  */
-struct cpu_stack {
-	union {
-		uint8_t	stack[STACK_SIZE];
-		struct cpu_thread cpu;
-	};
-} __align(STACK_SIZE);
-
-static struct cpu_stack * const cpu_stacks = (struct cpu_stack *)CPU_STACKS_BASE;
+struct cpu_stack * const cpu_stacks = (struct cpu_stack *)CPU_STACKS_BASE;
 unsigned int cpu_thread_count;
 unsigned int cpu_max_pir;
 struct cpu_thread *boot_cpu;
diff --git a/core/opal.c b/core/opal.c
index da746e805..2d2ecab7b 100644
--- a/core/opal.c
+++ b/core/opal.c
@@ -371,6 +371,33 @@ static void add_opal_firmware_node(void)
 	add_opal_firmware_exports_node(firmware);
 }
 
+typedef int64_t (*opal_call_fn)(uint64_t r3, uint64_t r4, uint64_t r5,
+				uint64_t r6, uint64_t r7, uint64_t r8,
+				uint64_t r9);
+
+static int64_t opal_v4_le_entry(uint64_t r3, uint64_t r4, uint64_t r5,
+				uint64_t r6, uint64_t r7, uint64_t r8,
+				uint64_t r9, uint64_t r10)
+{
+	opal_call_fn *fn;
+	uint64_t pir;
+	uint64_t r16;
+
+	pir = mfspr(SPR_PIR);
+	r16 = (uint64_t)__this_cpu;
+	__this_cpu = &cpu_stacks[pir].cpu;
+
+	assert(!(mfmsr() & (MSR_IR|MSR_DR|MSR_EE)));
+
+	fn = (opal_call_fn *)(&opal_branch_table[r3]);
+
+	r3 = (*fn)(r4, r5, r6, r7, r8, r9, r10);
+
+	__this_cpu = (struct cpu_thread *)r16;
+
+	return r3;
+}
+
 void add_opal_node(void)
 {
 	uint64_t base, entry, size;
@@ -395,16 +422,24 @@ void add_opal_node(void)
 	dt_add_property_cells(opal_node, "#address-cells", 0);
 	dt_add_property_cells(opal_node, "#size-cells", 0);
 
-	if (proc_gen < proc_gen_p9)
+	if (proc_gen < proc_gen_p9) {
 		dt_add_property_strings(opal_node, "compatible", "ibm,opal-v2",
 					"ibm,opal-v3");
-	else
+	} else if (HAVE_LITTLE_ENDIAN) {
+		dt_add_property_strings(opal_node, "compatible", "ibm,opal-v3",
+					"ibm,opal-v4");
+	} else {
 		dt_add_property_strings(opal_node, "compatible", "ibm,opal-v3");
+	}
 
 	dt_add_property_cells(opal_node, "opal-msg-async-num", OPAL_MAX_ASYNC_COMP);
 	dt_add_property_cells(opal_node, "opal-msg-size", OPAL_MSG_SIZE);
 	dt_add_property_u64(opal_node, "opal-base-address", base);
 	dt_add_property_u64(opal_node, "opal-entry-address", entry);
+	if (HAVE_LITTLE_ENDIAN) {
+		dt_add_property_u64(opal_node, "opal-v4-le-entry-address",
+						(uint64_t)&opal_v4_le_entry);
+	}
 	dt_add_property_u64(opal_node, "opal-boot-address", (uint64_t)&boot_entry);
 	dt_add_property_u64(opal_node, "opal-runtime-size", size);
 
diff --git a/include/cpu.h b/include/cpu.h
index 9b7f41dfb..0ccbb2674 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -211,6 +211,15 @@ extern u8 get_available_nr_cores_in_chip(u32 chip_id);
 	for (core = first_available_core_in_chip(chip_id); core; \
 		core = next_available_core_in_chip(core, chip_id))
 
+struct cpu_stack {
+	union {
+		uint8_t	stack[STACK_SIZE];
+		struct cpu_thread cpu;
+	};
+} __align(STACK_SIZE);
+
+extern struct cpu_stack * const cpu_stacks;
+
 /* Return the caller CPU (only after init_cpu_threads) */
 register struct cpu_thread *__this_cpu asm("r16");
 static inline __nomcount struct cpu_thread *this_cpu(void)
-- 
2.23.0


