[Skiboot] [PATCH 2/9] move the __this_cpu register to r16, reserve r13-r15
Nicholas Piggin
npiggin@gmail.com
Thu Feb 27 05:34:01 AEDT 2020
There have been several bugs between Linux and OPAL caused by both
using r13 for their primary per-CPU data address. This patch moves
OPAL to use r16 for this, and prevents the compiler from touching
r13-r15 (reserving r14 and r15 as well leaves room for Linux to adopt
additional fixed registers in the future).

This makes the code a little more robust, and may make crashes in
OPAL (or debugging with pdbg or in simulators) easier to work
through, because the PACA remains easily accessible in r13.

Later, if we allow interrupts (other than non-maskable ones) to be
taken while running in skiboot, this matters more: Linux's interrupt
return handler does not restore r13 when the interrupt was taken in
PR=0 state, which would corrupt skiboot's r13. Moving off r13 allows
for that possibility, although it will have to become a formal OPAL
ABI requirement if we rely on it.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
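Note (illustrative only, not part of the patch): the change relies on
GCC's global register variable extension together with -ffixed-r16,
so the per-CPU pointer lives permanently in r16 and reading it never
touches memory. A minimal sketch of that mechanism follows; the
set_this_cpu() helper is hypothetical.

struct cpu_thread;

/* Pin the per-CPU pointer to r16; requires building with -ffixed-r16. */
register struct cpu_thread *__this_cpu asm("r16");

/* Hypothetical early-boot helper: a single "mr r16,rN", no load/store. */
static inline void set_this_cpu(struct cpu_thread *cpu)
{
	__this_cpu = cpu;
}

static inline struct cpu_thread *this_cpu(void)
{
	return __this_cpu;
}
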
Makefile.main | 11 +++++++++--
asm/head.S | 36 ++++++++++++++++++------------------
asm/misc.S | 8 ++++----
include/cpu.h | 2 +-
4 files changed, 32 insertions(+), 25 deletions(-)
diff --git a/Makefile.main b/Makefile.main
index daca012be..f0213a312 100644
--- a/Makefile.main
+++ b/Makefile.main
@@ -96,7 +96,14 @@ endif
CFLAGS := -fno-strict-aliasing -pie -fpie -fno-pic -m64 -fno-asynchronous-unwind-tables
CFLAGS += -mcpu=power8
CFLAGS += -Wl,--oformat,elf64-powerpc -ggdb
-CFLAGS += $(call try-cflag,$(CC),-ffixed-r13)
+# r13,r14,r15 are preserved for OS to use as fixed registers.
+# These could be saved and restored in and out of skiboot, but it's more
+# robust to avoid touching them.
+CFLAGS += -ffixed-r13
+CFLAGS += -ffixed-r14
+CFLAGS += -ffixed-r15
+# r16 is skiboot's per-CPU data pointer.
+CFLAGS += -ffixed-r16
CFLAGS += $(call try-cflag,$(CC),-std=gnu11)
ifeq ($(LITTLE_ENDIAN),1)
@@ -127,7 +134,7 @@ endif
# Check if the new parametrized stack protector option is supported
# by gcc, otherwise disable stack protector
-STACK_PROT_CFLAGS := -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
+STACK_PROT_CFLAGS := -mstack-protector-guard=tls -mstack-protector-guard-reg=r16
STACK_PROT_CFLAGS += -mstack-protector-guard-offset=0
HAS_STACK_PROT := $(call test_cflag,$(CC),$(STACK_PROT_CFLAGS))
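Sketch (illustrative only, not part of the patch): with
-mstack-protector-guard=tls, -mstack-protector-guard-reg=r16 and
-mstack-protector-guard-offset=0, the compiler's canary checks read
the guard from offset 0 of whatever r16 points at, which is why
head.S stores the guard value at 0(%r16). The structure layout and
helper below are hypothetical, just to show the offset-0 requirement.

struct cpu_thread {
	unsigned long stack_guard;	/* must stay at offset 0 for guard-offset=0 */
	/* ... rest of the per-CPU state ... */
};

/* Hypothetical mirror of the head.S init: guard = base constant ^ per-thread value.
 * The constant here is made up for illustration. */
static inline void init_stack_guard(struct cpu_thread *cpu, unsigned long mix)
{
	cpu->stack_guard = 0xdeadf00dbaad300dULL ^ mix;
}
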
diff --git a/asm/head.S b/asm/head.S
index 0b4b1a5f0..143f8af53 100644
--- a/asm/head.S
+++ b/asm/head.S
@@ -25,7 +25,7 @@
addi stack_reg,stack_reg,EMERGENCY_CPU_STACKS_OFFSET@l;
#define GET_CPU() \
- clrrdi %r13,%r1,STACK_SHIFT
+ clrrdi %r16,%r1,STACK_SHIFT
#define SAVE_GPR(reg,sp) std %r##reg,STACK_GPR##reg(sp)
#define REST_GPR(reg,sp) ld %r##reg,STACK_GPR##reg(sp)
@@ -403,7 +403,7 @@ boot_entry:
* before relocation so we need to keep track of its location to wake
* them up.
*/
- mr %r15,%r30
+ mr %r18,%r30
/* Check if we need to copy ourselves up and update %r30 to
* be our new offset
@@ -449,7 +449,7 @@ boot_entry:
/* Tell secondaries to move to second stage (relocated) spin loop */
LOAD_IMM32(%r3, boot_flag - __head)
- add %r3,%r3,%r15
+ add %r3,%r3,%r18
li %r0,1
stw %r0,0(%r3)
@@ -464,18 +464,18 @@ boot_entry:
addi %r3,%r3,8
bdnz 1b
- /* Get our per-cpu pointer into r13 */
+ /* Get our per-cpu pointer into r16 */
GET_CPU()
#ifdef STACK_CHECK_ENABLED
/* Initialize stack bottom mark to 0, it will be updated in C code */
li %r0,0
- std %r0,CPUTHREAD_STACK_BOT_MARK(%r13)
+ std %r0,CPUTHREAD_STACK_BOT_MARK(%r16)
#endif
/* Initialize the stack guard */
LOAD_IMM64(%r3,STACK_CHECK_GUARD_BASE);
xor %r3,%r3,%r31
- std %r3,0(%r13)
+ std %r3,0(%r16)
/* Jump to C */
mr %r3,%r27
@@ -536,7 +536,7 @@ secondary_not_found:
b .
call_relocate:
- mflr %r14
+ mflr %r17
LOAD_IMM32(%r4,__dynamic_start - __head)
LOAD_IMM32(%r5,__rela_dyn_start - __head)
add %r4,%r4,%r30
@@ -545,7 +545,7 @@ call_relocate:
bl relocate
cmpwi %r3,0
bne 1f
- mtlr %r14
+ mtlr %r17
blr
1: /* Fatal relocate failure */
attn
@@ -592,12 +592,12 @@ reset_wakeup:
/* Get PIR */
mfspr %r31,SPR_PIR
- /* Get that CPU stack base and use it to restore r13 */
+ /* Get that CPU stack base and use it to restore r16 */
GET_STACK(%r1,%r31)
GET_CPU()
/* Restore original stack pointer */
- ld %r1,CPUTHREAD_SAVE_R1(%r13)
+ ld %r1,CPUTHREAD_SAVE_R1(%r16)
/* Restore more stuff */
lwz %r4,STACK_CR(%r1)
@@ -655,7 +655,7 @@ reset_fast_reboot_wakeup:
/* Get PIR */
mfspr %r31,SPR_PIR
- /* Get that CPU stack base and use it to restore r13 */
+ /* Get that CPU stack base and use it to restore r16 */
GET_STACK(%r1,%r31)
GET_CPU()
@@ -923,17 +923,17 @@ opal_entry:
std %r9,STACK_GPR9(%r1)
std %r10,STACK_GPR10(%r1)
- /* Save Token (r0), LR and r13 */
+ /* Save Token (r0), LR and r16 */
mflr %r12
std %r0,STACK_GPR0(%r1)
- std %r13,STACK_GPR13(%r1)
+ std %r16,STACK_GPR16(%r1)
std %r12,STACK_LR(%r1)
/* Get the CPU thread */
GET_CPU()
/* Store token in CPU thread */
- std %r0,CPUTHREAD_CUR_TOKEN(%r13)
+ std %r0,CPUTHREAD_CUR_TOKEN(%r16)
/* Mark the stack frame */
li %r12,STACK_ENTRY_OPAL_API
@@ -975,14 +975,14 @@ opal_entry:
bl opal_exit_check /* r3 is preserved */
/*
- * Restore r1 and r13 before decrementing in_opal_call.
- * Move per-cpu pointer to volatile r12, restore lr, r1, r13.
+ * Restore r1 and r16 before decrementing in_opal_call.
+ * Move per-cpu pointer to volatile r12, restore lr, r1, r16.
*/
.Lreturn:
ld %r12,STACK_LR(%r1)
mtlr %r12
- mr %r12,%r13
- ld %r13,STACK_GPR13(%r1)
+ mr %r12,%r16
+ ld %r16,STACK_GPR16(%r1)
ld %r1,STACK_GPR1(%r1)
.Lreject:
sync /* release barrier vs quiescing */
diff --git a/asm/misc.S b/asm/misc.S
index 647f60b26..9904b806f 100644
--- a/asm/misc.S
+++ b/asm/misc.S
@@ -213,7 +213,7 @@ enter_p8_pm_state:
bl pm_save_regs
/* Save stack pointer in struct cpu_thread */
- std %r1,CPUTHREAD_SAVE_R1(%r13)
+ std %r1,CPUTHREAD_SAVE_R1(%r16)
/* Winkle or nap ? */
cmpli %cr0,0,%r3,0
@@ -221,7 +221,7 @@ enter_p8_pm_state:
/* nap sequence */
ptesync
-0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
+0: ld %r0,CPUTHREAD_SAVE_R1(%r16)
cmpd cr0,%r0,%r0
bne 0b
PPC_INST_NAP
@@ -229,7 +229,7 @@ enter_p8_pm_state:
/* rvwinkle sequence */
1: ptesync
-0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
+0: ld %r0,CPUTHREAD_SAVE_R1(%r16)
cmpd cr0,%r0,%r0
bne 0b
PPC_INST_RVWINKLE
@@ -250,7 +250,7 @@ enter_p9_pm_state:
bl pm_save_regs
/* Save stack pointer in struct cpu_thread */
- std %r1,CPUTHREAD_SAVE_R1(%r13)
+ std %r1,CPUTHREAD_SAVE_R1(%r16)
mtspr SPR_PSSCR,%r3
PPC_INST_STOP
diff --git a/include/cpu.h b/include/cpu.h
index 686310d71..9b7f41dfb 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -212,7 +212,7 @@ extern u8 get_available_nr_cores_in_chip(u32 chip_id);
core = next_available_core_in_chip(core, chip_id))
/* Return the caller CPU (only after init_cpu_threads) */
-register struct cpu_thread *__this_cpu asm("r13");
+register struct cpu_thread *__this_cpu asm("r16");
static inline __nomcount struct cpu_thread *this_cpu(void)
{
return __this_cpu;
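
Usage sketch (illustrative, not part of the patch): callers keep
using this_cpu() exactly as before; only the underlying register
changes, so an access like the one below compiles to a load relative
to r16 rather than r13. The pir field name is assumed here.

static inline unsigned int current_pir(void)
{
	/* roughly: lwz rN,PIR_OFFSET(%r16) */
	return this_cpu()->pir;
}
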
--
2.23.0