[PATCH] powerpc: Add hibernation support for FSL BookE processors

Anton Vorontsov avorontsov at ru.mvista.com
Thu Dec 31 04:42:44 EST 2009


This started as a set of modifications to swsusp_32.S, but the amount of
#ifdefs made the whole file horribly unreadable, so let's put the support
into its own separate file.

The code should be relatively easy to extend to cover 44x BookE as well,
but since I don't have any 44x hardware to test on, let's confine the code
to FSL BookE. (The only FSL-specific parts are flush_dcache_L1 and the TLB
invalidation code.)

Signed-off-by: Anton Vorontsov <avorontsov at ru.mvista.com>
---
 arch/powerpc/kernel/Makefile           |    8 +-
 arch/powerpc/kernel/swsusp_fsl_booke.S |  196 ++++++++++++++++++++++++++++++++
 2 files changed, 202 insertions(+), 2 deletions(-)
 create mode 100644 arch/powerpc/kernel/swsusp_fsl_booke.S

diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c002b04..e9d7601 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -57,8 +57,12 @@ obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_E500)		+= idle_e500.o
 obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
 obj-$(CONFIG_TAU)		+= tau_6xx.o
-obj-$(CONFIG_HIBERNATION)	+= swsusp.o suspend.o \
-				   swsusp_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_HIBERNATION)	+= swsusp.o suspend.o
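+# FSL BookE gets its own low-level hibernation code; all other
+# configurations keep using the generic 32/64-bit implementation.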
+ifeq ($(CONFIG_FSL_BOOKE),y)
+obj-$(CONFIG_HIBERNATION)	+= swsusp_fsl_booke.o
+else
+obj-$(CONFIG_HIBERNATION)	+= swsusp_$(CONFIG_WORD_SIZE).o
+endif
 obj64-$(CONFIG_HIBERNATION)	+= swsusp_asm64.o
 obj-$(CONFIG_MODULES)		+= module.o module_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_44x)		+= cpu_setup_44x.o
diff --git a/arch/powerpc/kernel/swsusp_fsl_booke.S b/arch/powerpc/kernel/swsusp_fsl_booke.S
new file mode 100644
index 0000000..3b35e8e
--- /dev/null
+++ b/arch/powerpc/kernel/swsusp_fsl_booke.S
@@ -0,0 +1,196 @@
+/*
+ * Based on swsusp_32.S, modified for FSL BookE by
+ * Anton Vorontsov <avorontsov at ru.mvista.com>
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ */
+
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mmu.h>
+
+/*
+ * Structure for storing CPU registers on the save area.
+ */
+#define SL_SP		0
+#define SL_PC		4
+#define SL_MSR		8
+#define SL_SDR1		0xc
+#define SL_SPRG0	0x10
+#define SL_SPRG1	0x14
+#define SL_SPRG2	0x18
+#define SL_SPRG3	0x1c
+#define SL_SPRG4	0x20
+#define SL_SPRG5	0x24
+#define SL_SPRG6	0x28
+#define SL_SPRG7	0x2c
+#define SL_TBU		0x30
+#define SL_TBL		0x34
+#define SL_R2		0x38
+#define SL_CR		0x3c
+#define SL_LR		0x40
+#define SL_R12		0x44	/* r12 to r31 */
+#define SL_SIZE		(SL_R12 + 80)
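+
+/*
+ * For illustration only: the offsets above describe a layout equivalent
+ * to the following (hypothetical) C structure. Only the SL_* constants
+ * are actually used by the code below.
+ *
+ *	struct swsusp_save_area {
+ *		u32 sp, pc, msr, sdr1;
+ *		u32 sprg[8];
+ *		u32 tbu, tbl;
+ *		u32 r2, cr, lr;
+ *		u32 r12_to_r31[20];
+ *	};
+ */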
+
+	.section .data
+	.align	5
+
+_GLOBAL(swsusp_save_area)
+	.space	SL_SIZE
+
+
+	.section .text
+	.align	5
+
+_GLOBAL(swsusp_arch_suspend)
+	lis	r11,swsusp_save_area@h
+	ori	r11,r11,swsusp_save_area@l
+
+	mflr	r0
+	stw	r0,SL_LR(r11)
+	mfcr	r0
+	stw	r0,SL_CR(r11)
+	stw	r1,SL_SP(r11)
+	stw	r2,SL_R2(r11)
+	stmw	r12,SL_R12(r11)
+
+	/* Save MSR & SDR1 */
+	mfmsr	r4
+	stw	r4,SL_MSR(r11)
+	mfsdr1	r4
+	stw	r4,SL_SDR1(r11)
+
+	/* Get a stable timebase and save it (retry if TBU changed meanwhile) */
+1:	mftbu	r4
+	stw	r4,SL_TBU(r11)
+	mftb	r5
+	stw	r5,SL_TBL(r11)
+	mftbu	r3
+	cmpw	r3,r4
+	bne	1b
+
+	/* Save SPRGs */
+	mfsprg	r4,0
+	stw	r4,SL_SPRG0(r11)
+	mfsprg	r4,1
+	stw	r4,SL_SPRG1(r11)
+	mfsprg	r4,2
+	stw	r4,SL_SPRG2(r11)
+	mfsprg	r4,3
+	stw	r4,SL_SPRG3(r11)
+	mfsprg	r4,4
+	stw	r4,SL_SPRG4(r11)
+	mfsprg	r4,5
+	stw	r4,SL_SPRG5(r11)
+	mfsprg	r4,6
+	stw	r4,SL_SPRG6(r11)
+	mfsprg	r4,7
+	stw	r4,SL_SPRG7(r11)
+
+	/* Call the low-level suspend code (we should probably have set
+	 * up a stack frame here...)
+	 */
+	bl	swsusp_save
+
+	/* Restore LR from the save area */
+	lis	r11,swsusp_save_area@h
+	ori	r11,r11,swsusp_save_area@l
+	lwz	r0,SL_LR(r11)
+	mtlr	r0
+
+	blr
+
+_GLOBAL(swsusp_arch_resume)
+	sync
+
+	/* Load the pointer to the list of pages to copy into r3 */
+	lis	r11,(restore_pblist)@h
+	ori	r11,r11,restore_pblist@l
+	lwz	r3,0(r11)
+
+	/* Copy the pages. This is a very basic implementation, to
+	 * be replaced by something more cache-efficient */
+1:
+	li	r0,256
+	mtctr	r0
+	lwz	r5,pbe_address(r3)	/* source */
+	lwz	r6,pbe_orig_address(r3)	/* destination */
+2:
+	lwz	r8,0(r5)
+	lwz	r9,4(r5)
+	lwz	r10,8(r5)
+	lwz	r11,12(r5)
+	addi	r5,r5,16
+	stw	r8,0(r6)
+	stw	r9,4(r6)
+	stw	r10,8(r6)
+	stw	r11,12(r6)
+	addi	r6,r6,16
+	bdnz	2b
+	lwz	r3,pbe_next(r3)
+	cmpwi	0,r3,0
+	bne	1b
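+
+	/*
+	 * For reference, the loop above is roughly the assembly version
+	 * of walking restore_pblist in C (struct pbe comes from
+	 * <linux/suspend.h>; sketch only, not compiled code):
+	 *
+	 *	struct pbe *p;
+	 *
+	 *	for (p = restore_pblist; p; p = p->next)
+	 *		memcpy(p->orig_address, p->address, PAGE_SIZE);
+	 */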
+
+	bl flush_dcache_L1
+	bl flush_instruction_cache
+
+	lis	r11,swsusp_save_area@h
+	ori	r11,r11,swsusp_save_area@l
+
+	lwz	r4,SL_SDR1(r11)
+	mtsdr1	r4
+	lwz	r4,SL_SPRG0(r11)
+	mtsprg	0,r4
+	lwz	r4,SL_SPRG1(r11)
+	mtsprg	1,r4
+	lwz	r4,SL_SPRG2(r11)
+	mtsprg	2,r4
+	lwz	r4,SL_SPRG3(r11)
+	mtsprg	3,r4
+	lwz	r4,SL_SPRG4(r11)
+	mtsprg	4,r4
+	lwz	r4,SL_SPRG5(r11)
+	mtsprg	5,r4
+	lwz	r4,SL_SPRG6(r11)
+	mtsprg	6,r4
+	lwz	r4,SL_SPRG7(r11)
+	mtsprg	7,r4
+
+	/* Invalidate TLB0 & TLB1 (0x04 requests invalidate-all, 0x08 selects TLB1) */
+	li	r6,0x04
+	tlbivax 0,r6
+	TLBSYNC
+	li	r6,0x0c
+	tlbivax 0,r6
+	TLBSYNC
+
+	/* Restore the MSR saved at suspend time */
+	lwz	r4,SL_MSR(r11)
+	mtmsr	r4
+
+	/* Restore TB */
+	li	r3,0
+	mttbl	r3
+	lwz	r3,SL_TBU(r11)
+	lwz	r4,SL_TBL(r11)
+	mttbu	r3
+	mttbl	r4
+
+	/* Kick the decrementer so a timer interrupt fires soon after resume */
+	li	r0,1
+	mtdec	r0
+
+	/* Restore the callee-saved registers and return */
+	lwz	r0,SL_CR(r11)
+	mtcr	r0
+	lwz	r2,SL_R2(r11)
+	lmw	r12,SL_R12(r11)
+	lwz	r1,SL_SP(r11)
+	lwz	r0,SL_LR(r11)
+	mtlr	r0
+
+	li	r3,0
+	blr
-- 
1.6.5.7

