[PATCH 06/13] Make swsusp_32.S usable for suspend-to-RAM.

Scott Wood <scottwood@freescale.com>
Tue May 8 04:29:49 EST 2007


This allows platform suspend code to reuse the generic state-saving
code by passing the address of its low-level suspend routine (in r3)
to the new do_suspend entry point.
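
For example, a board's suspend entry point could hand its low-level
sleep routine to the generic code like this (a sketch only; the
myboard_sleep name is made up for illustration):

	/* myboard_sleep is a hypothetical platform routine that
	 * actually enters the sleep state. */
	lis	r3, myboard_sleep@h
	ori	r3, r3, myboard_sleep@l
	bl	do_suspend

do_suspend saves the CPU state to swsusp_save_area and then branches
to the routine whose address it was given, just as swsusp_arch_suspend
does with swsusp_save.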

The resume path is modified so that non-hibernate callers skip
hibernate-specific bits, and so that callers can specify that the MMU is
off (and thus BATs should be restored).
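
A wakeup path that runs with the MMU disabled could then hand control
back to the generic code like this (again, myboard_wakeup is a
hypothetical name):

	/* Hypothetical wakeup entry, reached with the MMU off. */
	myboard_wakeup:
		li	r3, 1	/* nonzero: MMU is off, restore the BATs */
		b	do_resume

The hibernate path keeps passing r3 = 0, as swsusp_arch_resume does in
the patch below.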

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/Kconfig            |   10 +++++++
 arch/powerpc/kernel/Makefile    |    2 +-
 arch/powerpc/kernel/swsusp_32.S |   57 ++++++++++++++++++++++----------------
 3 files changed, 44 insertions(+), 25 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 49b1ea2..e51781e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -131,6 +131,16 @@ config PPC64_SWSUSP
 	depends on PPC64 && (BROKEN || (PPC_PMAC64 && EXPERIMENTAL))
 	default y
 
+config PPC_SUSPEND
+	bool
+	default n
+
+config PPC_SOFTWARE_SUSPEND
+	bool
+	depends on SOFTWARE_SUSPEND
+	select PPC_SUSPEND
+	default y
+
 menu "Processor support"
 choice
 	prompt "Processor Type"
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4dc73b8..13cc430 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -37,8 +37,8 @@ obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
 obj-$(CONFIG_TAU)		+= tau_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
-obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
 obj64-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_64.o swsusp_asm64.o
+obj32-$(CONFIG_PPC_SUSPEND)	+= swsusp_32.o
 obj32-$(CONFIG_MODULES)		+= module_32.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 69e8f86..bc6dbc8 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -40,8 +40,13 @@ _GLOBAL(swsusp_save_area)
 	.section .text
 	.align	5
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
 _GLOBAL(swsusp_arch_suspend)
+	lis	r3, swsusp_save@h
+	ori	r3, r3, swsusp_save@l
+#endif
 
+_GLOBAL(do_suspend)
 	lis	r11,swsusp_save_area@h
 	ori	r11,r11,swsusp_save_area@l
 
@@ -64,8 +69,8 @@ _GLOBAL(swsusp_arch_suspend)
 	stw	r4,SL_TB(r11)
 	mftb	r5
 	stw	r5,SL_TB+4(r11)
-	mftbu	r3
-	cmpw	r3,r4
+	mftbu	r6
+	cmpw	r6,r4
 	bne	1b
 
 	/* Save SPRGs */
@@ -119,7 +124,8 @@ _GLOBAL(swsusp_arch_suspend)
 	/* Call the low level suspend stuff (we should probably have made
 	 * a stackframe...
 	 */
-	bl	swsusp_save
+	mtctr	r3
+	bctrl
 
 	/* Restore LR from the save area */
 	lis	r11,swsusp_save_area@h
@@ -129,7 +135,7 @@ _GLOBAL(swsusp_arch_suspend)
 
 	blr
 
-
+#ifdef CONFIG_SOFTWARE_SUSPEND
 /* Resume code */
 _GLOBAL(swsusp_arch_resume)
 
@@ -212,6 +218,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	bdnz	1b
 	sync
 
+	li	r3, 0
+#endif
+
+	/* r3 = nonzero if the MMU is completely disabled and
+	 * BATs may be restored, zero otherwise.
+	 */
+_GLOBAL(do_resume)
 	/* Ok, we are now running with the kernel data of the old
 	 * kernel fully restored. We can get to the save area
 	 * easily now. As for the rest of the code, it assumes the
@@ -226,10 +239,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	bl	__restore_cpu_setup
 #endif
 	/* Restore the BATs, and SDR1.  Then we can turn on the MMU.
-	 * This is a bit hairy as we are running out of those BATs,
-	 * but first, our code is probably in the icache, and we are
-	 * writing the same value to the BAT, so that should be fine,
-	 * though a better solution will have to be found long-term
+	 * The BATs are only restored when r3 != 0 (i.e. the MMU is
+	 * off).
 	 */
 	lwz	r4,SL_SDR1(r11)
 	mtsdr1	r4
@@ -242,7 +253,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	lwz	r4,SL_SPRG0+12(r11)
 	mtsprg	3,r4
 
-#if 0
+	cmpwi	r3, 0
+	beq	1f
+
 	lwz	r4,SL_DBAT0(r11)
 	mtdbatu	0,r4
 	lwz	r4,SL_DBAT0+4(r11)
@@ -275,8 +288,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mtibatu	3,r4
 	lwz	r4,SL_IBAT3+4(r11)
 	mtibatl	3,r4
-#endif
 
+1:
 BEGIN_FTR_SECTION
 	li	r4,0
 	mtspr	SPRN_DBAT4U,r4
@@ -306,8 +319,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
 
 	/* restore the MSR and turn on the MMU */
 	lwz	r3,SL_MSR(r11)
-	bl	turn_on_mmu
-	tovirt(r11,r11)
+	lis	r4, 1f@h
+	ori	r4, r4, 1f@l
+
+	mtsrr0	r4
+	mtsrr1	r3
+	sync
+	isync
+	rfi
+
+1:	tovirt(r11, r11)
 
 	/* Restore TB */
 	li	r3,0
@@ -334,15 +355,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
 
 	li	r3,0
 	blr
-
-/* FIXME:This construct is actually not useful since we don't shut
- * down the instruction MMU, we could just flip back MSR-DR on.
- */
-turn_on_mmu:
-	mflr	r4
-	mtsrr0	r4
-	mtsrr1	r3
-	sync
-	isync
-	rfi
-
-- 
1.5.0.3
