powerpc: Fix handling of fpscr on 64-bit

David Gibson david at gibson.dropbear.id.au
Thu Oct 27 16:27:25 EST 2005


Paulus, please apply to the merge tree.  This goes on top of sfr's
patch which uses the merged entry_*.S for ARCH=ppc64.

The recent merge of fpu.S broke the handling of fpscr for
ARCH=powerpc and CONFIG_PPC64=y.  FP registers could be corrupted,
leading to strange random application crashes.

The confusion arises because the thread_struct has (and requires) a
64-bit area to save the fpscr, since we use load/store double
instructions to get it into and out of the FPU.  However, only the
low 32 bits are actually used, so we want to treat it as a 32-bit
quantity when manipulating its bits, to avoid extra loads/stores on
32-bit.  This patch replaces the current definition with a structure
of two 32-bit quantities (pad and val), to make this as clear as
possible.  The 'val' field is used when manipulating bits, and the
structure itself is used when obtaining the address at which to load
or store the value to/from the FPU.
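
In C terms, the new layout and the two ways it gets used look roughly
like this (the real definitions are in the processor.h and fpu.S
hunks below; this summary is only illustrative):

	struct {			/* fpr ... fpscr must be contiguous */
		unsigned int pad;
		unsigned int val;	/* Floating point status */
	} fpscr;

	/* C code touches only the low 32 bits, via the 'val' field: */
	current->thread.fpscr.val = 0;

	/* while the asm loads/stores the whole 64-bit slot, e.g.: */
	/*	stfd	fr0,THREAD_FPSCR(r4)	*/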

While we're at it, consolidate the 4 (!) almost identical versions of
cvt_fd() and cvt_df() (arch/ppc/kernel/misc.S,
arch/ppc64/kernel/misc.S, arch/powerpc/kernel/misc_32.S,
arch/powerpc/kernel/misc_64.S) into a single version in fpu.S.  The
new version takes a pointer to the thread_struct and applies the
correct offset itself, rather than taking a pointer to the fpscr
field directly, again to avoid confusion about which field should be
used.
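
For reference, the consolidated interface and a typical call site end
up looking like this (both taken from the system.h and align.c hunks
below):

	extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
	extern void cvt_df(double *from, float *to, struct thread_struct *thread);

	/* e.g. in the alignment trap handler: */
	cvt_fd(&data.f, &data.d, &current->thread);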

Finally, this patch makes ARCH=ppc64 also use the consolidated fpu.S
code, which it previously did not.
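
Concretely, ARCH=ppc64 picks up the shared code via the Kconfig and
Makefile hunks below, i.e. roughly:

	config PPC_FPU
		def_bool y

	head-y += arch/powerpc/kernel/fpu.o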

Built for G5 (ARCH=ppc64 and ARCH=powerpc), 32-bit powermac (ARCH=ppc
and ARCH=powerpc) and Walnut (ARCH=ppc, CONFIG_MATH_EMULATION=y).
Booted on G5 (ARCH=powerpc) and things which previously fell over no
longer do.

Signed-off-by: David Gibson <dwg at au1.ibm.com>

Index: working-2.6/arch/powerpc/kernel/fpu.S
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/fpu.S	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/fpu.S	2005-10-27 16:01:41.000000000 +1000
@@ -48,7 +48,7 @@
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
 	SAVE_32FPRS(0, r4)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
+	stfd	fr0,THREAD_FPSCR(r4)
 	LDL	r5,PT_REGS(r4)
 	tophys(r5,r5)
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -71,7 +71,7 @@
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
 #endif
-	lfd	fr0,THREAD_FPSCR-4(r5)
+	lfd	fr0,THREAD_FPSCR(r5)
 	mtfsf	0xff,fr0
 	REST_32FPRS(0, r5)
 #ifndef CONFIG_SMP
@@ -104,7 +104,7 @@
 	CMPI	0,r5,0
 	SAVE_32FPRS(0, r3)
 	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
+	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
 	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	li	r3,MSR_FP|MSR_FE0|MSR_FE1
@@ -117,3 +117,28 @@
 	STL	r5,OFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
 	blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+_GLOBAL(cvt_fd)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
+
+_GLOBAL(cvt_df)
+	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
+	mtfsf	0xff,0
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
+	blr
Index: working-2.6/include/asm-powerpc/processor.h
===================================================================
--- working-2.6.orig/include/asm-powerpc/processor.h	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/include/asm-powerpc/processor.h	2005-10-27 16:01:41.000000000 +1000
@@ -162,10 +162,11 @@
 	unsigned long	dbcr1;
 #endif
 	double		fpr[32];	/* Complete floating point set */
-#ifdef CONFIG_PPC32
-	unsigned long	fpscr_pad;	/* fpr ... fpscr must be contiguous */
-#endif
-	unsigned long	fpscr;		/* Floating point status */
+	struct {			/* fpr ... fpscr must be contiguous */
+
+		unsigned int pad;
+		unsigned int val;	/* Floating point status */
+	} fpscr;
 	int		fpexc_mode;	/* floating-point exception mode */
 #ifdef CONFIG_PPC64
 	unsigned long	start_tb;	/* Start purr when proc switched in */
@@ -207,7 +208,7 @@
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {0}, \
-	.fpscr = 0, \
+	.fpscr = { .val = 0, }, \
 	.fpexc_mode = MSR_FE0|MSR_FE1, \
 }
 #endif
Index: working-2.6/arch/powerpc/kernel/Makefile
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/Makefile	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/Makefile	2005-10-27 16:01:41.000000000 +1000
@@ -29,7 +29,6 @@
 extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-$(CONFIG_PPC64)		+= entry_64.o
-extra-$(CONFIG_PPC_FPU)		+= fpu.o
 extra-y				+= vmlinux.lds
 
 obj-y				+= process.o init_task.o time.o \
@@ -51,7 +50,7 @@
 obj-$(CONFIG_PPC64)		+= traps.o process.o init_task.o time.o
 
 extra-$(CONFIG_PPC64)		+= entry_64.o
-fpux-$(CONFIG_PPC32)		+= fpu.o
-extra-$(CONFIG_PPC_FPU)		+= $(fpux-y)
 
 endif
+
+extra-$(CONFIG_PPC_FPU)		+= fpu.o
Index: working-2.6/arch/ppc64/Kconfig
===================================================================
--- working-2.6.orig/arch/ppc64/Kconfig	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc64/Kconfig	2005-10-27 16:01:41.000000000 +1000
@@ -197,6 +197,9 @@
 config POWER4
 	def_bool y
 
+config PPC_FPU
+	def_bool y
+
 config POWER4_ONLY
 	bool "Optimize for POWER4"
 	default n
Index: working-2.6/arch/ppc64/Makefile
===================================================================
--- working-2.6.orig/arch/ppc64/Makefile	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc64/Makefile	2005-10-27 16:01:41.000000000 +1000
@@ -81,6 +81,7 @@
 
 head-y := arch/ppc64/kernel/head.o
 head-y += arch/powerpc/kernel/entry_64.o
+head-y += arch/powerpc/kernel/fpu.o
 
 libs-y				+= arch/ppc64/lib/
 core-y				+= arch/ppc64/kernel/ arch/powerpc/kernel/
Index: working-2.6/arch/ppc64/kernel/head.S
===================================================================
--- working-2.6.orig/arch/ppc64/kernel/head.S	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc64/kernel/head.S	2005-10-27 16:01:41.000000000 +1000
@@ -81,7 +81,7 @@
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
-	b .__start_initialization_multiplatform
+	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
@@ -747,6 +747,7 @@
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+	.globl	fast_exception_return
 fast_exception_return:
 	ld	r12,_MSR(r1)
 	ld	r11,_NIP(r1)
@@ -858,62 +859,6 @@
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
Index: working-2.6/arch/ppc64/kernel/misc.S
===================================================================
--- working-2.6.orig/arch/ppc64/kernel/misc.S	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc64/kernel/misc.S	2005-10-27 16:01:41.000000000 +1000
@@ -451,25 +451,6 @@
 	sync
 	blr	
 
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
@@ -655,38 +636,6 @@
 	isync
 	blr
 
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r3)
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r3		/* disable FP for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_math@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
 #ifdef CONFIG_ALTIVEC
 
 #if 0 /* this has no callers for now */
Index: working-2.6/arch/powerpc/kernel/process.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/process.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/process.c	2005-10-27 16:01:41.000000000 +1000
@@ -665,7 +665,7 @@
 #endif
 #endif /* CONFIG_SMP */
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
Index: working-2.6/arch/powerpc/kernel/signal_32.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/signal_32.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/signal_32.c	2005-10-27 16:01:41.000000000 +1000
@@ -403,7 +403,7 @@
 		    ELF_NFPREG * sizeof(double)))
 		return 1;
 
-	current->thread.fpscr = 0;	/* turn off all fp exceptions */
+	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
 
 #ifdef CONFIG_ALTIVEC
 	/* save altivec registers */
Index: working-2.6/arch/ppc/math-emu/sfp-machine.h
===================================================================
--- working-2.6.orig/arch/ppc/math-emu/sfp-machine.h	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc/math-emu/sfp-machine.h	2005-10-27 16:01:41.000000000 +1000
@@ -166,7 +166,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
-#define __FPU_FPSCR	(current->thread.fpscr)
+#define __FPU_FPSCR	(current->thread.fpscr.val)
 
 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.
Index: working-2.6/arch/ppc64/kernel/signal.c
===================================================================
--- working-2.6.orig/arch/ppc64/kernel/signal.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc64/kernel/signal.c	2005-10-27 16:01:41.000000000 +1000
@@ -133,7 +133,7 @@
 	flush_fp_to_thread(current);
 
 	/* Make sure signal doesn't get spurrious FP exceptions */
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);
Index: working-2.6/arch/ppc/kernel/process.c
===================================================================
--- working-2.6.orig/arch/ppc/kernel/process.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc/kernel/process.c	2005-10-27 16:01:41.000000000 +1000
@@ -542,7 +542,7 @@
 		last_task_used_spe = NULL;
 #endif
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
Index: working-2.6/arch/ppc/kernel/traps.c
===================================================================
--- working-2.6.orig/arch/ppc/kernel/traps.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc/kernel/traps.c	2005-10-27 16:01:41.000000000 +1000
@@ -659,7 +659,7 @@
 			giveup_fpu(current);
 		preempt_enable();
 
-		fpscr = current->thread.fpscr;
+		fpscr = current->thread.fpscr.val;
 		fpscr &= fpscr << 22;	/* mask summary bits with enables */
 		if (fpscr & FPSCR_VX)
 			code = FPE_FLTINV;
Index: working-2.6/arch/powerpc/kernel/traps.c
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/traps.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/traps.c	2005-10-27 16:01:41.000000000 +1000
@@ -549,7 +549,7 @@
 
 	flush_fp_to_thread(current);
 
-	fpscr = current->thread.fpscr;
+	fpscr = current->thread.fpscr.val;
 
 	/* Invalid operation */
 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
Index: working-2.6/arch/powerpc/kernel/misc_32.S
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/misc_32.S	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/misc_32.S	2005-10-27 16:01:41.000000000 +1000
@@ -993,33 +993,6 @@
 	blr
 
 /*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
-/*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
  */
Index: working-2.6/arch/powerpc/kernel/misc_64.S
===================================================================
--- working-2.6.orig/arch/powerpc/kernel/misc_64.S	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/powerpc/kernel/misc_64.S	2005-10-27 16:01:41.000000000 +1000
@@ -462,25 +462,6 @@
 	sync
 	blr	
 
-
-_GLOBAL(cvt_fd)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r5)		/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,0(r5)
-	blr
-
 /*
  * identify_cpu and calls setup_cpu
  * In:	r3 = base of the cpu_specs array
Index: working-2.6/arch/ppc/kernel/align.c
===================================================================
--- working-2.6.orig/arch/ppc/kernel/align.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc/kernel/align.c	2005-10-27 16:01:41.000000000 +1000
@@ -375,7 +375,7 @@
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
+		cvt_fd(&data.f, &data.d, &current->thread);
 		preempt_enable();
 #else
 		return 0;
@@ -385,7 +385,7 @@
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&data.d, &data.f, &current->thread.fpscr);
+		cvt_df(&data.d, &data.f, &current->thread);
 		preempt_enable();
 #else
 		return 0;
Index: working-2.6/arch/ppc/kernel/misc.S
===================================================================
--- working-2.6.orig/arch/ppc/kernel/misc.S	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc/kernel/misc.S	2005-10-27 16:01:41.000000000 +1000
@@ -968,33 +968,6 @@
 	blr
 
 /*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,-4(r5)	/* load up fpscr value */
-	mtfsf	0xff,0
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	mffs	0		/* save new fpscr value */
-	stfd	0,-4(r5)
-	blr
-#endif
-
-/*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
  */
Index: working-2.6/arch/ppc64/kernel/align.c
===================================================================
--- working-2.6.orig/arch/ppc64/kernel/align.c	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/arch/ppc64/kernel/align.c	2005-10-27 16:01:41.000000000 +1000
@@ -313,7 +313,7 @@
 				/* Doing stfs, have to convert to single */
 				preempt_disable();
 				enable_kernel_fp();
-				cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
+				cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
 				disable_kernel_fp();
 				preempt_enable();
 			}
@@ -349,7 +349,7 @@
 				/* Doing lfs, have to convert to double */
 				preempt_disable();
 				enable_kernel_fp();
-				cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
+				cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
 				disable_kernel_fp();
 				preempt_enable();
 			}
Index: working-2.6/include/asm-ppc/system.h
===================================================================
--- working-2.6.orig/include/asm-ppc/system.h	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/include/asm-ppc/system.h	2005-10-27 16:01:41.000000000 +1000
@@ -82,8 +82,8 @@
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
Index: working-2.6/include/asm-ppc64/system.h
===================================================================
--- working-2.6.orig/include/asm-ppc64/system.h	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/include/asm-ppc64/system.h	2005-10-27 16:01:41.000000000 +1000
@@ -120,8 +120,8 @@
 extern void disable_kernel_altivec(void);
 extern void enable_kernel_altivec(void);
 extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
Index: working-2.6/include/asm-powerpc/system.h
===================================================================
--- working-2.6.orig/include/asm-powerpc/system.h	2005-10-27 16:01:06.000000000 +1000
+++ working-2.6/include/asm-powerpc/system.h	2005-10-27 16:01:41.000000000 +1000
@@ -132,8 +132,8 @@
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);

-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/people/dgibson


