PATCH: sched_{s,g}etaffinity compat

Anton Blanchard anton at samba.org
Mon May 10 13:01:53 EST 2004


Hi,

> On "real" hardware with NR_CPUS > sizeof(long)*8, the
> sys_sched_setaffinity and getaffinity compatibility functions break,
> because they just convert long masks instead of the full CPU masks.
>
> This patch fixes this problem.
>
> Spotted by a ppc32 glibc make check, on a ppc64 Kernel.

Unfortunately that's not enough :) We need to convert between 32-bit and
64-bit bitfields (just like we do in the 32-bit compat select call). Here
is a patch from Milton from a while ago that should do it.
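
Roughly, the conversion has to repack the 32-bit mask words a compat task
supplies into the kernel's 64-bit cpumask words, with compat word 0
covering CPUs 0-31, word 1 covering CPUs 32-63, and so on.  Something
like this userspace-style sketch (made-up names, and it leaves out the
access_ok/get_user handling the real patch needs):

#include <stdint.h>
#include <stddef.h>

/* Repack an array of 32-bit mask words into 64-bit mask words.
 * Bit N of the CPU mask is bit N%32 of src[N/32] on the way in
 * and bit N%64 of dst[N/64] on the way out. */
void repack_cpumask_32_to_64(uint64_t *dst, size_t ndst, const uint32_t *src)
{
	size_t i;

	for (i = 0; i < ndst; i++)
		dst[i] = (uint64_t)src[2 * i] |
			 ((uint64_t)src[2 * i + 1] << 32);
}

The getaffinity path just does the same split in the other direction.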

Anton

--

Patch from Milton Miller that adds the sched_{set,get}affinity syscalls to
the compat layer.

 gr16b-anton/kernel/compat.c |   88 +++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 79 insertions(+), 9 deletions(-)

diff -puN kernel/compat.c~compat_sys_sched_affinity kernel/compat.c
--- gr16b/kernel/compat.c~compat_sys_sched_affinity	2004-01-21 23:48:39.853282726 +1100
+++ gr16b-anton/kernel/compat.c	2004-01-21 23:48:39.861282640 +1100
@@ -381,6 +381,12 @@ compat_sys_wait4(compat_pid_t pid, compa
 	}
 }

+/* For maximum compatibility, we allow programs to use a single (compat)
+ * unsigned long bitmask if all cpus will fit.  If not, the supplied
+ * length must cover at least the kernel cpumask size.
+ */
+#define USE_COMPAT_ULONG_CPUMASK (NR_CPUS <= 8*sizeof(compat_ulong_t))
+
 extern asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 					    unsigned long *user_mask_ptr);

@@ -388,18 +394,54 @@ asmlinkage long compat_sys_sched_setaffi
 					     unsigned int len,
 					     compat_ulong_t *user_mask_ptr)
 {
-	unsigned long kernel_mask;
+	cpumask_t kernel_mask;
 	mm_segment_t old_fs;
 	int ret;

-	if (get_user(kernel_mask, user_mask_ptr))
-		return -EFAULT;
+	if (USE_COMPAT_ULONG_CPUMASK) {
+		compat_ulong_t user_mask;
+
+		if (len < sizeof(user_mask))
+			return -EINVAL;
+
+		if (get_user(user_mask, user_mask_ptr))
+			return -EFAULT;
+
+		kernel_mask = cpus_promote(user_mask);
+	} else {
+		if (len < sizeof(kernel_mask))
+			return -EINVAL;
+
+		if (!access_ok(VERIFY_READ, user_mask_ptr, sizeof(kernel_mask)))
+			return -EFAULT;
+		else {
+			int i, j;
+			unsigned long *k, m;
+			compat_ulong_t um;
+
+			k = &cpus_coerce(kernel_mask);
+
+			for (i=0; i < sizeof(kernel_mask)/sizeof(m); i++) {
+				m = 0;
+
+			for (j = 0; j < sizeof(m)/sizeof(um); j++) {
+				if (__get_user(um, user_mask_ptr))
+					return -EFAULT;
+				user_mask_ptr++;
+				/* compat word j supplies bits j*32..j*32+31
+				 * of the kernel word, matching the order
+				 * getaffinity writes them back out below */
+				m |= (unsigned long)um << (j * 8 * sizeof(um));
+			}
+				*k++ = m;
+			}
+		}
+	}

 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
 	ret = sys_sched_setaffinity(pid,
 				    sizeof(kernel_mask),
-				    &kernel_mask);
+				    (unsigned long *)&kernel_mask);
 	set_fs(old_fs);

 	return ret;
@@ -411,21 +453,49 @@ extern asmlinkage long sys_sched_getaffi
 asmlinkage int compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
 					    compat_ulong_t *user_mask_ptr)
 {
-	unsigned long kernel_mask;
+	cpumask_t kernel_mask;
 	mm_segment_t old_fs;
 	int ret;

+	if (len < (USE_COMPAT_ULONG_CPUMASK ? sizeof(compat_ulong_t)
+				: sizeof(kernel_mask)))
+		return -EINVAL;
+
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
 	ret = sys_sched_getaffinity(pid,
 				    sizeof(kernel_mask),
-				    &kernel_mask);
+				    (unsigned long *)&kernel_mask);
 	set_fs(old_fs);

 	if (ret > 0) {
-		ret = sizeof(compat_ulong_t);
-		if (put_user(kernel_mask, user_mask_ptr))
-			return -EFAULT;
+		if (USE_COMPAT_ULONG_CPUMASK) {
+			ret = sizeof(compat_ulong_t);
+			if (put_user(cpus_coerce(kernel_mask), user_mask_ptr))
+				return -EFAULT;
+		} else {
+			int i, j, err;
+			unsigned long *k, m;
+			compat_ulong_t um;
+
+			err = !access_ok(VERIFY_WRITE, user_mask_ptr, ret);
+
+			k = &cpus_coerce(kernel_mask);
+
+			for (i=0; i < sizeof(kernel_mask)/sizeof(m) && !err; i++) {
+				m = *k++;
+
+				for (j = 0; j < sizeof(m)/sizeof(compat_ulong_t) && !err; j++ ) {
+					um = m;
+					err |= __put_user(um, user_mask_ptr);
+					user_mask_ptr++;
+					/* shift in two halves so the count
+					 * stays below the word width even if
+					 * m and um are the same size */
+					m >>= 4*sizeof(compat_ulong_t);
+					m >>= 4*sizeof(compat_ulong_t);
+				}
+			}
+			if (err)
+				ret = -EFAULT;
+		}
 	}

 	return ret;

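For anyone who wants to poke at this from a 32-bit process, a quick test
along these lines should do (a rough sketch only; NR_CPUS_GUESS is a
made-up stand-in for the 64-bit kernel's NR_CPUS, and it uses the raw
syscall numbers from <sys/syscall.h> rather than the glibc wrappers):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_CPUS_GUESS 128	/* guess at the 64-bit kernel's NR_CPUS */

int main(void)
{
	unsigned long mask[NR_CPUS_GUESS / (8 * sizeof(unsigned long))];
	long len;

	/* with the patch, a too-short buffer gets -EINVAL rather than a
	 * partially converted mask */
	memset(mask, 0, sizeof(mask));
	len = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
	if (len < 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("kernel copied %ld mask bytes, word 0 = 0x%lx\n", len, mask[0]);

	/* bind ourselves to CPU 0 and write the full-sized mask back */
	memset(mask, 0, sizeof(mask));
	mask[0] = 1UL;
	if (syscall(SYS_sched_setaffinity, 0, sizeof(mask), mask) < 0) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}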

** Sent via the linuxppc64-dev mail list. See http://lists.linuxppc.org/