[PATCH] x86/uaccess: Avoid barrier_nospec() in copy_from_user()
Josh Poimboeuf <jpoimboe@kernel.org>
Sat Oct 12 15:09:39 AEDT 2024
For x86-64, the barrier_nospec() in copy_from_user() is overkill and
painfully slow. Instead, use pointer masking to force the user pointer
to a non-kernel value even in speculative paths.
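
To illustrate the masking trick, here is a minimal user-space model mirroring
the mov/sar/or sequence annotated in getuser.S and putuser.S below. mask_addr()
and the example addresses are illustrative stand-ins for this sketch, not the
kernel's actual helper:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t mask_addr(uint64_t ptr)
{
	/*
	 * Replicate bit 63 across the word (0 for a user address,
	 * all-ones for a kernel address) and OR it back in, relying on
	 * the usual arithmetic right shift of signed values.
	 */
	uint64_t mask = (uint64_t)((int64_t)ptr >> 63);

	return ptr | mask;
}

int main(void)
{
	/* A user address passes through unchanged... */
	printf("%#llx\n", (unsigned long long)mask_addr(0x00007fffdeadb000ULL));
	/* ...while a kernel address becomes all-ones, i.e. non-canonical. */
	printf("%#llx\n", (unsigned long long)mask_addr(0xffff888012345678ULL));
	return 0;
}

So even if access_ok() mispredicts, the masked pointer is either a genuine user
address or the all-ones non-canonical value, which faults rather than touching
kernel data; the raw_copy_to_user() and clear_user() hunks below apply the same
masking to the destination before the store.
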
While at it, harden the x86 implementations of raw_copy_to_user() and
clear_user(): a speculative write in a mispredicted access_ok() branch to a
user-controlled kernel address can populate the rest of the affected
cache line with kernel data.
To avoid regressing powerpc, move the barrier_nospec() to the powerpc
raw_copy_from_user() implementation so there's no functional change.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/powerpc/include/asm/uaccess.h | 2 ++
arch/x86/include/asm/uaccess_64.h | 4 +++-
arch/x86/lib/getuser.S | 2 +-
arch/x86/lib/putuser.S | 2 +-
include/linux/uaccess.h | 6 ------
5 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4f5a46a77fa2..12abb8bf5eda 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>
+#include <asm/barrier.h>
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -341,6 +342,7 @@ static inline unsigned long raw_copy_from_user(void *to,
{
unsigned long ret;
+ barrier_nospec();
allow_read_from_user(from, n);
ret = __copy_tofrom_user((__force void __user *)to, from, n);
prevent_read_from_user(from, n);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index afce8ee5d7b7..39199eef26be 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -133,12 +133,14 @@ copy_user_generic(void *to, const void *from, unsigned long len)
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
+ src = mask_user_address(src);
return copy_user_generic(dst, (__force void *)src, size);
}
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
+ dst = mask_user_address(dst);
return copy_user_generic((__force void *)dst, src, size);
}
@@ -197,7 +199,7 @@ static __always_inline __must_check unsigned long __clear_user(void __user *addr
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
if (__access_ok(to, n))
- return __clear_user(to, n);
+ return __clear_user(mask_user_address(to), n);
return n;
}
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index d066aecf8aeb..094224ec9dca 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -39,7 +39,7 @@
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
- mov %rax, %rdx
+ mov %rax, %rdx /* mask_user_address() */
sar $63, %rdx
or %rdx, %rax
.else
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 975c9c18263d..09b7e37934ab 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -34,7 +34,7 @@
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
- mov %rcx, %rbx
+ mov %rcx, %rbx /* mask_user_address() */
sar $63, %rbx
or %rbx, %rcx
.else
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 39c7cf82b0c2..dda9725a9559 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -160,12 +160,6 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- /*
- * Ensure that bad access_ok() speculation will not
- * lead to nasty side effects *after* the copy is
- * finished:
- */
- barrier_nospec();
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
--
2.46.2