/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)
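
/*
 * Illustrative sketch, not part of the original header: the classic
 * pattern for temporarily lifting the address limit so kernel buffers
 * can be passed to code that expects __user pointers.  The callee name
 * is a made-up placeholder; the save/restore discipline is the point.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_uaccess_on(kernel_buf, len);	// hypothetical callee
 *	set_fs(old_fs);				// always restore, even on error
 */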

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
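
/*
 * Worked example of the overflow concern above (illustrative numbers):
 * with a constant size of 8 and addr == -4UL, "addr + 8" wraps around
 * to 4 and would sneak past an "addr + size > limit" test, while
 * "addr > limit - 8" cannot wrap (a sizeof() size never exceeds the
 * limit) and correctly rejects the bogus address.
 */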

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
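
/*
 * Illustrative usage sketch, not part of the original header ("arg" and
 * "nr" are assumed caller state): validate a user range once before a
 * series of unchecked accesses.
 *
 *	u32 __user *uarg = (u32 __user *)arg;
 *
 *	if (!access_ok(uarg, nr * sizeof(u32)))
 *		return -EFAULT;
 */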

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
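
/*
 * Sketch of the "__xxx" pattern just described (illustrative; "uvec",
 * "kvec" and "n" are assumptions): one access_ok() check amortized over
 * many unchecked accesses to the same user area.
 *
 *	if (!access_ok(uvec, n * sizeof(*uvec)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(kvec[i], uvec + i))
 *			return -EFAULT;
 */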

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
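
/*
 * For example (illustrative): __inttype(char) and __inttype(u32) both
 * evaluate to unsigned long, while __inttype(u64) on a 32-bit kernel,
 * where sizeof(u64) > sizeof(0UL), evaluates to unsigned long long.
 */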

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
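
/*
 * Illustrative usage sketch, not part of the original header ("uptr" is
 * an assumed "int __user *"):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */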

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax; clobbers %rbx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
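
/*
 * Illustrative usage sketch, not part of the original header ("status"
 * and "arg" are assumptions):
 *
 *	if (put_user(status, (int __user *)arg))
 *		return -EFAULT;
 */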

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64((__typeof__(*ptr))(x), ptr, label);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	__label__ __pu_label;					\
	int __pu_err = -EFAULT;					\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_label);	\
	__pu_err = 0;						\
__pu_label:							\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)		\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %"rtype"0,%1\n"			\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x, addr, itype, rtype, ltype, __puflab); \
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)
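
/*
 * Shape of the try/catch style (illustrative; these are normally
 * reached through the get_user_try/put_user_try wrappers below):
 *
 *	int err = 0;
 *
 *	uaccess_try {
 *		...unchecked user accesses...
 *	} uaccess_catch(err);	// any fault folds into err as -EFAULT
 */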

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
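
/*
 * Illustrative sketch pairing the two (not part of the original header;
 * "uaddr" is an assumed, already access_ok()-checked pointer and
 * FLAG_SEEN a hypothetical flag):
 *
 *	if (__get_user(val, uaddr))
 *		return -EFAULT;
 *	val |= FLAG_SEEN;
 *	if (__put_user(val, uaddr))
 *		return -EFAULT;
 */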

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
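
/*
 * Illustrative sketch of the *_ex style (the sigcontext-like field
 * names are assumptions):
 *
 *	int err = 0;
 *
 *	get_user_try {
 *		get_user_ex(regs->ip, &usc->ip);
 *		get_user_ex(regs->sp, &usc->sp);
 *	} get_user_catch(err);
 *
 *	return err;
 */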

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
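
/*
 * Illustrative usage sketch, futex-style (names are assumptions): try
 * to atomically replace "oldval" with "newval" at a user address,
 * retrying if another task raced in between.
 *
 *	u32 curval;
 *
 *	if (user_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval))
 *		return -EFAULT;
 *	if (curval != oldval)
 *		goto retry;		// hypothetical retry label
 */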

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check on the address.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic
702 * The "unsafe" user accesses aren't really "unsafe", but the naming
703 * is a big fat warning: you have to not only do the access_ok()
704 * checking before using them, but you have to surround them with the
705 * user_access_begin/end() pair.
707 static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
709 if (unlikely(!access_ok(ptr,len)))
714 #define user_access_begin(a,b) user_access_begin(a,b)
715 #define user_access_end() __uaccess_end()

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
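
/*
 * Illustrative usage sketch, not part of the original header ("uptr",
 * "a" and "b" are assumptions): one begin/end section covering several
 * unsafe accesses, with a shared fault label.
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uptr[0], efault);
 *	unsafe_put_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */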

#endif /* _ASM_X86_UACCESS_H */