x86/uaccess: Remove unused __addr_ok() macro
[muen/linux.git] arch/x86/include/asm/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
        current->thread.addr_limit = fs;
        /* On user-mode return, check fs is correct */
        set_thread_flag(TIF_FSCHECK);
}
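
/*
 * Illustrative sketch, not part of this header: the classic save/override/
 * restore pattern around set_fs().  kernel_read_helper() is a made-up name
 * standing in for any code that performs uaccess internally on a kernel
 * buffer.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ret = kernel_read_helper(kbuf, len);
 *      set_fs(old_fs);
 */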

#define segment_eq(a, b)        ((a).seg == (b).seg)
#define user_addr_max() (current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}
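
/*
 * Worked example of the overflow concern above (illustrative only): with
 * limit == TASK_SIZE_MAX, a bogus addr of 0xfffffffffffffff8 and size 16
 * would wrap around if we computed addr + size, making the sum look small
 * and "valid".  Subtracting the constant size from the limit, or checking
 * addr < size after the addition, catches exactly this case.
 */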

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()       WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size)                                           \
({                                                                      \
        WARN_ON_IN_IRQ();                                               \
        likely(!__range_not_ok(addr, size, user_addr_max()));           \
})
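
/*
 * Usage sketch (hypothetical, not part of this header): access_ok() only
 * validates the range against the address limit; the access itself can
 * still fault, so the __-prefixed accessors below still report -EFAULT.
 *
 *      if (!access_ok(uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      if (__get_user(val, uptr))
 *              return -EFAULT;
 */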

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()        \
({                                      \
        stac();                         \
        barrier_nospec();               \
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
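
/*
 * For example, __inttype(char) and __inttype(u32) are unsigned long, while
 * __inttype(u64) is unsigned long long on 32-bit kernels (where sizeof(u64)
 * exceeds sizeof(unsigned long)) and unsigned long on 64-bit kernels.
 */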

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu),                \
                        ASM_CALL_CONSTRAINT                             \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})
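
/*
 * Usage sketch (hypothetical ioctl handler, not from this file):
 *
 *      int __user *uarg = (int __user *)arg;
 *      int val;
 *
 *      if (get_user(val, uarg))
 *              return -EFAULT;
 */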

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)                     \
        asm_volatile_goto("\n"                                  \
                     "1:        movl %%eax,0(%1)\n"             \
                     "2:        movl %%edx,4(%1)\n"             \
                     _ASM_EXTABLE_UA(1b, %l2)                   \
                     _ASM_EXTABLE_UA(2b, %l2)                   \
                     : : "A" (x), "r" (addr)                    \
                     : : label)

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
        __put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                /* __put_user_X() doesn't exist: bad sizes fail at link time */ \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})
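
/*
 * Usage sketch (hypothetical, mirroring the get_user() example above):
 *
 *      if (put_user(status, (int __user *)arg))
 *              return -EFAULT;
 */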

#define __put_user_size(x, ptr, size, label)                            \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_goto(x, ptr, "b", "b", "iq", label);         \
                break;                                                  \
        case 2:                                                         \
                __put_user_goto(x, ptr, "w", "w", "ir", label);         \
                break;                                                  \
        case 4:                                                         \
                __put_user_goto(x, ptr, "l", "k", "ir", label);         \
                break;                                                  \
        case 8:                                                         \
                __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)                      \
({                                                                      \
        __typeof__(ptr) __ptr = (ptr);                                  \
        asm volatile("\n"                                               \
                     "1:        movl %2,%%eax\n"                        \
                     "2:        movl %3,%%edx\n"                        \
                     "3:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        mov %4,%0\n"                            \
                     "  xorl %%eax,%%eax\n"                             \
                     "  xorl %%edx,%%edx\n"                             \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_UA(1b, 4b)                            \
                     _ASM_EXTABLE_UA(2b, 4b)                            \
                     : "=r" (retval), "=&A"(x)                          \
                     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
                       "i" (errret), "0" (retval));                     \
})

#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_UA(1b, 3b)                            \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_UA(1b, 3b)                            \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:xor"itype" %"rtype"0,%"rtype"0\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        __label__ __pu_label;                                   \
        int __pu_err = -EFAULT;                                 \
        __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_label);        \
        __pu_err = 0;                                           \
__pu_label:                                                     \
        __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        __inttype(*(ptr)) __gu_val;                                     \
        __uaccess_begin_nospec();                                       \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)            \
        asm_volatile_goto("\n"                                          \
                "1:     mov"itype" %"rtype"0,%1\n"                      \
                _ASM_EXTABLE_UA(1b, %l2)                                \
                : : ltype(x), "m" (__m(addr))                           \
                : : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)         \
        ({      __label__ __puflab;                                     \
                int __pufret = errret;                                  \
                __put_user_goto(x, addr, itype, rtype, ltype, __puflab); \
                __pufret = 0;                                           \
        __puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)    do {    \
        retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);       \
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current->thread.uaccess_err = 0;                                \
        __uaccess_begin();                                              \
        barrier();

#define uaccess_try_nospec do {                                         \
        current->thread.uaccess_err = 0;                                \
        __uaccess_begin_nospec();                                       \

#define uaccess_catch(err)                                              \
        __uaccess_end();                                                \
        (err) |= (current->thread.uaccess_err ? -EFAULT : 0);           \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
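
/*
 * Usage sketch (hypothetical, not from this file): one access_ok() check
 * covering a small user structure, followed by several __get_user() calls
 * into its fields.  'ureq' and 'struct foo_req' are made up for illustration.
 *
 *      struct foo_req __user *ureq = (struct foo_req __user *)arg;
 *
 *      if (!access_ok(ureq, sizeof(*ureq)))
 *              return -EFAULT;
 *      if (__get_user(cmd, &ureq->cmd) || __get_user(len, &ureq->len))
 *              return -EFAULT;
 */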

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
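
/*
 * Usage sketch (hypothetical, continuing the __get_user() example above):
 * after the same access_ok() check, results can be written back with
 * __put_user().
 *
 *      if (__put_user(status, &ureq->status))
 *              return -EFAULT;
 */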

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try_nospec
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
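
/*
 * Fuller sketch of the try/catch form (hedged; modelled on how the x86
 * signal code consumes these macros, with made-up field names):
 *
 *      int err = 0;
 *
 *      get_user_try {
 *              get_user_ex(sp, &frame->sp);
 *              get_user_ex(flags, &frame->flags);
 *      } get_user_catch(err);
 *
 *      if (err)
 *              return -EFAULT;
 */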

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __uaccess_begin_nospec();                                       \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE_UA(1b, 3b)                         \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE_UA(1b, 3b)                         \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE_UA(1b, 3b)                         \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE_UA(1b, 3b)                         \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok((ptr), sizeof(*(ptr))) ?                              \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})
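
/*
 * Usage sketch (hypothetical, futex-style): atomically replace a 32-bit
 * value in user memory only if it still holds the expected old value.
 * 'uaddr', 'oldval' and 'newval' are made up for illustration.
 *
 *      u32 cur;
 *
 *      if (user_atomic_cmpxchg_inatomic(&cur, uaddr, oldval, newval))
 *              return -EFAULT;
 *      if (cur != oldval)
 *              return -EAGAIN;
 */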

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return 0;
        __uaccess_begin();
        return 1;
}
#define user_access_begin(a, b) user_access_begin(a, b)
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr, label)  \
        __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __gu_err;                                                           \
        __inttype(*(ptr)) __gu_val;                                             \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        if (unlikely(__gu_err)) goto err_label;                                 \
} while (0)
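
/*
 * Usage sketch for the unsafe accessors (hypothetical, not from this file):
 * one user_access_begin() opens the access window, several unsafe_*_user()
 * calls run without further checks, and both exit paths must close the
 * window with user_access_end().
 *
 *      if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *              return -EFAULT;
 *      unsafe_get_user(lo, &uptr[0], efault);
 *      unsafe_get_user(hi, &uptr[1], efault);
 *      user_access_end();
 *      return 0;
 * efault:
 *      user_access_end();
 *      return -EFAULT;
 */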

#endif /* _ASM_X86_UACCESS_H */