static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
- if (likely(access_ok(VERIFY_READ, from, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_READ, from, n))) {
+ kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ }
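	/* zero the uncopied tail: a short copy must not leak stale kernel data */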
if (unlikely(res))
memset(to + (n - res), 0, res);
	return res;
}
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ }
return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
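/*
 * A minimal sketch of the check_copy_size() helper that the rewritten
 * wrappers below rely on (the real definition lives in
 * <linux/thread_info.h>; names here follow the removed helpers above):
 */
static __always_inline bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __compiletime_object_size(addr);

	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_user_overflow(sz, bytes);	/* runtime WARN */
		else
			__bad_copy_user();	/* compile-time error */
		return false;
	}
	check_object_size(addr, bytes, is_source);	/* CONFIG_HARDENED_USERCOPY */
	return true;
}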
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- int sz = __compiletime_object_size(to);
-
- might_fault();
- kasan_check_write(to, n);
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(to, n, false);
+ if (likely(check_copy_size(to, n, false)))
n = _copy_from_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
return n;
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- int sz = __compiletime_object_size(from);
-
- kasan_check_read(from, n);
- might_fault();
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(from, n, true);
+ if (likely(check_copy_size(from, n, true)))
n = _copy_to_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
return n;
}
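/*
 * Usage sketch (hypothetical caller): because the size check is now
 * inlined, a constant length that exceeds the compile-time size of the
 * kernel object breaks the build instead of only warning at runtime:
 */
static inline int demo_get_args(void __user *uptr)
{
	char buf[8];

	if (copy_from_user(buf, uptr, sizeof(buf)))	/* sizes agree: ok */
		return -EFAULT;
	/* copy_from_user(buf, uptr, 16) would trip __bad_copy_user() */
	return 0;
}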
#ifdef CONFIG_COMPAT
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H
#include <linux/kernel.h>
+#include <linux/thread_info.h>
#include <uapi/linux/uio.h>
struct page;
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
		struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+static __always_inline __must_check
+size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, true)))
+ return bytes;
+ else
+ return _copy_to_iter(addr, bytes, i);
+}
+
+static __always_inline __must_check
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return bytes;
+ else
+ return _copy_from_iter(addr, bytes, i);
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return false;
+ else
+ return _copy_from_iter_full(addr, bytes, i);
+}
+
+static __always_inline __must_check
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return bytes;
+ else
+ return _copy_from_iter_nocache(addr, bytes, i);
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return false;
+ else
+ return _copy_from_iter_full_nocache(addr, bytes, i);
+}
+
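/*
 * Usage sketch (hypothetical ->read_iter() method): callers keep the
 * copy_*_iter() names and transparently get the size-checked wrappers;
 * the underscored variants do the actual iteration:
 */
static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	char tmp[64];

	memset(tmp, 0, sizeof(tmp));	/* stand-in payload */
	return copy_to_iter(tmp, sizeof(tmp), to);	/* checked inline */
}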
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
* copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
-static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
- struct iov_iter *i)
+#define _copy_from_iter_flushcache _copy_from_iter_nocache
+#endif
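/*
 * Design note: without CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE the flushcache
 * variant simply aliases the nocache one, so the single checked wrapper
 * below serves both configurations.
 */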
+
+static __always_inline __must_check
+size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
- return copy_from_iter_nocache(addr, bytes, i);
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return bytes;
+ else
+ return _copy_from_iter_flushcache(addr, bytes, i);
}
-#endif
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
} \
}
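/*
 * copyout()/copyin() are the raw primitives for this file: access_ok()
 * plus the KASAN annotation, but no object-size check and no
 * might_fault(); the callers below supply those where appropriate.
 */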
+static int copyout(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
+ n = raw_copy_to_user(to, from, n);
+ }
+ return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+ if (access_ok(VERIFY_READ, from, n)) {
+ kasan_check_write(to, n);
+ n = raw_copy_from_user(to, from, n);
+ }
+ return n;
+}
+
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
if (unlikely(!bytes))
return 0;
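+	/* the user copies below may fault and sleep */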
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
from = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
kaddr = kmap(page);
from = kaddr + offset;
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
to = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
kaddr = kmap(page);
to = kaddr + offset;
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
return bytes;
}
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
if (unlikely(i->type & ITER_PIPE))
return copy_pipe_to_iter(addr, bytes, i);
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
+ copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
	memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)
return bytes;
}
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
+ copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
	memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
if (unlikely(i->count < bytes))
return false;
+ if (iter_is_iovec(i))
+ might_fault();
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user((to += v.iov_len) - v.iov_len,
+ if (copyin((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
	0;}),
	memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
			 v.bv_offset, v.bv_len),
	memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
return bytes;
}
-EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
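+/*
+ * Guard against offset + n wrapping around, and against ranges that run
+ * past the end of a (possibly compound) page.
+ */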
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+ size_t v = n + offset;
+ if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+ return true;
+ WARN_ON(1);
+ return false;
+}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
- size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+ size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
} else
if (unlikely(i->type & ITER_PIPE))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
- __clear_user(v.iov_base, v.iov_len),
+ clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
memset(v.iov_base, 0, v.iov_len)
)
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+ if (unlikely(!page_copy_sane(page, offset, bytes))) {
+ kunmap_atomic(kaddr);
+ return 0;
+ }
if (unlikely(i->type & ITER_PIPE)) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
}
iterate_all_kinds(i, bytes, v,
- __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
+ copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)