ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
if (ret != 0)
goto error;
- ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
+ ret = copy_to_user(buf + sizeof(__s32), &magic, sizeof(__s32));
if (ret != 0)
goto error;
- ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
+ ret = copy_to_user(buf + 2 * sizeof(__s32),
+ &cur_op->tag,
+ sizeof(__u64));
if (ret != 0)
goto error;
- ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
- sizeof(struct orangefs_upcall_s));
+ ret = copy_to_user(buf + 2 * sizeof(__s32) + sizeof(__u64),
+ &cur_op->upcall,
+ sizeof(struct orangefs_upcall_s));
if (ret != 0)
goto error;
(unsigned int) MAX_DEV_REQ_DOWNSIZE);
return -EFAULT;
}
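
The four copy_to_user() calls above serialize a fixed header layout: protocol version, magic, tag, then the upcall payload. As a sketch only (this struct is hypothetical; the driver keeps the explicit offset arithmetic), the layout they produce is:

/* Hypothetical view of the buffer filled by the copies above.
 * No padding precedes tag: the two __s32 fields already align it
 * to an 8-byte boundary, so the field offsets match the arithmetic
 * in the calls (0, 4, 8, 16 on common ABIs). */
struct orangefs_req_header {
	__s32 proto_ver;
	__s32 magic;
	__u64 tag;
	struct orangefs_upcall_s upcall;
};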
-
+
if (!copy_from_iter_full(&head, head_size, iter)) {
gossip_err("%s: failed to copy head.\n", __func__);
return -EFAULT;
}
goto wakeup;
/*
- * We've successfully peeled off the head and the downcall.
+ * We've successfully peeled off the head and the downcall.
* Something has gone awry if total doesn't equal the
* sum of head_size, downcall_size and trailer_size.
*/
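
A sketch of the check this comment guards, with names as used in the surrounding function (the exact message and error path are not part of this hunk):

	if (total != head_size + downcall_size + op->downcall.trailer_size) {
		gossip_err("%s: sizes do not add up to total.\n", __func__);
		return -EFAULT;
	}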
wakeup:
/*
* Return to vfs waitqueue, and back to service_operation
- * through wait_for_matching_downcall.
+ * through wait_for_matching_downcall.
*/
spin_lock(&op->lock);
if (unlikely(op_is_cancel(op))) {
__s32 count;
};
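
For context, the compat translation below exists because the 32-bit descriptor stores its buffer pointer as a compat_uptr_t while the native descriptor uses a full-width pointer; the integer fields line up. A sketch of the two layouts (the native definition is reconstructed from the fields assigned later and is an assumption):

struct ORANGEFS_dev_map_desc32 {	/* layout seen by 32-bit userspace */
	compat_uptr_t ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};

struct ORANGEFS_dev_map_desc {		/* native layout (assumed) */
	void __user *ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};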
- static unsigned long translate_dev_map26(unsigned long args, long *error)
- {
- struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
- /*
- * Depending on the architecture, allocate some space on the
- * user-call-stack based on our expected layout.
- */
- struct ORANGEFS_dev_map_desc __user *p =
- compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
-
- *error = 0;
- /* get the ptr from the 32 bit user-space */
- if (get_user(addr, &p32->ptr))
- goto err;
- /* try to put that into a 64-bit layout */
- if (put_user(compat_ptr(addr), &p->ptr))
- goto err;
- /* copy the remaining fields */
- if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
- goto err;
- if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
- goto err;
- if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
- goto err;
- return (unsigned long)p;
- err:
- *error = -EFAULT;
- return 0;
- }
-
/*
* 32 bit user-space apps' ioctl handlers when the kernel module
* is compiled as a 64 bit one
unsigned long args)
{
long ret;
- unsigned long arg = args;
/* Check for properly constructed commands */
ret = check_ioctl_command(cmd);
if (ret < 0)
return ret;
if (cmd == ORANGEFS_DEV_MAP) {
- /*
- * convert the arguments to what we expect internally
- * in kernel space
- */
- arg = translate_dev_map26(args, &ret);
- if (ret < 0) {
- gossip_err("Could not translate dev map\n");
- return ret;
- }
+ struct ORANGEFS_dev_map_desc desc;
+ struct ORANGEFS_dev_map_desc32 d32;
+
+ if (copy_from_user(&d32, (void __user *)args, sizeof(d32)))
+ return -EFAULT;
+
+ desc.ptr = compat_ptr(d32.ptr);
+ desc.total_size = d32.total_size;
+ desc.size = d32.size;
+ desc.count = d32.count;
+ return orangefs_bufmap_initialize(&desc);
}
/* no other ioctl requires translation */
- return dispatch_ioctl_command(cmd, arg);
+ return dispatch_ioctl_command(cmd, args);
}
#endif /* CONFIG_COMPAT is in .config */
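
compat_ptr() is what widens the 32-bit address in the new code above. On most 64-bit architectures it is essentially a cast; the generic definition is roughly the following (s390 additionally clears the top bit):

static inline void __user *compat_ptr(compat_uptr_t uptr)
{
	return (void __user *)(unsigned long)uptr;
}

Copying the 32-bit layout into the kernel and widening it there avoids compat_alloc_user_space() entirely: no staging buffer on the user stack, one copy instead of several, and the native orangefs_bufmap_initialize() is called directly.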
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
siginfo_t const *kinfo)
{
- long err;
+ struct signalfd_siginfo new;
BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);
/*
* Unused members should be zero ...
*/
- err = __clear_user(uinfo, sizeof(*uinfo));
+ memset(&new, 0, sizeof(new));
/*
* If you change siginfo_t structure, please be sure
* this code is fixed accordingly.
*/
- err |= __put_user(kinfo->si_signo, &uinfo->ssi_signo);
- err |= __put_user(kinfo->si_errno, &uinfo->ssi_errno);
- err |= __put_user(kinfo->si_code, &uinfo->ssi_code);
+ new.ssi_signo = kinfo->si_signo;
+ new.ssi_errno = kinfo->si_errno;
+ new.ssi_code = kinfo->si_code;
switch (siginfo_layout(kinfo->si_signo, kinfo->si_code)) {
case SIL_KILL:
- err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
- err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
+ new.ssi_pid = kinfo->si_pid;
+ new.ssi_uid = kinfo->si_uid;
break;
case SIL_TIMER:
- err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
- err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
- err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
- err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
+ new.ssi_tid = kinfo->si_tid;
+ new.ssi_overrun = kinfo->si_overrun;
+ new.ssi_ptr = (long) kinfo->si_ptr;
+ new.ssi_int = kinfo->si_int;
break;
case SIL_POLL:
- err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
- err |= __put_user(kinfo->si_fd, &uinfo->ssi_fd);
+ new.ssi_band = kinfo->si_band;
+ new.ssi_fd = kinfo->si_fd;
break;
- case SIL_FAULT:
- err |= __put_user((long) kinfo->si_addr, &uinfo->ssi_addr);
-#ifdef __ARCH_SI_TRAPNO
- err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
-#endif
-#ifdef BUS_MCEERR_AO
+ case SIL_FAULT_BNDERR:
+ case SIL_FAULT_PKUERR:
/*
- * Other callers might not initialize the si_lsb field,
- * so check explicitly for the right codes here.
+ * Fall through to the SIL_FAULT case. Both SIL_FAULT_BNDERR
+ * and SIL_FAULT_PKUERR are only generated by faults that
+ * deliver them synchronously to userspace. In case someone
+ * injects one of these signals and signalfd catches it, treat
+ * it as SIL_FAULT.
*/
- if (kinfo->si_signo == SIGBUS &&
- kinfo->si_code == BUS_MCEERR_AO)
- err |= __put_user((short) kinfo->si_addr_lsb,
- &uinfo->ssi_addr_lsb);
+ case SIL_FAULT:
+ new.ssi_addr = (long) kinfo->si_addr;
+#ifdef __ARCH_SI_TRAPNO
+ new.ssi_trapno = kinfo->si_trapno;
#endif
-#ifdef BUS_MCEERR_AR
- /*
- * Other callers might not initialize the si_lsb field,
- * so check explicitly for the right codes here.
- */
- if (kinfo->si_signo == SIGBUS &&
- kinfo->si_code == BUS_MCEERR_AR)
- err |= __put_user((short) kinfo->si_addr_lsb,
- &uinfo->ssi_addr_lsb);
+ break;
+ case SIL_FAULT_MCEERR:
+ new.ssi_addr = (long) kinfo->si_addr;
+#ifdef __ARCH_SI_TRAPNO
+ new.ssi_trapno = kinfo->si_trapno;
#endif
+ new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
break;
case SIL_CHLD:
- err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
- err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
- err |= __put_user(kinfo->si_status, &uinfo->ssi_status);
- err |= __put_user(kinfo->si_utime, &uinfo->ssi_utime);
- err |= __put_user(kinfo->si_stime, &uinfo->ssi_stime);
+ new.ssi_pid = kinfo->si_pid;
+ new.ssi_uid = kinfo->si_uid;
+ new.ssi_status = kinfo->si_status;
+ new.ssi_utime = kinfo->si_utime;
+ new.ssi_stime = kinfo->si_stime;
break;
case SIL_RT:
- default:
/*
* This case catches also the signals queued by sigqueue().
*/
- err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
- err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
- err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
- err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
+ new.ssi_pid = kinfo->si_pid;
+ new.ssi_uid = kinfo->si_uid;
+ new.ssi_ptr = (long) kinfo->si_ptr;
+ new.ssi_int = kinfo->si_int;
+ break;
+ case SIL_SYS:
+ new.ssi_call_addr = (long) kinfo->si_call_addr;
+ new.ssi_syscall = kinfo->si_syscall;
+ new.ssi_arch = kinfo->si_arch;
break;
}
- return err ? -EFAULT: sizeof(*uinfo);
+ if (copy_to_user(uinfo, &new, sizeof(struct signalfd_siginfo)))
+ return -EFAULT;
+
+ return sizeof(*uinfo);
}
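
The rewrite trades a long chain of unchecked __put_user() calls (with errors OR-ed into err) for one zeroed stack struct and a single checked copy_to_user(): one user-access window instead of dozens, no reliance on the __-prefixed accessors, and the memset() still guarantees that fields the switch never assigns read back as zero. A minimal sketch of the idiom, with hypothetical names:

struct record {			/* hypothetical 128-byte uAPI record */
	__u32 a;
	__u32 b;
	__u8 pad[120];		/* must read as zero in userspace */
};

static int publish_record(struct record __user *urec, u32 a, u32 b)
{
	struct record rec;

	memset(&rec, 0, sizeof(rec));	/* no stray kernel stack bytes */
	rec.a = a;
	rec.b = b;
	return copy_to_user(urec, &rec, sizeof(rec)) ? -EFAULT : 0;
}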
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
.llseek = noop_llseek,
};
- static int do_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask,
- int flags)
+ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
{
- sigset_t sigmask;
struct signalfd_ctx *ctx;
/* Check the SFD_* constants for consistency. */
if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
return -EINVAL;
- if (sizemask != sizeof(sigset_t) ||
- copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
- return -EINVAL;
- sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
- signotset(&sigmask);
+ sigdelsetmask(mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+ signotset(mask);
if (ufd == -1) {
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->sigmask = sigmask;
+ ctx->sigmask = *mask;
/*
* When we call this, the initialization must be complete, since
return -EINVAL;
}
spin_lock_irq(&current->sighand->siglock);
- ctx->sigmask = sigmask;
+ ctx->sigmask = *mask;
spin_unlock_irq(&current->sighand->siglock);
wake_up(&current->sighand->signalfd_wqh);
SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
size_t, sizemask, int, flags)
{
- return do_signalfd4(ufd, user_mask, sizemask, flags);
+ sigset_t mask;
+
+ if (sizemask != sizeof(sigset_t) ||
+ copy_from_user(&mask, user_mask, sizeof(mask)))
+ return -EINVAL;
+ return do_signalfd4(ufd, &mask, flags);
}
SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
size_t, sizemask)
{
- return do_signalfd4(ufd, user_mask, sizemask, 0);
+ sigset_t mask;
+
+ if (sizemask != sizeof(sigset_t) ||
+ copy_from_user(&mask, user_mask, sizeof(mask)))
+ return -EINVAL;
+ return do_signalfd4(ufd, &mask, 0);
}
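
For reference, the calling convention these wrappers implement is the familiar one from userspace (the libc wrapper supplies the sizemask argument itself):

/* Userspace: consume SIGINT through a file descriptor. */
#include <sys/signalfd.h>
#include <signal.h>

int make_sigint_fd(void)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	/* Block normal delivery so the fd is the only consumer. */
	sigprocmask(SIG_BLOCK, &mask, NULL);
	return signalfd(-1, &mask, SFD_CLOEXEC | SFD_NONBLOCK);
}

Each read() on the returned fd yields one 128-byte struct signalfd_siginfo, filled by signalfd_copyinfo() above.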
#ifdef CONFIG_COMPAT
static long do_compat_signalfd4(int ufd,
- const compat_sigset_t __user *sigmask,
+ const compat_sigset_t __user *user_mask,
compat_size_t sigsetsize, int flags)
{
- sigset_t tmp;
- sigset_t __user *ksigmask;
+ sigset_t mask;
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
- if (get_compat_sigset(&tmp, sigmask))
- return -EFAULT;
- ksigmask = compat_alloc_user_space(sizeof(sigset_t));
- if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t)))
+ if (get_compat_sigset(&mask, user_mask))
return -EFAULT;
-
- return do_signalfd4(ufd, ksigmask, sizeof(sigset_t), flags);
+ return do_signalfd4(ufd, &mask, flags);
}
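
get_compat_sigset() hides the word-size fixup: a compat_sigset_t is an array of 32-bit words, so on big-endian 64-bit kernels each native word must be reassembled from two of them (on little-endian the layouts coincide and a plain copy suffices). Conceptually, for the big-endian case:

/* Conceptual sketch only; the real helper lives in kernel/compat.c. */
static int get_compat_sigset_sketch(sigset_t *set,
				    const compat_sigset_t __user *compat)
{
	compat_sigset_t v;
	unsigned int i;

	if (copy_from_user(&v, compat, sizeof(v)))
		return -EFAULT;
	for (i = 0; i < _NSIG_WORDS; i++)
		set->sig[i] = v.sig[2 * i] |
			      ((unsigned long)v.sig[2 * i + 1] << 32);
	return 0;
}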
COMPAT_SYSCALL_DEFINE4(signalfd4, int, ufd,
- const compat_sigset_t __user *, sigmask,
+ const compat_sigset_t __user *, user_mask,
compat_size_t, sigsetsize,
int, flags)
{
- return do_compat_signalfd4(ufd, sigmask, sigsetsize, flags);
+ return do_compat_signalfd4(ufd, user_mask, sigsetsize, flags);
}
COMPAT_SYSCALL_DEFINE3(signalfd, int, ufd,
- const compat_sigset_t __user *,sigmask,
+ const compat_sigset_t __user *, user_mask,
compat_size_t, sigsetsize)
{
- return do_compat_signalfd4(ufd, sigmask, sigsetsize, 0);
+ return do_compat_signalfd4(ufd, user_mask, sigsetsize, 0);
}
#endif
if (buffers <= PIPE_DEF_BUFFERS)
return 0;
- spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
- spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
+ spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL);
+ spd->partial = kmalloc_array(buffers, sizeof(struct partial_page),
+ GFP_KERNEL);
if (spd->pages && spd->partial)
return 0;
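
kmalloc_array() is the overflow-safe spelling of the open-coded multiplication it replaces: if the element count and size would overflow size_t, it returns NULL instead of silently allocating a short buffer. Its behaviour is approximately:

static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;		/* would wrap: refuse to allocate */
	return kmalloc(bytes, flags);
}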
vec = __vec;
if (nr_pages > PIPE_DEF_BUFFERS) {
- vec = kmalloc(nr_pages * sizeof(struct kvec), GFP_KERNEL);
+ vec = kmalloc_array(nr_pages, sizeof(struct kvec), GFP_KERNEL);
if (unlikely(!vec)) {
res = -ENOMEM;
goto out;
* For lack of a better implementation, implement vmsplice() to userspace
* as a simple copy of the pipe's pages to the user iov.
*/
- static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
- unsigned long nr_segs, unsigned int flags)
+ static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
+ unsigned int flags)
{
- struct pipe_inode_info *pipe;
- struct splice_desc sd;
- long ret;
- struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov = iovstack;
- struct iov_iter iter;
+ struct pipe_inode_info *pipe = get_pipe_info(file);
+ struct splice_desc sd = {
+ .total_len = iov_iter_count(iter),
+ .flags = flags,
+ .u.data = iter
+ };
+ long ret = 0;
- pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
- ret = import_iovec(READ, uiov, nr_segs,
- ARRAY_SIZE(iovstack), &iov, &iter);
- if (ret < 0)
- return ret;
-
- sd.total_len = iov_iter_count(&iter);
- sd.len = 0;
- sd.flags = flags;
- sd.u.data = &iter;
- sd.pos = 0;
-
if (sd.total_len) {
pipe_lock(pipe);
ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
pipe_unlock(pipe);
}
- kfree(iov);
return ret;
}
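
sd.u.data now carries the iov_iter straight to the per-buffer callback; pipe_to_user() (defined elsewhere in this file) is then little more than a copy_page_to_iter() call, roughly:

static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);

	return n == sd->len ? n : -EFAULT;
}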
* as splice-from-memory, where the regular splice is splice-from-file (or
* to file). In both cases the output is a pipe, naturally.
*/
- static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov,
- unsigned long nr_segs, unsigned int flags)
+ static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
+ unsigned int flags)
{
struct pipe_inode_info *pipe;
- struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov = iovstack;
- struct iov_iter from;
- long ret;
+ long ret = 0;
unsigned buf_flag = 0;
if (flags & SPLICE_F_GIFT)
if (!pipe)
return -EBADF;
- ret = import_iovec(WRITE, uiov, nr_segs,
- ARRAY_SIZE(iovstack), &iov, &from);
- if (ret < 0)
- return ret;
-
pipe_lock(pipe);
ret = wait_for_space(pipe, flags);
if (!ret)
- ret = iter_to_pipe(&from, pipe, buf_flag);
+ ret = iter_to_pipe(iter, pipe, buf_flag);
pipe_unlock(pipe);
if (ret > 0)
wakeup_pipe_readers(pipe);
- kfree(iov);
return ret;
}
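
The elided body of the SPLICE_F_GIFT test above presumably just selects the gift flag for the pipe buffers, which lets downstream consumers steal the pages rather than copy them:

	if (flags & SPLICE_F_GIFT)
		buf_flag = PIPE_BUF_FLAG_GIFT;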
+ static int vmsplice_type(struct fd f, int *type)
+ {
+ if (!f.file)
+ return -EBADF;
+ if (f.file->f_mode & FMODE_WRITE) {
+ *type = WRITE;
+ } else if (f.file->f_mode & FMODE_READ) {
+ *type = READ;
+ } else {
+ fdput(f);
+ return -EBADF;
+ }
+ return 0;
+ }
+
/*
* Note that vmsplice only really supports true splicing _from_ user memory
* to a pipe, not the other way around. Splicing from user memory is a simple
* Currently we punt and implement it as a normal copy, see pipe_to_user().
*
*/
- static long do_vmsplice(int fd, const struct iovec __user *iov,
- unsigned long nr_segs, unsigned int flags)
+ static long do_vmsplice(struct file *f, struct iov_iter *iter, unsigned int flags)
{
- struct fd f;
- long error;
-
if (unlikely(flags & ~SPLICE_F_ALL))
return -EINVAL;
- if (unlikely(nr_segs > UIO_MAXIOV))
- return -EINVAL;
- else if (unlikely(!nr_segs))
- return 0;
- error = -EBADF;
- f = fdget(fd);
- if (f.file) {
- if (f.file->f_mode & FMODE_WRITE)
- error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
- else if (f.file->f_mode & FMODE_READ)
- error = vmsplice_to_user(f.file, iov, nr_segs, flags);
-
- fdput(f);
- }
+ if (!iov_iter_count(iter))
+ return 0;
- return error;
+ if (iov_iter_rw(iter) == WRITE)
+ return vmsplice_to_pipe(f, iter, flags);
+ else
+ return vmsplice_to_user(f, iter, flags);
}
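
do_vmsplice() can recover the direction from the iter itself because import_iovec() records it at construction time; iov_iter_rw() simply reads it back. Conceptually:

/* Conceptual: an iov_iter remembers whether it was built for READ
 * (kernel to user) or WRITE (user to kernel). */
static inline unsigned char iov_iter_rw_sketch(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}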
- SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
+ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov,
unsigned long, nr_segs, unsigned int, flags)
{
- return do_vmsplice(fd, iov, nr_segs, flags);
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct iov_iter iter;
+ long error;
+ struct fd f;
+ int type;
+
+ f = fdget(fd);
+ error = vmsplice_type(f, &type);
+ if (error)
+ return error;
+
+ error = import_iovec(type, uiov, nr_segs,
+ ARRAY_SIZE(iovstack), &iov, &iter);
+ if (!error) {
+ error = do_vmsplice(f.file, &iter, flags);
+ kfree(iov);
+ }
+ fdput(f);
+ return error;
}
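
From userspace the call is unchanged, e.g. feeding a buffer into a pipe:

/* Userspace: move an iovec's worth of memory into a pipe. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>

ssize_t feed_pipe(int pipe_wr_fd, void *buf, size_t len)
{
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = len,
	};

	return vmsplice(pipe_wr_fd, &iov, 1, 0);
}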
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
unsigned int, nr_segs, unsigned int, flags)
{
- unsigned i;
- struct iovec __user *iov;
- if (nr_segs > UIO_MAXIOV)
- return -EINVAL;
- iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
- for (i = 0; i < nr_segs; i++) {
- struct compat_iovec v;
- if (get_user(v.iov_base, &iov32[i].iov_base) ||
- get_user(v.iov_len, &iov32[i].iov_len) ||
- put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
- put_user(v.iov_len, &iov[i].iov_len))
- return -EFAULT;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct iov_iter iter;
+ long error;
+ struct fd f;
+ int type;
+
+ f = fdget(fd);
+ error = vmsplice_type(f, &type);
+ if (error)
+ return error;
+
+ error = compat_import_iovec(type, iov32, nr_segs,
+ ARRAY_SIZE(iovstack), &iov, &iter);
+ if (!error) {
+ error = do_vmsplice(f.file, &iter, flags);
+ kfree(iov);
}
- return do_vmsplice(fd, iov, nr_segs, flags);
+ fdput(f);
+ return error;
}
#endif