Merge branch 'work.aio' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Jun 2018 07:11:40 +0000 (16:11 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Jun 2018 07:11:40 +0000 (16:11 +0900)
Pull aio fixes from Al Viro:
 "Assorted AIO followups and fixes"

* 'work.aio' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  eventpoll: switch to ->poll_mask
  aio: only return events requested in poll_mask() for IOCB_CMD_POLL
  eventfd: only return events requested in poll_mask()
  aio: mark __aio_sigset::sigmask const

fs/aio.c
fs/eventfd.c
fs/eventpoll.c
include/uapi/linux/aio_abi.h

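The common thread in the diffs below is the ->get_poll_head()/->poll_mask() pair of file_operations hooks: get_poll_head() hands the caller the wait queue to sleep on, and poll_mask() reports which of the requested events are currently ready. A minimal sketch of such a pair for a hypothetical character device follows; my_dev, my_dev_readable() and my_dev_writable() are illustrative only, and just the hook signatures are taken from the hunks below.

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    /* Hypothetical device: only the wait queue matters for this sketch. */
    struct my_dev {
            struct wait_queue_head wait;
            /* ... device state consulted by the illustrative helpers ... */
    };

    static struct wait_queue_head *my_get_poll_head(struct file *file,
                    __poll_t eventmask)
    {
            struct my_dev *dev = file->private_data;

            /* the caller queues itself on this wait queue */
            return &dev->wait;
    }

    static __poll_t my_poll_mask(struct file *file, __poll_t eventmask)
    {
            struct my_dev *dev = file->private_data;
            __poll_t mask = 0;

            if (my_dev_readable(dev))               /* illustrative helper */
                    mask |= EPOLLIN | EPOLLRDNORM;
            if (my_dev_writable(dev))               /* illustrative helper */
                    mask |= EPOLLOUT | EPOLLWRNORM;

            /* only report events the caller actually asked for */
            return mask & eventmask;
    }

    static const struct file_operations my_fops = {
            .get_poll_head  = my_get_poll_head,
            .poll_mask      = my_poll_mask,
            /* ... read/write/release as usual ... */
    };

Filtering the return value by eventmask is exactly the convention the eventfd and aio hunks below enforce.
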
index 134e5b635d643da8868b477a0990e5d3cdb4077e..e1d20124ec0e8698a1e8a5940537ff45f2e57d2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1661,7 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & req->events))
                return 0;
 
-       mask = file->f_op->poll_mask(file, req->events);
+       mask = file->f_op->poll_mask(file, req->events) & req->events;
        if (!mask)
                return 0;
 
@@ -1719,7 +1719,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
 
        spin_lock_irq(&ctx->ctx_lock);
        spin_lock(&req->head->lock);
-       mask = req->file->f_op->poll_mask(req->file, req->events);
+       mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
        if (!mask) {
                __add_wait_queue(req->head, &req->wait);
                list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
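
With both fs/aio.c hunks applied, the mask returned by ->poll_mask() is ANDed with req->events, so an IOCB_CMD_POLL completion can never carry readiness bits the submitter did not ask for, even if the underlying implementation over-reports. A hedged userspace sketch of the request this protects, issued as raw syscalls: it assumes 4.18-era headers that define IOCB_CMD_POLL, follows the 4.18-rc convention that the requested event mask is carried in iocb->aio_buf, and omits error handling.

    #include <linux/aio_abi.h>
    #include <sys/eventfd.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <poll.h>

    int main(void)
    {
            aio_context_t ctx = 0;
            struct iocb cb, *cbs[1] = { &cb };
            struct io_event ev;
            uint64_t one = 1;
            int efd = eventfd(0, 0);

            syscall(__NR_io_setup, 1, &ctx);

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = efd;
            cb.aio_lio_opcode = IOCB_CMD_POLL;
            cb.aio_buf = POLLIN;                    /* events we asked for */

            syscall(__NR_io_submit, ctx, 1, cbs);

            write(efd, &one, sizeof(one));          /* make the eventfd readable */
            syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
            /* ev.res is now a subset of the requested POLLIN, never a stray EPOLLOUT */

            syscall(__NR_io_destroy, ctx);
            return 0;
    }
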
index 61c9514da5e956a36a90e9df601eccfc01b9595a..ceb1031f1cac948e74a970f02058cfeb52d7a351 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -156,11 +156,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
        count = READ_ONCE(ctx->count);
 
        if (count > 0)
-               events |= EPOLLIN;
+               events |= (EPOLLIN & eventmask);
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
-               events |= EPOLLOUT;
+               events |= (EPOLLOUT & eventmask);
 
        return events;
 }
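
For reference, the conditions eventfd_poll_mask() tests map directly onto the eventfd counter semantics: the descriptor is readable once the counter is non-zero, writable as long as one more write cannot overflow it, and EPOLLERR, kept unconditional above, flags the case where the counter has reached ULLONG_MAX. An illustrative userspace view of that counter:

    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val = 3;
            int efd = eventfd(0, 0);        /* count == 0: not readable, writable */

            write(efd, &val, sizeof(val));  /* count == 3: readable and writable */
            read(efd, &val, sizeof(val));   /* count drained to 0: writable only */

            close(efd);
            return 0;
    }
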
index 67db22fe99c5ce8bf0ba606c0a45f221cbf69b38..ea4436f409fb005a16edeca3f49f29f955db0171 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -922,13 +922,17 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
        return 0;
 }
 
-static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
+static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
+               __poll_t eventmask)
 {
        struct eventpoll *ep = file->private_data;
-       int depth = 0;
+       return &ep->poll_wait;
+}
 
-       /* Insert inside our poll wait queue */
-       poll_wait(file, &ep->poll_wait, wait);
+static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+{
+       struct eventpoll *ep = file->private_data;
+       int depth = 0;
 
        /*
         * Proceed to find out if wanted events are really available inside
@@ -968,7 +972,8 @@ static const struct file_operations eventpoll_fops = {
        .show_fdinfo    = ep_show_fdinfo,
 #endif
        .release        = ep_eventpoll_release,
-       .poll           = ep_eventpoll_poll,
+       .get_poll_head  = ep_eventpoll_get_poll_head,
+       .poll_mask      = ep_eventpoll_poll_mask,
        .llseek         = noop_llseek,
 };
 
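The eventpoll conversion splits the old ep_eventpoll_poll() in two: ep_eventpoll_get_poll_head() simply exposes ep->poll_wait, while ep_eventpoll_poll_mask() keeps the depth-limited readiness scan. The path this serves is an epoll file being polled itself, for example when nested inside another epoll set; an illustrative userspace example using only the standard epoll API:

    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
            int inner = epoll_create1(0);
            int outer = epoll_create1(0);
            int efd = eventfd(0, 0);
            uint64_t one = 1;
            struct epoll_event ev = { .events = EPOLLIN }, out;

            ev.data.fd = efd;
            epoll_ctl(inner, EPOLL_CTL_ADD, efd, &ev);

            ev.data.fd = inner;
            epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);    /* poll an epoll fd */

            write(efd, &one, sizeof(one));  /* efd ready -> inner becomes readable */
            epoll_wait(outer, &out, 1, -1); /* outer reports EPOLLIN on inner */

            close(efd);
            close(inner);
            close(outer);
            return 0;
    }
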
index 75846164290e9acaa9b2fbead09bdebc5422104c..d00221345c1988ff59de79f47401903d560c55e0 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -109,7 +109,7 @@ struct iocb {
 #undef IFLITTLE
 
 struct __aio_sigset {
-       sigset_t __user *sigmask;
+       const sigset_t __user   *sigmask;
        size_t          sigsetsize;
 };
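
Marking __aio_sigset::sigmask const documents in the UAPI header that the kernel only reads the signal mask handed to io_pgetevents(). A hedged userspace sketch of filling the struct in, assuming 4.18-era headers that export struct __aio_sigset and __NR_io_pgetevents; note that sigsetsize is the kernel's sigset size (_NSIG / 8, i.e. 8 bytes on x86-64), not glibc's much larger sizeof(sigset_t).

    #include <signal.h>
    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Wait for one completion with SIGINT blocked for the duration. */
    static int wait_one(aio_context_t ctx, struct io_event *ev)
    {
            sigset_t blocked;
            struct __aio_sigset as;

            sigemptyset(&blocked);
            sigaddset(&blocked, SIGINT);

            as.sigmask = &blocked;          /* kernel only reads this, hence const */
            as.sigsetsize = 8;              /* kernel sigset_t size on x86-64 */

            return syscall(__NR_io_pgetevents, ctx, 1, 1, ev, NULL, &as);
    }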