author    Linus Torvalds <torvalds@linux-foundation.org>    2018-02-11 14:34:03 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-02-11 14:34:03 -0800
commit    a9a08845e9acbd224e4ee466f5c1275ed50054e8 (patch)
tree      415d6e6a82e001c65e6b161539411f54ba5fe8ce /fs
parent    ee5daa1361fceb6f482c005bcc9ba8d01b92ea5c (diff)
vfs: do bulk POLL* -> EPOLL* replacement
This is the mindless scripted replacement of kernel use of POLL* variables as described by Al, done by this script:

    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done

with de-mangling cleanups yet to come.

NOTE! On almost all architectures, the EPOLL* constants have the same values as the POLL* constants do. But the keyword here is "almost". For various bad reasons they aren't the same, and epoll() doesn't actually work quite correctly in some cases due to this on Sparc et al.

The next patch from Al will sort out the final differences, and we should be all done.

Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
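For reference, a minimal sketch of the pattern the script rewrites across fs/: an in-kernel poll method returning a __poll_t mask built from the EPOLL* names rather than the old POLL* ones. The foo_device structure and its fields are hypothetical, invented for illustration; they are not part of this patch:

    /* Hypothetical driver poll method showing the post-rewrite pattern. */
    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct foo_device {
            wait_queue_head_t waitq;        /* woken when device state changes */
            bool has_data;                  /* true when a read would not block */
    };

    static __poll_t foo_poll(struct file *file, poll_table *wait)
    {
            struct foo_device *dev = file->private_data;
            __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* was POLLOUT | POLLWRNORM */

            /* Register the wait queue so poll/select/epoll can sleep on it. */
            poll_wait(file, &dev->waitq, wait);
            if (dev->has_data)
                    mask |= EPOLLIN | EPOLLRDNORM;  /* was POLLIN | POLLRDNORM */
            return mask;
    }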
Diffstat (limited to 'fs')
-rw-r--r--  fs/cachefiles/daemon.c              |  6
-rw-r--r--  fs/coda/psdev.c                     |  4
-rw-r--r--  fs/debugfs/file.c                   |  2
-rw-r--r--  fs/dlm/plock.c                      |  2
-rw-r--r--  fs/dlm/user.c                       |  2
-rw-r--r--  fs/ecryptfs/miscdev.c               |  2
-rw-r--r--  fs/eventfd.c                        | 18
-rw-r--r--  fs/eventpoll.c                      | 22
-rw-r--r--  fs/fcntl.c                          | 12
-rw-r--r--  fs/fuse/dev.c                       |  8
-rw-r--r--  fs/fuse/file.c                      |  2
-rw-r--r--  fs/kernfs/file.c                    |  4
-rw-r--r--  fs/notify/fanotify/fanotify_user.c  |  2
-rw-r--r--  fs/notify/inotify/inotify_user.c    |  2
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c              |  6
-rw-r--r--  fs/orangefs/devorangefs-req.c       |  2
-rw-r--r--  fs/pipe.c                           | 22
-rw-r--r--  fs/proc/kmsg.c                      |  2
-rw-r--r--  fs/proc/proc_sysctl.c               |  4
-rw-r--r--  fs/proc_namespace.c                 |  4
-rw-r--r--  fs/select.c                         | 10
-rw-r--r--  fs/signalfd.c                       |  4
-rw-r--r--  fs/timerfd.c                        |  2
-rw-r--r--  fs/userfaultfd.c                    | 16
24 files changed, 80 insertions(+), 80 deletions(-)
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 7edbd0679952..3fdee214a5bb 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -289,7 +289,7 @@ found_command:
/*
* poll for culling state
- * - use POLLOUT to indicate culling state
+ * - use EPOLLOUT to indicate culling state
*/
static __poll_t cachefiles_daemon_poll(struct file *file,
struct poll_table_struct *poll)
@@ -301,10 +301,10 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
mask = 0;
if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
- mask |= POLLIN;
+ mask |= EPOLLIN;
if (test_bit(CACHEFILES_CULLING, &cache->flags))
- mask |= POLLOUT;
+ mask |= EPOLLOUT;
return mask;
}
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 80b9b84391a9..c5234c21b539 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -64,12 +64,12 @@ static struct class *coda_psdev_class;
static __poll_t coda_psdev_poll(struct file *file, poll_table * wait)
{
struct venus_comm *vcp = (struct venus_comm *) file->private_data;
- __poll_t mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM;
poll_wait(file, &vcp->vc_waitq, wait);
mutex_lock(&vcp->vc_mutex);
if (!list_empty(&vcp->vc_pending))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
mutex_unlock(&vcp->vc_mutex);
return mask;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 20bb73a931dd..1f99678ff5d3 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -214,7 +214,7 @@ static __poll_t full_proxy_poll(struct file *filp,
const struct file_operations *real_fops;
if (debugfs_file_get(dentry))
- return POLLHUP;
+ return EPOLLHUP;
real_fops = debugfs_real_fops(filp);
r = real_fops->poll(filp, wait);
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index a4c63e9e6385..c7d5a2ea3d03 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -471,7 +471,7 @@ static __poll_t dev_poll(struct file *file, poll_table *wait)
spin_lock(&ops_lock);
if (!list_empty(&send_list))
- mask = POLLIN | POLLRDNORM;
+ mask = EPOLLIN | EPOLLRDNORM;
spin_unlock(&ops_lock);
return mask;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 662432af8ce8..2a669390cd7f 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -896,7 +896,7 @@ static __poll_t device_poll(struct file *file, poll_table *wait)
spin_lock(&proc->asts_spin);
if (!list_empty(&proc->asts)) {
spin_unlock(&proc->asts_spin);
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
}
spin_unlock(&proc->asts_spin);
return 0;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 7423e792a092..2d1158e5f950 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -59,7 +59,7 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
poll_wait(file, &daemon->wait, pt);
mutex_lock(&daemon->mux);
if (!list_empty(&daemon->msg_ctx_out_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
out_unlock_daemon:
daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
mutex_unlock(&daemon->mux);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 04fd824142a1..012f5bd46dfa 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -45,7 +45,7 @@ struct eventfd_ctx {
*
* This function is supposed to be called by the kernel in paths that do not
* allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a POLLERR
+ * value, and we signal this as overflow condition by returning a EPOLLERR
* to poll(2).
*
* Returns the amount by which the counter was incremented. This will be less
@@ -60,7 +60,7 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
n = ULLONG_MAX - ctx->count;
ctx->count += n;
if (waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, POLLIN);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return n;
@@ -96,7 +96,7 @@ static int eventfd_release(struct inode *inode, struct file *file)
{
struct eventfd_ctx *ctx = file->private_data;
- wake_up_poll(&ctx->wqh, POLLHUP);
+ wake_up_poll(&ctx->wqh, EPOLLHUP);
eventfd_ctx_put(ctx);
return 0;
}
@@ -150,11 +150,11 @@ static __poll_t eventfd_poll(struct file *file, poll_table *wait)
count = READ_ONCE(ctx->count);
if (count > 0)
- events |= POLLIN;
+ events |= EPOLLIN;
if (count == ULLONG_MAX)
- events |= POLLERR;
+ events |= EPOLLERR;
if (ULLONG_MAX - 1 > count)
- events |= POLLOUT;
+ events |= EPOLLOUT;
return events;
}
@@ -187,7 +187,7 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
eventfd_ctx_do_read(ctx, cnt);
__remove_wait_queue(&ctx->wqh, wait);
if (*cnt != 0 && waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, POLLOUT);
+ wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return *cnt != 0 ? 0 : -EAGAIN;
@@ -231,7 +231,7 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
if (likely(res > 0)) {
eventfd_ctx_do_read(ctx, &ucnt);
if (waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, POLLOUT);
+ wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
}
spin_unlock_irq(&ctx->wqh.lock);
@@ -281,7 +281,7 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
if (likely(res > 0)) {
ctx->count += ucnt;
if (waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, POLLIN);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
}
spin_unlock_irq(&ctx->wqh.lock);
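Userspace observes the eventfd semantics touched above as ordinary poll bits; the userspace API keeps the classic POLL* names from <poll.h>, which is exactly why the kernel-internal names needed the E prefix. A minimal sketch of the counter/poll interaction:

    /* Userspace: an eventfd becomes readable (POLLIN) once its count is non-zero. */
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
            int efd = eventfd(0, 0);           /* counter starts at 0: not readable */
            uint64_t val = 1;
            struct pollfd pfd = { .fd = efd, .events = POLLIN };

            write(efd, &val, sizeof(val));     /* count 0 -> 1 */
            poll(&pfd, 1, 0);                  /* returns at once with POLLIN set */
            printf("revents=0x%x\n", pfd.revents);

            read(efd, &val, sizeof(val));      /* reads 1 and resets count to 0 */
            close(efd);
            return 0;
    }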
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d1a490c7e6c3..0f3494ed3ed0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -95,9 +95,9 @@
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
-#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
-#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
/* Maximum number of nesting allowed inside epoll sets */
@@ -555,7 +555,7 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
- wake_up_locked_poll(wqueue, POLLIN);
+ wake_up_locked_poll(wqueue, EPOLLIN);
spin_unlock_irqrestore(&wqueue->lock, flags);
return 0;
@@ -575,7 +575,7 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- wake_up_poll(wq, POLLIN);
+ wake_up_poll(wq, EPOLLIN);
}
#endif
@@ -908,7 +908,7 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
list_for_each_entry_safe(epi, tmp, head, rdllink) {
if (ep_item_poll(epi, &pt, depth)) {
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
} else {
/*
* Item has been dropped into the ready list by the poll
@@ -1181,12 +1181,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
if ((epi->event.events & EPOLLEXCLUSIVE) &&
!(pollflags & POLLFREE)) {
switch (pollflags & EPOLLINOUT_BITS) {
- case POLLIN:
- if (epi->event.events & POLLIN)
+ case EPOLLIN:
+ if (epi->event.events & EPOLLIN)
ewake = 1;
break;
- case POLLOUT:
- if (epi->event.events & POLLOUT)
+ case EPOLLOUT:
+ if (epi->event.events & EPOLLOUT)
ewake = 1;
break;
case 0:
@@ -2105,7 +2105,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
- epds.events |= POLLERR | POLLHUP;
+ epds.events |= EPOLLERR | EPOLLHUP;
error = ep_insert(ep, &epds, tf.file, fd, full_check);
} else
error = -EEXIST;
@@ -2121,7 +2121,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
case EPOLL_CTL_MOD:
if (epi) {
if (!(epi->event.events & EPOLLEXCLUSIVE)) {
- epds.events |= POLLERR | POLLHUP;
+ epds.events |= EPOLLERR | EPOLLHUP;
error = ep_modify(ep, epi, &epds);
}
} else
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 4fc731876d6b..1e97f1fda90c 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -691,12 +691,12 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
/* Table to convert sigio signal codes into poll band bitmaps */
static const __poll_t band_table[NSIGPOLL] = {
- POLLIN | POLLRDNORM, /* POLL_IN */
- POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
- POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
- POLLERR, /* POLL_ERR */
- POLLPRI | POLLRDBAND, /* POLL_PRI */
- POLLHUP | POLLERR /* POLL_HUP */
+ EPOLLIN | EPOLLRDNORM, /* POLL_IN */
+ EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND, /* POLL_OUT */
+ EPOLLIN | EPOLLRDNORM | EPOLLMSG, /* POLL_MSG */
+ EPOLLERR, /* POLL_ERR */
+ EPOLLPRI | EPOLLRDBAND, /* POLL_PRI */
+ EPOLLHUP | EPOLLERR /* POLL_HUP */
};
static inline int sigio_perm(struct task_struct *p,
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index aa089a6925d0..5d06384c2cae 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2006,21 +2006,21 @@ out:
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
- __poll_t mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM;
struct fuse_iqueue *fiq;
struct fuse_dev *fud = fuse_get_dev(file);
if (!fud)
- return POLLERR;
+ return EPOLLERR;
fiq = &fud->fc->iq;
poll_wait(file, &fiq->waitq, wait);
spin_lock(&fiq->waitq.lock);
if (!fiq->connected)
- mask = POLLERR;
+ mask = EPOLLERR;
else if (request_pending(fiq))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
spin_unlock(&fiq->waitq.lock);
return mask;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e85e974dd211..a201fb0ac64f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2791,7 +2791,7 @@ __poll_t fuse_file_poll(struct file *file, poll_table *wait)
fc->no_poll = 1;
return DEFAULT_POLLMASK;
}
- return POLLERR;
+ return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index a03ce3422578..fd5ce883072e 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -823,7 +823,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
* the content and then you use 'poll' or 'select' to wait for
* the content to change. When the content changes (assuming the
* manager for the kobject supports notification), poll will
- * return POLLERR|POLLPRI, and select will return the fd whether
+ * return EPOLLERR|EPOLLPRI, and select will return the fd whether
* it is waiting for read, write, or exceptions.
* Once poll/select indicates that the value has changed, you
* need to close and re-open the file, or seek to 0 and read again.
@@ -851,7 +851,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
return DEFAULT_POLLMASK;
trigger:
- return DEFAULT_POLLMASK|POLLERR|POLLPRI;
+ return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
}
static void kernfs_notify_workfn(struct work_struct *work)
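The kernfs comment in the hunk above documents the userspace side of sysfs/kernfs change notification. A minimal sketch of that read/poll/re-read loop, assuming a hypothetical attribute path (again, userspace code uses the classic POLL* names):

    /* Userspace: wait for a sysfs attribute to change, then re-read it. */
    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[128];
            int fd = open("/sys/class/foo/bar/value", O_RDONLY); /* hypothetical */

            if (fd < 0)
                    return 1;
            read(fd, buf, sizeof(buf));        /* must read content before polling */

            struct pollfd pfd = { .fd = fd, .events = POLLERR | POLLPRI };
            poll(&pfd, 1, -1);                 /* wakes when the value changes */

            lseek(fd, 0, SEEK_SET);            /* seek to 0 and read again */
            read(fd, buf, sizeof(buf));
            close(fd);
            return 0;
    }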
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index ef08d64c84b8..c07eb3d655ea 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -247,7 +247,7 @@ static __poll_t fanotify_poll(struct file *file, poll_table *wait)
poll_wait(file, &group->notification_waitq, wait);
spin_lock(&group->notification_lock);
if (!fsnotify_notify_queue_is_empty(group))
- ret = POLLIN | POLLRDNORM;
+ ret = EPOLLIN | EPOLLRDNORM;
spin_unlock(&group->notification_lock);
return ret;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 5c29bf16814f..2c908b31d6c9 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -115,7 +115,7 @@ static __poll_t inotify_poll(struct file *file, poll_table *wait)
poll_wait(file, &group->notification_waitq, wait);
spin_lock(&group->notification_lock);
if (!fsnotify_notify_queue_is_empty(group))
- ret = POLLIN | POLLRDNORM;
+ ret = EPOLLIN | EPOLLRDNORM;
spin_unlock(&group->notification_lock);
return ret;
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 385fcefa8bc5..602c71f32740 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -71,7 +71,7 @@ struct workqueue_struct *user_dlm_worker;
* Over time, dlmfs has added some features that were not part of the
* initial ABI. Unfortunately, some of these features are not detectable
* via standard usage. For example, Linux's default poll always returns
- * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs
* added poll support. Instead, we provide this list of new capabilities.
*
* Capabilities is a read-only attribute. We do it as a module parameter
@@ -83,7 +83,7 @@ struct workqueue_struct *user_dlm_worker;
* interaction.
*
* Capabilities:
- * - bast : POLLIN against the file descriptor of a held lock
+ * - bast : EPOLLIN against the file descriptor of a held lock
* signifies a bast fired on the lock.
*/
#define DLMFS_CAPABILITIES "bast stackglue"
@@ -230,7 +230,7 @@ static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait)
spin_lock(&ip->ip_lockres.l_lock);
if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
- event = POLLIN | POLLRDNORM;
+ event = EPOLLIN | EPOLLRDNORM;
spin_unlock(&ip->ip_lockres.l_lock);
return event;
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index f073cd9e6687..b03057afac2a 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -823,7 +823,7 @@ static __poll_t orangefs_devreq_poll(struct file *file,
poll_wait(file, &orangefs_request_list_waitq, poll_table);
if (!list_empty(&orangefs_request_list))
- poll_revent_mask |= POLLIN;
+ poll_revent_mask |= EPOLLIN;
return poll_revent_mask;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 0913aed7fd0d..7b1954caf388 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -327,7 +327,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(pipe);
@@ -336,7 +336,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
@@ -463,7 +463,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
@@ -474,7 +474,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
out:
__pipe_unlock(pipe);
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@@ -523,19 +523,19 @@ pipe_poll(struct file *filp, poll_table *wait)
nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
- mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+ mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
- mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
+ mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
/*
- * Most Unices do not set POLLERR for FIFOs but on Linux they
+ * Most Unices do not set EPOLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
if (!pipe->readers)
- mask |= POLLERR;
+ mask |= EPOLLERR;
}
return mask;
@@ -568,7 +568,7 @@ pipe_release(struct inode *inode, struct file *file)
pipe->writers--;
if (pipe->readers || pipe->writers) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+ wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
@@ -936,7 +936,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
if (!is_pipe && !pipe->writers) {
if ((filp->f_flags & O_NONBLOCK)) {
- /* suppress POLLHUP until we have
+ /* suppress EPOLLHUP until we have
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index f0bfb45c3f9f..4f4a2abb225e 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -44,7 +44,7 @@ static __poll_t kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
return 0;
}
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 63325377621a..c41ab261397d 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -640,7 +640,7 @@ static __poll_t proc_sys_poll(struct file *filp, poll_table *wait)
/* sysctl was unregistered */
if (IS_ERR(head))
- return POLLERR | POLLHUP;
+ return EPOLLERR | EPOLLHUP;
if (!table->proc_handler)
goto out;
@@ -653,7 +653,7 @@ static __poll_t proc_sys_poll(struct file *filp, poll_table *wait)
if (event != atomic_read(&table->poll->event)) {
filp->private_data = proc_sys_poll_event(table->poll);
- ret = POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+ ret = EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
}
out:
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index c8528d587e09..e16fb8f2049e 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -23,7 +23,7 @@ static __poll_t mounts_poll(struct file *file, poll_table *wait)
struct seq_file *m = file->private_data;
struct proc_mounts *p = m->private;
struct mnt_namespace *ns = p->ns;
- __poll_t res = POLLIN | POLLRDNORM;
+ __poll_t res = EPOLLIN | EPOLLRDNORM;
int event;
poll_wait(file, &p->ns->poll, wait);
@@ -31,7 +31,7 @@ static __poll_t mounts_poll(struct file *file, poll_table *wait)
event = READ_ONCE(ns->event);
if (m->poll_event != event) {
m->poll_event = event;
- res |= POLLERR | POLLPRI;
+ res |= EPOLLERR | EPOLLPRI;
}
return res;
diff --git a/fs/select.c b/fs/select.c
index ec14171dd78a..b6c36254028a 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -432,9 +432,9 @@ get_max:
return max;
}
-#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
-#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
-#define POLLEX_SET (POLLPRI)
+#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
+#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
+#define POLLEX_SET (EPOLLPRI)
static inline void wait_key_set(poll_table *wait, unsigned long in,
unsigned long out, unsigned long bit,
@@ -814,11 +814,11 @@ static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
fd = pollfd->fd;
if (fd >= 0) {
struct fd f = fdget(fd);
- mask = POLLNVAL;
+ mask = EPOLLNVAL;
if (f.file) {
/* userland u16 ->events contains POLL... bitmap */
__poll_t filter = demangle_poll(pollfd->events) |
- POLLERR | POLLHUP;
+ EPOLLERR | EPOLLHUP;
mask = DEFAULT_POLLMASK;
if (f.file->f_op->poll) {
pwait->_key = filter;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 31e923bec99a..9990957264e3 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -45,7 +45,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
return;
/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
- wake_up_poll(wqh, POLLHUP | POLLFREE);
+ wake_up_poll(wqh, EPOLLHUP | POLLFREE);
}
struct signalfd_ctx {
@@ -69,7 +69,7 @@ static __poll_t signalfd_poll(struct file *file, poll_table *wait)
if (next_signal(&current->pending, &ctx->sigmask) ||
next_signal(&current->signal->shared_pending,
&ctx->sigmask))
- events |= POLLIN;
+ events |= EPOLLIN;
spin_unlock_irq(&current->sighand->siglock);
return events;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 0510717f3a53..cdad49da3ff7 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -237,7 +237,7 @@ static __poll_t timerfd_poll(struct file *file, poll_table *wait)
spin_lock_irqsave(&ctx->wqh.lock, flags);
if (ctx->ticks)
- events |= POLLIN;
+ events |= EPOLLIN;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return events;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 87a13a7c8270..cec550c8468f 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -483,7 +483,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (likely(must_wait && !READ_ONCE(ctx->released) &&
(return_to_userland ? !signal_pending(current) :
!fatal_signal_pending(current)))) {
- wake_up_poll(&ctx->fd_wqh, POLLIN);
+ wake_up_poll(&ctx->fd_wqh, EPOLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
@@ -614,7 +614,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
spin_unlock(&ctx->event_wqh.lock);
- wake_up_poll(&ctx->fd_wqh, POLLIN);
+ wake_up_poll(&ctx->fd_wqh, EPOLLIN);
schedule();
spin_lock(&ctx->event_wqh.lock);
@@ -904,7 +904,7 @@ wakeup:
/* Flush pending events that may still wait on event_wqh */
wake_up_all(&ctx->event_wqh);
- wake_up_poll(&ctx->fd_wqh, POLLHUP);
+ wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
userfaultfd_ctx_put(ctx);
return 0;
}
@@ -949,14 +949,14 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
switch (ctx->state) {
case UFFD_STATE_WAIT_API:
- return POLLERR;
+ return EPOLLERR;
case UFFD_STATE_RUNNING:
/*
* poll() never guarantees that read won't block.
* userfaults can be waken before they're read().
*/
if (unlikely(!(file->f_flags & O_NONBLOCK)))
- return POLLERR;
+ return EPOLLERR;
/*
* lockless access to see if there are pending faults
* __pollwait last action is the add_wait_queue but
@@ -970,14 +970,14 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
ret = 0;
smp_mb();
if (waitqueue_active(&ctx->fault_pending_wqh))
- ret = POLLIN;
+ ret = EPOLLIN;
else if (waitqueue_active(&ctx->event_wqh))
- ret = POLLIN;
+ ret = EPOLLIN;
return ret;
default:
WARN_ON_ONCE(1);
- return POLLERR;
+ return EPOLLERR;
}
}