/cvs/libev/ev_epoll.c

Comparing libev/ev_epoll.c (file contents):
Revision 1.40 by root, Wed Oct 29 07:09:37 2008 UTC vs.
Revision 1.62 by root, Sat Jun 4 05:25:03 2011 UTC

/*
 * libev epoll fd activity backend
 *
- * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
[...]
 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
 *    and seems not to have any advantage.
 * c) the inability to handle fork or file descriptors (think dup)
 *    limits the applicability over poll, so this is not a generic
 *    poll replacement.
+ * d) epoll doesn't work the same as select with many file descriptors
+ *    (such as files). while not critical, no other advanced interface
+ *    seems to share this (rather non-unixy) limitation.
+ * e) epoll claims to be embeddable, but in practise you never get
+ *    a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32).
+ * f) epoll_ctl returning EPERM means the fd is always ready.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */

#include <sys/epoll.h>

+#define EV_EMASK_EPERM 0x80
+
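The new EV_EMASK_EPERM flag exists because of design problem (f) in the header comment: epoll_ctl rejects some perfectly readable/writable file descriptors, most prominently regular files, with EPERM, while select and poll simply report them as always ready. A minimal standalone sketch of that behaviour (the file path is only an example, and error checking is reduced to perror):

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/epoll.h>

  int
  main (void)
  {
    int epfd = epoll_create1 (0);
    int fd   = open ("/etc/hostname", O_RDONLY); /* a regular file */
    struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };

    /* regular files cannot be registered with epoll: this fails with EPERM, */
    /* even though reads on the fd would never block */
    if (epoll_ctl (epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
      perror ("epoll_ctl"); /* expected: Operation not permitted */

    close (fd);
    close (epfd);
    return 0;
  }

The rest of this revision teaches the backend to remember such fds in epoll_eperms and to synthesize their readiness in epoll_poll instead of failing.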
static void
epoll_modify (EV_P_ int fd, int oev, int nev)
{
  struct epoll_event ev;
  unsigned char oldmask;
[...]
  /*
   * we handle EPOLL_CTL_DEL by ignoring it here
   * on the assumption that the fd is gone anyways
   * if that is wrong, we have to handle the spurious
   * event in epoll_poll.
-   * the fd is later added, we try to ADD it, and, if that
+   * if the fd is added again, we try to ADD it, and, if that
   * fails, we assume it still has the same eventmask.
   */
  if (!nev)
    return;

  oldmask = anfds [fd].emask;
  anfds [fd].emask = nev;

-  /* store the generation counter in the upper 32 bits */
-  ev.data.u64 = fd | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
+  /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */
+  ev.data.u64 = (uint64_t)(uint32_t)fd
+              | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
  ev.events   = (nev & EV_READ  ? EPOLLIN  : 0)
              | (nev & EV_WRITE ? EPOLLOUT : 0);

-  if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
+  if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
    return;

  if (expect_true (errno == ENOENT))
    {
      /* if ENOENT then the fd went away, so try to do the right thing */
[...]
        goto dec_egen;

      if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
        return;
    }
+  else if (expect_true (errno == EPERM))
+    {
+      /* EPERM means the fd is always ready, but epoll is too snobbish */
+      /* to handle it, unlike select or poll. */
+      anfds [fd].emask = EV_EMASK_EPERM;
+
+      /* add fd to epoll_eperms, if not already inside */
+      if (!(oldmask & EV_EMASK_EPERM))
+        {
+          array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2);
+          epoll_eperms [epoll_epermcnt++] = fd;
+        }
+
+      return;
+    }

  fd_kill (EV_A_ fd);

dec_egen:
  /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
[...]

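The generation counter that epoll_modify packs into ev.data.u64 is what later lets epoll_poll detect stale kernel state: if an fd is closed and its number reused before the old registration is gone, the counter delivered back with the event no longer matches anfds [fd].egen and the event is discarded. A small round-trip illustration of the packing, using made-up values outside of libev's data structures:

  #include <stdint.h>
  #include <assert.h>

  int
  main (void)
  {
    int      fd   = 42; /* hypothetical file descriptor */
    uint32_t egen = 7;  /* per-fd generation counter, as kept in anfds [fd].egen */

    /* pack as epoll_modify does: fd in the lower 32 bits, generation in the upper 32 */
    uint64_t u64 = (uint64_t)(uint32_t)fd | ((uint64_t)egen << 32);

    /* unpack as epoll_poll does when the kernel hands the event back */
    int      ev_fd   = (int)(uint32_t)u64;
    uint32_t ev_egen = (uint32_t)(u64 >> 32);

    assert (ev_fd == fd);
    assert (ev_egen == egen); /* a mismatch means the event belongs to an older incarnation of the fd */

    return 0;
  }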
static void
epoll_poll (EV_P_ ev_tstamp timeout)
{
  int i;
+  int eventcnt;
+
+  if (expect_false (epoll_epermcnt))
+    timeout = 0.;
+
+  /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
+  /* the default libev max wait time, however. */
+  EV_RELEASE_CB;
-  int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3);
+  EV_ACQUIRE_CB;

  if (expect_false (eventcnt < 0))
    {
      if (errno != EINTR)
        ev_syserr ("(libev) epoll_wait");
[...]
      int want = anfds [fd].events;
      int got  = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
               | (ev->events & (EPOLLIN  | EPOLLERR | EPOLLHUP) ? EV_READ  : 0);

      /* check for spurious notification */
+      /* we assume that fd is always in range, as we never shrink the anfds array */
      if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
        {
          /* recreate kernel state */
          postfork = 1;
          continue;
[...]
          /* we received an event but are not interested in it, try mod or del */
          /* I don't think we ever need MOD, but let's handle it anyways */
          ev->events = (want & EV_READ  ? EPOLLIN  : 0)
                     | (want & EV_WRITE ? EPOLLOUT : 0);

+          /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */
+          /* which is fortunately easy to do for us. */
          if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
            {
-              postfork = 1; /* an error occured, recreate kernel state */
+              postfork = 1; /* an error occurred, recreate kernel state */
              continue;
            }
        }

      fd_event (EV_A_ fd, got);
[...]
    {
      ev_free (epoll_events);
      epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
      epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
    }
+
+  /* now synthesize events for all fds where epoll fails, while select works... */
+  for (i = epoll_epermcnt; i--; )
+    {
+      int fd = epoll_eperms [i];
+      unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE);
+
+      if (anfds [fd].emask & EV_EMASK_EPERM && events)
+        fd_event (EV_A_ fd, events);
+      else
+        epoll_eperms [i] = epoll_eperms [--epoll_epermcnt];
+    }
}
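Seen from the libev API, the effect of the EPERM machinery is that an ev_io watcher on a file descriptor epoll itself refuses (a regular file, say) still fires with the epoll backend, because the fd sits on epoll_eperms and its events are synthesized every loop iteration. A hedged usage sketch, assuming the libev 4.x API (ev_run/ev_break) and an example file path:

  #include <ev.h>
  #include <fcntl.h>
  #include <stdio.h>

  static void
  file_cb (EV_P_ ev_io *w, int revents)
  {
    /* reached even though epoll_ctl said EPERM for this fd */
    printf ("fd %d is readable\n", w->fd);
    ev_break (EV_A_ EVBREAK_ONE);
  }

  int
  main (void)
  {
    struct ev_loop *loop = ev_default_loop (EVBACKEND_EPOLL);
    int fd = open ("/etc/hostname", O_RDONLY); /* regular file: epoll alone cannot watch it */
    ev_io w;

    ev_io_init (&w, file_cb, fd, EV_READ);
    ev_io_start (loop, &w);
    ev_run (loop, 0);

    return 0;
  }

Note that epoll_poll forces the wait timeout to 0. whenever such fds are pending, so this convenience effectively busy-polls until the watcher is stopped.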

int inline_size
epoll_init (EV_P_ int flags)
{
+#ifdef EPOLL_CLOEXEC
+  backend_fd = epoll_create1 (EPOLL_CLOEXEC);
+
+  if (backend_fd <= 0)
+#endif
  backend_fd = epoll_create (256);

  if (backend_fd < 0)
    return 0;

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

-  backend_fudge  = 0.; /* kernel sources seem to indicate this to be zero */
+  backend_fudge  = 1./1024.; /* epoll does sometimes return early, this is just to avoid the worst */
  backend_modify = epoll_modify;
  backend_poll   = epoll_poll;

  epoll_eventmax = 64; /* initial number of events receivable per poll */
  epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
[...]

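The #ifdef sequence in epoll_init is the usual portability pattern: prefer epoll_create1 (EPOLL_CLOEXEC) when the headers declare it, and fall back to epoll_create plus fcntl when the call is missing or fails at runtime. A standalone sketch of the same pattern, with a hypothetical helper name and minimal error handling:

  #include <sys/epoll.h>
  #include <fcntl.h>

  /* create an epoll fd with close-on-exec set, falling back to the
     older two-step variant where epoll_create1 is unavailable */
  static int
  epoll_fd_cloexec (void)
  {
    int fd = -1;

  #ifdef EPOLL_CLOEXEC
    fd = epoll_create1 (EPOLL_CLOEXEC);
  #endif

    if (fd < 0)
      {
        fd = epoll_create (256); /* the size hint is ignored by modern kernels */

        if (fd >= 0)
          fcntl (fd, F_SETFD, FD_CLOEXEC);
      }

    return fd;
  }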
void inline_size
epoll_destroy (EV_P)
{
  ev_free (epoll_events);
+  array_free (epoll_eperm, EMPTY);
}

void inline_size
epoll_fork (EV_P)
{
[...]
Diff legend: lines prefixed with "-" were removed in revision 1.62, lines prefixed with "+" were added, unmarked lines are unchanged context, and "[...]" marks context omitted from the comparison.