ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev_epoll.c
(Generate patch)

Comparing libev/ev_epoll.c (file contents):
Revision 1.64 by root, Mon Jun 27 21:51:52 2011 UTC vs.
Revision 1.80 by root, Thu Jul 11 05:41:39 2019 UTC

1/* 1/*
2 * libev epoll fd activity backend 2 * libev epoll fd activity backend
3 * 3 *
4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
91 ev.data.u64 = (uint64_t)(uint32_t)fd 91 ev.data.u64 = (uint64_t)(uint32_t)fd
92 | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); 92 | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
93 ev.events = (nev & EV_READ ? EPOLLIN : 0) 93 ev.events = (nev & EV_READ ? EPOLLIN : 0)
94 | (nev & EV_WRITE ? EPOLLOUT : 0); 94 | (nev & EV_WRITE ? EPOLLOUT : 0);
95 95
96 if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) 96 if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
97 return; 97 return;
98 98
99 if (expect_true (errno == ENOENT)) 99 if (ecb_expect_true (errno == ENOENT))
100 { 100 {
101 /* if ENOENT then the fd went away, so try to do the right thing */ 101 /* if ENOENT then the fd went away, so try to do the right thing */
102 if (!nev) 102 if (!nev)
103 goto dec_egen; 103 goto dec_egen;
104 104
105 if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) 105 if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
106 return; 106 return;
107 } 107 }
108 else if (expect_true (errno == EEXIST)) 108 else if (ecb_expect_true (errno == EEXIST))
109 { 109 {
110 /* EEXIST means we ignored a previous DEL, but the fd is still active */ 110 /* EEXIST means we ignored a previous DEL, but the fd is still active */
111 /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ 111 /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
112 if (oldmask == nev) 112 if (oldmask == nev)
113 goto dec_egen; 113 goto dec_egen;
114 114
115 if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) 115 if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
116 return; 116 return;
117 } 117 }
118 else if (expect_true (errno == EPERM)) 118 else if (ecb_expect_true (errno == EPERM))
119 { 119 {
120 /* EPERM means the fd is always ready, but epoll is too snobbish */ 120 /* EPERM means the fd is always ready, but epoll is too snobbish */
121 /* to handle it, unlike select or poll. */ 121 /* to handle it, unlike select or poll. */
122 anfds [fd].emask = EV_EMASK_EPERM; 122 anfds [fd].emask = EV_EMASK_EPERM;
123 123
124 /* add fd to epoll_eperms, if not already inside */ 124 /* add fd to epoll_eperms, if not already inside */
125 if (!(oldmask & EV_EMASK_EPERM)) 125 if (!(oldmask & EV_EMASK_EPERM))
126 { 126 {
127 array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2); 127 array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit);
128 epoll_eperms [epoll_epermcnt++] = fd; 128 epoll_eperms [epoll_epermcnt++] = fd;
129 } 129 }
130 130
131 return; 131 return;
132 } 132 }
133 else
134 assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL));
133 135
134 fd_kill (EV_A_ fd); 136 fd_kill (EV_A_ fd);
135 137
136dec_egen: 138dec_egen:
137 /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ 139 /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
142epoll_poll (EV_P_ ev_tstamp timeout) 144epoll_poll (EV_P_ ev_tstamp timeout)
143{ 145{
144 int i; 146 int i;
145 int eventcnt; 147 int eventcnt;
146 148
147 if (expect_false (epoll_epermcnt)) 149 if (ecb_expect_false (epoll_epermcnt))
148 timeout = 0.; 150 timeout = 0.;
149 151
150 /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ 152 /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
151 /* the default libev max wait time, however. */ 153 /* the default libev max wait time, however. */
152 EV_RELEASE_CB; 154 EV_RELEASE_CB;
153 eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3); 155 eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MS (timeout));
154 EV_ACQUIRE_CB; 156 EV_ACQUIRE_CB;
155 157
156 if (expect_false (eventcnt < 0)) 158 if (ecb_expect_false (eventcnt < 0))
157 { 159 {
158 if (errno != EINTR) 160 if (errno != EINTR)
159 ev_syserr ("(libev) epoll_wait"); 161 ev_syserr ("(libev) epoll_wait");
160 162
161 return; 163 return;
174 * check for spurious notification. 176 * check for spurious notification.
175 * this only finds spurious notifications on egen updates 177 * this only finds spurious notifications on egen updates
176 * other spurious notifications will be found by epoll_ctl, below 178 * other spurious notifications will be found by epoll_ctl, below
177 * we assume that fd is always in range, as we never shrink the anfds array 179 * we assume that fd is always in range, as we never shrink the anfds array
178 */ 180 */
179 if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) 181 if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
180 { 182 {
181 /* recreate kernel state */ 183 /* recreate kernel state */
182 postfork = 1; 184 postfork |= 2;
183 continue; 185 continue;
184 } 186 }
185 187
186 if (expect_false (got & ~want)) 188 if (ecb_expect_false (got & ~want))
187 { 189 {
188 anfds [fd].emask = want; 190 anfds [fd].emask = want;
189 191
190 /* 192 /*
191 * we received an event but are not interested in it, try mod or del 193 * we received an event but are not interested in it, try mod or del
193 * when we are no longer interested in them, but also when we get spurious 195 * when we are no longer interested in them, but also when we get spurious
194 * notifications for fds from another process. this is partially handled 196 * notifications for fds from another process. this is partially handled
195 * above with the gencounter check (== our fd is not the event fd), and 197 * above with the gencounter check (== our fd is not the event fd), and
196 * partially here, when epoll_ctl returns an error (== a child has the fd 198 * partially here, when epoll_ctl returns an error (== a child has the fd
197 * but we closed it). 199 * but we closed it).
200 * note: for events such as POLLHUP, where we can't know whether it refers
201 * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls.
198 */ 202 */
199 ev->events = (want & EV_READ ? EPOLLIN : 0) 203 ev->events = (want & EV_READ ? EPOLLIN : 0)
200 | (want & EV_WRITE ? EPOLLOUT : 0); 204 | (want & EV_WRITE ? EPOLLOUT : 0);
201 205
202 /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ 206 /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */
203 /* which is fortunately easy to do for us. */ 207 /* which is fortunately easy to do for us. */
204 if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) 208 if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
205 { 209 {
206 postfork = 1; /* an error occurred, recreate kernel state */ 210 postfork |= 2; /* an error occurred, recreate kernel state */
207 continue; 211 continue;
208 } 212 }
209 } 213 }
210 214
211 fd_event (EV_A_ fd, got); 215 fd_event (EV_A_ fd, got);
212 } 216 }
213 217
214 /* if the receive array was full, increase its size */ 218 /* if the receive array was full, increase its size */
215 if (expect_false (eventcnt == epoll_eventmax)) 219 if (ecb_expect_false (eventcnt == epoll_eventmax))
216 { 220 {
217 ev_free (epoll_events); 221 ev_free (epoll_events);
218 epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); 222 epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
219 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 223 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
220 } 224 }
226 unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); 230 unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE);
227 231
228 if (anfds [fd].emask & EV_EMASK_EPERM && events) 232 if (anfds [fd].emask & EV_EMASK_EPERM && events)
229 fd_event (EV_A_ fd, events); 233 fd_event (EV_A_ fd, events);
230 else 234 else
235 {
231 epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; 236 epoll_eperms [i] = epoll_eperms [--epoll_epermcnt];
237 anfds [fd].emask = 0;
238 }
239 }
240}
241
242static int
243epoll_epoll_create (void)
244{
245 int fd;
246
247#if defined EPOLL_CLOEXEC && !defined __ANDROID__
248 fd = epoll_create1 (EPOLL_CLOEXEC);
249
250 if (fd < 0 && (errno == EINVAL || errno == ENOSYS))
251#endif
232 } 252 {
233} 253 fd = epoll_create (256);
234 254
255 if (fd >= 0)
256 fcntl (fd, F_SETFD, FD_CLOEXEC);
257 }
258
259 return fd;
260}
261
235int inline_size 262inline_size
263int
236epoll_init (EV_P_ int flags) 264epoll_init (EV_P_ int flags)
237{ 265{
238#ifdef EPOLL_CLOEXEC 266 if ((backend_fd = epoll_epoll_create ()) < 0)
239 backend_fd = epoll_create1 (EPOLL_CLOEXEC);
240
241 if (backend_fd <= 0)
242#endif
243 backend_fd = epoll_create (256);
244
245 if (backend_fd < 0)
246 return 0; 267 return 0;
247 268
248 fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
249
250 backend_mintime = 1./1024.; /* epoll does sometimes return early, this is just to avoid the worst */ 269 backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */
251 backend_modify = epoll_modify; 270 backend_modify = epoll_modify;
252 backend_poll = epoll_poll; 271 backend_poll = epoll_poll;
253 272
254 epoll_eventmax = 64; /* initial number of events receivable per poll */ 273 epoll_eventmax = 64; /* initial number of events receivable per poll */
255 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 274 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
256 275
257 return EVBACKEND_EPOLL; 276 return EVBACKEND_EPOLL;
258} 277}
259 278
260void inline_size 279inline_size
280void
261epoll_destroy (EV_P) 281epoll_destroy (EV_P)
262{ 282{
263 ev_free (epoll_events); 283 ev_free (epoll_events);
264 array_free (epoll_eperm, EMPTY); 284 array_free (epoll_eperm, EMPTY);
265} 285}
266 286
267void inline_size 287ecb_cold
288static void
268epoll_fork (EV_P) 289epoll_fork (EV_P)
269{ 290{
270 close (backend_fd); 291 close (backend_fd);
271 292
272 while ((backend_fd = epoll_create (256)) < 0) 293 while ((backend_fd = epoll_epoll_create ()) < 0)
273 ev_syserr ("(libev) epoll_create"); 294 ev_syserr ("(libev) epoll_create");
274 295
275 fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
276
277 fd_rearm_all (EV_A); 296 fd_rearm_all (EV_A);
278} 297}
279 298

Diff Legend

- Removed lines
+ Added lines
< Changed lines (left/older revision)
> Changed lines (right/newer revision)