ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev_epoll.c
(Generate patch)

Comparing libev/ev_epoll.c (file contents):
Revision 1.34 by root, Fri May 23 16:37:38 2008 UTC vs.
Revision 1.58 by root, Mon Jan 10 14:05:23 2011 UTC

1/* 1/*
2 * libev epoll fd activity backend 2 * libev epoll fd activity backend
3 * 3 *
4 * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
47 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) 47 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
48 * and seems not to have any advantage. 48 * and seems not to have any advantage.
49 * c) the inability to handle fork or file descriptors (think dup) 49 * c) the inability to handle fork or file descriptors (think dup)
50 * limits the applicability over poll, so this is not a generic 50 * limits the applicability over poll, so this is not a generic
51 * poll replacement. 51 * poll replacement.
52 * d) epoll doesn't work the same as select with many file descriptors
53 * (such as files). while not critical, no other advanced interface
54 * seems to share this (rather non-unixy) limitation.
55 * e) epoll claims to be embeddable, but in practise you never get
56 * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32).
57 * f) epoll_ctl returning EPERM means the fd is always ready.
52 * 58 *
53 * lots of "weird code" and complication handling in this file is due 59 * lots of "weird code" and complication handling in this file is due
54 * to these design problems with epoll, as we try very hard to avoid 60 * to these design problems with epoll, as we try very hard to avoid
55 * epoll_ctl syscalls for common usage patterns. 61 * epoll_ctl syscalls for common usage patterns and handle the breakage
62 * ensuing from receiving events for closed and otherwise long gone
63 * file descriptors.
56 */ 64 */
57 65
#include <sys/epoll.h>

/* flag stored in anfds [fd].emask marking fds for which epoll_ctl returned
 * EPERM (always-ready fds such as regular files); kept above the EV_READ /
 * EV_WRITE bits so it never collides with a real event mask */
#define EV_EMASK_EPERM 0x80
60static void 70static void
61epoll_modify (EV_P_ int fd, int oev, int nev) 71epoll_modify (EV_P_ int fd, int oev, int nev)
62{ 72{
63 struct epoll_event ev; 73 struct epoll_event ev;
74 unsigned char oldmask;
64 75
65 /* 76 /*
66 * we handle EPOLL_CTL_DEL by ignoring it here 77 * we handle EPOLL_CTL_DEL by ignoring it here
67 * on the assumption that the fd is gone anyways 78 * on the assumption that the fd is gone anyways
68 * if that is wrong, we have to handle the spurious 79 * if that is wrong, we have to handle the spurious
69 * event in epoll_poll. 80 * event in epoll_poll.
81 * if the fd is added again, we try to ADD it, and, if that
82 * fails, we assume it still has the same eventmask.
70 */ 83 */
71 if (!nev) 84 if (!nev)
72 return; 85 return;
73 86
74 ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ 87 oldmask = anfds [fd].emask;
88 anfds [fd].emask = nev;
89
90 /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */
91 ev.data.u64 = (uint64_t)(uint32_t)fd
92 | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
75 ev.events = (nev & EV_READ ? EPOLLIN : 0) 93 ev.events = (nev & EV_READ ? EPOLLIN : 0)
76 | (nev & EV_WRITE ? EPOLLOUT : 0); 94 | (nev & EV_WRITE ? EPOLLOUT : 0);
77 95
78 if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) 96 if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
79 return; 97 return;
80 98
81 if (expect_true (errno == ENOENT)) 99 if (expect_true (errno == ENOENT))
82 { 100 {
83 /* on ENOENT the fd went away, so try to do the right thing */ 101 /* if ENOENT then the fd went away, so try to do the right thing */
84 if (!nev) 102 if (!nev)
85 return; 103 goto dec_egen;
86 104
87 if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) 105 if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
88 return; 106 return;
89 } 107 }
90 else if (expect_true (errno == EEXIST)) 108 else if (expect_true (errno == EEXIST))
91 { 109 {
92 /* on EEXIST we ignored a previous DEL */ 110 /* EEXIST means we ignored a previous DEL, but the fd is still active */
111 /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
112 if (oldmask == nev)
113 goto dec_egen;
114
93 if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) 115 if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
94 return; 116 return;
95 } 117 }
118 else if (expect_true (errno == EPERM))
119 {
120 /* EPERM means the fd is always ready, but epoll is too snobbish */
121 /* to handle it, unlike select or poll. */
122 anfds [fd].emask = EV_EMASK_EPERM;
123
124 /* add fd to epoll_eperms, if not already inside */
125 if (!(oldmask & EV_EMASK_EPERM))
126 {
127 array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2);
128 epoll_eperms [epoll_epermcnt++] = fd;
129 }
130
131 return;
132 }
96 133
97 fd_kill (EV_A_ fd); 134 fd_kill (EV_A_ fd);
135
136dec_egen:
137 /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
138 --anfds [fd].egen;
98} 139}
99 140
100static void 141static void
101epoll_poll (EV_P_ ev_tstamp timeout) 142epoll_poll (EV_P_ ev_tstamp timeout)
102{ 143{
103 int i; 144 int i;
145 int eventcnt;
146
147 /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
148 /* the default libev max wait time, however. */
149 EV_RELEASE_CB;
104 int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); 150 eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax,
151 epoll_epermcnt ? 0 : (int)ceil (timeout * 1000.));
152 EV_ACQUIRE_CB;
105 153
106 if (expect_false (eventcnt < 0)) 154 if (expect_false (eventcnt < 0))
107 { 155 {
108 if (errno != EINTR) 156 if (errno != EINTR)
109 syserr ("(libev) epoll_wait"); 157 ev_syserr ("(libev) epoll_wait");
110 158
111 return; 159 return;
112 } 160 }
113 161
114 for (i = 0; i < eventcnt; ++i) 162 for (i = 0; i < eventcnt; ++i)
115 { 163 {
116 struct epoll_event *ev = epoll_events + i; 164 struct epoll_event *ev = epoll_events + i;
117 165
118 int fd = ev->data.u64; 166 int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */
167 int want = anfds [fd].events;
119 int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) 168 int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
120 | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); 169 | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0);
121 int want = anfds [fd].events; 170
171 /* check for spurious notification */
172 /* we assume that fd is always in range, as we never shrink the anfds array */
173 if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
174 {
175 /* recreate kernel state */
176 postfork = 1;
177 continue;
178 }
122 179
123 if (expect_false (got & ~want)) 180 if (expect_false (got & ~want))
124 { 181 {
182 anfds [fd].emask = want;
183
125 /* we received an event but are not interested in it, try mod or del */ 184 /* we received an event but are not interested in it, try mod or del */
185 /* I don't think we ever need MOD, but let's handle it anyways */
126 ev->events = (want & EV_READ ? EPOLLIN : 0) 186 ev->events = (want & EV_READ ? EPOLLIN : 0)
127 | (want & EV_WRITE ? EPOLLOUT : 0); 187 | (want & EV_WRITE ? EPOLLOUT : 0);
128 188
189 /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */
190 /* which is fortunately easy to do for us. */
129 epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); 191 if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
192 {
193 postfork = 1; /* an error occurred, recreate kernel state */
194 continue;
195 }
130 } 196 }
131 197
132 fd_event (EV_A_ fd, got); 198 fd_event (EV_A_ fd, got);
133 } 199 }
134 200
137 { 203 {
138 ev_free (epoll_events); 204 ev_free (epoll_events);
139 epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); 205 epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
140 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 206 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
141 } 207 }
208
209 /* now synthesize events for all fds where epoll fails, while select works... */
210 for (i = epoll_epermcnt; i--; )
211 {
212 int fd = epoll_eperms [i];
213 unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE);
214
215 if (anfds [fd].emask & EV_EMASK_EPERM && events)
216 fd_event (EV_A_ fd, events);
217 else
218 epoll_eperms [i] = epoll_eperms [--epoll_epermcnt];
219 }
142} 220}
143 221
144int inline_size 222int inline_size
145epoll_init (EV_P_ int flags) 223epoll_init (EV_P_ int flags)
146{ 224{
225#ifdef EPOLL_CLOEXEC
226 backend_fd = epoll_create1 (EPOLL_CLOEXEC);
227
228 if (backend_fd <= 0)
229#endif
147 backend_fd = epoll_create (256); 230 backend_fd = epoll_create (256);
148 231
149 if (backend_fd < 0) 232 if (backend_fd < 0)
150 return 0; 233 return 0;
151 234
152 fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 235 fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
153 236
154 backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ 237 backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */
155 backend_modify = epoll_modify; 238 backend_modify = epoll_modify;
156 backend_poll = epoll_poll; 239 backend_poll = epoll_poll;
157 240
158 epoll_eventmax = 64; /* intiial number of events receivable per poll */ 241 epoll_eventmax = 64; /* initial number of events receivable per poll */
159 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 242 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
160 243
161 return EVBACKEND_EPOLL; 244 return EVBACKEND_EPOLL;
162} 245}
163 246
164void inline_size 247void inline_size
165epoll_destroy (EV_P) 248epoll_destroy (EV_P)
166{ 249{
167 ev_free (epoll_events); 250 ev_free (epoll_events);
251 array_free (epoll_eperm, EMPTY);
168} 252}
169 253
170void inline_size 254void inline_size
171epoll_fork (EV_P) 255epoll_fork (EV_P)
172{ 256{
173 close (backend_fd); 257 close (backend_fd);
174 258
175 while ((backend_fd = epoll_create (256)) < 0) 259 while ((backend_fd = epoll_create (256)) < 0)
176 syserr ("(libev) epoll_create"); 260 ev_syserr ("(libev) epoll_create");
177 261
178 fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 262 fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
179 263
180 fd_rearm_all (EV_A); 264 fd_rearm_all (EV_A);
181} 265}

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines