1 | /* |
1 | /* |
2 | * libev epoll fd activity backend |
2 | * libev epoll fd activity backend |
3 | * |
3 | * |
4 | * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de> |
4 | * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de> |
5 | * All rights reserved. |
5 | * All rights reserved. |
6 | * |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
8 | * tion, are permitted provided that the following conditions are met: |
8 | * tion, are permitted provided that the following conditions are met: |
9 | * |
9 | * |
… | |
… | |
47 | * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) |
47 | * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) |
48 | * and seems not to have any advantage. |
48 | * and seems not to have any advantage. |
49 | * c) the inability to handle fork or file descriptors (think dup) |
49 | * c) the inability to handle fork or file descriptors (think dup) |
50 | * limits the applicability over poll, so this is not a generic |
50 | * limits the applicability over poll, so this is not a generic |
51 | * poll replacement. |
51 | * poll replacement. |
|
|
52 | * d) epoll doesn't work the same as select with many file descriptors |
|
|
53 | * (such as files). while not critical, no other advanced interface |
|
|
54 | * seems to share this (rather non-unixy) limitation. |
|
|
55 | * e) epoll claims to be embeddable, but in practice you never get |
|
|
56 | * a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32). |
|
|
57 | * f) epoll_ctl returning EPERM means the fd is always ready. |
52 | * |
58 | * |
53 | * lots of "weird code" and complication handling in this file is due |
59 | * lots of "weird code" and complication handling in this file is due |
54 | * to these design problems with epoll, as we try very hard to avoid |
60 | * to these design problems with epoll, as we try very hard to avoid |
55 | * epoll_ctl syscalls for common usage patterns and handle the breakage |
61 | * epoll_ctl syscalls for common usage patterns and handle the breakage |
56 | * ensuing from receiving events for closed and otherwise long gone |
62 | * ensuing from receiving events for closed and otherwise long gone |
57 | * file descriptors. |
63 | * file descriptors. |
58 | */ |
64 | */ |
59 | |
65 | |
60 | #include <sys/epoll.h> |
66 | #include <sys/epoll.h> |
|
|
67 | |
|
|
68 | #define EV_EMASK_EPERM 0x80 |
61 | |
69 | |
62 | static void |
70 | static void |
63 | epoll_modify (EV_P_ int fd, int oev, int nev) |
71 | epoll_modify (EV_P_ int fd, int oev, int nev) |
64 | { |
72 | { |
65 | struct epoll_event ev; |
73 | struct epoll_event ev; |
… | |
… | |
83 | ev.data.u64 = (uint64_t)(uint32_t)fd |
91 | ev.data.u64 = (uint64_t)(uint32_t)fd |
84 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
85 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
86 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
94 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
87 | |
95 | |
88 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
96 | if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
89 | return; |
97 | return; |
90 | |
98 | |
91 | if (expect_true (errno == ENOENT)) |
99 | if (expect_true (errno == ENOENT)) |
92 | { |
100 | { |
93 | /* if ENOENT then the fd went away, so try to do the right thing */ |
101 | /* if ENOENT then the fd went away, so try to do the right thing */ |
… | |
… | |
105 | goto dec_egen; |
113 | goto dec_egen; |
106 | |
114 | |
107 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
108 | return; |
116 | return; |
109 | } |
117 | } |
|
|
118 | else if (expect_true (errno == EPERM)) |
|
|
119 | { |
|
|
120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ |
|
|
121 | /* to handle it, unlike select or poll. */ |
|
|
122 | anfds [fd].emask = EV_EMASK_EPERM; |
|
|
123 | |
|
|
124 | /* add fd to epoll_eperms, if not already inside */ |
|
|
125 | if (!(oldmask & EV_EMASK_EPERM)) |
|
|
126 | { |
|
|
127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2); |
|
|
128 | epoll_eperms [epoll_epermcnt++] = fd; |
|
|
129 | } |
|
|
130 | |
|
|
131 | return; |
|
|
132 | } |
110 | |
133 | |
111 | fd_kill (EV_A_ fd); |
134 | fd_kill (EV_A_ fd); |
112 | |
135 | |
113 | dec_egen: |
136 | dec_egen: |
114 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
137 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
… | |
… | |
117 | |
140 | |
118 | static void |
141 | static void |
119 | epoll_poll (EV_P_ ev_tstamp timeout) |
142 | epoll_poll (EV_P_ ev_tstamp timeout) |
120 | { |
143 | { |
121 | int i; |
144 | int i; |
|
|
145 | int eventcnt; |
|
|
146 | |
|
|
147 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ |
|
|
148 | /* the default libev max wait time, however. */ |
|
|
149 | EV_RELEASE_CB; |
122 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
150 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
|
|
151 | EV_ACQUIRE_CB; |
123 | |
152 | |
124 | if (expect_false (eventcnt < 0)) |
153 | if (expect_false (eventcnt < 0)) |
125 | { |
154 | { |
126 | if (errno != EINTR) |
155 | if (errno != EINTR) |
127 | ev_syserr ("(libev) epoll_wait"); |
156 | ev_syserr ("(libev) epoll_wait"); |
… | |
… | |
137 | int want = anfds [fd].events; |
166 | int want = anfds [fd].events; |
138 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
167 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
139 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
168 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
140 | |
169 | |
141 | /* check for spurious notification */ |
170 | /* check for spurious notification */ |
|
|
171 | /* we assume that fd is always in range, as we never shrink the anfds array */ |
142 | if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
172 | if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
143 | { |
173 | { |
144 | /* recreate kernel state */ |
174 | /* recreate kernel state */ |
145 | postfork = 1; |
175 | postfork = 1; |
146 | continue; |
176 | continue; |
… | |
… | |
153 | /* we received an event but are not interested in it, try mod or del */ |
183 | /* we received an event but are not interested in it, try mod or del */ |
154 | /* I don't think we ever need MOD, but let's handle it anyways */ |
184 | /* I don't think we ever need MOD, but let's handle it anyways */ |
155 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
185 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
156 | | (want & EV_WRITE ? EPOLLOUT : 0); |
186 | | (want & EV_WRITE ? EPOLLOUT : 0); |
157 | |
187 | |
|
|
188 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ |
|
|
189 | /* which is fortunately easy to do for us. */ |
158 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
190 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
159 | { |
191 | { |
160 | postfork = 1; /* an error occurred, recreate kernel state |
192 | postfork = 1; /* an error occurred, recreate kernel state */ |
161 | continue; |
193 | continue; |
162 | } |
194 | } |
163 | } |
195 | } |
164 | |
196 | |
165 | fd_event (EV_A_ fd, got); |
197 | fd_event (EV_A_ fd, got); |
… | |
… | |
170 | { |
202 | { |
171 | ev_free (epoll_events); |
203 | ev_free (epoll_events); |
172 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
204 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
173 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
205 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
174 | } |
206 | } |
|
|
207 | |
|
|
208 | /* now synthesize events for all fds where epoll fails, while select works... */ |
|
|
209 | for (i = epoll_epermcnt; i--; ) |
|
|
210 | { |
|
|
211 | int fd = epoll_eperms [i]; |
|
|
212 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); |
|
|
213 | |
|
|
214 | if (anfds [fd].emask & EV_EMASK_EPERM && events) |
|
|
215 | fd_event (EV_A_ fd, events); |
|
|
216 | else |
|
|
217 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; |
|
|
218 | } |
175 | } |
219 | } |
176 | |
220 | |
177 | int inline_size |
221 | int inline_size |
178 | epoll_init (EV_P_ int flags) |
222 | epoll_init (EV_P_ int flags) |
179 | { |
223 | { |
|
|
224 | #ifdef EPOLL_CLOEXEC |
|
|
225 | backend_fd = epoll_create1 (EPOLL_CLOEXEC); |
|
|
226 | |
|
|
227 | if (backend_fd <= 0) |
|
|
228 | #endif |
180 | backend_fd = epoll_create (256); |
229 | backend_fd = epoll_create (256); |
181 | |
230 | |
182 | if (backend_fd < 0) |
231 | if (backend_fd < 0) |
183 | return 0; |
232 | return 0; |
184 | |
233 | |
185 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
234 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
… | |
… | |
196 | |
245 | |
197 | void inline_size |
246 | void inline_size |
198 | epoll_destroy (EV_P) |
247 | epoll_destroy (EV_P) |
199 | { |
248 | { |
200 | ev_free (epoll_events); |
249 | ev_free (epoll_events); |
|
|
250 | array_free (epoll_eperm, EMPTY); |
201 | } |
251 | } |
202 | |
252 | |
203 | void inline_size |
253 | void inline_size |
204 | epoll_fork (EV_P) |
254 | epoll_fork (EV_P) |
205 | { |
255 | { |