… | |
… | |
 * limits the applicability over poll, so this is not a generic
 * poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */

#include <sys/epoll.h>
59 | |
|
|
60 | void inline_size |
|
|
61 | unsigned_char_init (unsigned char *base, int count) |
|
|
62 | { |
|
|
63 | /* memset might be overkill */ |
|
|
64 | while (count--) |
|
|
65 | *base++ = 0; |
|
|
66 | } |
|
|
67 | |
61 | |
68 | static void |
62 | static void |
69 | epoll_modify (EV_P_ int fd, int oev, int nev) |
63 | epoll_modify (EV_P_ int fd, int oev, int nev) |
70 | { |
64 | { |
71 | struct epoll_event ev; |
65 | struct epoll_event ev; |
… | |
… | |
74 | /* |
68 | /* |
75 | * we handle EPOLL_CTL_DEL by ignoring it here |
69 | * we handle EPOLL_CTL_DEL by ignoring it here |
76 | * on the assumption that the fd is gone anyways |
70 | * on the assumption that the fd is gone anyways |
77 | * if that is wrong, we have to handle the spurious |
71 | * if that is wrong, we have to handle the spurious |
78 | * event in epoll_poll. |
72 | * event in epoll_poll. |
79 | * the fd is later added, we try to ADD it, and, if that |
73 | * if the fd is added again, we try to ADD it, and, if that |
80 | * fails, we assume it still has the same eventmask. |
74 | * fails, we assume it still has the same eventmask. |
81 | */ |
75 | */ |
82 | if (!nev) |
76 | if (!nev) |
83 | return; |
77 | return; |
84 | |
78 | |
85 | oldmask = anfds [fd].emask; |
79 | oldmask = anfds [fd].emask; |
86 | anfds [fd].emask = nev; |
80 | anfds [fd].emask = nev; |
87 | |
81 | |
88 | /* store the generation counter in the upper 32 bits */ |
82 | /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ |
89 | ev.data.u64 = fd | ((uint64_t)++anfds [fd].egen << 32); |
83 | ev.data.u64 = (uint64_t)(uint32_t)fd |
|
|
84 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
90 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
85 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
91 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
86 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
92 | |
87 | |
93 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
88 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
94 | return; |
89 | return; |
… | |
… | |
127 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
122 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
128 | |
123 | |
129 | if (expect_false (eventcnt < 0)) |
124 | if (expect_false (eventcnt < 0)) |
130 | { |
125 | { |
131 | if (errno != EINTR) |
126 | if (errno != EINTR) |
132 | syserr ("(libev) epoll_wait"); |
127 | ev_syserr ("(libev) epoll_wait"); |
133 | |
128 | |
134 | return; |
129 | return; |
135 | } |
130 | } |
136 | |
131 | |
137 | for (i = 0; i < eventcnt; ++i) |
132 | for (i = 0; i < eventcnt; ++i) |
138 | { |
133 | { |
139 | struct epoll_event *ev = epoll_events + i; |
134 | struct epoll_event *ev = epoll_events + i; |
140 | |
135 | |
141 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
136 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
|
|
137 | int want = anfds [fd].events; |
142 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
138 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
143 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
139 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
144 | int want = anfds [fd].events; |
|
|
145 | |
140 | |
146 | if (anfds [fd].egen != (unsigned char)(ev->data.u64 >> 32)) |
141 | /* check for spurious notification */ |
147 | /*fprintf (stderr, "spurious notification fd %d, %d vs %d\n", fd, (int)(ev->data.u64 >> 32), anfds [fd].egen);*/ |
142 | if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
|
|
143 | { |
|
|
144 | /* recreate kernel state */ |
|
|
145 | postfork = 1; |
148 | continue; |
146 | continue; |
|
|
147 | } |
149 | |
148 | |
150 | if (expect_false (got & ~want)) |
149 | if (expect_false (got & ~want)) |
151 | { |
150 | { |
152 | anfds [fd].emask = want; |
151 | anfds [fd].emask = want; |
153 | |
152 | |
154 | /* we received an event but are not interested in it, try mod or del */ |
153 | /* we received an event but are not interested in it, try mod or del */ |
155 | /* I don't think we ever need MOD, but let's handle it anyways */ |
154 | /* I don't think we ever need MOD, but let's handle it anyways */ |
156 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
155 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
157 | | (want & EV_WRITE ? EPOLLOUT : 0); |
156 | | (want & EV_WRITE ? EPOLLOUT : 0); |
158 | |
157 | |
159 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
158 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
|
|
159 | { |
|
|
160 | postfork = 1; /* an error occured, recreate kernel state */ |
|
|
161 | continue; |
|
|
162 | } |
160 | } |
163 | } |
161 | |
164 | |
162 | fd_event (EV_A_ fd, got); |
165 | fd_event (EV_A_ fd, got); |
163 | } |
166 | } |
164 | |
167 | |
… | |
… | |
183 | |
186 | |
184 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
187 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
185 | backend_modify = epoll_modify; |
188 | backend_modify = epoll_modify; |
186 | backend_poll = epoll_poll; |
189 | backend_poll = epoll_poll; |
187 | |
190 | |
188 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
191 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
189 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
192 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
190 | |
193 | |
191 | return EVBACKEND_EPOLL; |
194 | return EVBACKEND_EPOLL; |
192 | } |
195 | } |
193 | |
196 | |
… | |
… | |
201 | epoll_fork (EV_P) |
204 | epoll_fork (EV_P) |
202 | { |
205 | { |
203 | close (backend_fd); |
206 | close (backend_fd); |
204 | |
207 | |
205 | while ((backend_fd = epoll_create (256)) < 0) |
208 | while ((backend_fd = epoll_create (256)) < 0) |
206 | syserr ("(libev) epoll_create"); |
209 | ev_syserr ("(libev) epoll_create"); |
207 | |
210 | |
208 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
211 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
209 | |
212 | |
210 | fd_rearm_all (EV_A); |
213 | fd_rearm_all (EV_A); |
211 | } |
214 | } |