… | |
… | |
 * limits the applicability over poll, so this is not a generic
 * poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */
57 | |
59 | |
58 | #include <sys/epoll.h> |
60 | #include <sys/epoll.h> |
59 | |
|
|
60 | void inline_size |
|
|
61 | unsigned_char_init (unsigned char *base, int count) |
|
|
62 | { |
|
|
63 | /* memset might be overkill */ |
|
|
64 | while (count--) |
|
|
65 | *base++ = 0; |
|
|
66 | } |
|
|
67 | |
61 | |
68 | static void |
62 | static void |
69 | epoll_modify (EV_P_ int fd, int oev, int nev) |
63 | epoll_modify (EV_P_ int fd, int oev, int nev) |
70 | { |
64 | { |
71 | struct epoll_event ev; |
65 | struct epoll_event ev; |
… | |
… | |
83 | return; |
77 | return; |
84 | |
78 | |
85 | oldmask = anfds [fd].emask; |
79 | oldmask = anfds [fd].emask; |
86 | anfds [fd].emask = nev; |
80 | anfds [fd].emask = nev; |
87 | |
81 | |
88 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
82 | /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ |
|
|
83 | ev.data.u64 = (uint64_t)(uint32_t)fd |
|
|
84 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
89 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
85 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
90 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
86 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
91 | |
87 | |
92 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
88 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
93 | return; |
89 | return; |
94 | |
90 | |
95 | if (expect_true (errno == ENOENT)) |
91 | if (expect_true (errno == ENOENT)) |
96 | { |
92 | { |
97 | /* if ENOENT then the fd went away, so try to do the right thing */ |
93 | /* if ENOENT then the fd went away, so try to do the right thing */ |
98 | if (!nev) |
94 | if (!nev) |
99 | return; |
95 | goto dec_egen; |
100 | |
96 | |
101 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
97 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
102 | return; |
98 | return; |
103 | } |
99 | } |
104 | else if (expect_true (errno == EEXIST)) |
100 | else if (expect_true (errno == EEXIST)) |
105 | { |
101 | { |
106 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
102 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
107 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
103 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
|
|
104 | if (oldmask == nev) |
|
|
105 | goto dec_egen; |
|
|
106 | |
108 | if (oldmask == nev || !epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
107 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
109 | return; |
108 | return; |
110 | } |
109 | } |
111 | |
110 | |
112 | fd_kill (EV_A_ fd); |
111 | fd_kill (EV_A_ fd); |
|
|
112 | |
|
|
113 | dec_egen: |
|
|
114 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
|
|
115 | --anfds [fd].egen; |
113 | } |
116 | } |
114 | |
117 | |
115 | static void |
118 | static void |
116 | epoll_poll (EV_P_ ev_tstamp timeout) |
119 | epoll_poll (EV_P_ ev_tstamp timeout) |
117 | { |
120 | { |
… | |
… | |
119 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
122 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
120 | |
123 | |
121 | if (expect_false (eventcnt < 0)) |
124 | if (expect_false (eventcnt < 0)) |
122 | { |
125 | { |
123 | if (errno != EINTR) |
126 | if (errno != EINTR) |
124 | syserr ("(libev) epoll_wait"); |
127 | ev_syserr ("(libev) epoll_wait"); |
125 | |
128 | |
126 | return; |
129 | return; |
127 | } |
130 | } |
128 | |
131 | |
129 | for (i = 0; i < eventcnt; ++i) |
132 | for (i = 0; i < eventcnt; ++i) |
130 | { |
133 | { |
131 | struct epoll_event *ev = epoll_events + i; |
134 | struct epoll_event *ev = epoll_events + i; |
132 | |
135 | |
133 | int fd = ev->data.u64; |
136 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
|
|
137 | int want = anfds [fd].events; |
134 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
138 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
135 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
139 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
136 | int want = anfds [fd].events; |
140 | |
|
|
141 | /* check for spurious notification */ |
|
|
142 | if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
|
|
143 | { |
|
|
144 | /* recreate kernel state */ |
|
|
145 | postfork = 1; |
|
|
146 | continue; |
|
|
147 | } |
137 | |
148 | |
138 | if (expect_false (got & ~want)) |
149 | if (expect_false (got & ~want)) |
139 | { |
150 | { |
140 | anfds [fd].emask = want; |
151 | anfds [fd].emask = want; |
141 | |
152 | |
142 | /* we received an event but are not interested in it, try mod or del */ |
153 | /* we received an event but are not interested in it, try mod or del */ |
143 | /* I don't think we ever need MOD, but let's handle it anyways */ |
154 | /* I don't think we ever need MOD, but let's handle it anyways */ |
144 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
155 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
145 | | (want & EV_WRITE ? EPOLLOUT : 0); |
156 | | (want & EV_WRITE ? EPOLLOUT : 0); |
146 | |
157 | |
147 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
158 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
|
|
159 | { |
|
|
160 | postfork = 1; /* an error occured, recreate kernel state */ |
|
|
161 | continue; |
|
|
162 | } |
148 | } |
163 | } |
149 | |
164 | |
150 | fd_event (EV_A_ fd, got); |
165 | fd_event (EV_A_ fd, got); |
151 | } |
166 | } |
152 | |
167 | |
… | |
… | |
171 | |
186 | |
172 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
187 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
173 | backend_modify = epoll_modify; |
188 | backend_modify = epoll_modify; |
174 | backend_poll = epoll_poll; |
189 | backend_poll = epoll_poll; |
175 | |
190 | |
176 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
191 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
177 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
192 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
178 | |
193 | |
179 | return EVBACKEND_EPOLL; |
194 | return EVBACKEND_EPOLL; |
180 | } |
195 | } |
181 | |
196 | |
… | |
… | |
189 | epoll_fork (EV_P) |
204 | epoll_fork (EV_P) |
190 | { |
205 | { |
191 | close (backend_fd); |
206 | close (backend_fd); |
192 | |
207 | |
193 | while ((backend_fd = epoll_create (256)) < 0) |
208 | while ((backend_fd = epoll_create (256)) < 0) |
194 | syserr ("(libev) epoll_create"); |
209 | ev_syserr ("(libev) epoll_create"); |
195 | |
210 | |
196 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
211 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
197 | |
212 | |
198 | fd_rearm_all (EV_A); |
213 | fd_rearm_all (EV_A); |
199 | } |
214 | } |