… | |
… | |
 * limits the applicability over poll, so this is not a generic
 * poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */
57 | |
59 | |
58 | #include <sys/epoll.h> |
60 | #include <sys/epoll.h> |
59 | |
|
|
60 | void inline_size |
|
|
61 | unsigned_char_init (unsigned char *base, int count) |
|
|
62 | { |
|
|
63 | /* memset might be overkill */ |
|
|
64 | while (count--) |
|
|
65 | *base++ = 0; |
|
|
66 | } |
|
|
67 | |
61 | |
68 | static void |
62 | static void |
69 | epoll_modify (EV_P_ int fd, int oev, int nev) |
63 | epoll_modify (EV_P_ int fd, int oev, int nev) |
70 | { |
64 | { |
71 | struct epoll_event ev; |
65 | struct epoll_event ev; |
… | |
… | |
83 | return; |
77 | return; |
84 | |
78 | |
85 | oldmask = anfds [fd].emask; |
79 | oldmask = anfds [fd].emask; |
86 | anfds [fd].emask = nev; |
80 | anfds [fd].emask = nev; |
87 | |
81 | |
88 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
82 | /* store the generation counter in the upper 32 bits */ |
|
|
83 | ev.data.u64 = fd | ((uint64_t)++anfds [fd].egen << 32); |
89 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
84 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
90 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
85 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
91 | |
86 | |
92 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
87 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
93 | return; |
88 | return; |
94 | |
89 | |
95 | if (expect_true (errno == ENOENT)) |
90 | if (expect_true (errno == ENOENT)) |
96 | { |
91 | { |
97 | /* if ENOENT then the fd went away, so try to do the right thing */ |
92 | /* if ENOENT then the fd went away, so try to do the right thing */ |
98 | if (!nev) |
93 | if (!nev) |
99 | return; |
94 | goto dec_egen; |
100 | |
95 | |
101 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
96 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
102 | return; |
97 | return; |
103 | } |
98 | } |
104 | else if (expect_true (errno == EEXIST)) |
99 | else if (expect_true (errno == EEXIST)) |
105 | { |
100 | { |
106 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
101 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
107 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
102 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
|
|
103 | if (oldmask == nev) |
|
|
104 | goto dec_egen; |
|
|
105 | |
108 | if (oldmask == nev || !epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
106 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
109 | return; |
107 | return; |
110 | } |
108 | } |
111 | |
109 | |
112 | fd_kill (EV_A_ fd); |
110 | fd_kill (EV_A_ fd); |
|
|
111 | |
|
|
112 | dec_egen: |
|
|
113 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
|
|
114 | --anfds [fd].egen; |
113 | } |
115 | } |
114 | |
116 | |
115 | static void |
117 | static void |
116 | epoll_poll (EV_P_ ev_tstamp timeout) |
118 | epoll_poll (EV_P_ ev_tstamp timeout) |
117 | { |
119 | { |
… | |
… | |
119 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
121 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
120 | |
122 | |
121 | if (expect_false (eventcnt < 0)) |
123 | if (expect_false (eventcnt < 0)) |
122 | { |
124 | { |
123 | if (errno != EINTR) |
125 | if (errno != EINTR) |
124 | syserr ("(libev) epoll_wait"); |
126 | ev_syserr ("(libev) epoll_wait"); |
125 | |
127 | |
126 | return; |
128 | return; |
127 | } |
129 | } |
128 | |
130 | |
129 | for (i = 0; i < eventcnt; ++i) |
131 | for (i = 0; i < eventcnt; ++i) |
130 | { |
132 | { |
131 | struct epoll_event *ev = epoll_events + i; |
133 | struct epoll_event *ev = epoll_events + i; |
132 | |
134 | |
133 | int fd = ev->data.u64; |
135 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
|
|
136 | int want = anfds [fd].events; |
134 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
137 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
135 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
138 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
136 | int want = anfds [fd].events; |
139 | |
|
|
140 | /* check for spurious notification */ |
|
|
141 | if (expect_false (anfds [fd].egen != (unsigned char)(ev->data.u64 >> 32))) |
|
|
142 | { |
|
|
143 | /* recreate kernel state */ |
|
|
144 | postfork = 1; |
|
|
145 | continue; |
|
|
146 | } |
137 | |
147 | |
138 | if (expect_false (got & ~want)) |
148 | if (expect_false (got & ~want)) |
139 | { |
149 | { |
140 | anfds [fd].emask = want; |
150 | anfds [fd].emask = want; |
141 | |
151 | |
142 | /* we received an event but are not interested in it, try mod or del */ |
152 | /* we received an event but are not interested in it, try mod or del */ |
143 | /* I don't think we ever need MOD, but let's handle it anyways */ |
153 | /* I don't think we ever need MOD, but let's handle it anyways */ |
144 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
154 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
145 | | (want & EV_WRITE ? EPOLLOUT : 0); |
155 | | (want & EV_WRITE ? EPOLLOUT : 0); |
146 | |
156 | |
147 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
157 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
|
|
158 | { |
|
|
159 | postfork = 1; /* an error occured, recreate kernel state */ |
|
|
160 | continue; |
|
|
161 | } |
148 | } |
162 | } |
149 | |
163 | |
150 | fd_event (EV_A_ fd, got); |
164 | fd_event (EV_A_ fd, got); |
151 | } |
165 | } |
152 | |
166 | |
… | |
… | |
171 | |
185 | |
172 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
186 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
173 | backend_modify = epoll_modify; |
187 | backend_modify = epoll_modify; |
174 | backend_poll = epoll_poll; |
188 | backend_poll = epoll_poll; |
175 | |
189 | |
176 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
190 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
177 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
191 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
178 | |
192 | |
179 | return EVBACKEND_EPOLL; |
193 | return EVBACKEND_EPOLL; |
180 | } |
194 | } |
181 | |
195 | |
… | |
… | |
189 | epoll_fork (EV_P) |
203 | epoll_fork (EV_P) |
190 | { |
204 | { |
191 | close (backend_fd); |
205 | close (backend_fd); |
192 | |
206 | |
193 | while ((backend_fd = epoll_create (256)) < 0) |
207 | while ((backend_fd = epoll_create (256)) < 0) |
194 | syserr ("(libev) epoll_create"); |
208 | ev_syserr ("(libev) epoll_create"); |
195 | |
209 | |
196 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
210 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
197 | |
211 | |
198 | fd_rearm_all (EV_A); |
212 | fd_rearm_all (EV_A); |
199 | } |
213 | } |