…
  struct kevent *ke;

  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  ke = &kqueue_changes [kqueue_changecnt - 1];
  memset (ke, 0, sizeof (struct kevent));
  ke->ident = fd;
  ke->filter = filter;
  ke->flags = flags;
  ke->fflags = fflags;
}
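
/* the change records queued above are not submitted immediately; kqueue_poll
 * below hands the whole changelist to the kernel in one kevent () call and
 * then resets kqueue_changecnt */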

#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif
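
/* NOTE_EOF is not available on every platform; the fallback to 0 above keeps
 * the symbol usable unconditionally (0 simply requests no extra fflags) */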
…
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

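  /* kevent () takes its timeout as a struct timespec, so split the
   * floating-point ev_tstamp into whole seconds and nanoseconds */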
  ts.tv_sec = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
  res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0;

  if (res < 0)
    {
      if (errno != EINTR)
…

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (kqueue_events [i].flags & EV_ERROR)
        {
          int err = kqueue_events [i].data;

          /*
           * errors that may happen
…
kqueue_init (EV_P_ int flags)
{
  struct kevent ch, ev;

  /* Initialize the kernel queue */
  if ((kqueue_fd = kqueue ()) < 0)
    return 0;

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  /* Check for Mac OS X kqueue bug. */
  ch.ident = -1;
  ch.filter = EVFILT_READ;
  ch.flags = EV_ADD;
…
  /*
   * If kqueue works, then kevent will succeed, and it will
   * stick an error in ev. If kqueue is broken, then
   * kevent will fail.
   */
  if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
      || ev.ident != -1
      || ev.flags != EV_ERROR)
    {
      /* detected broken kqueue */
      close (kqueue_fd);
      return 0;
    }

  backend_fudge = 1e-3; /* needed to compensate for kevent returning early */
  backend_modify = kqueue_modify;
…
}

static void
kqueue_destroy (EV_P)
{
  close (kqueue_fd);

  ev_free (kqueue_events);
  ev_free (kqueue_changes);
}

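/* a kqueue file descriptor is of no use after a fork (the kernel queue is
 * not inherited by the child), so open a fresh queue and re-register every
 * watched fd */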
static void
kqueue_fork (EV_P)
{
  close (kqueue_fd);

  while ((kqueue_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
