1 | /* |
1 | /* |
2 | * libev kqueue backend |
2 | * libev kqueue backend |
3 | * |
3 | * |
4 | * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libev@schmorp.de> |
4 | * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de> |
5 | * All rights reserved. |
5 | * All rights reserved. |
6 | * |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
8 | * tion, are permitted provided that the following conditions are met: |
8 | * tion, are permitted provided that the following conditions are met: |
9 | * |
9 | * |
10 | * 1. Redistributions of source code must retain the above copyright notice, |
10 | * 1. Redistributions of source code must retain the above copyright notice, |
11 | * this list of conditions and the following disclaimer. |
11 | * this list of conditions and the following disclaimer. |
12 | * |
12 | * |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
15 | * documentation and/or other materials provided with the distribution. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- |
18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- |
19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- |
20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- |
21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
… | |
… | |
37 | * either the BSD or the GPL. |
37 | * either the BSD or the GPL. |
38 | */ |
38 | */ |
39 | |
39 | |
40 | #include <sys/types.h> |
40 | #include <sys/types.h> |
41 | #include <sys/time.h> |
41 | #include <sys/time.h> |
42 | #include <sys/queue.h> |
|
|
43 | #include <sys/event.h> |
42 | #include <sys/event.h> |
44 | #include <string.h> |
43 | #include <string.h> |
45 | #include <errno.h> |
44 | #include <errno.h> |
46 | |
45 | |
47 | void inline_speed |
46 | void inline_speed |
… | |
… | |
51 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
50 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
52 | |
51 | |
53 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
52 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
54 | } |
53 | } |
55 | |
54 | |
|
|
/* OS X at least needs this */
#ifndef EV_ENABLE
# define EV_ENABLE 0
#endif

/* some systems lack NOTE_EOF; 0 simply disables EOF notification via fflags */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif
59 | |
62 | |
60 | static void |
63 | static void |
… | |
… | |
71 | |
74 | |
72 | /* to detect close/reopen reliably, we have to re-add */ |
75 | /* to detect close/reopen reliably, we have to re-add */ |
73 | /* event requests even when oev == nev */ |
76 | /* event requests even when oev == nev */ |
74 | |
77 | |
75 | if (nev & EV_READ) |
78 | if (nev & EV_READ) |
76 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF); |
79 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); |
77 | |
80 | |
78 | if (nev & EV_WRITE) |
81 | if (nev & EV_WRITE) |
79 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF); |
82 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); |
80 | } |
83 | } |
81 | |
84 | |
82 | static void |
85 | static void |
83 | kqueue_poll (EV_P_ ev_tstamp timeout) |
86 | kqueue_poll (EV_P_ ev_tstamp timeout) |
84 | { |
87 | { |
… | |
… | |
92 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
95 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
93 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
96 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
94 | } |
97 | } |
95 | |
98 | |
96 | EV_RELEASE_CB; |
99 | EV_RELEASE_CB; |
97 | ts.tv_sec = (time_t)timeout; |
100 | EV_TS_SET (ts, timeout); |
98 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
|
|
99 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
101 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
100 | EV_ACQUIRE_CB; |
102 | EV_ACQUIRE_CB; |
101 | kqueue_changecnt = 0; |
103 | kqueue_changecnt = 0; |
102 | |
104 | |
103 | if (expect_false (res < 0)) |
105 | if (expect_false (res < 0)) |
104 | { |
106 | { |
105 | if (errno != EINTR) |
107 | if (errno != EINTR) |
106 | ev_syserr ("(libev) kevent"); |
108 | ev_syserr ("(libev) kevent"); |
107 | |
109 | |
108 | return; |
110 | return; |
109 | } |
111 | } |
110 | |
112 | |
111 | for (i = 0; i < res; ++i) |
113 | for (i = 0; i < res; ++i) |
112 | { |
114 | { |
113 | int fd = kqueue_events [i].ident; |
115 | int fd = kqueue_events [i].ident; |
114 | |
116 | |
115 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
117 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
116 | { |
118 | { |
117 | int err = kqueue_events [i].data; |
119 | int err = kqueue_events [i].data; |
118 | |
120 | |
119 | /* we are only interested in errors for fds that we are interested in :) */ |
121 | /* we are only interested in errors for fds that we are interested in :) */ |
120 | if (anfds [fd].events) |
122 | if (anfds [fd].events) |
121 | { |
123 | { |
122 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
124 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
123 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
125 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
124 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
126 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
125 | { |
127 | { |
126 | if (fd_valid (fd)) |
128 | if (fd_valid (fd)) |
… | |
… | |
128 | else |
130 | else |
129 | fd_kill (EV_A_ fd); |
131 | fd_kill (EV_A_ fd); |
130 | } |
132 | } |
131 | else /* on all other errors, we error out on the fd */ |
133 | else /* on all other errors, we error out on the fd */ |
132 | fd_kill (EV_A_ fd); |
134 | fd_kill (EV_A_ fd); |
133 | } |
135 | } |
134 | } |
136 | } |
135 | else |
137 | else |
136 | fd_event ( |
138 | fd_event ( |
137 | EV_A_ |
139 | EV_A_ |
138 | fd, |
140 | fd, |
… | |
… | |
151 | } |
153 | } |
152 | |
154 | |
153 | int inline_size |
155 | int inline_size |
154 | kqueue_init (EV_P_ int flags) |
156 | kqueue_init (EV_P_ int flags) |
155 | { |
157 | { |
156 | /* Initalize the kernel queue */ |
158 | /* initialize the kernel queue */ |
|
|
159 | kqueue_fd_pid = getpid (); |
157 | if ((backend_fd = kqueue ()) < 0) |
160 | if ((backend_fd = kqueue ()) < 0) |
158 | return 0; |
161 | return 0; |
159 | |
162 | |
160 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
163 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
161 | |
164 | |
162 | backend_fudge = 0.; |
165 | backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */ |
163 | backend_modify = kqueue_modify; |
166 | backend_modify = kqueue_modify; |
164 | backend_poll = kqueue_poll; |
167 | backend_poll = kqueue_poll; |
165 | |
168 | |
166 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
169 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
167 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
170 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
168 | |
171 | |
169 | kqueue_changes = 0; |
172 | kqueue_changes = 0; |
… | |
… | |
181 | } |
184 | } |
182 | |
185 | |
/* re-create the kqueue after fork: kqueue fds are not inherited by children,
 * so the backend fd must be replaced and all fd interest re-registered. */
void inline_size
kqueue_fork (EV_P)
{
  /* some BSD kernels don't just destroy the kqueue itself,
   * but also close the fd, which isn't documented, and
   * impossible to support properly.
   * we remember the pid of the kqueue call and only close
   * the fd if the pid is still the same.
   * this leaks fds on sane kernels, but BSD interfaces are
   * notoriously buggy and rarely get fixed.
   */
  pid_t newpid = getpid ();

  if (newpid == kqueue_fd_pid)
    close (backend_fd);

  kqueue_fd_pid = newpid;
  while ((backend_fd = kqueue ()) < 0)
    ev_syserr ("(libev) kqueue");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
196 | |
211 | |
|
|
212 | /* sys/event.h defines EV_ERROR */ |
|
|
213 | #undef EV_ERROR |
|
|
214 | |