1 | /* |
1 | /* |
2 | * libev kqueue backend |
2 | * libev kqueue backend |
3 | * |
3 | * |
4 | * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de> |
4 | * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016,2019 Marc Alexander Lehmann <libev@schmorp.de> |
5 | * All rights reserved. |
5 | * All rights reserved. |
6 | * |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
8 | * tion, are permitted provided that the following conditions are met: |
8 | * tion, are permitted provided that the following conditions are met: |
9 | * |
9 | * |
10 | * 1. Redistributions of source code must retain the above copyright notice, |
10 | * 1. Redistributions of source code must retain the above copyright notice, |
11 | * this list of conditions and the following disclaimer. |
11 | * this list of conditions and the following disclaimer. |
12 | * |
12 | * |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
15 | * documentation and/or other materials provided with the distribution. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- |
18 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- |
19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
19 | * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO |
20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- |
20 | * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- |
21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
21 | * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
… | |
… | |
37 | * either the BSD or the GPL. |
37 | * either the BSD or the GPL. |
38 | */ |
38 | */ |
39 | |
39 | |
40 | #include <sys/types.h> |
40 | #include <sys/types.h> |
41 | #include <sys/time.h> |
41 | #include <sys/time.h> |
42 | #include <sys/queue.h> |
|
|
43 | #include <sys/event.h> |
42 | #include <sys/event.h> |
44 | #include <string.h> |
43 | #include <string.h> |
45 | #include <errno.h> |
44 | #include <errno.h> |
46 | |
45 | |
/* append one change request for fd to the pending change list;
 * changes are submitted to the kernel in one batch by kqueue_poll */
inline_speed
void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, array_needsize_noinit);

  /* fill the slot just reserved above */
  EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
}
55 | |
55 | |
|
|
56 | /* OS X at least needs this */ |
|
|
57 | #ifndef EV_ENABLE |
|
|
58 | # define EV_ENABLE 0 |
|
|
59 | #endif |
56 | #ifndef NOTE_EOF |
60 | #ifndef NOTE_EOF |
57 | # define NOTE_EOF 0 |
61 | # define NOTE_EOF 0 |
58 | #endif |
62 | #endif |
59 | |
63 | |
60 | static void |
64 | static void |
… | |
… | |
71 | |
75 | |
72 | /* to detect close/reopen reliably, we have to re-add */ |
76 | /* to detect close/reopen reliably, we have to re-add */ |
73 | /* event requests even when oev == nev */ |
77 | /* event requests even when oev == nev */ |
74 | |
78 | |
75 | if (nev & EV_READ) |
79 | if (nev & EV_READ) |
76 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF); |
80 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); |
77 | |
81 | |
78 | if (nev & EV_WRITE) |
82 | if (nev & EV_WRITE) |
79 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF); |
83 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); |
80 | } |
84 | } |
81 | |
85 | |
82 | static void |
86 | static void |
83 | kqueue_poll (EV_P_ ev_tstamp timeout) |
87 | kqueue_poll (EV_P_ ev_tstamp timeout) |
84 | { |
88 | { |
… | |
… | |
91 | ev_free (kqueue_events); |
95 | ev_free (kqueue_events); |
92 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
96 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
93 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
97 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
94 | } |
98 | } |
95 | |
99 | |
96 | if (expect_false (suspend_cb)) suspend_cb (EV_A); |
100 | EV_RELEASE_CB; |
97 | ts.tv_sec = (time_t)timeout; |
101 | EV_TS_SET (ts, timeout); |
98 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
|
|
99 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
102 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
100 | if (expect_false (resume_cb)) resume_cb (EV_A); |
103 | EV_ACQUIRE_CB; |
101 | kqueue_changecnt = 0; |
104 | kqueue_changecnt = 0; |
102 | |
105 | |
103 | if (expect_false (res < 0)) |
106 | if (ecb_expect_false (res < 0)) |
104 | { |
107 | { |
105 | if (errno != EINTR) |
108 | if (errno != EINTR) |
106 | ev_syserr ("(libev) kevent"); |
109 | ev_syserr ("(libev) kqueue kevent"); |
107 | |
110 | |
108 | return; |
111 | return; |
109 | } |
112 | } |
110 | |
113 | |
111 | for (i = 0; i < res; ++i) |
114 | for (i = 0; i < res; ++i) |
112 | { |
115 | { |
113 | int fd = kqueue_events [i].ident; |
116 | int fd = kqueue_events [i].ident; |
114 | |
117 | |
115 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
118 | if (ecb_expect_false (kqueue_events [i].flags & EV_ERROR)) |
116 | { |
119 | { |
117 | int err = kqueue_events [i].data; |
120 | int err = kqueue_events [i].data; |
118 | |
121 | |
119 | /* we are only interested in errors for fds that we are interested in :) */ |
122 | /* we are only interested in errors for fds that we are interested in :) */ |
120 | if (anfds [fd].events) |
123 | if (anfds [fd].events) |
121 | { |
124 | { |
122 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
125 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
123 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
126 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
124 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
127 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
125 | { |
128 | { |
126 | if (fd_valid (fd)) |
129 | if (fd_valid (fd)) |
127 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
130 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
128 | else |
131 | else |
|
|
132 | { |
|
|
133 | assert (("libev: kqueue found invalid fd", 0)); |
129 | fd_kill (EV_A_ fd); |
134 | fd_kill (EV_A_ fd); |
|
|
135 | } |
130 | } |
136 | } |
131 | else /* on all other errors, we error out on the fd */ |
137 | else /* on all other errors, we error out on the fd */ |
|
|
138 | { |
|
|
139 | assert (("libev: kqueue found invalid fd", 0)); |
132 | fd_kill (EV_A_ fd); |
140 | fd_kill (EV_A_ fd); |
133 | } |
141 | } |
|
|
142 | } |
134 | } |
143 | } |
135 | else |
144 | else |
136 | fd_event ( |
145 | fd_event ( |
137 | EV_A_ |
146 | EV_A_ |
138 | fd, |
147 | fd, |
… | |
… | |
140 | : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE |
149 | : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE |
141 | : 0 |
150 | : 0 |
142 | ); |
151 | ); |
143 | } |
152 | } |
144 | |
153 | |
145 | if (expect_false (res == kqueue_eventmax)) |
154 | if (ecb_expect_false (res == kqueue_eventmax)) |
146 | { |
155 | { |
147 | ev_free (kqueue_events); |
156 | ev_free (kqueue_events); |
148 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); |
157 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); |
149 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
158 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
150 | } |
159 | } |
151 | } |
160 | } |
152 | |
161 | |
153 | int inline_size |
162 | inline_size |
|
|
163 | int |
154 | kqueue_init (EV_P_ int flags) |
164 | kqueue_init (EV_P_ int flags) |
155 | { |
165 | { |
156 | /* Initalize the kernel queue */ |
166 | /* initialize the kernel queue */ |
|
|
167 | kqueue_fd_pid = getpid (); |
157 | if ((backend_fd = kqueue ()) < 0) |
168 | if ((backend_fd = kqueue ()) < 0) |
158 | return 0; |
169 | return 0; |
159 | |
170 | |
160 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
171 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
161 | |
172 | |
162 | backend_fudge = 0.; |
173 | backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */ |
163 | backend_modify = kqueue_modify; |
174 | backend_modify = kqueue_modify; |
164 | backend_poll = kqueue_poll; |
175 | backend_poll = kqueue_poll; |
165 | |
176 | |
166 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
177 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
167 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
178 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
168 | |
179 | |
169 | kqueue_changes = 0; |
180 | kqueue_changes = 0; |
… | |
… | |
171 | kqueue_changecnt = 0; |
182 | kqueue_changecnt = 0; |
172 | |
183 | |
173 | return EVBACKEND_KQUEUE; |
184 | return EVBACKEND_KQUEUE; |
174 | } |
185 | } |
175 | |
186 | |
176 | void inline_size |
187 | inline_size |
|
|
188 | void |
177 | kqueue_destroy (EV_P) |
189 | kqueue_destroy (EV_P) |
178 | { |
190 | { |
179 | ev_free (kqueue_events); |
191 | ev_free (kqueue_events); |
180 | ev_free (kqueue_changes); |
192 | ev_free (kqueue_changes); |
181 | } |
193 | } |
182 | |
194 | |
/* re-create the kqueue after fork; kqueue descriptors are not inherited
 * usefully across fork, so a fresh one is needed in the child */
inline_size
void
kqueue_fork (EV_P)
{
  /* some BSD kernels don't just destroy the kqueue itself,
   * but also close the fd, which isn't documented, and
   * impossible to support properly.
   * we remember the pid of the kqueue call and only close
   * the fd if the pid is still the same.
   * this leaks fds on sane kernels, but BSD interfaces are
   * notoriously buggy and rarely get fixed.
   */
  pid_t newpid = getpid ();

  if (newpid == kqueue_fd_pid)
    close (backend_fd);

  kqueue_fd_pid = newpid;
  while ((backend_fd = kqueue ()) < 0)
    ev_syserr ("(libev) kqueue");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
196 | |
221 | |
|
|
222 | /* sys/event.h defines EV_ERROR */ |
|
|
223 | #undef EV_ERROR |
|
|
224 | |