/*
 * libev kqueue backend
 *
 * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
… | |
… | |
50 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
50 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
51 | |
51 | |
52 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
52 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
53 | } |
53 | } |
54 | |
54 | |
|
|
/* OS X at least needs this */
#ifndef EV_ENABLE
# define EV_ENABLE 0
#endif

/* not all BSDs define NOTE_EOF; it only refines EOF reporting, so 0 is a safe fallback */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif
58 | |
62 | |
59 | static void |
63 | static void |
… | |
… | |
70 | |
74 | |
71 | /* to detect close/reopen reliably, we have to re-add */ |
75 | /* to detect close/reopen reliably, we have to re-add */ |
72 | /* event requests even when oev == nev */ |
76 | /* event requests even when oev == nev */ |
73 | |
77 | |
74 | if (nev & EV_READ) |
78 | if (nev & EV_READ) |
75 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF); |
79 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); |
76 | |
80 | |
77 | if (nev & EV_WRITE) |
81 | if (nev & EV_WRITE) |
78 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF); |
82 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); |
79 | } |
83 | } |
80 | |
84 | |
81 | static void |
85 | static void |
82 | kqueue_poll (EV_P_ ev_tstamp timeout) |
86 | kqueue_poll (EV_P_ ev_tstamp timeout) |
83 | { |
87 | { |
… | |
… | |
91 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
95 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
92 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
96 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
93 | } |
97 | } |
94 | |
98 | |
95 | EV_RELEASE_CB; |
99 | EV_RELEASE_CB; |
96 | ts.tv_sec = (time_t)timeout; |
100 | EV_TS_SET (ts, timeout); |
97 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
|
|
98 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
101 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
99 | EV_ACQUIRE_CB; |
102 | EV_ACQUIRE_CB; |
100 | kqueue_changecnt = 0; |
103 | kqueue_changecnt = 0; |
101 | |
104 | |
102 | if (expect_false (res < 0)) |
105 | if (expect_false (res < 0)) |
103 | { |
106 | { |
104 | if (errno != EINTR) |
107 | if (errno != EINTR) |
105 | ev_syserr ("(libev) kevent"); |
108 | ev_syserr ("(libev) kevent"); |
106 | |
109 | |
107 | return; |
110 | return; |
108 | } |
111 | } |
109 | |
112 | |
110 | for (i = 0; i < res; ++i) |
113 | for (i = 0; i < res; ++i) |
111 | { |
114 | { |
112 | int fd = kqueue_events [i].ident; |
115 | int fd = kqueue_events [i].ident; |
113 | |
116 | |
114 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
117 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
115 | { |
118 | { |
116 | int err = kqueue_events [i].data; |
119 | int err = kqueue_events [i].data; |
117 | |
120 | |
118 | /* we are only interested in errors for fds that we are interested in :) */ |
121 | /* we are only interested in errors for fds that we are interested in :) */ |
119 | if (anfds [fd].events) |
122 | if (anfds [fd].events) |
120 | { |
123 | { |
121 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
124 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
122 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
125 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
123 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
126 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
124 | { |
127 | { |
125 | if (fd_valid (fd)) |
128 | if (fd_valid (fd)) |
… | |
… | |
127 | else |
130 | else |
128 | fd_kill (EV_A_ fd); |
131 | fd_kill (EV_A_ fd); |
129 | } |
132 | } |
130 | else /* on all other errors, we error out on the fd */ |
133 | else /* on all other errors, we error out on the fd */ |
131 | fd_kill (EV_A_ fd); |
134 | fd_kill (EV_A_ fd); |
132 | } |
135 | } |
133 | } |
136 | } |
134 | else |
137 | else |
135 | fd_event ( |
138 | fd_event ( |
136 | EV_A_ |
139 | EV_A_ |
137 | fd, |
140 | fd, |
… | |
… | |
150 | } |
153 | } |
151 | |
154 | |
152 | int inline_size |
155 | int inline_size |
153 | kqueue_init (EV_P_ int flags) |
156 | kqueue_init (EV_P_ int flags) |
154 | { |
157 | { |
155 | /* Initalize the kernel queue */ |
158 | /* initialize the kernel queue */ |
|
|
159 | kqueue_fd_pid = getpid (); |
156 | if ((backend_fd = kqueue ()) < 0) |
160 | if ((backend_fd = kqueue ()) < 0) |
157 | return 0; |
161 | return 0; |
158 | |
162 | |
159 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
163 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
160 | |
164 | |
161 | backend_fudge = 0.; |
165 | backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */ |
162 | backend_modify = kqueue_modify; |
166 | backend_modify = kqueue_modify; |
163 | backend_poll = kqueue_poll; |
167 | backend_poll = kqueue_poll; |
164 | |
168 | |
165 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
169 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
166 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
170 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
167 | |
171 | |
168 | kqueue_changes = 0; |
172 | kqueue_changes = 0; |
… | |
… | |
180 | } |
184 | } |
181 | |
185 | |
182 | void inline_size |
186 | void inline_size |
183 | kqueue_fork (EV_P) |
187 | kqueue_fork (EV_P) |
184 | { |
188 | { |
|
|
189 | /* some BSD kernels don't just destroy the kqueue itself, |
|
|
190 | * but also close the fd, which isn't documented, and |
|
|
191 | * impossible to support properly. |
|
|
192 | * we remember the pid of the kqueue call and only close |
|
|
193 | * the fd if the pid is still the same. |
|
|
194 | * this leaks fds on sane kernels, but BSD interfaces are |
|
|
195 | * notoriously buggy and rarely get fixed. |
|
|
196 | */ |
|
|
197 | pid_t newpid = getpid (); |
|
|
198 | |
|
|
199 | if (newpid == kqueue_fd_pid) |
185 | close (backend_fd); |
200 | close (backend_fd); |
186 | |
201 | |
|
|
202 | kqueue_fd_pid = newpid; |
187 | while ((backend_fd = kqueue ()) < 0) |
203 | while ((backend_fd = kqueue ()) < 0) |
188 | ev_syserr ("(libev) kqueue"); |
204 | ev_syserr ("(libev) kqueue"); |
189 | |
205 | |
190 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
206 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
191 | |
207 | |
192 | /* re-register interest in fds */ |
208 | /* re-register interest in fds */ |
193 | fd_rearm_all (EV_A); |
209 | fd_rearm_all (EV_A); |
194 | } |
210 | } |
195 | |
211 | |
|
|
/* sys/event.h defines EV_ERROR, which collides with libev's own EV_ERROR event mask */
#undef EV_ERROR