/*
 * libev kqueue backend
 *
 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 |
|
|
|
32 |
|
|
#include <sys/types.h> |
33 |
|
|
#include <sys/time.h> |
34 |
|
|
#include <sys/queue.h> |
35 |
|
|
#include <sys/event.h> |
36 |
|
|
#include <string.h> |
37 |
|
|
#include <errno.h> |
38 |
|
|
|
/* append one change request (fd, filter, flags, fflags) to the pending
 * change list; the request is submitted on the next kevent () call in
 * kqueue_poll. grows the change array as needed. */
void inline_speed
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  /* fix: removed unused local 'struct kevent *ke' (dead declaration) */
  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
}
49 |
|
|
|
50 |
root |
1.4 |
#ifndef NOTE_EOF |
51 |
|
|
# define NOTE_EOF 0 |
52 |
|
|
#endif |
53 |
|
|
|
/* translate a libev event-mask transition (oev -> nev) on one fd into
 * kqueue change requests via kqueue_change. */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  /* only bother deleting the old filters when the mask actually changed */
  if (nev != oev)
    {
      if (oev & EV_READ)
        kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);

      if (oev & EV_WRITE)
        kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /* the requested filters are (re-)added unconditionally — even when
   * oev == nev — to detect close/reopen of the fd reliably */
  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
/* backend poll: submit all queued change requests, wait up to 'timeout'
 * seconds for events, and dispatch results into the libev core. */
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors */
  /* (kevent reports per-change errors as events, so the receive array
   *  must be able to hold one slot per submitted change) */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  /* split the fractional-seconds timeout into the timespec kevent expects */
  ts.tv_sec = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
  res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0; /* all pending changes were handed to the kernel */

  if (expect_false (res < 0))
    {
      /* EINTR is a normal wakeup; anything else is fatal */
      if (errno != EINTR)
        syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (expect_false (kqueue_events [i].flags & EV_ERROR))
        {
          int err = kqueue_events [i].data; /* kevent stores the errno in .data */

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        /* normal event: map the kqueue filter back to libev's event bits */
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }

  /* the receive array was completely full, so more events may be
   * pending — grow it for the next iteration */
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
144 |
|
|
|
/* set up the kqueue backend: create the kernel queue, install the
 * backend callbacks and allocate the initial event buffer.
 * returns EVBACKEND_KQUEUE on success, 0 if kqueue () is unavailable. */
int inline_size
kqueue_init (EV_P_ int flags)
{
  /* fix: removed unused locals 'struct kevent ch, ev' (dead declarations) */

  /* initialize the kernel queue */
  if ((backend_fd = kqueue ()) < 0)
    return 0;

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  backend_fudge  = 0.;
  backend_modify = kqueue_modify;
  backend_poll   = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  /* the change list starts out empty and is grown on demand */
  kqueue_changes   = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVBACKEND_KQUEUE;
}
169 |
|
|
|
170 |
root |
1.27 |
void inline_size |
171 |
root |
1.7 |
kqueue_destroy (EV_P) |
172 |
|
|
{ |
173 |
root |
1.9 |
ev_free (kqueue_events); |
174 |
|
|
ev_free (kqueue_changes); |
175 |
root |
1.7 |
} |
176 |
|
|
|
/* called in the child after a fork: discard the old kqueue descriptor,
 * create a fresh queue and re-register interest in every watched fd. */
void inline_size
kqueue_fork (EV_P)
{
  close (backend_fd);

  /* keep trying until we have a working queue; syserr reports failures */
  for (;;)
    {
      backend_fd = kqueue ();

      if (backend_fd >= 0)
        break;

      syserr ("(libev) kqueue");
    }

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
190 |
|
|
|