/*
 * libev kqueue backend
 *
 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* kernel queue interface plus the libc bits this backend needs */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <string.h>
#include <errno.h>
/* append one change request (fd/filter/flags/fflags) to the pending change
 * list; the whole list is handed to the kernel in a single kevent () call
 * by the next kqueue_poll () */
static void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  /* grow the change array if necessary, then fill in the new last slot */
  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
}
/* NOTE_EOF is missing on some older BSDs; it is only a hint to the kernel,
 * so 0 is a safe no-op fallback */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif

/* translate a libev event-mask transition (oev -> nev) for one fd into
 * kevent change requests (delete filters no longer wanted, re-add wanted
 * ones) */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  /* drop filters only when the mask actually changed */
  if (oev != nev)
    {
      if (oev & EV_READ)
        kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);

      if (oev & EV_WRITE)
        kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /* to detect close/reopen reliably, we have to re-add */
  /* event requests even when oev == nev */

  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
/* submit all queued change requests, wait up to 'timeout' seconds for
 * events, and feed the results back into the libev core */
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  /* split the fractional-second timeout into the timespec kevent expects */
  ts.tv_sec = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
  /* one syscall both registers the pending changes and collects events */
  res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0;

  if (res < 0)
    {
      /* EINTR is expected (signal delivery); anything else is fatal */
      if (errno != EINTR)
        syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      /* note: this is the kernel's EV_ERROR flag, reported per-changelist
       * entry; the error code itself arrives in the data field */
      if (expect_false (kqueue_events [i].flags & EV_ERROR))
        {
          int err = kqueue_events [i].data;

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        /* normal event: map the kqueue filter back to a libev event bit */
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }

  /* the event buffer was completely filled, so more events may be pending;
   * double its size for the next iteration */
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
/* set up the kqueue backend: create the kernel queue and initialise the
 * backend state; returns EVBACKEND_KQUEUE on success, 0 on failure */
static int
kqueue_init (EV_P_ int flags)
{
  /* initialise the kernel queue */
  if ((backend_fd = kqueue ()) < 0)
    return 0;

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  /* fudge *might* be zero from the documentation, but bsd docs are notoriously wrong */
  backend_fudge  = 1e-3; /* needed to compensate for kevent returning early */
  backend_modify = kqueue_modify;
  backend_poll   = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  /* change list starts empty and grows on demand in kqueue_change () */
  kqueue_changes   = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVBACKEND_KQUEUE;
}
/* tear down the backend: close the kernel queue and release the event and
 * change buffers */
static void
kqueue_destroy (EV_P)
{
  close (backend_fd);

  ev_free (kqueue_events);
  ev_free (kqueue_changes);
}
/* after fork, the kqueue fd is not inherited usefully by the child, so
 * recreate the kernel queue and re-register all watched fds */
static void
kqueue_fork (EV_P)
{
  close (backend_fd);

  /* retry until we get a queue; syserr aborts on persistent failure */
  while ((backend_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}