/*
 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
…

#include <sys/queue.h>
#include <sys/event.h>
#include <string.h>
#include <errno.h>

/* backend state: the kqueue fd, the queued change requests and the buffer */
/* that receives events from the kernel */
static int kqueue_fd;
static struct kevent *kqueue_changes;
static int kqueue_changemax, kqueue_changecnt;
static struct kevent *kqueue_events;
static int kqueue_eventmax;
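
/* queue a single change request; the whole batch is submitted to the kernel */
/* with the next kevent call in kqueue_poll */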
static void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
}
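
/* NOTE_EOF is not defined on all platforms; fall back to 0 (no extra fflags) there */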
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif
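
/* update kernel interest for fd from the old event set oev to the new set nev */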
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev != nev)
    {
      if (oev & EV_READ)
        kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);

      if (oev & EV_WRITE)
        kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /* to detect close/reopen reliably, we have to re-add */
  /* event requests even when oev == nev */
  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
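
/* submit all queued changes and wait up to timeout for events, */
/* then dispatch them to the core via fd_event */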
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  ts.tv_sec = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
  res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0;

  if (res < 0)
    {
      if (errno != EINTR)
        syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (expect_false (kqueue_events [i].flags & EV_ERROR))
        {
          int err = kqueue_events [i].data;

          /*
           * errors that may happen
           * EBADF happens when the file descriptor has been
           * closed,
           * ENOENT when the file descriptor was closed and
           * then reopened.
           * EINVAL for some reasons not understood; EINVAL
           * should not be returned ever; but FreeBSD does :-\
           * An error is also indicated when a callback deletes
           * an event we are still processing; in that case
           * the data field is set to ENOENT.
           */

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }
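
  /* if the kernel filled the whole buffer, more events may be pending; */
  /* grow the buffer for the next iteration */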
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
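
/* create the kqueue, verify that it actually works, and set up the backend hooks */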
static int
kqueue_init (EV_P_ int flags)
{
  struct kevent ch, ev;

  /* Initialize the kernel queue */
  if ((kqueue_fd = kqueue ()) < 0)
    return 0;

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  /* Check for Mac OS X kqueue bug. */
  ch.ident = -1;
  ch.filter = EVFILT_READ;
  ch.flags = EV_ADD;

  /*
   * If kqueue works, then kevent will succeed, and it will
   * stick an error in ev. If kqueue is broken, then
   * kevent will fail.
   */
  if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
      || ev.ident != -1
      || ev.flags != EV_ERROR)
    {
      /* detected broken kqueue */
      close (kqueue_fd);
      return 0;
    }

  backend_fudge = 1e-3; /* needed to compensate for kevent returning early */
  backend_modify = kqueue_modify;
  backend_poll = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  kqueue_changes = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVBACKEND_KQUEUE;
}
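
/* release the kqueue fd and the per-loop buffers */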
static void
kqueue_destroy (EV_P)
{
  close (kqueue_fd);

  ev_free (kqueue_events);
  ev_free (kqueue_changes);
}
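
/* a kqueue is not inherited across fork, so the child has to create a fresh one */
/* and re-register its interest set */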
static void
kqueue_fork (EV_P)
{
  close (kqueue_fd);

  while ((kqueue_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}