/*
 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
… | |
… | |
33 | #include <sys/queue.h> |
32 | #include <sys/queue.h> |
34 | #include <sys/event.h> |
33 | #include <sys/event.h> |
35 | #include <string.h> |
34 | #include <string.h> |
36 | #include <errno.h> |
35 | #include <errno.h> |
37 | |
36 | |
/* backend state: the kqueue fd, the batched change list, and the
 * buffer that receives events from kevent().  Names use the kqueue_
 * prefix to match the functions below that reference them. */
static int kqueue_fd;
static struct kevent *kqueue_changes;   /* pending change requests */
static int kqueue_changemax, kqueue_changecnt;
static struct kevent *kqueue_events;    /* receive buffer for kevent() */
static int kqueue_eventmax;

/* Queue a single change request for the next kevent() call.
 * Changes are batched in kqueue_changes and submitted in kqueue_poll. */
static void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  struct kevent *ke;

  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  ke = &kqueue_changes [kqueue_changecnt - 1];
  memset (ke, 0, sizeof (struct kevent));
  ke->ident = fd;
  ke->filter = filter;
  ke->flags = flags;
  ke->fflags = fflags;
}
58 | |
52 | |
|
|
/* NOTE_EOF is not available on all kqueue platforms; it is only a hint, so 0 is a safe fallback */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif
|
|
56 | |
/* Translate libev interest (EV_READ/EV_WRITE) into kqueue filter changes.
 * To detect close/reopen reliably, we have to remove and re-add
 * event requests even when oev == nev. */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ, EV_DELETE, 0);

  if (oev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);

  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ, EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
78 | |
75 | |
/* Submit the batched change list and dispatch received events.
 * Waits at most `timeout` seconds for events to arrive. */
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  ts.tv_sec = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
  res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0;

  if (res < 0)
    {
      if (errno != EINTR)
        syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (kqueue_events [i].flags & EV_ERROR)
        {
          int err = kqueue_events [i].data;

          /*
           * errors that may happen:
           * EBADF happens when the file descriptor has been
           * closed,
           * ENOENT when the file descriptor was closed and
           * then reopened.
           * EINVAL for some reasons not understood; EINVAL
           * should not be returned ever; but FreeBSD does :-\
           */

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }

  /* if the receive buffer was completely filled, grow it for next time */
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
128 | |
155 | |
/* Probe for a working kqueue and set up backend state.
 * Returns EVMETHOD_KQUEUE on success, 0 when kqueue is unavailable or broken. */
static int
kqueue_init (EV_P_ int flags)
{
  struct kevent ch, ev;

  /* Initialize the kernel queue */
  if ((kqueue_fd = kqueue ()) < 0)
    return 0;

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  /* Check for Mac OS X kqueue bug. */
  ch.ident = -1;
  ch.filter = EVFILT_READ;
  ch.flags = EV_ADD;

  /*
   * If kqueue works, then kevent will succeed, and it will
   * stick an error in ev. If kqueue is broken, then
   * kevent will fail.
   */
  if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
      || ev.ident != -1
      || ev.flags != EV_ERROR)
    {
      /* detected broken kqueue */
      close (kqueue_fd);
      return 0;
    }

  method_fudge = 1e-3; /* needed to compensate for kevent returning early */
  method_modify = kqueue_modify;
  method_poll = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  kqueue_changes = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVMETHOD_KQUEUE;
}
|
|
199 | |
|
|
200 | static void |
|
|
201 | kqueue_destroy (EV_P) |
|
|
202 | { |
|
|
203 | close (kqueue_fd); |
|
|
204 | |
|
|
205 | ev_free (kqueue_events); |
|
|
206 | ev_free (kqueue_changes); |
|
|
207 | } |
|
|
208 | |
|
|
/* A kqueue fd is not inherited across fork(); recreate it in the child
 * and re-register all watched fds. */
static void
kqueue_fork (EV_P)
{
  close (kqueue_fd);

  while ((kqueue_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
|
|
222 | |