… | |
… | |
#include <sys/queue.h>
#include <sys/event.h>
#include <string.h>
#include <errno.h>
38 | |
38 | |
39 | static void |
39 | void inline_speed |
40 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) |
40 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) |
41 | { |
41 | { |
42 | struct kevent *ke; |
|
|
43 | |
|
|
44 | ++kqueue_changecnt; |
42 | ++kqueue_changecnt; |
45 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
43 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
46 | |
44 | |
47 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
45 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
48 | } |
46 | } |
… | |
… | |
81 | |
79 | |
82 | /* need to resize so there is enough space for errors */ |
80 | /* need to resize so there is enough space for errors */ |
83 | if (kqueue_changecnt > kqueue_eventmax) |
81 | if (kqueue_changecnt > kqueue_eventmax) |
84 | { |
82 | { |
85 | ev_free (kqueue_events); |
83 | ev_free (kqueue_events); |
86 | kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt); |
84 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
87 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
85 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
88 | } |
86 | } |
89 | |
87 | |
90 | ts.tv_sec = (time_t)timeout; |
88 | ts.tv_sec = (time_t)timeout; |
91 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
89 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
92 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
90 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
93 | kqueue_changecnt = 0; |
91 | kqueue_changecnt = 0; |
94 | |
92 | |
95 | if (res < 0) |
93 | if (expect_false (res < 0)) |
96 | { |
94 | { |
97 | if (errno != EINTR) |
95 | if (errno != EINTR) |
98 | syserr ("(libev) kevent"); |
96 | syserr ("(libev) kevent"); |
99 | |
97 | |
100 | return; |
98 | return; |
… | |
… | |
135 | } |
133 | } |
136 | |
134 | |
137 | if (expect_false (res == kqueue_eventmax)) |
135 | if (expect_false (res == kqueue_eventmax)) |
138 | { |
136 | { |
139 | ev_free (kqueue_events); |
137 | ev_free (kqueue_events); |
140 | kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1); |
138 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1); |
141 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
139 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
142 | } |
140 | } |
143 | } |
141 | } |
144 | |
142 | |
145 | static int |
143 | int inline_size |
146 | kqueue_init (EV_P_ int flags) |
144 | kqueue_init (EV_P_ int flags) |
147 | { |
145 | { |
148 | struct kevent ch, ev; |
|
|
149 | |
|
|
150 | /* Initalize the kernel queue */ |
146 | /* Initalize the kernel queue */ |
151 | if ((backend_fd = kqueue ()) < 0) |
147 | if ((backend_fd = kqueue ()) < 0) |
152 | return 0; |
148 | return 0; |
153 | |
149 | |
154 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
150 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
155 | |
151 | |
156 | /* fudge *might* be zero from the documentation, but bsd docs are notoriously wrong */ |
152 | backend_fudge = 0.; |
157 | backend_fudge = 1e-3; /* needed to compensate for kevent returning early */ |
|
|
158 | backend_modify = kqueue_modify; |
153 | backend_modify = kqueue_modify; |
159 | backend_poll = kqueue_poll; |
154 | backend_poll = kqueue_poll; |
160 | |
155 | |
161 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
156 | kqueue_eventmax = 64; /* initial number of events receivable per poll */ |
162 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
157 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
… | |
… | |
166 | kqueue_changecnt = 0; |
161 | kqueue_changecnt = 0; |
167 | |
162 | |
168 | return EVBACKEND_KQUEUE; |
163 | return EVBACKEND_KQUEUE; |
169 | } |
164 | } |
170 | |
165 | |
171 | static void |
166 | void inline_size |
172 | kqueue_destroy (EV_P) |
167 | kqueue_destroy (EV_P) |
173 | { |
168 | { |
174 | close (backend_fd); |
|
|
175 | |
|
|
176 | ev_free (kqueue_events); |
169 | ev_free (kqueue_events); |
177 | ev_free (kqueue_changes); |
170 | ev_free (kqueue_changes); |
178 | } |
171 | } |
179 | |
172 | |
180 | static void |
173 | void inline_size |
181 | kqueue_fork (EV_P) |
174 | kqueue_fork (EV_P) |
182 | { |
175 | { |
183 | close (backend_fd); |
176 | close (backend_fd); |
184 | |
177 | |
185 | while ((backend_fd = kqueue ()) < 0) |
178 | while ((backend_fd = kqueue ()) < 0) |