… | |
… | |
131 | |
131 | |
132 | /* Initialize the kernel queue */
132 | /* Initialize the kernel queue */
133 | if ((kqueue_fd = kqueue ()) < 0) |
133 | if ((kqueue_fd = kqueue ()) < 0) |
134 | return 0; |
134 | return 0; |
135 | |
135 | |
|
|
136 | fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
|
|
137 | |
136 | /* Check for Mac OS X kqueue bug. */ |
138 | /* Check for Mac OS X kqueue bug. */ |
137 | ch.ident = -1; |
139 | ch.ident = -1; |
138 | ch.filter = EVFILT_READ; |
140 | ch.filter = EVFILT_READ; |
139 | ch.flags = EV_ADD; |
141 | ch.flags = EV_ADD; |
140 | |
142 | |
… | |
… | |
157 | method_poll = kqueue_poll; |
159 | method_poll = kqueue_poll; |
158 | |
160 | |
159 | kqueue_eventmax = 64; /* initial number of events receivable per poll */
161 | kqueue_eventmax = 64; /* initial number of events receivable per poll */
160 | kqueue_events = malloc (sizeof (struct kevent) * kqueue_eventmax); |
162 | kqueue_events = malloc (sizeof (struct kevent) * kqueue_eventmax); |
161 | |
163 | |
|
|
164 | kqueue_changes = 0; |
|
|
165 | kqueue_changemax = 0; |
|
|
166 | kqueue_changecnt = 0; |
|
|
167 | |
162 | return EVMETHOD_KQUEUE; |
168 | return EVMETHOD_KQUEUE; |
163 | } |
169 | } |
164 | |
170 | |
|
|
171 | static void |
|
|
172 | kqueue_destroy (EV_P) |
|
|
173 | { |
|
|
174 | close (kqueue_fd); |
|
|
175 | |
|
|
176 | free (kqueue_events); |
|
|
177 | free (kqueue_changes); |
|
|
178 | } |
|
|
179 | |
|
|
180 | static void |
|
|
181 | kqueue_fork (EV_P) |
|
|
182 | { |
|
|
183 | kqueue_fd = kqueue (); |
|
|
184 | fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); |
|
|
185 | |
|
|
186 | /* re-register interest in fds */ |
|
|
187 | fd_rearm_all (); |
|
|
188 | } |
|
|
189 | |