… | |
… | |
37 | static void |
37 | static void |
38 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) |
38 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) |
39 | { |
39 | { |
40 | struct kevent *ke; |
40 | struct kevent *ke; |
41 | |
41 | |
|
|
42 | ++kqueue_changecnt; |
42 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, ++kqueue_changecnt, ); |
43 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
43 | |
44 | |
44 | ke = &kqueue_changes [kqueue_changecnt - 1]; |
45 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
45 | memset (ke, 0, sizeof (struct kevent)); |
|
|
46 | ke->ident = fd; |
|
|
47 | ke->filter = filter; |
|
|
48 | ke->flags = flags; |
|
|
49 | ke->fflags = fflags; |
|
|
50 | } |
46 | } |
51 | |
47 | |
/* NOTE_EOF is a BSD extension reporting EOF conditions via fflags;
 * define it to 0 where absent so passing it becomes a no-op */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif
55 | |
51 | |
56 | static void |
52 | static void |
57 | kqueue_modify (EV_P_ int fd, int oev, int nev) |
53 | kqueue_modify (EV_P_ int fd, int oev, int nev) |
58 | { |
54 | { |
|
|
55 | if (oev != nev) |
|
|
56 | { |
|
|
57 | if (oev & EV_READ) |
|
|
58 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0); |
|
|
59 | |
|
|
60 | if (oev & EV_WRITE) |
|
|
61 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); |
|
|
62 | } |
|
|
63 | |
59 | /* to detect close/reopen reliably, we have to remove and re-add */ |
64 | /* to detect close/reopen reliably, we have to re-add */ |
60 | /* event requests even when oev == nev */ |
65 | /* event requests even when oev == nev */ |
61 | |
66 | |
62 | if (oev & EV_READ) |
|
|
63 | kqueue_change (EV_A_ fd, EVFILT_READ, EV_DELETE, 0); |
|
|
64 | |
|
|
65 | if (oev & EV_WRITE) |
|
|
66 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0); |
|
|
67 | |
|
|
68 | if (nev & EV_READ) |
67 | if (nev & EV_READ) |
69 | kqueue_change (EV_A_ fd, EVFILT_READ, EV_ADD, NOTE_EOF); |
68 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF); |
70 | |
69 | |
71 | if (nev & EV_WRITE) |
70 | if (nev & EV_WRITE) |
72 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF); |
71 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF); |
73 | } |
72 | } |
74 | |
73 | |
… | |
… | |
81 | /* need to resize so there is enough space for errors */ |
80 | /* need to resize so there is enough space for errors */ |
82 | if (kqueue_changecnt > kqueue_eventmax) |
81 | if (kqueue_changecnt > kqueue_eventmax) |
83 | { |
82 | { |
84 | ev_free (kqueue_events); |
83 | ev_free (kqueue_events); |
85 | kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt); |
84 | kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt); |
86 | kqueue_events = ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
85 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
87 | } |
86 | } |
88 | |
87 | |
89 | ts.tv_sec = (time_t)timeout; |
88 | ts.tv_sec = (time_t)timeout; |
90 | ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9; |
89 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
91 | res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
90 | res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
92 | kqueue_changecnt = 0; |
91 | kqueue_changecnt = 0; |
93 | |
92 | |
94 | if (res < 0) |
93 | if (res < 0) |
95 | { |
94 | { |
… | |
… | |
145 | |
144 | |
146 | if (expect_false (res == kqueue_eventmax)) |
145 | if (expect_false (res == kqueue_eventmax)) |
147 | { |
146 | { |
148 | ev_free (kqueue_events); |
147 | ev_free (kqueue_events); |
149 | kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1); |
148 | kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1); |
150 | kqueue_events = ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
149 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
151 | } |
150 | } |
152 | } |
151 | } |
153 | |
152 | |
154 | static int |
153 | static int |
155 | kqueue_init (EV_P_ int flags) |
154 | kqueue_init (EV_P_ int flags) |
… | |
… | |
179 | /* detected broken kqueue */ |
178 | /* detected broken kqueue */ |
180 | close (kqueue_fd); |
179 | close (kqueue_fd); |
181 | return 0; |
180 | return 0; |
182 | } |
181 | } |
183 | |
182 | |
184 | method_fudge = 1e-3; /* needed to compensate for kevent returning early */ |
183 | backend_fudge = 1e-3; /* needed to compensate for kevent returning early */ |
185 | method_modify = kqueue_modify; |
184 | backend_modify = kqueue_modify; |
186 | method_poll = kqueue_poll; |
185 | backend_poll = kqueue_poll; |
187 | |
186 | |
188 | kqueue_eventmax = 64; /* intiial number of events receivable per poll */ |
187 | kqueue_eventmax = 64; /* intiial number of events receivable per poll */ |
189 | kqueue_events = ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
188 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
190 | |
189 | |
191 | kqueue_changes = 0; |
190 | kqueue_changes = 0; |
192 | kqueue_changemax = 0; |
191 | kqueue_changemax = 0; |
193 | kqueue_changecnt = 0; |
192 | kqueue_changecnt = 0; |
194 | |
193 | |
195 | return EVMETHOD_KQUEUE; |
194 | return EVBACKEND_KQUEUE; |
196 | } |
195 | } |
197 | |
196 | |
198 | static void |
197 | static void |
199 | kqueue_destroy (EV_P) |
198 | kqueue_destroy (EV_P) |
200 | { |
199 | { |