… | |
… | |
#include <sys/queue.h>
#include <sys/event.h>

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
36 | |
36 | |
37 | static int kq_fd; |
|
|
38 | static struct kevent *kq_changes; |
|
|
39 | static int kq_changemax, kq_changecnt; |
|
|
40 | static struct kevent *kq_events; |
|
|
41 | static int kq_eventmax; |
|
|
42 | |
|
|
43 | static void |
37 | static void |
44 | kqueue_change (int fd, int filter, int flags, int fflags) |
38 | kqueue_change (EV_P_ int fd, int filter, int flags, int fflags) |
45 | { |
39 | { |
46 | struct kevent *ke; |
40 | struct kevent *ke; |
47 | |
41 | |
48 | array_needsize (kq_changes, kq_changemax, ++kq_changecnt, ); |
42 | array_needsize (kqueue_changes, kqueue_changemax, ++kqueue_changecnt, ); |
49 | |
43 | |
50 | ke = &kq_changes [kq_changecnt - 1]; |
44 | ke = &kqueue_changes [kqueue_changecnt - 1]; |
51 | memset (ke, 0, sizeof (struct kevent)); |
45 | memset (ke, 0, sizeof (struct kevent)); |
52 | ke->ident = fd; |
46 | ke->ident = fd; |
53 | ke->filter = filter; |
47 | ke->filter = filter; |
54 | ke->flags = flags; |
48 | ke->flags = flags; |
55 | ke->fflags = fflags; |
49 | ke->fflags = fflags; |
… | |
… | |
58 | #ifndef NOTE_EOF |
52 | #ifndef NOTE_EOF |
59 | # define NOTE_EOF 0 |
53 | # define NOTE_EOF 0 |
60 | #endif |
54 | #endif |
61 | |
55 | |
62 | static void |
56 | static void |
63 | kqueue_modify (int fd, int oev, int nev) |
57 | kqueue_modify (EV_P_ int fd, int oev, int nev) |
64 | { |
58 | { |
65 | if ((oev ^ nev) & EV_READ) |
59 | if ((oev ^ nev) & EV_READ) |
66 | { |
60 | { |
67 | if (nev & EV_READ) |
61 | if (nev & EV_READ) |
68 | kqueue_change (fd, EVFILT_READ, EV_ADD, NOTE_EOF); |
62 | kqueue_change (fd, EVFILT_READ, EV_ADD, NOTE_EOF); |
… | |
… | |
78 | kqueue_change (fd, EVFILT_WRITE, EV_DELETE, 0); |
72 | kqueue_change (fd, EVFILT_WRITE, EV_DELETE, 0); |
79 | } |
73 | } |
80 | } |
74 | } |
81 | |
75 | |
82 | static void |
76 | static void |
83 | kqueue_poll (ev_tstamp timeout) |
77 | kqueue_poll (EV_P_ ev_tstamp timeout) |
84 | { |
78 | { |
85 | int res, i; |
79 | int res, i; |
86 | struct timespec ts; |
80 | struct timespec ts; |
87 | |
81 | |
88 | ts.tv_sec = (time_t)timeout; |
82 | ts.tv_sec = (time_t)timeout; |
89 | ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9; |
83 | ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9; |
90 | res = kevent (kq_fd, kq_changes, kq_changecnt, kq_events, kq_eventmax, &ts); |
84 | res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
91 | kq_changecnt = 0; |
85 | kqueue_changecnt = 0; |
92 | |
86 | |
93 | if (res < 0) |
87 | if (res < 0) |
|
|
88 | { |
|
|
89 | if (errno != EINTR) |
|
|
90 | syserr (); |
|
|
91 | |
94 | return; |
92 | return; |
|
|
93 | } |
95 | |
94 | |
96 | for (i = 0; i < res; ++i) |
95 | for (i = 0; i < res; ++i) |
97 | { |
96 | { |
98 | if (kq_events [i].flags & EV_ERROR) |
97 | if (kqueue_events [i].flags & EV_ERROR) |
99 | { |
98 | { |
100 | /* |
99 | /* |
101 | * Error messages that can happen, when a delete fails. |
100 | * Error messages that can happen, when a delete fails. |
102 | * EBADF happens when the file discriptor has been |
101 | * EBADF happens when the file discriptor has been |
103 | * closed, |
102 | * closed, |
… | |
… | |
107 | * should not be returned ever; but FreeBSD does :-\ |
106 | * should not be returned ever; but FreeBSD does :-\ |
108 | * An error is also indicated when a callback deletes |
107 | * An error is also indicated when a callback deletes |
109 | * an event we are still processing. In that case |
108 | * an event we are still processing. In that case |
110 | * the data field is set to ENOENT. |
109 | * the data field is set to ENOENT. |
111 | */ |
110 | */ |
112 | if (kq_events [i].data == EBADF) |
111 | if (kqueue_events [i].data == EBADF) |
113 | fd_kill (kq_events [i].ident); |
112 | fd_kill (EV_A_ kqueue_events [i].ident); |
114 | } |
113 | } |
115 | else |
114 | else |
116 | fd_event ( |
115 | fd_event ( |
|
|
116 | EV_A_ |
117 | kq_events [i].ident, |
117 | kqueue_events [i].ident, |
118 | kq_events [i].filter == EVFILT_READ ? EV_READ |
118 | kqueue_events [i].filter == EVFILT_READ ? EV_READ |
119 | : kq_events [i].filter == EVFILT_WRITE ? EV_WRITE |
119 | : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE |
120 | : 0 |
120 | : 0 |
121 | ); |
121 | ); |
122 | } |
122 | } |
123 | |
123 | |
124 | if (expect_false (res == kq_eventmax)) |
124 | if (expect_false (res == kqueue_eventmax)) |
125 | { |
125 | { |
126 | free (kq_events); |
126 | ev_free (kqueue_events); |
127 | kq_eventmax = array_roundsize (kq_events, kq_eventmax << 1); |
127 | kqueue_eventmax = array_roundsize (kqueue_events, kqueue_eventmax << 1); |
128 | kq_events = malloc (sizeof (struct kevent) * kq_eventmax); |
128 | kqueue_events = ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
129 | } |
129 | } |
130 | } |
130 | } |
131 | |
131 | |
132 | static void |
132 | static int |
133 | kqueue_init (int flags) |
133 | kqueue_init (EV_P_ int flags) |
134 | { |
134 | { |
135 | struct kevent ch, ev; |
135 | struct kevent ch, ev; |
136 | |
136 | |
137 | /* Initalize the kernel queue */ |
137 | /* Initalize the kernel queue */ |
138 | if ((kq_fd = kqueue ()) < 0) |
138 | if ((kqueue_fd = kqueue ()) < 0) |
139 | return; |
139 | return 0; |
|
|
140 | |
|
|
141 | fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
140 | |
142 | |
141 | /* Check for Mac OS X kqueue bug. */ |
143 | /* Check for Mac OS X kqueue bug. */ |
142 | ch.ident = -1; |
144 | ch.ident = -1; |
143 | ch.filter = EVFILT_READ; |
145 | ch.filter = EVFILT_READ; |
144 | ch.flags = EV_ADD; |
146 | ch.flags = EV_ADD; |
… | |
… | |
146 | /* |
148 | /* |
147 | * If kqueue works, then kevent will succeed, and it will |
149 | * If kqueue works, then kevent will succeed, and it will |
148 | * stick an error in ev. If kqueue is broken, then |
150 | * stick an error in ev. If kqueue is broken, then |
149 | * kevent will fail. |
151 | * kevent will fail. |
150 | */ |
152 | */ |
151 | if (kevent (kq_fd, &ch, 1, &ev, 1, 0) != 1 |
153 | if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1 |
152 | || ev.ident != -1 |
154 | || ev.ident != -1 |
153 | || ev.flags != EV_ERROR) |
155 | || ev.flags != EV_ERROR) |
154 | { |
156 | { |
155 | /* detected broken kqueue */ |
157 | /* detected broken kqueue */ |
156 | close (kq_fd); |
158 | close (kqueue_fd); |
157 | return; |
159 | return 0; |
158 | } |
160 | } |
159 | |
161 | |
160 | ev_method = EVMETHOD_KQUEUE; |
|
|
161 | method_fudge = 1e-3; /* needed to compensate for kevent returning early */ |
162 | method_fudge = 1e-3; /* needed to compensate for kevent returning early */ |
162 | method_modify = kqueue_modify; |
163 | method_modify = kqueue_modify; |
163 | method_poll = kqueue_poll; |
164 | method_poll = kqueue_poll; |
164 | |
165 | |
165 | kq_eventmax = 64; /* intiial number of events receivable per poll */ |
166 | kqueue_eventmax = 64; /* intiial number of events receivable per poll */ |
166 | kq_events = malloc (sizeof (struct kevent) * kq_eventmax); |
167 | kqueue_events = ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
|
|
168 | |
|
|
169 | kqueue_changes = 0; |
|
|
170 | kqueue_changemax = 0; |
|
|
171 | kqueue_changecnt = 0; |
|
|
172 | |
|
|
173 | return EVMETHOD_KQUEUE; |
167 | } |
174 | } |
168 | |
175 | |
|
|
176 | static void |
|
|
177 | kqueue_destroy (EV_P) |
|
|
178 | { |
|
|
179 | close (kqueue_fd); |
|
|
180 | |
|
|
181 | ev_free (kqueue_events); |
|
|
182 | ev_free (kqueue_changes); |
|
|
183 | } |
|
|
184 | |
|
|
185 | static void |
|
|
186 | kqueue_fork (EV_P) |
|
|
187 | { |
|
|
188 | kqueue_fd = kqueue (); |
|
|
189 | fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); |
|
|
190 | |
|
|
191 | /* re-register interest in fds */ |
|
|
192 | fd_rearm_all (EV_A); |
|
|
193 | } |
|
|
194 | |