/*
 * libev kqueue backend
 *
 * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
… | |
… | |
 * either the BSD or the GPL.
 */
39 | |
39 | |
40 | #include <sys/types.h> |
40 | #include <sys/types.h> |
41 | #include <sys/time.h> |
41 | #include <sys/time.h> |
42 | #include <sys/queue.h> |
|
|
43 | #include <sys/event.h> |
42 | #include <sys/event.h> |
44 | #include <string.h> |
43 | #include <string.h> |
45 | #include <errno.h> |
44 | #include <errno.h> |
46 | |
45 | |
47 | void inline_speed |
46 | void inline_speed |
… | |
… | |
51 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
50 | array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2); |
52 | |
51 | |
53 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
52 | EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0); |
54 | } |
53 | } |
55 | |
54 | |
|
|
55 | /* OS X at least needs this */ |
|
|
56 | #ifndef EV_ENABLE |
|
|
57 | # define EV_ENABLE 0 |
|
|
58 | #endif |
56 | #ifndef NOTE_EOF |
59 | #ifndef NOTE_EOF |
57 | # define NOTE_EOF 0 |
60 | # define NOTE_EOF 0 |
58 | #endif |
61 | #endif |
59 | |
62 | |
60 | static void |
63 | static void |
… | |
… | |
71 | |
74 | |
72 | /* to detect close/reopen reliably, we have to re-add */ |
75 | /* to detect close/reopen reliably, we have to re-add */ |
73 | /* event requests even when oev == nev */ |
76 | /* event requests even when oev == nev */ |
74 | |
77 | |
75 | if (nev & EV_READ) |
78 | if (nev & EV_READ) |
76 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF); |
79 | kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD | EV_ENABLE, NOTE_EOF); |
77 | |
80 | |
78 | if (nev & EV_WRITE) |
81 | if (nev & EV_WRITE) |
79 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF); |
82 | kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, NOTE_EOF); |
80 | } |
83 | } |
81 | |
84 | |
82 | static void |
85 | static void |
83 | kqueue_poll (EV_P_ ev_tstamp timeout) |
86 | kqueue_poll (EV_P_ ev_tstamp timeout) |
84 | { |
87 | { |
… | |
… | |
91 | ev_free (kqueue_events); |
94 | ev_free (kqueue_events); |
92 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
95 | kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt); |
93 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
96 | kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax); |
94 | } |
97 | } |
95 | |
98 | |
|
|
99 | EV_RELEASE_CB; |
96 | ts.tv_sec = (time_t)timeout; |
100 | ts.tv_sec = (time_t)timeout; |
97 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
101 | ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9); |
98 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
102 | res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts); |
|
|
103 | EV_ACQUIRE_CB; |
99 | kqueue_changecnt = 0; |
104 | kqueue_changecnt = 0; |
100 | |
105 | |
101 | if (expect_false (res < 0)) |
106 | if (expect_false (res < 0)) |
102 | { |
107 | { |
103 | if (errno != EINTR) |
108 | if (errno != EINTR) |
104 | syserr ("(libev) kevent"); |
109 | ev_syserr ("(libev) kevent"); |
105 | |
110 | |
106 | return; |
111 | return; |
107 | } |
112 | } |
108 | |
113 | |
109 | for (i = 0; i < res; ++i) |
114 | for (i = 0; i < res; ++i) |
110 | { |
115 | { |
111 | int fd = kqueue_events [i].ident; |
116 | int fd = kqueue_events [i].ident; |
112 | |
117 | |
113 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
118 | if (expect_false (kqueue_events [i].flags & EV_ERROR)) |
114 | { |
119 | { |
115 | int err = kqueue_events [i].data; |
120 | int err = kqueue_events [i].data; |
116 | |
121 | |
117 | /* we are only interested in errors for fds that we are interested in :) */ |
122 | /* we are only interested in errors for fds that we are interested in :) */ |
118 | if (anfds [fd].events) |
123 | if (anfds [fd].events) |
119 | { |
124 | { |
120 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
125 | if (err == ENOENT) /* resubmit changes on ENOENT */ |
121 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
126 | kqueue_modify (EV_A_ fd, 0, anfds [fd].events); |
122 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
127 | else if (err == EBADF) /* on EBADF, we re-check the fd */ |
123 | { |
128 | { |
124 | if (fd_valid (fd)) |
129 | if (fd_valid (fd)) |
… | |
… | |
126 | else |
131 | else |
127 | fd_kill (EV_A_ fd); |
132 | fd_kill (EV_A_ fd); |
128 | } |
133 | } |
129 | else /* on all other errors, we error out on the fd */ |
134 | else /* on all other errors, we error out on the fd */ |
130 | fd_kill (EV_A_ fd); |
135 | fd_kill (EV_A_ fd); |
131 | } |
136 | } |
132 | } |
137 | } |
133 | else |
138 | else |
134 | fd_event ( |
139 | fd_event ( |
135 | EV_A_ |
140 | EV_A_ |
136 | fd, |
141 | fd, |
… | |
… | |
149 | } |
154 | } |
150 | |
155 | |
151 | int inline_size |
156 | int inline_size |
152 | kqueue_init (EV_P_ int flags) |
157 | kqueue_init (EV_P_ int flags) |
153 | { |
158 | { |
154 | /* Initalize the kernel queue */ |
159 | /* Initialize the kernel queue */ |
155 | if ((backend_fd = kqueue ()) < 0) |
160 | if ((backend_fd = kqueue ()) < 0) |
156 | return 0; |
161 | return 0; |
157 | |
162 | |
158 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
163 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */ |
159 | |
164 | |
… | |
… | |
182 | kqueue_fork (EV_P) |
187 | kqueue_fork (EV_P) |
183 | { |
188 | { |
184 | close (backend_fd); |
189 | close (backend_fd); |
185 | |
190 | |
186 | while ((backend_fd = kqueue ()) < 0) |
191 | while ((backend_fd = kqueue ()) < 0) |
187 | syserr ("(libev) kqueue"); |
192 | ev_syserr ("(libev) kqueue"); |
188 | |
193 | |
189 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
194 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
190 | |
195 | |
191 | /* re-register interest in fds */ |
196 | /* re-register interest in fds */ |
192 | fd_rearm_all (EV_A); |
197 | fd_rearm_all (EV_A); |