ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev_kqueue.c
(Generate patch)

Comparing libev/ev_kqueue.c (file contents):
Revision 1.1 by root, Fri Nov 2 20:59:15 2007 UTC vs.
Revision 1.23 by root, Fri Nov 23 05:29:16 2007 UTC

1
2/* 1/*
3 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de> 2 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
4 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu> 3 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
5 * All rights reserved. 4 * All rights reserved.
6 * 5 *
#include <sys/types.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/event.h>

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
37 36
/* Per-loop kqueue state (kqueue_fd, kqueue_changes, kqueue_changemax,
 * kqueue_changecnt, kqueue_events, kqueue_eventmax) lives in the event
 * loop structure and is brought into scope by EV_P / EV_A macros —
 * NOTE(review): declared elsewhere (ev_vars.h style); confirm in the full tree. */

/* Queue one change request (add/delete of a filter for an fd); the batch is
 * submitted in one kevent() call by kqueue_poll.
 *
 * fd     - file descriptor the change applies to
 * filter - EVFILT_READ or EVFILT_WRITE
 * flags  - EV_ADD or EV_DELETE
 * fflags - filter-specific flags (e.g. NOTE_EOF), 0 for deletes
 */
static void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  /* EV_SET fully initialises the kevent, so no memset is needed */
  EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
}
58 47
/* Some systems (e.g. NetBSD) lack NOTE_EOF; it is optional behaviour anyway. */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif

/* Backend fd-modification hook: translate a change of interest set
 * (oev -> nev, EV_READ/EV_WRITE bits) into queued kevent changes.
 *
 * Deletions are only queued for filters that were previously registered;
 * additions are ALWAYS re-queued, even when oev == nev, because that is
 * the only reliable way to detect that an fd was closed and reopened
 * behind our back (the kernel drops registrations on close).
 */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev != nev)
    {
      if (oev & EV_READ)
        kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);

      if (oev & EV_WRITE)
        kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /* to detect close/reopen reliably, we have to re-add */
  /* event requests even when oev == nev */

  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
73
79static void 74static void
80kqueue_poll (ev_tstamp timeout) 75kqueue_poll (EV_P_ ev_tstamp timeout)
81{ 76{
82 int res, i; 77 int res, i;
83 struct timespec ts; 78 struct timespec ts;
84 79
80 /* need to resize so there is enough space for errors */
81 if (kqueue_changecnt > kqueue_eventmax)
82 {
83 ev_free (kqueue_events);
84 kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
85 kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
86 }
87
85 ts.tv_sec = (time_t)timeout; 88 ts.tv_sec = (time_t)timeout;
86 ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9; 89 ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
87 res = kevent (kq_fd, kq_changes, kq_changecnt, kq_events, kq_eventmax, &ts); 90 res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
88 kq_changecnt = 0; 91 kqueue_changecnt = 0;
89 92
90 if (res < 0) 93 if (res < 0)
94 {
95 if (errno != EINTR)
96 syserr ("(libev) kevent");
97
91 return; 98 return;
99 }
92 100
93 for (i = 0; i < res; ++i) 101 for (i = 0; i < res; ++i)
94 { 102 {
103 int fd = kqueue_events [i].ident;
104
95 if (kq_events [i].flags & EV_ERROR) 105 if (kqueue_events [i].flags & EV_ERROR)
96 { 106 {
107 int err = kqueue_events [i].data;
108
97 /* 109 /*
98 * Error messages that can happen, when a delete fails. 110 * errors that may happen
99 * EBADF happens when the file discriptor has been 111 * EBADF happens when the file discriptor has been
100 * closed, 112 * closed,
101 * ENOENT when the file discriptor was closed and 113 * ENOENT when the file descriptor was closed and
102 * then reopened. 114 * then reopened.
103 * EINVAL for some reasons not understood; EINVAL 115 * EINVAL for some reasons not understood; EINVAL
104 * should not be returned ever; but FreeBSD does :-\ 116 * should not be returned ever; but FreeBSD does :-\
105 * An error is also indicated when a callback deletes
106 * an event we are still processing. In that case
107 * the data field is set to ENOENT.
108 */ 117 */
109 if (events [i].data == EBADF) 118
110 fd_kill (events [i].ident); 119 /* we are only interested in errors for fds that we are interested in :) */
120 if (anfds [fd].events)
121 {
122 if (err == ENOENT) /* resubmit changes on ENOENT */
123 kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
124 else if (err == EBADF) /* on EBADF, we re-check the fd */
125 {
126 if (fd_valid (fd))
127 kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
128 else
129 fd_kill (EV_A_ fd);
130 }
131 else /* on all other errors, we error out on the fd */
132 fd_kill (EV_A_ fd);
133 }
111 } 134 }
112 else 135 else
113 event ( 136 fd_event (
114 events [i].ident, 137 EV_A_
138 fd,
115 events [i].filter == EVFILT_READ ? EV_READ 139 kqueue_events [i].filter == EVFILT_READ ? EV_READ
116 : events [i].filter == EVFILT_WRITE ? EV_WRITE 140 : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
117 : 0 141 : 0
118 ); 142 );
119 } 143 }
120 144
121 if (expect_false (res == kq_eventmax)) 145 if (expect_false (res == kqueue_eventmax))
122 { 146 {
123 free (kq_events); 147 ev_free (kqueue_events);
124 kq_eventmax = array_roundsize (kq_events, kq_eventmax << 1); 148 kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
125 kq_events = malloc (sizeof (struct kevent) * kq_eventmax); 149 kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
126 } 150 }
127} 151}
128 152
/* Backend initialisation: create the kqueue, probe that it actually works
 * (some Mac OS X versions ship a broken kqueue), and set up the backend
 * hooks and buffers.
 *
 * Returns EVBACKEND_KQUEUE on success, 0 on failure.
 */
static int
kqueue_init (EV_P_ int flags)
{
  struct kevent ch, ev;

  /* Initialize the kernel queue */
  if ((kqueue_fd = kqueue ()) < 0)
    return 0;

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  /* Check for Mac OS X kqueue bug. */
  ch.ident  = -1;
  ch.filter = EVFILT_READ;
  ch.flags  = EV_ADD;

  /*
   * If kqueue works, then kevent will succeed, and it will
   * stick an error in ev. If kqueue is broken, then
   * kevent will fail.
   */
  if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
      || ev.ident != -1
      || ev.flags != EV_ERROR)
    {
      /* detected broken kqueue */
      close (kqueue_fd);
      return 0;
    }

  backend_fudge  = 1e-3; /* needed to compensate for kevent returning early */
  backend_modify = kqueue_modify;
  backend_poll   = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  kqueue_changes   = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVBACKEND_KQUEUE;
}
196
197static void
198kqueue_destroy (EV_P)
199{
200 close (kqueue_fd);
201
202 ev_free (kqueue_events);
203 ev_free (kqueue_changes);
204}
205
/* Post-fork hook: kqueue descriptors are not inherited usefully across
 * fork, so recreate the queue and re-register every watched fd. */
static void
kqueue_fork (EV_P)
{
  close (kqueue_fd);

  /* retry until we get a kqueue — there is no sane way to continue without one */
  while ((kqueue_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
219

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines