ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev_kqueue.c
(Generate patch)

Comparing libev/ev_kqueue.c (file contents):
Revision 1.1 by root, Fri Nov 2 20:59:15 2007 UTC vs.
Revision 1.22 by root, Fri Nov 23 05:28:17 2007 UTC

1
2/* 1/*
3 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de> 2 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
4 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu> 3 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
5 * All rights reserved. 4 * All rights reserved.
6 * 5 *
33#include <sys/queue.h> 32#include <sys/queue.h>
34#include <sys/event.h> 33#include <sys/event.h>
35#include <string.h> 34#include <string.h>
36#include <errno.h> 35#include <errno.h>
37 36
38static int kq_fd;
39static struct kevent *kq_changes;
40static int kq_changemax, kq_changecnt;
41static struct kevent *kq_events;
42static int kq_eventmax;
43
44static void 37static void
45kqueue_change (int fd, int filter, int flags, int fflags) 38kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
46{ 39{
47 struct kevent *ke; 40 struct kevent *ke;
48 41
42 ++kqueue_changecnt;
49 array_needsize (kq_changes, kq_changemax, ++kq_changecnt, ); 43 array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);
50 44
51 ke = &kq_changes [kq_changecnt - 1]; 45 ke = &kqueue_changes [kqueue_changecnt - 1];
52 memset (ke, 0, sizeof (struct kevent)); 46 memset (ke, 0, sizeof (struct kevent));
53 ke->ident = fd; 47 ke->ident = fd;
54 ke->filter = filter; 48 ke->filter = filter;
55 ke->flags = flags; 49 ke->flags = flags;
56 ke->fflags = fflags; 50 ke->fflags = fflags;
57} 51}
58 52
53#ifndef NOTE_EOF
54# define NOTE_EOF 0
55#endif
56
59static void 57static void
60kqueue_modify (int fd, int oev, int nev) 58kqueue_modify (EV_P_ int fd, int oev, int nev)
61{ 59{
62 if ((oev ^ new) & EV_READ) 60 if (oev != nev)
63 { 61 {
64 if (nev & EV_READ) 62 if (oev & EV_READ)
65 kqueue_change (fd, EVFILT_READ, EV_ADD, NOTE_EOF);
66 else
67 kqueue_change (fd, EVFILT_READ, EV_DELETE, 0); 63 kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);
68 }
69 64
70 if ((oev ^ new) & EV_WRITE)
71 {
72 if (nev & EV_WRITE) 65 if (oev & EV_WRITE)
73 kqueue_change (fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
74 else
75 kqueue_change (fd, EVFILT_WRITE, EV_DELETE, 0); 66 kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
76 } 67 }
77}
78 68
69 /* to detect close/reopen reliably, we have to re-add */
70 /* event requests even when oev == nev */
71
72 if (nev & EV_READ)
73 kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF);
74
75 if (nev & EV_WRITE)
76 kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
77}
78
79static void 79static void
80kqueue_poll (ev_tstamp timeout) 80kqueue_poll (EV_P_ ev_tstamp timeout)
81{ 81{
82 int res, i; 82 int res, i;
83 struct timespec ts; 83 struct timespec ts;
84 84
85 /* need to resize so there is enough space for errors */
86 if (kqueue_changecnt > kqueue_eventmax)
87 {
88 ev_free (kqueue_events);
89 kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
90 kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
91 }
92
85 ts.tv_sec = (time_t)timeout; 93 ts.tv_sec = (time_t)timeout;
86 ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9; 94 ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
87 res = kevent (kq_fd, kq_changes, kq_changecnt, kq_events, kq_eventmax, &ts); 95 res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
88 kq_changecnt = 0; 96 kqueue_changecnt = 0;
89 97
90 if (res < 0) 98 if (res < 0)
99 {
100 if (errno != EINTR)
101 syserr ("(libev) kevent");
102
91 return; 103 return;
104 }
92 105
93 for (i = 0; i < res; ++i) 106 for (i = 0; i < res; ++i)
94 { 107 {
108 int fd = kqueue_events [i].ident;
109
95 if (kq_events [i].flags & EV_ERROR) 110 if (kqueue_events [i].flags & EV_ERROR)
96 { 111 {
112 int err = kqueue_events [i].data;
113
97 /* 114 /*
98 * Error messages that can happen, when a delete fails. 115 * errors that may happen
99 * EBADF happens when the file discriptor has been 116 * EBADF happens when the file discriptor has been
100 * closed, 117 * closed,
101 * ENOENT when the file discriptor was closed and 118 * ENOENT when the file descriptor was closed and
102 * then reopened. 119 * then reopened.
103 * EINVAL for some reasons not understood; EINVAL 120 * EINVAL for some reasons not understood; EINVAL
104 * should not be returned ever; but FreeBSD does :-\ 121 * should not be returned ever; but FreeBSD does :-\
105 * An error is also indicated when a callback deletes
106 * an event we are still processing. In that case
107 * the data field is set to ENOENT.
108 */ 122 */
109 if (events [i].data == EBADF) 123
110 fd_kill (events [i].ident); 124 /* we are only interested in errors for fds that we are interested in :) */
125 if (anfds [fd].events)
126 {
127 if (err == ENOENT) /* resubmit changes on ENOENT */
128 kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
129 else if (err == EBADF) /* on EBADF, we re-check the fd */
130 {
131 if (fd_valid (fd))
132 kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
133 else
134 fd_kill (EV_A_ fd);
135 }
136 else /* on all other errors, we error out on the fd */
137 fd_kill (EV_A_ fd);
138 }
111 } 139 }
112 else 140 else
113 event ( 141 fd_event (
114 events [i].ident, 142 EV_A_
143 fd,
115 events [i].filter == EVFILT_READ ? EV_READ 144 kqueue_events [i].filter == EVFILT_READ ? EV_READ
116 : events [i].filter == EVFILT_WRITE ? EV_WRITE 145 : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
117 : 0 146 : 0
118 ); 147 );
119 } 148 }
120 149
121 if (expect_false (res == kq_eventmax)) 150 if (expect_false (res == kqueue_eventmax))
122 { 151 {
123 free (kq_events); 152 ev_free (kqueue_events);
124 kq_eventmax = array_roundsize (kq_events, kq_eventmax << 1); 153 kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
125 kq_events = malloc (sizeof (struct kevent) * kq_eventmax); 154 kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
126 } 155 }
127} 156}
128 157
129static void 158static int
130kqueue_init (struct event_base *base) 159kqueue_init (EV_P_ int flags)
131{ 160{
161 struct kevent ch, ev;
162
132 /* Initalize the kernel queue */ 163 /* Initalize the kernel queue */
133 if ((kq_fd = kqueue ()) == -1) 164 if ((kqueue_fd = kqueue ()) < 0)
134 {
135 free (kqueueop);
136 return;
137 }
138
139 /* Initalize fields */
140 kq_changes = malloc (NEVENT * sizeof (struct kevent));
141 if (!kq_changes)
142 return; 165 return 0;
143 166
144 events = malloc (NEVENT * sizeof (struct kevent)); 167 fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
145 if (!events)
146 {
147 free (kq_changes);
148 return;
149 }
150 168
151 /* Check for Mac OS X kqueue bug. */ 169 /* Check for Mac OS X kqueue bug. */
152 kq_changes [0].ident = -1; 170 ch.ident = -1;
153 kq_changes [0].filter = EVFILT_READ; 171 ch.filter = EVFILT_READ;
154 kq_changes [0].flags = EV_ADD; 172 ch.flags = EV_ADD;
173
155 /* 174 /*
156 * If kqueue works, then kevent will succeed, and it will 175 * If kqueue works, then kevent will succeed, and it will
157 * stick an error in events[0]. If kqueue is broken, then 176 * stick an error in ev. If kqueue is broken, then
158 * kevent will fail. 177 * kevent will fail.
159 */ 178 */
160 if (kevent (kq_fd, kq_changes, 1, kq_events, NEVENT, NULL) != 1 179 if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
161 || kq_events[0].ident != -1 180 || ev.ident != -1
162 || kq_events[0].flags != EV_ERROR) 181 || ev.flags != EV_ERROR)
163 { 182 {
164 /* detected broken kqueue */ 183 /* detected broken kqueue */
165 free (kq_changes);
166 free (kq_events);
167 close (kq_fd); 184 close (kqueue_fd);
168 return; 185 return 0;
169 } 186 }
170 187
171 ev_method = EVMETHOD_KQUEUE;
172 method_fudge = 1e-3; /* needed to compensate for kevent returning early */ 188 backend_fudge = 1e-3; /* needed to compensate for kevent returning early */
173 method_modify = kq_modify; 189 backend_modify = kqueue_modify;
174 method_poll = kq_poll; 190 backend_poll = kqueue_poll;
175 191
176 kq_eventmax = 64; /* intiial number of events receivable per poll */ 192 kqueue_eventmax = 64; /* intiial number of events receivable per poll */
177 kq_events = malloc (sizeof (struct kevent) * kq_eventmax); 193 kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
178}
179 194
195 kqueue_changes = 0;
196 kqueue_changemax = 0;
197 kqueue_changecnt = 0;
198
199 return EVBACKEND_KQUEUE;
200}
201
202static void
203kqueue_destroy (EV_P)
204{
205 close (kqueue_fd);
206
207 ev_free (kqueue_events);
208 ev_free (kqueue_changes);
209}
210
211static void
212kqueue_fork (EV_P)
213{
214 close (kqueue_fd);
215
216 while ((kqueue_fd = kqueue ()) < 0)
217 syserr ("(libev) kqueue");
218
219 fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);
220
221 /* re-register interest in fds */
222 fd_rearm_all (EV_A);
223}
224

Diff Legend

− Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)