ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev_kqueue.c
(Generate patch)

Comparing libev/ev_kqueue.c (file contents):
Revision 1.1 by root, Fri Nov 2 20:59:15 2007 UTC vs.
Revision 1.17 by root, Mon Nov 12 20:03:39 2007 UTC

1
2/* 1/*
3 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de> 2 * Copyright 2007 Marc Alexander Lehmann <libev@schmorp.de>
4 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu> 3 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
5 * All rights reserved. 4 * All rights reserved.
6 * 5 *
#include <sys/queue.h>
#include <sys/event.h>
#include <string.h>
#include <errno.h>
37 36
38static int kq_fd;
39static struct kevent *kq_changes;
40static int kq_changemax, kq_changecnt;
41static struct kevent *kq_events;
42static int kq_eventmax;
43
/* Queue one change request (add/delete of a read or write filter) for fd.
 * Changes are batched in kqueue_changes and submitted in one kevent () call
 * by kqueue_poll, which also resets kqueue_changecnt. */
static void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  struct kevent *ke;

  /* grow the change list first, then fill in the new last slot */
  ++kqueue_changecnt;
  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  ke = &kqueue_changes [kqueue_changecnt - 1];
  memset (ke, 0, sizeof (struct kevent));
  ke->ident  = fd;
  ke->filter = filter;
  ke->flags  = flags;
  ke->fflags = fflags;
}
58 52
/* NOTE_EOF is a BSD extension that requests EOF notification on the read
 * filter; define it away on platforms that lack it. */
#ifndef NOTE_EOF
# define NOTE_EOF 0
#endif

/* Translate a libev event-mask transition (oev -> nev) for fd into kqueue
 * change requests.  oev/nev are bitmasks of EV_READ | EV_WRITE. */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  /* to detect close/reopen reliably, we have to remove and re-add */
  /* event requests even when oev == nev */

  if (oev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ, EV_DELETE, 0);

  if (oev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);

  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ, EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
78 75
/* Submit pending change requests and wait up to 'timeout' seconds for
 * events, then dispatch them into the libev core via fd_event ().
 * Error events (EV_ERROR) are turned into re-registration attempts or
 * fd_kill () as appropriate. */
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors:
   * each submitted change can come back as an EV_ERROR event */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  /* split the ev_tstamp (double, seconds) timeout into a timespec */
  ts.tv_sec  = (time_t)timeout;
  ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9;
  res = kevent (kqueue_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0;

  if (res < 0)
    {
      /* EINTR is routine (signal delivery); anything else is fatal */
      if (errno != EINTR)
        syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (kqueue_events [i].flags & EV_ERROR)
        {
          int err = kqueue_events [i].data;

          /*
           * errors that may happen
           * EBADF happens when the file descriptor has been
           * closed,
           * ENOENT when the file descriptor was closed and
           * then reopened.
           * EINVAL for some reasons not understood; EINVAL
           * should not be returned ever; but FreeBSD does :-\
           */

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ  ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }

  /* a full event buffer suggests more events are pending: grow it */
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_roundsize (struct kevent, kqueue_eventmax << 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
128 154
/* Create and probe the kqueue backend.  Returns EVMETHOD_KQUEUE on
 * success, 0 when kqueue is unavailable or broken (caller falls back
 * to another backend). */
static int
kqueue_init (EV_P_ int flags)
{
  struct kevent ch, ev;

  /* Initialize the kernel queue */
  if ((kqueue_fd = kqueue ()) < 0)
    return 0;

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  /* Check for Mac OS X kqueue bug. */
  ch.ident  = -1;
  ch.filter = EVFILT_READ;
  ch.flags  = EV_ADD;

  /*
   * If kqueue works, then kevent will succeed, and it will
   * stick an error in ev. If kqueue is broken, then
   * kevent will fail.
   */
  if (kevent (kqueue_fd, &ch, 1, &ev, 1, 0) != 1
      || ev.ident != -1
      || ev.flags != EV_ERROR)
    {
      /* detected broken kqueue */
      close (kqueue_fd);
      return 0;
    }

  method_fudge  = 1e-3; /* needed to compensate for kevent returning early */
  method_modify = kqueue_modify;
  method_poll   = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  kqueue_changes   = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVMETHOD_KQUEUE;
}
198
199static void
200kqueue_destroy (EV_P)
201{
202 close (kqueue_fd);
203
204 ev_free (kqueue_events);
205 ev_free (kqueue_changes);
206}
207
/* Re-establish the backend after fork (): kqueue descriptors are not
 * inherited usefully, so recreate the queue and re-register all fds. */
static void
kqueue_fork (EV_P)
{
  close (kqueue_fd);

  /* retry until we get a new queue; failure here is unrecoverable */
  while ((kqueue_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (kqueue_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
221

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines