ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/thttpd/fdwatch.c
Revision: 1.1
Committed: Mon Jun 18 21:11:56 2001 UTC (22 years, 11 months ago) by root
Content type: text/plain
Branch: MAIN
CVS Tags: mp_j, dp_j, cp_j, HEAD
Branch point for: connpatch, dirpatch, mmapppatch
Log Message:
*** empty log message ***

File Contents

# Content
1 /* fdwatch.c - fd watcher routines, either select() or poll()
2 **
3 ** Copyright © 1999,2000 by Jef Poskanzer <jef@acme.com>.
4 ** All rights reserved.
5 **
6 ** Redistribution and use in source and binary forms, with or without
7 ** modification, are permitted provided that the following conditions
8 ** are met:
9 ** 1. Redistributions of source code must retain the above copyright
10 ** notice, this list of conditions and the following disclaimer.
11 ** 2. Redistributions in binary form must reproduce the above copyright
12 ** notice, this list of conditions and the following disclaimer in the
13 ** documentation and/or other materials provided with the distribution.
14 **
15 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 ** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 ** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 ** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 ** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 ** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 ** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 ** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 ** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 ** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 ** SUCH DAMAGE.
26 */
27
28 #include <sys/types.h>
29 #include <stdlib.h>
30 #include <unistd.h>
31 #include <string.h>
32 #include <sys/time.h>
33 #include <sys/resource.h>
34 #include <syslog.h>
35
36 #ifndef MIN
37 #define MIN(a,b) ((a) < (b) ? (a) : (b))
38 #endif
39
40 #ifdef HAVE_POLL_H
41 #include <poll.h>
42 #else /* HAVE_POLL_H */
43 #ifdef HAVE_SYS_POLL_H
44 #include <sys/poll.h>
45 #endif /* HAVE_SYS_POLL_H */
46 #endif /* HAVE_POLL_H */
47
48 #ifdef HAVE_SYS_EVENT_H
49 #include <sys/event.h>
50 #endif /* HAVE_SYS_EVENT_H */
51
52 #include "fdwatch.h"
53
54 #ifdef HAVE_SELECT
55 #ifndef FD_SET
56 #define NFDBITS 32
57 #define FD_SETSIZE 32
58 #define FD_SET(n, p) ((p)->fds_bits[(n)/NFDBITS] |= (1 << ((n) % NFDBITS)))
59 #define FD_CLR(n, p) ((p)->fds_bits[(n)/NFDBITS] &= ~(1 << ((n) % NFDBITS)))
60 #define FD_ISSET(n, p) ((p)->fds_bits[(n)/NFDBITS] & (1 << ((n) % NFDBITS)))
61 #define FD_ZERO(p) bzero((char*)(p), sizeof(*(p)))
62 #endif /* !FD_SET */
63 #endif /* HAVE_SELECT */
64
65 static int nfiles;
66 static long nwatches;
67 static int* fd_rw;
68 static void** fd_data;
69
70 #ifdef HAVE_KQUEUE
71 static int kqueue_init( int nfiles );
72 static void kqueue_add_fd( int fd, int rw );
73 static void kqueue_del_fd( int fd );
74 static int kqueue_watch( long timeout_msecs );
75 static int kqueue_check_fd( int fd );
76 static int kqueue_get_fd( int ridx );
77 #else /* HAVE_KQUEUE */
78 # ifdef HAVE_POLL
79 static int poll_init( int nfiles );
80 static void poll_add_fd( int fd, int rw );
81 static void poll_del_fd( int fd );
82 static int poll_watch( long timeout_msecs );
83 static int poll_check_fd( int fd );
84 static int poll_get_fd( int ridx );
85 # else /* HAVE_POLL */
86 # ifdef HAVE_SELECT
87 static int select_init( int nfiles );
88 static void select_add_fd( int fd, int rw );
89 static void select_del_fd( int fd );
90 static int select_watch( long timeout_msecs );
91 static int select_check_fd( int fd );
92 static int select_get_fd( int ridx );
93 static int select_get_maxfd( void );
94 # endif /* HAVE_SELECT */
95 # endif /* HAVE_POLL */
96 #endif /* HAVE_KQUEUE */
97
98
99 /* Routines. */
100
101 /* Figure out how many file descriptors the system allows, and
102 ** initialize the fdwatch data structures. Returns -1 on failure.
103 */
104 int
105 fdwatch_get_nfiles( void )
106 {
107 #ifdef RLIMIT_NOFILE
108 struct rlimit rl;
109 #endif /* RLIMIT_NOFILE */
110
111 /* Figure out how many fd's we can have. */
112 nfiles = getdtablesize();
113 #ifdef RLIMIT_NOFILE
114 /* If we have getrlimit(), use that, and attempt to raise the limit. */
115 if ( getrlimit( RLIMIT_NOFILE, &rl ) == 0 )
116 {
117 nfiles = rl.rlim_cur;
118 if ( rl.rlim_max == RLIM_INFINITY )
119 rl.rlim_cur = 8192; /* arbitrary */
120 else if ( rl.rlim_max > rl.rlim_cur )
121 rl.rlim_cur = rl.rlim_max;
122 if ( setrlimit( RLIMIT_NOFILE, &rl ) == 0 )
123 nfiles = rl.rlim_cur;
124 }
125 #endif /* RLIMIT_NOFILE */
126
127 #if defined(HAVE_SELECT) && ! ( defined(HAVE_POLL) || defined(HAVE_KQUEUE) )
128 /* If we use select(), then we must limit ourselves to FD_SETSIZE. */
129 nfiles = MIN( nfiles, FD_SETSIZE );
130 #endif /* HAVE_SELECT && ! ( HAVE_POLL || HAVE_KQUEUE ) */
131
132 /* Initialize the fdwatch data structures. */
133 nwatches = 0;
134 fd_rw = (int*) malloc( sizeof(int) * nfiles );
135 fd_data = (void**) malloc( sizeof(void*) * nfiles );
136 if ( fd_rw == (int*) 0 || fd_data == (void**) 0 )
137 return -1;
138 #ifdef HAVE_KQUEUE
139 if ( kqueue_init( nfiles ) == -1 )
140 return -1;
141 #else /* HAVE_KQUEUE */
142 # ifdef HAVE_POLL
143 if ( poll_init( nfiles ) == -1 )
144 return -1;
145 # else /* HAVE_POLL */
146 # ifdef HAVE_SELECT
147 if ( select_init( nfiles ) == -1 )
148 return -1;
149 # endif /* HAVE_SELECT */
150 # endif /* HAVE_POLL */
151 #endif /* HAVE_KQUEUE */
152
153 return nfiles;
154 }
155
156
/* Add a descriptor to the watch list.  rw is either FDW_READ or FDW_WRITE.
** client_data is an opaque pointer handed back by
** fdwatch_get_client_data() once the descriptor is reported ready.
** NOTE(review): fd indexes fd_rw/fd_data with no bounds check against
** nfiles - callers must guarantee 0 <= fd < nfiles.
*/
void
fdwatch_add_fd( int fd, void* client_data, int rw )
    {
    /* Hand the fd to whichever event mechanism was compiled in. */
#ifdef HAVE_KQUEUE
    kqueue_add_fd( fd, rw );
#else /* HAVE_KQUEUE */
# ifdef HAVE_POLL
    poll_add_fd( fd, rw );
# else /* HAVE_POLL */
#  ifdef HAVE_SELECT
    select_add_fd( fd, rw );
#  endif /* HAVE_SELECT */
# endif /* HAVE_POLL */
#endif /* HAVE_KQUEUE */

    /* Remember the watch direction and the caller's data for this fd. */
    fd_rw[fd] = rw;
    fd_data[fd] = client_data;
    }
176
177
/* Remove a descriptor from the watch list. */
void
fdwatch_del_fd( int fd )
    {
    /* Tell the compiled-in event mechanism to stop watching fd. */
#ifdef HAVE_KQUEUE
    kqueue_del_fd( fd );
#else /* HAVE_KQUEUE */
# ifdef HAVE_POLL
    poll_del_fd( fd );
# else /* HAVE_POLL */
#  ifdef HAVE_SELECT
    select_del_fd( fd );
#  endif /* HAVE_SELECT */
# endif /* HAVE_POLL */
#endif /* HAVE_KQUEUE */

    /* Clear the client data; fd_rw is left stale until the fd is
    ** re-added via fdwatch_add_fd().
    */
    fd_data[fd] = (void*) 0;
    }
196
/* Do the watch.  Return value is the number of descriptors that are ready,
** or 0 if the timeout expired, or -1 on errors.  A timeout of INFTIM means
** wait indefinitely.
*/
int
fdwatch( long timeout_msecs )
    {
    ++nwatches;	/* statistics counter, reported by fdwatch_logstats() */
#ifdef HAVE_KQUEUE
    return kqueue_watch( timeout_msecs );
#else /* HAVE_KQUEUE */
# ifdef HAVE_POLL
    return poll_watch( timeout_msecs );
# else /* HAVE_POLL */
#  ifdef HAVE_SELECT
    return select_watch( timeout_msecs );
#  else /* HAVE_SELECT */
    /* No event mechanism was compiled in - always fail. */
    return -1;
#  endif /* HAVE_SELECT */
# endif /* HAVE_POLL */
#endif /* HAVE_KQUEUE */
    }
219
220
/* Check if a descriptor was ready.  Returns nonzero if fd was reported
** ready (in its registered direction) by the last fdwatch() call,
** 0 otherwise.
*/
int
fdwatch_check_fd( int fd )
    {
#ifdef HAVE_KQUEUE
    return kqueue_check_fd( fd );
#else
# ifdef HAVE_POLL
    return poll_check_fd( fd );
# else /* HAVE_POLL */
#  ifdef HAVE_SELECT
    return select_check_fd( fd );
#  else /* HAVE_SELECT */
    /* No event mechanism compiled in - nothing is ever ready. */
    return 0;
#  endif /* HAVE_SELECT */
# endif /* HAVE_POLL */
#endif /* HAVE_KQUEUE */
    }
239
240
/* Map a ready-descriptor index (0 .. fdwatch() return value - 1) back to
** the client_data pointer registered for that descriptor with
** fdwatch_add_fd().  Returns a null pointer if ridx does not resolve to
** a valid watched descriptor.
*/
void*
fdwatch_get_client_data( int ridx )
    {
    int fd;

#ifdef HAVE_KQUEUE
    fd = kqueue_get_fd( ridx );
#else /* HAVE_KQUEUE */
# ifdef HAVE_POLL
    fd = poll_get_fd( ridx );
# else /* HAVE_POLL */
#  ifdef HAVE_SELECT
    fd = select_get_fd( ridx );
#  else /* HAVE_SELECT */
    fd = -1;
#  endif /* HAVE_SELECT */
# endif /* HAVE_POLL */
#endif /* HAVE_KQUEUE */

    /* Guard the fd_data lookup against out-of-range descriptors. */
    if ( fd < 0 || fd >= nfiles )
	return (void*) 0;
    return fd_data[fd];
    }
264
265
266 /* Generate debugging statistics syslog message. */
267 void
268 fdwatch_logstats( long secs )
269 {
270 char* which;
271
272 #ifdef HAVE_KQUEUE
273 which = "kevent";
274 #else /* HAVE_KQUEUE */
275 # ifdef HAVE_POLL
276 which = "poll";
277 # else /* HAVE_POLL */
278 # ifdef HAVE_SELECT
279 which = "select";
280 # else /* HAVE_SELECT */
281 which = "UNKNOWN";
282 # endif /* HAVE_SELECT */
283 # endif /* HAVE_POLL */
284 #endif /* HAVE_KQUEUE */
285
286 syslog(
287 LOG_NOTICE, " fdwatch - %ld %ss (%g/sec)",
288 nwatches, which, (float) nwatches / secs );
289 nwatches = 0;
290 }
291
292
293 #ifdef HAVE_KQUEUE
294
295 static struct kevent* kqchanges;
296 static int nkqchanges;
297 static struct kevent* kqevents;
298 static int* kqrfdidx;
299 static int kq;
300
301
302 static int
303 kqueue_init( int nfiles )
304 {
305 kq = kqueue();
306 if ( kq == -1 )
307 return -1;
308 kqchanges = (struct kevent*) malloc( sizeof(struct kevent) * 2 * nfiles );
309 kqevents = (struct kevent*) malloc( sizeof(struct kevent) * nfiles );
310 kqrfdidx = (int*) malloc( sizeof(int) * nfiles );
311 if ( kqchanges == (struct kevent*) 0 || kqevents == (struct kevent*) 0 ||
312 kqrfdidx == (int*) 0 )
313 return -1;
314 return 0;
315 }
316
317
/* Queue an EV_ADD change for fd, batched in kqchanges and submitted by
** the next kevent() call in kqueue_watch().  rw picks the filter.
** NOTE(review): nkqchanges is not checked against the 2*nfiles capacity
** of kqchanges - relies on callers not queueing more changes than that
** between fdwatch() calls.
*/
static void
kqueue_add_fd( int fd, int rw )
    {
    kqchanges[nkqchanges].ident = fd;
    kqchanges[nkqchanges].flags = EV_ADD;
    switch ( rw )
	{
	case FDW_READ: kqchanges[nkqchanges].filter = EVFILT_READ; break;
	case FDW_WRITE: kqchanges[nkqchanges].filter = EVFILT_WRITE; break;
	default: break;	/* unknown rw: filter field left as-is */
	}
    ++nkqchanges;
    }
331
332
/* Queue an EV_DELETE change for fd.  The filter to delete is derived
** from fd_rw[fd], i.e. the direction the fd was registered with in
** fdwatch_add_fd() - so this must run before that entry goes stale.
*/
static void
kqueue_del_fd( int fd )
    {
    kqchanges[nkqchanges].ident = fd;
    kqchanges[nkqchanges].flags = EV_DELETE;
    switch ( fd_rw[fd] )
	{
	case FDW_READ: kqchanges[nkqchanges].filter = EVFILT_READ; break;
	case FDW_WRITE: kqchanges[nkqchanges].filter = EVFILT_WRITE; break;
	}
    ++nkqchanges;
    }
345
346
/* Submit the batched changes and wait for events.  Returns the number of
** events, 0 on timeout, or -1 on error.  timeout_msecs == INFTIM means
** wait indefinitely.
*/
static int
kqueue_watch( long timeout_msecs )
    {
    int i, r;

    if ( timeout_msecs == INFTIM )
	r = kevent(
	    kq, kqchanges, nkqchanges, kqevents, nfiles, (struct timespec*) 0 );
    else
	{
	struct timespec ts;
	ts.tv_sec = timeout_msecs / 1000L;
	ts.tv_nsec = ( timeout_msecs % 1000L ) * 1000000L;
	r = kevent( kq, kqchanges, nkqchanges, kqevents, nfiles, &ts );
	}
    /* The change list was consumed by kevent() - reset it even on error. */
    nkqchanges = 0;
    if ( r == -1 )
	return -1;

    /* Build the fd -> result-index map used by kqueue_check_fd(),
    ** skipping per-event errors.
    */
    for ( i = 0; i < r; ++i )
	if ( ! ( kqevents[i].flags & EV_ERROR ) )
	    kqrfdidx[kqevents[i].ident] = i;

    return r;
    }
372
373
/* Report whether fd was returned ready, in its registered direction, by
** the last kqueue_watch().
** NOTE(review): kqrfdidx entries are never cleared between rounds; the
** ident-vs-fd comparison below is what rejects stale entries from earlier
** rounds.  It also compares the kevent ident (unsigned) against a signed
** int - presumably benign for valid fds, but worth confirming.
*/
static int
kqueue_check_fd( int fd )
    {
    int ridx = kqrfdidx[fd];

    if ( kqevents[ridx].ident != fd )
	return 0;
    switch ( fd_rw[fd] )
	{
	case FDW_READ: return kqevents[ridx].filter == EVFILT_READ;
	case FDW_WRITE: return kqevents[ridx].filter == EVFILT_WRITE;
	default: return 0;
	}
    }
388
389
390 static int
391 kqueue_get_fd( int ridx )
392 {
393 int fd;
394
395 if ( kqevents[ridx].flags & EV_ERROR )
396 return -1;
397 fd = kqevents[ridx].ident;
398 if ( kqueue_check_fd( fd ) )
399 return fd;
400 return -1;
401 }
402
403 #else /* HAVE_KQUEUE */
404
405 # ifdef HAVE_POLL
406
static struct pollfd* pollfds;
static int npollfds;
static int* poll_fdidx;
static int* poll_rfdidx;


/* Allocate the poll() bookkeeping arrays, sized for nfiles descriptors.
** Returns 0 on success, -1 on failure (with any partial allocations
** released so nothing leaks).
*/
static int
poll_init( int nfiles )
    {
    pollfds = (struct pollfd*) malloc( sizeof(struct pollfd) * nfiles );
    poll_fdidx = (int*) malloc( sizeof(int) * nfiles );
    poll_rfdidx = (int*) malloc( sizeof(int) * nfiles );
    if ( pollfds == (struct pollfd*) 0 || poll_fdidx == (int*) 0 ||
	 poll_rfdidx == (int*) 0 )
	{
	/* Don't leak whichever allocations succeeded. */
	free( (void*) pollfds );
	free( (void*) poll_fdidx );
	free( (void*) poll_rfdidx );
	pollfds = (struct pollfd*) 0;
	poll_fdidx = poll_rfdidx = (int*) 0;
	return -1;
	}
    return 0;
    }
424
425
426 static void
427 poll_add_fd( int fd, int rw )
428 {
429 pollfds[npollfds].fd = fd;
430 switch ( rw )
431 {
432 case FDW_READ: pollfds[npollfds].events = POLLIN; break;
433 case FDW_WRITE: pollfds[npollfds].events = POLLOUT; break;
434 default: break;
435 }
436 poll_fdidx[fd] = npollfds;
437 ++npollfds;
438 }
439
440
/* Remove fd from the pollfd array in O(1): move the last entry into the
** vacated slot and fix up poll_fdidx for the fd that was moved.  This
** reorders the array but keeps it dense, which is all poll() needs.
*/
static void
poll_del_fd( int fd )
    {
    int idx = poll_fdidx[fd];

    --npollfds;
    pollfds[idx] = pollfds[npollfds];
    poll_fdidx[pollfds[idx].fd] = idx;
    }
450
451
452 static int
453 poll_watch( long timeout_msecs )
454 {
455 int r, ridx, i;
456
457 r = poll( pollfds, npollfds, (int) timeout_msecs );
458 if ( r == -1 )
459 return -1;
460
461 ridx = 0;
462 for ( i = 0; i < npollfds; ++i )
463 if ( pollfds[i].revents & ( POLLIN | POLLOUT ) )
464 poll_rfdidx[ridx++] = pollfds[i].fd;
465
466 return r;
467 }
468
469
470 static int
471 poll_check_fd( int fd )
472 {
473 switch ( fd_rw[fd] )
474 {
475 case FDW_READ: return pollfds[poll_fdidx[fd]].revents & POLLIN;
476 case FDW_WRITE: return pollfds[poll_fdidx[fd]].revents & POLLOUT;
477 default: return 0;
478 }
479 }
480
481
482 static int
483 poll_get_fd( int ridx )
484 {
485 int fd = poll_rfdidx[ridx];
486
487 if ( poll_check_fd( fd ) )
488 return fd;
489 return -1;
490 }
491
492 # else /* HAVE_POLL */
493
494 # ifdef HAVE_SELECT
495
static fd_set master_rfdset;
static fd_set master_wfdset;
static fd_set working_rfdset;
static fd_set working_wfdset;
static int* select_fds;
static int* select_fdidx;
static int* select_rfdidx;
static int nselect_fds;
static int maxfd;
static int maxfd_changed;


/* Set up the select() bookkeeping: empty master fd sets plus three int
** arrays sized for nfiles descriptors.  Returns 0 on success, -1 if any
** allocation fails.
*/
static int
select_init( int nfiles )
    {
    FD_ZERO( &master_rfdset );
    FD_ZERO( &master_wfdset );
    select_fds = (int*) malloc( sizeof(int) * nfiles );
    select_fdidx = (int*) malloc( sizeof(int) * nfiles );
    select_rfdidx = (int*) malloc( sizeof(int) * nfiles );
    if ( select_fds == (int*) 0 )
	return -1;
    if ( select_fdidx == (int*) 0 )
	return -1;
    if ( select_rfdidx == (int*) 0 )
	return -1;
    /* No fds watched yet; the cached maximum is valid and empty. */
    maxfd = -1;
    maxfd_changed = 0;
    return 0;
    }
523
524
525 static void
526 select_add_fd( int fd, int rw )
527 {
528 select_fds[nselect_fds] = fd;
529 switch ( rw )
530 {
531 case FDW_READ: FD_SET( fd, &master_rfdset ); break;
532 case FDW_WRITE: FD_SET( fd, &master_wfdset ); break;
533 default: break;
534 }
535 if ( fd > maxfd )
536 maxfd = fd;
537 select_fdidx[fd] = nselect_fds;
538 ++nselect_fds;
539 }
540
541
/* Stop watching fd: swap the last tracked fd into this one's slot in
** select_fds (O(1) delete, fixing up select_fdidx for the moved fd),
** clear fd from both master sets, and flag the cached maxfd for lazy
** recomputation in select_get_maxfd().
*/
static void
select_del_fd( int fd )
    {
    int idx = select_fdidx[fd];

    --nselect_fds;
    select_fds[idx] = select_fds[nselect_fds];
    select_fdidx[select_fds[idx]] = idx;

    FD_CLR( fd, &master_rfdset );
    FD_CLR( fd, &master_wfdset );

    /* Only removing the current maximum invalidates the cache. */
    if ( fd >= maxfd )
	maxfd_changed = 1;
    }
557
558
/* Wait for activity on the watched descriptors using select().  Returns
** the number of ready descriptors, 0 on timeout, or -1 on error.
** timeout_msecs == INFTIM means wait indefinitely.
*/
static int
select_watch( long timeout_msecs )
    {
    int mfd;
    int r, fd, ridx;

    /* Work on copies of the master sets - select() modifies its fd_set
    ** arguments in place.
    */
    working_rfdset = master_rfdset;
    working_wfdset = master_wfdset;
    mfd = select_get_maxfd();
    if ( timeout_msecs == INFTIM )
	r = select(
	    mfd + 1, &working_rfdset, &working_wfdset, (fd_set*) 0,
	    (struct timeval*) 0 );
    else
	{
	struct timeval timeout;
	timeout.tv_sec = timeout_msecs / 1000L;
	timeout.tv_usec = ( timeout_msecs % 1000L ) * 1000L;
	r = select(
	    mfd + 1, &working_rfdset, &working_wfdset, (fd_set*) 0, &timeout );
	}
    if ( r == -1 )
	return -1;

    /* Record which fds are ready (in ascending fd order) for
    ** select_get_fd() lookups.
    */
    ridx = 0;
    for ( fd = 0; fd <= mfd; ++fd )
	if ( select_check_fd( fd ) )
	    select_rfdidx[ridx++] = fd;

    return r;
    }
590
591
592 static int
593 select_check_fd( int fd )
594 {
595 switch ( fd_rw[fd] )
596 {
597 case FDW_READ: return FD_ISSET( fd, &working_rfdset );
598 case FDW_WRITE: return FD_ISSET( fd, &working_wfdset );
599 default: return 0;
600 }
601 }
602
603
604 static int
605 select_get_fd( int ridx )
606 {
607 int fd = select_rfdidx[ridx];
608
609 if ( select_check_fd( fd ) )
610 return fd;
611 return -1;
612 }
613
614
615 static int
616 select_get_maxfd( void )
617 {
618 if ( maxfd_changed )
619 {
620 int i;
621 maxfd = -1;
622 for ( i = 0; i < nselect_fds; ++i )
623 if ( select_fds[i] > maxfd )
624 maxfd = select_fds[i];
625 maxfd_changed = 0;
626 }
627 return maxfd;
628 }
629
630 # endif /* HAVE_SELECT */
631
632 # endif /* HAVE_POLL */
633
634 #endif /* HAVE_KQUEUE */