ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev_epoll.c
(Generate patch)

Comparing libev/ev_epoll.c (file contents):
Revision 1.35 by root, Thu Oct 23 04:56:49 2008 UTC vs.
Revision 1.50 by root, Tue Mar 23 23:45:12 2010 UTC

1/* 1/*
2 * libev epoll fd activity backend 2 * libev epoll fd activity backend
3 * 3 *
4 * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
47 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a) 47 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
48 * and seems not to have any advantage. 48 * and seems not to have any advantage.
49 * c) the inability to handle fork or file descriptors (think dup) 49 * c) the inability to handle fork or file descriptors (think dup)
50 * limits the applicability over poll, so this is not a generic 50 * limits the applicability over poll, so this is not a generic
51 * poll replacement. 51 * poll replacement.
52 * d) epoll doesn't work the same as select with many file descriptors
53 * (such as files). while not critical, no other advanced interface
54 * seems to share this (rather non-unixy) limitation.
55 * e) epoll claims to be embeddable, but in practise you never get
56 * a ready event for the epoll fd.
52 * 57 *
53 * lots of "weird code" and complication handling in this file is due 58 * lots of "weird code" and complication handling in this file is due
54 * to these design problems with epoll, as we try very hard to avoid 59 * to these design problems with epoll, as we try very hard to avoid
55 * epoll_ctl syscalls for common usage patterns. 60 * epoll_ctl syscalls for common usage patterns and handle the breakage
61 * ensuing from receiving events for closed and otherwise long gone
62 * file descriptors.
56 */ 63 */
57 64
58#include <sys/epoll.h> 65#include <sys/epoll.h>
59
60void inline_size
61unsigned_char_init (unsigned char *base, int count)
62{
63 /* memset might be overkill */
64 while (count--)
65 *base++ = 0;
66}
67 66
68static void 67static void
69epoll_modify (EV_P_ int fd, int oev, int nev) 68epoll_modify (EV_P_ int fd, int oev, int nev)
70{ 69{
71 struct epoll_event ev; 70 struct epoll_event ev;
74 /* 73 /*
75 * we handle EPOLL_CTL_DEL by ignoring it here 74 * we handle EPOLL_CTL_DEL by ignoring it here
76 * on the assumption that the fd is gone anyways 75 * on the assumption that the fd is gone anyways
77 * if that is wrong, we have to handle the spurious 76 * if that is wrong, we have to handle the spurious
78 * event in epoll_poll. 77 * event in epoll_poll.
79 * the fd is later added, we try to ADD it, and, if that 78 * if the fd is added again, we try to ADD it, and, if that
80 * fails, we assume it still has the same eventmask. 79 * fails, we assume it still has the same eventmask.
81 */ 80 */
82 if (!nev) 81 if (!nev)
83 return; 82 return;
84 83
85 oldmask = anfds [fd].emask; 84 oldmask = anfds [fd].emask;
86 anfds [fd].emask = nev; 85 anfds [fd].emask = nev;
87 86
88 ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ 87 /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */
88 ev.data.u64 = (uint64_t)(uint32_t)fd
89 | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
89 ev.events = (nev & EV_READ ? EPOLLIN : 0) 90 ev.events = (nev & EV_READ ? EPOLLIN : 0)
90 | (nev & EV_WRITE ? EPOLLOUT : 0); 91 | (nev & EV_WRITE ? EPOLLOUT : 0);
91 92
92 if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) 93 if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
93 return; 94 return;
94 95
95 if (expect_true (errno == ENOENT)) 96 if (expect_true (errno == ENOENT))
96 { 97 {
97 /* if ENOENT then the fd went away, so try to do the right thing */ 98 /* if ENOENT then the fd went away, so try to do the right thing */
98 if (!nev) 99 if (!nev)
99 return; 100 goto dec_egen;
100 101
101 if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) 102 if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
102 return; 103 return;
103 } 104 }
104 else if (expect_true (errno == EEXIST)) 105 else if (expect_true (errno == EEXIST))
105 { 106 {
106 /* EEXIST means we ignored a previous DEL, but the fd is still active */ 107 /* EEXIST means we ignored a previous DEL, but the fd is still active */
107 /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ 108 /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
109 if (oldmask == nev)
110 goto dec_egen;
111
108 if (oldmask == nev || !epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) 112 if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
109 return; 113 return;
110 } 114 }
111 115
112 fd_kill (EV_A_ fd); 116 fd_kill (EV_A_ fd);
117
118dec_egen:
119 /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
120 --anfds [fd].egen;
113} 121}
114 122
115static void 123static void
116epoll_poll (EV_P_ ev_tstamp timeout) 124epoll_poll (EV_P_ ev_tstamp timeout)
117{ 125{
118 int i; 126 int i;
127 int eventcnt;
128
129 /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
130 /* the default libev max wait time, however. */
131 EV_RELEASE_CB;
119 int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); 132 eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
133 EV_ACQUIRE_CB;
120 134
121 if (expect_false (eventcnt < 0)) 135 if (expect_false (eventcnt < 0))
122 { 136 {
123 if (errno != EINTR) 137 if (errno != EINTR)
124 syserr ("(libev) epoll_wait"); 138 ev_syserr ("(libev) epoll_wait");
125 139
126 return; 140 return;
127 } 141 }
128 142
129 for (i = 0; i < eventcnt; ++i) 143 for (i = 0; i < eventcnt; ++i)
130 { 144 {
131 struct epoll_event *ev = epoll_events + i; 145 struct epoll_event *ev = epoll_events + i;
132 146
133 int fd = ev->data.u64; 147 int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */
148 int want = anfds [fd].events;
134 int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) 149 int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
135 | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); 150 | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0);
136 int want = anfds [fd].events; 151
152 /* check for spurious notification */
153 if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
154 {
155 /* recreate kernel state */
156 postfork = 1;
157 continue;
158 }
137 159
138 if (expect_false (got & ~want)) 160 if (expect_false (got & ~want))
139 { 161 {
140 anfds [fd].emask = want; 162 anfds [fd].emask = want;
141 163
142 /* we received an event but are not interested in it, try mod or del */ 164 /* we received an event but are not interested in it, try mod or del */
143 /* I don't think we ever need MOD, but let's handle it anyways */ 165 /* I don't think we ever need MOD, but let's handle it anyways */
144 ev->events = (want & EV_READ ? EPOLLIN : 0) 166 ev->events = (want & EV_READ ? EPOLLIN : 0)
145 | (want & EV_WRITE ? EPOLLOUT : 0); 167 | (want & EV_WRITE ? EPOLLOUT : 0);
146 168
169 /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */
170 /* which is fortunately easy to do for us. */
147 epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); 171 if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
172 {
173 postfork = 1; /* an error occured, recreate kernel state */
174 continue;
175 }
148 } 176 }
149 177
150 fd_event (EV_A_ fd, got); 178 fd_event (EV_A_ fd, got);
151 } 179 }
152 180
160} 188}
161 189
162int inline_size 190int inline_size
163epoll_init (EV_P_ int flags) 191epoll_init (EV_P_ int flags)
164{ 192{
193#ifdef EPOLL_CLOEXEC
194 backend_fd = epoll_create1 (EPOLL_CLOEXEC);
195
196 if (backend_fd <= 0)
197#endif
165 backend_fd = epoll_create (256); 198 backend_fd = epoll_create (256);
166 199
167 if (backend_fd < 0) 200 if (backend_fd < 0)
168 return 0; 201 return 0;
169 202
170 fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 203 fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
171 204
172 backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ 205 backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */
173 backend_modify = epoll_modify; 206 backend_modify = epoll_modify;
174 backend_poll = epoll_poll; 207 backend_poll = epoll_poll;
175 208
176 epoll_eventmax = 64; /* intiial number of events receivable per poll */ 209 epoll_eventmax = 64; /* initial number of events receivable per poll */
177 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); 210 epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
178 211
179 return EVBACKEND_EPOLL; 212 return EVBACKEND_EPOLL;
180} 213}
181 214
189epoll_fork (EV_P) 222epoll_fork (EV_P)
190{ 223{
191 close (backend_fd); 224 close (backend_fd);
192 225
193 while ((backend_fd = epoll_create (256)) < 0) 226 while ((backend_fd = epoll_create (256)) < 0)
194 syserr ("(libev) epoll_create"); 227 ev_syserr ("(libev) epoll_create");
195 228
196 fcntl (backend_fd, F_SETFD, FD_CLOEXEC); 229 fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
197 230
198 fd_rearm_all (EV_A); 231 fd_rearm_all (EV_A);
199} 232}

Diff Legend

- Removed lines
+ Added lines
< Changed lines (left column: revision 1.35)
> Changed lines (right column: revision 1.50)