
Comparing libev/ev_epoll.c (file contents):
Revision 1.35 by root, Thu Oct 23 04:56:49 2008 UTC vs.
Revision 1.47 by root, Sun Jul 19 04:11:27 2009 UTC

@@ -1,9 +1,9 @@
 /*
  * libev epoll fd activity backend
  *
- * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
  *
@@ -50,7 +50,9 @@
  * limits the applicability over poll, so this is not a generic
  * poll replacement.
  *
  * lots of "weird code" and complication handling in this file is due
  * to these design problems with epoll, as we try very hard to avoid
- * epoll_ctl syscalls for common usage patterns.
+ * epoll_ctl syscalls for common usage patterns and handle the breakage
+ * ensuing from receiving events for closed and otherwise long gone
+ * file descriptors.
  */
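The "closed and otherwise long gone file descriptors" problem mentioned above comes from epoll registering the underlying open file description rather than the descriptor number. A minimal standalone sketch of how a stale fd number can still be reported (not libev code; needs <sys/epoll.h> and <unistd.h>, error handling omitted):

    int ep = epoll_create1 (0);
    int fds[2];
    pipe (fds);
    int keep = dup (fds[0]);          /* second reference to the same open file description */

    struct epoll_event ev = { .events = EPOLLIN, .data.fd = fds[0] };
    epoll_ctl (ep, EPOLL_CTL_ADD, fds[0], &ev);

    close (fds[0]);                   /* the fd number is gone, the description lives on via 'keep' */
    write (fds[1], "x", 1);
    /* epoll_wait () now reports EPOLLIN carrying the stale number fds[0], and even
       EPOLL_CTL_DEL on that number fails with EBADF - exactly the breakage described above */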
@@ -57,15 +59,7 @@
 
 #include <sys/epoll.h>
-
-void inline_size
-unsigned_char_init (unsigned char *base, int count)
-{
-  /* memset might be overkill */
-  while (count--)
-    *base++ = 0;
-}
 
 static void
 epoll_modify (EV_P_ int fd, int oev, int nev)
 {
   struct epoll_event ev;
@@ -74,40 +68,49 @@
   /*
    * we handle EPOLL_CTL_DEL by ignoring it here
    * on the assumption that the fd is gone anyways
    * if that is wrong, we have to handle the spurious
    * event in epoll_poll.
-   * the fd is later added, we try to ADD it, and, if that
+   * if the fd is added again, we try to ADD it, and, if that
    * fails, we assume it still has the same eventmask.
    */
   if (!nev)
     return;
 
   oldmask = anfds [fd].emask;
   anfds [fd].emask = nev;
 
-  ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */
+  /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */
+  ev.data.u64 = (uint64_t)(uint32_t)fd
+              | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
   ev.events = (nev & EV_READ ? EPOLLIN : 0)
             | (nev & EV_WRITE ? EPOLLOUT : 0);
 
   if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
     return;
 
   if (expect_true (errno == ENOENT))
     {
       /* if ENOENT then the fd went away, so try to do the right thing */
       if (!nev)
-        return;
+        goto dec_egen;
 
       if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
         return;
     }
   else if (expect_true (errno == EEXIST))
     {
       /* EEXIST means we ignored a previous DEL, but the fd is still active */
       /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
+      if (oldmask == nev)
+        goto dec_egen;
+
-      if (oldmask == nev || !epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
+      if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
         return;
     }
 
   fd_kill (EV_A_ fd);
+
+dec_egen:
+  /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
+  --anfds [fd].egen;
 }
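The key change in epoll_modify is the per-fd generation counter: every (re)registration stores a freshly incremented counter in the upper half of the 64-bit event payload, and a failed epoll_ctl undoes the increment via dec_egen. Restated as a standalone sketch (egen_of and op are illustrative names, not libev's anfds machinery):

    uint32_t gen = ++egen_of [fd];                 /* bump the generation for this registration */

    ev.data.u64 = (uint64_t)(uint32_t)fd           /* fd in the lower 32 bits                   */
                | ((uint64_t)gen << 32);           /* generation in the upper 32 bits           */

    if (epoll_ctl (backend_fd, op, fd, &ev) < 0)
      --egen_of [fd];                              /* registration failed: revert the bump      */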
@@ -114,38 +117,54 @@
 
 static void
 epoll_poll (EV_P_ ev_tstamp timeout)
 {
   int i;
+  int eventcnt;
+
+  EV_RELEASE_CB;
-  int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+  EV_ACQUIRE_CB;
 
   if (expect_false (eventcnt < 0))
     {
       if (errno != EINTR)
-        syserr ("(libev) epoll_wait");
+        ev_syserr ("(libev) epoll_wait");
 
       return;
     }
 
   for (i = 0; i < eventcnt; ++i)
     {
       struct epoll_event *ev = epoll_events + i;
 
-      int fd = ev->data.u64;
+      int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */
+      int want = anfds [fd].events;
       int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
               | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0);
-      int want = anfds [fd].events;
+
+      /* check for spurious notification */
+      if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
+        {
+          /* recreate kernel state */
+          postfork = 1;
+          continue;
+        }
 
       if (expect_false (got & ~want))
         {
          anfds [fd].emask = want;
 
          /* we received an event but are not interested in it, try mod or del */
          /* I don't think we ever need MOD, but let's handle it anyways */
          ev->events = (want & EV_READ ? EPOLLIN : 0)
                     | (want & EV_WRITE ? EPOLLOUT : 0);
 
-          epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev);
+          if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
+            {
+              postfork = 1; /* an error occured, recreate kernel state */
+              continue;
+            }
        }
 
       fd_event (EV_A_ fd, got);
    }
@@ -160,1 +179,1 @@
 }
@@ -161,21 +180,26 @@
 
 int inline_size
 epoll_init (EV_P_ int flags)
 {
+#ifdef EPOLL_CLOEXEC
+  backend_fd = epoll_create1 (EPOLL_CLOEXEC);
+
+  if (backend_fd <= 0)
+#endif
-  backend_fd = epoll_create (256);
+    backend_fd = epoll_create (256);
 
   if (backend_fd < 0)
     return 0;
 
   fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
 
   backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */
   backend_modify = epoll_modify;
   backend_poll = epoll_poll;
 
-  epoll_eventmax = 64; /* intiial number of events receivable per poll */
+  epoll_eventmax = 64; /* initial number of events receivable per poll */
   epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
 
   return EVBACKEND_EPOLL;
 }
 
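Because the fallback line doubles as the body of the if when EPOLL_CLOEXEC is defined and as the whole statement when it is not, the #ifdef block above is easy to misread. Desugared, the intended logic is roughly (a sketch, not the literal libev code):

    backend_fd = epoll_create1 (EPOLL_CLOEXEC);     /* close-on-exec set atomically            */

    if (backend_fd <= 0)                            /* e.g. older kernels without the syscall  */
      backend_fd = epoll_create (256);              /* the size hint is ignored since 2.6.8    */

    if (backend_fd < 0)
      return 0;                                     /* epoll unusable, try another backend     */

    fcntl (backend_fd, F_SETFD, FD_CLOEXEC);        /* redundant after epoll_create1, harmless */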
@@ -189,11 +213,11 @@
 epoll_fork (EV_P)
 {
   close (backend_fd);
 
   while ((backend_fd = epoll_create (256)) < 0)
-    syserr ("(libev) epoll_create");
+    ev_syserr ("(libev) epoll_create");
 
   fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
 
   fd_rearm_all (EV_A);
 }
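An epoll fd is shared with the parent after fork (both processes refer to the same kernel epoll instance), so the backend closes it, creates a fresh one and re-registers every watched descriptor via fd_rearm_all; that is what this fork hook does. It only runs if the application tells libev about the fork, for example (using the public libev 3.x API; the surrounding code is hypothetical):

    pid_t pid = fork ();

    if (pid == 0)
      {
        /* child: mark the loop's kernel state as stale; the next iteration
           runs epoll_fork (), recreating backend_fd and re-arming all fds */
        ev_default_fork ();          /* default loop                         */
        /* ev_loop_fork (loop);         for loops made with ev_loop_new ()   */
      }

Alternatively a loop can be created with the EVFLAG_FORKCHECK flag, in which case libev detects forks itself at the cost of a getpid () call per iteration.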
