 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * general notes about epoll:
 *
 * a) epoll silently removes fds from the fd set. as nothing tells us
 *    that an fd has been removed otherwise, we have to continually
 *    "rearm" fds that we suspect *might* have changed (same
 *    problem with kqueue, but much less costly there).
 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
 *    and seems not to have any advantage.
 * c) the inability to handle fork or file descriptors (think dup)
 *    limits the applicability over poll, so this is not a generic
 *    poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns.
 */
32 | #include <sys/epoll.h> |
50 | #include <sys/epoll.h> |
33 | |
51 | |
34 | static int epoll_fd = -1; |
52 | static void |
|
|
53 | epoll_modify (EV_P_ int fd, int oev, int nev) |
|
|
54 | { |
|
|
55 | struct epoll_event ev; |
35 | |
56 | |
36 | static void |
57 | /* |
37 | epoll_modify (int fd, int oev, int nev) |
58 | * we handle EPOLL_CTL_DEL by ignoring it here |
38 | { |
59 | * on the assumption that the fd is gone anyways |
39 | int mode = nev ? oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD : EPOLL_CTL_DEL; |
60 | * if that is wrong, we have to handle the spurious |
|
|
61 | * event in epoll_poll. |
|
|
62 | */ |
|
|
63 | if (!nev) |
|
|
64 | return; |
40 | |
65 | |
41 | struct epoll_event ev; |
66 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
42 | ev.data.fd = fd; |
|
|
43 | ev.events = |
|
|
44 | (nev & EV_READ ? EPOLLIN : 0) |
67 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
45 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
68 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
46 | |
69 | |
47 | epoll_ctl (epoll_fd, mode, fd, &ev); |
70 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
|
|
71 | return; |
|
|
72 | |
|
|
73 | if (expect_true (errno == ENOENT)) |
|
|
74 | { |
|
|
75 | /* on ENOENT the fd went away, so try to do the right thing */ |
|
|
76 | if (!nev) |
|
|
77 | return; |
|
|
78 | |
|
|
79 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
|
|
80 | return; |
|
|
81 | } |
|
|
82 | else if (expect_true (errno == EEXIST)) |
|
|
83 | { |
|
|
84 | /* on EEXIST we ignored a previous DEL */ |
|
|
85 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
|
|
86 | return; |
|
|
87 | } |
|
|
88 | |
|
|
89 | fd_kill (EV_A_ fd); |
48 | } |
90 | } |
49 | |
91 | |
50 | static void |
92 | static void |
51 | epoll_postfork_child (void) |
93 | epoll_poll (EV_P_ ev_tstamp timeout) |
52 | { |
94 | { |
53 | int fd; |
95 | int i; |
|
|
96 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
54 | |
97 | |
55 | epoll_fd = epoll_create (256); |
98 | if (expect_false (eventcnt < 0)) |
56 | fcntl (epoll_fd, F_SETFD, FD_CLOEXEC); |
99 | { |
|
|
100 | if (errno != EINTR) |
|
|
101 | syserr ("(libev) epoll_wait"); |
57 | |
102 | |
58 | /* re-register interest in fds */ |
|
|
59 | for (fd = 0; fd < anfdmax; ++fd) |
|
|
60 | if (anfds [fd].events)//D |
|
|
61 | epoll_modify (fd, EV_NONE, anfds [fd].events); |
|
|
62 | } |
|
|
63 | |
|
|
64 | static struct epoll_event *events; |
|
|
65 | static int eventmax; |
|
|
66 | |
|
|
67 | static void |
|
|
68 | epoll_poll (ev_tstamp timeout) |
|
|
69 | { |
|
|
70 | int eventcnt = epoll_wait (epoll_fd, events, eventmax, ceil (timeout * 1000.)); |
|
|
71 | int i; |
|
|
72 | |
|
|
73 | if (eventcnt < 0) |
|
|
74 | return; |
103 | return; |
|
|
104 | } |
75 | |
105 | |
76 | for (i = 0; i < eventcnt; ++i) |
106 | for (i = 0; i < eventcnt; ++i) |
77 | fd_event ( |
107 | { |
78 | events [i].data.fd, |
108 | struct epoll_event *ev = epoll_events + i; |
|
|
109 | |
|
|
110 | int fd = ev->data.u64; |
79 | (events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
111 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
80 | | (events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0) |
112 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
81 | ); |
113 | int want = anfds [fd].events; |
|
|
114 | |
|
|
115 | if (expect_false (got & ~want)) |
|
|
116 | { |
|
|
117 | /* we received an event but are not interested in it, try mod or del */ |
|
|
118 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
|
|
119 | | (want & EV_WRITE ? EPOLLOUT : 0); |
|
|
120 | |
|
|
121 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
|
|
122 | } |
|
|
123 | |
|
|
124 | fd_event (EV_A_ fd, got); |
|
|
125 | } |
82 | |
126 | |
83 | /* if the receive array was full, increase its size */ |
127 | /* if the receive array was full, increase its size */ |
84 | if (expect_false (eventcnt == eventmax)) |
128 | if (expect_false (eventcnt == epoll_eventmax)) |
85 | { |
129 | { |
86 | free (events); |
130 | ev_free (epoll_events); |
87 | eventmax = array_roundsize (events, eventmax << 1); |
131 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
88 | events = malloc (sizeof (struct epoll_event) * eventmax); |
132 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
89 | } |
133 | } |
90 | } |
134 | } |
91 | |
135 | |
92 | static void |
136 | int inline_size |
93 | epoll_init (int flags) |
137 | epoll_init (EV_P_ int flags) |
94 | { |
138 | { |
95 | epoll_fd = epoll_create (256); |
139 | backend_fd = epoll_create (256); |
96 | |
140 | |
97 | if (epoll_fd < 0) |
141 | if (backend_fd < 0) |
98 | return; |
142 | return 0; |
99 | |
143 | |
100 | fcntl (epoll_fd, F_SETFD, FD_CLOEXEC); |
144 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
101 | |
145 | |
102 | ev_method = EVMETHOD_EPOLL; |
146 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
103 | method_fudge = 1e-3; /* needed to compensate for epoll returning early */ |
|
|
104 | method_modify = epoll_modify; |
147 | backend_modify = epoll_modify; |
105 | method_poll = epoll_poll; |
148 | backend_poll = epoll_poll; |
106 | |
149 | |
107 | eventmax = 64; /* intiial number of events receivable per poll */ |
150 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
108 | events = malloc (sizeof (struct epoll_event) * eventmax); |
151 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
|
|
152 | |
|
|
153 | return EVBACKEND_EPOLL; |
109 | } |
154 | } |
110 | |
155 | |
|
|
156 | void inline_size |
|
|
157 | epoll_destroy (EV_P) |
|
|
158 | { |
|
|
159 | ev_free (epoll_events); |
|
|
160 | } |
|
|
161 | |
|
|
162 | void inline_size |
|
|
163 | epoll_fork (EV_P) |
|
|
164 | { |
|
|
165 | close (backend_fd); |
|
|
166 | |
|
|
167 | while ((backend_fd = epoll_create (256)) < 0) |
|
|
168 | syserr ("(libev) epoll_create"); |
|
|
169 | |
|
|
170 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
|
|
171 | |
|
|
172 | fd_rearm_all (EV_A); |
|
|
173 | } |
|
|
174 | |