… | |
… | |
 * limits the applicability over poll, so this is not a generic
 * poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */
57 | |
59 | |
58 | #include <sys/epoll.h> |
60 | #include <sys/epoll.h> |
59 | |
61 | |
60 | static void |
62 | static void |
61 | epoll_modify (EV_P_ int fd, int oev, int nev) |
63 | epoll_modify (EV_P_ int fd, int oev, int nev) |
62 | { |
64 | { |
63 | struct epoll_event ev; |
65 | struct epoll_event ev; |
|
|
66 | unsigned char oldmask; |
64 | |
67 | |
65 | /* |
68 | /* |
66 | * we handle EPOLL_CTL_DEL by ignoring it here |
69 | * we handle EPOLL_CTL_DEL by ignoring it here |
67 | * on the assumption that the fd is gone anyways |
70 | * on the assumption that the fd is gone anyways |
68 | * if that is wrong, we have to handle the spurious |
71 | * if that is wrong, we have to handle the spurious |
69 | * event in epoll_poll. |
72 | * event in epoll_poll. |
|
|
73 | * the fd is later added, we try to ADD it, and, if that |
|
|
74 | * fails, we assume it still has the same eventmask. |
70 | */ |
75 | */ |
71 | if (!nev) |
76 | if (!nev) |
72 | return; |
77 | return; |
73 | |
78 | |
74 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
79 | oldmask = anfds [fd].emask; |
|
|
80 | anfds [fd].emask = nev; |
|
|
81 | |
|
|
82 | /* store the generation counter in the upper 32 bits */ |
|
|
83 | ev.data.u64 = fd | ((uint64_t)++anfds [fd].egen << 32); |
75 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
84 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
76 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
85 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
77 | |
86 | |
78 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
87 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
79 | return; |
88 | return; |
80 | |
89 | |
81 | if (expect_true (errno == ENOENT)) |
90 | if (expect_true (errno == ENOENT)) |
82 | { |
91 | { |
83 | /* on ENOENT the fd went away, so try to do the right thing */ |
92 | /* if ENOENT then the fd went away, so try to do the right thing */ |
84 | if (!nev) |
93 | if (!nev) |
85 | return; |
94 | goto dec_egen; |
86 | |
95 | |
87 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
96 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
88 | return; |
97 | return; |
89 | } |
98 | } |
90 | else if (expect_true (errno == EEXIST)) |
99 | else if (expect_true (errno == EEXIST)) |
91 | { |
100 | { |
92 | /* on EEXIST we ignored a previous DEL */ |
101 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
|
|
102 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
|
|
103 | if (oldmask == nev) |
|
|
104 | goto dec_egen; |
|
|
105 | |
93 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
106 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
94 | return; |
107 | return; |
95 | } |
108 | } |
96 | |
109 | |
97 | fd_kill (EV_A_ fd); |
110 | fd_kill (EV_A_ fd); |
|
|
111 | |
|
|
112 | dec_egen: |
|
|
113 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
|
|
114 | --anfds [fd].egen; |
98 | } |
115 | } |
99 | |
116 | |
100 | static void |
117 | static void |
101 | epoll_poll (EV_P_ ev_tstamp timeout) |
118 | epoll_poll (EV_P_ ev_tstamp timeout) |
102 | { |
119 | { |
… | |
… | |
113 | |
130 | |
114 | for (i = 0; i < eventcnt; ++i) |
131 | for (i = 0; i < eventcnt; ++i) |
115 | { |
132 | { |
116 | struct epoll_event *ev = epoll_events + i; |
133 | struct epoll_event *ev = epoll_events + i; |
117 | |
134 | |
118 | int fd = ev->data.u64; |
135 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
|
|
136 | int want = anfds [fd].events; |
119 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
137 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
120 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
138 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
121 | int want = anfds [fd].events; |
139 | |
|
|
140 | /* check for spurious notification */ |
|
|
141 | if (expect_false (anfds [fd].egen != (unsigned char)(ev->data.u64 >> 32))) |
|
|
142 | { |
|
|
143 | /* recreate kernel state */ |
|
|
144 | postfork = 1; |
|
|
145 | continue; |
|
|
146 | } |
122 | |
147 | |
123 | if (expect_false (got & ~want)) |
148 | if (expect_false (got & ~want)) |
124 | { |
149 | { |
|
|
150 | anfds [fd].emask = want; |
|
|
151 | |
125 | /* we received an event but are not interested in it, try mod or del */ |
152 | /* we received an event but are not interested in it, try mod or del */ |
|
|
153 | /* I don't think we ever need MOD, but let's handle it anyways */ |
126 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
154 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
127 | | (want & EV_WRITE ? EPOLLOUT : 0); |
155 | | (want & EV_WRITE ? EPOLLOUT : 0); |
128 | |
156 | |
129 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
157 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
|
|
158 | { |
|
|
159 | postfork = 1; /* an error occured, recreate kernel state */ |
|
|
160 | continue; |
|
|
161 | } |
130 | } |
162 | } |
131 | |
163 | |
132 | fd_event (EV_A_ fd, got); |
164 | fd_event (EV_A_ fd, got); |
133 | } |
165 | } |
134 | |
166 | |
… | |
… | |
153 | |
185 | |
154 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
186 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
155 | backend_modify = epoll_modify; |
187 | backend_modify = epoll_modify; |
156 | backend_poll = epoll_poll; |
188 | backend_poll = epoll_poll; |
157 | |
189 | |
158 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
190 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
159 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
191 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
160 | |
192 | |
161 | return EVBACKEND_EPOLL; |
193 | return EVBACKEND_EPOLL; |
162 | } |
194 | } |
163 | |
195 | |