 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * general notes about epoll:
 *
 * a) epoll silently removes fds from the fd set. as nothing tells us
 *    that an fd has been removed otherwise, we have to continually
 *    "rearm" fds that we suspect *might* have changed (same
 *    problem with kqueue, but much less costly there).
 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
 *    and seems not to have any advantage.
 * c) the inability to handle fork or file descriptors (think dup)
 *    limits the applicability over poll, so this is not a generic
 *    poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns.
 */

#include <sys/epoll.h>

34 | static void |
52 | static void |
35 | epoll_modify (EV_P_ int fd, int oev, int nev) |
53 | epoll_modify (EV_P_ int fd, int oev, int nev) |
36 | { |
54 | { |
37 | int mode = nev ? oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD : EPOLL_CTL_DEL; |
55 | struct epoll_event ev; |
38 | |
56 | |
39 | struct epoll_event ev; |
57 | /* |
|
|
58 | * we handle EPOLL_CTL_DEL by ignoring it here |
|
|
59 | * on the assumption that the fd is gone anyways |
|
|
60 | * if that is wrong, we have to handle the spurious |
|
|
61 | * event in epoll_poll. |
|
|
62 | */ |
|
|
63 | if (!nev) |
|
|
64 | return; |
|
|
65 | |
40 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
66 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
41 | ev.events = |
|
|
42 | (nev & EV_READ ? EPOLLIN : 0) |
67 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
43 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
68 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
44 | |
69 | |
45 | if (epoll_ctl (epoll_fd, mode, fd, &ev)) |
70 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
|
|
71 | return; |
|
|
72 | |
|
|
73 | if (expect_true (errno == ENOENT)) |
|
|
74 | { |
46 | if (errno != ENOENT /* on ENOENT the fd went away, so try to do the right thing */ |
75 | /* on ENOENT the fd went away, so try to do the right thing */ |
|
|
76 | if (!nev) |
|
|
77 | return; |
|
|
78 | |
47 | || (nev && epoll_ctl (epoll_fd, EPOLL_CTL_ADD, fd, &ev))) |
79 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
|
|
80 | return; |
|
|
81 | } |
|
|
82 | else if (expect_true (errno == EEXIST)) |
|
|
83 | { |
|
|
84 | /* on EEXIST we ignored a previous DEL */ |
|
|
85 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
|
|
86 | return; |
|
|
87 | } |
|
|
88 | |
48 | fd_kill (EV_A_ fd); |
89 | fd_kill (EV_A_ fd); |
49 | } |
90 | } |
50 | |
91 | |
51 | static void |
92 | static void |
52 | epoll_poll (EV_P_ ev_tstamp timeout) |
93 | epoll_poll (EV_P_ ev_tstamp timeout) |
53 | { |
94 | { |
54 | int i; |
95 | int i; |
55 | int eventcnt = epoll_wait (epoll_fd, epoll_events, epoll_eventmax, ceil (timeout * 1000.)); |
96 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
56 | |
97 | |
57 | if (eventcnt < 0) |
98 | if (expect_false (eventcnt < 0)) |
58 | { |
99 | { |
59 | if (errno != EINTR) |
100 | if (errno != EINTR) |
60 | syserr ("(libev) epoll_wait"); |
101 | syserr ("(libev) epoll_wait"); |
61 | |
102 | |
62 | return; |
103 | return; |
63 | } |
104 | } |
64 | |
105 | |
65 | for (i = 0; i < eventcnt; ++i) |
106 | for (i = 0; i < eventcnt; ++i) |
66 | fd_event ( |
107 | { |
67 | EV_A_ |
108 | struct epoll_event *ev = epoll_events + i; |
68 | epoll_events [i].data.u64, |
109 | |
|
|
110 | int fd = ev->data.u64; |
69 | (epoll_events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
111 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
70 | | (epoll_events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0) |
112 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
71 | ); |
113 | int want = anfds [fd].events; |
|
|
114 | |
|
|
115 | if (expect_false (got & ~want)) |
|
|
116 | { |
|
|
117 | /* we received an event but are not interested in it, try mod or del */ |
|
|
118 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
|
|
119 | | (want & EV_WRITE ? EPOLLOUT : 0); |
|
|
120 | |
|
|
121 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
|
|
122 | } |
|
|
123 | |
|
|
124 | fd_event (EV_A_ fd, got); |
|
|
125 | } |
72 | |
126 | |
73 | /* if the receive array was full, increase its size */ |
127 | /* if the receive array was full, increase its size */ |
74 | if (expect_false (eventcnt == epoll_eventmax)) |
128 | if (expect_false (eventcnt == epoll_eventmax)) |
75 | { |
129 | { |
76 | ev_free (epoll_events); |
130 | ev_free (epoll_events); |
77 | epoll_eventmax = array_roundsize (epoll_events, epoll_eventmax << 1); |
131 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
78 | epoll_events = ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
132 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
79 | } |
133 | } |
80 | } |
134 | } |
81 | |
135 | |
82 | static int |
136 | int inline_size |
83 | epoll_init (EV_P_ int flags) |
137 | epoll_init (EV_P_ int flags) |
84 | { |
138 | { |
85 | epoll_fd = epoll_create (256); |
139 | backend_fd = epoll_create (256); |
86 | |
140 | |
87 | if (epoll_fd < 0) |
141 | if (backend_fd < 0) |
88 | return 0; |
142 | return 0; |
89 | |
143 | |
90 | fcntl (epoll_fd, F_SETFD, FD_CLOEXEC); |
144 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
91 | |
145 | |
92 | method_fudge = 1e-3; /* needed to compensate for epoll returning early */ |
146 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
93 | method_modify = epoll_modify; |
147 | backend_modify = epoll_modify; |
94 | method_poll = epoll_poll; |
148 | backend_poll = epoll_poll; |
95 | |
149 | |
96 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
150 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
97 | epoll_events = ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
151 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
98 | |
152 | |
99 | return EVMETHOD_EPOLL; |
153 | return EVBACKEND_EPOLL; |
100 | } |
154 | } |
101 | |
155 | |
102 | static void |
156 | void inline_size |
103 | epoll_destroy (EV_P) |
157 | epoll_destroy (EV_P) |
104 | { |
158 | { |
105 | close (epoll_fd); |
|
|
106 | |
|
|
107 | ev_free (epoll_events); |
159 | ev_free (epoll_events); |
108 | } |
160 | } |
109 | |
161 | |
110 | static void |
162 | void inline_size |
111 | epoll_fork (EV_P) |
163 | epoll_fork (EV_P) |
112 | { |
164 | { |
113 | close (epoll_fd); |
165 | close (backend_fd); |
114 | |
166 | |
115 | while ((epoll_fd = epoll_create (256)) < 0) |
167 | while ((backend_fd = epoll_create (256)) < 0) |
116 | syserr ("(libev) epoll_create"); |
168 | syserr ("(libev) epoll_create"); |
117 | |
169 | |
118 | fcntl (epoll_fd, F_SETFD, FD_CLOEXEC); |
170 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
119 | |
171 | |
120 | fd_rearm_all (EV_A); |
172 | fd_rearm_all (EV_A); |
121 | } |
173 | } |
122 | |
174 | |