/*
 * libev poll fd activity backend
 *
 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 …
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 | |
31 | |
32 | #include <poll.h> |
32 | #include <poll.h> |
33 | |
33 | |
34 | static void |
34 | void inline_size |
35 | pollidx_init (int *base, int count) |
35 | pollidx_init (int *base, int count) |
36 | { |
36 | { |
37 | while (count--) |
37 | while (count--) |
38 | *base++ = -1; |
38 | *base++ = -1; |
39 | } |
39 | } |
… | |
… | |
44 | int idx; |
44 | int idx; |
45 | |
45 | |
46 | if (oev == nev) |
46 | if (oev == nev) |
47 | return; |
47 | return; |
48 | |
48 | |
49 | array_needsize (pollidxs, pollidxmax, fd + 1, pollidx_init); |
49 | array_needsize (int, pollidxs, pollidxmax, fd + 1, pollidx_init); |
50 | |
50 | |
51 | idx = pollidxs [fd]; |
51 | idx = pollidxs [fd]; |
52 | |
52 | |
53 | if (idx < 0) /* need to allocate a new pollfd */ |
53 | if (idx < 0) /* need to allocate a new pollfd */ |
54 | { |
54 | { |
55 | idx = pollcnt++; |
55 | pollidxs [fd] = idx = pollcnt++; |
56 | array_needsize (polls, pollmax, pollcnt, ); |
56 | array_needsize (struct pollfd, polls, pollmax, pollcnt, EMPTY2); |
57 | polls [idx].fd = fd; |
57 | polls [idx].fd = fd; |
58 | } |
58 | } |
|
|
59 | |
|
|
60 | assert (polls [idx].fd == fd); |
59 | |
61 | |
60 | if (nev) |
62 | if (nev) |
61 | polls [idx].events = |
63 | polls [idx].events = |
62 | (nev & EV_READ ? POLLIN : 0) |
64 | (nev & EV_READ ? POLLIN : 0) |
63 | | (nev & EV_WRITE ? POLLOUT : 0); |
65 | | (nev & EV_WRITE ? POLLOUT : 0); |
64 | else /* remove pollfd */ |
66 | else /* remove pollfd */ |
65 | { |
67 | { |
66 | if (idx < pollcnt--) |
68 | pollidxs [fd] = -1; |
|
|
69 | |
|
|
70 | if (expect_true (idx < --pollcnt)) |
67 | { |
71 | { |
68 | pollidxs [fd] = -1; |
|
|
69 | polls [idx] = polls [pollcnt]; |
72 | polls [idx] = polls [pollcnt]; |
70 | pollidxs [polls [idx].fd] = idx; |
73 | pollidxs [polls [idx].fd] = idx; |
71 | } |
74 | } |
72 | } |
75 | } |
73 | } |
76 | } |
74 | |
77 | |
75 | static void |
78 | static void |
76 | poll_poll (EV_P_ ev_tstamp timeout) |
79 | poll_poll (EV_P_ ev_tstamp timeout) |
77 | { |
80 | { |
78 | int i; |
81 | int i; |
79 | int res = poll (polls, pollcnt, ceil (timeout * 1000.)); |
82 | int res = poll (polls, pollcnt, (int)ceil (timeout * 1000.)); |
80 | |
83 | |
81 | if (res < 0) |
84 | if (expect_false (res < 0)) |
82 | { |
85 | { |
83 | if (errno == EBADF) |
86 | if (errno == EBADF) |
84 | fd_ebadf (EV_A); |
87 | fd_ebadf (EV_A); |
85 | else if (errno == ENOMEM && !syserr_cb) |
88 | else if (errno == ENOMEM && !syserr_cb) |
86 | fd_enomem (EV_A); |
89 | fd_enomem (EV_A); |
… | |
… | |
89 | |
92 | |
90 | return; |
93 | return; |
91 | } |
94 | } |
92 | |
95 | |
93 | for (i = 0; i < pollcnt; ++i) |
96 | for (i = 0; i < pollcnt; ++i) |
|
|
97 | if (expect_false (polls [i].revents & POLLNVAL)) |
|
|
98 | fd_kill (EV_A_ polls [i].fd); |
|
|
99 | else |
94 | fd_event ( |
100 | fd_event ( |
95 | EV_A_ |
101 | EV_A_ |
96 | polls [i].fd, |
102 | polls [i].fd, |
97 | (polls [i].revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) |
103 | (polls [i].revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) |
98 | | (polls [i].revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) |
104 | | (polls [i].revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) |
99 | ); |
105 | ); |
100 | } |
106 | } |
101 | |
107 | |
102 | static int |
108 | int inline_size |
103 | poll_init (EV_P_ int flags) |
109 | poll_init (EV_P_ int flags) |
104 | { |
110 | { |
105 | method_fudge = 1e-3; /* needed to compensate for select returning early, very conservative */ |
111 | backend_fudge = 1e-3; /* needed to compensate for select returning early, very conservative */ |
106 | method_modify = poll_modify; |
112 | backend_modify = poll_modify; |
107 | method_poll = poll_poll; |
113 | backend_poll = poll_poll; |
108 | |
114 | |
109 | pollidxs = 0; pollidxmax = 0; |
115 | pollidxs = 0; pollidxmax = 0; |
110 | polls = 0; pollmax = 0; pollcnt = 0; |
116 | polls = 0; pollmax = 0; pollcnt = 0; |
111 | |
117 | |
112 | return EVMETHOD_POLL; |
118 | return EVBACKEND_POLL; |
113 | } |
119 | } |
114 | |
120 | |
115 | static void |
121 | void inline_size |
116 | poll_destroy (EV_P) |
122 | poll_destroy (EV_P) |
117 | { |
123 | { |
118 | ev_free (pollidxs); |
124 | ev_free (pollidxs); |
119 | ev_free (polls); |
125 | ev_free (polls); |
120 | } |
126 | } |
|
|
127 | |