… | |
… | |
45 | |
45 | |
46 | Libev represents time as a single floating point number, representing the |
46 | Libev represents time as a single floating point number, representing the |
47 | (fractional) number of seconds since the (POSIX) epoch (somewhere near |
47 | (fractional) number of seconds since the (POSIX) epoch (somewhere near |
48 | the beginning of 1970, details are complicated, don't ask). This type is |
48 | the beginning of 1970, details are complicated, don't ask). This type is |
49 | called C<ev_tstamp>, which is what you should use too. It usually aliases |
49 | called C<ev_tstamp>, which is what you should use too. It usually aliases |
50 | to the double type in C. |
50 | to the C<double> type in C, and when you need to do any calculations on |
|
|
51 | it, you should treat it as such. |
|
|
52 | |
51 | |
53 | |
52 | =head1 GLOBAL FUNCTIONS |
54 | =head1 GLOBAL FUNCTIONS |
53 | |
55 | |
54 | These functions can be called anytime, even before initialising the |
56 | These functions can be called anytime, even before initialising the |
55 | library in any way. |
57 | library in any way. |
56 | |
58 | |
57 | =over 4 |
59 | =over 4 |
58 | |
60 | |
59 | =item ev_tstamp ev_time () |
61 | =item ev_tstamp ev_time () |
60 | |
62 | |
61 | Returns the current time as libev would use it. |
63 | Returns the current time as libev would use it. Please note that the |
|
|
64 | C<ev_now> function is usually faster and also often returns the timestamp |
|
|
65 | you actually want to know. |
62 | |
66 | |
63 | =item int ev_version_major () |
67 | =item int ev_version_major () |
64 | |
68 | |
65 | =item int ev_version_minor () |
69 | =item int ev_version_minor () |
66 | |
70 | |
… | |
… | |
73 | Usually, it's a good idea to terminate if the major versions mismatch, |
77 | Usually, it's a good idea to terminate if the major versions mismatch, |
74 | as this indicates an incompatible change. Minor versions are usually |
78 | as this indicates an incompatible change. Minor versions are usually |
75 | compatible to older versions, so a larger minor version alone is usually |
79 | compatible to older versions, so a larger minor version alone is usually |
76 | not a problem. |
80 | not a problem. |
77 | |
81 | |
|
|
82 | Example: make sure we haven't accidentally been linked against the wrong |
|
|
83 | version: |
|
|
84 | |
|
|
85 | assert (("libev version mismatch", |
|
|
86 | ev_version_major () == EV_VERSION_MAJOR |
|
|
87 | && ev_version_minor () >= EV_VERSION_MINOR)); |
|
|
88 | |
|
|
89 | =item unsigned int ev_supported_backends () |
|
|
90 | |
|
|
91 | Return the set of all backends (i.e. their corresponding C<EV_BACKEND_*> |
|
|
92 | value) compiled into this binary of libev (independent of their |
|
|
93 | availability on the system you are running on). See C<ev_default_loop> for |
|
|
94 | a description of the set values. |
|
|
95 | |
|
|
96 | Example: make sure we have the epoll method, because yeah this is cool and |
|
|
97 | a must have and can we have a torrent of it please!!!11 |
|
|
98 | |
|
|
99 | assert (("sorry, no epoll, no sex", |
|
|
100 | ev_supported_backends () & EVBACKEND_EPOLL)); |
|
|
101 | |
|
|
102 | =item unsigned int ev_recommended_backends () |
|
|
103 | |
|
|
104 | Return the set of all backends compiled into this binary of libev and also |
|
|
105 | recommended for this platform. This set is often smaller than the one |
|
|
106 | returned by C<ev_supported_backends>, as for example kqueue is broken on |
|
|
107 | most BSDs and will not be autodetected unless you explicitly request it |
|
|
108 | (assuming you know what you are doing). This is the set of backends that |
|
|
109 | libev will probe for if you specify no backends explicitly. |
|
|
110 | |
|
|
111 | =item unsigned int ev_embeddable_backends () |
|
|
112 | |
|
|
113 | Returns the set of backends that are embeddable in other event loops. This |
|
|
114 | is the theoretical, all-platform, value. To find which backends |
|
|
115 | might be supported on the current system, you would need to look at |
|
|
116 | C<ev_embeddable_backends () & ev_supported_backends ()>, likewise for |
|
|
117 | recommended ones. |
|
|
118 | |
|
|
119 | See the description of C<ev_embed> watchers for more info. |
|
|
120 | |
78 | =item ev_set_allocator (void *(*cb)(void *ptr, long size)) |
121 | =item ev_set_allocator (void *(*cb)(void *ptr, long size)) |
79 | |
122 | |
80 | Sets the allocation function to use (the prototype is similar to the |
123 | Sets the allocation function to use (the prototype is similar to the |
81 | realloc C function, the semantics are identical). It is used to allocate |
124 | realloc C function, the semantics are identical). It is used to allocate |
82 | and free memory (no surprises here). If it returns zero when memory |
125 | and free memory (no surprises here). If it returns zero when memory |
… | |
… | |
84 | destructive action. The default is your system realloc function. |
127 | destructive action. The default is your system realloc function. |
85 | |
128 | |
86 | You could override this function in high-availability programs to, say, |
129 | You could override this function in high-availability programs to, say, |
87 | free some memory if it cannot allocate memory, to use a special allocator, |
130 | free some memory if it cannot allocate memory, to use a special allocator, |
88 | or even to sleep a while and retry until some memory is available. |
131 | or even to sleep a while and retry until some memory is available. |
|
|
132 | |
|
|
133 | Example: replace the libev allocator with one that waits a bit and then |
|
|
134 | retries:
|
|
135 | |
|
|
136 | static void * |
|
|
137 | persistent_realloc (void *ptr, long size) |
|
|
138 | { |
|
|
139 | for (;;) |
|
|
140 | { |
|
|
141 | void *newptr = realloc (ptr, size); |
|
|
142 | |
|
|
143 | if (newptr) |
|
|
144 | return newptr; |
|
|
145 | |
|
|
146 | sleep (60); |
|
|
147 | } |
|
|
148 | } |
|
|
149 | |
|
|
150 | ... |
|
|
151 | ev_set_allocator (persistent_realloc); |
89 | |
152 | |
90 | =item ev_set_syserr_cb (void (*cb)(const char *msg)); |
153 | =item ev_set_syserr_cb (void (*cb)(const char *msg)); |
91 | |
154 | |
92 | Set the callback function to call on a retryable syscall error (such |
155 | Set the callback function to call on a retryable syscall error (such |
93 | as failed select, poll, epoll_wait). The message is a printable string |
156 | as failed select, poll, epoll_wait). The message is a printable string |
… | |
… | |
95 | callback is set, then libev will expect it to remedy the situation, no
158 | callback is set, then libev will expect it to remedy the situation, no
96 | matter what, when it returns. That is, libev will generally retry the |
159 | matter what, when it returns. That is, libev will generally retry the |
97 | requested operation, or, if the condition doesn't go away, do bad stuff |
160 | requested operation, or, if the condition doesn't go away, do bad stuff |
98 | (such as abort). |
161 | (such as abort). |
99 | |
162 | |
|
|
163 | Example: do the same thing as libev does internally: |
|
|
164 | |
|
|
165 | static void |
|
|
166 | fatal_error (const char *msg) |
|
|
167 | { |
|
|
168 | perror (msg); |
|
|
169 | abort (); |
|
|
170 | } |
|
|
171 | |
|
|
172 | ... |
|
|
173 | ev_set_syserr_cb (fatal_error); |
|
|
174 | |
100 | =back |
175 | =back |
101 | |
176 | |
102 | =head1 FUNCTIONS CONTROLLING THE EVENT LOOP |
177 | =head1 FUNCTIONS CONTROLLING THE EVENT LOOP |
103 | |
178 | |
104 | An event loop is described by a C<struct ev_loop *>. The library knows two |
179 | An event loop is described by a C<struct ev_loop *>. The library knows two |
… | |
… | |
117 | =item struct ev_loop *ev_default_loop (unsigned int flags) |
192 | =item struct ev_loop *ev_default_loop (unsigned int flags) |
118 | |
193 | |
119 | This will initialise the default event loop if it hasn't been initialised |
194 | This will initialise the default event loop if it hasn't been initialised |
120 | yet and return it. If the default loop could not be initialised, returns |
195 | yet and return it. If the default loop could not be initialised, returns |
121 | false. If it already was initialised it simply returns it (and ignores the |
196 | false. If it already was initialised it simply returns it (and ignores the |
122 | flags). |
197 | flags. If that is troubling you, check C<ev_backend ()> afterwards). |
123 | |
198 | |
124 | If you don't know what event loop to use, use the one returned from this |
199 | If you don't know what event loop to use, use the one returned from this |
125 | function. |
200 | function. |
126 | |
201 | |
127 | The flags argument can be used to specify special behaviour or specific |
202 | The flags argument can be used to specify special behaviour or specific |
128 | backends to use, and is usually specified as 0 (or EVFLAG_AUTO). |
203 | backends to use, and is usually specified as C<0> (or C<EVFLAG_AUTO>). |
129 | |
204 | |
130 | It supports the following flags: |
205 | The following flags are supported: |
131 | |
206 | |
132 | =over 4 |
207 | =over 4 |
133 | |
208 | |
134 | =item C<EVFLAG_AUTO> |
209 | =item C<EVFLAG_AUTO> |
135 | |
210 | |
… | |
… | |
143 | C<LIBEV_FLAGS>. Otherwise (the default), this environment variable will |
218 | C<LIBEV_FLAGS>. Otherwise (the default), this environment variable will |
144 | override the flags completely if it is found in the environment. This is |
219 | override the flags completely if it is found in the environment. This is |
145 | useful to try out specific backends to test their performance, or to work |
220 | useful to try out specific backends to test their performance, or to work |
146 | around bugs. |
221 | around bugs. |
147 | |
222 | |
148 | =item C<EVMETHOD_SELECT> (portable select backend) |
223 | =item C<EVBACKEND_SELECT> (value 1, portable select backend) |
149 | |
224 | |
|
|
225 | This is your standard select(2) backend. Not I<completely> standard, as |
|
|
226 | libev tries to roll its own fd_set with no limits on the number of fds, |
|
|
227 | but if that fails, expect a fairly low limit on the number of fds when |
|
|
228 | using this backend. It doesn't scale too well (O(highest_fd)), but it's usually
|
|
229 | the fastest backend for a low number of fds. |
|
|
230 | |
150 | =item C<EVMETHOD_POLL> (poll backend, available everywhere except on windows) |
231 | =item C<EVBACKEND_POLL> (value 2, poll backend, available everywhere except on windows) |
151 | |
232 | |
152 | =item C<EVMETHOD_EPOLL> (linux only) |
233 | And this is your standard poll(2) backend. It's more complicated than |
|
|
234 | select, but handles sparse fds better and has no artificial limit on the |
|
|
235 | number of fds you can use (except it will slow down considerably with a |
|
|
236 | lot of inactive fds). It scales similarly to select, i.e. O(total_fds). |
153 | |
237 | |
154 | =item C<EVMETHOD_KQUEUE> (some bsds only) |
238 | =item C<EVBACKEND_EPOLL> (value 4, Linux) |
155 | |
239 | |
156 | =item C<EVMETHOD_DEVPOLL> (solaris 8 only) |
240 | For few fds, this backend is a bit slower than poll and select,
|
|
241 | but it scales phenomenally better. While poll and select usually scale like |
|
|
242 | O(total_fds) where total_fds is the total number of fds (or the highest fd), epoll scales
|
|
243 | either O(1) or O(active_fds). |
157 | |
244 | |
158 | =item C<EVMETHOD_PORT> (solaris 10 only) |
245 | While stopping and starting an I/O watcher in the same iteration will |
|
|
246 | result in some caching, there is still a syscall per such incident |
|
|
247 | (because the fd could point to a different file description now), so it's
|
|
248 | best to avoid that. Also, dup()ed file descriptors might not work very |
|
|
249 | well if you register events for both fds. |
|
|
250 | |
|
|
251 | Please note that epoll sometimes generates spurious notifications, so you |
|
|
252 | need to use non-blocking I/O or other means to avoid blocking when no data |
|
|
253 | (or space) is available. |
|
|
254 | |
|
|
255 | =item C<EVBACKEND_KQUEUE> (value 8, most BSD clones) |
|
|
256 | |
|
|
257 | Kqueue deserves special mention, as at the time of this writing, it |
|
|
258 | was broken on all BSDs except NetBSD (usually it doesn't work with |
|
|
259 | anything but sockets and pipes, except on Darwin, where of course its |
|
|
260 | completely useless). For this reason it's not being "autodetected"
|
|
261 | unless you explicitly request it in the flags (i.e. using
|
|
262 | C<EVBACKEND_KQUEUE>). |
|
|
263 | |
|
|
264 | It scales in the same way as the epoll backend, but the interface to the |
|
|
265 | kernel is more efficient (which says nothing about its actual speed, of |
|
|
266 | course). While starting and stopping an I/O watcher does not cause an |
|
|
267 | extra syscall as with epoll, it still adds up to four event changes per |
|
|
268 | incident, so it's best to avoid that.
|
|
269 | |
|
|
270 | =item C<EVBACKEND_DEVPOLL> (value 16, Solaris 8) |
|
|
271 | |
|
|
272 | This is not implemented yet (and might never be). |
|
|
273 | |
|
|
274 | =item C<EVBACKEND_PORT> (value 32, Solaris 10) |
|
|
275 | |
|
|
276 | This uses the Solaris 10 port mechanism. As with everything on Solaris, |
|
|
277 | it's really slow, but it still scales very well (O(active_fds)). |
|
|
278 | |
|
|
279 | Please note that solaris ports can result in a lot of spurious |
|
|
280 | notifications, so you need to use non-blocking I/O or other means to avoid |
|
|
281 | blocking when no data (or space) is available. |
|
|
282 | |
|
|
283 | =item C<EVBACKEND_ALL> |
|
|
284 | |
|
|
285 | Try all backends (even potentially broken ones that wouldn't be tried |
|
|
286 | with C<EVFLAG_AUTO>). Since this is a mask, you can do stuff such as |
|
|
287 | C<EVBACKEND_ALL & ~EVBACKEND_KQUEUE>. |
|
|
288 | |
|
|
289 | =back |
159 | |
290 | |
160 | If one or more of these are ored into the flags value, then only these |
291 | If one or more of these are ored into the flags value, then only these |
161 | backends will be tried (in the reverse order as given here). If none are
292 | backends will be tried (in the reverse order as given here). If none are |
162 | specified, any backend will do. |
293 | specified, most compiled-in backends will be tried, usually in reverse
|
|
294 | order of their flag values :) |
163 | |
295 | |
164 | =back |
296 | The most typical usage is like this: |
|
|
297 | |
|
|
298 | if (!ev_default_loop (0)) |
|
|
299 | fatal ("could not initialise libev, bad $LIBEV_FLAGS in environment?"); |
|
|
300 | |
|
|
301 | Restrict libev to the select and poll backends, and do not allow |
|
|
302 | environment settings to be taken into account: |
|
|
303 | |
|
|
304 | ev_default_loop (EVBACKEND_POLL | EVBACKEND_SELECT | EVFLAG_NOENV); |
|
|
305 | |
|
|
306 | Use whatever libev has to offer, but make sure that kqueue is used if |
|
|
307 | available (warning, breaks stuff, best use only with your own private |
|
|
308 | event loop and only if you know the OS supports your types of fds): |
|
|
309 | |
|
|
310 | ev_default_loop (ev_recommended_backends () | EVBACKEND_KQUEUE); |
165 | |
311 | |
166 | =item struct ev_loop *ev_loop_new (unsigned int flags) |
312 | =item struct ev_loop *ev_loop_new (unsigned int flags) |
167 | |
313 | |
168 | Similar to C<ev_default_loop>, but always creates a new event loop that is |
314 | Similar to C<ev_default_loop>, but always creates a new event loop that is |
169 | always distinct from the default loop. Unlike the default loop, it cannot |
315 | always distinct from the default loop. Unlike the default loop, it cannot |
170 | handle signal and child watchers, and attempts to do so will be greeted by |
316 | handle signal and child watchers, and attempts to do so will be greeted by |
171 | undefined behaviour (or a failed assertion if assertions are enabled). |
317 | undefined behaviour (or a failed assertion if assertions are enabled). |
172 | |
318 | |
|
|
319 | Example: try to create an event loop that uses epoll and nothing else.
|
|
320 | |
|
|
321 | struct ev_loop *epoller = ev_loop_new (EVBACKEND_EPOLL | EVFLAG_NOENV); |
|
|
322 | if (!epoller) |
|
|
323 | fatal ("no epoll found here, maybe it hides under your chair"); |
|
|
324 | |
173 | =item ev_default_destroy () |
325 | =item ev_default_destroy () |
174 | |
326 | |
175 | Destroys the default loop again (frees all memory and kernel state |
327 | Destroys the default loop again (frees all memory and kernel state |
176 | etc.). This stops all registered event watchers (by not touching them in |
328 | etc.). This stops all registered event watchers (by not touching them in |
177 | any way whatsoever, although you cannot rely on this :). |
329 | any way whatsoever, although you cannot rely on this :). |
… | |
… | |
186 | This function reinitialises the kernel state for backends that have |
338 | This function reinitialises the kernel state for backends that have |
187 | one. Despite the name, you can call it anytime, but it makes most sense |
339 | one. Despite the name, you can call it anytime, but it makes most sense |
188 | after forking, in either the parent or child process (or both, but that |
340 | after forking, in either the parent or child process (or both, but that |
189 | again makes little sense). |
341 | again makes little sense). |
190 | |
342 | |
191 | You I<must> call this function after forking if and only if you want to |
343 | You I<must> call this function in the child process after forking if and |
192 | use the event library in both processes. If you just fork+exec, you don't |
344 | only if you want to use the event library in both processes. If you just |
193 | have to call it. |
345 | fork+exec, you don't have to call it. |
194 | |
346 | |
195 | The function itself is quite fast and it's usually not a problem to call |
347 | The function itself is quite fast and it's usually not a problem to call |
196 | it just in case after a fork. To make this easy, the function will fit in |
348 | it just in case after a fork. To make this easy, the function will fit in |
197 | quite nicely into a call to C<pthread_atfork>: |
349 | quite nicely into a call to C<pthread_atfork>: |
198 | |
350 | |
199 | pthread_atfork (0, 0, ev_default_fork); |
351 | pthread_atfork (0, 0, ev_default_fork); |
200 | |
352 | |
|
|
353 | At the moment, C<EVBACKEND_SELECT> and C<EVBACKEND_POLL> are safe to use |
|
|
354 | without calling this function, so if you force one of those backends you |
|
|
355 | do not need to care. |
|
|
356 | |
201 | =item ev_loop_fork (loop) |
357 | =item ev_loop_fork (loop) |
202 | |
358 | |
203 | Like C<ev_default_fork>, but acts on an event loop created by |
359 | Like C<ev_default_fork>, but acts on an event loop created by |
204 | C<ev_loop_new>. Yes, you have to call this on every allocated event loop |
360 | C<ev_loop_new>. Yes, you have to call this on every allocated event loop |
205 | after fork, and how you do this is entirely your own problem. |
361 | after fork, and how you do this is entirely your own problem. |
206 | |
362 | |
207 | =item unsigned int ev_method (loop) |
363 | =item unsigned int ev_backend (loop) |
208 | |
364 | |
209 | Returns one of the C<EVMETHOD_*> flags indicating the event backend in |
365 | Returns one of the C<EVBACKEND_*> flags indicating the event backend in |
210 | use. |
366 | use. |
211 | |
367 | |
212 | =item ev_tstamp ev_now (loop) |
368 | =item ev_tstamp ev_now (loop) |
213 | |
369 | |
214 | Returns the current "event loop time", which is the time the event loop |
370 | Returns the current "event loop time", which is the time the event loop |
215 | got events and started processing them. This timestamp does not change |
371 | received events and started processing them. This timestamp does not |
216 | as long as callbacks are being processed, and this is also the base time |
372 | change as long as callbacks are being processed, and this is also the base |
217 | used for relative timers. You can treat it as the timestamp of the event |
373 | time used for relative timers. You can treat it as the timestamp of the |
218 | occurring (or more correctly, the mainloop finding out about it).
374 | event occurring (or more correctly, libev finding out about it).
219 | |
375 | |
220 | =item ev_loop (loop, int flags) |
376 | =item ev_loop (loop, int flags) |
221 | |
377 | |
222 | Finally, this is it, the event handler. This function usually is called |
378 | Finally, this is it, the event handler. This function usually is called |
223 | after you initialised all your watchers and you want to start handling |
379 | after you initialised all your watchers and you want to start handling |
224 | events. |
380 | events. |
225 | |
381 | |
226 | If the flags argument is specified as 0, it will not return until either |
382 | If the flags argument is specified as C<0>, it will not return until |
227 | no event watchers are active anymore or C<ev_unloop> was called. |
383 | either no event watchers are active anymore or C<ev_unloop> was called. |
|
|
384 | |
|
|
385 | Please note that an explicit C<ev_unloop> is usually better than |
|
|
386 | relying on all watchers to be stopped when deciding when a program has |
|
|
387 | finished (especially in interactive programs), but having a program that |
|
|
388 | automatically loops as long as it has to and no longer by virtue of |
|
|
389 | relying on its watchers stopping correctly is a thing of beauty. |
228 | |
390 | |
229 | A flags value of C<EVLOOP_NONBLOCK> will look for new events, will handle |
391 | A flags value of C<EVLOOP_NONBLOCK> will look for new events, will handle |
230 | those events and any outstanding ones, but will not block your process in |
392 | those events and any outstanding ones, but will not block your process in |
231 | case there are no events and will return after one iteration of the loop. |
393 | case there are no events and will return after one iteration of the loop. |
232 | |
394 | |
233 | A flags value of C<EVLOOP_ONESHOT> will look for new events (waiting if |
395 | A flags value of C<EVLOOP_ONESHOT> will look for new events (waiting if |
234 | necessary) and will handle those and any outstanding ones. It will block
396 | necessary) and will handle those and any outstanding ones. It will block
235 | your process until at least one new event arrives, and will return after |
397 | your process until at least one new event arrives, and will return after |
236 | one iteration of the loop. |
398 | one iteration of the loop. This is useful if you are waiting for some |
|
|
399 | external event in conjunction with something not expressible using other |
|
|
400 | libev watchers. However, a pair of C<ev_prepare>/C<ev_check> watchers is |
|
|
401 | usually a better approach for this kind of thing. |
237 | |
402 | |
238 | This flags value could be used to implement alternative looping |
403 | Here are the gory details of what C<ev_loop> does: |
239 | constructs, but the C<prepare> and C<check> watchers provide a better and |
404 | |
240 | more generic mechanism. |
405 | * If there are no active watchers (reference count is zero), return. |
|
|
406 | - Queue prepare watchers and then call all outstanding watchers. |
|
|
407 | - If we have been forked, recreate the kernel state. |
|
|
408 | - Update the kernel state with all outstanding changes. |
|
|
409 | - Update the "event loop time". |
|
|
410 | - Calculate for how long to block. |
|
|
411 | - Block the process, waiting for any events. |
|
|
412 | - Queue all outstanding I/O (fd) events. |
|
|
413 | - Update the "event loop time" and do time jump handling. |
|
|
414 | - Queue all outstanding timers. |
|
|
415 | - Queue all outstanding periodics. |
|
|
416 | - If no events are pending now, queue all idle watchers. |
|
|
417 | - Queue all check watchers. |
|
|
418 | - Call all queued watchers in reverse order (i.e. check watchers first). |
|
|
419 | Signals and child watchers are implemented as I/O watchers, and will |
|
|
420 | be handled here by queueing them when their watcher gets executed. |
|
|
421 | - If ev_unloop has been called or EVLOOP_ONESHOT or EVLOOP_NONBLOCK |
|
|
422 | were used, return, otherwise continue with step *. |
|
|
423 | |
|
|
424 | Example: queue some jobs and then loop until no events are outstanding
|
|
425 | anymore. |
|
|
426 | |
|
|
427 | ... queue jobs here, make sure they register event watchers as long |
|
|
428 | ... as they still have work to do (even an idle watcher will do..) |
|
|
429 | ev_loop (my_loop, 0); |
|
|
430 | ... jobs done. yeah! |
241 | |
431 | |
242 | =item ev_unloop (loop, how) |
432 | =item ev_unloop (loop, how) |
243 | |
433 | |
244 | Can be used to make a call to C<ev_loop> return early (but only after it |
434 | Can be used to make a call to C<ev_loop> return early (but only after it |
245 | has processed all outstanding events). The C<how> argument must be either |
435 | has processed all outstanding events). The C<how> argument must be either |
246 | C<EVUNLOOP_ONCE>, which will make the innermost C<ev_loop> call return, or |
436 | C<EVUNLOOP_ONE>, which will make the innermost C<ev_loop> call return, or |
247 | C<EVUNLOOP_ALL>, which will make all nested C<ev_loop> calls return. |
437 | C<EVUNLOOP_ALL>, which will make all nested C<ev_loop> calls return. |
248 | |
438 | |
249 | =item ev_ref (loop) |
439 | =item ev_ref (loop) |
250 | |
440 | |
251 | =item ev_unref (loop) |
441 | =item ev_unref (loop) |
… | |
… | |
259 | visible to the libev user and should not keep C<ev_loop> from exiting if |
449 | visible to the libev user and should not keep C<ev_loop> from exiting if |
260 | no event watchers registered by it are active. It is also an excellent |
450 | no event watchers registered by it are active. It is also an excellent |
261 | way to do this for generic recurring timers or from within third-party |
451 | way to do this for generic recurring timers or from within third-party |
262 | libraries. Just remember to I<unref after start> and I<ref before stop>. |
452 | libraries. Just remember to I<unref after start> and I<ref before stop>. |
263 | |
453 | |
|
|
454 | Example: create a signal watcher, but keep it from keeping C<ev_loop> |
|
|
455 | running when nothing else is active. |
|
|
456 | |
|
|
457 | struct ev_signal exitsig;
|
|
458 | ev_signal_init (&exitsig, sig_cb, SIGINT); |
|
|
459 | ev_signal_start (myloop, &exitsig); |
|
|
460 | ev_unref (myloop);
|
|
461 | |
|
|
462 | Example: for some weird reason, unregister the above signal handler again. |
|
|
463 | |
|
|
464 | ev_ref (myloop); |
|
|
465 | ev_signal_stop (myloop, &exitsig); |
|
|
466 | |
264 | =back |
467 | =back |
265 | |
468 | |
266 | =head1 ANATOMY OF A WATCHER |
469 | =head1 ANATOMY OF A WATCHER |
267 | |
470 | |
268 | A watcher is a structure that you create and register to record your |
471 | A watcher is a structure that you create and register to record your |
… | |
… | |
302 | *) >>), and you can stop watching for events at any time by calling the |
505 | *) >>), and you can stop watching for events at any time by calling the |
303 | corresponding stop function (C<< ev_<type>_stop (loop, watcher *) >>. |
506 | corresponding stop function (C<< ev_<type>_stop (loop, watcher *) >>. |
304 | |
507 | |
305 | As long as your watcher is active (has been started but not stopped) you |
508 | As long as your watcher is active (has been started but not stopped) you |
306 | must not touch the values stored in it. Most specifically you must never |
509 | must not touch the values stored in it. Most specifically you must never |
307 | reinitialise it or call its set method. |
510 | reinitialise it or call its set macro. |
308 | |
511 | |
309 | You can check whether an event is active by calling the C<ev_is_active |
512 | You can check whether an event is active by calling the C<ev_is_active |
310 | (watcher *)> macro. To see whether an event is outstanding (but the |
513 | (watcher *)> macro. To see whether an event is outstanding (but the |
311 | callback for it has not been called yet) you can use the C<ev_is_pending |
514 | callback for it has not been called yet) you can use the C<ev_is_pending |
312 | (watcher *)> macro. |
515 | (watcher *)> macro. |
… | |
… | |
409 | =head1 WATCHER TYPES |
612 | =head1 WATCHER TYPES |
410 | |
613 | |
411 | This section describes each watcher in detail, but will not repeat |
614 | This section describes each watcher in detail, but will not repeat |
412 | information given in the last section. |
615 | information given in the last section. |
413 | |
616 | |
|
|
617 | |
414 | =head2 C<ev_io> - is this file descriptor readable or writable |
618 | =head2 C<ev_io> - is this file descriptor readable or writable |
415 | |
619 | |
416 | I/O watchers check whether a file descriptor is readable or writable |
620 | I/O watchers check whether a file descriptor is readable or writable |
417 | in each iteration of the event loop (This behaviour is called |
621 | in each iteration of the event loop (This behaviour is called |
418 | level-triggering because you keep receiving events as long as the |
622 | level-triggering because you keep receiving events as long as the |
… | |
… | |
429 | descriptors correctly if you register interest in two or more fds pointing |
633 | descriptors correctly if you register interest in two or more fds pointing |
430 | to the same underlying file/socket etc. description (that is, they share |
634 | to the same underlying file/socket etc. description (that is, they share |
431 | the same underlying "file open"). |
635 | the same underlying "file open"). |
432 | |
636 | |
433 | If you must do this, then force the use of a known-to-be-good backend |
637 | If you must do this, then force the use of a known-to-be-good backend |
434 | (at the time of this writing, this includes only EVMETHOD_SELECT and |
638 | (at the time of this writing, this includes only C<EVBACKEND_SELECT> and |
435 | EVMETHOD_POLL). |
639 | C<EVBACKEND_POLL>). |
436 | |
640 | |
437 | =over 4 |
641 | =over 4 |
438 | |
642 | |
439 | =item ev_io_init (ev_io *, callback, int fd, int events) |
643 | =item ev_io_init (ev_io *, callback, int fd, int events) |
440 | |
644 | |
… | |
… | |
442 | |
646 | |
443 | Configures an C<ev_io> watcher. The fd is the file descriptor to receive
647 | Configures an C<ev_io> watcher. The fd is the file descriptor to receive
444 | events for and events is either C<EV_READ>, C<EV_WRITE> or C<EV_READ | |
648 | events for and events is either C<EV_READ>, C<EV_WRITE> or C<EV_READ | |
445 | EV_WRITE> to receive the given events. |
649 | EV_WRITE> to receive the given events. |
446 | |
650 | |
|
|
651 | Please note that most of the more scalable backend mechanisms (for example |
|
|
652 | epoll and solaris ports) can result in spurious readyness notifications |
|
|
653 | for file descriptors, so you practically need to use non-blocking I/O (and |
|
|
654 | treat callback invocation as hint only), or retest separately with a safe |
|
|
655 | interface before doing I/O (XLib can do this), or force the use of either |
|
|
656 | C<EVBACKEND_SELECT> or C<EVBACKEND_POLL>, which don't suffer from this |
|
|
657 | problem. Also note that it is quite easy to have your callback invoked |
|
|
658 | when the readyness condition is no longer valid even when employing |
|
|
659 | typical ways of handling events, so it's a good idea to use non-blocking
|
|
660 | I/O unconditionally. |
|
|
661 | |
447 | =back |
662 | =back |
|
|
663 | |
|
|
664 | Example: call C<stdin_readable_cb> when STDIN_FILENO has become, well |
|
|
665 | readable, but only once. Since it is likely line-buffered, you could |
|
|
666 | attempt to read a whole line in the callback: |
|
|
667 | |
|
|
668 | static void |
|
|
669 | stdin_readable_cb (struct ev_loop *loop, struct ev_io *w, int revents) |
|
|
670 | { |
|
|
671 | ev_io_stop (loop, w); |
|
|
672 | .. read from stdin here (or from w->fd) and handle any I/O errors
|
|
673 | } |
|
|
674 | |
|
|
675 | ... |
|
|
676 | struct ev_loop *loop = ev_default_init (0); |
|
|
677 | struct ev_io stdin_readable; |
|
|
678 | ev_io_init (&stdin_readable, stdin_readable_cb, STDIN_FILENO, EV_READ); |
|
|
679 | ev_io_start (loop, &stdin_readable); |
|
|
680 | ev_loop (loop, 0); |
|
|
681 | |
448 | |
682 | |
449 | =head2 C<ev_timer> - relative and optionally recurring timeouts |
683 | =head2 C<ev_timer> - relative and optionally recurring timeouts |
450 | |
684 | |
451 | Timer watchers are simple relative timers that generate an event after a |
685 | Timer watchers are simple relative timers that generate an event after a |
452 | given time, and optionally repeating in regular intervals after that. |
686 | given time, and optionally repeating in regular intervals after that. |
453 | |
687 | |
454 | The timers are based on real time, that is, if you register an event that |
688 | The timers are based on real time, that is, if you register an event that |
455 | times out after an hour and you reset your system clock to last year's
689 | times out after an hour and you reset your system clock to last year's
456 | time, it will still time out after (roughly) an hour. "Roughly" because
690 | time, it will still time out after (roughly) an hour. "Roughly" because
457 | detecting time jumps is hard, and soem inaccuracies are unavoidable (the |
691 | detecting time jumps is hard, and some inaccuracies are unavoidable (the |
458 | monotonic clock option helps a lot here). |
692 | monotonic clock option helps a lot here). |
459 | |
693 | |
460 | The relative timeouts are calculated relative to the C<ev_now ()> |
694 | The relative timeouts are calculated relative to the C<ev_now ()> |
461 | time. This is usually the right thing as this timestamp refers to the time |
695 | time. This is usually the right thing as this timestamp refers to the time |
462 | of the event triggering whatever timeout you are modifying/starting. If |
696 | of the event triggering whatever timeout you are modifying/starting. If |
463 | you suspect event processing to be delayed and you *need* to base the timeout |
697 | you suspect event processing to be delayed and you I<need> to base the timeout |
464 | on the current time, use something like this to adjust for this: |
698 | on the current time, use something like this to adjust for this: |
465 | |
699 | |
466 | ev_timer_set (&timer, after + ev_now () - ev_time (), 0.); |
700 | ev_timer_set (&timer, after + ev_now () - ev_time (), 0.); |
|
|
701 | |
|
|
702 | The callback is guaranteed to be invoked only when its timeout has passed,
|
|
703 | but if multiple timers become ready during the same loop iteration then |
|
|
704 | order of execution is undefined. |
467 | |
705 | |
468 | =over 4 |
706 | =over 4 |
469 | |
707 | |
470 | =item ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat) |
708 | =item ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat) |
471 | |
709 | |
… | |
… | |
501 | state where you do not expect data to travel on the socket, you can stop |
739 | state where you do not expect data to travel on the socket, you can stop |
502 | the timer, and again will automatically restart it if need be. |
740 | the timer, and again will automatically restart it if need be. |
503 | |
741 | |
504 | =back |
742 | =back |
505 | |
743 | |
|
|
744 | Example: create a timer that fires after 60 seconds. |
|
|
745 | |
|
|
746 | static void |
|
|
747 | one_minute_cb (struct ev_loop *loop, struct ev_timer *w, int revents) |
|
|
748 | { |
|
|
749 | .. one minute over, w is actually stopped right here |
|
|
750 | } |
|
|
751 | |
|
|
752 | struct ev_timer mytimer; |
|
|
753 | ev_timer_init (&mytimer, one_minute_cb, 60., 0.); |
|
|
754 | ev_timer_start (loop, &mytimer); |
|
|
755 | |
|
|
756 | Example: create a timeout timer that times out after 10 seconds of |
|
|
757 | inactivity. |
|
|
758 | |
|
|
759 | static void |
|
|
760 | timeout_cb (struct ev_loop *loop, struct ev_timer *w, int revents) |
|
|
761 | { |
|
|
762 | .. ten seconds without any activity |
|
|
763 | } |
|
|
764 | |
|
|
765 | struct ev_timer mytimer; |
|
|
766 | ev_timer_init (&mytimer, timeout_cb, 0., 10.); /* note, only repeat used */ |
|
|
767 | ev_timer_again (&mytimer); /* start timer */ |
|
|
768 | ev_loop (loop, 0); |
|
|
769 | |
|
|
770 | // and in some piece of code that gets executed on any "activity": |
|
|
771 | // reset the timeout to start ticking again at 10 seconds |
|
|
772 | ev_timer_again (&mytimer); |
|
|
773 | |
|
|
774 | |
506 | =head2 C<ev_periodic> - to cron or not to cron |
775 | =head2 C<ev_periodic> - to cron or not to cron |
507 | |
776 | |
508 | Periodic watchers are also timers of a kind, but they are very versatile |
777 | Periodic watchers are also timers of a kind, but they are very versatile |
509 | (and unfortunately a bit complex). |
778 | (and unfortunately a bit complex). |
510 | |
779 | |
… | |
… | |
518 | again). |
787 | again). |
519 | |
788 | |
520 | They can also be used to implement vastly more complex timers, such as |
789 | They can also be used to implement vastly more complex timers, such as |
521 | triggering an event on each midnight, local time.
790 | triggering an event on each midnight, local time.
522 | |
791 | |
|
|
792 | As with timers, the callback is guaranteed to be invoked only when the
|
|
793 | time (C<at>) has been passed, but if multiple periodic timers become ready |
|
|
794 | during the same loop iteration then order of execution is undefined. |
|
|
795 | |
523 | =over 4 |
796 | =over 4 |
524 | |
797 | |
525 | =item ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb) |
798 | =item ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb) |
526 | |
799 | |
527 | =item ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb) |
800 | =item ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb) |
528 | |
801 | |
529 | Lots of arguments, let's sort it out... There are basically three modes of
802 | Lots of arguments, let's sort it out... There are basically three modes of
530 | operation, and we will explain them from simplest to complex: |
803 | operation, and we will explain them from simplest to complex: |
531 | |
|
|
532 | |
804 | |
533 | =over 4 |
805 | =over 4 |
534 | |
806 | |
535 | =item * absolute timer (interval = reschedule_cb = 0) |
807 | =item * absolute timer (interval = reschedule_cb = 0) |
536 | |
808 | |
… | |
… | |
601 | when you changed some parameters or the reschedule callback would return |
873 | when you changed some parameters or the reschedule callback would return |
602 | a different time than the last time it was called (e.g. in a crond like |
874 | a different time than the last time it was called (e.g. in a crond like |
603 | program when the crontabs have changed). |
875 | program when the crontabs have changed). |
604 | |
876 | |
605 | =back |
877 | =back |
|
|
878 | |
|
|
879 | Example: call a callback every hour, or, more precisely, whenever the |
|
|
880 | system clock is divisible by 3600. The callback invocation times have |
|
|
881 | potentially a lot of jittering, but good long-term stability. |
|
|
882 | |
|
|
883 | static void |
|
|
884 | clock_cb (struct ev_loop *loop, struct ev_periodic *w, int revents)
|
|
885 | { |
|
|
886 | ... it's now a full hour (UTC, or TAI or whatever your clock follows)
|
|
887 | } |
|
|
888 | |
|
|
889 | struct ev_periodic hourly_tick; |
|
|
890 | ev_periodic_init (&hourly_tick, clock_cb, 0., 3600., 0); |
|
|
891 | ev_periodic_start (loop, &hourly_tick); |
|
|
892 | |
|
|
893 | Example: the same as above, but use a reschedule callback to do it: |
|
|
894 | |
|
|
895 | #include <math.h> |
|
|
896 | |
|
|
897 | static ev_tstamp |
|
|
898 | my_scheduler_cb (struct ev_periodic *w, ev_tstamp now) |
|
|
899 | { |
|
|
900 | return fmod (now, 3600.) + 3600.; |
|
|
901 | } |
|
|
902 | |
|
|
903 | ev_periodic_init (&hourly_tick, clock_cb, 0., 0., my_scheduler_cb); |
|
|
904 | |
|
|
905 | Example: call a callback every hour, starting now: |
|
|
906 | |
|
|
907 | struct ev_periodic hourly_tick; |
|
|
908 | ev_periodic_init (&hourly_tick, clock_cb, |
|
|
909 | fmod (ev_now (loop), 3600.), 3600., 0); |
|
|
910 | ev_periodic_start (loop, &hourly_tick); |
|
|
911 | |
606 | |
912 | |
607 | =head2 C<ev_signal> - signal me when a signal gets signalled |
913 | =head2 C<ev_signal> - signal me when a signal gets signalled |
608 | |
914 | |
609 | Signal watchers will trigger an event when the process receives a specific |
915 | Signal watchers will trigger an event when the process receives a specific |
610 | signal one or more times. Even though signals are very asynchronous, libev |
916 | signal one or more times. Even though signals are very asynchronous, libev |
… | |
… | |
627 | Configures the watcher to trigger on the given signal number (usually one |
933 | Configures the watcher to trigger on the given signal number (usually one |
628 | of the C<SIGxxx> constants). |
934 | of the C<SIGxxx> constants). |
629 | |
935 | |
630 | =back |
936 | =back |
631 | |
937 | |
|
|
938 | |
632 | =head2 C<ev_child> - wait for pid status changes |
939 | =head2 C<ev_child> - wait for pid status changes |
633 | |
940 | |
634 | Child watchers trigger when your process receives a SIGCHLD in response to |
941 | Child watchers trigger when your process receives a SIGCHLD in response to |
635 | some child status changes (most typically when a child of yours dies). |
942 | some child status changes (most typically when a child of yours dies). |
636 | |
943 | |
… | |
… | |
646 | the status word (use the macros from C<sys/wait.h> and see your systems |
953 | the status word (use the macros from C<sys/wait.h> and see your systems |
647 | C<waitpid> documentation). The C<rpid> member contains the pid of the |
954 | C<waitpid> documentation). The C<rpid> member contains the pid of the |
648 | process causing the status change. |
955 | process causing the status change. |
649 | |
956 | |
650 | =back |
957 | =back |
|
|
958 | |
|
|
959 | Example: try to exit cleanly on SIGINT and SIGTERM. |
|
|
960 | |
|
|
961 | static void |
|
|
962 | sigint_cb (struct ev_loop *loop, struct ev_signal *w, int revents) |
|
|
963 | { |
|
|
964 | ev_unloop (loop, EVUNLOOP_ALL); |
|
|
965 | } |
|
|
966 | |
|
|
967 | struct ev_signal signal_watcher; |
|
|
968 | ev_signal_init (&signal_watcher, sigint_cb, SIGINT); |
|
|
969 | ev_signal_start (loop, &signal_watcher);
|
|
970 | |
651 | |
971 | |
652 | =head2 C<ev_idle> - when you've got nothing better to do |
972 | =head2 C<ev_idle> - when you've got nothing better to do |
653 | |
973 | |
654 | Idle watchers trigger events when there are no other events pending
974 | Idle watchers trigger events when there are no other events pending
655 | (prepare, check and other idle watchers do not count). That is, as long |
975 | (prepare, check and other idle watchers do not count). That is, as long |
… | |
… | |
675 | kind. There is a C<ev_idle_set> macro, but using it is utterly pointless, |
995 | kind. There is a C<ev_idle_set> macro, but using it is utterly pointless, |
676 | believe me. |
996 | believe me. |
677 | |
997 | |
678 | =back |
998 | =back |
679 | |
999 | |
|
|
1000 | Example: dynamically allocate an C<ev_idle>, start it, and in the |
|
|
1001 | callback, free it. Also, use no error checking, as usual.
|
|
1002 | |
|
|
1003 | static void |
|
|
1004 | idle_cb (struct ev_loop *loop, struct ev_idle *w, int revents) |
|
|
1005 | { |
|
|
1006 | free (w); |
|
|
1007 | // now do something you wanted to do when the program has |
|
|
1008 | // no longer anything immediate to do.
|
|
1009 | } |
|
|
1010 | |
|
|
1011 | struct ev_idle *idle_watcher = malloc (sizeof (struct ev_idle)); |
|
|
1012 | ev_idle_init (idle_watcher, idle_cb); |
|
|
1013 | ev_idle_start (loop, idle_watcher);
|
|
1014 | |
|
|
1015 | |
680 | =head2 C<ev_prepare> and C<ev_check> - customise your event loop |
1016 | =head2 C<ev_prepare> and C<ev_check> - customise your event loop |
681 | |
1017 | |
682 | Prepare and check watchers are usually (but not always) used in tandem: |
1018 | Prepare and check watchers are usually (but not always) used in tandem: |
683 | prepare watchers get invoked before the process blocks and check watchers |
1019 | prepare watchers get invoked before the process blocks and check watchers |
684 | afterwards. |
1020 | afterwards. |
685 | |
1021 | |
686 | Their main purpose is to integrate other event mechanisms into libev. This |
1022 | Their main purpose is to integrate other event mechanisms into libev and |
687 | could be used, for example, to track variable changes, implement your own |
1023 | their use is somewhat advanced. This could be used, for example, to track |
688 | watchers, integrate net-snmp or a coroutine library and lots more. |
1024 | variable changes, implement your own watchers, integrate net-snmp or a |
|
|
1025 | coroutine library and lots more. |
689 | |
1026 | |
690 | This is done by examining in each prepare call which file descriptors need |
1027 | This is done by examining in each prepare call which file descriptors need |
691 | to be watched by the other library, registering C<ev_io> watchers for |
1028 | to be watched by the other library, registering C<ev_io> watchers for |
692 | them and starting an C<ev_timer> watcher for any timeouts (many libraries |
1029 | them and starting an C<ev_timer> watcher for any timeouts (many libraries |
693 | provide just this functionality). Then, in the check watcher you check for |
1030 | provide just this functionality). Then, in the check watcher you check for |
… | |
… | |
715 | parameters of any kind. There are C<ev_prepare_set> and C<ev_check_set> |
1052 | parameters of any kind. There are C<ev_prepare_set> and C<ev_check_set> |
716 | macros, but using them is utterly, utterly and completely pointless. |
1053 | macros, but using them is utterly, utterly and completely pointless. |
717 | |
1054 | |
718 | =back |
1055 | =back |
719 | |
1056 | |
|
|
1057 | Example: *TODO*. |
|
|
1058 | |
|
|
1059 | |
|
|
1060 | =head2 C<ev_embed> - when one backend isn't enough |
|
|
1061 | |
|
|
1062 | This is a rather advanced watcher type that lets you embed one event loop |
|
|
1063 | into another. |
|
|
1064 | |
|
|
1065 | There are primarily two reasons you would want that: work around bugs and |
|
|
1066 | prioritise I/O. |
|
|
1067 | |
|
|
1068 | As an example for a bug workaround, the kqueue backend might only support |
|
|
1069 | sockets on some platform, so it is unusable as generic backend, but you |
|
|
1070 | still want to make use of it because you have many sockets and it scales |
|
|
1071 | so nicely. In this case, you would create a kqueue-based loop and embed it |
|
|
1072 | into your default loop (which might use e.g. poll). Overall operation will |
|
|
1073 | be a bit slower because first libev has to poll and then call kevent, but |
|
|
1074 | at least you can use both at what they are best. |
|
|
1075 | |
|
|
1076 | As for prioritising I/O: rarely you have the case where some fds have |
|
|
1077 | to be watched and handled very quickly (with low latency), and even |
|
|
1078 | priorities and idle watchers might have too much overhead. In this case |
|
|
1079 | you would put all the high priority stuff in one loop and all the rest in |
|
|
1080 | a second one, and embed the second one in the first. |
|
|
1081 | |
|
|
1082 | As long as the watcher is started it will automatically handle events. The |
|
|
1083 | callback will be invoked whenever some events have been handled. You can |
|
|
1084 | set the callback to C<0> to avoid having to specify one if you are not |
|
|
1085 | interested in that. |
|
|
1086 | |
|
|
1087 | Also, there have not currently been made special provisions for forking: |
|
|
1088 | when you fork, you not only have to call C<ev_loop_fork> on both loops, |
|
|
1089 | but you will also have to stop and restart any C<ev_embed> watchers |
|
|
1090 | yourself. |
|
|
1091 | |
|
|
1092 | Unfortunately, not all backends are embeddable, only the ones returned by |
|
|
1093 | C<ev_embeddable_backends> are, which, unfortunately, does not include any |
|
|
1094 | portable one. |
|
|
1095 | |
|
|
1096 | So when you want to use this feature you will always have to be prepared |
|
|
1097 | that you cannot get an embeddable loop. The recommended way to get around |
|
|
1098 | this is to have a separate variable for your embeddable loop, try to
|
|
1099 | create it, and if that fails, use the normal loop for everything: |
|
|
1100 | |
|
|
1101 | struct ev_loop *loop_hi = ev_default_init (0); |
|
|
1102 | struct ev_loop *loop_lo = 0; |
|
|
1103 | struct ev_embed embed; |
|
|
1104 | |
|
|
1105 | // see if there is a chance of getting one that works |
|
|
1106 | // (remember that a flags value of 0 means autodetection) |
|
|
1107 | loop_lo = ev_embeddable_backends () & ev_recommended_backends () |
|
|
1108 | ? ev_loop_new (ev_embeddable_backends () & ev_recommended_backends ()) |
|
|
1109 | : 0; |
|
|
1110 | |
|
|
1111 | // if we got one, then embed it, otherwise default to loop_hi |
|
|
1112 | if (loop_lo) |
|
|
1113 | { |
|
|
1114 | ev_embed_init (&embed, 0, loop_lo); |
|
|
1115 | ev_embed_start (loop_hi, &embed); |
|
|
1116 | } |
|
|
1117 | else |
|
|
1118 | loop_lo = loop_hi; |
|
|
1119 | |
|
|
1120 | =over 4 |
|
|
1121 | |
|
|
1122 | =item ev_embed_init (ev_embed *, callback, struct ev_loop *loop) |
|
|
1123 | |
|
|
1124 | =item ev_embed_set (ev_embed *, callback, struct ev_loop *loop) |
|
|
1125 | |
|
|
1126 | Configures the watcher to embed the given loop, which must be embeddable. |
|
|
1127 | |
|
|
1128 | =back |
|
|
1129 | |
|
|
1130 | |
720 | =head1 OTHER FUNCTIONS |
1131 | =head1 OTHER FUNCTIONS |
721 | |
1132 | |
722 | There are some other functions of possible interest. Described. Here. Now. |
1133 | There are some other functions of possible interest. Described. Here. Now. |
723 | |
1134 | |
724 | =over 4 |
1135 | =over 4 |
… | |
… | |
770 | |
1181 | |
771 | Feed an event as if the given signal occurred (loop must be the default loop!).
1182 | Feed an event as if the given signal occurred (loop must be the default loop!).
772 | |
1183 | |
773 | =back |
1184 | =back |
774 | |
1185 | |
|
|
1186 | |
775 | =head1 LIBEVENT EMULATION |
1187 | =head1 LIBEVENT EMULATION |
776 | |
1188 | |
777 | Libev offers a compatibility emulation layer for libevent. It cannot |
1189 | Libev offers a compatibility emulation layer for libevent. It cannot |
778 | emulate the internals of libevent, so here are some usage hints: |
1190 | emulate the internals of libevent, so here are some usage hints: |
779 | |
1191 | |