… | |
… | |
127 | .\} |
127 | .\} |
128 | .rm #[ #] #H #V #F C |
128 | .rm #[ #] #H #V #F C |
129 | .\" ======================================================================== |
129 | .\" ======================================================================== |
130 | .\" |
130 | .\" |
131 | .IX Title ""<STANDARD INPUT>" 1" |
131 | .IX Title ""<STANDARD INPUT>" 1" |
132 | .TH "<STANDARD INPUT>" 1 "2007-11-13" "perl v5.8.8" "User Contributed Perl Documentation" |
132 | .TH "<STANDARD INPUT>" 1 "2007-11-24" "perl v5.8.8" "User Contributed Perl Documentation" |
133 | .SH "NAME" |
133 | .SH "NAME" |
134 | libev \- a high performance full\-featured event loop written in C |
134 | libev \- a high performance full\-featured event loop written in C |
135 | .SH "SYNOPSIS" |
135 | .SH "SYNOPSIS" |
136 | .IX Header "SYNOPSIS" |
136 | .IX Header "SYNOPSIS" |
137 | .Vb 1 |
137 | .Vb 1 |
… | |
… | |
173 | .IX Header "TIME REPRESENTATION" |
173 | .IX Header "TIME REPRESENTATION" |
174 | Libev represents time as a single floating point number, representing the |
174 | Libev represents time as a single floating point number, representing the |
175 | (fractional) number of seconds since the (\s-1POSIX\s0) epoch (somewhere near |
175 | (fractional) number of seconds since the (\s-1POSIX\s0) epoch (somewhere near |
176 | the beginning of 1970, details are complicated, don't ask). This type is |
176 | the beginning of 1970, details are complicated, don't ask). This type is |
177 | called \f(CW\*(C`ev_tstamp\*(C'\fR, which is what you should use too. It usually aliases |
177 | called \f(CW\*(C`ev_tstamp\*(C'\fR, which is what you should use too. It usually aliases |
178 | to the double type in C. |
178 | to the \f(CW\*(C`double\*(C'\fR type in C, and when you need to do any calculations on |
|
|
179 | it, you should treat it as such. |
179 | .SH "GLOBAL FUNCTIONS" |
180 | .SH "GLOBAL FUNCTIONS" |
180 | .IX Header "GLOBAL FUNCTIONS" |
181 | .IX Header "GLOBAL FUNCTIONS" |
181 | These functions can be called anytime, even before initialising the |
182 | These functions can be called anytime, even before initialising the |
182 | library in any way. |
183 | library in any way. |
183 | .IP "ev_tstamp ev_time ()" 4 |
184 | .IP "ev_tstamp ev_time ()" 4 |
184 | .IX Item "ev_tstamp ev_time ()" |
185 | .IX Item "ev_tstamp ev_time ()" |
185 | Returns the current time as libev would use it. |
186 | Returns the current time as libev would use it. Please note that the |
|
|
187 | \&\f(CW\*(C`ev_now\*(C'\fR function is usually faster and also often returns the timestamp |
|
|
188 | you actually want to know. |
186 | .IP "int ev_version_major ()" 4 |
189 | .IP "int ev_version_major ()" 4 |
187 | .IX Item "int ev_version_major ()" |
190 | .IX Item "int ev_version_major ()" |
188 | .PD 0 |
191 | .PD 0 |
189 | .IP "int ev_version_minor ()" 4 |
192 | .IP "int ev_version_minor ()" 4 |
190 | .IX Item "int ev_version_minor ()" |
193 | .IX Item "int ev_version_minor ()" |
… | |
… | |
197 | .Sp |
200 | .Sp |
198 | Usually, it's a good idea to terminate if the major versions mismatch, |
201 | Usually, it's a good idea to terminate if the major versions mismatch, |
199 | as this indicates an incompatible change. Minor versions are usually |
202 | as this indicates an incompatible change. Minor versions are usually |
200 | compatible to older versions, so a larger minor version alone is usually |
203 | compatible to older versions, so a larger minor version alone is usually |
201 | not a problem. |
204 | not a problem. |
|
|
205 | .Sp |
|
|
206 | Example: make sure we haven't accidentally been linked against the wrong |
|
|
207 | version: |
|
|
208 | .Sp |
|
|
209 | .Vb 3 |
|
|
210 | \& assert (("libev version mismatch", |
|
|
211 | \& ev_version_major () == EV_VERSION_MAJOR |
|
|
212 | \& && ev_version_minor () >= EV_VERSION_MINOR)); |
|
|
213 | .Ve |
|
|
214 | .IP "unsigned int ev_supported_backends ()" 4 |
|
|
215 | .IX Item "unsigned int ev_supported_backends ()" |
|
|
216 | Return the set of all backends (i.e. their corresponding \f(CW\*(C`EV_BACKEND_*\*(C'\fR |
|
|
217 | value) compiled into this binary of libev (independent of their |
|
|
218 | availability on the system you are running on). See \f(CW\*(C`ev_default_loop\*(C'\fR for |
|
|
219 | a description of the set values. |
|
|
220 | .Sp |
|
|
221 | Example: make sure we have the epoll method, because yeah this is cool and |
|
|
222 | a must have and can we have a torrent of it please!!!11 |
|
|
223 | .Sp |
|
|
224 | .Vb 2 |
|
|
225 | \& assert (("sorry, no epoll, no sex", |
|
|
226 | \& ev_supported_backends () & EVBACKEND_EPOLL)); |
|
|
227 | .Ve |
|
|
228 | .IP "unsigned int ev_recommended_backends ()" 4 |
|
|
229 | .IX Item "unsigned int ev_recommended_backends ()" |
|
|
230 | Return the set of all backends compiled into this binary of libev and also |
|
|
231 | recommended for this platform. This set is often smaller than the one |
|
|
232 | returned by \f(CW\*(C`ev_supported_backends\*(C'\fR, as for example kqueue is broken on |
|
|
233 | most BSDs and will not be autodetected unless you explicitly request it |
|
|
234 | (assuming you know what you are doing). This is the set of backends that |
|
|
235 | libev will probe for if you specify no backends explicitly. |
|
|
236 | .IP "unsigned int ev_embeddable_backends ()" 4 |
|
|
237 | .IX Item "unsigned int ev_embeddable_backends ()" |
|
|
238 | Returns the set of backends that are embeddable in other event loops. This |
|
|
239 | is the theoretical, all\-platform, value. To find which backends |
|
|
240 | might be supported on the current system, you would need to look at |
|
|
241 | \&\f(CW\*(C`ev_embeddable_backends () & ev_supported_backends ()\*(C'\fR, likewise for |
|
|
242 | recommended ones. |
|
|
243 | .Sp |
|
|
244 | See the description of \f(CW\*(C`ev_embed\*(C'\fR watchers for more info. |
202 | .IP "ev_set_allocator (void *(*cb)(void *ptr, long size))" 4 |
245 | .IP "ev_set_allocator (void *(*cb)(void *ptr, long size))" 4 |
203 | .IX Item "ev_set_allocator (void *(*cb)(void *ptr, long size))" |
246 | .IX Item "ev_set_allocator (void *(*cb)(void *ptr, long size))" |
204 | Sets the allocation function to use (the prototype is similar to the |
247 | Sets the allocation function to use (the prototype is similar to the |
205 | realloc C function, the semantics are identical). It is used to allocate |
248 | realloc C function, the semantics are identical). It is used to allocate |
206 | and free memory (no surprises here). If it returns zero when memory |
249 | and free memory (no surprises here). If it returns zero when memory |
… | |
… | |
208 | destructive action. The default is your system realloc function. |
251 | destructive action. The default is your system realloc function. |
209 | .Sp |
252 | .Sp |
210 | You could override this function in high-availability programs to, say, |
253 | You could override this function in high-availability programs to, say, |
211 | free some memory if it cannot allocate memory, to use a special allocator, |
254 | free some memory if it cannot allocate memory, to use a special allocator, |
212 | or even to sleep a while and retry until some memory is available. |
255 | or even to sleep a while and retry until some memory is available. |
|
|
256 | .Sp |
|
|
257 | Example: replace the libev allocator with one that waits a bit and then |
|
|
258 | retries: better than mine). |
|
|
259 | .Sp |
|
|
260 | .Vb 6 |
|
|
261 | \& static void * |
|
|
262 | \& persistent_realloc (void *ptr, long size) |
|
|
263 | \& { |
|
|
264 | \& for (;;) |
|
|
265 | \& { |
|
|
266 | \& void *newptr = realloc (ptr, size); |
|
|
267 | .Ve |
|
|
268 | .Sp |
|
|
269 | .Vb 2 |
|
|
270 | \& if (newptr) |
|
|
271 | \& return newptr; |
|
|
272 | .Ve |
|
|
273 | .Sp |
|
|
274 | .Vb 3 |
|
|
275 | \& sleep (60); |
|
|
276 | \& } |
|
|
277 | \& } |
|
|
278 | .Ve |
|
|
279 | .Sp |
|
|
280 | .Vb 2 |
|
|
281 | \& ... |
|
|
282 | \& ev_set_allocator (persistent_realloc); |
|
|
283 | .Ve |
213 | .IP "ev_set_syserr_cb (void (*cb)(const char *msg));" 4 |
284 | .IP "ev_set_syserr_cb (void (*cb)(const char *msg));" 4 |
214 | .IX Item "ev_set_syserr_cb (void (*cb)(const char *msg));" |
285 | .IX Item "ev_set_syserr_cb (void (*cb)(const char *msg));" |
215 | Set the callback function to call on a retryable syscall error (such |
286 | Set the callback function to call on a retryable syscall error (such |
216 | as failed select, poll, epoll_wait). The message is a printable string |
287 | as failed select, poll, epoll_wait). The message is a printable string |
217 | indicating the system call or subsystem causing the problem. If this |
288 | indicating the system call or subsystem causing the problem. If this |
218 | callback is set, then libev will expect it to remedy the situation, no |
289 | callback is set, then libev will expect it to remedy the situation, no |
219 | matter what, when it returns. That is, libev will generally retry the |
290 | matter what, when it returns. That is, libev will generally retry the |
220 | requested operation, or, if the condition doesn't go away, do bad stuff |
291 | requested operation, or, if the condition doesn't go away, do bad stuff |
221 | (such as abort). |
292 | (such as abort). |
|
|
293 | .Sp |
|
|
294 | Example: do the same thing as libev does internally: |
|
|
295 | .Sp |
|
|
296 | .Vb 6 |
|
|
297 | \& static void |
|
|
298 | \& fatal_error (const char *msg) |
|
|
299 | \& { |
|
|
300 | \& perror (msg); |
|
|
301 | \& abort (); |
|
|
302 | \& } |
|
|
303 | .Ve |
|
|
304 | .Sp |
|
|
305 | .Vb 2 |
|
|
306 | \& ... |
|
|
307 | \& ev_set_syserr_cb (fatal_error); |
|
|
308 | .Ve |
222 | .SH "FUNCTIONS CONTROLLING THE EVENT LOOP" |
309 | .SH "FUNCTIONS CONTROLLING THE EVENT LOOP" |
223 | .IX Header "FUNCTIONS CONTROLLING THE EVENT LOOP" |
310 | .IX Header "FUNCTIONS CONTROLLING THE EVENT LOOP" |
224 | An event loop is described by a \f(CW\*(C`struct ev_loop *\*(C'\fR. The library knows two |
311 | An event loop is described by a \f(CW\*(C`struct ev_loop *\*(C'\fR. The library knows two |
225 | types of such loops, the \fIdefault\fR loop, which supports signals and child |
312 | types of such loops, the \fIdefault\fR loop, which supports signals and child |
226 | events, and dynamically created loops which do not. |
313 | events, and dynamically created loops which do not. |
… | |
… | |
234 | .IP "struct ev_loop *ev_default_loop (unsigned int flags)" 4 |
321 | .IP "struct ev_loop *ev_default_loop (unsigned int flags)" 4 |
235 | .IX Item "struct ev_loop *ev_default_loop (unsigned int flags)" |
322 | .IX Item "struct ev_loop *ev_default_loop (unsigned int flags)" |
236 | This will initialise the default event loop if it hasn't been initialised |
323 | This will initialise the default event loop if it hasn't been initialised |
237 | yet and return it. If the default loop could not be initialised, returns |
324 | yet and return it. If the default loop could not be initialised, returns |
238 | false. If it already was initialised it simply returns it (and ignores the |
325 | false. If it already was initialised it simply returns it (and ignores the |
239 | flags). |
326 | flags. If that is troubling you, check \f(CW\*(C`ev_backend ()\*(C'\fR afterwards). |
240 | .Sp |
327 | .Sp |
241 | If you don't know what event loop to use, use the one returned from this |
328 | If you don't know what event loop to use, use the one returned from this |
242 | function. |
329 | function. |
243 | .Sp |
330 | .Sp |
244 | The flags argument can be used to specify special behaviour or specific |
331 | The flags argument can be used to specify special behaviour or specific |
245 | backends to use, and is usually specified as 0 (or \s-1EVFLAG_AUTO\s0). |
332 | backends to use, and is usually specified as \f(CW0\fR (or \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). |
246 | .Sp |
333 | .Sp |
247 | It supports the following flags: |
334 | The following flags are supported: |
248 | .RS 4 |
335 | .RS 4 |
249 | .ie n .IP """EVFLAG_AUTO""" 4 |
336 | .ie n .IP """EVFLAG_AUTO""" 4 |
250 | .el .IP "\f(CWEVFLAG_AUTO\fR" 4 |
337 | .el .IP "\f(CWEVFLAG_AUTO\fR" 4 |
251 | .IX Item "EVFLAG_AUTO" |
338 | .IX Item "EVFLAG_AUTO" |
252 | The default flags value. Use this if you have no clue (it's the right |
339 | The default flags value. Use this if you have no clue (it's the right |
… | |
… | |
258 | or setgid) then libev will \fInot\fR look at the environment variable |
345 | or setgid) then libev will \fInot\fR look at the environment variable |
259 | \&\f(CW\*(C`LIBEV_FLAGS\*(C'\fR. Otherwise (the default), this environment variable will |
346 | \&\f(CW\*(C`LIBEV_FLAGS\*(C'\fR. Otherwise (the default), this environment variable will |
260 | override the flags completely if it is found in the environment. This is |
347 | override the flags completely if it is found in the environment. This is |
261 | useful to try out specific backends to test their performance, or to work |
348 | useful to try out specific backends to test their performance, or to work |
262 | around bugs. |
349 | around bugs. |
263 | .ie n .IP """EVMETHOD_SELECT"" (portable select backend)" 4 |
350 | .ie n .IP """EVBACKEND_SELECT"" (value 1, portable select backend)" 4 |
264 | .el .IP "\f(CWEVMETHOD_SELECT\fR (portable select backend)" 4 |
351 | .el .IP "\f(CWEVBACKEND_SELECT\fR (value 1, portable select backend)" 4 |
265 | .IX Item "EVMETHOD_SELECT (portable select backend)" |
352 | .IX Item "EVBACKEND_SELECT (value 1, portable select backend)" |
266 | .PD 0 |
353 | This is your standard \fIselect\fR\|(2) backend. Not \fIcompletely\fR standard, as |
|
|
354 | libev tries to roll its own fd_set with no limits on the number of fds, |
|
|
355 | but if that fails, expect a fairly low limit on the number of fds when |
|
|
356 | using this backend. It doesn't scale too well (O(highest_fd)), but it's usually |
|
|
357 | the fastest backend for a low number of fds. |
267 | .ie n .IP """EVMETHOD_POLL"" (poll backend, available everywhere except on windows)" 4 |
358 | .ie n .IP """EVBACKEND_POLL"" (value 2, poll backend, available everywhere except on windows)" 4 |
268 | .el .IP "\f(CWEVMETHOD_POLL\fR (poll backend, available everywhere except on windows)" 4 |
359 | .el .IP "\f(CWEVBACKEND_POLL\fR (value 2, poll backend, available everywhere except on windows)" 4 |
269 | .IX Item "EVMETHOD_POLL (poll backend, available everywhere except on windows)" |
360 | .IX Item "EVBACKEND_POLL (value 2, poll backend, available everywhere except on windows)" |
|
|
361 | And this is your standard \fIpoll\fR\|(2) backend. It's more complicated than |
|
|
362 | select, but handles sparse fds better and has no artificial limit on the |
|
|
363 | number of fds you can use (except it will slow down considerably with a |
|
|
364 | lot of inactive fds). It scales similarly to select, i.e. O(total_fds). |
270 | .ie n .IP """EVMETHOD_EPOLL"" (linux only)" 4 |
365 | .ie n .IP """EVBACKEND_EPOLL"" (value 4, Linux)" 4 |
271 | .el .IP "\f(CWEVMETHOD_EPOLL\fR (linux only)" 4 |
366 | .el .IP "\f(CWEVBACKEND_EPOLL\fR (value 4, Linux)" 4 |
272 | .IX Item "EVMETHOD_EPOLL (linux only)" |
367 | .IX Item "EVBACKEND_EPOLL (value 4, Linux)" |
273 | .ie n .IP """EVMETHOD_KQUEUE"" (some bsds only)" 4 |
368 | For few fds, this backend is a bit slower than poll and select, |
274 | .el .IP "\f(CWEVMETHOD_KQUEUE\fR (some bsds only)" 4 |
369 | but it scales phenomenally better. While poll and select usually scale like |
275 | .IX Item "EVMETHOD_KQUEUE (some bsds only)" |
370 | O(total_fds) where total_fds is the total number of fds (or the highest fd), epoll scales |
|
|
371 | either O(1) or O(active_fds). |
|
|
372 | .Sp |
|
|
373 | While stopping and starting an I/O watcher in the same iteration will |
|
|
374 | result in some caching, there is still a syscall per such incident |
|
|
375 | (because the fd could point to a different file description now), so it's |
|
|
376 | best to avoid that. Also, \fIdup()\fRed file descriptors might not work very |
|
|
377 | well if you register events for both fds. |
|
|
378 | .Sp |
|
|
379 | Please note that epoll sometimes generates spurious notifications, so you |
|
|
380 | need to use non-blocking I/O or other means to avoid blocking when no data |
|
|
381 | (or space) is available. |
|
|
382 | .ie n .IP """EVBACKEND_KQUEUE"" (value 8, most \s-1BSD\s0 clones)" 4 |
|
|
383 | .el .IP "\f(CWEVBACKEND_KQUEUE\fR (value 8, most \s-1BSD\s0 clones)" 4 |
|
|
384 | .IX Item "EVBACKEND_KQUEUE (value 8, most BSD clones)" |
|
|
385 | Kqueue deserves special mention, as at the time of this writing, it |
|
|
386 | was broken on all BSDs except NetBSD (usually it doesn't work with |
|
|
387 | anything but sockets and pipes, except on Darwin, where of course it's |
|
|
388 | completely useless). For this reason it's not being \*(L"autodetected\*(R" |
|
|
389 | unless you explicitly specify it in the flags (i.e. using |
|
|
390 | \&\f(CW\*(C`EVBACKEND_KQUEUE\*(C'\fR). |
|
|
391 | .Sp |
|
|
392 | It scales in the same way as the epoll backend, but the interface to the |
|
|
393 | kernel is more efficient (which says nothing about its actual speed, of |
|
|
394 | course). While starting and stopping an I/O watcher does not cause an |
|
|
395 | extra syscall as with epoll, it still adds up to four event changes per |
|
|
396 | incident, so it's best to avoid that. |
276 | .ie n .IP """EVMETHOD_DEVPOLL"" (solaris 8 only)" 4 |
397 | .ie n .IP """EVBACKEND_DEVPOLL"" (value 16, Solaris 8)" 4 |
277 | .el .IP "\f(CWEVMETHOD_DEVPOLL\fR (solaris 8 only)" 4 |
398 | .el .IP "\f(CWEVBACKEND_DEVPOLL\fR (value 16, Solaris 8)" 4 |
278 | .IX Item "EVMETHOD_DEVPOLL (solaris 8 only)" |
399 | .IX Item "EVBACKEND_DEVPOLL (value 16, Solaris 8)" |
|
|
400 | This is not implemented yet (and might never be). |
279 | .ie n .IP """EVMETHOD_PORT"" (solaris 10 only)" 4 |
401 | .ie n .IP """EVBACKEND_PORT"" (value 32, Solaris 10)" 4 |
280 | .el .IP "\f(CWEVMETHOD_PORT\fR (solaris 10 only)" 4 |
402 | .el .IP "\f(CWEVBACKEND_PORT\fR (value 32, Solaris 10)" 4 |
281 | .IX Item "EVMETHOD_PORT (solaris 10 only)" |
403 | .IX Item "EVBACKEND_PORT (value 32, Solaris 10)" |
282 | .PD |
404 | This uses the Solaris 10 port mechanism. As with everything on Solaris, |
283 | If one or more of these are ored into the flags value, then only these |
405 | it's really slow, but it still scales very well (O(active_fds)). |
284 | backends will be tried (in the reverse order as given here). If none are |
406 | .Sp |
285 | specified, any backend will do. |
407 | Please note that solaris ports can result in a lot of spurious |
|
|
408 | notifications, so you need to use non-blocking I/O or other means to avoid |
|
|
409 | blocking when no data (or space) is available. |
|
|
410 | .ie n .IP """EVBACKEND_ALL""" 4 |
|
|
411 | .el .IP "\f(CWEVBACKEND_ALL\fR" 4 |
|
|
412 | .IX Item "EVBACKEND_ALL" |
|
|
413 | Try all backends (even potentially broken ones that wouldn't be tried |
|
|
414 | with \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). Since this is a mask, you can do stuff such as |
|
|
415 | \&\f(CW\*(C`EVBACKEND_ALL & ~EVBACKEND_KQUEUE\*(C'\fR. |
286 | .RE |
416 | .RE |
287 | .RS 4 |
417 | .RS 4 |
|
|
418 | .Sp |
|
|
419 | If one or more of these are ored into the flags value, then only these |
|
|
420 | backends will be tried (in the reverse order as given here). If none are |
|
|
421 | specified, most compiled-in backend will be tried, usually in reverse |
|
|
422 | order of their flag values :) |
|
|
423 | .Sp |
|
|
424 | The most typical usage is like this: |
|
|
425 | .Sp |
|
|
426 | .Vb 2 |
|
|
427 | \& if (!ev_default_loop (0)) |
|
|
428 | \& fatal ("could not initialise libev, bad $LIBEV_FLAGS in environment?"); |
|
|
429 | .Ve |
|
|
430 | .Sp |
|
|
431 | Restrict libev to the select and poll backends, and do not allow |
|
|
432 | environment settings to be taken into account: |
|
|
433 | .Sp |
|
|
434 | .Vb 1 |
|
|
435 | \& ev_default_loop (EVBACKEND_POLL | EVBACKEND_SELECT | EVFLAG_NOENV); |
|
|
436 | .Ve |
|
|
437 | .Sp |
|
|
438 | Use whatever libev has to offer, but make sure that kqueue is used if |
|
|
439 | available (warning, breaks stuff, best use only with your own private |
|
|
440 | event loop and only if you know the \s-1OS\s0 supports your types of fds): |
|
|
441 | .Sp |
|
|
442 | .Vb 1 |
|
|
443 | \& ev_default_loop (ev_recommended_backends () | EVBACKEND_KQUEUE); |
|
|
444 | .Ve |
288 | .RE |
445 | .RE |
289 | .IP "struct ev_loop *ev_loop_new (unsigned int flags)" 4 |
446 | .IP "struct ev_loop *ev_loop_new (unsigned int flags)" 4 |
290 | .IX Item "struct ev_loop *ev_loop_new (unsigned int flags)" |
447 | .IX Item "struct ev_loop *ev_loop_new (unsigned int flags)" |
291 | Similar to \f(CW\*(C`ev_default_loop\*(C'\fR, but always creates a new event loop that is |
448 | Similar to \f(CW\*(C`ev_default_loop\*(C'\fR, but always creates a new event loop that is |
292 | always distinct from the default loop. Unlike the default loop, it cannot |
449 | always distinct from the default loop. Unlike the default loop, it cannot |
293 | handle signal and child watchers, and attempts to do so will be greeted by |
450 | handle signal and child watchers, and attempts to do so will be greeted by |
294 | undefined behaviour (or a failed assertion if assertions are enabled). |
451 | undefined behaviour (or a failed assertion if assertions are enabled). |
|
|
452 | .Sp |
|
|
453 | Example: try to create a event loop that uses epoll and nothing else. |
|
|
454 | .Sp |
|
|
455 | .Vb 3 |
|
|
456 | \& struct ev_loop *epoller = ev_loop_new (EVBACKEND_EPOLL | EVFLAG_NOENV); |
|
|
457 | \& if (!epoller) |
|
|
458 | \& fatal ("no epoll found here, maybe it hides under your chair"); |
|
|
459 | .Ve |
295 | .IP "ev_default_destroy ()" 4 |
460 | .IP "ev_default_destroy ()" 4 |
296 | .IX Item "ev_default_destroy ()" |
461 | .IX Item "ev_default_destroy ()" |
297 | Destroys the default loop again (frees all memory and kernel state |
462 | Destroys the default loop again (frees all memory and kernel state |
298 | etc.). This stops all registered event watchers (by not touching them in |
463 | etc.). This stops all registered event watchers (by not touching them in |
299 | any way whatsoever, although you cannot rely on this :). |
464 | any way whatsoever, although you cannot rely on this :). |
… | |
… | |
306 | This function reinitialises the kernel state for backends that have |
471 | This function reinitialises the kernel state for backends that have |
307 | one. Despite the name, you can call it anytime, but it makes most sense |
472 | one. Despite the name, you can call it anytime, but it makes most sense |
308 | after forking, in either the parent or child process (or both, but that |
473 | after forking, in either the parent or child process (or both, but that |
309 | again makes little sense). |
474 | again makes little sense). |
310 | .Sp |
475 | .Sp |
311 | You \fImust\fR call this function after forking if and only if you want to |
476 | You \fImust\fR call this function in the child process after forking if and |
312 | use the event library in both processes. If you just fork+exec, you don't |
477 | only if you want to use the event library in both processes. If you just |
313 | have to call it. |
478 | fork+exec, you don't have to call it. |
314 | .Sp |
479 | .Sp |
315 | The function itself is quite fast and it's usually not a problem to call |
480 | The function itself is quite fast and it's usually not a problem to call |
316 | it just in case after a fork. To make this easy, the function will fit in |
481 | it just in case after a fork. To make this easy, the function will fit in |
317 | quite nicely into a call to \f(CW\*(C`pthread_atfork\*(C'\fR: |
482 | quite nicely into a call to \f(CW\*(C`pthread_atfork\*(C'\fR: |
318 | .Sp |
483 | .Sp |
319 | .Vb 1 |
484 | .Vb 1 |
320 | \& pthread_atfork (0, 0, ev_default_fork); |
485 | \& pthread_atfork (0, 0, ev_default_fork); |
321 | .Ve |
486 | .Ve |
|
|
487 | .Sp |
|
|
488 | At the moment, \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and \f(CW\*(C`EVBACKEND_POLL\*(C'\fR are safe to use |
|
|
489 | without calling this function, so if you force one of those backends you |
|
|
490 | do not need to care. |
322 | .IP "ev_loop_fork (loop)" 4 |
491 | .IP "ev_loop_fork (loop)" 4 |
323 | .IX Item "ev_loop_fork (loop)" |
492 | .IX Item "ev_loop_fork (loop)" |
324 | Like \f(CW\*(C`ev_default_fork\*(C'\fR, but acts on an event loop created by |
493 | Like \f(CW\*(C`ev_default_fork\*(C'\fR, but acts on an event loop created by |
325 | \&\f(CW\*(C`ev_loop_new\*(C'\fR. Yes, you have to call this on every allocated event loop |
494 | \&\f(CW\*(C`ev_loop_new\*(C'\fR. Yes, you have to call this on every allocated event loop |
326 | after fork, and how you do this is entirely your own problem. |
495 | after fork, and how you do this is entirely your own problem. |
327 | .IP "unsigned int ev_method (loop)" 4 |
496 | .IP "unsigned int ev_backend (loop)" 4 |
328 | .IX Item "unsigned int ev_method (loop)" |
497 | .IX Item "unsigned int ev_backend (loop)" |
329 | Returns one of the \f(CW\*(C`EVMETHOD_*\*(C'\fR flags indicating the event backend in |
498 | Returns one of the \f(CW\*(C`EVBACKEND_*\*(C'\fR flags indicating the event backend in |
330 | use. |
499 | use. |
331 | .IP "ev_tstamp ev_now (loop)" 4 |
500 | .IP "ev_tstamp ev_now (loop)" 4 |
332 | .IX Item "ev_tstamp ev_now (loop)" |
501 | .IX Item "ev_tstamp ev_now (loop)" |
333 | Returns the current \*(L"event loop time\*(R", which is the time the event loop |
502 | Returns the current \*(L"event loop time\*(R", which is the time the event loop |
334 | got events and started processing them. This timestamp does not change |
503 | received events and started processing them. This timestamp does not |
335 | as long as callbacks are being processed, and this is also the base time |
504 | change as long as callbacks are being processed, and this is also the base |
336 | used for relative timers. You can treat it as the timestamp of the event |
505 | time used for relative timers. You can treat it as the timestamp of the |
337 | occurring (or more correctly, the mainloop finding out about it). |
506 | event occurring (or more correctly, libev finding out about it). |
338 | .IP "ev_loop (loop, int flags)" 4 |
507 | .IP "ev_loop (loop, int flags)" 4 |
339 | .IX Item "ev_loop (loop, int flags)" |
508 | .IX Item "ev_loop (loop, int flags)" |
340 | Finally, this is it, the event handler. This function usually is called |
509 | Finally, this is it, the event handler. This function usually is called |
341 | after you initialised all your watchers and you want to start handling |
510 | after you initialised all your watchers and you want to start handling |
342 | events. |
511 | events. |
343 | .Sp |
512 | .Sp |
344 | If the flags argument is specified as 0, it will not return until either |
513 | If the flags argument is specified as \f(CW0\fR, it will not return until |
345 | no event watchers are active anymore or \f(CW\*(C`ev_unloop\*(C'\fR was called. |
514 | either no event watchers are active anymore or \f(CW\*(C`ev_unloop\*(C'\fR was called. |
|
|
515 | .Sp |
|
|
516 | Please note that an explicit \f(CW\*(C`ev_unloop\*(C'\fR is usually better than |
|
|
517 | relying on all watchers to be stopped when deciding when a program has |
|
|
518 | finished (especially in interactive programs), but having a program that |
|
|
519 | automatically loops as long as it has to and no longer by virtue of |
|
|
520 | relying on its watchers stopping correctly is a thing of beauty. |
346 | .Sp |
521 | .Sp |
347 | A flags value of \f(CW\*(C`EVLOOP_NONBLOCK\*(C'\fR will look for new events, will handle |
522 | A flags value of \f(CW\*(C`EVLOOP_NONBLOCK\*(C'\fR will look for new events, will handle |
348 | those events and any outstanding ones, but will not block your process in |
523 | those events and any outstanding ones, but will not block your process in |
349 | case there are no events and will return after one iteration of the loop. |
524 | case there are no events and will return after one iteration of the loop. |
350 | .Sp |
525 | .Sp |
351 | A flags value of \f(CW\*(C`EVLOOP_ONESHOT\*(C'\fR will look for new events (waiting if |
526 | A flags value of \f(CW\*(C`EVLOOP_ONESHOT\*(C'\fR will look for new events (waiting if |
352 | necessary) and will handle those and any outstanding ones. It will block |
527 | necessary) and will handle those and any outstanding ones. It will block |
353 | your process until at least one new event arrives, and will return after |
528 | your process until at least one new event arrives, and will return after |
354 | one iteration of the loop. |
529 | one iteration of the loop. This is useful if you are waiting for some |
|
|
530 | external event in conjunction with something not expressible using other |
|
|
531 | libev watchers. However, a pair of \f(CW\*(C`ev_prepare\*(C'\fR/\f(CW\*(C`ev_check\*(C'\fR watchers is |
|
|
532 | usually a better approach for this kind of thing. |
355 | .Sp |
533 | .Sp |
356 | This flags value could be used to implement alternative looping |
534 | Here are the gory details of what \f(CW\*(C`ev_loop\*(C'\fR does: |
357 | constructs, but the \f(CW\*(C`prepare\*(C'\fR and \f(CW\*(C`check\*(C'\fR watchers provide a better and |
535 | .Sp |
358 | more generic mechanism. |
536 | .Vb 18 |
|
|
537 | \& * If there are no active watchers (reference count is zero), return. |
|
|
538 | \& - Queue prepare watchers and then call all outstanding watchers. |
|
|
539 | \& - If we have been forked, recreate the kernel state. |
|
|
540 | \& - Update the kernel state with all outstanding changes. |
|
|
541 | \& - Update the "event loop time". |
|
|
542 | \& - Calculate for how long to block. |
|
|
543 | \& - Block the process, waiting for any events. |
|
|
544 | \& - Queue all outstanding I/O (fd) events. |
|
|
545 | \& - Update the "event loop time" and do time jump handling. |
|
|
546 | \& - Queue all outstanding timers. |
|
|
547 | \& - Queue all outstanding periodics. |
|
|
548 | \& - If no events are pending now, queue all idle watchers. |
|
|
549 | \& - Queue all check watchers. |
|
|
550 | \& - Call all queued watchers in reverse order (i.e. check watchers first). |
|
|
551 | \& Signals and child watchers are implemented as I/O watchers, and will |
|
|
552 | \& be handled here by queueing them when their watcher gets executed. |
|
|
553 | \& - If ev_unloop has been called or EVLOOP_ONESHOT or EVLOOP_NONBLOCK |
|
|
554 | \& were used, return, otherwise continue with step *. |
|
|
555 | .Ve |
|
|
556 | .Sp |
|
|
557 | Example: queue some jobs and then loop until no events are outstanding |
|
|
558 | anymore. |
|
|
559 | .Sp |
|
|
560 | .Vb 4 |
|
|
561 | \& ... queue jobs here, make sure they register event watchers as long |
|
|
562 | \& ... as they still have work to do (even an idle watcher will do..) |
|
|
563 | \& ev_loop (my_loop, 0); |
|
|
564 | \& ... jobs done. yeah! |
|
|
565 | .Ve |
359 | .IP "ev_unloop (loop, how)" 4 |
566 | .IP "ev_unloop (loop, how)" 4 |
360 | .IX Item "ev_unloop (loop, how)" |
567 | .IX Item "ev_unloop (loop, how)" |
361 | Can be used to make a call to \f(CW\*(C`ev_loop\*(C'\fR return early (but only after it |
568 | Can be used to make a call to \f(CW\*(C`ev_loop\*(C'\fR return early (but only after it |
362 | has processed all outstanding events). The \f(CW\*(C`how\*(C'\fR argument must be either |
569 | has processed all outstanding events). The \f(CW\*(C`how\*(C'\fR argument must be either |
363 | \&\f(CW\*(C`EVUNLOOP_ONE\*(C'\fR, which will make the innermost \f(CW\*(C`ev_loop\*(C'\fR call return, or |
570 | \&\f(CW\*(C`EVUNLOOP_ONE\*(C'\fR, which will make the innermost \f(CW\*(C`ev_loop\*(C'\fR call return, or |
… | |
… | |
376 | example, libev itself uses this for its internal signal pipe: It is not |
583 | example, libev itself uses this for its internal signal pipe: It is not |
377 | visible to the libev user and should not keep \f(CW\*(C`ev_loop\*(C'\fR from exiting if |
584 | visible to the libev user and should not keep \f(CW\*(C`ev_loop\*(C'\fR from exiting if |
378 | no event watchers registered by it are active. It is also an excellent |
585 | no event watchers registered by it are active. It is also an excellent |
379 | way to do this for generic recurring timers or from within third-party |
586 | way to do this for generic recurring timers or from within third-party |
380 | libraries. Just remember to \fIunref after start\fR and \fIref before stop\fR. |
587 | libraries. Just remember to \fIunref after start\fR and \fIref before stop\fR. |
|
|
588 | .Sp |
|
|
589 | Example: create a signal watcher, but keep it from keeping \f(CW\*(C`ev_loop\*(C'\fR |
|
|
590 | running when nothing else is active. |
|
|
591 | .Sp |
|
|
592 | .Vb 4 |
|
|
593 | \&        struct ev_signal exitsig;
|
|
594 | \& ev_signal_init (&exitsig, sig_cb, SIGINT); |
|
|
595 | \& ev_signal_start (myloop, &exitsig); |
|
|
596 | \&        ev_unref (myloop);
|
|
597 | .Ve |
|
|
598 | .Sp |
|
|
599 | Example: for some weird reason, unregister the above signal handler again. |
|
|
600 | .Sp |
|
|
601 | .Vb 2 |
|
|
602 | \& ev_ref (myloop); |
|
|
603 | \& ev_signal_stop (myloop, &exitsig); |
|
|
604 | .Ve |
381 | .SH "ANATOMY OF A WATCHER" |
605 | .SH "ANATOMY OF A WATCHER" |
382 | .IX Header "ANATOMY OF A WATCHER" |
606 | .IX Header "ANATOMY OF A WATCHER" |
383 | A watcher is a structure that you create and register to record your |
607 | A watcher is a structure that you create and register to record your |
384 | interest in some event. For instance, if you want to wait for \s-1STDIN\s0 to |
608 | interest in some event. For instance, if you want to wait for \s-1STDIN\s0 to |
385 | become readable, you would create an \f(CW\*(C`ev_io\*(C'\fR watcher for that: |
609 | become readable, you would create an \f(CW\*(C`ev_io\*(C'\fR watcher for that: |
… | |
… | |
421 | *)\*(C'\fR), and you can stop watching for events at any time by calling the |
645 | *)\*(C'\fR), and you can stop watching for events at any time by calling the |
422 | corresponding stop function (\f(CW\*(C`ev_<type>_stop (loop, watcher *)\*(C'\fR).
646 | corresponding stop function (\f(CW\*(C`ev_<type>_stop (loop, watcher *)\*(C'\fR).
423 | .PP |
647 | .PP |
424 | As long as your watcher is active (has been started but not stopped) you |
648 | As long as your watcher is active (has been started but not stopped) you |
425 | must not touch the values stored in it. Most specifically you must never |
649 | must not touch the values stored in it. Most specifically you must never |
426 | reinitialise it or call its set method. |
650 | reinitialise it or call its set macro. |
427 | .PP |
651 | .PP |
428 | You can check whether an event is active by calling the \f(CW\*(C`ev_is_active |
652 | You can check whether an event is active by calling the \f(CW\*(C`ev_is_active |
429 | (watcher *)\*(C'\fR macro. To see whether an event is outstanding (but the |
653 | (watcher *)\*(C'\fR macro. To see whether an event is outstanding (but the |
430 | callback for it has not been called yet) you can use the \f(CW\*(C`ev_is_pending |
654 | callback for it has not been called yet) you can use the \f(CW\*(C`ev_is_pending |
431 | (watcher *)\*(C'\fR macro. |
655 | (watcher *)\*(C'\fR macro. |
… | |
… | |
551 | descriptors correctly if you register interest in two or more fds pointing |
775 | descriptors correctly if you register interest in two or more fds pointing |
552 | to the same underlying file/socket etc. description (that is, they share |
776 | to the same underlying file/socket etc. description (that is, they share |
553 | the same underlying \*(L"file open\*(R"). |
777 | the same underlying \*(L"file open\*(R"). |
554 | .PP |
778 | .PP |
555 | If you must do this, then force the use of a known-to-be-good backend |
779 | If you must do this, then force the use of a known-to-be-good backend |
556 | (at the time of this writing, this includes only \s-1EVMETHOD_SELECT\s0 and |
780 | (at the time of this writing, this includes only \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and |
557 | \&\s-1EVMETHOD_POLL\s0). |
781 | \&\f(CW\*(C`EVBACKEND_POLL\*(C'\fR). |
558 | .IP "ev_io_init (ev_io *, callback, int fd, int events)" 4 |
782 | .IP "ev_io_init (ev_io *, callback, int fd, int events)" 4 |
559 | .IX Item "ev_io_init (ev_io *, callback, int fd, int events)" |
783 | .IX Item "ev_io_init (ev_io *, callback, int fd, int events)" |
560 | .PD 0 |
784 | .PD 0 |
561 | .IP "ev_io_set (ev_io *, int fd, int events)" 4 |
785 | .IP "ev_io_set (ev_io *, int fd, int events)" 4 |
562 | .IX Item "ev_io_set (ev_io *, int fd, int events)" |
786 | .IX Item "ev_io_set (ev_io *, int fd, int events)" |
563 | .PD |
787 | .PD |
564 | Configures an \f(CW\*(C`ev_io\*(C'\fR watcher. The fd is the file descriptor to receive
788 | Configures an \f(CW\*(C`ev_io\*(C'\fR watcher. The fd is the file descriptor to receive
565 | events for and events is either \f(CW\*(C`EV_READ\*(C'\fR, \f(CW\*(C`EV_WRITE\*(C'\fR or \f(CW\*(C`EV_READ | |
789 | events for and events is either \f(CW\*(C`EV_READ\*(C'\fR, \f(CW\*(C`EV_WRITE\*(C'\fR or \f(CW\*(C`EV_READ | |
566 | EV_WRITE\*(C'\fR to receive the given events. |
790 | EV_WRITE\*(C'\fR to receive the given events. |
|
|
791 | .Sp |
|
|
792 | Please note that most of the more scalable backend mechanisms (for example |
|
|
793 | epoll and solaris ports) can result in spurious readiness notifications
|
|
794 | for file descriptors, so you practically need to use non-blocking I/O (and |
|
|
795 | treat callback invocation as hint only), or retest separately with a safe |
|
|
796 | interface before doing I/O (XLib can do this), or force the use of either |
|
|
797 | \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR, which don't suffer from this |
|
|
798 | problem. Also note that it is quite easy to have your callback invoked |
|
|
799 | when the readiness condition is no longer valid even when employing
|
|
800 | typical ways of handling events, so it's a good idea to use non-blocking
|
|
801 | I/O unconditionally. |
|
|
802 | .PP |
|
|
803 | Example: call \f(CW\*(C`stdin_readable_cb\*(C'\fR when \s-1STDIN_FILENO\s0 has become, well |
|
|
804 | readable, but only once. Since it is likely line\-buffered, you could |
|
|
805 | attempt to read a whole line in the callback: |
|
|
806 | .PP |
|
|
807 | .Vb 6 |
|
|
808 | \& static void |
|
|
809 | \& stdin_readable_cb (struct ev_loop *loop, struct ev_io *w, int revents) |
|
|
810 | \& { |
|
|
811 | \& ev_io_stop (loop, w); |
|
|
812 | \&     .. read from stdin here (or from w->fd) and handle any I/O errors
|
|
813 | \& } |
|
|
814 | .Ve |
|
|
815 | .PP |
|
|
816 | .Vb 6 |
|
|
817 | \& ... |
|
|
818 | \& struct ev_loop *loop = ev_default_init (0); |
|
|
819 | \& struct ev_io stdin_readable; |
|
|
820 | \& ev_io_init (&stdin_readable, stdin_readable_cb, STDIN_FILENO, EV_READ); |
|
|
821 | \& ev_io_start (loop, &stdin_readable); |
|
|
822 | \& ev_loop (loop, 0); |
|
|
823 | .Ve |
567 | .ie n .Sh """ev_timer"" \- relative and optionally recurring timeouts" |
824 | .ie n .Sh """ev_timer"" \- relative and optionally recurring timeouts" |
568 | .el .Sh "\f(CWev_timer\fP \- relative and optionally recurring timeouts" |
825 | .el .Sh "\f(CWev_timer\fP \- relative and optionally recurring timeouts" |
569 | .IX Subsection "ev_timer - relative and optionally recurring timeouts" |
826 | .IX Subsection "ev_timer - relative and optionally recurring timeouts" |
570 | Timer watchers are simple relative timers that generate an event after a |
827 | Timer watchers are simple relative timers that generate an event after a |
571 | given time, and optionally repeating in regular intervals after that. |
828 | given time, and optionally repeating in regular intervals after that. |
572 | .PP |
829 | .PP |
573 | The timers are based on real time, that is, if you register an event that |
830 | The timers are based on real time, that is, if you register an event that |
574 | times out after an hour and you reset your system clock to last year's
831 | times out after an hour and you reset your system clock to last year's
575 | time, it will still time out after (roughly) an hour. \*(L"Roughly\*(R" because
832 | time, it will still time out after (roughly) an hour. \*(L"Roughly\*(R" because
576 | detecting time jumps is hard, and some inaccuracies are unavoidable (the
833 | detecting time jumps is hard, and some inaccuracies are unavoidable (the |
577 | monotonic clock option helps a lot here). |
834 | monotonic clock option helps a lot here). |
578 | .PP |
835 | .PP |
579 | The relative timeouts are calculated relative to the \f(CW\*(C`ev_now ()\*(C'\fR |
836 | The relative timeouts are calculated relative to the \f(CW\*(C`ev_now ()\*(C'\fR |
580 | time. This is usually the right thing as this timestamp refers to the time |
837 | time. This is usually the right thing as this timestamp refers to the time |
581 | of the event triggering whatever timeout you are modifying/starting. If |
838 | of the event triggering whatever timeout you are modifying/starting. If |
582 | you suspect event processing to be delayed and you *need* to base the timeout |
839 | you suspect event processing to be delayed and you \fIneed\fR to base the timeout |
583 | on the current time, use something like this to adjust for this: |
840 | on the current time, use something like this to adjust for this: |
584 | .PP |
841 | .PP |
585 | .Vb 1 |
842 | .Vb 1 |
586 | \& ev_timer_set (&timer, after + ev_now () - ev_time (), 0.); |
843 | \& ev_timer_set (&timer, after + ev_now () - ev_time (), 0.); |
587 | .Ve |
844 | .Ve |
|
|
845 | .PP |
|
|
846 | The callback is guaranteed to be invoked only when its timeout has passed,
|
|
847 | but if multiple timers become ready during the same loop iteration then |
|
|
848 | order of execution is undefined. |
588 | .IP "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" 4 |
849 | .IP "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" 4 |
589 | .IX Item "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" |
850 | .IX Item "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" |
590 | .PD 0 |
851 | .PD 0 |
591 | .IP "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" 4 |
852 | .IP "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" 4 |
592 | .IX Item "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" |
853 | .IX Item "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" |
… | |
… | |
617 | seconds of inactivity on the socket. The easiest way to do this is to |
878 | seconds of inactivity on the socket. The easiest way to do this is to |
618 | configure an \f(CW\*(C`ev_timer\*(C'\fR with after=repeat=60 and calling ev_timer_again each |
879 | configure an \f(CW\*(C`ev_timer\*(C'\fR with after=repeat=60 and calling ev_timer_again each |
619 | time you successfully read or write some data. If you go into an idle |
880 | time you successfully read or write some data. If you go into an idle |
620 | state where you do not expect data to travel on the socket, you can stop |
881 | state where you do not expect data to travel on the socket, you can stop |
621 | the timer, and again will automatically restart it if need be. |
882 | the timer, and again will automatically restart it if need be. |
|
|
883 | .PP |
|
|
884 | Example: create a timer that fires after 60 seconds. |
|
|
885 | .PP |
|
|
886 | .Vb 5 |
|
|
887 | \& static void |
|
|
888 | \& one_minute_cb (struct ev_loop *loop, struct ev_timer *w, int revents) |
|
|
889 | \& { |
|
|
890 | \& .. one minute over, w is actually stopped right here |
|
|
891 | \& } |
|
|
892 | .Ve |
|
|
893 | .PP |
|
|
894 | .Vb 3 |
|
|
895 | \& struct ev_timer mytimer; |
|
|
896 | \& ev_timer_init (&mytimer, one_minute_cb, 60., 0.); |
|
|
897 | \& ev_timer_start (loop, &mytimer); |
|
|
898 | .Ve |
|
|
899 | .PP |
|
|
900 | Example: create a timeout timer that times out after 10 seconds of |
|
|
901 | inactivity. |
|
|
902 | .PP |
|
|
903 | .Vb 5 |
|
|
904 | \& static void |
|
|
905 | \& timeout_cb (struct ev_loop *loop, struct ev_timer *w, int revents) |
|
|
906 | \& { |
|
|
907 | \& .. ten seconds without any activity |
|
|
908 | \& } |
|
|
909 | .Ve |
|
|
910 | .PP |
|
|
911 | .Vb 4 |
|
|
912 | \& struct ev_timer mytimer; |
|
|
913 | \& ev_timer_init (&mytimer, timeout_cb, 0., 10.); /* note, only repeat used */ |
|
|
914 | \& ev_timer_again (&mytimer); /* start timer */ |
|
|
915 | \& ev_loop (loop, 0); |
|
|
916 | .Ve |
|
|
917 | .PP |
|
|
918 | .Vb 3 |
|
|
919 | \& // and in some piece of code that gets executed on any "activity": |
|
|
920 | \& // reset the timeout to start ticking again at 10 seconds |
|
|
921 | \& ev_timer_again (&mytimer); |
|
|
922 | .Ve |
622 | .ie n .Sh """ev_periodic"" \- to cron or not to cron" |
923 | .ie n .Sh """ev_periodic"" \- to cron or not to cron" |
623 | .el .Sh "\f(CWev_periodic\fP \- to cron or not to cron" |
924 | .el .Sh "\f(CWev_periodic\fP \- to cron or not to cron" |
624 | .IX Subsection "ev_periodic - to cron or not to cron" |
925 | .IX Subsection "ev_periodic - to cron or not to cron" |
625 | Periodic watchers are also timers of a kind, but they are very versatile |
926 | Periodic watchers are also timers of a kind, but they are very versatile |
626 | (and unfortunately a bit complex). |
927 | (and unfortunately a bit complex). |
… | |
… | |
634 | roughly 10 seconds later and of course not if you reset your system time |
935 | roughly 10 seconds later and of course not if you reset your system time |
635 | again). |
936 | again). |
636 | .PP |
937 | .PP |
637 | They can also be used to implement vastly more complex timers, such as |
938 | They can also be used to implement vastly more complex timers, such as |
638 | triggering an event on each midnight, local time.
939 | triggering an event on each midnight, local time.
|
|
940 | .PP |
|
|
941 | As with timers, the callback is guaranteed to be invoked only when the
|
|
942 | time (\f(CW\*(C`at\*(C'\fR) has been passed, but if multiple periodic timers become ready |
|
|
943 | during the same loop iteration then order of execution is undefined. |
639 | .IP "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" 4 |
944 | .IP "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" 4 |
640 | .IX Item "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" |
945 | .IX Item "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" |
641 | .PD 0 |
946 | .PD 0 |
642 | .IP "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" 4 |
947 | .IP "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" 4 |
643 | .IX Item "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" |
948 | .IX Item "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" |
… | |
… | |
714 | .IX Item "ev_periodic_again (loop, ev_periodic *)" |
1019 | .IX Item "ev_periodic_again (loop, ev_periodic *)" |
715 | Simply stops and restarts the periodic watcher again. This is only useful |
1020 | Simply stops and restarts the periodic watcher again. This is only useful |
716 | when you changed some parameters or the reschedule callback would return |
1021 | when you changed some parameters or the reschedule callback would return |
717 | a different time than the last time it was called (e.g. in a crond like |
1022 | a different time than the last time it was called (e.g. in a crond like |
718 | program when the crontabs have changed). |
1023 | program when the crontabs have changed). |
|
|
1024 | .PP |
|
|
1025 | Example: call a callback every hour, or, more precisely, whenever the |
|
|
1026 | system clock is divisible by 3600. The callback invocation times have |
|
|
1027 | potentially a lot of jittering, but good long-term stability. |
|
|
1028 | .PP |
|
|
1029 | .Vb 5 |
|
|
1030 | \& static void |
|
|
1031 | \& clock_cb (struct ev_loop *loop, struct ev_io *w, int revents) |
|
|
1032 | \& { |
|
|
1033 | \&     ... it's now a full hour (UTC, or TAI or whatever your clock follows)
|
|
1034 | \& } |
|
|
1035 | .Ve |
|
|
1036 | .PP |
|
|
1037 | .Vb 3 |
|
|
1038 | \& struct ev_periodic hourly_tick; |
|
|
1039 | \& ev_periodic_init (&hourly_tick, clock_cb, 0., 3600., 0); |
|
|
1040 | \& ev_periodic_start (loop, &hourly_tick); |
|
|
1041 | .Ve |
|
|
1042 | .PP |
|
|
1043 | Example: the same as above, but use a reschedule callback to do it: |
|
|
1044 | .PP |
|
|
1045 | .Vb 1 |
|
|
1046 | \& #include <math.h> |
|
|
1047 | .Ve |
|
|
1048 | .PP |
|
|
1049 | .Vb 5 |
|
|
1050 | \& static ev_tstamp |
|
|
1051 | \& my_scheduler_cb (struct ev_periodic *w, ev_tstamp now) |
|
|
1052 | \& { |
|
|
1053 | \& return fmod (now, 3600.) + 3600.; |
|
|
1054 | \& } |
|
|
1055 | .Ve |
|
|
1056 | .PP |
|
|
1057 | .Vb 1 |
|
|
1058 | \& ev_periodic_init (&hourly_tick, clock_cb, 0., 0., my_scheduler_cb); |
|
|
1059 | .Ve |
|
|
1060 | .PP |
|
|
1061 | Example: call a callback every hour, starting now: |
|
|
1062 | .PP |
|
|
1063 | .Vb 4 |
|
|
1064 | \& struct ev_periodic hourly_tick; |
|
|
1065 | \& ev_periodic_init (&hourly_tick, clock_cb, |
|
|
1066 | \& fmod (ev_now (loop), 3600.), 3600., 0); |
|
|
1067 | \& ev_periodic_start (loop, &hourly_tick); |
|
|
1068 | .Ve |
719 | .ie n .Sh """ev_signal"" \- signal me when a signal gets signalled" |
1069 | .ie n .Sh """ev_signal"" \- signal me when a signal gets signalled" |
720 | .el .Sh "\f(CWev_signal\fP \- signal me when a signal gets signalled" |
1070 | .el .Sh "\f(CWev_signal\fP \- signal me when a signal gets signalled" |
721 | .IX Subsection "ev_signal - signal me when a signal gets signalled" |
1071 | .IX Subsection "ev_signal - signal me when a signal gets signalled" |
722 | Signal watchers will trigger an event when the process receives a specific |
1072 | Signal watchers will trigger an event when the process receives a specific |
723 | signal one or more times. Even though signals are very asynchronous, libev |
1073 | signal one or more times. Even though signals are very asynchronous, libev |
… | |
… | |
753 | \&\fIany\fR process if \f(CW\*(C`pid\*(C'\fR is specified as \f(CW0\fR). The callback can look |
1103 | \&\fIany\fR process if \f(CW\*(C`pid\*(C'\fR is specified as \f(CW0\fR). The callback can look |
754 | at the \f(CW\*(C`rstatus\*(C'\fR member of the \f(CW\*(C`ev_child\*(C'\fR watcher structure to see |
1104 | at the \f(CW\*(C`rstatus\*(C'\fR member of the \f(CW\*(C`ev_child\*(C'\fR watcher structure to see |
755 | the status word (use the macros from \f(CW\*(C`sys/wait.h\*(C'\fR and see your systems |
1105 | the status word (use the macros from \f(CW\*(C`sys/wait.h\*(C'\fR and see your systems |
756 | \&\f(CW\*(C`waitpid\*(C'\fR documentation). The \f(CW\*(C`rpid\*(C'\fR member contains the pid of the |
1106 | \&\f(CW\*(C`waitpid\*(C'\fR documentation). The \f(CW\*(C`rpid\*(C'\fR member contains the pid of the |
757 | process causing the status change. |
1107 | process causing the status change. |
|
|
1108 | .PP |
|
|
1109 | Example: try to exit cleanly on \s-1SIGINT\s0 and \s-1SIGTERM\s0. |
|
|
1110 | .PP |
|
|
1111 | .Vb 5 |
|
|
1112 | \& static void |
|
|
1113 | \& sigint_cb (struct ev_loop *loop, struct ev_signal *w, int revents) |
|
|
1114 | \& { |
|
|
1115 | \& ev_unloop (loop, EVUNLOOP_ALL); |
|
|
1116 | \& } |
|
|
1117 | .Ve |
|
|
1118 | .PP |
|
|
1119 | .Vb 3 |
|
|
1120 | \& struct ev_signal signal_watcher; |
|
|
1121 | \& ev_signal_init (&signal_watcher, sigint_cb, SIGINT); |
|
|
1122 | \&   ev_signal_start (loop, &signal_watcher);
|
|
1123 | .Ve |
758 | .ie n .Sh """ev_idle"" \- when you've got nothing better to do" |
1124 | .ie n .Sh """ev_idle"" \- when you've got nothing better to do" |
759 | .el .Sh "\f(CWev_idle\fP \- when you've got nothing better to do" |
1125 | .el .Sh "\f(CWev_idle\fP \- when you've got nothing better to do" |
760 | .IX Subsection "ev_idle - when you've got nothing better to do" |
1126 | .IX Subsection "ev_idle - when you've got nothing better to do" |
761 | Idle watchers trigger events when there are no other events are pending |
1127 | Idle watchers trigger events when there are no other events are pending |
762 | (prepare, check and other idle watchers do not count). That is, as long |
1128 | (prepare, check and other idle watchers do not count). That is, as long |
… | |
… | |
776 | .IP "ev_idle_init (ev_signal *, callback)" 4 |
1142 | .IP "ev_idle_init (ev_signal *, callback)" 4 |
777 | .IX Item "ev_idle_init (ev_signal *, callback)" |
1143 | .IX Item "ev_idle_init (ev_signal *, callback)" |
778 | Initialises and configures the idle watcher \- it has no parameters of any |
1144 | Initialises and configures the idle watcher \- it has no parameters of any |
779 | kind. There is a \f(CW\*(C`ev_idle_set\*(C'\fR macro, but using it is utterly pointless, |
1145 | kind. There is a \f(CW\*(C`ev_idle_set\*(C'\fR macro, but using it is utterly pointless, |
780 | believe me. |
1146 | believe me. |
|
|
1147 | .PP |
|
|
1148 | Example: dynamically allocate an \f(CW\*(C`ev_idle\*(C'\fR, start it, and in the |
|
|
1149 | callback, free it. Also, use no error checking, as usual.
|
|
1150 | .PP |
|
|
1151 | .Vb 7 |
|
|
1152 | \& static void |
|
|
1153 | \& idle_cb (struct ev_loop *loop, struct ev_idle *w, int revents) |
|
|
1154 | \& { |
|
|
1155 | \& free (w); |
|
|
1156 | \& // now do something you wanted to do when the program has |
|
|
1157 | \&     // no longer anything immediate to do.
|
|
1158 | \& } |
|
|
1159 | .Ve |
|
|
1160 | .PP |
|
|
1161 | .Vb 3 |
|
|
1162 | \& struct ev_idle *idle_watcher = malloc (sizeof (struct ev_idle)); |
|
|
1163 | \& ev_idle_init (idle_watcher, idle_cb); |
|
|
1164 | \&   ev_idle_start (loop, idle_watcher);
|
|
1165 | .Ve |
781 | .ie n .Sh """ev_prepare""\fP and \f(CW""ev_check"" \- customise your event loop" |
1166 | .ie n .Sh """ev_prepare""\fP and \f(CW""ev_check"" \- customise your event loop" |
782 | .el .Sh "\f(CWev_prepare\fP and \f(CWev_check\fP \- customise your event loop" |
1167 | .el .Sh "\f(CWev_prepare\fP and \f(CWev_check\fP \- customise your event loop" |
783 | .IX Subsection "ev_prepare and ev_check - customise your event loop" |
1168 | .IX Subsection "ev_prepare and ev_check - customise your event loop" |
784 | Prepare and check watchers are usually (but not always) used in tandem: |
1169 | Prepare and check watchers are usually (but not always) used in tandem: |
785 | prepare watchers get invoked before the process blocks and check watchers |
1170 | prepare watchers get invoked before the process blocks and check watchers |
786 | afterwards. |
1171 | afterwards. |
787 | .PP |
1172 | .PP |
788 | Their main purpose is to integrate other event mechanisms into libev. This |
1173 | Their main purpose is to integrate other event mechanisms into libev and |
789 | could be used, for example, to track variable changes, implement your own |
1174 | their use is somewhat advanced. This could be used, for example, to track |
790 | watchers, integrate net-snmp or a coroutine library and lots more. |
1175 | variable changes, implement your own watchers, integrate net-snmp or a |
|
|
1176 | coroutine library and lots more. |
791 | .PP |
1177 | .PP |
792 | This is done by examining in each prepare call which file descriptors need |
1178 | This is done by examining in each prepare call which file descriptors need |
793 | to be watched by the other library, registering \f(CW\*(C`ev_io\*(C'\fR watchers for |
1179 | to be watched by the other library, registering \f(CW\*(C`ev_io\*(C'\fR watchers for |
794 | them and starting an \f(CW\*(C`ev_timer\*(C'\fR watcher for any timeouts (many libraries |
1180 | them and starting an \f(CW\*(C`ev_timer\*(C'\fR watcher for any timeouts (many libraries |
795 | provide just this functionality). Then, in the check watcher you check for |
1181 | provide just this functionality). Then, in the check watcher you check for |
… | |
… | |
813 | .IX Item "ev_check_init (ev_check *, callback)" |
1199 | .IX Item "ev_check_init (ev_check *, callback)" |
814 | .PD |
1200 | .PD |
815 | Initialises and configures the prepare or check watcher \- they have no |
1201 | Initialises and configures the prepare or check watcher \- they have no |
816 | parameters of any kind. There are \f(CW\*(C`ev_prepare_set\*(C'\fR and \f(CW\*(C`ev_check_set\*(C'\fR |
1202 | parameters of any kind. There are \f(CW\*(C`ev_prepare_set\*(C'\fR and \f(CW\*(C`ev_check_set\*(C'\fR |
817 | macros, but using them is utterly, utterly and completely pointless. |
1203 | macros, but using them is utterly, utterly and completely pointless. |
|
|
1204 | .PP |
|
|
1205 | Example: *TODO*. |
|
|
1206 | .ie n .Sh """ev_embed"" \- when one backend isn't enough" |
|
|
1207 | .el .Sh "\f(CWev_embed\fP \- when one backend isn't enough" |
|
|
1208 | .IX Subsection "ev_embed - when one backend isn't enough" |
|
|
1209 | This is a rather advanced watcher type that lets you embed one event loop |
|
|
1210 | into another. |
|
|
1211 | .PP |
|
|
1212 | There are primarily two reasons you would want that: work around bugs and |
|
|
1213 | prioritise I/O. |
|
|
1214 | .PP |
|
|
1215 | As an example for a bug workaround, the kqueue backend might only support |
|
|
1216 | sockets on some platform, so it is unusable as generic backend, but you |
|
|
1217 | still want to make use of it because you have many sockets and it scales |
|
|
1218 | so nicely. In this case, you would create a kqueue-based loop and embed it |
|
|
1219 | into your default loop (which might use e.g. poll). Overall operation will |
|
|
1220 | be a bit slower because first libev has to poll and then call kevent, but |
|
|
1221 | at least you can use both at what they are best. |
|
|
1222 | .PP |
|
|
1223 | As for prioritising I/O: rarely you have the case where some fds have |
|
|
1224 | to be watched and handled very quickly (with low latency), and even |
|
|
1225 | priorities and idle watchers might have too much overhead. In this case |
|
|
1226 | you would put all the high priority stuff in one loop and all the rest in |
|
|
1227 | a second one, and embed the second one in the first. |
|
|
1228 | .PP |
|
|
1229 | As long as the watcher is started it will automatically handle events. The |
|
|
1230 | callback will be invoked whenever some events have been handled. You can |
|
|
1231 | set the callback to \f(CW0\fR to avoid having to specify one if you are not |
|
|
1232 | interested in that. |
|
|
1233 | .PP |
|
|
1234 | Also, there have not currently been made special provisions for forking: |
|
|
1235 | when you fork, you not only have to call \f(CW\*(C`ev_loop_fork\*(C'\fR on both loops, |
|
|
1236 | but you will also have to stop and restart any \f(CW\*(C`ev_embed\*(C'\fR watchers |
|
|
1237 | yourself. |
|
|
1238 | .PP |
|
|
1239 | Unfortunately, not all backends are embeddable, only the ones returned by |
|
|
1240 | \&\f(CW\*(C`ev_embeddable_backends\*(C'\fR are, which, unfortunately, does not include any |
|
|
1241 | portable one. |
|
|
1242 | .PP |
|
|
1243 | So when you want to use this feature you will always have to be prepared |
|
|
1244 | that you cannot get an embeddable loop. The recommended way to get around |
|
|
1245 | this is to have a separate variable for your embeddable loop, try to
|
|
1246 | create it, and if that fails, use the normal loop for everything: |
|
|
1247 | .PP |
|
|
1248 | .Vb 3 |
|
|
1249 | \& struct ev_loop *loop_hi = ev_default_init (0); |
|
|
1250 | \& struct ev_loop *loop_lo = 0; |
|
|
1251 | \& struct ev_embed embed; |
|
|
1252 | .Ve |
|
|
1253 | .PP |
|
|
1254 | .Vb 5 |
|
|
1255 | \& // see if there is a chance of getting one that works |
|
|
1256 | \& // (remember that a flags value of 0 means autodetection) |
|
|
1257 | \& loop_lo = ev_embeddable_backends () & ev_recommended_backends () |
|
|
1258 | \& ? ev_loop_new (ev_embeddable_backends () & ev_recommended_backends ()) |
|
|
1259 | \& : 0; |
|
|
1260 | .Ve |
|
|
1261 | .PP |
|
|
1262 | .Vb 8 |
|
|
1263 | \& // if we got one, then embed it, otherwise default to loop_hi |
|
|
1264 | \& if (loop_lo) |
|
|
1265 | \& { |
|
|
1266 | \& ev_embed_init (&embed, 0, loop_lo); |
|
|
1267 | \& ev_embed_start (loop_hi, &embed); |
|
|
1268 | \& } |
|
|
1269 | \& else |
|
|
1270 | \& loop_lo = loop_hi; |
|
|
1271 | .Ve |
|
|
1272 | .IP "ev_embed_init (ev_embed *, callback, struct ev_loop *loop)" 4 |
|
|
1273 | .IX Item "ev_embed_init (ev_embed *, callback, struct ev_loop *loop)" |
|
|
1274 | .PD 0 |
|
|
1275 | .IP "ev_embed_set (ev_embed *, callback, struct ev_loop *loop)" 4 |
|
|
1276 | .IX Item "ev_embed_set (ev_embed *, callback, struct ev_loop *loop)" |
|
|
1277 | .PD |
|
|
1278 | Configures the watcher to embed the given loop, which must be embeddable. |
818 | .SH "OTHER FUNCTIONS" |
1279 | .SH "OTHER FUNCTIONS" |
819 | .IX Header "OTHER FUNCTIONS" |
1280 | .IX Header "OTHER FUNCTIONS" |
820 | There are some other functions of possible interest. Described. Here. Now. |
1281 | There are some other functions of possible interest. Described. Here. Now. |
821 | .IP "ev_once (loop, int fd, int events, ev_tstamp timeout, callback)" 4 |
1282 | .IP "ev_once (loop, int fd, int events, ev_tstamp timeout, callback)" 4 |
822 | .IX Item "ev_once (loop, int fd, int events, ev_tstamp timeout, callback)" |
1283 | .IX Item "ev_once (loop, int fd, int events, ev_tstamp timeout, callback)" |