… | |
… | |
127 | .\} |
127 | .\} |
128 | .rm #[ #] #H #V #F C |
128 | .rm #[ #] #H #V #F C |
129 | .\" ======================================================================== |
129 | .\" ======================================================================== |
130 | .\" |
130 | .\" |
131 | .IX Title ""<STANDARD INPUT>" 1" |
131 | .IX Title ""<STANDARD INPUT>" 1" |
132 | .TH "<STANDARD INPUT>" 1 "2007-11-13" "perl v5.8.8" "User Contributed Perl Documentation" |
132 | .TH "<STANDARD INPUT>" 1 "2007-11-23" "perl v5.8.8" "User Contributed Perl Documentation" |
133 | .SH "NAME" |
133 | .SH "NAME" |
134 | libev \- a high performance full\-featured event loop written in C |
134 | libev \- a high performance full\-featured event loop written in C |
135 | .SH "SYNOPSIS" |
135 | .SH "SYNOPSIS" |
136 | .IX Header "SYNOPSIS" |
136 | .IX Header "SYNOPSIS" |
137 | .Vb 1 |
137 | .Vb 1 |
… | |
… | |
180 | .IX Header "GLOBAL FUNCTIONS" |
180 | .IX Header "GLOBAL FUNCTIONS" |
181 | These functions can be called anytime, even before initialising the |
181 | These functions can be called anytime, even before initialising the |
182 | library in any way. |
182 | library in any way. |
183 | .IP "ev_tstamp ev_time ()" 4 |
183 | .IP "ev_tstamp ev_time ()" 4 |
184 | .IX Item "ev_tstamp ev_time ()" |
184 | .IX Item "ev_tstamp ev_time ()" |
185 | Returns the current time as libev would use it. |
185 | Returns the current time as libev would use it. Please note that the |
|
|
186 | \&\f(CW\*(C`ev_now\*(C'\fR function is usually faster and also often returns the timestamp |
|
|
187 | you actually want to know. |
186 | .IP "int ev_version_major ()" 4 |
188 | .IP "int ev_version_major ()" 4 |
187 | .IX Item "int ev_version_major ()" |
189 | .IX Item "int ev_version_major ()" |
188 | .PD 0 |
190 | .PD 0 |
189 | .IP "int ev_version_minor ()" 4 |
191 | .IP "int ev_version_minor ()" 4 |
190 | .IX Item "int ev_version_minor ()" |
192 | .IX Item "int ev_version_minor ()" |
… | |
… | |
197 | .Sp |
199 | .Sp |
198 | Usually, it's a good idea to terminate if the major versions mismatch, |
200 | Usually, it's a good idea to terminate if the major versions mismatch, |
199 | as this indicates an incompatible change. Minor versions are usually |
201 | as this indicates an incompatible change. Minor versions are usually |
200 | compatible to older versions, so a larger minor version alone is usually |
202 | compatible to older versions, so a larger minor version alone is usually |
201 | not a problem. |
203 | not a problem. |
|
|
204 | .IP "unsigned int ev_supported_backends ()" 4 |
|
|
205 | .IX Item "unsigned int ev_supported_backends ()" |
|
|
206 | Return the set of all backends (i.e. their corresponding \f(CW\*(C`EV_BACKEND_*\*(C'\fR |
|
|
207 | value) compiled into this binary of libev (independent of their |
|
|
208 | availability on the system you are running on). See \f(CW\*(C`ev_default_loop\*(C'\fR for |
|
|
209 | a description of the set values. |
|
|
210 | .IP "unsigned int ev_recommended_backends ()" 4 |
|
|
211 | .IX Item "unsigned int ev_recommended_backends ()" |
|
|
212 | Return the set of all backends compiled into this binary of libev and also |
|
|
213 | recommended for this platform. This set is often smaller than the one |
|
|
214 | returned by \f(CW\*(C`ev_supported_backends\*(C'\fR, as for example kqueue is broken on |
|
|
215 | most BSDs and will not be autodetected unless you explicitly request it |
|
|
216 | (assuming you know what you are doing). This is the set of backends that |
|
|
217 | \&\f(CW\*(C`EVFLAG_AUTO\*(C'\fR will probe for. |
202 | .IP "ev_set_allocator (void *(*cb)(void *ptr, long size))" 4 |
218 | .IP "ev_set_allocator (void *(*cb)(void *ptr, long size))" 4 |
203 | .IX Item "ev_set_allocator (void *(*cb)(void *ptr, long size))" |
219 | .IX Item "ev_set_allocator (void *(*cb)(void *ptr, long size))" |
204 | Sets the allocation function to use (the prototype is similar to the |
220 | Sets the allocation function to use (the prototype is similar to the |
205 | realloc C function, the semantics are identical). It is used to allocate |
221 | realloc C function, the semantics are identical). It is used to allocate |
206 | and free memory (no surprises here). If it returns zero when memory |
222 | and free memory (no surprises here). If it returns zero when memory |
… | |
… | |
234 | .IP "struct ev_loop *ev_default_loop (unsigned int flags)" 4 |
250 | .IP "struct ev_loop *ev_default_loop (unsigned int flags)" 4 |
235 | .IX Item "struct ev_loop *ev_default_loop (unsigned int flags)" |
251 | .IX Item "struct ev_loop *ev_default_loop (unsigned int flags)" |
236 | This will initialise the default event loop if it hasn't been initialised |
252 | This will initialise the default event loop if it hasn't been initialised |
237 | yet and return it. If the default loop could not be initialised, returns |
253 | yet and return it. If the default loop could not be initialised, returns |
238 | false. If it already was initialised it simply returns it (and ignores the |
254 | false. If it already was initialised it simply returns it (and ignores the |
239 | flags). |
255 | flags. If that is troubling you, check \f(CW\*(C`ev_backend ()\*(C'\fR afterwards). |
240 | .Sp |
256 | .Sp |
241 | If you don't know what event loop to use, use the one returned from this |
257 | If you don't know what event loop to use, use the one returned from this |
242 | function. |
258 | function. |
243 | .Sp |
259 | .Sp |
244 | The flags argument can be used to specify special behaviour or specific |
260 | The flags argument can be used to specify special behaviour or specific |
245 | backends to use, and is usually specified as 0 (or \s-1EVFLAG_AUTO\s0). |
261 | backends to use, and is usually specified as \f(CW0\fR (or \s-1EVFLAG_AUTO\s0). |
246 | .Sp |
262 | .Sp |
247 | It supports the following flags: |
263 | It supports the following flags: |
248 | .RS 4 |
264 | .RS 4 |
249 | .ie n .IP """EVFLAG_AUTO""" 4 |
265 | .ie n .IP """EVFLAG_AUTO""" 4 |
250 | .el .IP "\f(CWEVFLAG_AUTO\fR" 4 |
266 | .el .IP "\f(CWEVFLAG_AUTO\fR" 4 |
… | |
… | |
258 | or setgid) then libev will \fInot\fR look at the environment variable |
274 | or setgid) then libev will \fInot\fR look at the environment variable |
259 | \&\f(CW\*(C`LIBEV_FLAGS\*(C'\fR. Otherwise (the default), this environment variable will |
275 | \&\f(CW\*(C`LIBEV_FLAGS\*(C'\fR. Otherwise (the default), this environment variable will |
260 | override the flags completely if it is found in the environment. This is |
276 | override the flags completely if it is found in the environment. This is |
261 | useful to try out specific backends to test their performance, or to work |
277 | useful to try out specific backends to test their performance, or to work |
262 | around bugs. |
278 | around bugs. |
263 | .ie n .IP """EVMETHOD_SELECT"" (portable select backend)" 4 |
279 | .ie n .IP """EVBACKEND_SELECT"" (value 1, portable select backend)" 4 |
264 | .el .IP "\f(CWEVMETHOD_SELECT\fR (portable select backend)" 4 |
280 | .el .IP "\f(CWEVBACKEND_SELECT\fR (value 1, portable select backend)" 4 |
265 | .IX Item "EVMETHOD_SELECT (portable select backend)" |
281 | .IX Item "EVBACKEND_SELECT (value 1, portable select backend)" |
266 | .PD 0 |
282 | This is your standard \fIselect\fR\|(2) backend. Not \fIcompletely\fR standard, as |
|
|
283 | libev tries to roll its own fd_set with no limits on the number of fds, |
|
|
284 | but if that fails, expect a fairly low limit on the number of fds when |
|
|
285 | using this backend. It doesn't scale too well (O(highest_fd)), but it's usually
|
|
286 | the fastest backend for a low number of fds. |
267 | .ie n .IP """EVMETHOD_POLL"" (poll backend, available everywhere except on windows)" 4 |
287 | .ie n .IP """EVBACKEND_POLL"" (value 2, poll backend, available everywhere except on windows)" 4 |
268 | .el .IP "\f(CWEVMETHOD_POLL\fR (poll backend, available everywhere except on windows)" 4 |
288 | .el .IP "\f(CWEVBACKEND_POLL\fR (value 2, poll backend, available everywhere except on windows)" 4 |
269 | .IX Item "EVMETHOD_POLL (poll backend, available everywhere except on windows)" |
289 | .IX Item "EVBACKEND_POLL (value 2, poll backend, available everywhere except on windows)" |
|
|
290 | And this is your standard \fIpoll\fR\|(2) backend. It's more complicated than |
|
|
291 | select, but handles sparse fds better and has no artificial limit on the |
|
|
292 | number of fds you can use (except it will slow down considerably with a |
|
|
293 | lot of inactive fds). It scales similarly to select, i.e. O(total_fds). |
270 | .ie n .IP """EVMETHOD_EPOLL"" (linux only)" 4 |
294 | .ie n .IP """EVBACKEND_EPOLL"" (value 4, Linux)" 4 |
271 | .el .IP "\f(CWEVMETHOD_EPOLL\fR (linux only)" 4 |
295 | .el .IP "\f(CWEVBACKEND_EPOLL\fR (value 4, Linux)" 4 |
272 | .IX Item "EVMETHOD_EPOLL (linux only)" |
296 | .IX Item "EVBACKEND_EPOLL (value 4, Linux)" |
273 | .ie n .IP """EVMETHOD_KQUEUE"" (some bsds only)" 4 |
297 | For few fds, this backend is a little slower than poll and select,
274 | .el .IP "\f(CWEVMETHOD_KQUEUE\fR (some bsds only)" 4 |
298 | but it scales phenomenally better. While poll and select usually scale like |
275 | .IX Item "EVMETHOD_KQUEUE (some bsds only)" |
299 | O(total_fds) where n is the total number of fds (or the highest fd), epoll scales |
|
|
300 | either O(1) or O(active_fds). |
|
|
301 | .Sp |
|
|
302 | While stopping and starting an I/O watcher in the same iteration will |
|
|
303 | result in some caching, there is still a syscall per such incident |
|
|
304 | (because the fd could point to a different file description now), so it's
|
|
305 | best to avoid that. Also, \fIdup()\fRed file descriptors might not work very |
|
|
306 | well if you register events for both fds. |
|
|
307 | .Sp |
|
|
308 | Please note that epoll sometimes generates spurious notifications, so you |
|
|
309 | need to use non-blocking I/O or other means to avoid blocking when no data |
|
|
310 | (or space) is available. |
|
|
311 | .ie n .IP """EVBACKEND_KQUEUE"" (value 8, most \s-1BSD\s0 clones)" 4 |
|
|
312 | .el .IP "\f(CWEVBACKEND_KQUEUE\fR (value 8, most \s-1BSD\s0 clones)" 4 |
|
|
313 | .IX Item "EVBACKEND_KQUEUE (value 8, most BSD clones)" |
|
|
314 | Kqueue deserves special mention, as at the time of this writing, it |
|
|
315 | was broken on all BSDs except NetBSD (usually it doesn't work with |
|
|
316 | anything but sockets and pipes, except on Darwin, where of course it's
|
|
317 | completely useless). For this reason it's not being \*(L"autodetected\*(R" unless
|
|
318 | you explicitly specify the flags (i.e. you don't use \s-1EVFLAG_AUTO\s0). |
|
|
319 | .Sp |
|
|
320 | It scales in the same way as the epoll backend, but the interface to the |
|
|
321 | kernel is more efficient (which says nothing about its actual speed, of |
|
|
322 | course). While starting and stopping an I/O watcher does not cause an |
|
|
323 | extra syscall as with epoll, it still adds up to four event changes per |
|
|
324 | incident, so it's best to avoid that.
276 | .ie n .IP """EVMETHOD_DEVPOLL"" (solaris 8 only)" 4 |
325 | .ie n .IP """EVBACKEND_DEVPOLL"" (value 16, Solaris 8)" 4 |
277 | .el .IP "\f(CWEVMETHOD_DEVPOLL\fR (solaris 8 only)" 4 |
326 | .el .IP "\f(CWEVBACKEND_DEVPOLL\fR (value 16, Solaris 8)" 4 |
278 | .IX Item "EVMETHOD_DEVPOLL (solaris 8 only)" |
327 | .IX Item "EVBACKEND_DEVPOLL (value 16, Solaris 8)" |
|
|
328 | This is not implemented yet (and might never be). |
279 | .ie n .IP """EVMETHOD_PORT"" (solaris 10 only)" 4 |
329 | .ie n .IP """EVBACKEND_PORT"" (value 32, Solaris 10)" 4 |
280 | .el .IP "\f(CWEVMETHOD_PORT\fR (solaris 10 only)" 4 |
330 | .el .IP "\f(CWEVBACKEND_PORT\fR (value 32, Solaris 10)" 4 |
281 | .IX Item "EVMETHOD_PORT (solaris 10 only)" |
331 | .IX Item "EVBACKEND_PORT (value 32, Solaris 10)" |
282 | .PD |
332 | This uses the Solaris 10 port mechanism. As with everything on Solaris, |
283 | If one or more of these are ored into the flags value, then only these |
333 | it's really slow, but it still scales very well (O(active_fds)). |
284 | backends will be tried (in the reverse order as given here). If none are
334 | .Sp |
285 | specified, any backend will do. |
335 | Please note that solaris ports can result in a lot of spurious |
|
|
336 | notifications, so you need to use non-blocking I/O or other means to avoid |
|
|
337 | blocking when no data (or space) is available. |
|
|
338 | .ie n .IP """EVBACKEND_ALL""" 4 |
|
|
339 | .el .IP "\f(CWEVBACKEND_ALL\fR" 4 |
|
|
340 | .IX Item "EVBACKEND_ALL" |
|
|
341 | Try all backends (even potentially broken ones that wouldn't be tried |
|
|
342 | with \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). Since this is a mask, you can do stuff such as |
|
|
343 | \&\f(CW\*(C`EVBACKEND_ALL & ~EVBACKEND_KQUEUE\*(C'\fR. |
286 | .RE |
344 | .RE |
287 | .RS 4 |
345 | .RS 4 |
|
|
346 | .Sp |
|
|
347 | If one or more of these are ored into the flags value, then only these |
|
|
348 | backends will be tried (in the reverse order as given here). If none are |
|
|
349 | specified, most compiled-in backends will be tried, usually in reverse
|
|
350 | order of their flag values :) |
288 | .RE |
351 | .RE |
289 | .IP "struct ev_loop *ev_loop_new (unsigned int flags)" 4 |
352 | .IP "struct ev_loop *ev_loop_new (unsigned int flags)" 4 |
290 | .IX Item "struct ev_loop *ev_loop_new (unsigned int flags)" |
353 | .IX Item "struct ev_loop *ev_loop_new (unsigned int flags)" |
291 | Similar to \f(CW\*(C`ev_default_loop\*(C'\fR, but always creates a new event loop that is |
354 | Similar to \f(CW\*(C`ev_default_loop\*(C'\fR, but always creates a new event loop that is |
292 | always distinct from the default loop. Unlike the default loop, it cannot |
355 | always distinct from the default loop. Unlike the default loop, it cannot |
… | |
… | |
306 | This function reinitialises the kernel state for backends that have |
369 | This function reinitialises the kernel state for backends that have |
307 | one. Despite the name, you can call it anytime, but it makes most sense |
370 | one. Despite the name, you can call it anytime, but it makes most sense |
308 | after forking, in either the parent or child process (or both, but that |
371 | after forking, in either the parent or child process (or both, but that |
309 | again makes little sense). |
372 | again makes little sense). |
310 | .Sp |
373 | .Sp |
311 | You \fImust\fR call this function after forking if and only if you want to |
374 | You \fImust\fR call this function in the child process after forking if and |
312 | use the event library in both processes. If you just fork+exec, you don't |
375 | only if you want to use the event library in both processes. If you just |
313 | have to call it. |
376 | fork+exec, you don't have to call it. |
314 | .Sp |
377 | .Sp |
315 | The function itself is quite fast and it's usually not a problem to call |
378 | The function itself is quite fast and it's usually not a problem to call |
316 | it just in case after a fork. To make this easy, the function will fit in |
379 | it just in case after a fork. To make this easy, the function will fit in |
317 | quite nicely into a call to \f(CW\*(C`pthread_atfork\*(C'\fR: |
380 | quite nicely into a call to \f(CW\*(C`pthread_atfork\*(C'\fR: |
318 | .Sp |
381 | .Sp |
319 | .Vb 1 |
382 | .Vb 1 |
320 | \& pthread_atfork (0, 0, ev_default_fork); |
383 | \& pthread_atfork (0, 0, ev_default_fork); |
321 | .Ve |
384 | .Ve |
|
|
385 | .Sp |
|
|
386 | At the moment, \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and \f(CW\*(C`EVBACKEND_POLL\*(C'\fR are safe to use |
|
|
387 | without calling this function, so if you force one of those backends you |
|
|
388 | do not need to care. |
322 | .IP "ev_loop_fork (loop)" 4 |
389 | .IP "ev_loop_fork (loop)" 4 |
323 | .IX Item "ev_loop_fork (loop)" |
390 | .IX Item "ev_loop_fork (loop)" |
324 | Like \f(CW\*(C`ev_default_fork\*(C'\fR, but acts on an event loop created by |
391 | Like \f(CW\*(C`ev_default_fork\*(C'\fR, but acts on an event loop created by |
325 | \&\f(CW\*(C`ev_loop_new\*(C'\fR. Yes, you have to call this on every allocated event loop |
392 | \&\f(CW\*(C`ev_loop_new\*(C'\fR. Yes, you have to call this on every allocated event loop |
326 | after fork, and how you do this is entirely your own problem. |
393 | after fork, and how you do this is entirely your own problem. |
327 | .IP "unsigned int ev_method (loop)" 4 |
394 | .IP "unsigned int ev_backend (loop)" 4 |
328 | .IX Item "unsigned int ev_method (loop)" |
395 | .IX Item "unsigned int ev_backend (loop)" |
329 | Returns one of the \f(CW\*(C`EVMETHOD_*\*(C'\fR flags indicating the event backend in |
396 | Returns one of the \f(CW\*(C`EVBACKEND_*\*(C'\fR flags indicating the event backend in |
330 | use. |
397 | use. |
331 | .IP "ev_tstamp ev_now (loop)" 4 |
398 | .IP "ev_tstamp ev_now (loop)" 4 |
332 | .IX Item "ev_tstamp ev_now (loop)" |
399 | .IX Item "ev_tstamp ev_now (loop)" |
333 | Returns the current \*(L"event loop time\*(R", which is the time the event loop |
400 | Returns the current \*(L"event loop time\*(R", which is the time the event loop |
334 | got events and started processing them. This timestamp does not change |
401 | got events and started processing them. This timestamp does not change |
… | |
… | |
354 | one iteration of the loop. |
421 | one iteration of the loop. |
355 | .Sp |
422 | .Sp |
356 | This flags value could be used to implement alternative looping |
423 | This flags value could be used to implement alternative looping |
357 | constructs, but the \f(CW\*(C`prepare\*(C'\fR and \f(CW\*(C`check\*(C'\fR watchers provide a better and |
424 | constructs, but the \f(CW\*(C`prepare\*(C'\fR and \f(CW\*(C`check\*(C'\fR watchers provide a better and |
358 | more generic mechanism. |
425 | more generic mechanism. |
|
|
426 | .Sp |
|
|
427 | Here are the gory details of what ev_loop does: |
|
|
428 | .Sp |
|
|
429 | .Vb 15 |
|
|
430 | \& 1. If there are no active watchers (reference count is zero), return. |
|
|
431 | \& 2. Queue and immediately call all prepare watchers. |
|
|
432 | \& 3. If we have been forked, recreate the kernel state. |
|
|
433 | \& 4. Update the kernel state with all outstanding changes. |
|
|
434 | \& 5. Update the "event loop time". |
|
|
435 | \& 6. Calculate for how long to block. |
|
|
436 | \& 7. Block the process, waiting for events. |
|
|
437 | \& 8. Update the "event loop time" and do time jump handling. |
|
|
438 | \& 9. Queue all outstanding timers. |
|
|
439 | \& 10. Queue all outstanding periodics. |
|
|
440 | \& 11. If no events are pending now, queue all idle watchers. |
|
|
441 | \& 12. Queue all check watchers. |
|
|
442 | \& 13. Call all queued watchers in reverse order (i.e. check watchers first). |
|
|
443 | \& 14. If ev_unloop has been called or EVLOOP_ONESHOT or EVLOOP_NONBLOCK |
|
|
444 | \& was used, return, otherwise continue with step #1. |
|
|
445 | .Ve |
359 | .IP "ev_unloop (loop, how)" 4 |
446 | .IP "ev_unloop (loop, how)" 4 |
360 | .IX Item "ev_unloop (loop, how)" |
447 | .IX Item "ev_unloop (loop, how)" |
361 | Can be used to make a call to \f(CW\*(C`ev_loop\*(C'\fR return early (but only after it |
448 | Can be used to make a call to \f(CW\*(C`ev_loop\*(C'\fR return early (but only after it |
362 | has processed all outstanding events). The \f(CW\*(C`how\*(C'\fR argument must be either |
449 | has processed all outstanding events). The \f(CW\*(C`how\*(C'\fR argument must be either |
363 | \&\f(CW\*(C`EVUNLOOP_ONE\*(C'\fR, which will make the innermost \f(CW\*(C`ev_loop\*(C'\fR call return, or |
450 | \&\f(CW\*(C`EVUNLOOP_ONE\*(C'\fR, which will make the innermost \f(CW\*(C`ev_loop\*(C'\fR call return, or |
… | |
… | |
421 | *)\*(C'\fR), and you can stop watching for events at any time by calling the |
508 | *)\*(C'\fR), and you can stop watching for events at any time by calling the |
422 | corresponding stop function (\f(CW\*(C`ev_<type>_stop (loop, watcher *)\*(C'\fR. |
509 | corresponding stop function (\f(CW\*(C`ev_<type>_stop (loop, watcher *)\*(C'\fR. |
423 | .PP |
510 | .PP |
424 | As long as your watcher is active (has been started but not stopped) you |
511 | As long as your watcher is active (has been started but not stopped) you |
425 | must not touch the values stored in it. Most specifically you must never |
512 | must not touch the values stored in it. Most specifically you must never |
426 | reinitialise it or call its set method. |
513 | reinitialise it or call its set macro. |
427 | .PP |
514 | .PP |
428 | You can check whether an event is active by calling the \f(CW\*(C`ev_is_active |
515 | You can check whether an event is active by calling the \f(CW\*(C`ev_is_active |
429 | (watcher *)\*(C'\fR macro. To see whether an event is outstanding (but the |
516 | (watcher *)\*(C'\fR macro. To see whether an event is outstanding (but the |
430 | callback for it has not been called yet) you can use the \f(CW\*(C`ev_is_pending |
517 | callback for it has not been called yet) you can use the \f(CW\*(C`ev_is_pending |
431 | (watcher *)\*(C'\fR macro. |
518 | (watcher *)\*(C'\fR macro. |
… | |
… | |
551 | descriptors correctly if you register interest in two or more fds pointing |
638 | descriptors correctly if you register interest in two or more fds pointing |
552 | to the same underlying file/socket etc. description (that is, they share |
639 | to the same underlying file/socket etc. description (that is, they share |
553 | the same underlying \*(L"file open\*(R"). |
640 | the same underlying \*(L"file open\*(R"). |
554 | .PP |
641 | .PP |
555 | If you must do this, then force the use of a known-to-be-good backend |
642 | If you must do this, then force the use of a known-to-be-good backend |
556 | (at the time of this writing, this includes only \s-1EVMETHOD_SELECT\s0 and |
643 | (at the time of this writing, this includes only \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and |
557 | \&\s-1EVMETHOD_POLL\s0). |
644 | \&\f(CW\*(C`EVBACKEND_POLL\*(C'\fR). |
558 | .IP "ev_io_init (ev_io *, callback, int fd, int events)" 4 |
645 | .IP "ev_io_init (ev_io *, callback, int fd, int events)" 4 |
559 | .IX Item "ev_io_init (ev_io *, callback, int fd, int events)" |
646 | .IX Item "ev_io_init (ev_io *, callback, int fd, int events)" |
560 | .PD 0 |
647 | .PD 0 |
561 | .IP "ev_io_set (ev_io *, int fd, int events)" 4 |
648 | .IP "ev_io_set (ev_io *, int fd, int events)" 4 |
562 | .IX Item "ev_io_set (ev_io *, int fd, int events)" |
649 | .IX Item "ev_io_set (ev_io *, int fd, int events)" |
563 | .PD |
650 | .PD |
564 | Configures an \f(CW\*(C`ev_io\*(C'\fR watcher. The fd is the file descriptor to receive
651 | Configures an \f(CW\*(C`ev_io\*(C'\fR watcher. The fd is the file descriptor to receive
565 | events for and events is either \f(CW\*(C`EV_READ\*(C'\fR, \f(CW\*(C`EV_WRITE\*(C'\fR or \f(CW\*(C`EV_READ | |
652 | events for and events is either \f(CW\*(C`EV_READ\*(C'\fR, \f(CW\*(C`EV_WRITE\*(C'\fR or \f(CW\*(C`EV_READ | |
566 | EV_WRITE\*(C'\fR to receive the given events. |
653 | EV_WRITE\*(C'\fR to receive the given events. |
|
|
654 | .Sp |
|
|
655 | Please note that most of the more scalable backend mechanisms (for example |
|
|
656 | epoll and solaris ports) can result in spurious readiness notifications
|
|
657 | for file descriptors, so you practically need to use non-blocking I/O (and |
|
|
658 | treat callback invocation as hint only), or retest separately with a safe |
|
|
659 | interface before doing I/O (XLib can do this), or force the use of either |
|
|
660 | \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR, which don't suffer from this |
|
|
661 | problem. Also note that it is quite easy to have your callback invoked |
|
|
662 | when the readiness condition is no longer valid even when employing
|
|
663 | typical ways of handling events, so it's a good idea to use non-blocking
|
|
664 | I/O unconditionally. |
567 | .ie n .Sh """ev_timer"" \- relative and optionally recurring timeouts" |
665 | .ie n .Sh """ev_timer"" \- relative and optionally recurring timeouts" |
568 | .el .Sh "\f(CWev_timer\fP \- relative and optionally recurring timeouts" |
666 | .el .Sh "\f(CWev_timer\fP \- relative and optionally recurring timeouts" |
569 | .IX Subsection "ev_timer - relative and optionally recurring timeouts" |
667 | .IX Subsection "ev_timer - relative and optionally recurring timeouts" |
570 | Timer watchers are simple relative timers that generate an event after a |
668 | Timer watchers are simple relative timers that generate an event after a |
571 | given time, and optionally repeating in regular intervals after that. |
669 | given time, and optionally repeating in regular intervals after that. |
572 | .PP |
670 | .PP |
573 | The timers are based on real time, that is, if you register an event that |
671 | The timers are based on real time, that is, if you register an event that |
574 | times out after an hour and you reset your system clock to last years |
672 | times out after an hour and you reset your system clock to last years |
575 | time, it will still time out after (roughly) an hour. \*(L"Roughly\*(R" because
673 | time, it will still time out after (roughly) an hour. \*(L"Roughly\*(R" because
576 | detecting time jumps is hard, and some inaccuracies are unavoidable (the
674 | detecting time jumps is hard, and some inaccuracies are unavoidable (the |
577 | monotonic clock option helps a lot here). |
675 | monotonic clock option helps a lot here). |
578 | .PP |
676 | .PP |
579 | The relative timeouts are calculated relative to the \f(CW\*(C`ev_now ()\*(C'\fR |
677 | The relative timeouts are calculated relative to the \f(CW\*(C`ev_now ()\*(C'\fR |
580 | time. This is usually the right thing as this timestamp refers to the time |
678 | time. This is usually the right thing as this timestamp refers to the time |
581 | of the event triggering whatever timeout you are modifying/starting. If |
679 | of the event triggering whatever timeout you are modifying/starting. If |
582 | you suspect event processing to be delayed and you *need* to base the timeout |
680 | you suspect event processing to be delayed and you \fIneed\fR to base the timeout |
583 | on the current time, use something like this to adjust for this: |
681 | on the current time, use something like this to adjust for this: |
584 | .PP |
682 | .PP |
585 | .Vb 1 |
683 | .Vb 1 |
586 | \& ev_timer_set (&timer, after + ev_now () - ev_time (), 0.); |
684 | \& ev_timer_set (&timer, after + ev_now () - ev_time (), 0.); |
587 | .Ve |
685 | .Ve |
|
|
686 | .PP |
|
|
687 | The callback is guaranteed to be invoked only when its timeout has passed,
|
|
688 | but if multiple timers become ready during the same loop iteration then |
|
|
689 | order of execution is undefined. |
588 | .IP "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" 4 |
690 | .IP "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" 4 |
589 | .IX Item "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" |
691 | .IX Item "ev_timer_init (ev_timer *, callback, ev_tstamp after, ev_tstamp repeat)" |
590 | .PD 0 |
692 | .PD 0 |
591 | .IP "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" 4 |
693 | .IP "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" 4 |
592 | .IX Item "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" |
694 | .IX Item "ev_timer_set (ev_timer *, ev_tstamp after, ev_tstamp repeat)" |
… | |
… | |
634 | roughly 10 seconds later and of course not if you reset your system time |
736 | roughly 10 seconds later and of course not if you reset your system time |
635 | again). |
737 | again). |
636 | .PP |
738 | .PP |
637 | They can also be used to implement vastly more complex timers, such as |
739 | They can also be used to implement vastly more complex timers, such as |
638 | triggering an event on each midnight, local time.
740 | triggering an event on each midnight, local time.
|
|
741 | .PP |
|
|
742 | As with timers, the callback is guaranteed to be invoked only when the
|
|
743 | time (\f(CW\*(C`at\*(C'\fR) has been passed, but if multiple periodic timers become ready |
|
|
744 | during the same loop iteration then order of execution is undefined. |
639 | .IP "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" 4 |
745 | .IP "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" 4 |
640 | .IX Item "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" |
746 | .IX Item "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" |
641 | .PD 0 |
747 | .PD 0 |
642 | .IP "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" 4 |
748 | .IP "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" 4 |
643 | .IX Item "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" |
749 | .IX Item "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" |