… | |
… | |
122 | .\} |
122 | .\} |
123 | .rm #[ #] #H #V #F C |
123 | .rm #[ #] #H #V #F C |
124 | .\" ======================================================================== |
124 | .\" ======================================================================== |
125 | .\" |
125 | .\" |
126 | .IX Title "LIBEV 3" |
126 | .IX Title "LIBEV 3" |
127 | .TH LIBEV 3 "2010-11-03" "libev-4.01" "libev - high performance full featured event loop" |
127 | .TH LIBEV 3 "2011-01-11" "libev-4.03" "libev - high performance full featured event loop" |
128 | .\" For nroff, turn off justification. Always turn off hyphenation; it makes |
128 | .\" For nroff, turn off justification. Always turn off hyphenation; it makes |
129 | .\" way too many mistakes in technical documents. |
129 | .\" way too many mistakes in technical documents. |
130 | .if n .ad l |
130 | .if n .ad l |
131 | .nh |
131 | .nh |
132 | .SH "NAME" |
132 | .SH "NAME" |
… | |
… | |
421 | \& } |
421 | \& } |
422 | \& |
422 | \& |
423 | \& ... |
423 | \& ... |
424 | \& ev_set_syserr_cb (fatal_error); |
424 | \& ev_set_syserr_cb (fatal_error); |
425 | .Ve |
425 | .Ve |
|
|
426 | .IP "ev_feed_signal (int signum)" 4 |
|
|
427 | .IX Item "ev_feed_signal (int signum)" |
|
|
428 | This function can be used to \*(L"simulate\*(R" a signal receive. It is completely |
|
|
429 | safe to call this function at any time, from any context, including signal |
|
|
430 | handlers or random threads. |
|
|
431 | .Sp |
|
|
432 | Its main use is to customise signal handling in your process, especially |
|
|
433 | in the presence of threads. For example, you could block signals |
|
|
434 | by default in all threads (and specifying \f(CW\*(C`EVFLAG_NOSIGMASK\*(C'\fR when |
|
|
435 | creating any loops), and in one thread, use \f(CW\*(C`sigwait\*(C'\fR or any other |
|
|
436 | mechanism to wait for signals, then \*(L"deliver\*(R" them to libev by calling |
|
|
437 | \&\f(CW\*(C`ev_feed_signal\*(C'\fR. |
426 | .SH "FUNCTIONS CONTROLLING EVENT LOOPS" |
438 | .SH "FUNCTIONS CONTROLLING EVENT LOOPS" |
427 | .IX Header "FUNCTIONS CONTROLLING EVENT LOOPS" |
439 | .IX Header "FUNCTIONS CONTROLLING EVENT LOOPS" |
428 | An event loop is described by a \f(CW\*(C`struct ev_loop *\*(C'\fR (the \f(CW\*(C`struct\*(C'\fR is |
440 | An event loop is described by a \f(CW\*(C`struct ev_loop *\*(C'\fR (the \f(CW\*(C`struct\*(C'\fR is |
429 | \&\fInot\fR optional in this case unless libev 3 compatibility is disabled, as |
441 | \&\fInot\fR optional in this case unless libev 3 compatibility is disabled, as |
430 | libev 3 had an \f(CW\*(C`ev_loop\*(C'\fR function colliding with the struct name). |
442 | libev 3 had an \f(CW\*(C`ev_loop\*(C'\fR function colliding with the struct name). |
… | |
… | |
475 | .IP "struct ev_loop *ev_loop_new (unsigned int flags)" 4 |
487 | .IP "struct ev_loop *ev_loop_new (unsigned int flags)" 4 |
476 | .IX Item "struct ev_loop *ev_loop_new (unsigned int flags)" |
488 | .IX Item "struct ev_loop *ev_loop_new (unsigned int flags)" |
477 | This will create and initialise a new event loop object. If the loop |
489 | This will create and initialise a new event loop object. If the loop |
478 | could not be initialised, returns false. |
490 | could not be initialised, returns false. |
479 | .Sp |
491 | .Sp |
480 | Note that this function \fIis\fR thread-safe, and one common way to use |
492 | This function is thread-safe, and one common way to use libev with |
481 | libev with threads is indeed to create one loop per thread, and using the |
493 | threads is indeed to create one loop per thread, and using the default |
482 | default loop in the \*(L"main\*(R" or \*(L"initial\*(R" thread. |
494 | loop in the \*(L"main\*(R" or \*(L"initial\*(R" thread. |
483 | .Sp |
495 | .Sp |
484 | The flags argument can be used to specify special behaviour or specific |
496 | The flags argument can be used to specify special behaviour or specific |
485 | backends to use, and is usually specified as \f(CW0\fR (or \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). |
497 | backends to use, and is usually specified as \f(CW0\fR (or \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). |
486 | .Sp |
498 | .Sp |
487 | The following flags are supported: |
499 | The following flags are supported: |
… | |
… | |
537 | threads that are not interested in handling them. |
549 | threads that are not interested in handling them. |
538 | .Sp |
550 | .Sp |
539 | Signalfd will not be used by default as this changes your signal mask, and |
551 | Signalfd will not be used by default as this changes your signal mask, and |
540 | there are a lot of shoddy libraries and programs (glib's threadpool for |
552 | there are a lot of shoddy libraries and programs (glib's threadpool for |
541 | example) that can't properly initialise their signal masks. |
553 | example) that can't properly initialise their signal masks. |
|
|
554 | .ie n .IP """EVFLAG_NOSIGMASK""" 4 |
|
|
555 | .el .IP "\f(CWEVFLAG_NOSIGMASK\fR" 4 |
|
|
556 | .IX Item "EVFLAG_NOSIGMASK" |
|
|
557 | When this flag is specified, then libev will avoid to modify the signal |
|
|
558 | mask. Specifically, this means you have to make sure signals are unblocked |
|
|
559 | when you want to receive them. |
|
|
560 | .Sp |
|
|
561 | This behaviour is useful when you want to do your own signal handling, or |
|
|
562 | want to handle signals only in specific threads and want to avoid libev |
|
|
563 | unblocking the signals. |
|
|
564 | .Sp |
|
|
565 | This flag's behaviour will become the default in future versions of libev. |
542 | .ie n .IP """EVBACKEND_SELECT"" (value 1, portable select backend)" 4 |
566 | .ie n .IP """EVBACKEND_SELECT"" (value 1, portable select backend)" 4 |
543 | .el .IP "\f(CWEVBACKEND_SELECT\fR (value 1, portable select backend)" 4 |
567 | .el .IP "\f(CWEVBACKEND_SELECT\fR (value 1, portable select backend)" 4 |
544 | .IX Item "EVBACKEND_SELECT (value 1, portable select backend)" |
568 | .IX Item "EVBACKEND_SELECT (value 1, portable select backend)" |
545 | This is your standard \fIselect\fR\|(2) backend. Not \fIcompletely\fR standard, as |
569 | This is your standard \fIselect\fR\|(2) backend. Not \fIcompletely\fR standard, as |
546 | libev tries to roll its own fd_set with no limits on the number of fds, |
570 | libev tries to roll its own fd_set with no limits on the number of fds, |
… | |
… | |
600 | employing an additional generation counter and comparing that against the |
624 | employing an additional generation counter and comparing that against the |
601 | events to filter out spurious ones, recreating the set when required. Last |
625 | events to filter out spurious ones, recreating the set when required. Last |
602 | but not least, it also refuses to work with some file descriptors which work |
626 | but not least, it also refuses to work with some file descriptors which work |
603 | perfectly fine with \f(CW\*(C`select\*(C'\fR (files, many character devices...). |
627 | perfectly fine with \f(CW\*(C`select\*(C'\fR (files, many character devices...). |
604 | .Sp |
628 | .Sp |
605 | Epoll is truly the train wreck analog among event poll mechanisms. |
629 | Epoll is truly the train wreck analog among event poll mechanisms, |
|
|
630 | a frankenpoll, cobbled together in a hurry, no thought to design or |
|
|
631 | interaction with others. |
606 | .Sp |
632 | .Sp |
607 | While stopping, setting and starting an I/O watcher in the same iteration |
633 | While stopping, setting and starting an I/O watcher in the same iteration |
608 | will result in some caching, there is still a system call per such |
634 | will result in some caching, there is still a system call per such |
609 | incident (because the same \fIfile descriptor\fR could point to a different |
635 | incident (because the same \fIfile descriptor\fR could point to a different |
610 | \&\fIfile description\fR now), so it's best to avoid that. Also, \f(CW\*(C`dup ()\*(C'\fR'ed |
636 | \&\fIfile description\fR now), so it's best to avoid that. Also, \f(CW\*(C`dup ()\*(C'\fR'ed |
… | |
… | |
676 | .el .IP "\f(CWEVBACKEND_PORT\fR (value 32, Solaris 10)" 4 |
702 | .el .IP "\f(CWEVBACKEND_PORT\fR (value 32, Solaris 10)" 4 |
677 | .IX Item "EVBACKEND_PORT (value 32, Solaris 10)" |
703 | .IX Item "EVBACKEND_PORT (value 32, Solaris 10)" |
678 | This uses the Solaris 10 event port mechanism. As with everything on Solaris, |
704 | This uses the Solaris 10 event port mechanism. As with everything on Solaris, |
679 | it's really slow, but it still scales very well (O(active_fds)). |
705 | it's really slow, but it still scales very well (O(active_fds)). |
680 | .Sp |
706 | .Sp |
681 | Please note that Solaris event ports can deliver a lot of spurious |
|
|
682 | notifications, so you need to use non-blocking I/O or other means to avoid |
|
|
683 | blocking when no data (or space) is available. |
|
|
684 | .Sp |
|
|
685 | While this backend scales well, it requires one system call per active |
707 | While this backend scales well, it requires one system call per active |
686 | file descriptor per loop iteration. For small and medium numbers of file |
708 | file descriptor per loop iteration. For small and medium numbers of file |
687 | descriptors a \*(L"slow\*(R" \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR backend |
709 | descriptors a \*(L"slow\*(R" \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR backend |
688 | might perform better. |
710 | might perform better. |
689 | .Sp |
711 | .Sp |
690 | On the positive side, with the exception of the spurious readiness |
712 | On the positive side, this backend actually performed fully to |
691 | notifications, this backend actually performed fully to specification |
|
|
692 | in all tests and is fully embeddable, which is a rare feat among the |
713 | specification in all tests and is fully embeddable, which is a rare feat |
693 | OS-specific backends (I vastly prefer correctness over speed hacks). |
714 | among the OS-specific backends (I vastly prefer correctness over speed |
|
|
715 | hacks). |
|
|
716 | .Sp |
|
|
717 | On the negative side, the interface is \fIbizarre\fR \- so bizarre that |
|
|
718 | even sun itself gets it wrong in their code examples: The event polling |
|
|
719 | function sometimes returning events to the caller even though an error |
|
|
720 | occurred, but with no indication whether it has done so or not (yes, it's |
|
|
721 | even documented that way) \- deadly for edge-triggered interfaces where |
|
|
722 | you absolutely have to know whether an event occurred or not because you |
|
|
723 | have to re-arm the watcher. |
|
|
724 | .Sp |
|
|
725 | Fortunately libev seems to be able to work around these idiocies. |
694 | .Sp |
726 | .Sp |
695 | This backend maps \f(CW\*(C`EV_READ\*(C'\fR and \f(CW\*(C`EV_WRITE\*(C'\fR in the same way as |
727 | This backend maps \f(CW\*(C`EV_READ\*(C'\fR and \f(CW\*(C`EV_WRITE\*(C'\fR in the same way as |
696 | \&\f(CW\*(C`EVBACKEND_POLL\*(C'\fR. |
728 | \&\f(CW\*(C`EVBACKEND_POLL\*(C'\fR. |
697 | .ie n .IP """EVBACKEND_ALL""" 4 |
729 | .ie n .IP """EVBACKEND_ALL""" 4 |
698 | .el .IP "\f(CWEVBACKEND_ALL\fR" 4 |
730 | .el .IP "\f(CWEVBACKEND_ALL\fR" 4 |
699 | .IX Item "EVBACKEND_ALL" |
731 | .IX Item "EVBACKEND_ALL" |
700 | Try all backends (even potentially broken ones that wouldn't be tried |
732 | Try all backends (even potentially broken ones that wouldn't be tried |
701 | with \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). Since this is a mask, you can do stuff such as |
733 | with \f(CW\*(C`EVFLAG_AUTO\*(C'\fR). Since this is a mask, you can do stuff such as |
702 | \&\f(CW\*(C`EVBACKEND_ALL & ~EVBACKEND_KQUEUE\*(C'\fR. |
734 | \&\f(CW\*(C`EVBACKEND_ALL & ~EVBACKEND_KQUEUE\*(C'\fR. |
703 | .Sp |
735 | .Sp |
704 | It is definitely not recommended to use this flag. |
736 | It is definitely not recommended to use this flag, use whatever |
|
|
737 | \&\f(CW\*(C`ev_recommended_backends ()\*(C'\fR returns, or simply do not specify a backend |
|
|
738 | at all. |
|
|
739 | .ie n .IP """EVBACKEND_MASK""" 4 |
|
|
740 | .el .IP "\f(CWEVBACKEND_MASK\fR" 4 |
|
|
741 | .IX Item "EVBACKEND_MASK" |
|
|
742 | Not a backend at all, but a mask to select all backend bits from a |
|
|
743 | \&\f(CW\*(C`flags\*(C'\fR value, in case you want to mask out any backends from a flags |
|
|
744 | value (e.g. when modifying the \f(CW\*(C`LIBEV_FLAGS\*(C'\fR environment variable). |
705 | .RE |
745 | .RE |
706 | .RS 4 |
746 | .RS 4 |
707 | .Sp |
747 | .Sp |
708 | If one or more of the backend flags are or'ed into the flags value, |
748 | If one or more of the backend flags are or'ed into the flags value, |
709 | then only these backends will be tried (in the reverse order as listed |
749 | then only these backends will be tried (in the reverse order as listed |
… | |
… | |
798 | \&\f(CW\*(C`ev_prepare\*(C'\fR and \f(CW\*(C`ev_check\*(C'\fR calls \- and is incremented between the |
838 | \&\f(CW\*(C`ev_prepare\*(C'\fR and \f(CW\*(C`ev_check\*(C'\fR calls \- and is incremented between the |
799 | prepare and check phases. |
839 | prepare and check phases. |
800 | .IP "unsigned int ev_depth (loop)" 4 |
840 | .IP "unsigned int ev_depth (loop)" 4 |
801 | .IX Item "unsigned int ev_depth (loop)" |
841 | .IX Item "unsigned int ev_depth (loop)" |
802 | Returns the number of times \f(CW\*(C`ev_run\*(C'\fR was entered minus the number of |
842 | Returns the number of times \f(CW\*(C`ev_run\*(C'\fR was entered minus the number of |
803 | times \f(CW\*(C`ev_run\*(C'\fR was exited, in other words, the recursion depth. |
843 | times \f(CW\*(C`ev_run\*(C'\fR was exited normally, in other words, the recursion depth. |
804 | .Sp |
844 | .Sp |
805 | Outside \f(CW\*(C`ev_run\*(C'\fR, this number is zero. In a callback, this number is |
845 | Outside \f(CW\*(C`ev_run\*(C'\fR, this number is zero. In a callback, this number is |
806 | \&\f(CW1\fR, unless \f(CW\*(C`ev_run\*(C'\fR was invoked recursively (or from another thread), |
846 | \&\f(CW1\fR, unless \f(CW\*(C`ev_run\*(C'\fR was invoked recursively (or from another thread), |
807 | in which case it is higher. |
847 | in which case it is higher. |
808 | .Sp |
848 | .Sp |
809 | Leaving \f(CW\*(C`ev_run\*(C'\fR abnormally (setjmp/longjmp, cancelling the thread |
849 | Leaving \f(CW\*(C`ev_run\*(C'\fR abnormally (setjmp/longjmp, cancelling the thread, |
810 | etc.), doesn't count as \*(L"exit\*(R" \- consider this as a hint to avoid such |
850 | throwing an exception etc.), doesn't count as \*(L"exit\*(R" \- consider this |
811 | ungentleman-like behaviour unless it's really convenient. |
851 | as a hint to avoid such ungentleman-like behaviour unless it's really |
|
|
852 | convenient, in which case it is fully supported. |
812 | .IP "unsigned int ev_backend (loop)" 4 |
853 | .IP "unsigned int ev_backend (loop)" 4 |
813 | .IX Item "unsigned int ev_backend (loop)" |
854 | .IX Item "unsigned int ev_backend (loop)" |
814 | Returns one of the \f(CW\*(C`EVBACKEND_*\*(C'\fR flags indicating the event backend in |
855 | Returns one of the \f(CW\*(C`EVBACKEND_*\*(C'\fR flags indicating the event backend in |
815 | use. |
856 | use. |
816 | .IP "ev_tstamp ev_now (loop)" 4 |
857 | .IP "ev_tstamp ev_now (loop)" 4 |
… | |
… | |
874 | relying on all watchers to be stopped when deciding when a program has |
915 | relying on all watchers to be stopped when deciding when a program has |
875 | finished (especially in interactive programs), but having a program |
916 | finished (especially in interactive programs), but having a program |
876 | that automatically loops as long as it has to and no longer by virtue |
917 | that automatically loops as long as it has to and no longer by virtue |
877 | of relying on its watchers stopping correctly, that is truly a thing of |
918 | of relying on its watchers stopping correctly, that is truly a thing of |
878 | beauty. |
919 | beauty. |
|
|
920 | .Sp |
|
|
921 | This function is also \fImostly\fR exception-safe \- you can break out of |
|
|
922 | a \f(CW\*(C`ev_run\*(C'\fR call by calling \f(CW\*(C`longjmp\*(C'\fR in a callback, throwing a \*(C+ |
|
|
923 | exception and so on. This does not decrement the \f(CW\*(C`ev_depth\*(C'\fR value, nor |
|
|
924 | will it clear any outstanding \f(CW\*(C`EVBREAK_ONE\*(C'\fR breaks. |
879 | .Sp |
925 | .Sp |
880 | A flags value of \f(CW\*(C`EVRUN_NOWAIT\*(C'\fR will look for new events, will handle |
926 | A flags value of \f(CW\*(C`EVRUN_NOWAIT\*(C'\fR will look for new events, will handle |
881 | those events and any already outstanding ones, but will not wait and |
927 | those events and any already outstanding ones, but will not wait and |
882 | block your process in case there are no events and will return after one |
928 | block your process in case there are no events and will return after one |
883 | iteration of the loop. This is sometimes useful to poll and handle new |
929 | iteration of the loop. This is sometimes useful to poll and handle new |
… | |
… | |
948 | Can be used to make a call to \f(CW\*(C`ev_run\*(C'\fR return early (but only after it |
994 | Can be used to make a call to \f(CW\*(C`ev_run\*(C'\fR return early (but only after it |
949 | has processed all outstanding events). The \f(CW\*(C`how\*(C'\fR argument must be either |
995 | has processed all outstanding events). The \f(CW\*(C`how\*(C'\fR argument must be either |
950 | \&\f(CW\*(C`EVBREAK_ONE\*(C'\fR, which will make the innermost \f(CW\*(C`ev_run\*(C'\fR call return, or |
996 | \&\f(CW\*(C`EVBREAK_ONE\*(C'\fR, which will make the innermost \f(CW\*(C`ev_run\*(C'\fR call return, or |
951 | \&\f(CW\*(C`EVBREAK_ALL\*(C'\fR, which will make all nested \f(CW\*(C`ev_run\*(C'\fR calls return. |
997 | \&\f(CW\*(C`EVBREAK_ALL\*(C'\fR, which will make all nested \f(CW\*(C`ev_run\*(C'\fR calls return. |
952 | .Sp |
998 | .Sp |
953 | This \*(L"break state\*(R" will be cleared when entering \f(CW\*(C`ev_run\*(C'\fR again. |
999 | This \*(L"break state\*(R" will be cleared on the next call to \f(CW\*(C`ev_run\*(C'\fR. |
954 | .Sp |
1000 | .Sp |
955 | It is safe to call \f(CW\*(C`ev_break\*(C'\fR from outside any \f(CW\*(C`ev_run\*(C'\fR calls, too. |
1001 | It is safe to call \f(CW\*(C`ev_break\*(C'\fR from outside any \f(CW\*(C`ev_run\*(C'\fR calls, too, in |
|
|
1002 | which case it will have no effect. |
956 | .IP "ev_ref (loop)" 4 |
1003 | .IP "ev_ref (loop)" 4 |
957 | .IX Item "ev_ref (loop)" |
1004 | .IX Item "ev_ref (loop)" |
958 | .PD 0 |
1005 | .PD 0 |
959 | .IP "ev_unref (loop)" 4 |
1006 | .IP "ev_unref (loop)" 4 |
960 | .IX Item "ev_unref (loop)" |
1007 | .IX Item "ev_unref (loop)" |
… | |
… | |
983 | .Sp |
1030 | .Sp |
984 | .Vb 4 |
1031 | .Vb 4 |
985 | \& ev_signal exitsig; |
1032 | \& ev_signal exitsig; |
986 | \& ev_signal_init (&exitsig, sig_cb, SIGINT); |
1033 | \& ev_signal_init (&exitsig, sig_cb, SIGINT); |
987 | \& ev_signal_start (loop, &exitsig); |
1034 | \& ev_signal_start (loop, &exitsig); |
988 | \& evf_unref (loop); |
1035 | \& ev_unref (loop); |
989 | .Ve |
1036 | .Ve |
990 | .Sp |
1037 | .Sp |
991 | Example: For some weird reason, unregister the above signal handler again. |
1038 | Example: For some weird reason, unregister the above signal handler again. |
992 | .Sp |
1039 | .Sp |
993 | .Vb 2 |
1040 | .Vb 2 |
… | |
… | |
1107 | See also the locking example in the \f(CW\*(C`THREADS\*(C'\fR section later in this |
1154 | See also the locking example in the \f(CW\*(C`THREADS\*(C'\fR section later in this |
1108 | document. |
1155 | document. |
1109 | .IP "ev_set_userdata (loop, void *data)" 4 |
1156 | .IP "ev_set_userdata (loop, void *data)" 4 |
1110 | .IX Item "ev_set_userdata (loop, void *data)" |
1157 | .IX Item "ev_set_userdata (loop, void *data)" |
1111 | .PD 0 |
1158 | .PD 0 |
1112 | .IP "ev_userdata (loop)" 4 |
1159 | .IP "void *ev_userdata (loop)" 4 |
1113 | .IX Item "ev_userdata (loop)" |
1160 | .IX Item "void *ev_userdata (loop)" |
1114 | .PD |
1161 | .PD |
1115 | Set and retrieve a single \f(CW\*(C`void *\*(C'\fR associated with a loop. When |
1162 | Set and retrieve a single \f(CW\*(C`void *\*(C'\fR associated with a loop. When |
1116 | \&\f(CW\*(C`ev_set_userdata\*(C'\fR has never been called, then \f(CW\*(C`ev_userdata\*(C'\fR returns |
1163 | \&\f(CW\*(C`ev_set_userdata\*(C'\fR has never been called, then \f(CW\*(C`ev_userdata\*(C'\fR returns |
1117 | \&\f(CW0.\fR |
1164 | \&\f(CW0\fR. |
1118 | .Sp |
1165 | .Sp |
1119 | These two functions can be used to associate arbitrary data with a loop, |
1166 | These two functions can be used to associate arbitrary data with a loop, |
1120 | and are intended solely for the \f(CW\*(C`invoke_pending_cb\*(C'\fR, \f(CW\*(C`release\*(C'\fR and |
1167 | and are intended solely for the \f(CW\*(C`invoke_pending_cb\*(C'\fR, \f(CW\*(C`release\*(C'\fR and |
1121 | \&\f(CW\*(C`acquire\*(C'\fR callbacks described above, but of course can be (ab\-)used for |
1168 | \&\f(CW\*(C`acquire\*(C'\fR callbacks described above, but of course can be (ab\-)used for |
1122 | any other purpose as well. |
1169 | any other purpose as well. |
… | |
… | |
1428 | \&\f(CW\*(C`ev_clear_pending\*(C'\fR will clear the pending event, even if the watcher was |
1475 | \&\f(CW\*(C`ev_clear_pending\*(C'\fR will clear the pending event, even if the watcher was |
1429 | not started in the first place. |
1476 | not started in the first place. |
1430 | .Sp |
1477 | .Sp |
1431 | See also \f(CW\*(C`ev_feed_fd_event\*(C'\fR and \f(CW\*(C`ev_feed_signal_event\*(C'\fR for related |
1478 | See also \f(CW\*(C`ev_feed_fd_event\*(C'\fR and \f(CW\*(C`ev_feed_signal_event\*(C'\fR for related |
1432 | functions that do not need a watcher. |
1479 | functions that do not need a watcher. |
1433 | .SS "\s-1ASSOCIATING\s0 \s-1CUSTOM\s0 \s-1DATA\s0 \s-1WITH\s0 A \s-1WATCHER\s0" |
|
|
1434 | .IX Subsection "ASSOCIATING CUSTOM DATA WITH A WATCHER" |
|
|
1435 | Each watcher has, by default, a member \f(CW\*(C`void *data\*(C'\fR that you can change |
|
|
1436 | and read at any time: libev will completely ignore it. This can be used |
|
|
1437 | to associate arbitrary data with your watcher. If you need more data and |
|
|
1438 | don't want to allocate memory and store a pointer to it in that data |
|
|
1439 | member, you can also \*(L"subclass\*(R" the watcher type and provide your own |
|
|
1440 | data: |
|
|
1441 | .PP |
1480 | .PP |
1442 | .Vb 7 |
1481 | See also the \*(L"\s-1ASSOCIATING\s0 \s-1CUSTOM\s0 \s-1DATA\s0 \s-1WITH\s0 A \s-1WATCHER\s0\*(R" and \*(L"\s-1BUILDING\s0 \s-1YOUR\s0 |
1443 | \& struct my_io |
1482 | \&\s-1OWN\s0 \s-1COMPOSITE\s0 \s-1WATCHERS\s0\*(R" idioms. |
1444 | \& { |
|
|
1445 | \& ev_io io; |
|
|
1446 | \& int otherfd; |
|
|
1447 | \& void *somedata; |
|
|
1448 | \& struct whatever *mostinteresting; |
|
|
1449 | \& }; |
|
|
1450 | \& |
|
|
1451 | \& ... |
|
|
1452 | \& struct my_io w; |
|
|
1453 | \& ev_io_init (&w.io, my_cb, fd, EV_READ); |
|
|
1454 | .Ve |
|
|
1455 | .PP |
|
|
1456 | And since your callback will be called with a pointer to the watcher, you |
|
|
1457 | can cast it back to your own type: |
|
|
1458 | .PP |
|
|
1459 | .Vb 5 |
|
|
1460 | \& static void my_cb (struct ev_loop *loop, ev_io *w_, int revents) |
|
|
1461 | \& { |
|
|
1462 | \& struct my_io *w = (struct my_io *)w_; |
|
|
1463 | \& ... |
|
|
1464 | \& } |
|
|
1465 | .Ve |
|
|
1466 | .PP |
|
|
1467 | More interesting and less C\-conformant ways of casting your callback type |
|
|
1468 | instead have been omitted. |
|
|
1469 | .PP |
|
|
1470 | Another common scenario is to use some data structure with multiple |
|
|
1471 | embedded watchers: |
|
|
1472 | .PP |
|
|
1473 | .Vb 6 |
|
|
1474 | \& struct my_biggy |
|
|
1475 | \& { |
|
|
1476 | \& int some_data; |
|
|
1477 | \& ev_timer t1; |
|
|
1478 | \& ev_timer t2; |
|
|
1479 | \& } |
|
|
1480 | .Ve |
|
|
1481 | .PP |
|
|
1482 | In this case getting the pointer to \f(CW\*(C`my_biggy\*(C'\fR is a bit more |
|
|
1483 | complicated: Either you store the address of your \f(CW\*(C`my_biggy\*(C'\fR struct |
|
|
1484 | in the \f(CW\*(C`data\*(C'\fR member of the watcher (for woozies), or you need to use |
|
|
1485 | some pointer arithmetic using \f(CW\*(C`offsetof\*(C'\fR inside your watchers (for real |
|
|
1486 | programmers): |
|
|
1487 | .PP |
|
|
1488 | .Vb 1 |
|
|
1489 | \& #include <stddef.h> |
|
|
1490 | \& |
|
|
1491 | \& static void |
|
|
1492 | \& t1_cb (EV_P_ ev_timer *w, int revents) |
|
|
1493 | \& { |
|
|
1494 | \& struct my_biggy *big = (struct my_biggy *) |
|
|
1495 | \& (((char *)w) \- offsetof (struct my_biggy, t1)); |
|
|
1496 | \& } |
|
|
1497 | \& |
|
|
1498 | \& static void |
|
|
1499 | \& t2_cb (EV_P_ ev_timer *w, int revents) |
|
|
1500 | \& { |
|
|
1501 | \& struct my_biggy *big = (struct my_biggy *) |
|
|
1502 | \& (((char *)w) \- offsetof (struct my_biggy, t2)); |
|
|
1503 | \& } |
|
|
1504 | .Ve |
|
|
1505 | .SS "\s-1WATCHER\s0 \s-1STATES\s0" |
1483 | .SS "\s-1WATCHER\s0 \s-1STATES\s0" |
1506 | .IX Subsection "WATCHER STATES" |
1484 | .IX Subsection "WATCHER STATES" |
1507 | There are various watcher states mentioned throughout this manual \- |
1485 | There are various watcher states mentioned throughout this manual \- |
1508 | active, pending and so on. In this section these states and the rules to |
1486 | active, pending and so on. In this section these states and the rules to |
1509 | transition between them will be described in more detail \- and while these |
1487 | transition between them will be described in more detail \- and while these |
… | |
… | |
1684 | In general you can register as many read and/or write event watchers per |
1662 | In general you can register as many read and/or write event watchers per |
1685 | fd as you want (as long as you don't confuse yourself). Setting all file |
1663 | fd as you want (as long as you don't confuse yourself). Setting all file |
1686 | descriptors to non-blocking mode is also usually a good idea (but not |
1664 | descriptors to non-blocking mode is also usually a good idea (but not |
1687 | required if you know what you are doing). |
1665 | required if you know what you are doing). |
1688 | .PP |
1666 | .PP |
1689 | If you cannot use non-blocking mode, then force the use of a |
|
|
1690 | known-to-be-good backend (at the time of this writing, this includes only |
|
|
1691 | \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and \f(CW\*(C`EVBACKEND_POLL\*(C'\fR). The same applies to file |
|
|
1692 | descriptors for which non-blocking operation makes no sense (such as |
|
|
1693 | files) \- libev doesn't guarantee any specific behaviour in that case. |
|
|
1694 | .PP |
|
|
1695 | Another thing you have to watch out for is that it is quite easy to |
1667 | Another thing you have to watch out for is that it is quite easy to |
1696 | receive \*(L"spurious\*(R" readiness notifications, that is your callback might |
1668 | receive \*(L"spurious\*(R" readiness notifications, that is, your callback might |
1697 | be called with \f(CW\*(C`EV_READ\*(C'\fR but a subsequent \f(CW\*(C`read\*(C'\fR(2) will actually block |
1669 | be called with \f(CW\*(C`EV_READ\*(C'\fR but a subsequent \f(CW\*(C`read\*(C'\fR(2) will actually block |
1698 | because there is no data. Not only are some backends known to create a |
1670 | because there is no data. It is very easy to get into this situation even |
1699 | lot of those (for example Solaris ports), it is very easy to get into |
1671 | with a relatively standard program structure. Thus it is best to always |
1700 | this situation even with a relatively standard program structure. Thus |
1672 | use non-blocking I/O: An extra \f(CW\*(C`read\*(C'\fR(2) returning \f(CW\*(C`EAGAIN\*(C'\fR is far |
1701 | it is best to always use non-blocking I/O: An extra \f(CW\*(C`read\*(C'\fR(2) returning |
|
|
1702 | \&\f(CW\*(C`EAGAIN\*(C'\fR is far preferable to a program hanging until some data arrives. |
1673 | preferable to a program hanging until some data arrives. |
1703 | .PP |
1674 | .PP |
1704 | If you cannot run the fd in non-blocking mode (for example you should |
1675 | If you cannot run the fd in non-blocking mode (for example you should |
1705 | not play around with an Xlib connection), then you have to separately |
1676 | not play around with an Xlib connection), then you have to separately |
1706 | re-test whether a file descriptor is really ready with a known-to-be good |
1677 | re-test whether a file descriptor is really ready with a known-to-be good |
1707 | interface such as poll (fortunately in our Xlib example, Xlib already |
1678 | interface such as poll (fortunately in the case of Xlib, it already does |
1708 | does this on its own, so it's quite safe to use). Some people additionally |
1679 | this on its own, so it's quite safe to use). Some people additionally |
1709 | use \f(CW\*(C`SIGALRM\*(C'\fR and an interval timer, just to be sure you won't block |
1680 | use \f(CW\*(C`SIGALRM\*(C'\fR and an interval timer, just to be sure you won't block |
1710 | indefinitely. |
1681 | indefinitely. |
1711 | .PP |
1682 | .PP |
1712 | But really, best use non-blocking mode. |
1683 | But really, best use non-blocking mode. |
1713 | .PP |
1684 | .PP |
… | |
… | |
1743 | .PP |
1714 | .PP |
1744 | There is no workaround possible except not registering events |
1715 | There is no workaround possible except not registering events |
1745 | for potentially \f(CW\*(C`dup ()\*(C'\fR'ed file descriptors, or to resort to |
1716 | for potentially \f(CW\*(C`dup ()\*(C'\fR'ed file descriptors, or to resort to |
1746 | \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR. |
1717 | \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR. |
1747 | .PP |
1718 | .PP |
|
|
1719 | \fIThe special problem of files\fR |
|
|
1720 | .IX Subsection "The special problem of files" |
|
|
1721 | .PP |
|
|
1722 | Many people try to use \f(CW\*(C`select\*(C'\fR (or libev) on file descriptors |
|
|
1723 | representing files, and expect it to become ready when their program |
|
|
1724 | doesn't block on disk accesses (which can take a long time on their own). |
|
|
1725 | .PP |
|
|
1726 | However, this cannot ever work in the \*(L"expected\*(R" way \- you get a readiness |
|
|
1727 | notification as soon as the kernel knows whether and how much data is |
|
|
1728 | there, and in the case of open files, that's always the case, so you |
|
|
1729 | always get a readiness notification instantly, and your read (or possibly |
|
|
1730 | write) will still block on the disk I/O. |
|
|
1731 | .PP |
|
|
1732 | Another way to view it is that in the case of sockets, pipes, character |
|
|
1733 | devices and so on, there is another party (the sender) that delivers data |
|
|
1734 | on its own, but in the case of files, there is no such thing: the disk |
|
|
1735 | will not send data on its own, simply because it doesn't know what you |
|
|
1736 | wish to read \- you would first have to request some data. |
|
|
1737 | .PP |
|
|
1738 | Since files are typically not-so-well supported by advanced notification |
|
|
1739 | mechanism, libev tries hard to emulate \s-1POSIX\s0 behaviour with respect |
|
|
1740 | to files, even though you should not use it. The reason for this is |
|
|
1741 | convenience: sometimes you want to watch \s-1STDIN\s0 or \s-1STDOUT\s0, which is |
|
|
1742 | usually a tty, often a pipe, but also sometimes files or special devices |
|
|
1743 | (for example, \f(CW\*(C`epoll\*(C'\fR on Linux works with \fI/dev/random\fR but not with |
|
|
1744 | \&\fI/dev/urandom\fR), and even though the file might better be served with |
|
|
1745 | asynchronous I/O instead of with non-blocking I/O, it is still useful when |
|
|
1746 | it \*(L"just works\*(R" instead of freezing. |
|
|
1747 | .PP |
|
|
1748 | So avoid file descriptors pointing to files when you know it (e.g. use |
|
|
1749 | libeio), but use them when it is convenient, e.g. for \s-1STDIN/STDOUT\s0, or |
|
|
1750 | when you rarely read from a file instead of from a socket, and want to |
|
|
1751 | reuse the same code path. |
|
|
1752 | .PP |
1748 | \fIThe special problem of fork\fR |
1753 | \fIThe special problem of fork\fR |
1749 | .IX Subsection "The special problem of fork" |
1754 | .IX Subsection "The special problem of fork" |
1750 | .PP |
1755 | .PP |
1751 | Some backends (epoll, kqueue) do not support \f(CW\*(C`fork ()\*(C'\fR at all or exhibit |
1756 | Some backends (epoll, kqueue) do not support \f(CW\*(C`fork ()\*(C'\fR at all or exhibit |
1752 | useless behaviour. Libev fully supports fork, but needs to be told about |
1757 | useless behaviour. Libev fully supports fork, but needs to be told about |
1753 | it in the child. |
1758 | it in the child if you want to continue to use it in the child. |
1754 | .PP |
1759 | .PP |
1755 | To support fork in your programs, you either have to call |
1760 | To support fork in your child processes, you have to call \f(CW\*(C`ev_loop_fork |
1756 | \&\f(CW\*(C`ev_default_fork ()\*(C'\fR or \f(CW\*(C`ev_loop_fork ()\*(C'\fR after a fork in the child, |
1761 | ()\*(C'\fR after a fork in the child, enable \f(CW\*(C`EVFLAG_FORKCHECK\*(C'\fR, or resort to |
1757 | enable \f(CW\*(C`EVFLAG_FORKCHECK\*(C'\fR, or resort to \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or |
1762 | \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR or \f(CW\*(C`EVBACKEND_POLL\*(C'\fR. |
1758 | \&\f(CW\*(C`EVBACKEND_POLL\*(C'\fR. |
|
|
1759 | .PP |
1763 | .PP |
1760 | \fIThe special problem of \s-1SIGPIPE\s0\fR |
1764 | \fIThe special problem of \s-1SIGPIPE\s0\fR |
1761 | .IX Subsection "The special problem of SIGPIPE" |
1765 | .IX Subsection "The special problem of SIGPIPE" |
1762 | .PP |
1766 | .PP |
1763 | While not really specific to libev, it is easy to forget about \f(CW\*(C`SIGPIPE\*(C'\fR: |
1767 | While not really specific to libev, it is easy to forget about \f(CW\*(C`SIGPIPE\*(C'\fR: |
… | |
… | |
2448 | \&\fIhas\fR to modify the signal mask, at least temporarily. |
2452 | \&\fIhas\fR to modify the signal mask, at least temporarily. |
2449 | .PP |
2453 | .PP |
2450 | So I can't stress this enough: \fIIf you do not reset your signal mask when |
2454 | So I can't stress this enough: \fIIf you do not reset your signal mask when |
2451 | you expect it to be empty, you have a race condition in your code\fR. This |
2455 | you expect it to be empty, you have a race condition in your code\fR. This |
2452 | is not a libev-specific thing, this is true for most event libraries. |
2456 | is not a libev-specific thing, this is true for most event libraries. |
|
|
2457 | .PP |
|
|
2458 | \fIThe special problem of threads signal handling\fR |
|
|
2459 | .IX Subsection "The special problem of threads signal handling" |
|
|
2460 | .PP |
|
|
2461 | \&\s-1POSIX\s0 threads has problematic signal handling semantics, specifically, |
|
|
2462 | a lot of functionality (sigfd, sigwait etc.) only really works if all |
|
|
2463 | threads in a process block signals, which is hard to achieve. |
|
|
2464 | .PP |
|
|
2465 | When you want to use sigwait (or mix libev signal handling with your own |
|
|
2466 | for the same signals), you can tackle this problem by globally blocking |
|
|
2467 | all signals before creating any threads (or creating them with a fully set |
|
|
2468 | sigprocmask) and also specifying the \f(CW\*(C`EVFLAG_NOSIGMASK\*(C'\fR when creating |
|
|
2469 | loops. Then designate one thread as \*(L"signal receiver thread\*(R" which handles |
|
|
2470 | these signals. You can pass on any signals that libev might be interested |
|
|
2471 | in by calling \f(CW\*(C`ev_feed_signal\*(C'\fR. |
2453 | .PP |
2472 | .PP |
2454 | \fIWatcher-Specific Functions and Data Members\fR |
2473 | \fIWatcher-Specific Functions and Data Members\fR |
2455 | .IX Subsection "Watcher-Specific Functions and Data Members" |
2474 | .IX Subsection "Watcher-Specific Functions and Data Members" |
2456 | .IP "ev_signal_init (ev_signal *, callback, int signum)" 4 |
2475 | .IP "ev_signal_init (ev_signal *, callback, int signum)" 4 |
2457 | .IX Item "ev_signal_init (ev_signal *, callback, int signum)" |
2476 | .IX Item "ev_signal_init (ev_signal *, callback, int signum)" |
… | |
… | |
3300 | it by calling \f(CW\*(C`ev_async_send\*(C'\fR, which is thread\- and signal safe. |
3319 | it by calling \f(CW\*(C`ev_async_send\*(C'\fR, which is thread\- and signal safe. |
3301 | .PP |
3320 | .PP |
3302 | This functionality is very similar to \f(CW\*(C`ev_signal\*(C'\fR watchers, as signals, |
3321 | This functionality is very similar to \f(CW\*(C`ev_signal\*(C'\fR watchers, as signals, |
3303 | too, are asynchronous in nature, and signals, too, will be compressed |
3322 | too, are asynchronous in nature, and signals, too, will be compressed |
3304 | (i.e. the number of callback invocations may be less than the number of |
3323 | (i.e. the number of callback invocations may be less than the number of |
3305 | \&\f(CW\*(C`ev_async_send\*(C'\fR calls). 
3324 | \&\f(CW\*(C`ev_async_send\*(C'\fR calls). In fact, you could use signal watchers as a kind 
|
|
3325 | of \*(L"global async watchers\*(R" by using a watcher on an otherwise unused |
|
|
3326 | signal, and \f(CW\*(C`ev_feed_signal\*(C'\fR to signal this watcher from another thread, |
|
|
3327 | even without knowing which loop owns the signal. |
3306 | .PP |
3328 | .PP |
3307 | Unlike \f(CW\*(C`ev_signal\*(C'\fR watchers, \f(CW\*(C`ev_async\*(C'\fR works with any event loop, not |
3329 | Unlike \f(CW\*(C`ev_signal\*(C'\fR watchers, \f(CW\*(C`ev_async\*(C'\fR works with any event loop, not |
3308 | just the default loop. |
3330 | just the default loop. |
3309 | .PP |
3331 | .PP |
3310 | \fIQueueing\fR |
3332 | \fIQueueing\fR |
… | |
… | |
3473 | .IX Item "ev_feed_fd_event (loop, int fd, int revents)" |
3495 | .IX Item "ev_feed_fd_event (loop, int fd, int revents)" |
3474 | Feed an event on the given fd, as if a file descriptor backend detected |
3496 | Feed an event on the given fd, as if a file descriptor backend detected |
3475 | the given events on it. 
3497 | the given events on it. 
3476 | .IP "ev_feed_signal_event (loop, int signum)" 4 |
3498 | .IP "ev_feed_signal_event (loop, int signum)" 4 |
3477 | .IX Item "ev_feed_signal_event (loop, int signum)" |
3499 | .IX Item "ev_feed_signal_event (loop, int signum)" |
3478 | Feed an event as if the given signal occurred (\f(CW\*(C`loop\*(C'\fR must be the default |
3500 | Feed an event as if the given signal occurred. See also \f(CW\*(C`ev_feed_signal\*(C'\fR, |
3479 | loop!). |
3501 | which is async-safe. |
|
|
3502 | .SH "COMMON OR USEFUL IDIOMS (OR BOTH)" |
|
|
3503 | .IX Header "COMMON OR USEFUL IDIOMS (OR BOTH)" |
|
|
3504 | This section explains some common idioms that are not immediately |
|
|
3505 | obvious. Note that examples are sprinkled over the whole manual, and this |
|
|
3506 | section only contains stuff that wouldn't fit anywhere else. |
|
|
3507 | .SS "\s-1ASSOCIATING\s0 \s-1CUSTOM\s0 \s-1DATA\s0 \s-1WITH\s0 A \s-1WATCHER\s0" |
|
|
3508 | .IX Subsection "ASSOCIATING CUSTOM DATA WITH A WATCHER" |
|
|
3509 | Each watcher has, by default, a \f(CW\*(C`void *data\*(C'\fR member that you can read |
|
|
3510 | or modify at any time: libev will completely ignore it. This can be used |
|
|
3511 | to associate arbitrary data with your watcher. If you need more data and |
|
|
3512 | don't want to allocate memory separately and store a pointer to it in that |
|
|
3513 | data member, you can also \*(L"subclass\*(R" the watcher type and provide your own |
|
|
3514 | data: |
|
|
3515 | .PP |
|
|
3516 | .Vb 7 |
|
|
3517 | \& struct my_io |
|
|
3518 | \& { |
|
|
3519 | \& ev_io io; |
|
|
3520 | \& int otherfd; |
|
|
3521 | \& void *somedata; |
|
|
3522 | \& struct whatever *mostinteresting; |
|
|
3523 | \& }; |
|
|
3524 | \& |
|
|
3525 | \& ... |
|
|
3526 | \& struct my_io w; |
|
|
3527 | \& ev_io_init (&w.io, my_cb, fd, EV_READ); |
|
|
3528 | .Ve |
|
|
3529 | .PP |
|
|
3530 | And since your callback will be called with a pointer to the watcher, you |
|
|
3531 | can cast it back to your own type: |
|
|
3532 | .PP |
|
|
3533 | .Vb 5 |
|
|
3534 | \& static void my_cb (struct ev_loop *loop, ev_io *w_, int revents) |
|
|
3535 | \& { |
|
|
3536 | \& struct my_io *w = (struct my_io *)w_; |
|
|
3537 | \& ... |
|
|
3538 | \& } |
|
|
3539 | .Ve |
|
|
3540 | .PP |
|
|
3541 | More interesting and less C\-conformant ways of casting your callback |
|
|
3542 | function type instead have been omitted. |
|
|
3543 | .SS "\s-1BUILDING\s0 \s-1YOUR\s0 \s-1OWN\s0 \s-1COMPOSITE\s0 \s-1WATCHERS\s0" |
|
|
3544 | .IX Subsection "BUILDING YOUR OWN COMPOSITE WATCHERS" |
|
|
3545 | Another common scenario is to use some data structure with multiple |
|
|
3546 | embedded watchers, in effect creating your own watcher that combines |
|
|
3547 | multiple libev event sources into one \*(L"super-watcher\*(R": |
|
|
3548 | .PP |
|
|
3549 | .Vb 6 |
|
|
3550 | \& struct my_biggy |
|
|
3551 | \& { |
|
|
3552 | \& int some_data; |
|
|
3553 | \& ev_timer t1; |
|
|
3554 | \& ev_timer t2; |
|
|
3555 | \& } |
|
|
3556 | .Ve |
|
|
3557 | .PP |
|
|
3558 | In this case getting the pointer to \f(CW\*(C`my_biggy\*(C'\fR is a bit more |
|
|
3559 | complicated: Either you store the address of your \f(CW\*(C`my_biggy\*(C'\fR struct in |
|
|
3560 | the \f(CW\*(C`data\*(C'\fR member of the watcher (for woozies or \*(C+ coders), or you need |
|
|
3561 | to use some pointer arithmetic using \f(CW\*(C`offsetof\*(C'\fR inside your watchers (for |
|
|
3562 | real programmers): |
|
|
3563 | .PP |
|
|
3564 | .Vb 1 |
|
|
3565 | \& #include <stddef.h> |
|
|
3566 | \& |
|
|
3567 | \& static void |
|
|
3568 | \& t1_cb (EV_P_ ev_timer *w, int revents) |
|
|
3569 | \& { |
|
|
3570 | \&     struct my_biggy *big = (struct my_biggy *) 
|
|
3571 | \& (((char *)w) \- offsetof (struct my_biggy, t1)); |
|
|
3572 | \& } |
|
|
3573 | \& |
|
|
3574 | \& static void |
|
|
3575 | \& t2_cb (EV_P_ ev_timer *w, int revents) |
|
|
3576 | \& { |
|
|
3577 | \&     struct my_biggy *big = (struct my_biggy *) 
|
|
3578 | \& (((char *)w) \- offsetof (struct my_biggy, t2)); |
|
|
3579 | \& } |
|
|
3580 | .Ve |
|
|
3581 | .SS "\s-1MODAL/NESTED\s0 \s-1EVENT\s0 \s-1LOOP\s0 \s-1INVOCATIONS\s0 \s-1AND\s0 \s-1EXIT\s0 \s-1CONDITIONS\s0" 


3582 | .IX Subsection "MODAL/NESTED EVENT LOOP INVOCATIONS AND EXIT CONDITIONS" 
|
|
3583 | Often (especially in \s-1GUI\s0 toolkits) there are places where you have |
|
|
3584 | \&\fImodal\fR interaction, which is most easily implemented by recursively |
|
|
3585 | invoking \f(CW\*(C`ev_run\*(C'\fR. |
|
|
3586 | .PP |
|
|
3587 | This brings the problem of exiting \- a callback might want to finish the |
|
|
3588 | main \f(CW\*(C`ev_run\*(C'\fR call, but not the nested one (e.g. user clicked \*(L"Quit\*(R", but |
|
|
3589 | a modal \*(L"Are you sure?\*(R" dialog is still waiting), or just the nested one |
|
|
3590 | and not the main one (e.g. user clicked \*(L"Ok\*(R" in a modal dialog), or some 
|
|
3591 | other combination: In these cases, \f(CW\*(C`ev_break\*(C'\fR will not work alone. |
|
|
3592 | .PP |
|
|
3593 | The solution is to maintain \*(L"break this loop\*(R" variable for each \f(CW\*(C`ev_run\*(C'\fR |
|
|
3594 | invocation, and use a loop around \f(CW\*(C`ev_run\*(C'\fR until the condition is |
|
|
3595 | triggered, using \f(CW\*(C`EVRUN_ONCE\*(C'\fR: |
|
|
3596 | .PP |
|
|
3597 | .Vb 2 |
|
|
3598 | \& // main loop |
|
|
3599 | \& int exit_main_loop = 0; |
|
|
3600 | \& |
|
|
3601 | \& while (!exit_main_loop) |
|
|
3602 | \& ev_run (EV_DEFAULT_ EVRUN_ONCE); |
|
|
3603 | \& |
|
|
3604 | \& // in a model watcher |
|
|
3605 | \& int exit_nested_loop = 0; |
|
|
3606 | \& |
|
|
3607 | \& while (!exit_nested_loop) |
|
|
3608 | \& ev_run (EV_A_ EVRUN_ONCE); |
|
|
3609 | .Ve |
|
|
3610 | .PP |
|
|
3611 | To exit from any of these loops, just set the corresponding exit variable: |
|
|
3612 | .PP |
|
|
3613 | .Vb 2 |
|
|
3614 | \& // exit modal loop |
|
|
3615 | \& exit_nested_loop = 1; |
|
|
3616 | \& |
|
|
3617 | \& // exit main program, after modal loop is finished |
|
|
3618 | \& exit_main_loop = 1; |
|
|
3619 | \& |
|
|
3620 | \& // exit both |
|
|
3621 | \& exit_main_loop = exit_nested_loop = 1; |
|
|
3622 | .Ve |
|
|
3623 | .SS "\s-1THREAD\s0 \s-1LOCKING\s0 \s-1EXAMPLE\s0" |
|
|
3624 | .IX Subsection "THREAD LOCKING EXAMPLE" |
|
|
3625 | Here is a fictitious example of how to run an event loop in a different |
|
|
3626 | thread from where callbacks are being invoked and watchers are |
|
|
3627 | created/added/removed. |
|
|
3628 | .PP |
|
|
3629 | For a real-world example, see the \f(CW\*(C`EV::Loop::Async\*(C'\fR perl module, |
|
|
3630 | which uses exactly this technique (which is suited for many high-level |
|
|
3631 | languages). |
|
|
3632 | .PP |
|
|
3633 | The example uses a pthread mutex to protect the loop data, a condition |
|
|
3634 | variable to wait for callback invocations, an async watcher to notify the |
|
|
3635 | event loop thread and an unspecified mechanism to wake up the main thread. |
|
|
3636 | .PP |
|
|
3637 | First, you need to associate some data with the event loop: |
|
|
3638 | .PP |
|
|
3639 | .Vb 6 |
|
|
3640 | \& typedef struct { |
|
|
3641 | \& mutex_t lock; /* global loop lock */ |
|
|
3642 | \& ev_async async_w; |
|
|
3643 | \& thread_t tid; |
|
|
3644 | \& cond_t invoke_cv; |
|
|
3645 | \& } userdata; |
|
|
3646 | \& |
|
|
3647 | \& void prepare_loop (EV_P) |
|
|
3648 | \& { |
|
|
3649 | \& // for simplicity, we use a static userdata struct. |
|
|
3650 | \& static userdata u; |
|
|
3651 | \& |
|
|
3652 | \& ev_async_init (&u\->async_w, async_cb); |
|
|
3653 | \& ev_async_start (EV_A_ &u\->async_w); |
|
|
3654 | \& |
|
|
3655 | \& pthread_mutex_init (&u\->lock, 0); |
|
|
3656 | \& pthread_cond_init (&u\->invoke_cv, 0); |
|
|
3657 | \& |
|
|
3658 | \& // now associate this with the loop |
|
|
3659 | \& ev_set_userdata (EV_A_ u); |
|
|
3660 | \& ev_set_invoke_pending_cb (EV_A_ l_invoke); |
|
|
3661 | \& ev_set_loop_release_cb (EV_A_ l_release, l_acquire); |
|
|
3662 | \& |
|
|
3663 | \& // then create the thread running ev_loop |
|
|
3664 | \& pthread_create (&u\->tid, 0, l_run, EV_A); |
|
|
3665 | \& } |
|
|
3666 | .Ve |
|
|
3667 | .PP |
|
|
3668 | The callback for the \f(CW\*(C`ev_async\*(C'\fR watcher does nothing: the watcher is used |
|
|
3669 | solely to wake up the event loop so it takes notice of any new watchers |
|
|
3670 | that might have been added: |
|
|
3671 | .PP |
|
|
3672 | .Vb 5 |
|
|
3673 | \& static void |
|
|
3674 | \& async_cb (EV_P_ ev_async *w, int revents) |
|
|
3675 | \& { |
|
|
3676 | \& // just used for the side effects |
|
|
3677 | \& } |
|
|
3678 | .Ve |
|
|
3679 | .PP |
|
|
3680 | The \f(CW\*(C`l_release\*(C'\fR and \f(CW\*(C`l_acquire\*(C'\fR callbacks simply unlock/lock the mutex |
|
|
3681 | protecting the loop data, respectively. |
|
|
3682 | .PP |
|
|
3683 | .Vb 6 |
|
|
3684 | \& static void |
|
|
3685 | \& l_release (EV_P) |
|
|
3686 | \& { |
|
|
3687 | \& userdata *u = ev_userdata (EV_A); |
|
|
3688 | \& pthread_mutex_unlock (&u\->lock); |
|
|
3689 | \& } |
|
|
3690 | \& |
|
|
3691 | \& static void |
|
|
3692 | \& l_acquire (EV_P) |
|
|
3693 | \& { |
|
|
3694 | \& userdata *u = ev_userdata (EV_A); |
|
|
3695 | \& pthread_mutex_lock (&u\->lock); |
|
|
3696 | \& } |
|
|
3697 | .Ve |
|
|
3698 | .PP |
|
|
3699 | The event loop thread first acquires the mutex, and then jumps straight |
|
|
3700 | into \f(CW\*(C`ev_run\*(C'\fR: |
|
|
3701 | .PP |
|
|
3702 | .Vb 4 |
|
|
3703 | \& void * |
|
|
3704 | \& l_run (void *thr_arg) |
|
|
3705 | \& { |
|
|
3706 | \& struct ev_loop *loop = (struct ev_loop *)thr_arg; |
|
|
3707 | \& |
|
|
3708 | \& l_acquire (EV_A); |
|
|
3709 | \& pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, 0); |
|
|
3710 | \& ev_run (EV_A_ 0); |
|
|
3711 | \& l_release (EV_A); |
|
|
3712 | \& |
|
|
3713 | \& return 0; |
|
|
3714 | \& } |
|
|
3715 | .Ve |
|
|
3716 | .PP |
|
|
3717 | Instead of invoking all pending watchers, the \f(CW\*(C`l_invoke\*(C'\fR callback will |
|
|
3718 | signal the main thread via some unspecified mechanism (signals? pipe |
|
|
3719 | writes? \f(CW\*(C`Async::Interrupt\*(C'\fR?) and then waits until all pending watchers |
|
|
3720 | have been called (in a while loop because a) spurious wakeups are possible |
|
|
3721 | and b) skipping inter-thread-communication when there are no pending |
|
|
3722 | watchers is very beneficial): |
|
|
3723 | .PP |
|
|
3724 | .Vb 4 |
|
|
3725 | \& static void |
|
|
3726 | \& l_invoke (EV_P) |
|
|
3727 | \& { |
|
|
3728 | \& userdata *u = ev_userdata (EV_A); |
|
|
3729 | \& |
|
|
3730 | \& while (ev_pending_count (EV_A)) |
|
|
3731 | \& { |
|
|
3732 | \& wake_up_other_thread_in_some_magic_or_not_so_magic_way (); |
|
|
3733 | \& pthread_cond_wait (&u\->invoke_cv, &u\->lock); |
|
|
3734 | \& } |
|
|
3735 | \& } |
|
|
3736 | .Ve |
|
|
3737 | .PP |
|
|
3738 | Now, whenever the main thread gets told to invoke pending watchers, it |
|
|
3739 | will grab the lock, call \f(CW\*(C`ev_invoke_pending\*(C'\fR and then signal the loop |
|
|
3740 | thread to continue: |
|
|
3741 | .PP |
|
|
3742 | .Vb 4 |
|
|
3743 | \& static void |
|
|
3744 | \& real_invoke_pending (EV_P) |
|
|
3745 | \& { |
|
|
3746 | \& userdata *u = ev_userdata (EV_A); |
|
|
3747 | \& |
|
|
3748 | \& pthread_mutex_lock (&u\->lock); |
|
|
3749 | \& ev_invoke_pending (EV_A); |
|
|
3750 | \& pthread_cond_signal (&u\->invoke_cv); |
|
|
3751 | \& pthread_mutex_unlock (&u\->lock); |
|
|
3752 | \& } |
|
|
3753 | .Ve |
|
|
3754 | .PP |
|
|
3755 | Whenever you want to start/stop a watcher or do other modifications to an |
|
|
3756 | event loop, you will now have to lock: |
|
|
3757 | .PP |
|
|
3758 | .Vb 2 |
|
|
3759 | \& ev_timer timeout_watcher; |
|
|
3760 | \& userdata *u = ev_userdata (EV_A); |
|
|
3761 | \& |
|
|
3762 | \& ev_timer_init (&timeout_watcher, timeout_cb, 5.5, 0.); |
|
|
3763 | \& |
|
|
3764 | \& pthread_mutex_lock (&u\->lock); |
|
|
3765 | \& ev_timer_start (EV_A_ &timeout_watcher); |
|
|
3766 | \& ev_async_send (EV_A_ &u\->async_w); |
|
|
3767 | \& pthread_mutex_unlock (&u\->lock); |
|
|
3768 | .Ve |
|
|
3769 | .PP |
|
|
3770 | Note that sending the \f(CW\*(C`ev_async\*(C'\fR watcher is required because otherwise |
|
|
3771 | an event loop currently blocking in the kernel will have no knowledge |
|
|
3772 | about the newly added timer. By waking up the loop it will pick up any new |
|
|
3773 | watchers in the next event loop iteration. |
|
|
3774 | .SS "\s-1THREADS\s0, \s-1COROUTINES\s0, \s-1CONTINUATIONS\s0, \s-1QUEUES\s0... \s-1INSTEAD\s0 \s-1OF\s0 \s-1CALLBACKS\s0" |
|
|
3775 | .IX Subsection "THREADS, COROUTINES, CONTINUATIONS, QUEUES... INSTEAD OF CALLBACKS" |
|
|
3776 | While the overhead of a callback that e.g. schedules a thread is small, it |
|
|
3777 | is still an overhead. If you embed libev, and your main usage is with some |
|
|
3778 | kind of threads or coroutines, you might want to customise libev so that |
|
|
3779 | it doesn't need callbacks anymore. 
|
|
3780 | .PP |
|
|
3781 | Imagine you have coroutines that you can switch to using a function |
|
|
3782 | \&\f(CW\*(C`switch_to (coro)\*(C'\fR, that libev runs in a coroutine called \f(CW\*(C`libev_coro\*(C'\fR |
|
|
3783 | and that due to some magic, the currently active coroutine is stored in a |
|
|
3784 | global called \f(CW\*(C`current_coro\*(C'\fR. Then you can build your own \*(L"wait for libev |
|
|
3785 | event\*(R" primitive by changing \f(CW\*(C`EV_CB_DECLARE\*(C'\fR and \f(CW\*(C`EV_CB_INVOKE\*(C'\fR (note |
|
|
3786 | the differing \f(CW\*(C`;\*(C'\fR conventions): |
|
|
3787 | .PP |
|
|
3788 | .Vb 2 |
|
|
3789 | \& #define EV_CB_DECLARE(type) struct my_coro *cb; |
|
|
3790 | \& #define EV_CB_INVOKE(watcher) switch_to ((watcher)\->cb) |
|
|
3791 | .Ve |
|
|
3792 | .PP |
|
|
3793 | That means instead of having a C callback function, you store the |
|
|
3794 | coroutine to switch to in each watcher, and instead of having libev call |
|
|
3795 | your callback, you instead have it switch to that coroutine. |
|
|
3796 | .PP |
|
|
3797 | A coroutine might now wait for an event with a function called |
|
|
3798 | \&\f(CW\*(C`wait_for_event\*(C'\fR (the watcher needs to be started, as always, but it doesn't 
|
|
3799 | matter when, or whether the watcher is active or not when this function is |
|
|
3800 | called): |
|
|
3801 | .PP |
|
|
3802 | .Vb 6 |
|
|
3803 | \& void |
|
|
3804 | \& wait_for_event (ev_watcher *w) |
|
|
3805 | \& { |
|
|
3806 | \& ev_cb_set (w) = current_coro; |
|
|
3807 | \& switch_to (libev_coro); |
|
|
3808 | \& } |
|
|
3809 | .Ve |
|
|
3810 | .PP |
|
|
3811 | That basically suspends the coroutine inside \f(CW\*(C`wait_for_event\*(C'\fR and |
|
|
3812 | continues the libev coroutine, which, when appropriate, switches back to |
|
|
3813 | this or any other coroutine. I am sure if you use this you are on your own :) 
|
|
3814 | .PP |
|
|
3815 | You can do similar tricks if you have, say, threads with an event queue \- |
|
|
3816 | instead of storing a coroutine, you store the queue object and instead of |
|
|
3817 | switching to a coroutine, you push the watcher onto the queue and notify |
|
|
3818 | any waiters. |
|
|
3819 | .PP |
|
|
3820 | To embed libev, see \s-1EMBEDDING\s0, but in short, it's easiest to create two |
|
|
3821 | files, \fImy_ev.h\fR and \fImy_ev.c\fR that include the respective libev files: |
|
|
3822 | .PP |
|
|
3823 | .Vb 4 |
|
|
3824 | \& // my_ev.h |
|
|
3825 | \& #define EV_CB_DECLARE(type) struct my_coro *cb; |
|
|
3826 | \& #define EV_CB_INVOKE(watcher) switch_to ((watcher)\->cb); |
|
|
3827 | \& #include "../libev/ev.h" |
|
|
3828 | \& |
|
|
3829 | \& // my_ev.c |
|
|
3830 | \& #define EV_H "my_ev.h" |
|
|
3831 | \& #include "../libev/ev.c" |
|
|
3832 | .Ve |
|
|
3833 | .PP |
|
|
3834 | And then use \fImy_ev.h\fR when you would normally use \fIev.h\fR, and compile |
|
|
3835 | \&\fImy_ev.c\fR into your project. When properly specifying include paths, you |
|
|
3836 | can even use \fIev.h\fR as header file name directly. |
3480 | .SH "LIBEVENT EMULATION" |
3837 | .SH "LIBEVENT EMULATION" |
3481 | .IX Header "LIBEVENT EMULATION" |
3838 | .IX Header "LIBEVENT EMULATION" |
3482 | Libev offers a compatibility emulation layer for libevent. It cannot |
3839 | Libev offers a compatibility emulation layer for libevent. It cannot |
3483 | emulate the internals of libevent, so here are some usage hints: |
3840 | emulate the internals of libevent, so here are some usage hints: |
|
|
3841 | .IP "\(bu" 4 |
|
|
3842 | Only the libevent\-1.4.1\-beta \s-1API\s0 is being emulated. |
|
|
3843 | .Sp |
|
|
3844 | This was the newest libevent version available when libev was implemented, |
|
|
3845 | and is still mostly unchanged in 2010. |
3484 | .IP "\(bu" 4 |
3846 | .IP "\(bu" 4 |
3485 | Use it by including <event.h>, as usual. |
3847 | Use it by including <event.h>, as usual. |
3486 | .IP "\(bu" 4 |
3848 | .IP "\(bu" 4 |
3487 | The following members are fully supported: ev_base, ev_callback, |
3849 | The following members are fully supported: ev_base, ev_callback, |
3488 | ev_arg, ev_fd, ev_res, ev_events. |
3850 | ev_arg, ev_fd, ev_res, ev_events. |
… | |
… | |
3494 | Priorities are not currently supported. Initialising priorities |
3856 | Priorities are not currently supported. Initialising priorities |
3495 | will fail and all watchers will have the same priority, even though there |
3857 | will fail and all watchers will have the same priority, even though there |
3496 | is an ev_pri field. |
3858 | is an ev_pri field. |
3497 | .IP "\(bu" 4 |
3859 | .IP "\(bu" 4 |
3498 | In libevent, the last base created gets the signals, in libev, the |
3860 | In libevent, the last base created gets the signals, in libev, the |
3499 | first base created (== the default loop) gets the signals. |
3861 | base that registered the signal gets the signals. |
3500 | .IP "\(bu" 4 |
3862 | .IP "\(bu" 4 |
3501 | Other members are not supported. |
3863 | Other members are not supported. |
3502 | .IP "\(bu" 4 |
3864 | .IP "\(bu" 4 |
3503 | The libev emulation is \fInot\fR \s-1ABI\s0 compatible to libevent, you need |
3865 | The libev emulation is \fInot\fR \s-1ABI\s0 compatible to libevent, you need |
3504 | to use the libev header file and library. |
3866 | to use the libev header file and library. |
… | |
… | |
3522 | Care has been taken to keep the overhead low. The only data member the \*(C+ |
3884 | Care has been taken to keep the overhead low. The only data member the \*(C+ |
3523 | classes add (compared to plain C\-style watchers) is the event loop pointer |
3885 | classes add (compared to plain C\-style watchers) is the event loop pointer |
3524 | that the watcher is associated with (or no additional members at all if |
3886 | that the watcher is associated with (or no additional members at all if |
3525 | you disable \f(CW\*(C`EV_MULTIPLICITY\*(C'\fR when embedding libev). |
3887 | you disable \f(CW\*(C`EV_MULTIPLICITY\*(C'\fR when embedding libev). |
3526 | .PP |
3888 | .PP |
3527 | Currently, functions, and static and non-static member functions can be |
3889 | Currently, functions, static and non-static member functions and classes |
3528 | used as callbacks. Other types should be easy to add as long as they only |
3890 | with \f(CW\*(C`operator ()\*(C'\fR can be used as callbacks. Other types should be easy |
3529 | need one additional pointer for context. If you need support for other |
3891 | to add as long as they only need one additional pointer for context. If |
3530 | types of functors please contact the author (preferably after implementing |
3892 | you need support for other types of functors please contact the author |
3531 | it). |
3893 | (preferably after implementing it). |
3532 | .PP |
3894 | .PP |
3533 | Here is a list of things available in the \f(CW\*(C`ev\*(C'\fR namespace: |
3895 | Here is a list of things available in the \f(CW\*(C`ev\*(C'\fR namespace: |
3534 | .ie n .IP """ev::READ"", ""ev::WRITE"" etc." 4 |
3896 | .ie n .IP """ev::READ"", ""ev::WRITE"" etc." 4 |
3535 | .el .IP "\f(CWev::READ\fR, \f(CWev::WRITE\fR etc." 4 |
3897 | .el .IP "\f(CWev::READ\fR, \f(CWev::WRITE\fR etc." 4 |
3536 | .IX Item "ev::READ, ev::WRITE etc." |
3898 | .IX Item "ev::READ, ev::WRITE etc." |
… | |
… | |
4374 | .PP |
4736 | .PP |
4375 | .Vb 2 |
4737 | .Vb 2 |
4376 | \& #include "ev_cpp.h" |
4738 | \& #include "ev_cpp.h" |
4377 | \& #include "ev.c" |
4739 | \& #include "ev.c" |
4378 | .Ve |
4740 | .Ve |
4379 | .SH "INTERACTION WITH OTHER PROGRAMS OR LIBRARIES" |
4741 | .SH "INTERACTION WITH OTHER PROGRAMS, LIBRARIES OR THE ENVIRONMENT" |
4380 | .IX Header "INTERACTION WITH OTHER PROGRAMS OR LIBRARIES" |
4742 | .IX Header "INTERACTION WITH OTHER PROGRAMS, LIBRARIES OR THE ENVIRONMENT" |
4381 | .SS "\s-1THREADS\s0 \s-1AND\s0 \s-1COROUTINES\s0" |
4743 | .SS "\s-1THREADS\s0 \s-1AND\s0 \s-1COROUTINES\s0" |
4382 | .IX Subsection "THREADS AND COROUTINES" |
4744 | .IX Subsection "THREADS AND COROUTINES" |
4383 | \fI\s-1THREADS\s0\fR |
4745 | \fI\s-1THREADS\s0\fR |
4384 | .IX Subsection "THREADS" |
4746 | .IX Subsection "THREADS" |
4385 | .PP |
4747 | .PP |
… | |
… | |
4432 | An example use would be to communicate signals or other events that only |
4794 | An example use would be to communicate signals or other events that only |
4433 | work in the default loop by registering the signal watcher with the |
4795 | work in the default loop by registering the signal watcher with the |
4434 | default loop and triggering an \f(CW\*(C`ev_async\*(C'\fR watcher from the default loop |
4796 | default loop and triggering an \f(CW\*(C`ev_async\*(C'\fR watcher from the default loop |
4435 | watcher callback into the event loop interested in the signal. |
4797 | watcher callback into the event loop interested in the signal. |
4436 | .PP |
4798 | .PP |
4437 | \s-1THREAD\s0 \s-1LOCKING\s0 \s-1EXAMPLE\s0 |
4799 | See also \*(L"\s-1THREAD\s0 \s-1LOCKING\s0 \s-1EXAMPLE\s0\*(R". |
4438 | .IX Subsection "THREAD LOCKING EXAMPLE" |
|
|
4439 | .PP |
|
|
4440 | Here is a fictitious example of how to run an event loop in a different |
|
|
4441 | thread than where callbacks are being invoked and watchers are |
|
|
4442 | created/added/removed. |
|
|
4443 | .PP |
|
|
4444 | For a real-world example, see the \f(CW\*(C`EV::Loop::Async\*(C'\fR perl module, |
|
|
4445 | which uses exactly this technique (which is suited for many high-level |
|
|
4446 | languages). |
|
|
4447 | .PP |
|
|
4448 | The example uses a pthread mutex to protect the loop data, a condition |
|
|
4449 | variable to wait for callback invocations, an async watcher to notify the |
|
|
4450 | event loop thread and an unspecified mechanism to wake up the main thread. |
|
|
4451 | .PP |
|
|
4452 | First, you need to associate some data with the event loop: |
|
|
4453 | .PP |
|
|
4454 | .Vb 6 |
|
|
4455 | \& typedef struct { |
|
|
4456 | \& mutex_t lock; /* global loop lock */ |
|
|
4457 | \& ev_async async_w; |
|
|
4458 | \& thread_t tid; |
|
|
4459 | \& cond_t invoke_cv; |
|
|
4460 | \& } userdata; |
|
|
4461 | \& |
|
|
4462 | \& void prepare_loop (EV_P) |
|
|
4463 | \& { |
|
|
4464 | \& // for simplicity, we use a static userdata struct. |
|
|
4465 | \& static userdata u; |
|
|
4466 | \& |
|
|
4467 | \& ev_async_init (&u\->async_w, async_cb); |
|
|
4468 | \& ev_async_start (EV_A_ &u\->async_w); |
|
|
4469 | \& |
|
|
4470 | \& pthread_mutex_init (&u\->lock, 0); |
|
|
4471 | \& pthread_cond_init (&u\->invoke_cv, 0); |
|
|
4472 | \& |
|
|
4473 | \& // now associate this with the loop |
|
|
4474 | \& ev_set_userdata (EV_A_ u); |
|
|
4475 | \& ev_set_invoke_pending_cb (EV_A_ l_invoke); |
|
|
4476 | \& ev_set_loop_release_cb (EV_A_ l_release, l_acquire); |
|
|
4477 | \& |
|
|
4478 | \& // then create the thread running ev_loop |
|
|
4479 | \& pthread_create (&u\->tid, 0, l_run, EV_A); |
|
|
4480 | \& } |
|
|
4481 | .Ve |
|
|
4482 | .PP |
|
|
4483 | The callback for the \f(CW\*(C`ev_async\*(C'\fR watcher does nothing: the watcher is used |
|
|
4484 | solely to wake up the event loop so it takes notice of any new watchers |
|
|
4485 | that might have been added: |
|
|
4486 | .PP |
|
|
4487 | .Vb 5 |
|
|
4488 | \& static void |
|
|
4489 | \& async_cb (EV_P_ ev_async *w, int revents) |
|
|
4490 | \& { |
|
|
4491 | \& // just used for the side effects |
|
|
4492 | \& } |
|
|
4493 | .Ve |
|
|
4494 | .PP |
|
|
4495 | The \f(CW\*(C`l_release\*(C'\fR and \f(CW\*(C`l_acquire\*(C'\fR callbacks simply unlock/lock the mutex |
|
|
4496 | protecting the loop data, respectively. |
|
|
4497 | .PP |
|
|
4498 | .Vb 6 |
|
|
4499 | \& static void |
|
|
4500 | \& l_release (EV_P) |
|
|
4501 | \& { |
|
|
4502 | \& userdata *u = ev_userdata (EV_A); |
|
|
4503 | \& pthread_mutex_unlock (&u\->lock); |
|
|
4504 | \& } |
|
|
4505 | \& |
|
|
4506 | \& static void |
|
|
4507 | \& l_acquire (EV_P) |
|
|
4508 | \& { |
|
|
4509 | \& userdata *u = ev_userdata (EV_A); |
|
|
4510 | \& pthread_mutex_lock (&u\->lock); |
|
|
4511 | \& } |
|
|
4512 | .Ve |
|
|
4513 | .PP |
|
|
4514 | The event loop thread first acquires the mutex, and then jumps straight |
|
|
4515 | into \f(CW\*(C`ev_run\*(C'\fR: |
|
|
4516 | .PP |
|
|
4517 | .Vb 4 |
|
|
4518 | \& void * |
|
|
4519 | \& l_run (void *thr_arg) |
|
|
4520 | \& { |
|
|
4521 | \& struct ev_loop *loop = (struct ev_loop *)thr_arg; |
|
|
4522 | \& |
|
|
4523 | \& l_acquire (EV_A); |
|
|
4524 | \& pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, 0); |
|
|
4525 | \& ev_run (EV_A_ 0); |
|
|
4526 | \& l_release (EV_A); |
|
|
4527 | \& |
|
|
4528 | \& return 0; |
|
|
4529 | \& } |
|
|
4530 | .Ve |
|
|
4531 | .PP |
|
|
4532 | Instead of invoking all pending watchers, the \f(CW\*(C`l_invoke\*(C'\fR callback will |
|
|
4533 | signal the main thread via some unspecified mechanism (signals? pipe |
|
|
4534 | writes? \f(CW\*(C`Async::Interrupt\*(C'\fR?) and then waits until all pending watchers |
|
|
4535 | have been called (in a while loop because a) spurious wakeups are possible |
|
|
4536 | and b) skipping inter-thread-communication when there are no pending |
|
|
4537 | watchers is very beneficial): |
|
|
4538 | .PP |
|
|
4539 | .Vb 4 |
|
|
4540 | \& static void |
|
|
4541 | \& l_invoke (EV_P) |
|
|
4542 | \& { |
|
|
4543 | \& userdata *u = ev_userdata (EV_A); |
|
|
4544 | \& |
|
|
4545 | \& while (ev_pending_count (EV_A)) |
|
|
4546 | \& { |
|
|
4547 | \& wake_up_other_thread_in_some_magic_or_not_so_magic_way (); |
|
|
4548 | \& pthread_cond_wait (&u\->invoke_cv, &u\->lock); |
|
|
4549 | \& } |
|
|
4550 | \& } |
|
|
4551 | .Ve |
|
|
4552 | .PP |
|
|
4553 | Now, whenever the main thread gets told to invoke pending watchers, it |
|
|
4554 | will grab the lock, call \f(CW\*(C`ev_invoke_pending\*(C'\fR and then signal the loop |
|
|
4555 | thread to continue: |
|
|
4556 | .PP |
|
|
4557 | .Vb 4 |
|
|
4558 | \& static void |
|
|
4559 | \& real_invoke_pending (EV_P) |
|
|
4560 | \& { |
|
|
4561 | \& userdata *u = ev_userdata (EV_A); |
|
|
4562 | \& |
|
|
4563 | \& pthread_mutex_lock (&u\->lock); |
|
|
4564 | \& ev_invoke_pending (EV_A); |
|
|
4565 | \& pthread_cond_signal (&u\->invoke_cv); |
|
|
4566 | \& pthread_mutex_unlock (&u\->lock); |
|
|
4567 | \& } |
|
|
4568 | .Ve |
|
|
4569 | .PP |
|
|
4570 | Whenever you want to start/stop a watcher or do other modifications to an |
|
|
4571 | event loop, you will now have to lock: |
|
|
4572 | .PP |
|
|
4573 | .Vb 2 |
|
|
4574 | \& ev_timer timeout_watcher; |
|
|
4575 | \& userdata *u = ev_userdata (EV_A); |
|
|
4576 | \& |
|
|
4577 | \& ev_timer_init (&timeout_watcher, timeout_cb, 5.5, 0.); |
|
|
4578 | \& |
|
|
4579 | \& pthread_mutex_lock (&u\->lock); |
|
|
4580 | \& ev_timer_start (EV_A_ &timeout_watcher); |
|
|
4581 | \& ev_async_send (EV_A_ &u\->async_w); |
|
|
4582 | \& pthread_mutex_unlock (&u\->lock); |
|
|
4583 | .Ve |
|
|
4584 | .PP |
|
|
4585 | Note that sending the \f(CW\*(C`ev_async\*(C'\fR watcher is required because otherwise |
|
|
4586 | an event loop currently blocking in the kernel will have no knowledge |
|
|
4587 | about the newly added timer. By waking up the loop it will pick up any new |
|
|
4588 | watchers in the next event loop iteration. |
|
|
4589 | .PP |
4800 | .PP |
4590 | \fI\s-1COROUTINES\s0\fR |
4801 | \fI\s-1COROUTINES\s0\fR |
4591 | .IX Subsection "COROUTINES" |
4802 | .IX Subsection "COROUTINES" |
4592 | .PP |
4803 | .PP |
4593 | Libev is very accommodating to coroutines (\*(L"cooperative threads\*(R"): |
4804 | Libev is very accommodating to coroutines (\*(L"cooperative threads\*(R"): |