… | |
… | |
33 | # define BOOT_PAGESIZE (void)0 |
33 | # define BOOT_PAGESIZE (void)0 |
34 | #endif |
34 | #endif |
35 | |
35 | |
#if USE_VALGRIND
# include <valgrind/valgrind.h>
/* Register the coroutine stack [start, end) with valgrind so it does not
 * flag stack switches as wild accesses; the returned id is stored in the
 * cctx for deregistration when the stack is destroyed. */
# define REGISTER_STACK(cctx,start,end) (cctx)->valgrind_id = VALGRIND_STACK_REGISTER ((start), (end))
#else
/* no-op when building without valgrind support */
# define REGISTER_STACK(cctx,start,end)
#endif

/* the maximum number of idle cctx that will be pooled */
#define MAX_IDLE_CCTX 8
42 | |
45 | |
… | |
… | |
135 | typedef struct coro_cctx { |
138 | typedef struct coro_cctx { |
136 | struct coro_cctx *next; |
139 | struct coro_cctx *next; |
137 | |
140 | |
138 | /* the stack */ |
141 | /* the stack */ |
139 | void *sptr; |
142 | void *sptr; |
140 | long ssize; /* positive == mmap, otherwise malloc */ |
143 | ssize_t ssize; /* positive == mmap, otherwise malloc */ |
141 | |
144 | |
142 | /* cpu state */ |
145 | /* cpu state */ |
143 | void *idle_sp; /* sp of top-level transfer/schedule/cede call */ |
146 | void *idle_sp; /* sp of top-level transfer/schedule/cede call */ |
144 | JMPENV *idle_te; /* same as idle_sp, but for top_env, TODO: remove once stable */ |
147 | JMPENV *idle_te; /* same as idle_sp, but for top_env, TODO: remove once stable */ |
145 | JMPENV *top_env; |
148 | JMPENV *top_env; |
… | |
… | |
185 | int prio; |
188 | int prio; |
186 | }; |
189 | }; |
187 | |
190 | |
188 | typedef struct coro *Coro__State; |
191 | typedef struct coro *Coro__State; |
189 | typedef struct coro *Coro__State_or_hashref; |
192 | typedef struct coro *Coro__State_or_hashref; |
|
|
193 | |
|
|
194 | /** Coro ********************************************************************/ |
|
|
195 | |
|
|
196 | #define PRIO_MAX 3 |
|
|
197 | #define PRIO_HIGH 1 |
|
|
198 | #define PRIO_NORMAL 0 |
|
|
199 | #define PRIO_LOW -1 |
|
|
200 | #define PRIO_IDLE -3 |
|
|
201 | #define PRIO_MIN -4 |
|
|
202 | |
|
|
203 | /* for Coro.pm */ |
|
|
204 | static SV *coro_current; |
|
|
205 | static AV *coro_ready [PRIO_MAX-PRIO_MIN+1]; |
|
|
206 | static int coro_nready; |
|
|
207 | |
|
|
208 | /** lowlevel stuff **********************************************************/ |
190 | |
209 | |
191 | static AV * |
210 | static AV * |
192 | coro_clone_padlist (CV *cv) |
211 | coro_clone_padlist (CV *cv) |
193 | { |
212 | { |
194 | AV *padlist = CvPADLIST (cv); |
213 | AV *padlist = CvPADLIST (cv); |
… | |
… | |
303 | av_extend (av, AvMAX (av) + 1); |
322 | av_extend (av, AvMAX (av) + 1); |
304 | |
323 | |
305 | AvARRAY (av)[++AvFILLp (av)] = (SV *)CvPADLIST (cv); |
324 | AvARRAY (av)[++AvFILLp (av)] = (SV *)CvPADLIST (cv); |
306 | } |
325 | } |
307 | |
326 | |
|
|
327 | /** load & save, init *******************************************************/ |
|
|
328 | |
308 | #define SB do { |
329 | #define SB do { |
309 | #define SE } while (0) |
330 | #define SE } while (0) |
310 | |
331 | |
311 | #define REPLACE_SV(sv,val) SB SvREFCNT_dec (sv); (sv) = (val); (val) = 0; SE |
332 | #define REPLACE_SV(sv,val) SB SvREFCNT_dec (sv); (sv) = (val); (val) = 0; SE |
312 | |
333 | |
… | |
… | |
504 | #if !PERL_VERSION_ATLEAST (5,9,0) |
525 | #if !PERL_VERSION_ATLEAST (5,9,0) |
505 | Safefree (PL_retstack); |
526 | Safefree (PL_retstack); |
506 | #endif |
527 | #endif |
507 | } |
528 | } |
508 | |
529 | |
|
|
530 | /** coroutine stack handling ************************************************/ |
|
|
531 | |
509 | static void |
532 | static void |
510 | setup_coro (struct coro *coro) |
533 | setup_coro (struct coro *coro) |
511 | { |
534 | { |
512 | /* |
535 | /* |
513 | * emulate part of the perl startup here. |
536 | * emulate part of the perl startup here. |
… | |
… | |
610 | |
633 | |
611 | cctx->ssize = ((STACKSIZE * sizeof (long) + PAGESIZE - 1) / PAGESIZE + STACKGUARD) * PAGESIZE; |
634 | cctx->ssize = ((STACKSIZE * sizeof (long) + PAGESIZE - 1) / PAGESIZE + STACKGUARD) * PAGESIZE; |
612 | /* mmap supposedly does allocate-on-write for us */ |
635 | /* mmap supposedly does allocate-on-write for us */ |
613 | cctx->sptr = mmap (0, cctx->ssize, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); |
636 | cctx->sptr = mmap (0, cctx->ssize, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); |
614 | |
637 | |
615 | if (cctx->sptr == (void *)-1) |
638 | if (cctx->sptr != (void *)-1) |
616 | { |
|
|
617 | perror ("FATAL: unable to mmap stack for coroutine"); |
|
|
618 | _exit (EXIT_FAILURE); |
|
|
619 | } |
639 | { |
620 | |
|
|
621 | # if STACKGUARD |
640 | # if STACKGUARD |
622 | mprotect (cctx->sptr, STACKGUARD * PAGESIZE, PROT_NONE); |
641 | mprotect (cctx->sptr, STACKGUARD * PAGESIZE, PROT_NONE); |
623 | # endif |
642 | # endif |
624 | |
643 | REGISTER_STACK ( |
625 | #else |
644 | cctx, |
626 | |
|
|
627 | cctx->ssize = STACKSIZE * (long)sizeof (long); |
|
|
628 | New (0, cctx->sptr, STACKSIZE, long); |
|
|
629 | |
|
|
630 | if (!cctx->sptr) |
|
|
631 | { |
|
|
632 | perror ("FATAL: unable to malloc stack for coroutine"); |
|
|
633 | _exit (EXIT_FAILURE); |
|
|
634 | } |
|
|
635 | |
|
|
636 | #endif |
|
|
637 | |
|
|
638 | #if USE_VALGRIND |
|
|
639 | cctx->valgrind_id = VALGRIND_STACK_REGISTER ( |
|
|
640 | STACKGUARD * PAGESIZE + (char *)cctx->sptr, |
645 | STACKGUARD * PAGESIZE + (char *)cctx->sptr, |
641 | cctx->ssize + (char *)cctx->sptr |
646 | cctx->ssize + (char *)cctx->sptr |
642 | ); |
647 | ); |
643 | #endif |
|
|
644 | |
648 | |
645 | coro_create (&cctx->cctx, coro_run, (void *)cctx, cctx->sptr, cctx->ssize); |
649 | coro_create (&cctx->cctx, coro_run, (void *)cctx, cctx->sptr, cctx->ssize); |
|
|
650 | } |
|
|
651 | else |
|
|
652 | #endif |
|
|
653 | { |
|
|
654 | cctx->ssize = -STACKSIZE * (long)sizeof (long); |
|
|
655 | New (0, cctx->sptr, STACKSIZE, long); |
|
|
656 | |
|
|
657 | if (!cctx->sptr) |
|
|
658 | { |
|
|
659 | perror ("FATAL: unable to allocate stack for coroutine"); |
|
|
660 | _exit (EXIT_FAILURE); |
|
|
661 | } |
|
|
662 | |
|
|
663 | REGISTER_STACK ( |
|
|
664 | cctx, |
|
|
665 | (char *)cctx->sptr, |
|
|
666 | (char *)cctx->sptr - cctx->ssize |
|
|
667 | ); |
|
|
668 | |
|
|
669 | coro_create (&cctx->cctx, coro_run, (void *)cctx, cctx->sptr, -cctx->ssize); |
|
|
670 | } |
646 | |
671 | |
647 | return cctx; |
672 | return cctx; |
648 | } |
673 | } |
649 | |
674 | |
650 | static void |
675 | static void |
… | |
… | |
658 | #if USE_VALGRIND |
683 | #if USE_VALGRIND |
659 | VALGRIND_STACK_DEREGISTER (cctx->valgrind_id); |
684 | VALGRIND_STACK_DEREGISTER (cctx->valgrind_id); |
660 | #endif |
685 | #endif |
661 | |
686 | |
662 | #if HAVE_MMAP |
687 | #if HAVE_MMAP |
|
|
688 | if (cctx->ssize > 0) |
663 | munmap (cctx->sptr, cctx->ssize); |
689 | munmap (cctx->sptr, cctx->ssize); |
664 | #else |
690 | else |
|
|
691 | #endif |
665 | Safefree (cctx->sptr); |
692 | Safefree (cctx->sptr); |
666 | #endif |
|
|
667 | |
693 | |
668 | Safefree (cctx); |
694 | Safefree (cctx); |
669 | } |
695 | } |
670 | |
696 | |
671 | static coro_cctx * |
697 | static coro_cctx * |
… | |
… | |
704 | |
730 | |
705 | ++cctx_idle; |
731 | ++cctx_idle; |
706 | cctx->next = cctx_first; |
732 | cctx->next = cctx_first; |
707 | cctx_first = cctx; |
733 | cctx_first = cctx; |
708 | } |
734 | } |
|
|
735 | |
|
|
736 | /** coroutine switching *****************************************************/ |
709 | |
737 | |
710 | /* never call directly, always through the coro_state_transfer global variable */ |
738 | /* never call directly, always through the coro_state_transfer global variable */ |
711 | static void NOINLINE |
739 | static void NOINLINE |
712 | transfer (struct coro *prev, struct coro *next) |
740 | transfer (struct coro *prev, struct coro *next) |
713 | { |
741 | { |
… | |
… | |
803 | struct coro *prev, *next; |
831 | struct coro *prev, *next; |
804 | }; |
832 | }; |
805 | |
833 | |
806 | #define TRANSFER(ta) transfer ((ta).prev, (ta).next) |
834 | #define TRANSFER(ta) transfer ((ta).prev, (ta).next) |
807 | |
835 | |
|
|
836 | /** high level stuff ********************************************************/ |
|
|
837 | |
808 | static int |
838 | static int |
809 | coro_state_destroy (struct coro *coro) |
839 | coro_state_destroy (struct coro *coro) |
810 | { |
840 | { |
811 | if (coro->flags & CF_DESTROYED) |
841 | if (coro->flags & CF_DESTROYED) |
812 | return 0; |
842 | return 0; |
813 | |
843 | |
814 | coro->flags |= CF_DESTROYED; |
844 | coro->flags |= CF_DESTROYED; |
|
|
845 | |
|
|
846 | if (coro->flags & CF_READY) |
|
|
847 | { |
|
|
848 | /* reduce nready, as destroying a ready coro effectively unreadies it */ |
|
|
849 | /* alternative: look through all ready queues and remove the coro */ |
|
|
850 | LOCK; |
|
|
851 | --coro_nready; |
|
|
852 | UNLOCK; |
|
|
853 | } |
|
|
854 | else |
|
|
855 | coro->flags |= CF_READY; /* make sure it is NOT put into the readyqueue */ |
815 | |
856 | |
816 | if (coro->mainstack && coro->mainstack != main_mainstack) |
857 | if (coro->mainstack && coro->mainstack != main_mainstack) |
817 | { |
858 | { |
|
|
859 | struct coro temp; |
|
|
860 | |
818 | assert (!(coro->flags & CF_RUNNING)); |
861 | assert (!(coro->flags & CF_RUNNING)); |
819 | |
862 | |
820 | struct coro temp; |
|
|
821 | Zero (&temp, 1, struct coro); |
863 | Zero (&temp, 1, struct coro); |
822 | temp.save = CORO_SAVE_ALL; |
864 | temp.save = CORO_SAVE_ALL; |
823 | |
865 | |
824 | if (coro->flags & CF_RUNNING) |
866 | if (coro->flags & CF_RUNNING) |
825 | croak ("FATAL: tried to destroy currently running coroutine"); |
867 | croak ("FATAL: tried to destroy currently running coroutine"); |
… | |
… | |
926 | return old_save; |
968 | return old_save; |
927 | } |
969 | } |
928 | |
970 | |
929 | /** Coro ********************************************************************/ |
971 | /** Coro ********************************************************************/ |
930 | |
972 | |
931 | #define PRIO_MAX 3 |
|
|
932 | #define PRIO_HIGH 1 |
|
|
933 | #define PRIO_NORMAL 0 |
|
|
934 | #define PRIO_LOW -1 |
|
|
935 | #define PRIO_IDLE -3 |
|
|
936 | #define PRIO_MIN -4 |
|
|
937 | |
|
|
938 | /* for Coro.pm */ |
|
|
939 | static SV *coro_current; |
|
|
940 | static AV *coro_ready [PRIO_MAX-PRIO_MIN+1]; |
|
|
941 | static int coro_nready; |
|
|
942 | |
|
|
943 | static void |
973 | static void |
944 | coro_enq (SV *coro_sv) |
974 | coro_enq (SV *coro_sv) |
945 | { |
975 | { |
946 | av_push (coro_ready [SvSTATE (coro_sv)->prio - PRIO_MIN], coro_sv); |
976 | av_push (coro_ready [SvSTATE (coro_sv)->prio - PRIO_MIN], coro_sv); |
947 | coro_nready++; |
|
|
948 | } |
977 | } |
949 | |
978 | |
950 | static SV * |
979 | static SV * |
951 | coro_deq (int min_prio) |
980 | coro_deq (int min_prio) |
952 | { |
981 | { |
… | |
… | |
956 | if (min_prio < 0) |
985 | if (min_prio < 0) |
957 | min_prio = 0; |
986 | min_prio = 0; |
958 | |
987 | |
959 | for (prio = PRIO_MAX - PRIO_MIN + 1; --prio >= min_prio; ) |
988 | for (prio = PRIO_MAX - PRIO_MIN + 1; --prio >= min_prio; ) |
960 | if (AvFILLp (coro_ready [prio]) >= 0) |
989 | if (AvFILLp (coro_ready [prio]) >= 0) |
961 | { |
|
|
962 | coro_nready--; |
|
|
963 | return av_shift (coro_ready [prio]); |
990 | return av_shift (coro_ready [prio]); |
964 | } |
|
|
965 | |
991 | |
966 | return 0; |
992 | return 0; |
967 | } |
993 | } |
968 | |
994 | |
969 | static int |
995 | static int |
… | |
… | |
981 | |
1007 | |
982 | coro->flags |= CF_READY; |
1008 | coro->flags |= CF_READY; |
983 | |
1009 | |
984 | LOCK; |
1010 | LOCK; |
985 | coro_enq (SvREFCNT_inc (coro_sv)); |
1011 | coro_enq (SvREFCNT_inc (coro_sv)); |
|
|
1012 | ++coro_nready; |
986 | UNLOCK; |
1013 | UNLOCK; |
987 | |
1014 | |
988 | return 1; |
1015 | return 1; |
989 | } |
1016 | } |
990 | |
1017 | |
… | |
… | |
1001 | |
1028 | |
1002 | for (;;) |
1029 | for (;;) |
1003 | { |
1030 | { |
1004 | LOCK; |
1031 | LOCK; |
1005 | next_sv = coro_deq (PRIO_MIN); |
1032 | next_sv = coro_deq (PRIO_MIN); |
1006 | UNLOCK; |
|
|
1007 | |
1033 | |
1008 | /* nothing to schedule: call the idle handler */ |
1034 | /* nothing to schedule: call the idle handler */ |
1009 | if (!next_sv) |
1035 | if (!next_sv) |
1010 | { |
1036 | { |
1011 | dSP; |
1037 | dSP; |
|
|
1038 | UNLOCK; |
1012 | |
1039 | |
1013 | ENTER; |
1040 | ENTER; |
1014 | SAVETMPS; |
1041 | SAVETMPS; |
1015 | |
1042 | |
1016 | PUSHMARK (SP); |
1043 | PUSHMARK (SP); |
… | |
… | |
1025 | ta->next = SvSTATE (next_sv); |
1052 | ta->next = SvSTATE (next_sv); |
1026 | |
1053 | |
1027 | /* cannot transfer to destroyed coros, skip and look for next */ |
1054 | /* cannot transfer to destroyed coros, skip and look for next */ |
1028 | if (ta->next->flags & CF_DESTROYED) |
1055 | if (ta->next->flags & CF_DESTROYED) |
1029 | { |
1056 | { |
|
|
1057 | UNLOCK; |
1030 | SvREFCNT_dec (next_sv); |
1058 | SvREFCNT_dec (next_sv); |
|
|
1059 | /* coro_nready is already taken care of by destroy */ |
1031 | continue; |
1060 | continue; |
1032 | } |
1061 | } |
1033 | |
1062 | |
|
|
1063 | --coro_nready; |
|
|
1064 | UNLOCK; |
1034 | break; |
1065 | break; |
1035 | } |
1066 | } |
1036 | |
1067 | |
1037 | /* free this only after the transfer */ |
1068 | /* free this only after the transfer */ |
1038 | prev_sv = SvRV (coro_current); |
1069 | prev_sv = SvRV (coro_current); |