… | |
… | |
33 | # define BOOT_PAGESIZE (void)0 |
33 | # define BOOT_PAGESIZE (void)0 |
34 | #endif |
34 | #endif |
35 | |
35 | |
/* valgrind support: register each coroutine's C stack with valgrind so
 * stack switches do not produce spurious "invalid access" reports.
 * Outside valgrind builds REGISTER_STACK expands to nothing. */
#if USE_VALGRIND
# include <valgrind/valgrind.h>
# define REGISTER_STACK(cctx,start,end) (cctx)->valgrind_id = VALGRIND_STACK_REGISTER ((start), (end))
#else
# define REGISTER_STACK(cctx,start,end)
#endif

/* the maximum number of idle cctx that will be pooled */
#define MAX_IDLE_CCTX 8
… | |
… | |
112 | #else |
115 | #else |
113 | # define LOCK (void)0 |
116 | # define LOCK (void)0 |
114 | # define UNLOCK (void)0 |
117 | # define UNLOCK (void)0 |
115 | #endif |
118 | #endif |
116 | |
119 | |
|
|
120 | /* helper storage struct for Coro::AIO */ |
117 | struct io_state |
121 | struct io_state |
118 | { |
122 | { |
119 | int errorno; |
123 | int errorno; |
120 | I32 laststype; |
124 | I32 laststype; |
121 | int laststatval; |
125 | int laststatval; |
… | |
… | |
134 | typedef struct coro_cctx { |
138 | typedef struct coro_cctx { |
135 | struct coro_cctx *next; |
139 | struct coro_cctx *next; |
136 | |
140 | |
137 | /* the stack */ |
141 | /* the stack */ |
138 | void *sptr; |
142 | void *sptr; |
139 | long ssize; /* positive == mmap, otherwise malloc */ |
143 | ssize_t ssize; /* positive == mmap, otherwise malloc */ |
140 | |
144 | |
141 | /* cpu state */ |
145 | /* cpu state */ |
142 | void *idle_sp; /* sp of top-level transfer/schedule/cede call */ |
146 | void *idle_sp; /* sp of top-level transfer/schedule/cede call */ |
143 | JMPENV *idle_te; /* same as idle_sp, but for top_env, TODO: remove once stable */ |
147 | JMPENV *idle_te; /* same as idle_sp, but for top_env, TODO: remove once stable */ |
144 | JMPENV *top_env; |
148 | JMPENV *top_env; |
… | |
… | |
184 | int prio; |
188 | int prio; |
185 | }; |
189 | }; |
186 | |
190 | |
187 | typedef struct coro *Coro__State; |
191 | typedef struct coro *Coro__State; |
188 | typedef struct coro *Coro__State_or_hashref; |
192 | typedef struct coro *Coro__State_or_hashref; |
|
|
193 | |
|
|
194 | /** Coro ********************************************************************/ |
|
|
195 | |
|
|
196 | #define PRIO_MAX 3 |
|
|
197 | #define PRIO_HIGH 1 |
|
|
198 | #define PRIO_NORMAL 0 |
|
|
199 | #define PRIO_LOW -1 |
|
|
200 | #define PRIO_IDLE -3 |
|
|
201 | #define PRIO_MIN -4 |
|
|
202 | |
|
|
203 | /* for Coro.pm */ |
|
|
204 | static SV *coro_current; |
|
|
205 | static AV *coro_ready [PRIO_MAX-PRIO_MIN+1]; |
|
|
206 | static int coro_nready; |
|
|
207 | |
|
|
208 | /** lowlevel stuff **********************************************************/ |
189 | |
209 | |
190 | static AV * |
210 | static AV * |
191 | coro_clone_padlist (CV *cv) |
211 | coro_clone_padlist (CV *cv) |
192 | { |
212 | { |
193 | AV *padlist = CvPADLIST (cv); |
213 | AV *padlist = CvPADLIST (cv); |
… | |
… | |
302 | av_extend (av, AvMAX (av) + 1); |
322 | av_extend (av, AvMAX (av) + 1); |
303 | |
323 | |
304 | AvARRAY (av)[++AvFILLp (av)] = (SV *)CvPADLIST (cv); |
324 | AvARRAY (av)[++AvFILLp (av)] = (SV *)CvPADLIST (cv); |
305 | } |
325 | } |
306 | |
326 | |
|
|
327 | /** load & save, init *******************************************************/ |
|
|
328 | |
307 | #define SB do { |
329 | #define SB do { |
308 | #define SE } while (0) |
330 | #define SE } while (0) |
309 | |
331 | |
310 | #define REPLACE_SV(sv,val) SB SvREFCNT_dec (sv); (sv) = (val); (val) = 0; SE |
332 | #define REPLACE_SV(sv,val) SB SvREFCNT_dec (sv); (sv) = (val); (val) = 0; SE |
311 | |
333 | |
… | |
… | |
503 | #if !PERL_VERSION_ATLEAST (5,9,0) |
525 | #if !PERL_VERSION_ATLEAST (5,9,0) |
504 | Safefree (PL_retstack); |
526 | Safefree (PL_retstack); |
505 | #endif |
527 | #endif |
506 | } |
528 | } |
507 | |
529 | |
|
|
530 | /** coroutine stack handling ************************************************/ |
|
|
531 | |
508 | static void |
532 | static void |
509 | setup_coro (struct coro *coro) |
533 | setup_coro (struct coro *coro) |
510 | { |
534 | { |
511 | /* |
535 | /* |
512 | * emulate part of the perl startup here. |
536 | * emulate part of the perl startup here. |
… | |
… | |
609 | |
633 | |
610 | cctx->ssize = ((STACKSIZE * sizeof (long) + PAGESIZE - 1) / PAGESIZE + STACKGUARD) * PAGESIZE; |
634 | cctx->ssize = ((STACKSIZE * sizeof (long) + PAGESIZE - 1) / PAGESIZE + STACKGUARD) * PAGESIZE; |
611 | /* mmap supposedly does allocate-on-write for us */ |
635 | /* mmap supposedly does allocate-on-write for us */ |
612 | cctx->sptr = mmap (0, cctx->ssize, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); |
636 | cctx->sptr = mmap (0, cctx->ssize, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); |
613 | |
637 | |
614 | if (cctx->sptr == (void *)-1) |
638 | if (cctx->sptr != (void *)-1) |
615 | { |
|
|
616 | perror ("FATAL: unable to mmap stack for coroutine"); |
|
|
617 | _exit (EXIT_FAILURE); |
|
|
618 | } |
639 | { |
619 | |
|
|
620 | # if STACKGUARD |
640 | # if STACKGUARD |
621 | mprotect (cctx->sptr, STACKGUARD * PAGESIZE, PROT_NONE); |
641 | mprotect (cctx->sptr, STACKGUARD * PAGESIZE, PROT_NONE); |
622 | # endif |
642 | # endif |
623 | |
643 | REGISTER_STACK ( |
624 | #else |
644 | cctx, |
625 | |
|
|
626 | cctx->ssize = STACKSIZE * (long)sizeof (long); |
|
|
627 | New (0, cctx->sptr, STACKSIZE, long); |
|
|
628 | |
|
|
629 | if (!cctx->sptr) |
|
|
630 | { |
|
|
631 | perror ("FATAL: unable to malloc stack for coroutine"); |
|
|
632 | _exit (EXIT_FAILURE); |
|
|
633 | } |
|
|
634 | |
|
|
635 | #endif |
|
|
636 | |
|
|
637 | #if USE_VALGRIND |
|
|
638 | cctx->valgrind_id = VALGRIND_STACK_REGISTER ( |
|
|
639 | STACKGUARD * PAGESIZE + (char *)cctx->sptr, |
645 | STACKGUARD * PAGESIZE + (char *)cctx->sptr, |
640 | cctx->ssize + (char *)cctx->sptr |
646 | cctx->ssize + (char *)cctx->sptr |
641 | ); |
647 | ); |
642 | #endif |
|
|
643 | |
648 | |
644 | coro_create (&cctx->cctx, coro_run, (void *)cctx, cctx->sptr, cctx->ssize); |
649 | coro_create (&cctx->cctx, coro_run, (void *)cctx, cctx->sptr, cctx->ssize); |
|
|
650 | } |
|
|
651 | else |
|
|
652 | #endif |
|
|
653 | { |
|
|
654 | cctx->ssize = -STACKSIZE * (long)sizeof (long); |
|
|
655 | New (0, cctx->sptr, STACKSIZE, long); |
|
|
656 | |
|
|
657 | if (!cctx->sptr) |
|
|
658 | { |
|
|
659 | perror ("FATAL: unable to allocate stack for coroutine"); |
|
|
660 | _exit (EXIT_FAILURE); |
|
|
661 | } |
|
|
662 | |
|
|
663 | REGISTER_STACK ( |
|
|
664 | cctx, |
|
|
665 | (char *)cctx->sptr, |
|
|
666 | (char *)cctx->sptr - cctx->ssize |
|
|
667 | ); |
|
|
668 | |
|
|
669 | coro_create (&cctx->cctx, coro_run, (void *)cctx, cctx->sptr, -cctx->ssize); |
|
|
670 | } |
645 | |
671 | |
646 | return cctx; |
672 | return cctx; |
647 | } |
673 | } |
648 | |
674 | |
649 | static void |
675 | static void |
… | |
… | |
657 | #if USE_VALGRIND |
683 | #if USE_VALGRIND |
658 | VALGRIND_STACK_DEREGISTER (cctx->valgrind_id); |
684 | VALGRIND_STACK_DEREGISTER (cctx->valgrind_id); |
659 | #endif |
685 | #endif |
660 | |
686 | |
661 | #if HAVE_MMAP |
687 | #if HAVE_MMAP |
|
|
688 | if (cctx->ssize > 0) |
662 | munmap (cctx->sptr, cctx->ssize); |
689 | munmap (cctx->sptr, cctx->ssize); |
663 | #else |
690 | else |
|
|
691 | #endif |
664 | Safefree (cctx->sptr); |
692 | Safefree (cctx->sptr); |
665 | #endif |
|
|
666 | |
693 | |
667 | Safefree (cctx); |
694 | Safefree (cctx); |
668 | } |
695 | } |
669 | |
696 | |
670 | static coro_cctx * |
697 | static coro_cctx * |
… | |
… | |
703 | |
730 | |
704 | ++cctx_idle; |
731 | ++cctx_idle; |
705 | cctx->next = cctx_first; |
732 | cctx->next = cctx_first; |
706 | cctx_first = cctx; |
733 | cctx_first = cctx; |
707 | } |
734 | } |
|
|
735 | |
|
|
736 | /** coroutine switching *****************************************************/ |
708 | |
737 | |
709 | /* never call directly, always through the coro_state_transfer global variable */ |
738 | /* never call directly, always through the coro_state_transfer global variable */ |
710 | static void NOINLINE |
739 | static void NOINLINE |
711 | transfer (struct coro *prev, struct coro *next) |
740 | transfer (struct coro *prev, struct coro *next) |
712 | { |
741 | { |
… | |
… | |
802 | struct coro *prev, *next; |
831 | struct coro *prev, *next; |
803 | }; |
832 | }; |
804 | |
833 | |
805 | #define TRANSFER(ta) transfer ((ta).prev, (ta).next) |
834 | #define TRANSFER(ta) transfer ((ta).prev, (ta).next) |
806 | |
835 | |
|
|
836 | /** high level stuff ********************************************************/ |
|
|
837 | |
807 | static int |
838 | static int |
808 | coro_state_destroy (struct coro *coro) |
839 | coro_state_destroy (struct coro *coro) |
809 | { |
840 | { |
810 | if (coro->flags & CF_DESTROYED) |
841 | if (coro->flags & CF_DESTROYED) |
811 | return 0; |
842 | return 0; |
812 | |
843 | |
813 | coro->flags |= CF_DESTROYED; |
844 | coro->flags |= CF_DESTROYED; |
|
|
845 | |
|
|
846 | if (coro->flags & CF_READY) |
|
|
847 | { |
|
|
848 | /* reduce nready, as destroying a ready coro effectively unreadies it */ |
|
|
849 | /* alternative: look through all ready queues and remove the coro */ |
|
|
850 | LOCK; |
|
|
851 | --coro_nready; |
|
|
852 | UNLOCK; |
|
|
853 | } |
|
|
854 | else |
|
|
855 | coro->flags |= CF_READY; /* make sure it is NOT put into the readyqueue */ |
814 | |
856 | |
815 | if (coro->mainstack && coro->mainstack != main_mainstack) |
857 | if (coro->mainstack && coro->mainstack != main_mainstack) |
816 | { |
858 | { |
|
|
859 | struct coro temp; |
|
|
860 | |
817 | assert (!(coro->flags & CF_RUNNING)); |
861 | assert (!(coro->flags & CF_RUNNING)); |
818 | |
862 | |
819 | struct coro temp; |
|
|
820 | Zero (&temp, 1, struct coro); |
863 | Zero (&temp, 1, struct coro); |
821 | temp.save = CORO_SAVE_ALL; |
864 | temp.save = CORO_SAVE_ALL; |
822 | |
865 | |
823 | if (coro->flags & CF_RUNNING) |
866 | if (coro->flags & CF_RUNNING) |
824 | croak ("FATAL: tried to destroy currently running coroutine"); |
867 | croak ("FATAL: tried to destroy currently running coroutine"); |
… | |
… | |
925 | return old_save; |
968 | return old_save; |
926 | } |
969 | } |
927 | |
970 | |
928 | /** Coro ********************************************************************/ |
971 | /** Coro ********************************************************************/ |
929 | |
972 | |
930 | #define PRIO_MAX 3 |
|
|
931 | #define PRIO_HIGH 1 |
|
|
932 | #define PRIO_NORMAL 0 |
|
|
933 | #define PRIO_LOW -1 |
|
|
934 | #define PRIO_IDLE -3 |
|
|
935 | #define PRIO_MIN -4 |
|
|
936 | |
|
|
937 | /* for Coro.pm */ |
|
|
938 | static SV *coro_current; |
|
|
939 | static AV *coro_ready [PRIO_MAX-PRIO_MIN+1]; |
|
|
940 | static int coro_nready; |
|
|
941 | |
|
|
942 | static void |
973 | static void |
943 | coro_enq (SV *coro_sv) |
974 | coro_enq (SV *coro_sv) |
944 | { |
975 | { |
945 | av_push (coro_ready [SvSTATE (coro_sv)->prio - PRIO_MIN], coro_sv); |
976 | av_push (coro_ready [SvSTATE (coro_sv)->prio - PRIO_MIN], coro_sv); |
946 | coro_nready++; |
|
|
947 | } |
977 | } |
948 | |
978 | |
949 | static SV * |
979 | static SV * |
950 | coro_deq (int min_prio) |
980 | coro_deq (int min_prio) |
951 | { |
981 | { |
… | |
… | |
955 | if (min_prio < 0) |
985 | if (min_prio < 0) |
956 | min_prio = 0; |
986 | min_prio = 0; |
957 | |
987 | |
958 | for (prio = PRIO_MAX - PRIO_MIN + 1; --prio >= min_prio; ) |
988 | for (prio = PRIO_MAX - PRIO_MIN + 1; --prio >= min_prio; ) |
959 | if (AvFILLp (coro_ready [prio]) >= 0) |
989 | if (AvFILLp (coro_ready [prio]) >= 0) |
960 | { |
|
|
961 | coro_nready--; |
|
|
962 | return av_shift (coro_ready [prio]); |
990 | return av_shift (coro_ready [prio]); |
963 | } |
|
|
964 | |
991 | |
965 | return 0; |
992 | return 0; |
966 | } |
993 | } |
967 | |
994 | |
968 | static int |
995 | static int |
… | |
… | |
980 | |
1007 | |
981 | coro->flags |= CF_READY; |
1008 | coro->flags |= CF_READY; |
982 | |
1009 | |
983 | LOCK; |
1010 | LOCK; |
984 | coro_enq (SvREFCNT_inc (coro_sv)); |
1011 | coro_enq (SvREFCNT_inc (coro_sv)); |
|
|
1012 | ++coro_nready; |
985 | UNLOCK; |
1013 | UNLOCK; |
986 | |
1014 | |
987 | return 1; |
1015 | return 1; |
988 | } |
1016 | } |
989 | |
1017 | |
… | |
… | |
1000 | |
1028 | |
1001 | for (;;) |
1029 | for (;;) |
1002 | { |
1030 | { |
1003 | LOCK; |
1031 | LOCK; |
1004 | next_sv = coro_deq (PRIO_MIN); |
1032 | next_sv = coro_deq (PRIO_MIN); |
1005 | UNLOCK; |
|
|
1006 | |
1033 | |
1007 | /* nothing to schedule: call the idle handler */ |
1034 | /* nothing to schedule: call the idle handler */ |
1008 | if (!next_sv) |
1035 | if (!next_sv) |
1009 | { |
1036 | { |
1010 | dSP; |
1037 | dSP; |
|
|
1038 | UNLOCK; |
1011 | |
1039 | |
1012 | ENTER; |
1040 | ENTER; |
1013 | SAVETMPS; |
1041 | SAVETMPS; |
1014 | |
1042 | |
1015 | PUSHMARK (SP); |
1043 | PUSHMARK (SP); |
… | |
… | |
1024 | ta->next = SvSTATE (next_sv); |
1052 | ta->next = SvSTATE (next_sv); |
1025 | |
1053 | |
1026 | /* cannot transfer to destroyed coros, skip and look for next */ |
1054 | /* cannot transfer to destroyed coros, skip and look for next */ |
1027 | if (ta->next->flags & CF_DESTROYED) |
1055 | if (ta->next->flags & CF_DESTROYED) |
1028 | { |
1056 | { |
|
|
1057 | UNLOCK; |
1029 | SvREFCNT_dec (next_sv); |
1058 | SvREFCNT_dec (next_sv); |
|
|
1059 | /* coro_nready is already taken care of by destroy */ |
1030 | continue; |
1060 | continue; |
1031 | } |
1061 | } |
1032 | |
1062 | |
|
|
1063 | --coro_nready; |
|
|
1064 | UNLOCK; |
1033 | break; |
1065 | break; |
1034 | } |
1066 | } |
1035 | |
1067 | |
1036 | /* free this only after the transfer */ |
1068 | /* free this only after the transfer */ |
1037 | prev_sv = SvRV (coro_current); |
1069 | prev_sv = SvRV (coro_current); |
… | |
… | |
1204 | break; |
1236 | break; |
1205 | } |
1237 | } |
1206 | |
1238 | |
1207 | BARRIER; |
1239 | BARRIER; |
1208 | TRANSFER (ta); |
1240 | TRANSFER (ta); |
|
|
1241 | |
|
|
1242 | if (GIMME_V != G_VOID && ta.next != ta.prev) |
|
|
1243 | XSRETURN_YES; |
1209 | } |
1244 | } |
1210 | |
1245 | |
1211 | bool |
1246 | bool |
1212 | _destroy (SV *coro_sv) |
1247 | _destroy (SV *coro_sv) |
1213 | CODE: |
1248 | CODE: |