… | |
… | |
126 | static void (*u2time)(pTHX_ UV ret[2]); |
126 | static void (*u2time)(pTHX_ UV ret[2]); |
127 | |
127 | |
128 | /* we hijack an hopefully unused CV flag for our purposes */ |
128 | /* we hijack an hopefully unused CV flag for our purposes */ |
129 | #define CVf_SLF 0x4000 |
129 | #define CVf_SLF 0x4000 |
130 | static OP *pp_slf (pTHX); |
130 | static OP *pp_slf (pTHX); |
|
|
131 | static void slf_destroy (pTHX_ struct coro *coro); |
131 | |
132 | |
132 | static U32 cctx_gen; |
133 | static U32 cctx_gen; |
133 | static size_t cctx_stacksize = CORO_STACKSIZE; |
134 | static size_t cctx_stacksize = CORO_STACKSIZE; |
134 | static struct CoroAPI coroapi; |
135 | static struct CoroAPI coroapi; |
135 | static AV *main_mainstack; /* used to differentiate between $main and others */ |
136 | static AV *main_mainstack; /* used to differentiate between $main and others */ |
… | |
… | |
165 | static char times_valid; |
166 | static char times_valid; |
166 | |
167 | |
167 | static struct coro_cctx *cctx_first; |
168 | static struct coro_cctx *cctx_first; |
168 | static int cctx_count, cctx_idle; |
169 | static int cctx_count, cctx_idle; |
169 | |
170 | |
170 | enum { |
171 | enum |
|
|
172 | { |
171 | CC_MAPPED = 0x01, |
173 | CC_MAPPED = 0x01, |
172 | CC_NOREUSE = 0x02, /* throw this away after tracing */ |
174 | CC_NOREUSE = 0x02, /* throw this away after tracing */ |
173 | CC_TRACE = 0x04, |
175 | CC_TRACE = 0x04, |
174 | CC_TRACE_SUB = 0x08, /* trace sub calls */ |
176 | CC_TRACE_SUB = 0x08, /* trace sub calls */ |
175 | CC_TRACE_LINE = 0x10, /* trace each statement */ |
177 | CC_TRACE_LINE = 0x10, /* trace each statement */ |
… | |
… | |
200 | |
202 | |
201 | static coro_cctx *cctx_current; /* the currently running cctx */ |
203 | static coro_cctx *cctx_current; /* the currently running cctx */ |
202 | |
204 | |
203 | /*****************************************************************************/ |
205 | /*****************************************************************************/ |
204 | |
206 | |
|
|
207 | static MGVTBL coro_state_vtbl; |
|
|
208 | |
205 | enum { |
209 | enum |
|
|
210 | { |
206 | CF_RUNNING = 0x0001, /* coroutine is running */ |
211 | CF_RUNNING = 0x0001, /* coroutine is running */ |
207 | CF_READY = 0x0002, /* coroutine is ready */ |
212 | CF_READY = 0x0002, /* coroutine is ready */ |
208 | CF_NEW = 0x0004, /* has never been switched to */ |
213 | CF_NEW = 0x0004, /* has never been switched to */ |
209 | CF_DESTROYED = 0x0008, /* coroutine data has been freed */ |
214 | CF_DESTROYED = 0x0008, /* coroutine data has been freed */ |
210 | CF_SUSPENDED = 0x0010, /* coroutine can't be scheduled */ |
215 | CF_SUSPENDED = 0x0010, /* coroutine can't be scheduled */ |
… | |
… | |
222 | #define VAR(name,type) type name; |
227 | #define VAR(name,type) type name; |
223 | # include "state.h" |
228 | # include "state.h" |
224 | #undef VAR |
229 | #undef VAR |
225 | } perl_slots; |
230 | } perl_slots; |
226 | |
231 | |
|
|
232 | // how many context stack entries do we need for perl_slots |
227 | #define SLOT_COUNT ((sizeof (perl_slots) + sizeof (PERL_CONTEXT) - 1) / sizeof (PERL_CONTEXT)) |
233 | #define SLOT_COUNT ((sizeof (perl_slots) + sizeof (PERL_CONTEXT) - 1) / sizeof (PERL_CONTEXT)) |
228 | |
234 | |
229 | /* this is a structure representing a perl-level coroutine */ |
235 | /* this is a structure representing a perl-level coroutine */ |
230 | struct coro { |
236 | struct coro |
|
|
237 | { |
231 | /* the C coroutine allocated to this perl coroutine, if any */ |
238 | /* the C coroutine allocated to this perl coroutine, if any */ |
232 | coro_cctx *cctx; |
239 | coro_cctx *cctx; |
233 | |
240 | |
234 | /* ready queue */ |
241 | /* ready queue */ |
235 | struct coro *next_ready; |
242 | struct coro *next_ready; |
… | |
… | |
237 | /* state data */ |
244 | /* state data */ |
238 | struct CoroSLF slf_frame; /* saved slf frame */ |
245 | struct CoroSLF slf_frame; /* saved slf frame */ |
239 | AV *mainstack; |
246 | AV *mainstack; |
240 | perl_slots *slot; /* basically the saved sp */ |
247 | perl_slots *slot; /* basically the saved sp */ |
241 | |
248 | |
242 | CV *startcv; /* the CV to execute */ |
249 | CV *startcv; /* the CV to execute */ |
243 | AV *args; /* data associated with this coroutine (initial args) */ |
250 | AV *args; /* data associated with this coroutine (initial args) */ |
244 | int refcnt; /* coroutines are refcounted, yes */ |
|
|
245 | int flags; /* CF_ flags */ |
251 | int flags; /* CF_ flags */ |
246 | HV *hv; /* the perl hash associated with this coro, if any */ |
252 | HV *hv; /* the perl hash associated with this coro, if any */ |
247 | void (*on_destroy)(pTHX_ struct coro *coro); /* for temporary use by xs in critical sections */ |
|
|
248 | |
253 | |
249 | /* statistics */ |
254 | /* statistics */ |
250 | int usecount; /* number of transfers to this coro */ |
255 | int usecount; /* number of transfers to this coro */ |
251 | |
256 | |
252 | /* coro process data */ |
257 | /* coro process data */ |
253 | int prio; |
258 | int prio; |
254 | SV *except; /* exception to be thrown */ |
259 | SV *except; /* exception to be thrown */ |
255 | SV *rouse_cb; |
260 | SV *rouse_cb; /* last rouse callback */ |
|
|
261 | AV *on_destroy; /* callbacks or coros to notify on destroy */ |
|
|
262 | AV *status; /* the exit status list */ |
256 | |
263 | |
257 | /* async_pool */ |
264 | /* async_pool */ |
258 | SV *saved_deffh; |
265 | SV *saved_deffh; |
259 | SV *invoke_cb; |
266 | SV *invoke_cb; |
260 | AV *invoke_av; |
267 | AV *invoke_av; |
… | |
… | |
292 | |
299 | |
293 | /* for Coro.pm */ |
300 | /* for Coro.pm */ |
294 | static SV *coro_current; |
301 | static SV *coro_current; |
295 | static SV *coro_readyhook; |
302 | static SV *coro_readyhook; |
296 | static struct coro *coro_ready [CORO_PRIO_MAX - CORO_PRIO_MIN + 1][2]; /* head|tail */ |
303 | static struct coro *coro_ready [CORO_PRIO_MAX - CORO_PRIO_MIN + 1][2]; /* head|tail */ |
297 | static CV *cv_coro_run, *cv_coro_terminate; |
304 | static CV *cv_coro_run; |
298 | static struct coro *coro_first; |
305 | static struct coro *coro_first; |
299 | #define coro_nready coroapi.nready |
306 | #define coro_nready coroapi.nready |
300 | |
307 | |
301 | /** Coro::Select ************************************************************/ |
308 | /** Coro::Select ************************************************************/ |
302 | |
309 | |
… | |
… | |
466 | : 0) |
473 | : 0) |
467 | |
474 | |
468 | #define CORO_MAGIC_cv(cv) CORO_MAGIC (((SV *)(cv)), CORO_MAGIC_type_cv) |
475 | #define CORO_MAGIC_cv(cv) CORO_MAGIC (((SV *)(cv)), CORO_MAGIC_type_cv) |
469 | #define CORO_MAGIC_state(sv) CORO_MAGIC_NN (((SV *)(sv)), CORO_MAGIC_type_state) |
476 | #define CORO_MAGIC_state(sv) CORO_MAGIC_NN (((SV *)(sv)), CORO_MAGIC_type_state) |
470 | |
477 | |
|
|
/* Return the Coro::State MAGIC attached to a coro hash, or 0 when the sv
 * is not a hash or carries no state magic of ours. The mg_virtual check
 * guards against unrelated magic stored under the same magic type byte. */
INLINE MAGIC *
SvSTATEhv_p (pTHX_ SV *coro)
{
  MAGIC *mg;

  if (expect_true (
        SvTYPE (coro) == SVt_PVHV
        && (mg = CORO_MAGIC_state (coro))
        && mg->mg_virtual == &coro_state_vtbl
     ))
    return mg;

  return 0;
}
|
|
492 | |
471 | INLINE struct coro * |
493 | INLINE struct coro * |
472 | SvSTATE_ (pTHX_ SV *coro) |
494 | SvSTATE_ (pTHX_ SV *coro) |
473 | { |
495 | { |
474 | HV *stash; |
|
|
475 | MAGIC *mg; |
496 | MAGIC *mg; |
476 | |
497 | |
477 | if (SvROK (coro)) |
498 | if (SvROK (coro)) |
478 | coro = SvRV (coro); |
499 | coro = SvRV (coro); |
479 | |
500 | |
480 | if (expect_false (SvTYPE (coro) != SVt_PVHV)) |
501 | mg = SvSTATEhv_p (coro); |
|
|
502 | if (!mg) |
481 | croak ("Coro::State object required"); |
503 | croak ("Coro::State object required"); |
482 | |
504 | |
483 | stash = SvSTASH (coro); |
|
|
484 | if (expect_false (stash != coro_stash && stash != coro_state_stash)) |
|
|
485 | { |
|
|
486 | /* very slow, but rare, check */ |
|
|
487 | if (!sv_derived_from (sv_2mortal (newRV_inc (coro)), "Coro::State")) |
|
|
488 | croak ("Coro::State object required"); |
|
|
489 | } |
|
|
490 | |
|
|
491 | mg = CORO_MAGIC_state (coro); |
|
|
492 | return (struct coro *)mg->mg_ptr; |
505 | return (struct coro *)mg->mg_ptr; |
493 | } |
506 | } |
494 | |
507 | |
495 | #define SvSTATE(sv) SvSTATE_ (aTHX_ (sv)) |
508 | #define SvSTATE(sv) SvSTATE_ (aTHX_ (sv)) |
496 | |
509 | |
… | |
… | |
798 | |
811 | |
799 | PUTBACK; |
812 | PUTBACK; |
800 | } |
813 | } |
801 | |
814 | |
802 | /* allocate some space on the context stack for our purposes */ |
815 | /* allocate some space on the context stack for our purposes */ |
803 | /* we manually unroll here, as usually 2 slots is enough */ |
816 | if (expect_false (cxstack_ix + SLOT_COUNT >= cxstack_max)) |
804 | if (SLOT_COUNT >= 1) CXINC; |
|
|
805 | if (SLOT_COUNT >= 2) CXINC; |
|
|
806 | if (SLOT_COUNT >= 3) CXINC; |
|
|
807 | { |
817 | { |
808 | unsigned int i; |
818 | unsigned int i; |
|
|
819 | |
809 | for (i = 3; i < SLOT_COUNT; ++i) |
820 | for (i = 0; i < SLOT_COUNT; ++i) |
810 | CXINC; |
821 | CXINC; |
811 | } |
822 | |
812 | cxstack_ix -= SLOT_COUNT; /* undo allocation */ |
823 | cxstack_ix -= SLOT_COUNT; /* undo allocation */ |
|
|
824 | } |
813 | |
825 | |
814 | c->mainstack = PL_mainstack; |
826 | c->mainstack = PL_mainstack; |
815 | |
827 | |
816 | { |
828 | { |
817 | perl_slots *slot = c->slot = (perl_slots *)(cxstack + cxstack_ix + 1); |
829 | perl_slots *slot = c->slot = (perl_slots *)(cxstack + cxstack_ix + 1); |
… | |
… | |
838 | # define coro_init_stacks(thx) init_stacks () |
850 | # define coro_init_stacks(thx) init_stacks () |
839 | #else |
851 | #else |
840 | static void |
852 | static void |
841 | coro_init_stacks (pTHX) |
853 | coro_init_stacks (pTHX) |
842 | { |
854 | { |
843 | PL_curstackinfo = new_stackinfo(32, 8); |
855 | PL_curstackinfo = new_stackinfo(32, 4 + SLOT_COUNT); /* 3 is minimum due to perl rounding down in scope.c:GROW() */ |
844 | PL_curstackinfo->si_type = PERLSI_MAIN; |
856 | PL_curstackinfo->si_type = PERLSI_MAIN; |
845 | PL_curstack = PL_curstackinfo->si_stack; |
857 | PL_curstack = PL_curstackinfo->si_stack; |
846 | PL_mainstack = PL_curstack; /* remember in case we switch stacks */ |
858 | PL_mainstack = PL_curstack; /* remember in case we switch stacks */ |
847 | |
859 | |
848 | PL_stack_base = AvARRAY(PL_curstack); |
860 | PL_stack_base = AvARRAY(PL_curstack); |
… | |
… | |
1106 | /* this newly created coroutine might be run on an existing cctx which most |
1118 | /* this newly created coroutine might be run on an existing cctx which most |
1107 | * likely was suspended in pp_slf, so we have to emulate entering pp_slf here. |
1119 | * likely was suspended in pp_slf, so we have to emulate entering pp_slf here. |
1108 | */ |
1120 | */ |
1109 | slf_frame.prepare = prepare_nop; /* provide a nop function for an eventual pp_slf */ |
1121 | slf_frame.prepare = prepare_nop; /* provide a nop function for an eventual pp_slf */ |
1110 | slf_frame.check = slf_check_nop; /* signal pp_slf to not repeat */ |
1122 | slf_frame.check = slf_check_nop; /* signal pp_slf to not repeat */ |
|
|
1123 | slf_frame.destroy = 0; |
1111 | |
1124 | |
1112 | /* and we have to provide the pp_slf op in any case, so pp_slf can skip it */ |
1125 | /* and we have to provide the pp_slf op in any case, so pp_slf can skip it */ |
1113 | init_perl_op.op_next = PL_op; |
1126 | init_perl_op.op_next = PL_op; |
1114 | init_perl_op.op_type = OP_ENTERSUB; |
1127 | init_perl_op.op_type = OP_ENTERSUB; |
1115 | init_perl_op.op_ppaddr = pp_slf; |
1128 | init_perl_op.op_ppaddr = pp_slf; |
… | |
… | |
1154 | destroy_perl (pTHX_ struct coro *coro) |
1167 | destroy_perl (pTHX_ struct coro *coro) |
1155 | { |
1168 | { |
1156 | SV *svf [9]; |
1169 | SV *svf [9]; |
1157 | |
1170 | |
1158 | { |
1171 | { |
|
|
1172 | SV *old_current = SvRV (coro_current); |
1159 | struct coro *current = SvSTATE_current; |
1173 | struct coro *current = SvSTATE (old_current); |
1160 | |
1174 | |
1161 | assert (("FATAL: tried to destroy currently running coroutine", coro->mainstack != PL_mainstack)); |
1175 | assert (("FATAL: tried to destroy currently running coroutine", coro->mainstack != PL_mainstack)); |
1162 | |
1176 | |
1163 | save_perl (aTHX_ current); |
1177 | save_perl (aTHX_ current); |
|
|
1178 | |
|
|
1179 | /* this will cause transfer_check to croak on block*/ |
|
|
1180 | SvRV_set (coro_current, (SV *)coro->hv); |
|
|
1181 | |
1164 | load_perl (aTHX_ coro); |
1182 | load_perl (aTHX_ coro); |
1165 | |
1183 | |
1166 | coro_unwind_stacks (aTHX); |
1184 | coro_unwind_stacks (aTHX); |
1167 | coro_destruct_stacks (aTHX); |
|
|
1168 | |
1185 | |
1169 | /* restore swapped sv's */ |
1186 | /* restore swapped sv's */ |
1170 | SWAP_SVS (coro); |
1187 | SWAP_SVS (coro); |
|
|
1188 | |
|
|
1189 | coro_destruct_stacks (aTHX); |
1171 | |
1190 | |
1172 | // now save some sv's to be free'd later |
1191 | // now save some sv's to be free'd later |
1173 | svf [0] = GvSV (PL_defgv); |
1192 | svf [0] = GvSV (PL_defgv); |
1174 | svf [1] = (SV *)GvAV (PL_defgv); |
1193 | svf [1] = (SV *)GvAV (PL_defgv); |
1175 | svf [2] = GvSV (PL_errgv); |
1194 | svf [2] = GvSV (PL_errgv); |
… | |
… | |
1179 | svf [6] = (SV *)GvHV (PL_hintgv); |
1198 | svf [6] = (SV *)GvHV (PL_hintgv); |
1180 | svf [7] = PL_diehook; |
1199 | svf [7] = PL_diehook; |
1181 | svf [8] = PL_warnhook; |
1200 | svf [8] = PL_warnhook; |
1182 | assert (9 == sizeof (svf) / sizeof (*svf)); |
1201 | assert (9 == sizeof (svf) / sizeof (*svf)); |
1183 | |
1202 | |
|
|
1203 | SvRV_set (coro_current, old_current); |
|
|
1204 | |
1184 | load_perl (aTHX_ current); |
1205 | load_perl (aTHX_ current); |
1185 | } |
1206 | } |
1186 | |
1207 | |
1187 | { |
1208 | { |
1188 | unsigned int i; |
1209 | unsigned int i; |
… | |
… | |
1200 | INLINE void |
1221 | INLINE void |
1201 | free_coro_mortal (pTHX) |
1222 | free_coro_mortal (pTHX) |
1202 | { |
1223 | { |
1203 | if (expect_true (coro_mortal)) |
1224 | if (expect_true (coro_mortal)) |
1204 | { |
1225 | { |
1205 | SvREFCNT_dec (coro_mortal); |
1226 | SvREFCNT_dec ((SV *)coro_mortal); |
1206 | coro_mortal = 0; |
1227 | coro_mortal = 0; |
1207 | } |
1228 | } |
1208 | } |
1229 | } |
1209 | |
1230 | |
1210 | static int |
1231 | static int |
… | |
… | |
1666 | #define TRANSFER(ta, force_cctx) transfer (aTHX_ (ta).prev, (ta).next, (force_cctx)) |
1687 | #define TRANSFER(ta, force_cctx) transfer (aTHX_ (ta).prev, (ta).next, (force_cctx)) |
1667 | #define TRANSFER_CHECK(ta) transfer_check (aTHX_ (ta).prev, (ta).next) |
1688 | #define TRANSFER_CHECK(ta) transfer_check (aTHX_ (ta).prev, (ta).next) |
1668 | |
1689 | |
1669 | /** high level stuff ********************************************************/ |
1690 | /** high level stuff ********************************************************/ |
1670 | |
1691 | |
|
|
1692 | /* this function is actually Coro, not Coro::State, but we call it from here */ |
|
|
1693 | /* because it is convenient - but it hasn't been declared yet for that reason */ |
|
|
1694 | static void |
|
|
1695 | coro_call_on_destroy (pTHX_ struct coro *coro); |
|
|
1696 | |
1671 | static void |
1697 | static void |
1672 | coro_state_destroy (pTHX_ struct coro *coro) |
1698 | coro_state_destroy (pTHX_ struct coro *coro) |
1673 | { |
1699 | { |
1674 | if (coro->flags & CF_DESTROYED) |
1700 | if (coro->flags & CF_DESTROYED) |
1675 | return; |
1701 | return; |
1676 | |
1702 | |
1677 | /* this callback is reserved for slf functions needing to do cleanup */ |
|
|
1678 | if (coro->on_destroy && !PL_dirty) |
|
|
1679 | coro->on_destroy (aTHX_ coro); |
1703 | slf_destroy (aTHX_ coro); |
1680 | |
|
|
1681 | /* |
|
|
1682 | * The on_destroy above most likely is from an SLF call. |
|
|
1683 | * Since by definition the SLF call will not finish when we destroy |
|
|
1684 | * the coro, we will have to force-finish it here, otherwise |
|
|
1685 | * cleanup functions cannot call SLF functions. |
|
|
1686 | */ |
|
|
1687 | coro->slf_frame.prepare = 0; |
|
|
1688 | |
1704 | |
1689 | coro->flags |= CF_DESTROYED; |
1705 | coro->flags |= CF_DESTROYED; |
1690 | |
1706 | |
1691 | if (coro->flags & CF_READY) |
1707 | if (coro->flags & CF_READY) |
1692 | { |
1708 | { |
… | |
… | |
1694 | /* alternative: look through all ready queues and remove the coro */ |
1710 | /* alternative: look through all ready queues and remove the coro */ |
1695 | --coro_nready; |
1711 | --coro_nready; |
1696 | } |
1712 | } |
1697 | else |
1713 | else |
1698 | coro->flags |= CF_READY; /* make sure it is NOT put into the readyqueue */ |
1714 | coro->flags |= CF_READY; /* make sure it is NOT put into the readyqueue */ |
|
|
1715 | |
|
|
1716 | if (coro->next) coro->next->prev = coro->prev; |
|
|
1717 | if (coro->prev) coro->prev->next = coro->next; |
|
|
1718 | if (coro == coro_first) coro_first = coro->next; |
1699 | |
1719 | |
1700 | if (coro->mainstack |
1720 | if (coro->mainstack |
1701 | && coro->mainstack != main_mainstack |
1721 | && coro->mainstack != main_mainstack |
1702 | && coro->slot |
1722 | && coro->slot |
1703 | && !PL_dirty) |
1723 | && !PL_dirty) |
1704 | destroy_perl (aTHX_ coro); |
1724 | destroy_perl (aTHX_ coro); |
1705 | |
1725 | |
1706 | if (coro->next) coro->next->prev = coro->prev; |
|
|
1707 | if (coro->prev) coro->prev->next = coro->next; |
|
|
1708 | if (coro == coro_first) coro_first = coro->next; |
|
|
1709 | |
|
|
1710 | cctx_destroy (coro->cctx); |
1726 | cctx_destroy (coro->cctx); |
1711 | SvREFCNT_dec (coro->startcv); |
1727 | SvREFCNT_dec (coro->startcv); |
1712 | SvREFCNT_dec (coro->args); |
1728 | SvREFCNT_dec (coro->args); |
1713 | SvREFCNT_dec (coro->swap_sv); |
1729 | SvREFCNT_dec (coro->swap_sv); |
1714 | SvREFCNT_dec (CORO_THROW); |
1730 | SvREFCNT_dec (CORO_THROW); |
|
|
1731 | |
|
|
1732 | coro_call_on_destroy (coro); |
|
|
1733 | |
|
|
1734 | /* more destruction mayhem in coro_state_free */ |
1715 | } |
1735 | } |
1716 | |
1736 | |
1717 | static int |
1737 | static int |
1718 | coro_state_free (pTHX_ SV *sv, MAGIC *mg) |
1738 | coro_state_free (pTHX_ SV *sv, MAGIC *mg) |
1719 | { |
1739 | { |
1720 | struct coro *coro = (struct coro *)mg->mg_ptr; |
1740 | struct coro *coro = (struct coro *)mg->mg_ptr; |
1721 | mg->mg_ptr = 0; |
1741 | mg->mg_ptr = 0; |
1722 | |
1742 | |
1723 | coro->hv = 0; |
|
|
1724 | |
|
|
1725 | if (--coro->refcnt < 0) |
|
|
1726 | { |
|
|
1727 | coro_state_destroy (aTHX_ coro); |
1743 | coro_state_destroy (coro); |
|
|
1744 | SvREFCNT_dec (coro->on_destroy); |
|
|
1745 | SvREFCNT_dec (coro->status); |
|
|
1746 | |
1728 | Safefree (coro); |
1747 | Safefree (coro); |
1729 | } |
|
|
1730 | |
1748 | |
1731 | return 0; |
1749 | return 0; |
1732 | } |
1750 | } |
1733 | |
1751 | |
1734 | static int |
1752 | static int |
1735 | coro_state_dup (pTHX_ MAGIC *mg, CLONE_PARAMS *params) |
1753 | coro_state_dup (pTHX_ MAGIC *mg, CLONE_PARAMS *params) |
1736 | { |
1754 | { |
1737 | struct coro *coro = (struct coro *)mg->mg_ptr; |
1755 | /* called when perl clones the current process the slow way (windows process emulation) */ |
1738 | |
1756 | /* WE SIMply nuke the pointers in the copy, causing perl to croak */ |
1739 | ++coro->refcnt; |
1757 | mg->mg_ptr = 0; |
|
|
1758 | mg->mg_virtual = 0; |
1740 | |
1759 | |
1741 | return 0; |
1760 | return 0; |
1742 | } |
1761 | } |
1743 | |
1762 | |
1744 | static MGVTBL coro_state_vtbl = { |
1763 | static MGVTBL coro_state_vtbl = { |
… | |
… | |
2013 | coro->slot->runops = RUNOPS_DEFAULT; |
2032 | coro->slot->runops = RUNOPS_DEFAULT; |
2014 | } |
2033 | } |
2015 | } |
2034 | } |
2016 | |
2035 | |
2017 | static void |
2036 | static void |
|
|
2037 | coro_push_av (pTHX_ AV *av, I32 gimme_v) |
|
|
2038 | { |
|
|
2039 | if (AvFILLp (av) >= 0 && gimme_v != G_VOID) |
|
|
2040 | { |
|
|
2041 | dSP; |
|
|
2042 | |
|
|
2043 | if (gimme_v == G_SCALAR) |
|
|
2044 | XPUSHs (AvARRAY (av)[AvFILLp (av)]); |
|
|
2045 | else |
|
|
2046 | { |
|
|
2047 | int i; |
|
|
2048 | EXTEND (SP, AvFILLp (av) + 1); |
|
|
2049 | |
|
|
2050 | for (i = 0; i <= AvFILLp (av); ++i) |
|
|
2051 | PUSHs (AvARRAY (av)[i]); |
|
|
2052 | } |
|
|
2053 | |
|
|
2054 | PUTBACK; |
|
|
2055 | } |
|
|
2056 | } |
|
|
2057 | |
|
|
2058 | static void |
|
|
2059 | coro_push_on_destroy (aTHX_ struct coro *coro, SV *cb) |
|
|
2060 | { |
|
|
2061 | if (!coro->on_destroy) |
|
|
2062 | coro->on_destroy = newAV (); |
|
|
2063 | |
|
|
2064 | av_push (coro->on_destroy, cb); |
|
|
2065 | } |
|
|
2066 | |
|
|
2067 | static void |
|
|
2068 | slf_destroy_join (pTHX_ struct CoroSLF *frame) |
|
|
2069 | { |
|
|
2070 | SvREFCNT_dec ((SV *)((struct coro *)frame->data)->hv); |
|
|
2071 | } |
|
|
2072 | |
|
|
2073 | static int |
|
|
2074 | slf_check_join (pTHX_ struct CoroSLF *frame) |
|
|
2075 | { |
|
|
2076 | struct coro *coro = (struct coro *)frame->data; |
|
|
2077 | |
|
|
2078 | if (!coro->status) |
|
|
2079 | return 1; |
|
|
2080 | |
|
|
2081 | frame->destroy = 0; |
|
|
2082 | |
|
|
2083 | coro_push_av (coro->status, GIMME_V); |
|
|
2084 | |
|
|
2085 | SvREFCNT_dec ((SV *)coro->hv); |
|
|
2086 | |
|
|
2087 | return 0; |
|
|
2088 | } |
|
|
2089 | |
|
|
2090 | static void |
|
|
2091 | slf_init_join (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
|
|
2092 | { |
|
|
2093 | struct coro *coro = SvSTATE (items > 0 ? arg [0] : &PL_sv_undef); |
|
|
2094 | |
|
|
2095 | if (items > 1) |
|
|
2096 | croak ("join called with too many arguments"); |
|
|
2097 | |
|
|
2098 | if (coro->status) |
|
|
2099 | frame->prepare = prepare_nop; |
|
|
2100 | else |
|
|
2101 | { |
|
|
2102 | coro_push_on_destroy (aTHX_ coro, SvREFCNT_inc_NN (SvRV (coro_current))); |
|
|
2103 | frame->prepare = prepare_schedule; |
|
|
2104 | } |
|
|
2105 | |
|
|
2106 | frame->check = slf_check_join; |
|
|
2107 | frame->destroy = slf_destroy_join; |
|
|
2108 | frame->data = (void *)coro; |
|
|
2109 | SvREFCNT_inc (coro->hv); |
|
|
2110 | } |
|
|
2111 | |
|
|
2112 | static void |
2018 | coro_call_on_destroy (pTHX_ struct coro *coro) |
2113 | coro_call_on_destroy (pTHX_ struct coro *coro) |
2019 | { |
2114 | { |
2020 | SV **on_destroyp = hv_fetch (coro->hv, "_on_destroy", sizeof ("_on_destroy") - 1, 0); |
2115 | AV *od = coro->on_destroy; |
2021 | |
2116 | |
2022 | if (on_destroyp) |
2117 | if (!od) |
2023 | { |
2118 | return; |
2024 | SV **statusp = hv_fetch (coro->hv, "_status", sizeof ("_status") - 1, 0); |
|
|
2025 | AV *on_destroy = sv_2mortal (SvREFCNT_inc ((AV *)SvRV (*on_destroyp))); |
|
|
2026 | AV *status = statusp ? sv_2mortal (SvREFCNT_inc ((AV *)SvRV (*statusp))) : 0; |
|
|
2027 | |
2119 | |
2028 | while (AvFILLp (on_destroy) >= 0) |
2120 | while (AvFILLp (od) >= 0) |
|
|
2121 | { |
|
|
2122 | SV *cb = sv_2mortal (av_pop (od)); |
|
|
2123 | |
|
|
2124 | /* coro hv's (and only hv's at the moment) are supported as well */ |
|
|
2125 | if (SvSTATEhv_p (aTHX_ cb)) |
|
|
2126 | api_ready (aTHX_ cb); |
|
|
2127 | else |
2029 | { |
2128 | { |
2030 | dSP; /* don't disturb outer sp */ |
2129 | dSP; /* don't disturb outer sp */ |
2031 | SV *cb = av_pop (on_destroy); |
|
|
2032 | |
|
|
2033 | PUSHMARK (SP); |
2130 | PUSHMARK (SP); |
2034 | |
2131 | |
2035 | if (statusp) |
2132 | if (coro->status) |
2036 | { |
2133 | { |
2037 | int i; |
2134 | PUTBACK; |
2038 | EXTEND (SP, AvFILLp (status) + 1); |
2135 | coro_push_av (aTHX_ coro->status, G_ARRAY); |
2039 | |
2136 | SPAGAIN; |
2040 | for (i = 0; i <= AvFILLp (status); ++i) |
|
|
2041 | PUSHs (AvARRAY (status)[i]); |
|
|
2042 | } |
2137 | } |
2043 | |
2138 | |
2044 | PUTBACK; |
2139 | PUTBACK; |
2045 | call_sv (sv_2mortal (cb), G_VOID | G_DISCARD); |
2140 | call_sv (sv_2mortal (cb), G_VOID | G_DISCARD); |
2046 | } |
2141 | } |
2047 | } |
2142 | } |
2048 | } |
2143 | } |
2049 | |
2144 | |
2050 | static void |
2145 | static void |
2051 | coro_set_status (HV *coro_hv, SV **arg, int items) |
2146 | coro_set_status (pTHX_ struct coro *coro, SV **arg, int items) |
2052 | { |
2147 | { |
2053 | AV *av = newAV (); |
2148 | AV *av; |
|
|
2149 | |
|
|
2150 | if (coro->status) |
|
|
2151 | { |
|
|
2152 | av = coro->status; |
|
|
2153 | av_clear (av); |
|
|
2154 | } |
|
|
2155 | else |
|
|
2156 | av = coro->status = newAV (); |
2054 | |
2157 | |
2055 | /* items are actually not so common, so optimise for this case */ |
2158 | /* items are actually not so common, so optimise for this case */ |
2056 | if (items) |
2159 | if (items) |
2057 | { |
2160 | { |
2058 | int i; |
2161 | int i; |
… | |
… | |
2060 | av_extend (av, items - 1); |
2163 | av_extend (av, items - 1); |
2061 | |
2164 | |
2062 | for (i = 0; i < items; ++i) |
2165 | for (i = 0; i < items; ++i) |
2063 | av_push (av, SvREFCNT_inc_NN (arg [i])); |
2166 | av_push (av, SvREFCNT_inc_NN (arg [i])); |
2064 | } |
2167 | } |
2065 | |
|
|
2066 | hv_store (coro_hv, "_status", sizeof ("_status") - 1, newRV_noinc ((SV *)av), 0); |
|
|
2067 | } |
2168 | } |
2068 | |
2169 | |
2069 | static void |
2170 | static void |
2070 | slf_init_terminate_cancel_common (pTHX_ struct CoroSLF *frame, HV *coro_hv) |
2171 | slf_init_terminate_cancel_common (pTHX_ struct CoroSLF *frame, HV *coro_hv) |
2071 | { |
2172 | { |
… | |
… | |
2083 | static void |
2184 | static void |
2084 | slf_init_terminate (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2185 | slf_init_terminate (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2085 | { |
2186 | { |
2086 | HV *coro_hv = (HV *)SvRV (coro_current); |
2187 | HV *coro_hv = (HV *)SvRV (coro_current); |
2087 | |
2188 | |
2088 | coro_set_status (coro_hv, arg, items); |
2189 | coro_set_status (aTHX_ SvSTATE ((SV *)coro_hv), arg, items); |
2089 | slf_init_terminate_cancel_common (frame, coro_hv); |
2190 | slf_init_terminate_cancel_common (aTHX_ frame, coro_hv); |
2090 | } |
2191 | } |
2091 | |
2192 | |
2092 | static void |
2193 | static void |
2093 | slf_init_cancel (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2194 | slf_init_cancel (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2094 | { |
2195 | { |
… | |
… | |
2099 | croak ("Coro::cancel called without coro object,"); |
2200 | croak ("Coro::cancel called without coro object,"); |
2100 | |
2201 | |
2101 | coro = SvSTATE (arg [0]); |
2202 | coro = SvSTATE (arg [0]); |
2102 | coro_hv = coro->hv; |
2203 | coro_hv = coro->hv; |
2103 | |
2204 | |
2104 | coro_set_status (coro_hv, arg + 1, items - 1); |
2205 | coro_set_status (aTHX_ coro, arg + 1, items - 1); |
2105 | |
2206 | |
2106 | if (expect_false (coro->flags & CF_NOCANCEL)) |
2207 | if (expect_false (coro->flags & CF_NOCANCEL)) |
2107 | { |
2208 | { |
2108 | /* coro currently busy cancelling something, so just notify it */ |
2209 | /* coro currently busy cancelling something, so just notify it */ |
2109 | coro->slf_frame.data = (void *)coro; |
2210 | coro->slf_frame.data = (void *)coro; |
… | |
… | |
2112 | frame->check = slf_check_nop; |
2213 | frame->check = slf_check_nop; |
2113 | } |
2214 | } |
2114 | else if (coro_hv == (HV *)SvRV (coro_current)) |
2215 | else if (coro_hv == (HV *)SvRV (coro_current)) |
2115 | { |
2216 | { |
2116 | /* cancelling the current coro is allowed, and equals terminate */ |
2217 | /* cancelling the current coro is allowed, and equals terminate */ |
2117 | slf_init_terminate_cancel_common (frame, coro_hv); |
2218 | slf_init_terminate_cancel_common (aTHX_ frame, coro_hv); |
2118 | } |
2219 | } |
2119 | else |
2220 | else |
2120 | { |
2221 | { |
2121 | struct coro *self = SvSTATE_current; |
2222 | struct coro *self = SvSTATE_current; |
2122 | |
2223 | |
… | |
… | |
2126 | * this is ugly, and hopefully fully worth the extra speed. |
2227 | * this is ugly, and hopefully fully worth the extra speed. |
2127 | * besides, I can't get the slow-but-safe version working... |
2228 | * besides, I can't get the slow-but-safe version working... |
2128 | */ |
2229 | */ |
2129 | slf_frame.data = 0; |
2230 | slf_frame.data = 0; |
2130 | self->flags |= CF_NOCANCEL; |
2231 | self->flags |= CF_NOCANCEL; |
2131 | |
|
|
2132 | coro_state_destroy (aTHX_ coro); |
2232 | coro_state_destroy (aTHX_ coro); |
2133 | coro_call_on_destroy (aTHX_ coro); |
|
|
2134 | |
|
|
2135 | self->flags &= ~CF_NOCANCEL; |
2233 | self->flags &= ~CF_NOCANCEL; |
2136 | |
2234 | |
2137 | if (slf_frame.data) |
2235 | if (slf_frame.data) |
2138 | { |
2236 | { |
2139 | /* while we were busy we have been cancelled, so terminate */ |
2237 | /* while we were busy we have been cancelled, so terminate */ |
2140 | slf_init_terminate_cancel_common (frame, self->hv); |
2238 | slf_init_terminate_cancel_common (aTHX_ frame, self->hv); |
2141 | } |
2239 | } |
2142 | else |
2240 | else |
2143 | { |
2241 | { |
2144 | frame->prepare = prepare_nop; |
2242 | frame->prepare = prepare_nop; |
2145 | frame->check = slf_check_nop; |
2243 | frame->check = slf_check_nop; |
2146 | } |
2244 | } |
2147 | } |
2245 | } |
2148 | } |
2246 | } |
2149 | |
2247 | |
|
|
2248 | static int |
|
|
2249 | slf_check_safe_cancel (pTHX_ struct CoroSLF *frame) |
|
|
2250 | { |
|
|
2251 | frame->prepare = 0; |
|
|
2252 | coro_unwind_stacks (aTHX); |
|
|
2253 | |
|
|
2254 | slf_init_terminate_cancel_common (aTHX_ frame, (HV *)SvRV (coro_current)); |
|
|
2255 | |
|
|
2256 | return 1; |
|
|
2257 | } |
|
|
2258 | |
|
|
2259 | static int |
|
|
2260 | safe_cancel (pTHX_ struct coro *coro, SV **arg, int items) |
|
|
2261 | { |
|
|
2262 | if (coro->cctx) |
|
|
2263 | croak ("coro inside C callback, unable to cancel at this time, caught"); |
|
|
2264 | |
|
|
2265 | if (coro->flags & CF_NEW) |
|
|
2266 | { |
|
|
2267 | coro_set_status (aTHX_ coro, arg, items); |
|
|
2268 | coro_state_destroy (aTHX_ coro); |
|
|
2269 | } |
|
|
2270 | else |
|
|
2271 | { |
|
|
2272 | if (!coro->slf_frame.prepare) |
|
|
2273 | croak ("coro outside an SLF function, unable to cancel at this time, caught"); |
|
|
2274 | |
|
|
2275 | slf_destroy (aTHX_ coro); |
|
|
2276 | |
|
|
2277 | coro_set_status (aTHX_ coro, arg, items); |
|
|
2278 | coro->slf_frame.prepare = prepare_nop; |
|
|
2279 | coro->slf_frame.check = slf_check_safe_cancel; |
|
|
2280 | |
|
|
2281 | api_ready (aTHX_ (SV *)coro->hv); |
|
|
2282 | } |
|
|
2283 | |
|
|
2284 | return 1; |
|
|
2285 | } |
|
|
2286 | |
2150 | /*****************************************************************************/ |
2287 | /*****************************************************************************/ |
2151 | /* async pool handler */ |
2288 | /* async pool handler */ |
2152 | |
2289 | |
2153 | static int |
2290 | static int |
2154 | slf_check_pool_handler (pTHX_ struct CoroSLF *frame) |
2291 | slf_check_pool_handler (pTHX_ struct CoroSLF *frame) |
… | |
… | |
2192 | coro->saved_deffh = 0; |
2329 | coro->saved_deffh = 0; |
2193 | |
2330 | |
2194 | if (coro_rss (aTHX_ coro) > SvUV (sv_pool_rss) |
2331 | if (coro_rss (aTHX_ coro) > SvUV (sv_pool_rss) |
2195 | || av_len (av_async_pool) + 1 >= SvIV (sv_pool_size)) |
2332 | || av_len (av_async_pool) + 1 >= SvIV (sv_pool_size)) |
2196 | { |
2333 | { |
2197 | coro->invoke_cb = SvREFCNT_inc_NN ((SV *)cv_coro_terminate); |
2334 | slf_init_terminate_cancel_common (aTHX_ frame, hv); |
2198 | coro->invoke_av = newAV (); |
2335 | return; |
2199 | |
|
|
2200 | frame->prepare = prepare_nop; |
|
|
2201 | } |
2336 | } |
2202 | else |
2337 | else |
2203 | { |
2338 | { |
2204 | av_clear (GvAV (PL_defgv)); |
2339 | av_clear (GvAV (PL_defgv)); |
2205 | hv_store (hv, "desc", sizeof ("desc") - 1, SvREFCNT_inc_NN (sv_async_pool_idle), 0); |
2340 | hv_store (hv, "desc", sizeof ("desc") - 1, SvREFCNT_inc_NN (sv_async_pool_idle), 0); |
… | |
… | |
2428 | static void |
2563 | static void |
2429 | slf_init_cede_notself (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2564 | slf_init_cede_notself (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2430 | { |
2565 | { |
2431 | frame->prepare = prepare_cede_notself; |
2566 | frame->prepare = prepare_cede_notself; |
2432 | frame->check = slf_check_nop; |
2567 | frame->check = slf_check_nop; |
|
|
2568 | } |
|
|
2569 | |
|
|
2570 | /* "undo"/cancel a running slf call - used when cancelling a coro, mainly */ |
|
|
2571 | static void |
|
|
2572 | slf_destroy (pTHX_ struct coro *coro) |
|
|
2573 | { |
|
|
2574 | /* this callback is reserved for slf functions needing to do cleanup */ |
|
|
2575 | if (coro->slf_frame.destroy && coro->slf_frame.prepare && !PL_dirty) |
|
|
2576 | coro->slf_frame.destroy (aTHX_ &coro->slf_frame); |
|
|
2577 | |
|
|
2578 | /* |
|
|
2579 | * The on_destroy above most likely is from an SLF call. |
|
|
2580 | * Since by definition the SLF call will not finish when we destroy |
|
|
2581 | * the coro, we will have to force-finish it here, otherwise |
|
|
2582 | * cleanup functions cannot call SLF functions. |
|
|
2583 | */ |
|
|
2584 | coro->slf_frame.prepare = 0; |
2433 | } |
2585 | } |
2434 | |
2586 | |
2435 | /* |
2587 | /* |
2436 | * these not obviously related functions are all rolled into one |
2588 | * these not obviously related functions are all rolled into one |
2437 | * function to increase chances that they all will call transfer with the same |
2589 | * function to increase chances that they all will call transfer with the same |
… | |
… | |
2506 | croak (0); |
2658 | croak (0); |
2507 | } |
2659 | } |
2508 | |
2660 | |
2509 | /* return value handling - mostly like entersub */ |
2661 | /* return value handling - mostly like entersub */ |
2510 | /* make sure we put something on the stack in scalar context */ |
2662 | /* make sure we put something on the stack in scalar context */ |
2511 | if (GIMME_V == G_SCALAR) |
2663 | if (GIMME_V == G_SCALAR |
|
|
2664 | && expect_false (PL_stack_sp != PL_stack_base + checkmark + 1)) |
2512 | { |
2665 | { |
2513 | dSP; |
2666 | dSP; |
2514 | SV **bot = PL_stack_base + checkmark; |
2667 | SV **bot = PL_stack_base + checkmark; |
2515 | |
2668 | |
2516 | if (sp == bot) /* too few, push undef */ |
2669 | if (sp == bot) /* too few, push undef */ |
2517 | bot [1] = &PL_sv_undef; |
2670 | bot [1] = &PL_sv_undef; |
2518 | else if (sp != bot + 1) /* too many, take last one */ |
2671 | else /* too many, take last one */ |
2519 | bot [1] = *sp; |
2672 | bot [1] = *sp; |
2520 | |
2673 | |
2521 | SP = bot + 1; |
2674 | SP = bot + 1; |
2522 | |
2675 | |
2523 | PUTBACK; |
2676 | PUTBACK; |
… | |
… | |
2763 | SvREFCNT_dec (cb); |
2916 | SvREFCNT_dec (cb); |
2764 | } |
2917 | } |
2765 | } |
2918 | } |
2766 | |
2919 | |
2767 | static void |
2920 | static void |
2768 | coro_semaphore_on_destroy (pTHX_ struct coro *coro) |
2921 | coro_semaphore_destroy (pTHX_ struct CoroSLF *frame) |
2769 | { |
2922 | { |
2770 | /* call $sem->adjust (0) to possibly wake up some other waiters */ |
2923 | /* call $sem->adjust (0) to possibly wake up some other waiters */ |
2771 | coro_semaphore_adjust (aTHX_ (AV *)coro->slf_frame.data, 0); |
2924 | coro_semaphore_adjust (aTHX_ (AV *)frame->data, 0); |
2772 | } |
2925 | } |
2773 | |
2926 | |
2774 | static int |
2927 | static int |
2775 | slf_check_semaphore_down_or_wait (pTHX_ struct CoroSLF *frame, int acquire) |
2928 | slf_check_semaphore_down_or_wait (pTHX_ struct CoroSLF *frame, int acquire) |
2776 | { |
2929 | { |
2777 | AV *av = (AV *)frame->data; |
2930 | AV *av = (AV *)frame->data; |
2778 | SV *count_sv = AvARRAY (av)[0]; |
2931 | SV *count_sv = AvARRAY (av)[0]; |
|
|
2932 | SV *coro_hv = SvRV (coro_current); |
2779 | |
2933 | |
2780 | /* if we are about to throw, don't actually acquire the lock, just throw */ |
2934 | /* if we are about to throw, don't actually acquire the lock, just throw */ |
2781 | if (CORO_THROW) |
2935 | if (CORO_THROW) |
2782 | return 0; |
2936 | return 0; |
2783 | else if (SvIVX (count_sv) > 0) |
2937 | else if (SvIVX (count_sv) > 0) |
2784 | { |
2938 | { |
2785 | SvSTATE_current->on_destroy = 0; |
2939 | frame->destroy = 0; |
2786 | |
2940 | |
2787 | if (acquire) |
2941 | if (acquire) |
2788 | SvIVX (count_sv) = SvIVX (count_sv) - 1; |
2942 | SvIVX (count_sv) = SvIVX (count_sv) - 1; |
2789 | else |
2943 | else |
2790 | coro_semaphore_adjust (aTHX_ av, 0); |
2944 | coro_semaphore_adjust (aTHX_ av, 0); |
… | |
… | |
2795 | { |
2949 | { |
2796 | int i; |
2950 | int i; |
2797 | /* if we were woken up but can't down, we look through the whole */ |
2951 | /* if we were woken up but can't down, we look through the whole */ |
2798 | /* waiters list and only add us if we aren't in there already */ |
2952 | /* waiters list and only add us if we aren't in there already */ |
2799 | /* this avoids some degenerate memory usage cases */ |
2953 | /* this avoids some degenerate memory usage cases */ |
2800 | |
2954 | for (i = AvFILLp (av); i > 0; --i) // i > 0 is not an off-by-one bug |
2801 | for (i = 1; i <= AvFILLp (av); ++i) |
|
|
2802 | if (AvARRAY (av)[i] == SvRV (coro_current)) |
2955 | if (AvARRAY (av)[i] == coro_hv) |
2803 | return 1; |
2956 | return 1; |
2804 | |
2957 | |
2805 | av_push (av, SvREFCNT_inc (SvRV (coro_current))); |
2958 | av_push (av, SvREFCNT_inc (coro_hv)); |
2806 | return 1; |
2959 | return 1; |
2807 | } |
2960 | } |
2808 | } |
2961 | } |
2809 | |
2962 | |
2810 | static int |
2963 | static int |
… | |
… | |
2833 | { |
2986 | { |
2834 | av_push (av, SvREFCNT_inc (SvRV (coro_current))); |
2987 | av_push (av, SvREFCNT_inc (SvRV (coro_current))); |
2835 | |
2988 | |
2836 | frame->data = (void *)sv_2mortal (SvREFCNT_inc ((SV *)av)); |
2989 | frame->data = (void *)sv_2mortal (SvREFCNT_inc ((SV *)av)); |
2837 | frame->prepare = prepare_schedule; |
2990 | frame->prepare = prepare_schedule; |
2838 | |
|
|
2839 | /* to avoid race conditions when a woken-up coro gets terminated */ |
2991 | /* to avoid race conditions when a woken-up coro gets terminated */ |
2840 | /* we arrange for a temporary on_destroy that calls adjust (0) */ |
2992 | /* we arrange for a temporary on_destroy that calls adjust (0) */ |
2841 | SvSTATE_current->on_destroy = coro_semaphore_on_destroy; |
2993 | frame->destroy = coro_semaphore_destroy; |
2842 | } |
2994 | } |
2843 | } |
2995 | } |
2844 | |
2996 | |
2845 | static void |
2997 | static void |
2846 | slf_init_semaphore_down (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
2998 | slf_init_semaphore_down (pTHX_ struct CoroSLF *frame, CV *cv, SV **arg, int items) |
… | |
… | |
3541 | BOOT: |
3693 | BOOT: |
3542 | { |
3694 | { |
3543 | sv_pool_rss = coro_get_sv (aTHX_ "Coro::POOL_RSS" , TRUE); |
3695 | sv_pool_rss = coro_get_sv (aTHX_ "Coro::POOL_RSS" , TRUE); |
3544 | sv_pool_size = coro_get_sv (aTHX_ "Coro::POOL_SIZE" , TRUE); |
3696 | sv_pool_size = coro_get_sv (aTHX_ "Coro::POOL_SIZE" , TRUE); |
3545 | cv_coro_run = get_cv ( "Coro::_coro_run" , GV_ADD); |
3697 | cv_coro_run = get_cv ( "Coro::_coro_run" , GV_ADD); |
3546 | cv_coro_terminate = get_cv ( "Coro::terminate" , GV_ADD); |
|
|
3547 | coro_current = coro_get_sv (aTHX_ "Coro::current" , FALSE); SvREADONLY_on (coro_current); |
3698 | coro_current = coro_get_sv (aTHX_ "Coro::current" , FALSE); SvREADONLY_on (coro_current); |
3548 | av_async_pool = coro_get_av (aTHX_ "Coro::async_pool", TRUE); |
3699 | av_async_pool = coro_get_av (aTHX_ "Coro::async_pool", TRUE); |
3549 | av_destroy = coro_get_av (aTHX_ "Coro::destroy" , TRUE); |
3700 | av_destroy = coro_get_av (aTHX_ "Coro::destroy" , TRUE); |
3550 | sv_manager = coro_get_sv (aTHX_ "Coro::manager" , TRUE); |
3701 | sv_manager = coro_get_sv (aTHX_ "Coro::manager" , TRUE); |
3551 | sv_idle = coro_get_sv (aTHX_ "Coro::idle" , TRUE); |
3702 | sv_idle = coro_get_sv (aTHX_ "Coro::idle" , TRUE); |
… | |
… | |
void
_destroy (Coro::State coro)
	CODE:
        /* used by the manager thread to free a coro's resources */
        coro_state_destroy (aTHX_ coro);
|
|
void
on_destroy (Coro::State coro, SV *cb)
	CODE:
        /* register a private copy of cb (newSVsv) on the coro's
         * on_destroy list; ownership passes to the coro */
        coro_push_on_destroy (aTHX_ coro, newSVsv (cb));
|
|
3756 | void |
|
|
3757 | join (...) |
|
|
3758 | CODE: |
|
|
3759 | CORO_EXECUTE_SLF_XS (slf_init_join); |
3600 | |
3760 | |
void
terminate (...)
	CODE:
        /* dispatch to the SLF implementation, which may block/reschedule */
        CORO_EXECUTE_SLF_XS (slf_init_terminate);
3605 | |
3765 | |
void
cancel (...)
	CODE:
        /* dispatch to the SLF implementation, which may block/reschedule */
        CORO_EXECUTE_SLF_XS (slf_init_cancel);
|
|
3770 | |
|
|
/* safe_cancel: thin XS wrapper; extra stack args are forwarded to the
 * C implementation via C_ARGS */
int
safe_cancel (Coro::State self, ...)
	C_ARGS: aTHX_ self, &ST (1), items - 1
3610 | |
3774 | |
void
schedule (...)
	CODE:
        /* dispatch to the SLF implementation, which may block/reschedule */
        CORO_EXECUTE_SLF_XS (slf_init_schedule);