… | |
… | |
2599 | |
2599 | |
2600 | /* "undo"/cancel a running slf call - used when cancelling a coro, mainly */ |
2600 | /* "undo"/cancel a running slf call - used when cancelling a coro, mainly */ |
2601 | static void |
2601 | static void |
2602 | slf_destroy (pTHX_ struct coro *coro) |
2602 | slf_destroy (pTHX_ struct coro *coro) |
2603 | { |
2603 | { |
2604 | /* this callback is reserved for slf functions needing to do cleanup */ |
2604 | struct CoroSLF frame = coro->slf_frame; |
2605 | if (coro->slf_frame.destroy && coro->slf_frame.prepare && !PL_dirty) |
|
|
2606 | coro->slf_frame.destroy (aTHX_ &coro->slf_frame); |
|
|
2607 | |
2605 | |
2608 | /* |
2606 | /* |
2609 | * The on_destroy above most likely is from an SLF call. |
2607 | * The on_destroy below most likely is from an SLF call. |
2610 | * Since by definition the SLF call will not finish when we destroy |
2608 | * Since by definition the SLF call will not finish when we destroy |
2611 | * the coro, we will have to force-finish it here, otherwise |
2609 | * the coro, we will have to force-finish it here, otherwise |
2612 | * cleanup functions cannot call SLF functions. |
2610 | * cleanup functions cannot call SLF functions. |
2613 | */ |
2611 | */ |
2614 | coro->slf_frame.prepare = 0; |
2612 | coro->slf_frame.prepare = 0; |
|
|
2613 | |
|
|
2614 | /* this callback is reserved for slf functions needing to do cleanup */ |
|
|
2615 | if (frame.destroy && frame.prepare && !PL_dirty) |
|
|
2616 | frame.destroy (aTHX_ &frame); |
2615 | } |
2617 | } |
2616 | |
2618 | |
/*
 * these not obviously related functions are all rolled into one
 * function to increase chances that they all will call transfer with the same
… | |
… | |
2959 | { |
2961 | { |
2960 | AV *av = (AV *)frame->data; |
2962 | AV *av = (AV *)frame->data; |
2961 | SV *count_sv = AvARRAY (av)[0]; |
2963 | SV *count_sv = AvARRAY (av)[0]; |
2962 | SV *coro_hv = SvRV (coro_current); |
2964 | SV *coro_hv = SvRV (coro_current); |
2963 | |
2965 | |
|
|
2966 | frame->destroy = 0; |
|
|
2967 | |
2964 | /* if we are about to throw, don't actually acquire the lock, just throw */ |
2968 | /* if we are about to throw, don't actually acquire the lock, just throw */ |
2965 | if (CORO_THROW) |
2969 | if (ecb_expect_false (CORO_THROW)) |
|
|
2970 | { |
|
|
2971 | /* we still might be responsible for the semaphore, so wake up others */ |
|
|
2972 | coro_semaphore_adjust (aTHX_ av, 0); |
|
|
2973 | |
2966 | return 0; |
2974 | return 0; |
|
|
2975 | } |
2967 | else if (SvIVX (count_sv) > 0) |
2976 | else if (SvIVX (count_sv) > 0) |
2968 | { |
2977 | { |
2969 | frame->destroy = 0; |
|
|
2970 | |
|
|
2971 | if (acquire) |
2978 | if (acquire) |
2972 | SvIVX (count_sv) = SvIVX (count_sv) - 1; |
2979 | SvIVX (count_sv) = SvIVX (count_sv) - 1; |
2973 | else |
2980 | else |
2974 | coro_semaphore_adjust (aTHX_ av, 0); |
2981 | coro_semaphore_adjust (aTHX_ av, 0); |
2975 | |
2982 | |