… | |
… | |
50 | |
50 | |
51 | our $idle; # idle handler |
51 | our $idle; # idle handler |
52 | our $main; # main coroutine |
52 | our $main; # main coroutine |
53 | our $current; # current coroutine |
53 | our $current; # current coroutine |
54 | |
54 | |
55 | our $VERSION = '3.11'; |
55 | our $VERSION = '3.3'; |
56 | |
56 | |
57 | our @EXPORT = qw(async cede schedule terminate current unblock_sub); |
57 | our @EXPORT = qw(async async_pool cede schedule terminate current unblock_sub); |
58 | our %EXPORT_TAGS = ( |
58 | our %EXPORT_TAGS = ( |
59 | prio => [qw(PRIO_MAX PRIO_HIGH PRIO_NORMAL PRIO_LOW PRIO_IDLE PRIO_MIN)], |
59 | prio => [qw(PRIO_MAX PRIO_HIGH PRIO_NORMAL PRIO_LOW PRIO_IDLE PRIO_MIN)], |
60 | ); |
60 | ); |
61 | our @EXPORT_OK = (@{$EXPORT_TAGS{prio}}, qw(nready)); |
61 | our @EXPORT_OK = (@{$EXPORT_TAGS{prio}}, qw(nready)); |
62 | |
62 | |
… | |
… | |
141 | $idle = sub { |
141 | $idle = sub { |
142 | require Carp; |
142 | require Carp; |
143 | Carp::croak ("FATAL: deadlock detected"); |
143 | Carp::croak ("FATAL: deadlock detected"); |
144 | }; |
144 | }; |
145 | |
145 | |
|
|
146 | sub _cancel { |
|
|
147 | my ($self) = @_; |
|
|
148 | |
|
|
149 | # free coroutine data and mark as destructed |
|
|
150 | $self->_destroy |
|
|
151 | or return; |
|
|
152 | |
|
|
153 | # call all destruction callbacks |
|
|
154 | $_->(@{$self->{status}}) |
|
|
155 | for @{(delete $self->{destroy_cb}) || []}; |
|
|
156 | } |
|
|
157 | |
146 | # this coroutine is necessary because a coroutine |
158 | # this coroutine is necessary because a coroutine |
147 | # cannot destroy itself. |
159 | # cannot destroy itself. |
148 | my @destroy; |
160 | my @destroy; |
|
|
161 | my $manager; |
|
|
162 | |
149 | my $manager; $manager = new Coro sub { |
163 | $manager = new Coro sub { |
150 | while () { |
164 | while () { |
151 | # by overwriting the state object with the manager we destroy it |
165 | (shift @destroy)->_cancel |
152 | # while still being able to schedule this coroutine (in case it has |
|
|
153 | # been readied multiple times. this is harmless since the manager |
|
|
154 | # can be called as many times as necessary and will always
|
|
155 | # remove itself from the runqueue |
|
|
156 | while (@destroy) { |
166 | while @destroy; |
157 | my $coro = pop @destroy; |
|
|
158 | $coro->{status} ||= []; |
|
|
159 | $_->ready for @{delete $coro->{join} || []}; |
|
|
160 | |
167 | |
161 | # the next line destroys the coro state, but keeps the |
|
|
162 | # coroutine itself intact (we basically make it a zombie |
|
|
163 | # coroutine that always runs the manager thread, so it's possible |
|
|
164 | # to transfer() to this coroutine). |
|
|
165 | $coro->_clone_state_from ($manager); |
|
|
166 | } |
|
|
167 | &schedule; |
168 | &schedule; |
168 | } |
169 | } |
169 | }; |
170 | }; |
|
|
171 | |
|
|
172 | $manager->prio (PRIO_MAX); |
170 | |
173 | |
171 | # static methods. not really. |
174 | # static methods. not really. |
172 | |
175 | |
173 | =back |
176 | =back |
174 | |
177 | |
… | |
… | |
195 | } 1,2,3,4; |
198 | } 1,2,3,4; |
196 | |
199 | |
197 | =cut |
200 | =cut |
198 | |
201 | |
199 | sub async(&@) { |
202 | sub async(&@) { |
200 | my $pid = new Coro @_; |
203 | my $coro = new Coro @_; |
201 | $pid->ready; |
204 | $coro->ready; |
202 | $pid |
205 | $coro |
|
|
206 | } |
|
|
207 | |
|
|
208 | =item async_pool { ... } [@args...] |
|
|
209 | |
|
|
210 | Similar to C<async>, but uses a coroutine pool, so you should not call |
|
|
211 | terminate or join (although you are allowed to), and you get a coroutine |
|
|
212 | that might have executed other code already (which can be good or bad :). |
|
|
213 | |
|
|
214 | Also, the block is executed in an C<eval> context and a warning will be |
|
|
215 | issued in case of an exception instead of terminating the program, as |
|
|
216 | C<async> does. As the coroutine is being reused, stuff like C<on_destroy> |
|
|
217 | will not work in the expected way, unless you call terminate or cancel, |
|
|
218 | which somehow defeats the purpose of pooling. |
|
|
219 | |
|
|
220 | The priority will be reset to C<0> after each job, otherwise the coroutine |
|
|
221 | will be re-used "as-is". |
|
|
222 | |
|
|
223 | The pool size is limited to 8 idle coroutines (this can be adjusted by |
|
|
224 | changing $Coro::POOL_SIZE), and there can be as many non-idle coros as |
|
|
225 | required. |
|
|
226 | |
|
|
227 | If you are concerned about pooled coroutines growing a lot because a |
|
|
228 | single C<async_pool> used a lot of stackspace you can e.g. C<async_pool { |
|
|
229 | terminate }> once per second or so to slowly replenish the pool. |
|
|
230 | |
|
|
231 | =cut |
|
|
232 | |
|
|
233 | our $POOL_SIZE = 8; |
|
|
234 | our @pool; |
|
|
235 | |
|
|
236 | sub pool_handler { |
|
|
237 | while () { |
|
|
238 | eval { |
|
|
239 | my ($cb, @arg) = @{ delete $current->{_invoke} or return }; |
|
|
240 | $cb->(@arg); |
|
|
241 | }; |
|
|
242 | warn $@ if $@; |
|
|
243 | |
|
|
244 | last if @pool >= $POOL_SIZE; |
|
|
245 | push @pool, $current; |
|
|
246 | |
|
|
247 | $current->prio (0); |
|
|
248 | schedule; |
|
|
249 | } |
|
|
250 | } |
|
|
251 | |
|
|
252 | sub async_pool(&@) { |
|
|
253 | # this is also inlined into the unblock_scheduler
|
|
254 | my $coro = (pop @pool or new Coro \&pool_handler); |
|
|
255 | |
|
|
256 | $coro->{_invoke} = [@_]; |
|
|
257 | $coro->ready; |
|
|
258 | |
|
|
259 | $coro |
203 | } |
260 | } |
204 | |
261 | |
205 | =item schedule |
262 | =item schedule |
206 | |
263 | |
207 | Calls the scheduler. Please note that the current coroutine will not be put |
264 | Calls the scheduler. Please note that the current coroutine will not be put |
… | |
… | |
232 | |
289 | |
233 | "Cede" to other coroutines. This function puts the current coroutine into the |
290 | "Cede" to other coroutines. This function puts the current coroutine into the |
234 | ready queue and calls C<schedule>, which has the effect of giving up the |
291 | ready queue and calls C<schedule>, which has the effect of giving up the |
235 | current "timeslice" to other coroutines of the same or higher priority. |
292 | current "timeslice" to other coroutines of the same or higher priority. |
236 | |
293 | |
|
|
294 | Returns true if at least one coroutine switch has happened. |
|
|
295 | |
|
|
296 | =item Coro::cede_notself |
|
|
297 | |
|
|
298 | Works like cede, but is not exported by default and will cede to any |
|
|
299 | coroutine, regardless of priority, once. |
|
|
300 | |
|
|
301 | Returns true if at least one coroutine switch has happened. |
|
|
302 | |
237 | =item terminate [arg...] |
303 | =item terminate [arg...] |
238 | |
304 | |
239 | Terminates the current coroutine with the given status values (see L<cancel>). |
305 | Terminates the current coroutine with the given status values (see L<cancel>). |
240 | |
306 | |
241 | =cut |
307 | =cut |
… | |
… | |
286 | Return whether the coroutine is currently in the ready queue or not,
352 | Return whether the coroutine is currently in the ready queue or not,
287 | |
353 | |
288 | =item $coroutine->cancel (arg...) |
354 | =item $coroutine->cancel (arg...) |
289 | |
355 | |
290 | Terminates the given coroutine and makes it return the given arguments as |
356 | Terminates the given coroutine and makes it return the given arguments as |
291 | status (default: the empty list). |
357 | status (default: the empty list). Never returns if the coroutine is the |
|
|
358 | current coroutine. |
292 | |
359 | |
293 | =cut |
360 | =cut |
294 | |
361 | |
295 | sub cancel { |
362 | sub cancel { |
296 | my $self = shift; |
363 | my $self = shift; |
297 | $self->{status} = [@_]; |
364 | $self->{status} = [@_]; |
|
|
365 | |
|
|
366 | if ($current == $self) { |
298 | push @destroy, $self; |
367 | push @destroy, $self; |
299 | $manager->ready; |
368 | $manager->ready; |
300 | &schedule if $current == $self; |
369 | &schedule while 1; |
|
|
370 | } else { |
|
|
371 | $self->_cancel; |
|
|
372 | } |
301 | } |
373 | } |
302 | |
374 | |
303 | =item $coroutine->join |
375 | =item $coroutine->join |
304 | |
376 | |
305 | Wait until the coroutine terminates and return any values given to the |
377 | Wait until the coroutine terminates and return any values given to the |
… | |
… | |
308 | |
380 | |
309 | =cut |
381 | =cut |
310 | |
382 | |
311 | sub join { |
383 | sub join { |
312 | my $self = shift; |
384 | my $self = shift; |
|
|
385 | |
313 | unless ($self->{status}) { |
386 | unless ($self->{status}) { |
314 | push @{$self->{join}}, $current; |
387 | my $current = $current; |
315 | &schedule; |
388 | |
|
|
389 | push @{$self->{destroy_cb}}, sub { |
|
|
390 | $current->ready; |
|
|
391 | undef $current; |
|
|
392 | }; |
|
|
393 | |
|
|
394 | &schedule while $current; |
316 | } |
395 | } |
|
|
396 | |
317 | wantarray ? @{$self->{status}} : $self->{status}[0]; |
397 | wantarray ? @{$self->{status}} : $self->{status}[0]; |
|
|
398 | } |
|
|
399 | |
|
|
400 | =item $coroutine->on_destroy (\&cb) |
|
|
401 | |
|
|
402 | Registers a callback that is called when this coroutine gets destroyed, |
|
|
403 | but before it is joined. The callback gets passed the terminate arguments, |
|
|
404 | if any. |
|
|
405 | |
|
|
406 | =cut |
|
|
407 | |
|
|
408 | sub on_destroy { |
|
|
409 | my ($self, $cb) = @_; |
|
|
410 | |
|
|
411 | push @{ $self->{destroy_cb} }, $cb; |
318 | } |
412 | } |
319 | |
413 | |
320 | =item $oldprio = $coroutine->prio ($newprio) |
414 | =item $oldprio = $coroutine->prio ($newprio) |
321 | |
415 | |
322 | Sets (or gets, if the argument is missing) the priority of the |
416 | Sets (or gets, if the argument is missing) the priority of the |
… | |
… | |
369 | i.e. that can be switched to. The value C<0> means that the only runnable
463 | i.e. that can be switched to. The value C<0> means that the only runnable
370 | coroutine is the currently running one, so C<cede> would have no effect, |
464 | coroutine is the currently running one, so C<cede> would have no effect, |
371 | and C<schedule> would cause a deadlock unless there is an idle handler |
465 | and C<schedule> would cause a deadlock unless there is an idle handler |
372 | that wakes up some coroutines. |
466 | that wakes up some coroutines. |
373 | |
467 | |
|
|
468 | =item my $guard = Coro::guard { ... } |
|
|
469 | |
|
|
470 | This creates and returns a guard object. Nothing happens until the object
|
|
471 | gets destroyed, in which case the codeblock given as argument will be |
|
|
472 | executed. This is useful to free locks or other resources in case of a |
|
|
473 | runtime error or when the coroutine gets canceled, as in both cases the |
|
|
474 | guard block will be executed. The guard object supports only one method, |
|
|
475 | C<< ->cancel >>, which will keep the codeblock from being executed. |
|
|
476 | |
|
|
477 | Example: set some flag and clear it again when the coroutine gets canceled |
|
|
478 | or the function returns: |
|
|
479 | |
|
|
480 | sub do_something { |
|
|
481 | my $guard = Coro::guard { $busy = 0 }; |
|
|
482 | $busy = 1; |
|
|
483 | |
|
|
484 | # do something that requires $busy to be true |
|
|
485 | } |
|
|
486 | |
|
|
487 | =cut |
|
|
488 | |
|
|
489 | sub guard(&) { |
|
|
490 | bless \(my $cb = $_[0]), "Coro::guard" |
|
|
491 | } |
|
|
492 | |
|
|
493 | sub Coro::guard::cancel { |
|
|
494 | ${$_[0]} = sub { }; |
|
|
495 | } |
|
|
496 | |
|
|
497 | sub Coro::guard::DESTROY { |
|
|
498 | ${$_[0]}->(); |
|
|
499 | } |
|
|
500 | |
|
|
501 | |
374 | =item unblock_sub { ... } |
502 | =item unblock_sub { ... } |
375 | |
503 | |
376 | This utility function takes a BLOCK or code reference and "unblocks" it, |
504 | This utility function takes a BLOCK or code reference and "unblocks" it, |
377 | returning the new coderef. This means that the new coderef will return |
505 | returning the new coderef. This means that the new coderef will return |
378 | immediately without blocking, returning nothing, while the original code |
506 | immediately without blocking, returning nothing, while the original code |
… | |
… | |
391 | In short: simply use C<unblock_sub { ... }> instead of C<sub { ... }> when |
519 | In short: simply use C<unblock_sub { ... }> instead of C<sub { ... }> when |
392 | creating event callbacks that want to block. |
520 | creating event callbacks that want to block. |
393 | |
521 | |
394 | =cut |
522 | =cut |
395 | |
523 | |
396 | our @unblock_pool; |
|
|
397 | our @unblock_queue; |
524 | our @unblock_queue; |
398 | our $UNBLOCK_POOL_SIZE = 2; |
|
|
399 | |
525 | |
400 | sub unblock_handler_ { |
526 | # we create a special coro because we want to cede, |
401 | while () { |
527 | # to reduce pressure on the coro pool (because most callbacks |
402 | my ($cb, @arg) = @{ delete $Coro::current->{arg} }; |
528 | # return immediately and can be reused) and because we cannot cede |
403 | $cb->(@arg); |
529 | # inside an event callback. |
404 | |
|
|
405 | last if @unblock_pool >= $UNBLOCK_POOL_SIZE; |
|
|
406 | push @unblock_pool, $Coro::current; |
|
|
407 | schedule; |
|
|
408 | } |
|
|
409 | } |
|
|
410 | |
|
|
411 | our $unblock_scheduler = async { |
530 | our $unblock_scheduler = async { |
412 | while () { |
531 | while () { |
413 | while (my $cb = pop @unblock_queue) { |
532 | while (my $cb = pop @unblock_queue) { |
|
|
533 | # this is an inlined copy of async_pool |
414 | my $handler = (pop @unblock_pool or new Coro \&unblock_handler_); |
534 | my $coro = (pop @pool or new Coro \&pool_handler); |
415 | $handler->{arg} = $cb; |
535 | |
|
|
536 | $coro->{_invoke} = $cb; |
416 | $handler->ready; |
537 | $coro->ready; |
417 | cede; |
538 | cede; # for short-lived callbacks, this reduces pressure on the coro pool |
418 | } |
539 | } |
419 | |
540 | schedule; # sleep well |
420 | schedule; |
|
|
421 | } |
541 | } |
422 | }; |
542 | }; |
423 | |
543 | |
424 | sub unblock_sub(&) { |
544 | sub unblock_sub(&) { |
425 | my $cb = shift; |
545 | my $cb = shift; |
426 | |
546 | |
427 | sub { |
547 | sub { |
428 | push @unblock_queue, [$cb, @_]; |
548 | unshift @unblock_queue, [$cb, @_]; |
429 | $unblock_scheduler->ready; |
549 | $unblock_scheduler->ready; |
430 | } |
550 | } |
431 | } |
551 | } |
432 | |
552 | |
433 | =back |
553 | =back |