… | |
… | |
45 | Unlike the name component C<stamp> might indicate, it is also used for |
45 | Unlike the name component C<stamp> might indicate, it is also used for |
46 | time differences throughout libeio. |
46 | time differences throughout libeio. |
47 | |
47 | |
48 | =head2 FORK SUPPORT |
48 | =head2 FORK SUPPORT |
49 | |
49 | |
50 | Calling C<fork ()> is fully supported by this module. It is implemented in these steps: |
50 | Calling C<fork ()> is fully supported by this module - but you must not |
|
|
51 | rely on this. It is currently implemented in these steps: |
51 | |
52 | |
52 | 1. wait till all requests in "execute" state have been handled |
53 | 1. wait till all requests in "execute" state have been handled |
53 | (basically requests that are already handed over to the kernel). |
54 | (basically requests that are already handed over to the kernel). |
54 | 2. fork |
55 | 2. fork |
55 | 3. in the parent, continue business as usual, done |
56 | 3. in the parent, continue business as usual, done |
56 | 4. in the child, destroy all ready and pending requests and free the |
57 | 4. in the child, destroy all ready and pending requests and free the |
57 | memory used by the worker threads. This gives you a fully empty |
58 | memory used by the worker threads. This gives you a fully empty |
58 | libeio queue. |
59 | libeio queue. |
59 | |
60 | |
60 | Note, however, since libeio does use threads, the above guarantee doesn't
61 | Note, however, since libeio does use threads, the above guarantee doesn't |
61 | cover your libc, for example, malloc and other libc functions are not |
62 | cover your libc, for example, malloc and other libc functions are not |
62 | fork-safe, so there is very little you can do after a fork, and in fact,
63 | fork-safe, so there is very little you can do after a fork, and in fact, |
63 | the above might crash, and thus change. |
64 | the above might crash, and thus change. |
64 | |
65 | |
65 | =head1 INITIALISATION/INTEGRATION |
66 | =head1 INITIALISATION/INTEGRATION |
66 | |
67 | |
67 | Before you can call any eio functions you first have to initialise the |
68 | Before you can call any eio functions you first have to initialise the |
… | |
… | |
130 | |
131 | |
131 | If C<eio_poll ()> is configured to not handle all results in one go |
132 | If C<eio_poll ()> is configured to not handle all results in one go |
132 | (i.e. it returns C<-1>) then you should start an idle watcher that calls |
133 | (i.e. it returns C<-1>) then you should start an idle watcher that calls |
133 | C<eio_poll> until it returns something C<!= -1>. |
134 | C<eio_poll> until it returns something C<!= -1>. |
134 | |
135 | |
135 | A full-featured connector between libeio and libev would look as follows
136 | A full-featured connector between libeio and libev would look as follows |
136 | (if C<eio_poll> is handling all requests, it can of course be simplified a |
137 | (if C<eio_poll> is handling all requests, it can of course be simplified a |
137 | lot by removing the idle watcher logic): |
138 | lot by removing the idle watcher logic): |
138 | |
139 | |
139 | static struct ev_loop *loop; |
140 | static struct ev_loop *loop; |
140 | static ev_idle repeat_watcher; |
141 | static ev_idle repeat_watcher; |
141 | static ev_async ready_watcher; |
142 | static ev_async ready_watcher; |
142 | |
143 | |
143 | /* idle watcher callback, only used when eio_poll */ |
144 | /* idle watcher callback, only used when eio_poll */ |
144 | /* didn't handle all results in one call */ |
145 | /* didn't handle all results in one call */ |
145 | static void |
146 | static void |
146 | repeat (EV_P_ ev_idle *w, int revents) |
147 | repeat (EV_P_ ev_idle *w, int revents) |
147 | { |
148 | { |
148 | if (eio_poll () != -1) |
149 | if (eio_poll () != -1) |
149 | ev_idle_stop (EV_A_ w); |
150 | ev_idle_stop (EV_A_ w); |
150 | } |
151 | } |
151 | |
152 | |
152 | /* eio has some results, process them */ |
153 | /* eio has some results, process them */ |
153 | static void |
154 | static void |
154 | ready (EV_P_ ev_async *w, int revents) |
155 | ready (EV_P_ ev_async *w, int revents) |
155 | { |
156 | { |
156 | if (eio_poll () == -1) |
157 | if (eio_poll () == -1) |
157 | ev_idle_start (EV_A_ &repeat_watcher); |
158 | ev_idle_start (EV_A_ &repeat_watcher); |
158 | } |
159 | } |
159 | |
160 | |
160 | /* wake up the event loop */ |
161 | /* wake up the event loop */ |
161 | static void |
162 | static void |
162 | want_poll (void) |
163 | want_poll (void) |
163 | { |
164 | { |
164 | ev_async_send (loop, &ready_watcher);
165 | ev_async_send (loop, &ready_watcher);
165 | } |
166 | } |
166 | |
167 | |
167 | void |
168 | void |
168 | my_init_eio () |
169 | my_init_eio () |
169 | { |
170 | { |
170 | loop = EV_DEFAULT; |
171 | loop = EV_DEFAULT; |
171 | |
172 | |
172 | ev_idle_init (&repeat_watcher, repeat); |
173 | ev_idle_init (&repeat_watcher, repeat); |
173 | ev_async_init (&ready_watcher, ready); |
174 | ev_async_init (&ready_watcher, ready); |
174 | ev_async_start (loop, &ready_watcher);
175 | ev_async_start (loop, &ready_watcher);
175 | |
176 | |
176 | eio_init (want_poll, 0); |
177 | eio_init (want_poll, 0); |
177 | } |
178 | } |
178 | |
179 | |
179 | For most other event loops, you would typically use a pipe - the event |
180 | For most other event loops, you would typically use a pipe - the event |
180 | loop should be told to wait for read readiness on the read end. In |
181 | loop should be told to wait for read readiness on the read end. In |
181 | C<want_poll> you would write a single byte, in C<done_poll> you would try |
182 | C<want_poll> you would write a single byte, in C<done_poll> you would try |
182 | to read that byte, and in the callback for the read end, you would call |
183 | to read that byte, and in the callback for the read end, you would call |
183 | C<eio_poll>. |
184 | C<eio_poll>. |
184 | |
185 | |
185 | You don't have to take special care in the case C<eio_poll> doesn't handle |
186 | You don't have to take special care in the case C<eio_poll> doesn't handle |
186 | all requests, as the done callback will not be invoked, so the event loop |
187 | all requests, as the done callback will not be invoked, so the event loop |
187 | will still signal readiness for the pipe until I<all> results have been
188 | will still signal readiness for the pipe until I<all> results have been |
188 | processed. |
189 | processed. |
189 | |
190 | |
190 | |
191 | |
191 | =head1 HIGH LEVEL REQUEST API |
192 | =head1 HIGH LEVEL REQUEST API |
192 | |
193 | |
… | |
… | |
260 | } |
261 | } |
261 | |
262 | |
262 | /* the first three arguments are passed to open(2) */ |
263 | /* the first three arguments are passed to open(2) */ |
263 | /* the remaining are priority, callback and data */ |
264 | /* the remaining are priority, callback and data */ |
264 | if (!eio_open ("/etc/passwd", O_RDONLY, 0, 0, file_open_done, 0)) |
265 | if (!eio_open ("/etc/passwd", O_RDONLY, 0, 0, file_open_done, 0)) |
265 | abort (); /* something went wrong, we will all die!!! */
266 | abort (); /* something went wrong, we will all die!!! */ |
266 | |
267 | |
267 | Note that you additionally need to call C<eio_poll> when the C<want_cb> |
268 | Note that you additionally need to call C<eio_poll> when the C<want_cb> |
268 | indicates that requests are ready to be processed. |
269 | indicates that requests are ready to be processed. |
|
|
270 | |
|
|
271 | =head2 CANCELLING REQUESTS |
|
|
272 | |
|
|
273 | Sometimes the need for a request goes away before the request is |
|
|
274 | finished. In that case, one can cancel the request by a call to |
|
|
275 | C<eio_cancel>: |
|
|
276 | |
|
|
277 | =over 4 |
|
|
278 | |
|
|
279 | =item eio_cancel (eio_req *req) |
|
|
280 | |
|
|
281 | Cancel the request (and all its subrequests). If the request is currently |
|
|
282 | executing it might still continue to execute, and in other cases it might |
|
|
283 | still take a while till the request is cancelled. |
|
|
284 | |
|
|
285 | Even if cancelled, the finish callback will still be invoked - the |
|
|
286 | callbacks of all cancellable requests need to check whether the request |
|
|
287 | has been cancelled by calling C<EIO_CANCELLED (req)>: |
|
|
288 | |
|
|
289 | static int |
|
|
290 | my_eio_cb (eio_req *req) |
|
|
291 | { |
|
|
292 | if (EIO_CANCELLED (req)) |
|
|
293 | return 0; |
|
|
294 | } |
|
|
295 | |
|
|
296 | In addition, cancelled requests will I<either> have C<< req->result >> |
|
|
297 | set to C<-1> and C<errno> to C<ECANCELED>, or I<otherwise> they were |
|
|
298 | successfully executed, despite being cancelled (e.g. when they have |
|
|
299 | already been executed at the time they were cancelled). |
|
|
300 | |
|
|
301 | C<EIO_CANCELLED> is still true for requests that have successfully |
|
|
302 | executed, as long as C<eio_cancel> was called on them at some point. |
|
|
303 | |
|
|
304 | =back |
269 | |
305 | |
270 | =head2 AVAILABLE REQUESTS |
306 | =head2 AVAILABLE REQUESTS |
271 | |
307 | |
272 | The following request functions are available. I<All> of them return the |
308 | The following request functions are available. I<All> of them return the |
273 | C<eio_req *> on success and C<0> on failure, and I<all> of them have the |
309 | C<eio_req *> on success and C<0> on failure, and I<all> of them have the |
… | |
… | |
369 | free (target); |
405 | free (target); |
370 | } |
406 | } |
371 | |
407 | |
372 | =item eio_realpath (const char *path, int pri, eio_cb cb, void *data) |
408 | =item eio_realpath (const char *path, int pri, eio_cb cb, void *data) |
373 | |
409 | |
374 | Similar to the realpath libc function, but unlike that one, result is |
410 | Similar to the realpath libc function, but unlike that one, C<< |
375 | C<-1> on failure and the length of the returned path in C<ptr2> (which is |
411 | req->result >> is C<-1> on failure. On success, the result is the length |
376 | not 0-terminated) - this is similar to readlink. |
412 | of the returned path in C<ptr2> (which is I<NOT> 0-terminated) - this is |
|
|
413 | similar to readlink. |
377 | |
414 | |
378 | =item eio_stat (const char *path, int pri, eio_cb cb, void *data) |
415 | =item eio_stat (const char *path, int pri, eio_cb cb, void *data) |
379 | |
416 | |
380 | =item eio_lstat (const char *path, int pri, eio_cb cb, void *data) |
417 | =item eio_lstat (const char *path, int pri, eio_cb cb, void *data) |
381 | |
418 | |
382 | =item eio_fstat (int fd, int pri, eio_cb cb, void *data) |
419 | =item eio_fstat (int fd, int pri, eio_cb cb, void *data) |
383 | |
420 | |
384 | Stats a file - if C<< req->result >> indicates success, then you can |
421 | Stats a file - if C<< req->result >> indicates success, then you can |
385 | access the C<struct stat>-like structure via C<< req->ptr2 >>: |
422 | access the C<struct stat>-like structure via C<< req->ptr2 >>: |
386 | |
423 | |
387 | EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)req->ptr2; |
424 | EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)req->ptr2; |
388 | |
425 | |
389 | =item eio_statvfs (const char *path, int pri, eio_cb cb, void *data) |
426 | =item eio_statvfs (const char *path, int pri, eio_cb cb, void *data) |
390 | |
427 | |
391 | =item eio_fstatvfs (int fd, int pri, eio_cb cb, void *data) |
428 | =item eio_fstatvfs (int fd, int pri, eio_cb cb, void *data) |
392 | |
429 | |
393 | Stats a filesystem - if C<< req->result >> indicates success, then you can |
430 | Stats a filesystem - if C<< req->result >> indicates success, then you can |
394 | access the C<struct statvfs>-like structure via C<< req->ptr2 >>: |
431 | access the C<struct statvfs>-like structure via C<< req->ptr2 >>: |
395 | |
432 | |
396 | EIO_STRUCT_STATVFS *statdata = (EIO_STRUCT_STATVFS *)req->ptr2; |
433 | EIO_STRUCT_STATVFS *statdata = (EIO_STRUCT_STATVFS *)req->ptr2; |
397 | |
434 | |
398 | =back |
435 | =back |
399 | |
436 | |
400 | =head3 READING DIRECTORIES |
437 | =head3 READING DIRECTORIES |
401 | |
438 | |
402 | Reading directories sounds simple, but can be rather demanding, especially |
439 | Reading directories sounds simple, but can be rather demanding, especially |
403 | if you want to do stuff such as traversing a directory hierarchy or
440 | if you want to do stuff such as traversing a directory hierarchy or |
404 | processing all files in a directory. Libeio can assist these complex tasks
441 | processing all files in a directory. Libeio can assist these complex tasks |
405 | with its C<eio_readdir> call.
442 | with its C<eio_readdir> call.
406 | |
443 | |
407 | =over 4 |
444 | =over 4 |
408 | |
445 | |
409 | =item eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data) |
446 | =item eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data) |
… | |
… | |
441 | |
478 | |
442 | If this flag is specified, then, in addition to the names in C<ptr2>, |
479 | If this flag is specified, then, in addition to the names in C<ptr2>, |
443 | also an array of C<struct eio_dirent> is returned, in C<ptr1>. A C<struct |
480 | also an array of C<struct eio_dirent> is returned, in C<ptr1>. A C<struct |
444 | eio_dirent> looks like this: |
481 | eio_dirent> looks like this: |
445 | |
482 | |
446 | struct eio_dirent |
483 | struct eio_dirent |
447 | { |
484 | { |
448 | int nameofs; /* offset of null-terminated name string in (char *)req->ptr2 */ |
485 | int nameofs; /* offset of null-terminated name string in (char *)req->ptr2 */ |
449 | unsigned short namelen; /* size of filename without trailing 0 */ |
486 | unsigned short namelen; /* size of filename without trailing 0 */ |
450 | unsigned char type; /* one of EIO_DT_* */ |
487 | unsigned char type; /* one of EIO_DT_* */ |
451 | signed char score; /* internal use */ |
488 | signed char score; /* internal use */ |
452 | ino_t inode; /* the inode number, if available, otherwise unspecified */ |
489 | ino_t inode; /* the inode number, if available, otherwise unspecified */ |
453 | }; |
490 | }; |
454 | |
491 | |
455 | The only members you normally would access are C<nameofs>, which is the |
492 | The only members you normally would access are C<nameofs>, which is the |
456 | byte-offset from C<ptr2> to the start of the name, C<namelen> and C<type>. |
493 | byte-offset from C<ptr2> to the start of the name, C<namelen> and C<type>. |
457 | |
494 | |
458 | C<type> can be one of: |
495 | C<type> can be one of: |
… | |
… | |
501 | When this flag is specified, then the names will be returned in an order |
538 | When this flag is specified, then the names will be returned in an order |
502 | suitable for stat()'ing each one. That is, when you plan to stat() |
539 | suitable for stat()'ing each one. That is, when you plan to stat() |
503 | all files in the given directory, then the returned order will likely |
540 | all files in the given directory, then the returned order will likely |
504 | be fastest. |
541 | be fastest. |
505 | |
542 | |
506 | If both this flag and C<EIO_READDIR_DIRS_FIRST> are specified, then |
543 | If both this flag and C<EIO_READDIR_DIRS_FIRST> are specified, then the |
507 | the likely dirs come first, resulting in a less optimal stat order. |
544 | likely directories come first, resulting in a less optimal stat order. |
508 | |
545 | |
509 | =item EIO_READDIR_FOUND_UNKNOWN |
546 | =item EIO_READDIR_FOUND_UNKNOWN |
510 | |
547 | |
511 | This flag should not be specified when calling C<eio_readdir>. Instead, |
548 | This flag should not be specified when calling C<eio_readdir>. Instead, |
512 | it is being set by C<eio_readdir> (you can access the C<flags> via C<< |
549 | it is being set by C<eio_readdir> (you can access the C<flags> via C<< |
513 | req->int1 >>, when any of the C<type>'s found were C<EIO_DT_UNKNOWN>. The |
550 | req->int1 >>, when any of the C<type>'s found were C<EIO_DT_UNKNOWN>. The |
514 | absence of this flag therefore indicates that all C<type>'s are known,
551 | absence of this flag therefore indicates that all C<type>'s are known, |
515 | which can be used to speed up some algorithms. |
552 | which can be used to speed up some algorithms. |
516 | |
553 | |
517 | A typical use case would be to identify all subdirectories within a |
554 | A typical use case would be to identify all subdirectories within a |
518 | directory - you would ask C<eio_readdir> for C<EIO_READDIR_DIRS_FIRST>. If |
555 | directory - you would ask C<eio_readdir> for C<EIO_READDIR_DIRS_FIRST>. If |
519 | then this flag is I<NOT> set, then all the entries at the beginning of the |
556 | then this flag is I<NOT> set, then all the entries at the beginning of the |
… | |
… | |
557 | as calling C<fdatasync>. |
594 | as calling C<fdatasync>. |
558 | |
595 | |
559 | Flags can be any combination of C<EIO_SYNC_FILE_RANGE_WAIT_BEFORE>, |
596 | Flags can be any combination of C<EIO_SYNC_FILE_RANGE_WAIT_BEFORE>, |
560 | C<EIO_SYNC_FILE_RANGE_WRITE> and C<EIO_SYNC_FILE_RANGE_WAIT_AFTER>. |
597 | C<EIO_SYNC_FILE_RANGE_WRITE> and C<EIO_SYNC_FILE_RANGE_WAIT_AFTER>. |
561 | |
598 | |
|
|
599 | =item eio_fallocate (int fd, int mode, off_t offset, off_t len, int pri, eio_cb cb, void *data) |
|
|
600 | |
|
|
601 | Calls C<fallocate> (note: I<NOT> C<posix_fallocate>!). If the syscall is |
|
|
602 | missing, then it returns failure and sets C<errno> to C<ENOSYS>. |
|
|
603 | |
|
|
604 | The C<mode> argument can be C<0> (for behaviour similar to |
|
|
605 | C<posix_fallocate>), or C<EIO_FALLOC_FL_KEEP_SIZE>, which keeps the size |
|
|
606 | of the file unchanged (but still preallocates space beyond end of file). |
|
|
607 | |
562 | =back |
608 | =back |
563 | |
609 | |
564 | =head3 LIBEIO-SPECIFIC REQUESTS |
610 | =head3 LIBEIO-SPECIFIC REQUESTS |
565 | |
611 | |
566 | These requests are specific to libeio and do not correspond to any OS call. |
612 | These requests are specific to libeio and do not correspond to any OS call. |
… | |
… | |
607 | |
653 | |
608 | eio_custom (my_open, 0, my_open_done, "/etc/passwd"); |
654 | eio_custom (my_open, 0, my_open_done, "/etc/passwd"); |
609 | |
655 | |
610 | =item eio_busy (eio_tstamp delay, int pri, eio_cb cb, void *data) |
656 | =item eio_busy (eio_tstamp delay, int pri, eio_cb cb, void *data) |
611 | |
657 | |
612 | This is a request that takes C<delay> seconds to execute, but otherwise
658 | This is a request that takes C<delay> seconds to execute, but otherwise |
613 | does nothing - it simply puts one of the worker threads to sleep for this |
659 | does nothing - it simply puts one of the worker threads to sleep for this |
614 | long. |
660 | long. |
615 | |
661 | |
616 | This request can be used to artificially increase load, e.g. for debugging |
662 | This request can be used to artificially increase load, e.g. for debugging |
617 | or benchmarking reasons. |
663 | or benchmarking reasons. |
… | |
… | |
633 | There are two primary use cases for this: a) bundle many requests into a |
679 | There are two primary use cases for this: a) bundle many requests into a |
634 | single, composite, request with a definite callback and the ability to |
680 | single, composite, request with a definite callback and the ability to |
635 | cancel the whole request with its subrequests and b) limiting the number |
681 | cancel the whole request with its subrequests and b) limiting the number |
636 | of "active" requests. |
682 | of "active" requests. |
637 | |
683 | |
638 | Further below you will find more discussion of these topics - first follows
684 | Further below you will find more discussion of these topics - first |
639 | the reference section detailing the request generator and other methods. |
685 | follows the reference section detailing the request generator and other |
|
|
686 | methods. |
640 | |
687 | |
641 | =over 4 |
688 | =over 4 |
642 | |
689 | |
643 | =item eio_grp (eio_cb cb, void *data) |
690 | =item eio_req *grp = eio_grp (eio_cb cb, void *data) |
644 | |
691 | |
645 | Creates and submits a group request. |
692 | Creates, submits and returns a group request. Note that it doesn't have a |
|
|
693 | priority, unlike all other requests. |
646 | |
694 | |
647 | =back |
695 | =item eio_grp_add (eio_req *grp, eio_req *req) |
648 | |
696 | |
|
|
697 | Adds a request to the request group. |
|
|
698 | |
|
|
699 | =item eio_grp_cancel (eio_req *grp) |
|
|
700 | |
|
|
701 | Cancels all requests I<in> the group, but I<not> the group request |
|
|
702 | itself. You can cancel the group request I<and> all subrequests via a |
|
|
703 | normal C<eio_cancel> call. |
|
|
704 | |
|
|
705 | =back |
|
|
706 | |
|
|
707 | =head4 GROUP REQUEST LIFETIME |
|
|
708 | |
|
|
709 | Left alone, a group request will instantly move to the pending state and |
|
|
710 | will be finished at the next call of C<eio_poll>. |
|
|
711 | |
|
|
712 | The usefulness stems from the fact that, if a subrequest is added to a |
|
|
713 | group I<before> a call to C<eio_poll>, via C<eio_grp_add>, then the group |
|
|
714 | will not finish until all the subrequests have finished. |
|
|
715 | |
|
|
716 | So the usage cycle of a group request is like this: after it is created, |
|
|
717 | you normally instantly add a subrequest. If none is added, the group |
|
|
718 | request will finish on its own. As long as subrequests are added before
|
|
719 | the group request is finished it will be kept from finishing, that is the |
|
|
720 | callbacks of any subrequests can, in turn, add more requests to the group, |
|
|
721 | and as long as any requests are active, the group request itself will not |
|
|
722 | finish. |
|
|
723 | |
|
|
724 | =head4 CREATING COMPOSITE REQUESTS |
|
|
725 | |
|
|
726 | Imagine you wanted to create an C<eio_load> request that opens a file, |
|
|
727 | reads it and closes it. This means it has to execute at least three eio |
|
|
728 | requests, but for various reasons it might be nice if that request looked |
|
|
729 | like any other eio request. |
|
|
730 | |
|
|
731 | This can be done with groups: |
|
|
732 | |
|
|
733 | =over 4 |
|
|
734 | |
|
|
735 | =item 1) create the request object |
|
|
736 | |
|
|
737 | Create a group that contains all further requests. This is the request you |
|
|
738 | can return as "the load request". |
|
|
739 | |
|
|
740 | =item 2) open the file, maybe |
|
|
741 | |
|
|
742 | Next, open the file with C<eio_open> and add the request to the group |
|
|
743 | request and you are finished setting up the request. |
|
|
744 | |
|
|
745 | If, for some reason, you cannot C<eio_open> (path is a null ptr?) you |
|
|
746 | can set C<< grp->result >> to C<-1> to signal an error and let the group |
|
|
747 | request finish on its own. |
|
|
748 | |
|
|
749 | =item 3) open callback adds more requests |
|
|
750 | |
|
|
751 | In the open callback, if the open was not successful, copy C<< |
|
|
752 | req->errorno >> to C<< grp->errorno >> and set C<< grp->result >> to
|
|
753 | C<-1> to signal an error. |
|
|
754 | |
|
|
755 | Otherwise, malloc some memory or so and issue a read request, adding the |
|
|
756 | read request to the group. |
|
|
757 | |
|
|
758 | =item 4) continue issuing requests till finished |
|
|
759 | |
|
|
760 | In the read callback, check for errors and possibly continue with
|
|
761 | C<eio_close> or any other eio request in the same way. |
|
|
762 | |
|
|
763 | As soon as no new requests are added the group request will finish. Make |
|
|
764 | sure you I<always> set C<< grp->result >> to some sensible value. |
|
|
765 | |
|
|
766 | =back |
|
|
767 | |
|
|
768 | =head4 REQUEST LIMITING |
649 | |
769 | |
650 | |
770 | |
651 | #TODO |
771 | #TODO |
652 | |
772 | |
653 | /*****************************************************************************/ |
|
|
654 | /* groups */ |
|
|
655 | |
|
|
656 | eio_req *eio_grp (eio_cb cb, void *data); |
|
|
657 | void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit); |
|
|
658 | void eio_grp_limit (eio_req *grp, int limit); |
773 | void eio_grp_limit (eio_req *grp, int limit); |
659 | void eio_grp_add (eio_req *grp, eio_req *req); |
|
|
660 | void eio_grp_cancel (eio_req *grp); /* cancels all sub requests but not the group */ |
|
|
661 | |
774 | |
662 | |
775 | |
663 | =back |
776 | =back |
664 | |
777 | |
665 | |
778 | |
… | |
… | |
671 | =head1 ANATOMY AND LIFETIME OF AN EIO REQUEST |
784 | =head1 ANATOMY AND LIFETIME OF AN EIO REQUEST |
672 | |
785 | |
673 | A request is represented by a structure of type C<eio_req>. To initialise |
786 | A request is represented by a structure of type C<eio_req>. To initialise |
674 | it, clear it to all zero bytes: |
787 | it, clear it to all zero bytes: |
675 | |
788 | |
676 | eio_req req; |
789 | eio_req req; |
677 | |
790 | |
678 | memset (&req, 0, sizeof (req)); |
791 | memset (&req, 0, sizeof (req)); |
679 | |
792 | |
680 | A more common way to initialise a new C<eio_req> is to use C<calloc>: |
793 | A more common way to initialise a new C<eio_req> is to use C<calloc>: |
681 | |
794 | |
682 | eio_req *req = calloc (1, sizeof (*req)); |
795 | eio_req *req = calloc (1, sizeof (*req)); |
683 | |
796 | |
684 | In either case, libeio neither allocates, initialises or frees the |
797 | In either case, libeio neither allocates, initialises or frees the |
685 | C<eio_req> structure for you - it merely uses it. |
798 | C<eio_req> structure for you - it merely uses it. |
686 | |
799 | |
687 | zero |
800 | zero |
… | |
… | |
705 | for example, in interactive programs, you might want to limit this time to |
818 | for example, in interactive programs, you might want to limit this time to |
706 | C<0.01> seconds or so. |
819 | C<0.01> seconds or so. |
707 | |
820 | |
708 | Note that: |
821 | Note that: |
709 | |
822 | |
|
|
823 | =over 4 |
|
|
824 | |
710 | a) libeio doesn't know how long your request callbacks take, so the time |
825 | =item a) libeio doesn't know how long your request callbacks take, so the |
711 | spent in C<eio_poll> is up to one callback invocation longer than this
826 | time spent in C<eio_poll> is up to one callback invocation longer than
712 | interval. |
827 | this interval. |
713 | |
828 | |
714 | b) this is implemented by calling C<gettimeofday> after each request, |
829 | =item b) this is implemented by calling C<gettimeofday> after each |
715 | which can be costly. |
830 | request, which can be costly. |
716 | |
831 | |
717 | c) at least one request will be handled. |
832 | =item c) at least one request will be handled. |
|
|
833 | |
|
|
834 | =back |
718 | |
835 | |
719 | =item eio_set_max_poll_reqs (unsigned int nreqs) |
836 | =item eio_set_max_poll_reqs (unsigned int nreqs) |
720 | |
837 | |
721 | When C<nreqs> is non-zero, then C<eio_poll> will not handle more than |
838 | When C<nreqs> is non-zero, then C<eio_poll> will not handle more than |
722 | C<nreqs> requests per invocation. This is a less costly way to limit the |
839 | C<nreqs> requests per invocation. This is a less costly way to limit the |
… | |
… | |
792 | This symbol governs the stack size for each eio thread. Libeio itself |
909 | This symbol governs the stack size for each eio thread. Libeio itself |
793 | was written to use very little stackspace, but when using C<EIO_CUSTOM> |
910 | was written to use very little stackspace, but when using C<EIO_CUSTOM> |
794 | requests, you might want to increase this. |
911 | requests, you might want to increase this. |
795 | |
912 | |
796 | If this symbol is undefined (the default) then libeio will use its default |
913 | If this symbol is undefined (the default) then libeio will use its default |
797 | stack size (C<sizeof (long) * 4096> currently). If it is defined, but |
914 | stack size (C<sizeof (void *) * 4096> currently). If it is defined, but |
798 | C<0>, then the default operating system stack size will be used. In all |
915 | C<0>, then the default operating system stack size will be used. In all |
799 | other cases, the value must be an expression that evaluates to the desired |
916 | other cases, the value must be an expression that evaluates to the desired |
800 | stack size. |
917 | stack size. |
801 | |
918 | |
802 | =back |
919 | =back |