… | |
… | |
45 | Unlike the name component C<stamp> might indicate, it is also used for |
45 | Unlike the name component C<stamp> might indicate, it is also used for |
46 | time differences throughout libeio. |
46 | time differences throughout libeio. |
47 | |
47 | |
48 | =head2 FORK SUPPORT |
48 | =head2 FORK SUPPORT |
49 | |
49 | |
50 | Calling C<fork ()> is fully supported by this module. It is implemented in these steps: |
50 | Calling C<fork ()> is fully supported by this module - but you must not |
|
|
51 | rely on this. It is currently implemented in these steps: |
51 | |
52 | |
52 | 1. wait till all requests in "execute" state have been handled |
53 | 1. wait till all requests in "execute" state have been handled |
53 | (basically requests that are already handed over to the kernel). |
54 | (basically requests that are already handed over to the kernel). |
54 | 2. fork |
55 | 2. fork |
55 | 3. in the parent, continue business as usual, done |
56 | 3. in the parent, continue business as usual, done |
56 | 4. in the child, destroy all ready and pending requests and free the |
57 | 4. in the child, destroy all ready and pending requests and free the |
57 | memory used by the worker threads. This gives you a fully empty |
58 | memory used by the worker threads. This gives you a fully empty |
58 | libeio queue. |
59 | libeio queue. |
59 | |
60 | |
60 | Note, however, since libeio does use threads, the above guarantee doesn't |
61 | Note, however, since libeio does use threads, the above guarantee doesn't |
61 | cover your libc, for example, malloc and other libc functions are not |
62 | cover your libc, for example, malloc and other libc functions are not |
62 | fork-safe, so there is very little you can do after a fork, and in fact, |
63 | fork-safe, so there is very little you can do after a fork, and in fact, |
63 | the above might crash, and thus change. |
64 | the above might crash, and thus change. |
64 | |
65 | |
65 | =head1 INITIALISATION/INTEGRATION |
66 | =head1 INITIALISATION/INTEGRATION |
66 | |
67 | |
67 | Before you can call any eio functions you first have to initialise the |
68 | Before you can call any eio functions you first have to initialise the |
… | |
… | |
134 | |
135 | |
135 | A full-featured connector between libeio and libev would look as follows |
136 | A full-featured connector between libeio and libev would look as follows |
136 | (if C<eio_poll> is handling all requests, it can of course be simplified a |
137 | (if C<eio_poll> is handling all requests, it can of course be simplified a |
137 | lot by removing the idle watcher logic): |
138 | lot by removing the idle watcher logic): |
138 | |
139 | |
139 | static struct ev_loop *loop; |
140 | static struct ev_loop *loop; |
140 | static ev_idle repeat_watcher; |
141 | static ev_idle repeat_watcher; |
141 | static ev_async ready_watcher; |
142 | static ev_async ready_watcher; |
142 | |
143 | |
143 | /* idle watcher callback, only used when eio_poll */ |
144 | /* idle watcher callback, only used when eio_poll */ |
144 | /* didn't handle all results in one call */ |
145 | /* didn't handle all results in one call */ |
145 | static void |
146 | static void |
146 | repeat (EV_P_ ev_idle *w, int revents) |
147 | repeat (EV_P_ ev_idle *w, int revents) |
147 | { |
148 | { |
148 | if (eio_poll () != -1) |
149 | if (eio_poll () != -1) |
149 | ev_idle_stop (EV_A_ w); |
150 | ev_idle_stop (EV_A_ w); |
150 | } |
151 | } |
151 | |
152 | |
152 | /* eio has some results, process them */ |
153 | /* eio has some results, process them */ |
153 | static void |
154 | static void |
154 | ready (EV_P_ ev_async *w, int revents) |
155 | ready (EV_P_ ev_async *w, int revents) |
155 | { |
156 | { |
156 | if (eio_poll () == -1) |
157 | if (eio_poll () == -1) |
157 | ev_idle_start (EV_A_ &repeat_watcher); |
158 | ev_idle_start (EV_A_ &repeat_watcher); |
158 | } |
159 | } |
159 | |
160 | |
160 | /* wake up the event loop */ |
161 | /* wake up the event loop */ |
161 | static void |
162 | static void |
162 | want_poll (void) |
163 | want_poll (void) |
163 | { |
164 | { |
164 |   ev_async_send (loop, &ready_watcher); |
165 |   ev_async_send (loop, &ready_watcher); |
165 | } |
166 | } |
166 | |
167 | |
167 | void |
168 | void |
168 | my_init_eio () |
169 | my_init_eio () |
169 | { |
170 | { |
170 | loop = EV_DEFAULT; |
171 | loop = EV_DEFAULT; |
171 | |
172 | |
172 | ev_idle_init (&repeat_watcher, repeat); |
173 | ev_idle_init (&repeat_watcher, repeat); |
173 | ev_async_init (&ready_watcher, ready); |
174 | ev_async_init (&ready_watcher, ready); |
174 |   ev_async_start (loop, &ready_watcher); |
175 |   ev_async_start (loop, &ready_watcher); |
175 | |
176 | |
176 | eio_init (want_poll, 0); |
177 | eio_init (want_poll, 0); |
177 | } |
178 | } |
178 | |
179 | |
179 | For most other event loops, you would typically use a pipe - the event |
180 | For most other event loops, you would typically use a pipe - the event |
180 | loop should be told to wait for read readiness on the read end. In |
181 | loop should be told to wait for read readiness on the read end. In |
181 | C<want_poll> you would write a single byte, in C<done_poll> you would try |
182 | C<want_poll> you would write a single byte, in C<done_poll> you would try |
182 | to read that byte, and in the callback for the read end, you would call |
183 | to read that byte, and in the callback for the read end, you would call |
… | |
… | |
265 |   abort (); /* something went wrong, we will all die!!! */ |
266 |   abort (); /* something went wrong, we will all die!!! */ |
266 | |
267 | |
267 | Note that you additionally need to call C<eio_poll> when the C<want_cb> |
268 | Note that you additionally need to call C<eio_poll> when the C<want_cb> |
268 | indicates that requests are ready to be processed. |
269 | indicates that requests are ready to be processed. |
269 | |
270 | |
|
|
271 | =head2 CANCELLING REQUESTS |
|
|
272 | |
|
|
273 | Sometimes the need for a request goes away before the request is |
|
|
274 | finished. In that case, one can cancel the request by a call to |
|
|
275 | C<eio_cancel>: |
|
|
276 | |
|
|
277 | =over 4 |
|
|
278 | |
|
|
279 | =item eio_cancel (eio_req *req) |
|
|
280 | |
|
|
281 | Cancel the request. If the request is currently executing it might still |
|
|
282 | continue to execute, and in other cases it might still take a while till |
|
|
283 | the request is cancelled. |
|
|
284 | |
|
|
285 | Even if cancelled, the finish callback will still be invoked - the |
|
|
286 | callbacks of all cancellable requests need to check whether the request |
|
|
287 | has been cancelled by calling C<EIO_CANCELLED (req)>: |
|
|
288 | |
|
|
289 | static int |
|
|
290 | my_eio_cb (eio_req *req) |
|
|
291 | { |
|
|
292 | if (EIO_CANCELLED (req)) |
|
|
293 | return 0; |
|
|
294 | } |
|
|
295 | |
|
|
296 | In addition, cancelled requests will either have C<< req->result >> set to |
|
|
297 | C<-1> and C<errno> to C<ECANCELED>, or otherwise they were successfully |
|
|
298 | executed despite being cancelled (e.g. when they have already been |
|
|
299 | executed at the time they were cancelled). |
|
|
300 | |
|
|
301 | =back |
|
|
302 | |
270 | =head2 AVAILABLE REQUESTS |
303 | =head2 AVAILABLE REQUESTS |
271 | |
304 | |
272 | The following request functions are available. I<All> of them return the |
305 | The following request functions are available. I<All> of them return the |
273 | C<eio_req *> on success and C<0> on failure, and I<all> of them have the |
306 | C<eio_req *> on success and C<0> on failure, and I<all> of them have the |
274 | same three trailing arguments: C<pri>, C<cb> and C<data>. The C<cb> is |
307 | same three trailing arguments: C<pri>, C<cb> and C<data>. The C<cb> is |
… | |
… | |
382 | =item eio_fstat (int fd, int pri, eio_cb cb, void *data) |
415 | =item eio_fstat (int fd, int pri, eio_cb cb, void *data) |
383 | |
416 | |
384 | Stats a file - if C<< req->result >> indicates success, then you can |
417 | Stats a file - if C<< req->result >> indicates success, then you can |
385 | access the C<struct stat>-like structure via C<< req->ptr2 >>: |
418 | access the C<struct stat>-like structure via C<< req->ptr2 >>: |
386 | |
419 | |
387 | EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)req->ptr2; |
420 | EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)req->ptr2; |
388 | |
421 | |
389 | =item eio_statvfs (const char *path, int pri, eio_cb cb, void *data) |
422 | =item eio_statvfs (const char *path, int pri, eio_cb cb, void *data) |
390 | |
423 | |
391 | =item eio_fstatvfs (int fd, int pri, eio_cb cb, void *data) |
424 | =item eio_fstatvfs (int fd, int pri, eio_cb cb, void *data) |
392 | |
425 | |
393 | Stats a filesystem - if C<< req->result >> indicates success, then you can |
426 | Stats a filesystem - if C<< req->result >> indicates success, then you can |
394 | access the C<struct statvfs>-like structure via C<< req->ptr2 >>: |
427 | access the C<struct statvfs>-like structure via C<< req->ptr2 >>: |
395 | |
428 | |
396 | EIO_STRUCT_STATVFS *statdata = (EIO_STRUCT_STATVFS *)req->ptr2; |
429 | EIO_STRUCT_STATVFS *statdata = (EIO_STRUCT_STATVFS *)req->ptr2; |
397 | |
430 | |
398 | =back |
431 | =back |
399 | |
432 | |
400 | =head3 READING DIRECTORIES |
433 | =head3 READING DIRECTORIES |
401 | |
434 | |
… | |
… | |
441 | |
474 | |
442 | If this flag is specified, then, in addition to the names in C<ptr2>, |
475 | If this flag is specified, then, in addition to the names in C<ptr2>, |
443 | also an array of C<struct eio_dirent> is returned, in C<ptr1>. A C<struct |
476 | also an array of C<struct eio_dirent> is returned, in C<ptr1>. A C<struct |
444 | eio_dirent> looks like this: |
477 | eio_dirent> looks like this: |
445 | |
478 | |
446 | struct eio_dirent |
479 | struct eio_dirent |
447 | { |
480 | { |
448 | int nameofs; /* offset of null-terminated name string in (char *)req->ptr2 */ |
481 | int nameofs; /* offset of null-terminated name string in (char *)req->ptr2 */ |
449 | unsigned short namelen; /* size of filename without trailing 0 */ |
482 | unsigned short namelen; /* size of filename without trailing 0 */ |
450 | unsigned char type; /* one of EIO_DT_* */ |
483 | unsigned char type; /* one of EIO_DT_* */ |
451 | signed char score; /* internal use */ |
484 | signed char score; /* internal use */ |
452 | ino_t inode; /* the inode number, if available, otherwise unspecified */ |
485 | ino_t inode; /* the inode number, if available, otherwise unspecified */ |
453 | }; |
486 | }; |
454 | |
487 | |
455 | The only members you normally would access are C<nameofs>, which is the |
488 | The only members you normally would access are C<nameofs>, which is the |
456 | byte-offset from C<ptr2> to the start of the name, C<namelen> and C<type>. |
489 | byte-offset from C<ptr2> to the start of the name, C<namelen> and C<type>. |
457 | |
490 | |
458 | C<type> can be one of: |
491 | C<type> can be one of: |
… | |
… | |
638 | Further below you will find more discussion of these topics - first follows |
671 | Further below you will find more discussion of these topics - first follows |
639 | the reference section detailing the request generator and other methods. |
672 | the reference section detailing the request generator and other methods. |
640 | |
673 | |
641 | =over 4 |
674 | =over 4 |
642 | |
675 | |
643 | =item eio_grp (eio_cb cb, void *data) |
676 | =item eio_req *grp = eio_grp (eio_cb cb, void *data) |
644 | |
677 | |
645 | Creates and submits a group request. |
678 | Creates, submits and returns a group request. |
|
|
679 | |
|
|
680 | =item eio_grp_add (eio_req *grp, eio_req *req) |
|
|
681 | |
|
|
682 | Adds a request to the request group. |
|
|
683 | |
|
|
684 | =item eio_grp_cancel (eio_req *grp) |
|
|
685 | |
|
|
686 | Cancels all requests I<in> the group, but I<not> the group request |
|
|
687 | itself. You can cancel the group request via a normal C<eio_cancel> call. |
|
|
688 | |
|
|
689 | |
646 | |
690 | |
647 | =back |
691 | =back |
648 | |
692 | |
649 | |
693 | |
650 | |
694 | |
… | |
… | |
654 | /* groups */ |
698 | /* groups */ |
655 | |
699 | |
656 | eio_req *eio_grp (eio_cb cb, void *data); |
700 | eio_req *eio_grp (eio_cb cb, void *data); |
657 | void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit); |
701 | void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit); |
658 | void eio_grp_limit (eio_req *grp, int limit); |
702 | void eio_grp_limit (eio_req *grp, int limit); |
659 | void eio_grp_add (eio_req *grp, eio_req *req); |
|
|
660 | void eio_grp_cancel (eio_req *grp); /* cancels all sub requests but not the group */ |
703 | void eio_grp_cancel (eio_req *grp); /* cancels all sub requests but not the group */ |
661 | |
704 | |
662 | |
705 | |
663 | =back |
706 | =back |
664 | |
707 | |
… | |
… | |
671 | =head1 ANATOMY AND LIFETIME OF AN EIO REQUEST |
714 | =head1 ANATOMY AND LIFETIME OF AN EIO REQUEST |
672 | |
715 | |
673 | A request is represented by a structure of type C<eio_req>. To initialise |
716 | A request is represented by a structure of type C<eio_req>. To initialise |
674 | it, clear it to all zero bytes: |
717 | it, clear it to all zero bytes: |
675 | |
718 | |
676 | eio_req req; |
719 | eio_req req; |
677 | |
720 | |
678 | memset (&req, 0, sizeof (req)); |
721 | memset (&req, 0, sizeof (req)); |
679 | |
722 | |
680 | A more common way to initialise a new C<eio_req> is to use C<calloc>: |
723 | A more common way to initialise a new C<eio_req> is to use C<calloc>: |
681 | |
724 | |
682 | eio_req *req = calloc (1, sizeof (*req)); |
725 | eio_req *req = calloc (1, sizeof (*req)); |
683 | |
726 | |
684 | In either case, libeio neither allocates, initialises or frees the |
727 | In either case, libeio neither allocates, initialises or frees the |
685 | C<eio_req> structure for you - it merely uses it. |
728 | C<eio_req> structure for you - it merely uses it. |
686 | |
729 | |
687 | zero |
730 | zero |