… | |
… | |
124 | =back |
124 | =back |
125 | |
125 | |
126 | For libev, you would typically use an C<ev_async> watcher: the |
126 | For libev, you would typically use an C<ev_async> watcher: the |
127 | C<want_poll> callback would invoke C<ev_async_send> to wake up the event |
127 | C<want_poll> callback would invoke C<ev_async_send> to wake up the event |
128 | loop. Inside the callback set for the watcher, one would call C<eio_poll |
128 | loop. Inside the callback set for the watcher, one would call C<eio_poll |
129 | ()> (followed by C<ev_async_send> again if C<eio_poll> indicates that not |
129 | ()>. |
130 | all requests have been handled yet). The race is taken care of because |
130 | |
131 | libev resets/rearms the async watcher before calling your callback, |
131 | If C<eio_poll ()> is configured to not handle all results in one go |
132 | and therefore, before calling C<eio_poll>. This might result in (some) |
132 | (i.e. it returns C<-1>) then you should start an idle watcher that calls |
133 | spurious wake-ups, but is generally harmless. |
133 | C<eio_poll> until it returns something C<!= -1>. |
|
|
134 | |
|
|
135 | A full-featured connector between libeio and libev would look as follows
|
|
136 | (if C<eio_poll> is handling all requests, it can of course be simplified a |
|
|
137 | lot by removing the idle watcher logic): |
|
|
138 | |
|
|
139 | static struct ev_loop *loop; |
|
|
140 | static ev_idle repeat_watcher; |
|
|
141 | static ev_async ready_watcher; |
|
|
142 | |
|
|
143 | /* idle watcher callback, only used when eio_poll */ |
|
|
144 | /* didn't handle all results in one call */ |
|
|
145 | static void |
|
|
146 | repeat (EV_P_ ev_idle *w, int revents) |
|
|
147 | { |
|
|
148 | if (eio_poll () != -1) |
|
|
149 | ev_idle_stop (EV_A_ w); |
|
|
150 | } |
|
|
151 | |
|
|
152 | /* eio has some results, process them */ |
|
|
153 | static void |
|
|
154 | ready (EV_P_ ev_async *w, int revents) |
|
|
155 | { |
|
|
156 | if (eio_poll () == -1) |
|
|
157 | ev_idle_start (EV_A_ &repeat_watcher); |
|
|
158 | } |
|
|
159 | |
|
|
160 | /* wake up the event loop */ |
|
|
161 | static void |
|
|
162 | want_poll (void) |
|
|
163 | { |
|
|
164 |      ev_async_send (loop, &ready_watcher);
|
|
165 | } |
|
|
166 | |
|
|
167 | void |
|
|
168 | my_init_eio () |
|
|
169 | { |
|
|
170 | loop = EV_DEFAULT; |
|
|
171 | |
|
|
172 | ev_idle_init (&repeat_watcher, repeat); |
|
|
173 | ev_async_init (&ready_watcher, ready); |
|
|
174 |      ev_async_start (loop, &ready_watcher);
|
|
175 | |
|
|
176 | eio_init (want_poll, 0); |
|
|
177 | } |
134 | |
178 | |
135 | For most other event loops, you would typically use a pipe - the event |
179 | For most other event loops, you would typically use a pipe - the event |
136 | loop should be told to wait for read readiness on the read end. In |
180 | loop should be told to wait for read readiness on the read end. In |
137 | C<want_poll> you would write a single byte, in C<done_poll> you would try |
181 | C<want_poll> you would write a single byte, in C<done_poll> you would try |
138 | to read that byte, and in the callback for the read end, you would call |
182 | to read that byte, and in the callback for the read end, you would call |
139 | C<eio_poll>. The race is avoided here because the event loop should invoke |
|
|
140 | your callback again and again until the byte has been read (as the pipe |
|
|
141 | read callback does not read it, only C<done_poll>). |
|
|
142 | |
|
|
143 | =head2 CONFIGURATION |
|
|
144 | |
|
|
145 | The functions in this section can sometimes be useful, but the default |
|
|
146 | configuration will do in most cases, so you should skip this section on
|
|
147 | first reading. |
|
|
148 | |
|
|
149 | =over 4 |
|
|
150 | |
|
|
151 | =item eio_set_max_poll_time (eio_tstamp nseconds) |
|
|
152 | |
|
|
153 | This causes C<eio_poll ()> to return after it has detected that it was |
|
|
154 | running for C<nseconds> seconds or longer (this number can be fractional).
|
|
155 | |
|
|
156 | This can be used to limit the amount of time spent handling eio requests, |
|
|
157 | for example, in interactive programs, you might want to limit this time to |
|
|
158 | C<0.01> seconds or so. |
|
|
159 | |
|
|
160 | Note that: |
|
|
161 | |
|
|
162 | a) libeio doesn't know how long your request callbacks take, so the time |
|
|
163 | spent in C<eio_poll> is up to one callback invocation longer than this
|
|
164 | interval. |
|
|
165 | |
|
|
166 | b) this is implemented by calling C<gettimeofday> after each request, |
|
|
167 | which can be costly. |
|
|
168 | |
|
|
169 | c) at least one request will be handled. |
|
|
170 | |
|
|
171 | =item eio_set_max_poll_reqs (unsigned int nreqs) |
|
|
172 | |
|
|
173 | When C<nreqs> is non-zero, then C<eio_poll> will not handle more than |
|
|
174 | C<nreqs> requests per invocation. This is a less costly way to limit the |
|
|
175 | amount of work done by C<eio_poll> than setting a time limit.
|
|
176 | |
|
|
177 | If you know your callbacks are generally fast, you could use this to |
|
|
178 | encourage interactiveness in your programs by setting it to C<10>, C<100> |
|
|
179 | or even C<1000>. |
|
|
180 | |
|
|
181 | =item eio_set_min_parallel (unsigned int nthreads) |
|
|
182 | |
|
|
183 | Make sure libeio can handle at least this many requests in parallel. It |
|
|
184 | might be able to handle more.
|
|
185 | |
|
|
186 | =item eio_set_max_parallel (unsigned int nthreads) |
|
|
187 | |
|
|
188 | Set the maximum number of threads that libeio will spawn. |
|
|
189 | |
|
|
190 | =item eio_set_max_idle (unsigned int nthreads) |
|
|
191 | |
|
|
192 | Libeio uses threads internally to handle most requests, and will start and stop threads on demand. |
|
|
193 | |
|
|
194 | This call can be used to limit the number of idle threads (threads without |
|
|
195 | work to do): libeio will keep some threads idle in preparation for more |
|
|
196 | requests, but never longer than C<nthreads> threads. |
|
|
197 | |
|
|
198 | In addition to this, libeio will also stop threads when they are idle for |
|
|
199 | a few seconds, regardless of this setting. |
|
|
200 | |
|
|
201 | =item unsigned int eio_nthreads () |
|
|
202 | |
|
|
203 | Return the number of worker threads currently running. |
|
|
204 | |
|
|
205 | =item unsigned int eio_nreqs () |
|
|
206 | |
|
|
207 | Return the number of requests currently handled by libeio. This is the |
|
|
208 | total number of requests that have been submitted to libeio, but not yet |
|
|
209 | destroyed. |
|
|
210 | |
|
|
211 | =item unsigned int eio_nready () |
|
|
212 | |
|
|
213 | Returns the number of ready requests, i.e. requests that have been |
|
|
214 | submitted but have not yet entered the execution phase. |
|
|
215 | |
|
|
216 | =item unsigned int eio_npending () |
|
|
217 | |
|
|
218 | Returns the number of pending requests, i.e. requests that have been |
|
|
219 | executed and have results, but have not been finished yet by a call to |
|
|
220 | C<eio_poll>.
183 | C<eio_poll>. |
221 | |
184 | |
222 | =back |
185 | You don't have to take special care in the case C<eio_poll> doesn't handle |
|
|
186 | all requests, as the done callback will not be invoked, so the event loop |
|
|
187 | will still signal readiness for the pipe until I<all> results have been
|
|
188 | processed. |
223 | |
189 | |
224 | |
190 | |
225 | =head1 HIGH LEVEL REQUEST API |
191 | =head1 HIGH LEVEL REQUEST API |
226 | |
192 | |
227 | Libeio has both a high-level API, which consists of calling a request |
193 | Libeio has both a high-level API, which consists of calling a request |
… | |
… | |
234 | |
200 | |
235 | You submit a request by calling the relevant C<eio_TYPE> function with the |
201 | You submit a request by calling the relevant C<eio_TYPE> function with the |
236 | required parameters, a callback of type C<int (*eio_cb)(eio_req *req)> |
202 | required parameters, a callback of type C<int (*eio_cb)(eio_req *req)> |
237 | (called C<eio_cb> below) and a freely usable C<void *data> argument. |
203 | (called C<eio_cb> below) and a freely usable C<void *data> argument. |
238 | |
204 | |
239 | The return value will either be 0 |
205 | The return value will either be 0, in case something went really wrong |
|
|
206 | (which can basically only happen on very fatal errors, such as C<malloc> |
|
|
207 | returning 0, which is rather unlikely), or a pointer to the newly-created |
|
|
208 | and submitted C<eio_req *>. |
240 | |
209 | |
241 | The callback will be called with an C<eio_req *> which contains the |
210 | The callback will be called with an C<eio_req *> which contains the |
242 | results of the request. The members you can access inside that structure |
211 | results of the request. The members you can access inside that structure |
243 | vary from request to request, except for: |
212 | vary from request to request, except for: |
244 | |
213 | |
… | |
… | |
307 | custom data value as C<data>. |
276 | custom data value as C<data>. |
308 | |
277 | |
309 | =head3 POSIX API WRAPPERS |
278 | =head3 POSIX API WRAPPERS |
310 | |
279 | |
311 | These requests simply wrap the POSIX call of the same name, with the same |
280 | These requests simply wrap the POSIX call of the same name, with the same |
312 | arguments: |
281 | arguments. If a function is not implemented by the OS and cannot be emulated |
|
|
282 | in some way, then all of these return C<-1> and set C<errno> to C<ENOSYS>.
313 | |
283 | |
314 | =over 4 |
284 | =over 4 |
315 | |
285 | |
316 | =item eio_open (const char *path, int flags, mode_t mode, int pri, eio_cb cb, void *data) |
286 | =item eio_open (const char *path, int flags, mode_t mode, int pri, eio_cb cb, void *data) |
317 | |
287 | |
|
|
288 | =item eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data) |
|
|
289 | |
|
|
290 | =item eio_chown (const char *path, uid_t uid, gid_t gid, int pri, eio_cb cb, void *data) |
|
|
291 | |
|
|
292 | =item eio_chmod (const char *path, mode_t mode, int pri, eio_cb cb, void *data) |
|
|
293 | |
|
|
294 | =item eio_mkdir (const char *path, mode_t mode, int pri, eio_cb cb, void *data) |
|
|
295 | |
|
|
296 | =item eio_rmdir (const char *path, int pri, eio_cb cb, void *data) |
|
|
297 | |
|
|
298 | =item eio_unlink (const char *path, int pri, eio_cb cb, void *data) |
|
|
299 | |
318 | =item eio_utime (const char *path, eio_tstamp atime, eio_tstamp mtime, int pri, eio_cb cb, void *data) |
300 | =item eio_utime (const char *path, eio_tstamp atime, eio_tstamp mtime, int pri, eio_cb cb, void *data) |
319 | |
301 | |
320 | =item eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data) |
|
|
321 | |
|
|
322 | =item eio_chown (const char *path, uid_t uid, gid_t gid, int pri, eio_cb cb, void *data) |
|
|
323 | |
|
|
324 | =item eio_chmod (const char *path, mode_t mode, int pri, eio_cb cb, void *data) |
|
|
325 | |
|
|
326 | =item eio_mkdir (const char *path, mode_t mode, int pri, eio_cb cb, void *data) |
|
|
327 | |
|
|
328 | =item eio_rmdir (const char *path, int pri, eio_cb cb, void *data) |
|
|
329 | |
|
|
330 | =item eio_unlink (const char *path, int pri, eio_cb cb, void *data) |
|
|
331 | |
|
|
332 | =item eio_readlink (const char *path, int pri, eio_cb cb, void *data) /* result=ptr2 allocated dynamically */ |
|
|
333 | |
|
|
334 | =item eio_stat (const char *path, int pri, eio_cb cb, void *data) /* stat buffer=ptr2 allocated dynamically */ |
|
|
335 | |
|
|
336 | =item eio_lstat (const char *path, int pri, eio_cb cb, void *data) /* stat buffer=ptr2 allocated dynamically */ |
|
|
337 | |
|
|
338 | =item eio_statvfs (const char *path, int pri, eio_cb cb, void *data) /* stat buffer=ptr2 allocated dynamically */ |
|
|
339 | |
|
|
340 | =item eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data) |
302 | =item eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data) |
341 | |
303 | |
342 | =item eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
304 | =item eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
343 | |
305 | |
344 | =item eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
306 | =item eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
345 | |
307 | |
346 | =item eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
308 | =item eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) |
347 | |
309 | |
348 | =item eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) |
|
|
349 | |
|
|
350 | =item eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data) |
310 | =item eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data) |
351 | |
|
|
352 | =item eio_mlockall (int flags, int pri, eio_cb cb, void *data) |
|
|
353 | |
311 | |
354 | =item eio_close (int fd, int pri, eio_cb cb, void *data) |
312 | =item eio_close (int fd, int pri, eio_cb cb, void *data) |
355 | |
313 | |
356 | =item eio_sync (int pri, eio_cb cb, void *data) |
314 | =item eio_sync (int pri, eio_cb cb, void *data) |
357 | |
315 | |
… | |
… | |
386 | |
344 | |
387 | Not surprisingly, pread and pwrite are not thread-safe on Darwin (OS/X), |
345 | Not surprisingly, pread and pwrite are not thread-safe on Darwin (OS/X), |
388 | so it is advised not to submit multiple requests on the same fd on this |
346 | so it is advised not to submit multiple requests on the same fd on this |
389 | horrible pile of garbage. |
347 | horrible pile of garbage. |
390 | |
348 | |
|
|
349 | =item eio_mlockall (int flags, int pri, eio_cb cb, void *data) |
|
|
350 | |
|
|
351 | Like C<mlockall>, but the flag value constants are called |
|
|
352 | C<EIO_MCL_CURRENT> and C<EIO_MCL_FUTURE>. |
|
|
353 | |
|
|
354 | =item eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) |
|
|
355 | |
|
|
356 | Just like msync, except that the flag values are called C<EIO_MS_ASYNC>, |
|
|
357 | C<EIO_MS_INVALIDATE> and C<EIO_MS_SYNC>. |
|
|
358 | |
|
|
359 | =item eio_readlink (const char *path, int pri, eio_cb cb, void *data) |
|
|
360 | |
|
|
361 | If successful, the path read by C<readlink(2)> can be accessed via C<< |
|
|
362 | req->ptr2 >> and is I<NOT> null-terminated, with the length specified as |
|
|
363 | C<< req->result >>. |
|
|
364 | |
|
|
365 | if (req->result >= 0) |
|
|
366 | { |
|
|
367 | char *target = strndup ((char *)req->ptr2, req->result); |
|
|
368 | |
|
|
369 | free (target); |
|
|
370 | } |
|
|
371 | |
|
|
372 | =item eio_realpath (const char *path, int pri, eio_cb cb, void *data) |
|
|
373 | |
|
|
374 | Similar to the realpath libc function, but unlike that one, result is |
|
|
375 | C<-1> on failure and the length of the returned path in C<ptr2> (which is |
|
|
376 | not 0-terminated) - this is similar to readlink. |
|
|
377 | |
|
|
378 | =item eio_stat (const char *path, int pri, eio_cb cb, void *data) |
|
|
379 | |
|
|
380 | =item eio_lstat (const char *path, int pri, eio_cb cb, void *data) |
|
|
381 | |
391 | =item eio_fstat (int fd, int pri, eio_cb cb, void *data) |
382 | =item eio_fstat (int fd, int pri, eio_cb cb, void *data) |
392 | |
383 | |
393 | Stats a file - if C<< req->result >> indicates success, then you can |
384 | Stats a file - if C<< req->result >> indicates success, then you can |
394 | access the C<struct stat>-like structure via C<< req->ptr2 >>: |
385 | access the C<struct stat>-like structure via C<< req->ptr2 >>: |
395 | |
386 | |
396 | EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)req->ptr2; |
387 | EIO_STRUCT_STAT *statdata = (EIO_STRUCT_STAT *)req->ptr2; |
397 | |
388 | |
398 | =item eio_fstatvfs (int fd, int pri, eio_cb cb, void *data) /* stat buffer=ptr2 allocated dynamically */ |
389 | =item eio_statvfs (const char *path, int pri, eio_cb cb, void *data) |
|
|
390 | |
|
|
391 | =item eio_fstatvfs (int fd, int pri, eio_cb cb, void *data) |
399 | |
392 | |
400 | Stats a filesystem - if C<< req->result >> indicates success, then you can |
393 | Stats a filesystem - if C<< req->result >> indicates success, then you can |
401 | access the C<struct statvfs>-like structure via C<< req->ptr2 >>: |
394 | access the C<struct statvfs>-like structure via C<< req->ptr2 >>: |
402 | |
395 | |
403 | EIO_STRUCT_STATVFS *statdata = (EIO_STRUCT_STATVFS *)req->ptr2; |
396 | EIO_STRUCT_STATVFS *statdata = (EIO_STRUCT_STATVFS *)req->ptr2; |
… | |
… | |
419 | (via the C<opendir>, C<readdir> and C<closedir> calls) and returns either |
412 | (via the C<opendir>, C<readdir> and C<closedir> calls) and returns either |
420 | the names or an array of C<struct eio_dirent>, depending on the C<flags> |
413 | the names or an array of C<struct eio_dirent>, depending on the C<flags> |
421 | argument. |
414 | argument. |
422 | |
415 | |
423 | The C<< req->result >> indicates either the number of files found, or |
416 | The C<< req->result >> indicates either the number of files found, or |
424 | C<-1> on error. On success, zero-terminated names can be found as C<< req->ptr2 >>, |
417 | C<-1> on error. On success, null-terminated names can be found as C<< req->ptr2 >>, |
425 | and C<struct eio_dirents>, if requested by C<flags>, can be found via C<< |
418 | and C<struct eio_dirents>, if requested by C<flags>, can be found via C<< |
426 | req->ptr1 >>. |
419 | req->ptr1 >>. |
427 | |
420 | |
428 | Here is an example that prints all the names: |
421 | Here is an example that prints all the names: |
429 | |
422 | |
… | |
… | |
561 | =item eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) |
554 | =item eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) |
562 | |
555 | |
563 | Calls C<sync_file_range>. If the syscall is missing, then this is the same |
556 | Calls C<sync_file_range>. If the syscall is missing, then this is the same |
564 | as calling C<fdatasync>. |
557 | as calling C<fdatasync>. |
565 | |
558 | |
|
|
559 | Flags can be any combination of C<EIO_SYNC_FILE_RANGE_WAIT_BEFORE>, |
|
|
560 | C<EIO_SYNC_FILE_RANGE_WRITE> and C<EIO_SYNC_FILE_RANGE_WAIT_AFTER>. |
|
|
561 | |
566 | =back |
562 | =back |
567 | |
563 | |
568 | =head3 LIBEIO-SPECIFIC REQUESTS |
564 | =head3 LIBEIO-SPECIFIC REQUESTS |
569 | |
565 | |
570 | These requests are specific to libeio and do not correspond to any OS call. |
566 | These requests are specific to libeio and do not correspond to any OS call. |
571 | |
567 | |
572 | =over 4 |
568 | =over 4 |
573 | |
569 | |
574 | =item eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) |
570 | =item eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) |
575 | |
571 | |
|
|
572 | Reads (C<flags == 0>) or modifies (C<flags == EIO_MT_MODIFY>) the given
|
|
573 | memory area, page-wise, that is, it reads (or reads and writes back) the |
|
|
574 | first octet of every page that spans the memory area. |
|
|
575 | |
|
|
576 | This can be used to page in some mmapped file, or dirty some pages. Note |
|
|
577 | that dirtying is an unlocked read-write access, so races can ensue when |
|
|
578 | some other thread modifies the data stored in that memory area.
|
|
579 | |
576 | =item eio_custom (void (*)(eio_req *) execute, int pri, eio_cb cb, void *data) |
580 | =item eio_custom (void (*)(eio_req *) execute, int pri, eio_cb cb, void *data) |
577 | |
581 | |
578 | Executes a custom request, i.e., a user-specified callback. |
582 | Executes a custom request, i.e., a user-specified callback. |
579 | |
583 | |
580 | The callback gets the C<eio_req *> as parameter and is expected to read |
584 | The callback gets the C<eio_req *> as parameter and is expected to read |
581 | and modify any request-specific members. Specifically, it should set C<< |
585 | and modify any request-specific members. Specifically, it should set C<< |
… | |
… | |
601 | req->result = open (req->data, O_RDONLY); |
605 | req->result = open (req->data, O_RDONLY); |
602 | } |
606 | } |
603 | |
607 | |
604 | eio_custom (my_open, 0, my_open_done, "/etc/passwd"); |
608 | eio_custom (my_open, 0, my_open_done, "/etc/passwd"); |
605 | |
609 | |
606 | =item eio_busy (eio_tstamp delay, int pri, eio_cb cb, void *data) |
610 | =item eio_busy (eio_tstamp delay, int pri, eio_cb cb, void *data) |
607 | |
611 | |
608 | This is a request that takes C<delay> seconds to execute, but otherwise
612 | This is a request that takes C<delay> seconds to execute, but otherwise
609 | does nothing - it simply puts one of the worker threads to sleep for this |
613 | does nothing - it simply puts one of the worker threads to sleep for this |
610 | long. |
614 | long. |
611 | |
615 | |
612 | This request can be used to artificially increase load, e.g. for debugging |
616 | This request can be used to artificially increase load, e.g. for debugging |
613 | or benchmarking reasons. |
617 | or benchmarking reasons. |
614 | |
618 | |
615 | =item eio_nop (int pri, eio_cb cb, void *data) |
619 | =item eio_nop (int pri, eio_cb cb, void *data) |
616 | |
620 | |
617 | This request does nothing, except go through the whole request cycle. This |
621 | This request does nothing, except go through the whole request cycle. This |
618 | can be used to measure latency or in some cases to simplify code, but is |
622 | can be used to measure latency or in some cases to simplify code, but is |
619 | not really of much use. |
623 | not really of much use. |
620 | |
624 | |
621 | =back |
625 | =back |
622 | |
626 | |
623 | =head3 GROUPING AND LIMITING REQUESTS |
627 | =head3 GROUPING AND LIMITING REQUESTS |
|
|
628 | |
|
|
629 | There is one more rather special request, C<eio_grp>. It is a very special |
|
|
630 | aio request: Instead of doing something, it is a container for other eio |
|
|
631 | requests. |
|
|
632 | |
|
|
633 | There are two primary use cases for this: a) bundle many requests into a |
|
|
634 | single, composite, request with a definite callback and the ability to |
|
|
635 | cancel the whole request with its subrequests and b) limiting the number |
|
|
636 | of "active" requests. |
|
|
637 | |
|
|
638 | Further below you will find more discussion of these topics - first follows
|
|
639 | the reference section detailing the request generator and other methods. |
|
|
640 | |
|
|
641 | =over 4 |
|
|
642 | |
|
|
643 | =item eio_grp (eio_cb cb, void *data) |
|
|
644 | |
|
|
645 | Creates and submits a group request. |
|
|
646 | |
|
|
647 | =back |
|
|
648 | |
|
|
649 | |
624 | |
650 | |
625 | #TODO |
651 | #TODO |
626 | |
652 | |
627 | /*****************************************************************************/ |
653 | /*****************************************************************************/ |
628 | /* groups */ |
654 | /* groups */ |
… | |
… | |
660 | |
686 | |
661 | zero |
687 | zero |
662 | |
688 | |
663 | #TODO |
689 | #TODO |
664 | |
690 | |
|
|
691 | =head2 CONFIGURATION |
|
|
692 | |
|
|
693 | The functions in this section can sometimes be useful, but the default |
|
|
694 | configuration will do in most cases, so you should skip this section on
|
|
695 | first reading. |
|
|
696 | |
|
|
697 | =over 4 |
|
|
698 | |
|
|
699 | =item eio_set_max_poll_time (eio_tstamp nseconds) |
|
|
700 | |
|
|
701 | This causes C<eio_poll ()> to return after it has detected that it was |
|
|
702 | running for C<nseconds> seconds or longer (this number can be fractional).
|
|
703 | |
|
|
704 | This can be used to limit the amount of time spent handling eio requests, |
|
|
705 | for example, in interactive programs, you might want to limit this time to |
|
|
706 | C<0.01> seconds or so. |
|
|
707 | |
|
|
708 | Note that: |
|
|
709 | |
|
|
710 | a) libeio doesn't know how long your request callbacks take, so the time |
|
|
711 | spent in C<eio_poll> is up to one callback invocation longer than this
|
|
712 | interval. |
|
|
713 | |
|
|
714 | b) this is implemented by calling C<gettimeofday> after each request, |
|
|
715 | which can be costly. |
|
|
716 | |
|
|
717 | c) at least one request will be handled. |
|
|
718 | |
|
|
719 | =item eio_set_max_poll_reqs (unsigned int nreqs) |
|
|
720 | |
|
|
721 | When C<nreqs> is non-zero, then C<eio_poll> will not handle more than |
|
|
722 | C<nreqs> requests per invocation. This is a less costly way to limit the |
|
|
723 | amount of work done by C<eio_poll> than setting a time limit.
|
|
724 | |
|
|
725 | If you know your callbacks are generally fast, you could use this to |
|
|
726 | encourage interactiveness in your programs by setting it to C<10>, C<100> |
|
|
727 | or even C<1000>. |
|
|
728 | |
|
|
729 | =item eio_set_min_parallel (unsigned int nthreads) |
|
|
730 | |
|
|
731 | Make sure libeio can handle at least this many requests in parallel. It |
|
|
732 | might be able to handle more.
|
|
733 | |
|
|
734 | =item eio_set_max_parallel (unsigned int nthreads) |
|
|
735 | |
|
|
736 | Set the maximum number of threads that libeio will spawn. |
|
|
737 | |
|
|
738 | =item eio_set_max_idle (unsigned int nthreads) |
|
|
739 | |
|
|
740 | Libeio uses threads internally to handle most requests, and will start and stop threads on demand. |
|
|
741 | |
|
|
742 | This call can be used to limit the number of idle threads (threads without |
|
|
743 | work to do): libeio will keep some threads idle in preparation for more |
|
|
744 | requests, but never longer than C<nthreads> threads. |
|
|
745 | |
|
|
746 | In addition to this, libeio will also stop threads when they are idle for |
|
|
747 | a few seconds, regardless of this setting. |
|
|
748 | |
|
|
749 | =item unsigned int eio_nthreads () |
|
|
750 | |
|
|
751 | Return the number of worker threads currently running. |
|
|
752 | |
|
|
753 | =item unsigned int eio_nreqs () |
|
|
754 | |
|
|
755 | Return the number of requests currently handled by libeio. This is the |
|
|
756 | total number of requests that have been submitted to libeio, but not yet |
|
|
757 | destroyed. |
|
|
758 | |
|
|
759 | =item unsigned int eio_nready () |
|
|
760 | |
|
|
761 | Returns the number of ready requests, i.e. requests that have been |
|
|
762 | submitted but have not yet entered the execution phase. |
|
|
763 | |
|
|
764 | =item unsigned int eio_npending () |
|
|
765 | |
|
|
766 | Returns the number of pending requests, i.e. requests that have been |
|
|
767 | executed and have results, but have not been finished yet by a call to |
|
|
768 | C<eio_poll>.
|
|
769 | |
|
|
770 | =back |
|
|
771 | |
665 | =head1 EMBEDDING |
772 | =head1 EMBEDDING |
666 | |
773 | |
667 | Libeio can be embedded directly into programs. This functionality is not |
774 | Libeio can be embedded directly into programs. This functionality is not |
668 | documented and not (yet) officially supported. |
775 | documented and not (yet) officially supported. |
669 | |
776 | |