--- IO-AIO/AIO.pm	2007/06/03 09:44:17	1.109
+++ IO-AIO/AIO.pm	2007/10/06 14:05:19	1.117
@@ -185,19 +185,21 @@
 
 package IO::AIO;
 
+use Carp ();
+
 no warnings;
 use strict 'vars';
 
 use base 'Exporter';
 
 BEGIN {
-   our $VERSION = '2.4';
+   our $VERSION = '2.51';
 
    our @AIO_REQ = qw(aio_sendfile aio_read aio_write aio_open aio_close
                      aio_stat aio_lstat aio_unlink aio_rmdir aio_readdir
                      aio_scandir aio_symlink aio_readlink aio_fsync
                      aio_fdatasync aio_readahead aio_rename aio_link aio_move
                      aio_copy aio_group aio_nop aio_mknod aio_load aio_rmtree aio_mkdir
-                     aio_chown aio_chmod aio_utime);
+                     aio_chown aio_chmod aio_utime aio_truncate);
 
    our @EXPORT = (@AIO_REQ, qw(aioreq_pri aioreq_nice aio_block));
    our @EXPORT_OK = qw(poll_fileno poll_cb poll_wait flush
                        min_parallel max_parallel max_idle
@@ -313,13 +315,67 @@
 
 =item aio_close $fh, $callback->($status)
 
 Asynchronously close a file and call the callback with the result
-code. I<WARNING:> although accepted, you should not pass in a perl
-filehandle here, as perl will likely close the file descriptor another
-time when the filehandle is destroyed. Normally, you can safely call perls
-C<close> or just let filehandles go out of scope.
+code.
+
+Unfortunately, you can't do this to perl. Perl I<insists> very strongly on
+closing the file descriptor associated with the filehandle itself. Here is
+what aio_close will try:
+
+   1. dup()licate the fd
+   2. asynchronously close() the duplicated fd
+   3. dup()licate the fd once more
+   4. let perl close() the filehandle
+   5. asynchronously close the duplicated fd
+
+The idea is that the first close() flushes stuff to disk that closing an
+fd will flush, so when perl closes the fd, nothing much will need to be
+flushed. The second async. close() will then flush stuff to disk that
+closing the last fd to the file will flush.
+
+Just FYI, SUSv3 has this to say on close:
+
+   All outstanding record locks owned by the process on the file
+   associated with the file descriptor shall be removed.
+
+   If fildes refers to a socket, close() shall cause the socket to be
+   destroyed. ... close() shall block for up to the current linger
+   interval until all data is transmitted.
+   [this actually sounds like a specification bug, but who knows]
+
+And at least Linux additionally actually flushes stuff on every close,
+even when the file itself is still open.
 
-This is supposed to be a bug in the API, so that might change. It's
-therefore best to avoid this function.
+Sounds enormously inefficient and complicated? Yes... please show me how
+to nuke perl's fd out of existence...
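+
+A minimal usage sketch, assuming an already-open filehandle in C<$fh>
+(C<IO::AIO::flush> is simply the easiest way to wait for the request
+when no event loop is integrated):
+
+   aio_close $fh, sub {
+      my ($status) = @_;   # result code of the final close()
+      warn "close failed: $!" if $status < 0;
+   };
+
+   IO::AIO::flush;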
+
+=cut
+
+sub aio_close($;$) {
+   aio_block {
+      my ($fh, $cb) = @_;
+
+      my $pri = aioreq_pri;
+      my $grp = aio_group $cb;
+
+      my $fd = fileno $fh;
+
+      defined $fd or Carp::croak "aio_close called with fd-less filehandle";
+
+      # if the dups fail we will simply get EBADF
+      my $fd2 = _dup $fd;
+      aioreq_pri $pri;
+      add $grp _aio_close $fd2, sub {
+         my $fd2 = _dup $fd;
+         close $fh;
+         aioreq_pri $pri;
+         add $grp _aio_close $fd2, sub {
+            $grp->result ($_[0]);
+         };
+      };
+
+      $grp
+   }
+}
 
 =item aio_read  $fh,$offset,$length, $data,$dataoffset, $callback->($retval)
 
@@ -331,10 +387,11 @@
 
 =item aio_write $fh,$offset,$length, $data,$dataoffset, $callback->($retval)
 
 Reads or writes C<$length> bytes from or to the specified C<$fh> and
 C<$offset> into the scalar given by C<$data> and offset C<$dataoffset>
 and calls the callback with the actual number of bytes transferred (or
 -1 on error, just like the syscall).
 
-If C<$offset> is undefined, then the current file offset will be used (and
-updated), otherwise the file offset will not be changed by these calls.
+If C<$offset> is undefined, then the current file descriptor offset will
+be used (and updated), otherwise the file descriptor offset will not be
+changed by these calls.
 
-If C<$length> is undefined in C<aio_write>, use the remaining length of C<$data>.
+If C<$length> is undefined in C<aio_write>, use the remaining length of
+C<$data>.
 
 If C<$dataoffset> is less than zero, it will be counted from the end of
 C<$data>.
@@ -445,6 +502,11 @@
 
    aio_chown "path", 0, undef;
 
+=item aio_truncate $fh_or_path, $offset, $callback->($status)
+
+Works like truncate(2) or ftruncate(2).
+
+
 =item aio_chmod $fh_or_path, $mode, $callback->($status)
 
 Works like perl's C<chmod> function.
@@ -1203,7 +1265,7 @@
 use an C<aio_group> together with a feed callback.
 
 Sets the maximum number of outstanding requests to C<$nreqs>. If you
-to queue up more than this number of requests, the next call to the
+do queue up more than this number of requests, the next call to the
 C<poll_cb> (and C<poll> and other functions calling C<poll_cb>)
 function will block until the limit is no longer exceeded.
 
@@ -1244,22 +1306,6 @@
 
 =cut
 
-# support function to convert a fd into a perl filehandle
-sub _fd2fh {
-   return undef if $_[0] < 0;
-
-   # try to generate nice filehandles
-   my $sym = "IO::AIO::fd#$_[0]";
-   local *$sym;
-
-   open *$sym, "+<&=$_[0]" # usually works under any unix
-      or open *$sym, "<&=$_[0]" # cygwin needs this
-      or open *$sym, ">&=$_[0]" # or this
-      or return undef;
-
-   *$sym
-}
-
 min_parallel 8;
 
 END { flush }
@@ -1292,7 +1338,7 @@
 
 scalars and other data passed into aio requests will also be locked and
 will consume memory till the request has entered the done state.
 
-This is now awfully much, so queuing lots of requests is not usually a
+This is not awfully much, so queuing lots of requests is not usually a
 problem.
 
 Per-thread usage:
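
A quick sketch exercising the new C<aio_truncate> together with
C<max_outstanding> throttling as documented above (the file name, size
and request limit here are arbitrary, made up for illustration):

   use Fcntl;
   use IO::AIO;

   # throttle: once more than 64 requests are outstanding, the next
   # poll_cb (and thus flush) blocks until the queue drains again
   IO::AIO::max_outstanding 64;

   aio_open "/tmp/scratch", O_RDWR | O_CREAT, 0600, sub {
      my $fh = shift
         or die "/tmp/scratch: $!";

      # shrink (or extend) the file to 4096 bytes, like ftruncate(2)
      aio_truncate $fh, 4096, sub {
         $_[0] == 0
            or warn "truncate failed: $!";

         # queue the dup/close dance described under aio_close
         aio_close $fh, sub { };
      };
   };

   IO::AIO::flush;   # wait for all outstanding requests, including
                     # the ones queued from inside the callbacks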