… | |
… | |
604 | |
604 | |
605 | static void grp_try_feed (eio_req *grp) |
605 | static void grp_try_feed (eio_req *grp) |
606 | { |
606 | { |
607 | while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) |
607 | while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) |
608 | { |
608 | { |
609 | int old_len = grp->size; |
609 | grp->flags &= ~EIO_FLAG_GROUPADD; |
610 | |
610 | |
611 | EIO_FEED (grp); |
611 | EIO_FEED (grp); |
612 | |
612 | |
613 | /* stop if no progress has been made */ |
613 | /* stop if no progress has been made */ |
614 | if (old_len == grp->size) |
614 | if (!(grp->flags & EIO_FLAG_GROUPADD)) |
615 | { |
615 | { |
616 | grp->feed = 0; |
616 | grp->feed = 0; |
617 | break; |
617 | break; |
618 | } |
618 | } |
619 | } |
619 | } |
… | |
… | |
736 | |
736 | |
737 | /*****************************************************************************/ |
737 | /*****************************************************************************/ |
738 | /* work around various missing functions */ |
738 | /* work around various missing functions */ |
739 | |
739 | |
740 | #if !HAVE_PREADWRITE |
740 | #if !HAVE_PREADWRITE |
|
|
741 | # undef pread |
|
|
742 | # undef pwrite |
741 | # define pread eio__pread |
743 | # define pread eio__pread |
742 | # define pwrite eio__pwrite |
744 | # define pwrite eio__pwrite |
743 | |
745 | |
744 | static ssize_t |
746 | static ssize_t |
745 | eio__pread (int fd, void *buf, size_t count, off_t offset) |
747 | eio__pread (int fd, void *buf, size_t count, off_t offset) |
… | |
… | |
774 | } |
776 | } |
775 | #endif |
777 | #endif |
776 | |
778 | |
777 | #ifndef HAVE_FUTIMES |
779 | #ifndef HAVE_FUTIMES |
778 | |
780 | |
|
|
781 | # undef utimes |
|
|
782 | # undef futimes |
779 | # define utimes(path,times) eio__utimes (path, times) |
783 | # define utimes(path,times) eio__utimes (path, times) |
780 | # define futimes(fd,times) eio__futimes (fd, times) |
784 | # define futimes(fd,times) eio__futimes (fd, times) |
781 | |
785 | |
782 | static int |
786 | static int |
783 | eio__utimes (const char *filename, const struct timeval times[2]) |
787 | eio__utimes (const char *filename, const struct timeval times[2]) |
… | |
… | |
802 | } |
806 | } |
803 | |
807 | |
804 | #endif |
808 | #endif |
805 | |
809 | |
806 | #if !HAVE_FDATASYNC |
810 | #if !HAVE_FDATASYNC |
|
|
811 | # undef fdatasync |
807 | # define fdatasync fsync |
812 | # define fdatasync(fd) fsync (fd) |
808 | #endif |
813 | #endif |
|
|
814 | |
|
|
/* sync_file_range always needs emulation */
int
eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags)
{
#if HAVE_SYNC_FILE_RANGE
  int res;

  /* translate the portable EIO_SYNC_FILE_RANGE_* bits into the native
   * SYNC_FILE_RANGE_* bits, unless both sets happen to coincide */
  if (EIO_SYNC_FILE_RANGE_WAIT_BEFORE != SYNC_FILE_RANGE_WAIT_BEFORE
      || EIO_SYNC_FILE_RANGE_WRITE != SYNC_FILE_RANGE_WRITE
      || EIO_SYNC_FILE_RANGE_WAIT_AFTER != SYNC_FILE_RANGE_WAIT_AFTER)
    {
      flags = 0
         | (flags & EIO_SYNC_FILE_RANGE_WAIT_BEFORE ? SYNC_FILE_RANGE_WAIT_BEFORE : 0)
         | (flags & EIO_SYNC_FILE_RANGE_WRITE ? SYNC_FILE_RANGE_WRITE : 0)
         | (flags & EIO_SYNC_FILE_RANGE_WAIT_AFTER ? SYNC_FILE_RANGE_WAIT_AFTER : 0);
    }

  res = sync_file_range (fd, offset, nbytes, flags);

  /* sync_file_range returns -1 and sets errno on failure; ENOSYS is
   * reported via errno, never via the return value, so comparing the
   * result against ENOSYS (as the old code did) could never trigger the
   * fallback.  fall through to fdatasync only when the syscall is
   * genuinely unimplemented. */
  if (!res || errno != ENOSYS)
    return res;
#endif

  /* even though we could play tricks with the flags, it's better to always
   * call fdatasync, as that matches the expectation of its users best */
  return fdatasync (fd);
}
809 | |
842 | |
810 | #if !HAVE_READAHEAD |
843 | #if !HAVE_READAHEAD |
|
|
844 | # undef readahead |
811 | # define readahead(fd,offset,count) eio__readahead (fd, offset, count, self) |
845 | # define readahead(fd,offset,count) eio__readahead (fd, offset, count, self) |
812 | |
846 | |
813 | static ssize_t |
847 | static ssize_t |
814 | eio__readahead (int fd, off_t offset, size_t count, etp_worker *self) |
848 | eio__readahead (int fd, off_t offset, size_t count, etp_worker *self) |
815 | { |
849 | { |
… | |
… | |
981 | |
1015 | |
982 | req->result = res; |
1016 | req->result = res; |
983 | } |
1017 | } |
984 | |
1018 | |
985 | #if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO) |
1019 | #if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO) |
|
|
1020 | # undef msync |
986 | # define msync(a,b,c) ENOSYS |
1021 | # define msync(a,b,c) ENOSYS |
987 | #endif |
1022 | #endif |
988 | |
1023 | |
989 | int |
1024 | int |
990 | eio__mtouch (void *mem, size_t len, int flags) |
1025 | eio__mtouch (void *mem, size_t len, int flags) |
… | |
… | |
1185 | case EIO_SYNC: req->result = 0; sync (); break; |
1220 | case EIO_SYNC: req->result = 0; sync (); break; |
1186 | case EIO_FSYNC: req->result = fsync (req->int1); break; |
1221 | case EIO_FSYNC: req->result = fsync (req->int1); break; |
1187 | case EIO_FDATASYNC: req->result = fdatasync (req->int1); break; |
1222 | case EIO_FDATASYNC: req->result = fdatasync (req->int1); break; |
1188 | case EIO_MSYNC: req->result = msync (req->ptr2, req->size, req->int1); break; |
1223 | case EIO_MSYNC: req->result = msync (req->ptr2, req->size, req->int1); break; |
1189 | case EIO_MTOUCH: req->result = eio__mtouch (req->ptr2, req->size, req->int1); break; |
1224 | case EIO_MTOUCH: req->result = eio__mtouch (req->ptr2, req->size, req->int1); break; |
|
|
1225 | case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break; |
1190 | |
1226 | |
1191 | case EIO_READDIR: eio__scandir (req, self); break; |
1227 | case EIO_READDIR: eio__scandir (req, self); break; |
1192 | |
1228 | |
1193 | case EIO_BUSY: |
1229 | case EIO_BUSY: |
1194 | #ifdef _WIN32 |
1230 | #ifdef _WIN32 |
… | |
… | |
/* queue an EIO_MTOUCH request: the worker thread runs
 * eio__mtouch (addr, length, flags) and stores its return value in
 * req->result.  REQ/SEND are the request allocate/submit macros used
 * throughout this file (defined outside this view); cb/data follow the
 * usual eio completion convention. */
eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}
1283 | |
1319 | |
|
|
/* queue an EIO_SYNC_FILE_RANGE request: the worker thread runs
 * eio__sync_file_range (fd, offset, nbytes, flags), which uses the
 * native sync_file_range where available and otherwise falls back to
 * fdatasync.  REQ/SEND are the request allocate/submit macros used
 * throughout this file (defined outside this view). */
eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
}
|
|
1324 | |
/* queue an EIO_FDATASYNC request: the worker thread calls
 * fdatasync (fd) (or the fsync fallback when HAVE_FDATASYNC is unset)
 * and stores the result in req->result.  REQ/SEND are the request
 * allocate/submit macros used throughout this file (defined outside
 * this view). */
eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FDATASYNC); req->int1 = fd; SEND;
}
1288 | |
1329 | |
… | |
… | |
1481 | |
1522 | |
1482 | void eio_grp_add (eio_req *grp, eio_req *req) |
1523 | void eio_grp_add (eio_req *grp, eio_req *req) |
1483 | { |
1524 | { |
1484 | assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); |
1525 | assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); |
1485 | |
1526 | |
|
|
1527 | grp->flags |= EIO_FLAG_GROUPADD; |
|
|
1528 | |
1486 | ++grp->size; |
1529 | ++grp->size; |
1487 | req->grp = grp; |
1530 | req->grp = grp; |
1488 | |
1531 | |
1489 | req->grp_prev = 0; |
1532 | req->grp_prev = 0; |
1490 | req->grp_next = grp->grp_first; |
1533 | req->grp_next = grp->grp_first; |