--- IO-AIO/AIO.xs	2006/10/24 00:26:32	1.65
+++ IO-AIO/AIO.xs	2006/10/27 19:17:23	1.80
@@ -1,7 +1,11 @@
-#if __linux
+/* solaris */
+#define _POSIX_PTHREAD_SEMANTICS 1
+
+#if __linux && !defined(_GNU_SOURCE)
 # define _GNU_SOURCE
 #endif
 
+/* just in case */
 #define _REENTRANT 1
 
 #include <errno.h>
@@ -46,6 +50,11 @@
 # define NAME_MAX 4096
 #endif
 
+#ifndef PTHREAD_STACK_MIN
+/* care for broken platforms, e.g. windows */
+# define PTHREAD_STACK_MIN 16384
+#endif
+
 #if __ia64
 # define STACKSIZE 65536
 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
@@ -54,16 +63,29 @@
 # define STACKSIZE 16384
 #endif
 
+/* whether word reads are potentially non-atomic.
+ * this is conservative; most architectures this runs
+ * on likely have atomic word reads/writes.
+ */
+#ifndef WORDREAD_UNSAFE
+# if __i386 || __x86_64
+#  define WORDREAD_UNSAFE 0
+# else
+#  define WORDREAD_UNSAFE 1
+# endif
+#endif
+
 /* buffer size for various temporary buffers */
 #define AIO_BUFSIZE 65536
 
 #define dBUF	\
-  char *aio_buf = malloc (AIO_BUFSIZE);	\
+  char *aio_buf;	\
+  LOCK (wrklock);	\
+  self->dbuf = aio_buf = malloc (AIO_BUFSIZE);	\
+  UNLOCK (wrklock);	\
   if (!aio_buf)	\
     return -1;
 
-#define fBUF free (aio_buf)
-
 enum {
   REQ_QUIT,
   REQ_OPEN, REQ_CLOSE,
@@ -75,7 +97,7 @@
   REQ_READDIR,
   REQ_LINK, REQ_SYMLINK,
   REQ_GROUP, REQ_NOP,
-  REQ_SLEEP,
+  REQ_BUSY,
 };
 
 #define AIO_REQ_KLASS "IO::AIO::REQ"
@@ -119,14 +141,12 @@
   DEFAULT_PRI = 0,
 
   PRI_BIAS    = -PRI_MIN,
+  NUM_PRI     = PRI_MAX + PRI_BIAS + 1,
 };
 
 static int next_pri = DEFAULT_PRI + PRI_BIAS;
 
-static int started, wanted;
-static volatile int nreqs;
-static int max_outstanding = 1<<30;
-static int respipe [2];
+static unsigned int started, wanted;
 
 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
@@ -134,15 +154,146 @@
 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
 #endif
 
+#define LOCK(mutex)   pthread_mutex_lock   (&(mutex))
+#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
+
+/* worker threads management */
+static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
+
+typedef struct worker {
+  /* locked by wrklock */
+  struct worker *prev, *next;
+
+  pthread_t tid;
+
+  /* locked by reslock, reqlock or wrklock */
+  aio_req req; /* currently processed request */
+  void *dbuf;
+  DIR *dirp;
+} worker;
+
+static worker wrk_first = { &wrk_first, &wrk_first, 0 };
+
+static void worker_clear (worker *wrk)
+{
+  if (wrk->dirp)
+    {
+      closedir (wrk->dirp);
+      wrk->dirp = 0;
+    }
+
+  if (wrk->dbuf)
+    {
+      free (wrk->dbuf);
+      wrk->dbuf = 0;
+    }
+}
+
+static void worker_free (worker *wrk)
+{
+  wrk->next->prev = wrk->prev;
+  wrk->prev->next = wrk->next;
+
+  free (wrk);
+}
+
+static volatile unsigned int nreqs, nready, npending;
+static volatile unsigned int max_outstanding = 0xffffffff;
+static int respipe [2];
+
 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
 static pthread_cond_t  reqwait = PTHREAD_COND_INITIALIZER;
 
-static volatile aio_req reqs, reqe; /* queue start, queue end */
-static volatile aio_req ress, rese; /* queue start, queue end */
+#if WORDREAD_UNSAFE
+
+static unsigned int get_nready ()
+{
+  unsigned int retval;
+
+  LOCK (reqlock);
+  retval = nready;
+  UNLOCK (reqlock);
+
+  return retval;
+}
+
+static unsigned int get_npending ()
+{
+  unsigned int retval;
+
+  LOCK (reslock);
+  retval = npending;
+  UNLOCK (reslock);
+
+  return retval;
+}
+
+#else
+
+# define get_nready()   nready
+# define get_npending() npending
+
+#endif
+
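/*
 * Illustration only, not part of the patch: the pattern behind
 * WORDREAD_UNSAFE and get_nready ()/get_npending () above.  On targets
 * where a plain word read may not be atomic, a shared counter that is
 * written under a mutex must also be *read* under that mutex; on i386
 * and x86_64 the patch skips the lock.  Minimal standalone sketch
 * (hypothetical names, plain pthreads):
 */
#include <pthread.h>

#ifndef WORDREAD_UNSAFE
# define WORDREAD_UNSAFE 1              /* conservative default for the sketch */
#endif

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int counter;   /* written only while holding counter_lock */

static unsigned int counter_get (void)
{
#if WORDREAD_UNSAFE
  unsigned int v;

  pthread_mutex_lock (&counter_lock);
  v = counter;
  pthread_mutex_unlock (&counter_lock);

  return v;
#else
  return counter;                       /* word read assumed atomic here */
#endif
}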
+/*
+ * a somewhat faster data structure might be nice, but
+ * with 8 priorities this actually needs <20 insns
+ * per shift, the most expensive operation.
+ */
+typedef struct {
+  aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
+  int size;
+} reqq;
+
+static reqq req_queue;
+static reqq res_queue;
+
+int reqq_push (reqq *q, aio_req req)
+{
+  int pri = req->pri;
+  req->next = 0;
+
+  if (q->qe[pri])
+    {
+      q->qe[pri]->next = req;
+      q->qe[pri] = req;
+    }
+  else
+    q->qe[pri] = q->qs[pri] = req;
+
+  return q->size++;
+}
+
+aio_req reqq_shift (reqq *q)
+{
+  int pri;
+
+  if (!q->size)
+    return 0;
+
+  --q->size;
+
+  for (pri = NUM_PRI; pri--; )
+    {
+      aio_req req = q->qs[pri];
+
+      if (req)
+        {
+          if (!(q->qs[pri] = req->next))
+            q->qe[pri] = 0;
+
+          return req;
+        }
+    }
+
+  abort ();
+}
+
+static int poll_cb (int max);
 static void req_invoke (aio_req req);
 static void req_free (aio_req req);
+static void req_cancel (aio_req req);
 
 /* must be called at most once */
 static SV *req_sv (aio_req req, const char *klass)
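/*
 * Illustration only, not part of the patch: how the reqq structure above
 * behaves.  Each priority gets its own FIFO; reqq_push appends in O(1)
 * and reqq_shift scans from the highest priority downwards, so a request
 * pushed with a higher pri is returned before older, lower-pri ones.
 * Self-contained analogue with a hypothetical item type:
 */
#include <stdio.h>

#define DEMO_NUM_PRI 9               /* 9 slots, assuming a -4..4 priority range */

typedef struct demo_item {
  struct demo_item *next;
  int pri;                           /* 0 .. DEMO_NUM_PRI - 1, already biased */
  const char *name;
} demo_item;

typedef struct {
  demo_item *qs[DEMO_NUM_PRI], *qe[DEMO_NUM_PRI];
  int size;
} demo_reqq;

static void demo_push (demo_reqq *q, demo_item *it)
{
  it->next = 0;

  if (q->qe[it->pri])
    {
      q->qe[it->pri]->next = it;
      q->qe[it->pri] = it;
    }
  else
    q->qe[it->pri] = q->qs[it->pri] = it;

  ++q->size;
}

static demo_item *demo_shift (demo_reqq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = DEMO_NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        demo_item *it = q->qs[pri];

        if (!(q->qs[pri] = it->next))
          q->qe[pri] = 0;

        return it;
      }

  return 0;
}

int main (void)
{
  static demo_reqq q;
  demo_item a = { 0, 4, "normal" }, b = { 0, 6, "urgent" };

  demo_push (&q, &a);
  demo_push (&q, &b);

  printf ("%s\n", demo_shift (&q)->name); /* "urgent" comes out first */
  printf ("%s\n", demo_shift (&q)->name); /* then "normal" */
  return 0;
}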
@@ -183,7 +334,7 @@
       PUSHMARK (SP);
       XPUSHs (req_sv (grp, AIO_GRP_KLASS));
       PUTBACK;
-      call_sv (grp->fh2, G_VOID | G_EVAL);
+      call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
       SPAGAIN;
       FREETMPS;
       LEAVE;
@@ -214,132 +365,96 @@
     }
 }
 
-static void poll_wait ()
-{
-  fd_set rfd;
-
-  while (nreqs)
-    {
-      aio_req req;
-#if !(__i386 || __x86_64) /* safe without sempahore on this archs */
-      pthread_mutex_lock (&reslock);
-#endif
-      req = ress;
-#if !(__i386 || __x86_64) /* safe without sempahore on this archs */
-      pthread_mutex_unlock (&reslock);
-#endif
-
-      if (req)
-        return;
-
-      FD_ZERO(&rfd);
-      FD_SET(respipe [0], &rfd);
-
-      select (respipe [0] + 1, &rfd, 0, 0, 0);
-    }
-}
-
 static void req_invoke (aio_req req)
 {
   dSP;
-  int errorno = errno;
 
-  if (req->flags & FLAG_CANCELLED || !SvOK (req->callback))
-    return;
-
-  errno = req->errorno;
-
-  ENTER;
-  SAVETMPS;
-  PUSHMARK (SP);
-  EXTEND (SP, 1);
-
-  switch (req->type)
-    {
-      case REQ_READDIR:
-        {
-          SV *rv = &PL_sv_undef;
-
-          if (req->result >= 0)
-            {
-              char *buf = req->data2ptr;
-              AV *av = newAV ();
-
-              while (req->result)
-                {
-                  SV *sv = newSVpv (buf, 0);
-
-                  av_push (av, sv);
-                  buf += SvCUR (sv) + 1;
-                  req->result--;
-                }
-
-              rv = sv_2mortal (newRV_noinc ((SV *)av));
-            }
-
-          PUSHs (rv);
-        }
-        break;
-
-      case REQ_OPEN:
-        {
-          /* convert fd to fh */
-          SV *fh;
-
-          PUSHs (sv_2mortal (newSViv (req->result)));
-          PUTBACK;
-          call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
-          SPAGAIN;
-
-          fh = SvREFCNT_inc (POPs);
-
-          PUSHMARK (SP);
-          XPUSHs (sv_2mortal (fh));
-        }
-        break;
-
-      case REQ_GROUP:
-        req->fd = 2; /* mark group as finished */
-
-        if (req->data)
-          {
-            int i;
-            AV *av = (AV *)req->data;
-
-            EXTEND (SP, AvFILL (av) + 1);
-            for (i = 0; i <= AvFILL (av); ++i)
-              PUSHs (*av_fetch (av, i, 0));
-          }
-        break;
-
-      case REQ_NOP:
-      case REQ_SLEEP:
-        break;
-
-      default:
-        PUSHs (sv_2mortal (newSViv (req->result)));
-        break;
-    }
-
-  PUTBACK;
-  call_sv (req->callback, G_VOID | G_EVAL);
-  SPAGAIN;
-
-  FREETMPS;
-  LEAVE;
-
-  errno = errorno;
-
-  if (SvTRUE (ERRSV))
+  if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
     {
-      req_free (req);
-      croak (0);
+      ENTER;
+      SAVETMPS;
+      PUSHMARK (SP);
+      EXTEND (SP, 1);
+
+      switch (req->type)
+        {
+          case REQ_READDIR:
+            {
+              SV *rv = &PL_sv_undef;
+
+              if (req->result >= 0)
+                {
+                  int i;
+                  char *buf = req->data2ptr;
+                  AV *av = newAV ();
+
+                  av_extend (av, req->result - 1);
+
+                  for (i = 0; i < req->result; ++i)
+                    {
+                      SV *sv = newSVpv (buf, 0);
+
+                      av_store (av, i, sv);
+                      buf += SvCUR (sv) + 1;
+                    }
+
+                  rv = sv_2mortal (newRV_noinc ((SV *)av));
+                }
+
+              PUSHs (rv);
+            }
+            break;
+
+          case REQ_OPEN:
+            {
+              /* convert fd to fh */
+              SV *fh;
+
+              PUSHs (sv_2mortal (newSViv (req->result)));
+              PUTBACK;
+              call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
+              SPAGAIN;
+
+              fh = SvREFCNT_inc (POPs);
+
+              PUSHMARK (SP);
+              XPUSHs (sv_2mortal (fh));
+            }
+            break;
+
+          case REQ_GROUP:
+            req->fd = 2; /* mark group as finished */
+
+            if (req->data)
+              {
+                int i;
+                AV *av = (AV *)req->data;
+
+                EXTEND (SP, AvFILL (av) + 1);
+                for (i = 0; i <= AvFILL (av); ++i)
+                  PUSHs (*av_fetch (av, i, 0));
+              }
+            break;
+
+          case REQ_NOP:
+          case REQ_BUSY:
+            break;
+
+          default:
+            PUSHs (sv_2mortal (newSViv (req->result)));
+            break;
+        }
+
+      errno = req->errorno;
+
+      PUTBACK;
+      call_sv (req->callback, G_VOID | G_EVAL);
+      SPAGAIN;
+
+      FREETMPS;
+      LEAVE;
     }
-}
 
-static void req_free (aio_req req)
-{
   if (req->grp)
     {
       aio_req grp = req->grp;
@@ -354,6 +469,15 @@
       aio_grp_dec (grp);
     }
 
+  if (SvTRUE (ERRSV))
+    {
+      req_free (req);
+      croak (0);
+    }
+}
+
+static void req_free (aio_req req)
+{
   if (req->self)
     {
       sv_unmagic (req->self, PERL_MAGIC_ext);
@@ -366,90 +490,31 @@
   SvREFCNT_dec (req->callback);
   Safefree (req->statdata);
 
-  if (req->type == REQ_READDIR && req->result >= 0)
+  if (req->type == REQ_READDIR)
     free (req->data2ptr);
 
   Safefree (req);
 }
 
-static void req_cancel (aio_req req)
+static void req_cancel_subs (aio_req grp)
 {
-  req->flags |= FLAG_CANCELLED;
+  aio_req sub;
 
-  if (req->type == REQ_GROUP)
-    {
-      aio_req sub;
+  if (grp->type != REQ_GROUP)
+    return;
 
-      for (sub = req->grp_first; sub; sub = sub->grp_next)
-        req_cancel (sub);
-    }
+  SvREFCNT_dec (grp->fh2);
+  grp->fh2 = 0;
+
+  for (sub = grp->grp_first; sub; sub = sub->grp_next)
+    req_cancel (sub);
 }
 
-static int poll_cb ()
+static void req_cancel (aio_req req)
 {
-  dSP;
-  int count = 0;
-  int do_croak = 0;
-  aio_req req;
-
-  for (;;)
-    {
-      pthread_mutex_lock (&reslock);
-      req = ress;
-
-      if (req)
-        {
-          ress = req->next;
-
-          if (!ress)
-            {
-              /* read any signals sent by the worker threads */
-              char buf [32];
-              while (read (respipe [0], buf, 32) == 32)
-                ;
-
-              rese = 0;
-            }
-        }
-
-      pthread_mutex_unlock (&reslock);
-
-      if (!req)
-        break;
-
-      --nreqs;
-
-      if (req->type == REQ_QUIT)
-        started--;
-      else if (req->type == REQ_GROUP && req->length)
-        {
-          req->fd = 1; /* mark request as delayed */
-          continue;
-        }
-      else
-        {
-          if (req->type == REQ_READ)
-            SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
-
-          if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
-            SvREADONLY_off (req->data);
-
-          if (req->statdata)
-            {
-              PL_laststype   = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
-              PL_laststatval = req->result;
-              PL_statcache   = *(req->statdata);
-            }
-
-          req_invoke (req);
-
-          count++;
-        }
-
-      req_free (req);
-    }
+  req->flags |= FLAG_CANCELLED;
 
-  return count;
+  req_cancel_subs (req);
 }
 
 static void *aio_proc(void *arg);
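/*
 * Illustration only, not part of the patch: the cancellation scheme used
 * by req_cancel ()/req_cancel_subs () above.  Cancelling marks a request
 * with a flag rather than removing it from the queues, and for a group it
 * recursively flags every sub-request on the grp_first/grp_next list.
 * Self-contained analogue with hypothetical types:
 */
enum { DEMO_FLAG_CANCELLED = 1 };

typedef struct demo_req {
  struct demo_req *grp_first;   /* first sub-request, if this is a group */
  struct demo_req *grp_next;    /* next sibling in the same group */
  int flags;
  int is_group;
} demo_req;

static void demo_cancel (demo_req *req)
{
  demo_req *sub;

  req->flags |= DEMO_FLAG_CANCELLED;   /* workers/callbacks check this flag */

  if (!req->is_group)
    return;

  for (sub = req->grp_first; sub; sub = sub->grp_next)
    demo_cancel (sub);                 /* propagate to every sub-request */
}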
@@ -457,63 +522,101 @@
 static void start_thread (void)
 {
   sigset_t fullsigset, oldsigset;
-  pthread_t tid;
   pthread_attr_t attr;
 
+  worker *wrk = calloc (1, sizeof (worker));
+
+  if (!wrk)
+    croak ("unable to allocate worker thread data");
+
   pthread_attr_init (&attr);
   pthread_attr_setstacksize (&attr, STACKSIZE);
   pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+#ifdef PTHREAD_SCOPE_PROCESS
+  pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
+#endif
 
   sigfillset (&fullsigset);
+
+  LOCK (wrklock);
   sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
 
-  if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
-    started++;
+  if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
+    {
+      wrk->prev = &wrk_first;
+      wrk->next = wrk_first.next;
+      wrk_first.next->prev = wrk;
+      wrk_first.next = wrk;
+      ++started;
+    }
+  else
+    free (wrk);
 
   sigprocmask (SIG_SETMASK, &oldsigset, 0);
+  UNLOCK (wrklock);
 }
 
-static void req_send (aio_req req)
+static void maybe_start_thread ()
 {
-  while (started < wanted && nreqs >= started)
-    start_thread ();
+#if 0
+  static struct timeval last;
+  struct timeval diff, now;
+#endif
 
-  ++nreqs;
+  if (started >= wanted)
+    return;
+
+  if (nready <= nreqs - get_nready () - get_npending ())
+    return;
 
-  pthread_mutex_lock (&reqlock);
+#if 0
+  gettimeofday (&now, 0);
 
-  req->next = 0;
+  diff.tv_sec  = now.tv_sec  - last.tv_sec;
+  diff.tv_usec = now.tv_usec - last.tv_usec;
 
-  if (reqe)
+  if (diff.tv_usec < 0)
     {
-      reqe->next = req;
-      reqe = req;
+      --diff.tv_sec;
+      diff.tv_usec += 1000000;
     }
-  else
-    reqe = reqs = req;
 
-  pthread_cond_signal (&reqwait);
-  pthread_mutex_unlock (&reqlock);
+  if (!diff.tv_sec && diff.tv_usec < 10000)
+    return;
 
-  if (nreqs > max_outstanding)
-    for (;;)
-      {
-        poll_cb ();
+  last = now;
+#endif
 
-        if (nreqs <= max_outstanding)
-          break;
+  start_thread ();
+}
 
-        poll_wait ();
-      }
+static void req_send (aio_req req)
+{
+  ++nreqs;
+
+  LOCK (reqlock);
+  ++nready;
+  reqq_push (&req_queue, req);
+  pthread_cond_signal (&reqwait);
+  UNLOCK (reqlock);
+
+  maybe_start_thread ();
 }
 
 static void end_thread (void)
 {
   aio_req req;
+
   Newz (0, req, 1, aio_cb);
+
   req->type = REQ_QUIT;
+  req->pri  = PRI_MAX + PRI_BIAS;
 
   req_send (req);
+
+  LOCK (wrklock);
+  --started;
+  UNLOCK (wrklock);
 }
 
 static void min_parallel (int nthreads)
@@ -524,22 +627,109 @@
 
 static void max_parallel (int nthreads)
 {
-  int cur = started;
-
   if (wanted > nthreads)
     wanted = nthreads;
 
-  while (cur > wanted)
-    {
-      end_thread ();
-      cur--;
-    }
-
-  while (started > wanted)
-    {
-      poll_wait ();
-      poll_cb ();
-    }
+  while (started > wanted)
+    end_thread ();
 }
 
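/*
 * Illustration only, not part of the patch: the demand check inside
 * maybe_start_thread () above.  nreqs counts all outstanding requests,
 * nready the ones still queued, npending the finished ones awaiting
 * poll_cb, so nreqs - nready - npending is roughly the number currently
 * being executed.  A new thread is only started while the pool is below
 * "wanted" and there are more queued requests than busy workers.
 * Hypothetical standalone helper:
 */
static int should_start_thread (unsigned int started_, unsigned int wanted_,
                                unsigned int nreqs_, unsigned int nready_,
                                unsigned int npending_)
{
  unsigned int executing = nreqs_ - nready_ - npending_;

  if (started_ >= wanted_)
    return 0;                 /* pool already at its configured size */

  return nready_ > executing; /* queued work exceeds busy workers */
}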
+static void poll_wait ()
+{
+  fd_set rfd;
+
+  while (nreqs)
+    {
+      int size;
+      if (WORDREAD_UNSAFE) LOCK (reslock);
+      size = res_queue.size;
+      if (WORDREAD_UNSAFE) UNLOCK (reslock);
+
+      if (size)
+        return;
+
+      maybe_start_thread ();
+
+      FD_ZERO(&rfd);
+      FD_SET(respipe [0], &rfd);
+
+      select (respipe [0] + 1, &rfd, 0, 0, 0);
+    }
+}
+
+static int poll_cb (int max)
+{
+  dSP;
+  int count = 0;
+  int do_croak = 0;
+  aio_req req;
+
+  for (;;)
+    {
+      while (max <= 0 || count < max)
+        {
+          maybe_start_thread ();
+
+          LOCK (reslock);
+          req = reqq_shift (&res_queue);
+
+          if (req)
+            {
+              --npending;
+
+              if (!res_queue.size)
+                {
+                  /* read any signals sent by the worker threads */
+                  char buf [32];
+                  while (read (respipe [0], buf, 32) == 32)
+                    ;
+                }
+            }
+
+          UNLOCK (reslock);
+
+          if (!req)
+            break;
+
+          --nreqs;
+
+          if (req->type == REQ_GROUP && req->length)
+            {
+              req->fd = 1; /* mark request as delayed */
+              continue;
+            }
+          else
+            {
+              if (req->type == REQ_READ)
+                SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
+
+              if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
+                SvREADONLY_off (req->data);
+
+              if (req->statdata)
+                {
+                  PL_laststype   = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
+                  PL_laststatval = req->result;
+                  PL_statcache   = *(req->statdata);
+                }
+
+              req_invoke (req);
+
+              count++;
+            }
+
+          req_free (req);
+        }
+
+      if (nreqs <= max_outstanding)
+        break;
+
+      poll_wait ();
+
+      max = 0;
+    }
+
+  return count;
+}
 
 static void create_pipe ()
@@ -573,12 +763,12 @@
   ssize_t res;
   off_t ooffset;
 
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
   ooffset = lseek (fd, 0, SEEK_CUR);
   lseek (fd, offset, SEEK_SET);
   res = read (fd, buf, count);
   lseek (fd, ooffset, SEEK_SET);
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 
   return res;
 }
@@ -588,12 +778,12 @@
   ssize_t res;
   off_t ooffset;
 
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
   ooffset = lseek (fd, 0, SEEK_CUR);
   lseek (fd, offset, SEEK_SET);
   res = write (fd, buf, count);
   lseek (fd, offset, SEEK_SET);
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 
   return res;
 }
@@ -604,9 +794,9 @@
 #endif
 
 #if !HAVE_READAHEAD
-# define readahead aio_readahead
+# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
 
-static ssize_t readahead (int fd, off_t offset, size_t count)
+static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
 {
   dBUF;
 
@@ -619,10 +809,9 @@
       count -= len;
     }
 
-  fBUF;
-
   errno = 0;
 }
+
 #endif
 
 #if !HAVE_READDIR_R
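/*
 * Illustration only, not part of the patch: the pread () emulation used
 * above when HAVE_PREADWRITE is false.  Because lseek () moves the shared
 * file offset, the seek/read/seek-back sequence must be serialised with a
 * mutex, or concurrent workers would corrupt each other's offsets.
 * Self-contained sketch with hypothetical names:
 */
#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>

static pthread_mutex_t emul_preadlock = PTHREAD_MUTEX_INITIALIZER;

static ssize_t emul_pread (int fd, void *buf, size_t count, off_t offset)
{
  ssize_t res;
  off_t ooffset;

  pthread_mutex_lock (&emul_preadlock);
  ooffset = lseek (fd, 0, SEEK_CUR);   /* remember the current offset */
  lseek (fd, offset, SEEK_SET);
  res = read (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);       /* restore it for other users */
  pthread_mutex_unlock (&emul_preadlock);

  return res;
}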
@@ -635,7 +824,7 @@
   struct dirent *e;
   int errorno;
 
-  pthread_mutex_lock (&readdirlock);
+  LOCK (readdirlock);
 
   e = readdir (dirp);
   errorno = errno;
@@ -648,7 +837,7 @@
   else
     *res = 0;
 
-  pthread_mutex_unlock (&readdirlock);
+  UNLOCK (readdirlock);
 
   errno = errorno;
   return e ? 0 : -1;
@@ -656,7 +845,7 @@
 #endif
 
 /* sendfile always needs emulation */
-static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count)
+static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
 {
   ssize_t res;
 
@@ -677,7 +866,7 @@
       res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
 
       if (res < 0 && sbytes)
-        /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */
+        /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
         res = sbytes;
     }
 
@@ -743,15 +932,13 @@
           res   += cnt;
          count -= cnt;
         }
-
-      fBUF;
     }
 
   return res;
 }
 
 /* read a full directory */
-static int scandir_ (const char *path, void **namesp)
+static void scandir_ (aio_req req, worker *self)
 {
   DIR *dirp;
   union
@@ -766,14 +953,13 @@
   int res = 0;
   int errorno;
 
-  dirp = opendir (path);
-  if (!dirp)
-    return -1;
-
-  u = malloc (sizeof (*u));
-  names = malloc (memlen);
+  LOCK (wrklock);
+  self->dirp = dirp = opendir (req->dataptr);
+  self->dbuf = u = malloc (sizeof (*u));
+  req->data2ptr = names = malloc (memlen);
+  UNLOCK (wrklock);
 
-  if (u && names)
+  if (dirp && u && names)
     for (;;)
       {
        errno = 0;
@@ -793,7 +979,10 @@
            while (memofs + len > memlen)
              {
                memlen *= 2;
-               names = realloc (names, memlen);
+               LOCK (wrklock);
+               req->data2ptr = names = realloc (names, memlen);
+               UNLOCK (wrklock);
+
                if (!names)
                  break;
              }
@@ -803,19 +992,10 @@
          }
       }
 
-  errorno = errno;
-  free (u);
-  closedir (dirp);
-
-  if (errorno)
-    {
-      free (names);
-      errno = errorno;
-      res = -1;
-    }
-
-  *namesp = (void *)names;
-  return res;
+  if (errno)
+    res = -1;
+
+  req->result = res;
 }
 
 /*****************************************************************************/
@@ -823,21 +1003,15 @@
 
 static void *aio_proc (void *thr_arg)
 {
   aio_req req;
-  int type;
+  worker *self = (worker *)thr_arg;
 
-  do
+  for (;;)
     {
-      pthread_mutex_lock (&reqlock);
+      LOCK (reqlock);
 
       for (;;)
        {
-         req = reqs;
-
-         if (reqs)
-           {
-             reqs = reqs->next;
-             if (!reqs) reqe = 0;
-           }
+         self->req = req = reqq_shift (&req_queue);
 
          if (req)
            break;
@@ -845,19 +1019,20 @@
          pthread_cond_wait (&reqwait, &reqlock);
        }
 
-      pthread_mutex_unlock (&reqlock);
+      --nready;
+
+      UNLOCK (reqlock);
 
       errno = 0; /* strictly unnecessary */
-      type = req->type; /* remember type for QUIT check */
 
       if (!(req->flags & FLAG_CANCELLED))
-        switch (type)
+        switch (req->type)
          {
            case REQ_READ:      req->result = pread     (req->fd, req->dataptr, req->length, req->offset); break;
            case REQ_WRITE:     req->result = pwrite    (req->fd, req->dataptr, req->length, req->offset); break;
 
            case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
-           case REQ_SENDFILE:  req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break;
+           case REQ_SENDFILE:  req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
 
            case REQ_STAT:      req->result = stat      (req->dataptr, req->statdata); break;
            case REQ_LSTAT:     req->result = lstat     (req->dataptr, req->statdata); break;
@@ -873,9 +1048,9 @@
            case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
            case REQ_FSYNC:     req->result = fsync     (req->fd); break;
 
-           case REQ_READDIR:   req->result = scandir_ (req->dataptr, &req->data2ptr); break;
+           case REQ_READDIR:   scandir_ (req, self); break;
 
-           case REQ_SLEEP:
+           case REQ_BUSY:
              {
                struct timeval tv;
 
@@ -887,9 +1062,15 @@
 
            case REQ_GROUP:
            case REQ_NOP:
-           case REQ_QUIT:
              break;
 
+           case REQ_QUIT:
+             LOCK (wrklock);
+             worker_free (self);
+             --started;
+             UNLOCK (wrklock);
+             return 0;
+
            default:
              req->result = ENOSYS;
              break;
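/*
 * Illustration only, not part of the patch: the consumer side of the
 * request queue as used in aio_proc () above.  A worker locks the queue
 * mutex, blocks on the condition variable while the queue is empty
 * (pthread_cond_wait releases the mutex while sleeping), pops one item,
 * updates the "ready" counter and unlocks.  Generic sketch with
 * hypothetical names:
 */
#include <pthread.h>

typedef struct job { struct job *next; } job;

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  queue_cond = PTHREAD_COND_INITIALIZER;
static job *queue_head;
static unsigned int queue_ready;

static job *queue_pop_blocking (void)
{
  job *j;

  pthread_mutex_lock (&queue_lock);

  while (!queue_head)
    pthread_cond_wait (&queue_cond, &queue_lock);  /* atomically unlocks + sleeps */

  j = queue_head;
  queue_head = j->next;
  --queue_ready;

  pthread_mutex_unlock (&queue_lock);

  return j;
}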
@@ -897,79 +1078,72 @@
 
       req->errorno = errno;
 
-      pthread_mutex_lock (&reslock);
+      LOCK (reslock);
 
-      req->next = 0;
+      ++npending;
 
-      if (rese)
-        {
-          rese->next = req;
-          rese = req;
-        }
-      else
-        {
-          rese = ress = req;
-
-          /* write a dummy byte to the pipe so fh becomes ready */
-          write (respipe [1], &respipe, 1);
-        }
+      if (!reqq_push (&res_queue, req))
+        /* write a dummy byte to the pipe so fh becomes ready */
+        write (respipe [1], &respipe, 1);
 
-      pthread_mutex_unlock (&reslock);
+      self->req = 0;
+      worker_clear (self);
+
+      UNLOCK (reslock);
     }
-  while (type != REQ_QUIT);
-
-  return 0;
 }
 
 /*****************************************************************************/
 
 static void atfork_prepare (void)
 {
-  pthread_mutex_lock (&reqlock);
-  pthread_mutex_lock (&reslock);
+  LOCK (wrklock);
+  LOCK (reqlock);
+  LOCK (reslock);
 #if !HAVE_PREADWRITE
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
 #endif
 #if !HAVE_READDIR_R
-  pthread_mutex_lock (&readdirlock);
+  LOCK (readdirlock);
 #endif
 }
 
 static void atfork_parent (void)
 {
 #if !HAVE_READDIR_R
-  pthread_mutex_unlock (&readdirlock);
+  UNLOCK (readdirlock);
 #endif
 #if !HAVE_PREADWRITE
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 #endif
-  pthread_mutex_unlock (&reslock);
-  pthread_mutex_unlock (&reqlock);
+  UNLOCK (reslock);
+  UNLOCK (reqlock);
+  UNLOCK (wrklock);
 }
 
 static void atfork_child (void)
 {
   aio_req prv;
 
-  started = 0;
+  while (prv = reqq_shift (&req_queue))
+    req_free (prv);
 
-  while (reqs)
-    {
-      prv = reqs;
-      reqs = prv->next;
-      req_free (prv);
-    }
+  while (prv = reqq_shift (&res_queue))
+    req_free (prv);
 
-  reqs = reqe = 0;
-
-  while (ress)
+  while (wrk_first.next != &wrk_first)
     {
-      prv = ress;
-      ress = prv->next;
-      req_free (prv);
+      worker *wrk = wrk_first.next;
+
+      if (wrk->req)
+        req_free (wrk->req);
+
+      worker_clear (wrk);
+      worker_free (wrk);
     }
-
-  ress = rese = 0;
+
+  started = 0;
+  nreqs   = 0;
 
   close (respipe [0]);
   close (respipe [1]);
@@ -1012,25 +1186,26 @@
 
 	create_pipe ();
 	pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
+
+	start_thread ();
 }
 
 void
-min_parallel (nthreads)
-	int nthreads
+min_parallel (int nthreads)
 	PROTOTYPE: $
 
 void
-max_parallel (nthreads)
-	int nthreads
+max_parallel (int nthreads)
 	PROTOTYPE: $
 
 int
-max_outstanding (nreqs)
-	int nreqs
-	PROTOTYPE: $
+max_outstanding (int maxreqs)
+	PROTOTYPE: $
 	CODE:
         RETVAL = max_outstanding;
-        max_outstanding = nreqs;
+        max_outstanding = maxreqs;
+	OUTPUT:
+	RETVAL
 
 void
 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
@@ -1255,14 +1430,14 @@
 }
 
 void
-aio_sleep (delay,callback=&PL_sv_undef)
+aio_busy (delay,callback=&PL_sv_undef)
 	double	delay
 	SV *	callback
 	PPCODE:
{
	dREQ;
 
-	req->type = REQ_SLEEP;
+	req->type = REQ_BUSY;
	req->fd  = delay < 0. ? 0 : delay;
	req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
 
@@ -1295,16 +1470,27 @@
	REQ_SEND;
}
 
-#if 0
+int
+aioreq_pri (int pri = 0)
+	PROTOTYPE: ;$
+	CODE:
+	RETVAL = next_pri - PRI_BIAS;
+	if (items > 0)
+	  {
+	    if (pri < PRI_MIN) pri = PRI_MIN;
+	    if (pri > PRI_MAX) pri = PRI_MAX;
+	    next_pri = pri + PRI_BIAS;
+	  }
+	OUTPUT:
+	RETVAL
 
 void
-aio_pri (int pri = DEFAULT_PRI)
+aioreq_nice (int nice = 0)
	CODE:
-	if (pri < PRI_MIN) pri = PRI_MIN;
-	if (pri > PRI_MAX) pri = PRI_MAX;
-	next_pri = pri + PRI_BIAS;
-
-#endif
+	nice = next_pri - nice;
+	if (nice < PRI_MIN) nice = PRI_MIN;
+	if (nice > PRI_MAX) nice = PRI_MAX;
+	next_pri = nice + PRI_BIAS;
 
 void
 flush ()
@@ -1313,7 +1499,7 @@
	while (nreqs)
	  {
	    poll_wait ();
-	    poll_cb ();
+	    poll_cb (0);
	  }
 
 void
@@ -1323,7 +1509,7 @@
	if (nreqs)
	  {
	    poll_wait ();
-	    poll_cb ();
+	    poll_cb (0);
	  }
 
 int
@@ -1338,7 +1524,15 @@
 poll_cb(...)
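/*
 * Illustration only, not part of the patch: the respipe wake-up scheme
 * used above.  A worker writes a single byte to the pipe only when
 * reqq_push () reports that the result queue was empty (return value 0),
 * so the event loop's fd becomes readable once per empty->non-empty
 * transition; the reader later drains the pipe (the read end is assumed
 * non-blocking here, as a real implementation would arrange).
 * Hypothetical standalone sketch:
 */
#include <unistd.h>

static int notify_pipe [2];               /* created once with pipe () */

static void producer_side (int queue_was_empty)
{
  char dummy = 0;

  if (queue_was_empty)
    write (notify_pipe [1], &dummy, 1);   /* make the read end readable */
}

static void consumer_side (void)
{
  char buf [32];

  /* drain whatever accumulated; actual results live in the queue itself */
  while (read (notify_pipe [0], buf, sizeof (buf)) == sizeof (buf))
    ;
}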
	PROTOTYPE:
	CODE:
-        RETVAL = poll_cb ();
+        RETVAL = poll_cb (0);
+	OUTPUT:
+	RETVAL
+
+int
+poll_some(int max = 0)
+	PROTOTYPE: $
+	CODE:
+        RETVAL = poll_cb (max);
 	OUTPUT:
 	RETVAL
 
@@ -1357,13 +1551,28 @@
 	OUTPUT:
 	RETVAL
 
+int
+nready()
+	PROTOTYPE:
+	CODE:
+	RETVAL = get_nready ();
+	OUTPUT:
+	RETVAL
+
+int
+npending()
+	PROTOTYPE:
+	CODE:
+	RETVAL = get_npending ();
+	OUTPUT:
+	RETVAL
+
 PROTOTYPES: DISABLE
 
 MODULE = IO::AIO                PACKAGE = IO::AIO::REQ
 
 void
 cancel (aio_req_ornot req)
-	PROTOTYPE:
	CODE:
	req_cancel (req);
 
@@ -1409,11 +1618,20 @@
 }
 
 void
+cancel_subs (aio_req_ornot req)
+	CODE:
+	req_cancel_subs (req);
+
+void
 result (aio_req grp, ...)
	CODE:
{
	int i;
-	AV *av = newAV ();
+	AV *av;
+
+	grp->errorno = errno;
+
+	av = newAV ();
 
	for (i = 1; i < items; ++i )
	  av_push (av, newSVsv (ST (i)));
@@ -1423,7 +1641,12 @@
 }
 
 void
-feed_limit (aio_req grp, int limit)
+errno (aio_req grp, int errorno = errno)
+	CODE:
+	grp->errorno = errorno;
+
+void
+limit (aio_req grp, int limit)
	CODE:
	grp->fd2 = limit;
	aio_grp_feed (grp);
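/*
 * Illustration only, not part of the patch: how aioreq_pri ()/aioreq_nice ()
 * above map a user-visible priority onto a queue slot.  Priorities are
 * clamped to the PRI_MIN..PRI_MAX range and shifted by PRI_BIAS into
 * 0 .. NUM_PRI-1 so they can index reqq's qs[]/qe[] arrays.  The concrete
 * range below (-4..4) is an assumption for the sketch; the real values are
 * defined outside the hunks shown.  Hypothetical helper:
 */
#define DEMO_PRI_MIN  (-4)
#define DEMO_PRI_MAX    4
#define DEMO_PRI_BIAS (-DEMO_PRI_MIN)

static int demo_pri_to_slot (int pri)
{
  if (pri < DEMO_PRI_MIN) pri = DEMO_PRI_MIN;
  if (pri > DEMO_PRI_MAX) pri = DEMO_PRI_MAX;

  return pri + DEMO_PRI_BIAS;   /* 0 .. 8, usable as a queue index */
}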