--- IO-AIO/AIO.xs	2006/10/24 03:17:39	1.67
+++ IO-AIO/AIO.xs	2006/10/24 17:22:17	1.72
@@ -1,3 +1,6 @@
+/* solaris */
+#define _POSIX_PTHREAD_SEMANTICS 1
+
 #if __linux
 # define _GNU_SOURCE
 #endif
@@ -58,12 +61,13 @@
 #define AIO_BUFSIZE 65536
 
 #define dBUF	\
-  char *aio_buf = malloc (AIO_BUFSIZE);	\
+  char *aio_buf;	\
+  LOCK (wrklock);	\
+  self->dbuf = aio_buf = malloc (AIO_BUFSIZE);	\
+  UNLOCK (wrklock);	\
   if (!aio_buf)	\
     return -1;
 
-#define fBUF free (aio_buf)
-
 enum {
   REQ_QUIT,
   REQ_OPEN, REQ_CLOSE,
@@ -75,7 +79,7 @@
   REQ_READDIR,
   REQ_LINK, REQ_SYMLINK,
   REQ_GROUP, REQ_NOP,
-  REQ_SLEEP,
+  REQ_BUSY,
 };
 
 #define AIO_REQ_KLASS "IO::AIO::REQ"
@@ -135,6 +139,49 @@
 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
 #endif
 
+#define LOCK(mutex)   pthread_mutex_lock   (&(mutex))
+#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
+
+/* worker threasd management */
+static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
+
+typedef struct worker {
+  /* locked by wrklock */
+  struct worker *prev, *next;
+
+  pthread_t tid;
+
+  /* locked by reslock, reqlock or wrklock */
+  aio_req req; /* currently processed request */
+  void *dbuf;
+  DIR *dirp;
+} worker;
+
+static worker wrk_first = { &wrk_first, &wrk_first, 0 };
+
+static void worker_clear (worker *wrk)
+{
+  if (wrk->dirp)
+    {
+      closedir (wrk->dirp);
+      wrk->dirp = 0;
+    }
+
+  if (wrk->dbuf)
+    {
+      free (wrk->dbuf);
+      wrk->dbuf = 0;
+    }
+}
+
+static void worker_free (worker *wrk)
+{
+  wrk->next->prev = wrk->prev;
+  wrk->prev->next = wrk->next;
+
+  free (wrk);
+}
+
 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
@@ -195,6 +242,7 @@
 
 static void req_invoke (aio_req req);
 static void req_free (aio_req req);
+static void req_cancel (aio_req req);
 
 /* must be called at most once */
 static SV *req_sv (aio_req req, const char *klass)
@@ -273,12 +321,12 @@
   while (nreqs)
     {
       int size;
-#if !(__i386 || __x86_64) /* safe without sempahore on this archs */
-      pthread_mutex_lock (&reslock);
+#if !(__i386 || __x86_64) /* safe without sempahore on these archs */
+      LOCK (reslock);
 #endif
       size = res_queue.size;
-#if !(__i386 || __x86_64) /* safe without sempahore on this archs */
-      pthread_mutex_unlock (&reslock);
+#if !(__i386 || __x86_64) /* safe without sempahore on these archs */
+      UNLOCK (reslock);
 #endif
 
       if (size)
@@ -312,16 +360,18 @@
 
           if (req->result >= 0)
             {
+              int i;
               char *buf = req->data2ptr;
               AV *av = newAV ();
 
-              while (req->result)
+              av_extend (av, req->result - 1);
+
+              for (i = 0; i < req->result; ++i)
                 {
                   SV *sv = newSVpv (buf, 0);
 
-                  av_push (av, sv);
+                  av_store (av, i, sv);
                   buf += SvCUR (sv) + 1;
-                  req->result--;
                 }
 
               rv = sv_2mortal (newRV_noinc ((SV *)av));
@@ -363,7 +413,7 @@
         break;
 
       case REQ_NOP:
-      case REQ_SLEEP:
+      case REQ_BUSY:
         break;
 
       default:
@@ -415,23 +465,31 @@
   SvREFCNT_dec (req->callback);
   Safefree (req->statdata);
 
-  if (req->type == REQ_READDIR && req->result >= 0)
+  if (req->type == REQ_READDIR)
     free (req->data2ptr);
 
   Safefree (req);
 }
 
+static void req_cancel_subs (aio_req grp)
+{
+  aio_req sub;
+
+  if (grp->type != REQ_GROUP)
+    return;
+
+  SvREFCNT_dec (grp->fh2);
+  grp->fh2 = 0;
+
+  for (sub = grp->grp_first; sub; sub = sub->grp_next)
+    req_cancel (sub);
+}
+
 static void req_cancel (aio_req req)
 {
   req->flags |= FLAG_CANCELLED;
 
-  if (req->type == REQ_GROUP)
-    {
-      aio_req sub;
-
-      for (sub = req->grp_first; sub; sub = sub->grp_next)
-        req_cancel (sub);
-    }
+  req_cancel_subs (req);
 }
 
 static int poll_cb ()
@@ -443,7 +501,7 @@
   for (;;)
     {
-      pthread_mutex_lock (&reslock);
+      LOCK (reslock);
 
       req = reqq_shift (&res_queue);
 
       if (req)
@@ -457,7 +515,7 @@
             }
         }
 
-      pthread_mutex_unlock (&reslock);
+      UNLOCK (reslock);
 
       if (!req)
         break;
@@ -501,8 +559,12 @@
 
 static void start_thread (void)
 {
+  worker *wrk = calloc (1, sizeof (worker));
+
+  if (!wrk)
+    croak ("unable to allocate worker thread data");
+
   sigset_t fullsigset, oldsigset;
-  pthread_t tid;
   pthread_attr_t attr;
 
   pthread_attr_init (&attr);
@@ -510,12 +572,23 @@
   pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
 
   sigfillset (&fullsigset);
+
+  LOCK (wrklock);
   sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
 
-  if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
-    started++;
+  if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
+    {
+      wrk->prev = &wrk_first;
+      wrk->next = wrk_first.next;
+      wrk_first.next->prev = wrk;
+      wrk_first.next = wrk;
+      started++;
+    }
+  else
+    free (wrk);
 
   sigprocmask (SIG_SETMASK, &oldsigset, 0);
+  UNLOCK (wrklock);
 }
 
 static void req_send (aio_req req)
@@ -525,10 +598,10 @@
 
   ++nreqs;
 
-  pthread_mutex_lock (&reqlock);
+  LOCK (reqlock);
   reqq_push (&req_queue, req);
   pthread_cond_signal (&reqwait);
-  pthread_mutex_unlock (&reqlock);
+  UNLOCK (reqlock);
 
   if (nreqs > max_outstanding)
     for (;;)
@@ -611,12 +684,12 @@
   ssize_t res;
   off_t ooffset;
 
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
   ooffset = lseek (fd, 0, SEEK_CUR);
   lseek (fd, offset, SEEK_SET);
   res = read (fd, buf, count);
   lseek (fd, ooffset, SEEK_SET);
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 
   return res;
 }
@@ -626,12 +699,12 @@
   ssize_t res;
   off_t ooffset;
 
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
   ooffset = lseek (fd, 0, SEEK_CUR);
   lseek (fd, offset, SEEK_SET);
   res = write (fd, buf, count);
   lseek (fd, offset, SEEK_SET);
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 
   return res;
 }
@@ -657,8 +730,6 @@
       count -= len;
     }
 
-  fBUF;
-
   errno = 0;
 }
 #endif
@@ -673,7 +744,7 @@
   struct dirent *e;
   int errorno;
 
-  pthread_mutex_lock (&readdirlock);
+  LOCK (readdirlock);
 
   e = readdir (dirp);
   errorno = errno;
@@ -686,7 +757,7 @@
   else
     *res = 0;
 
-  pthread_mutex_unlock (&readdirlock);
+  UNLOCK (readdirlock);
 
   errno = errorno;
   return e ? 0 : -1;
@@ -694,7 +765,7 @@
 #endif
 
 /* sendfile always needs emulation */
-static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count)
+static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
 {
   ssize_t res;
 
@@ -781,15 +852,13 @@
           res += cnt;
           count -= cnt;
         }
-
-      fBUF;
     }
 
   return res;
 }
 
 /* read a full directory */
-static int scandir_ (const char *path, void **namesp)
+static void scandir_ (aio_req req, worker *self)
 {
   DIR *dirp;
   union
   {
@@ -804,14 +873,14 @@
   int res = 0;
   int errorno;
 
-  dirp = opendir (path);
-  if (!dirp)
-    return -1;
+  LOCK (wrklock);
+  self->dirp = dirp = opendir (req->dataptr);
+  self->dbuf = u = malloc (sizeof (*u));
+  UNLOCK (wrklock);
 
-  u = malloc (sizeof (*u));
-  names = malloc (memlen);
+  req->data2ptr = names = malloc (memlen);
 
-  if (u && names)
+  if (dirp && u && names)
     for (;;)
       {
         errno = 0;
@@ -831,7 +900,10 @@
             while (memofs + len > memlen)
               {
                 memlen *= 2;
-                names = realloc (names, memlen);
+                LOCK (wrklock);
+                req->data2ptr = names = realloc (names, memlen);
+                UNLOCK (wrklock);
+
                 if (!names)
                   break;
               }
@@ -841,19 +913,10 @@
           }
       }
 
-  errorno = errno;
-  free (u);
-  closedir (dirp);
-
-  if (errorno)
-    {
-      free (names);
-      errno = errorno;
-      res = -1;
-    }
-
-  *namesp = (void *)names;
-  return res;
+  if (errno)
+    res = -1;
+
+  req->result = res;
 }
 
 /*****************************************************************************/
 
@@ -862,14 +925,15 @@
 {
   aio_req req;
   int type;
+  worker *self = (worker *)thr_arg;
 
   do
     {
-      pthread_mutex_lock (&reqlock);
+      LOCK (reqlock);
 
       for (;;)
        {
-          req = reqq_shift (&req_queue);
+          self->req = req = reqq_shift (&req_queue);
 
          if (req)
            break;
@@ -877,7 +941,7 @@
          pthread_cond_wait (&reqwait, &reqlock);
        }
 
-      pthread_mutex_unlock (&reqlock);
+      UNLOCK (reqlock);
 
      errno = 0; /* strictly unnecessary */
      type = req->type; /* remember type for QUIT check */
@@ -889,7 +953,7 @@
          case REQ_WRITE:     req->result = pwrite    (req->fd, req->dataptr, req->length, req->offset); break;
 
          case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
-          case REQ_SENDFILE:  req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break;
+          case REQ_SENDFILE:  req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
 
          case REQ_STAT:      req->result = stat      (req->dataptr, req->statdata); break;
          case REQ_LSTAT:     req->result = lstat     (req->dataptr, req->statdata); break;
@@ -905,9 +969,9 @@
          case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
          case REQ_FSYNC:     req->result = fsync     (req->fd); break;
 
-          case REQ_READDIR:   req->result = scandir_  (req->dataptr, &req->data2ptr); break;
+          case REQ_READDIR:   scandir_ (req, self); break;
 
-          case REQ_SLEEP:
+          case REQ_BUSY:
            {
              struct timeval tv;
 
@@ -929,16 +993,23 @@
 
      req->errorno = errno;
 
-      pthread_mutex_lock (&reslock);
+      LOCK (reslock);
 
      if (!reqq_push (&res_queue, req))
        /* write a dummy byte to the pipe so fh becomes ready */
        write (respipe [1], &respipe, 1);
 
-      pthread_mutex_unlock (&reslock);
+      self->req = 0;
+      worker_clear (self);
+
+      UNLOCK (reslock);
    }
  while (type != REQ_QUIT);
 
+  LOCK (wrklock);
+  worker_free (self);
+  UNLOCK (wrklock);
+
  return 0;
}
 
@@ -946,40 +1017,54 @@
 
 static void atfork_prepare (void)
 {
-  pthread_mutex_lock (&reqlock);
-  pthread_mutex_lock (&reslock);
+  LOCK (wrklock);
+  LOCK (reqlock);
+  LOCK (reslock);
 #if !HAVE_PREADWRITE
-  pthread_mutex_lock (&preadwritelock);
+  LOCK (preadwritelock);
 #endif
 #if !HAVE_READDIR_R
-  pthread_mutex_lock (&readdirlock);
+  LOCK (readdirlock);
 #endif
 }
 
 static void atfork_parent (void)
 {
 #if !HAVE_READDIR_R
-  pthread_mutex_unlock (&readdirlock);
+  UNLOCK (readdirlock);
 #endif
 #if !HAVE_PREADWRITE
-  pthread_mutex_unlock (&preadwritelock);
+  UNLOCK (preadwritelock);
 #endif
-  pthread_mutex_unlock (&reslock);
-  pthread_mutex_unlock (&reqlock);
+  UNLOCK (reslock);
+  UNLOCK (reqlock);
+  UNLOCK (wrklock);
 }
 
 static void atfork_child (void)
 {
   aio_req prv;
 
-  started = 0;
-
   while (prv = reqq_shift (&req_queue))
     req_free (prv);
 
   while (prv = reqq_shift (&res_queue))
     req_free (prv);
-
+
+  while (wrk_first.next != &wrk_first)
+    {
+      worker *wrk = wrk_first.next;
+
+      if (wrk->req)
+        req_free (wrk->req);
+
+      worker_clear (wrk);
+      worker_free (wrk);
+    }
+
+  started = 0;
+  nreqs = 0;
+
   close (respipe [0]);
   close (respipe [1]);
   create_pipe ();
@@ -1264,14 +1349,14 @@
 }
 
 void
-aio_sleep (delay,callback=&PL_sv_undef)
+aio_busy (delay,callback=&PL_sv_undef)
 	double	delay
 	SV *	callback
 	PPCODE:
{
 	dREQ;
 
-	req->type = REQ_SLEEP;
+	req->type = REQ_BUSY;
 	req->fd = delay < 0. ? 0 : delay;
 	req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
 
@@ -1306,10 +1391,18 @@
 
 void
 aioreq_pri (int pri = DEFAULT_PRI)
-	CODE:
-	if (pri < PRI_MIN) pri = PRI_MIN;
-	if (pri > PRI_MAX) pri = PRI_MAX;
-	next_pri = pri + PRI_BIAS;
+	CODE:
+	if (pri < PRI_MIN) pri = PRI_MIN;
+	if (pri > PRI_MAX) pri = PRI_MAX;
+	next_pri = pri + PRI_BIAS;
+
+void
+aioreq_nice (int nice = 0)
+	CODE:
+	nice = next_pri - nice;
+	if (nice < PRI_MIN) nice = PRI_MIN;
+	if (nice > PRI_MAX) nice = PRI_MAX;
+	next_pri = nice + PRI_BIAS;
 
 void
 flush ()
@@ -1413,6 +1506,11 @@
 }
 
 void
+cancel_subs (aio_req_ornot req)
+	CODE:
+	req_cancel_subs (req);
+
+void
 result (aio_req grp, ...)
 	CODE:
{
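
Editor's note (illustration only, not part of the patch above): the patch keeps per-thread state in a sentinel-based doubly-linked list. wrk_first points at itself when the list is empty, start_thread links each new worker in directly behind the sentinel while holding wrklock, and worker_free/atfork_child unlink and drain the list. The standalone sketch below shows only that list idiom, with made-up names (node, list_first, node_new, node_free); it is single-threaded, so it omits the wrklock locking the real code needs.

/* sketch of the sentinel-based doubly-linked list used for worker records */
#include <stdio.h>
#include <stdlib.h>

typedef struct node {
  struct node *prev, *next;
  int id;
} node;

/* sentinel: an empty list points at itself, like wrk_first in the patch */
static node list_first = { &list_first, &list_first, 0 };

static node *node_new (int id)
{
  node *n = calloc (1, sizeof (node));

  if (!n)
    abort ();

  n->id = id;

  /* link in directly after the sentinel, as start_thread does */
  n->prev = &list_first;
  n->next = list_first.next;
  list_first.next->prev = n;
  list_first.next = n;

  return n;
}

static void node_free (node *n)
{
  /* unlink without any empty-list special case, as worker_free does */
  n->next->prev = n->prev;
  n->prev->next = n->next;

  free (n);
}

int main (void)
{
  int i;

  for (i = 1; i <= 3; ++i)
    node_new (i);

  /* drain the list front to back, like atfork_child draining wrk_first */
  while (list_first.next != &list_first)
    {
      node *n = list_first.next;

      printf ("freeing node %d\n", n->id);
      node_free (n);
    }

  return 0;
}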