--- AnyEvent-HTTP/HTTP.pm 2008/06/05 15:34:00 1.11
+++ AnyEvent-HTTP/HTTP.pm 2011/06/14 05:20:13 1.106
@@ -6,6 +6,10 @@
   use AnyEvent::HTTP;

+  http_get "http://www.nethype.de/", sub { print $_[1] };
+
+  # ... do something else here
+
=head1 DESCRIPTION

This module is an L<AnyEvent> user, you need to make sure that you use and
@@ -13,7 +17,7 @@
This module implements a simple, stateless and non-blocking HTTP
client. It supports GET, POST and other request methods, cookies and more,
-all on a very low level. It can follow redirects supports proxies and
+all on a very low level. It can follow redirects, supports proxies, and
automatically limits the number of connections to the values specified in
the RFC.

@@ -34,70 +38,104 @@
package AnyEvent::HTTP;

-use strict;
-no warnings;
+use common::sense;

-use Carp;
+use Errno ();

-use AnyEvent ();
+use AnyEvent 5.0 ();
use AnyEvent::Util ();
-use AnyEvent::Socket ();
use AnyEvent::Handle ();

use base Exporter::;

-our $VERSION = '1.0';
+our $VERSION = '2.11';

-our @EXPORT = qw(http_get http_request);
+our @EXPORT = qw(http_get http_post http_head http_request);

-our $USERAGENT = "Mozilla/5.0 (compatible; AnyEvent::HTTP/$VERSION; +http://software.schmorp.de/pkg/AnyEvent)";
+our $USERAGENT = "Mozilla/5.0 (compatible; U; AnyEvent-HTTP/$VERSION; +http://software.schmorp.de/pkg/AnyEvent)";

our $MAX_RECURSE = 10;
-our $MAX_PERSISTENT = 8;
-our $PERSISTENT_TIMEOUT = 2;
+our $PERSISTENT_TIMEOUT = 3;
our $TIMEOUT = 300;
-
-# changing these is evil
-our $MAX_PERSISTENT_PER_HOST = 2;
-our $MAX_PER_HOST = 4;
+our $MAX_PER_HOST = 4; # changing this is evil

our $PROXY;
+our $ACTIVE = 0;

-my %KA_COUNT; # number of open keep-alive connections per host
+my %KA_CACHE; # indexed by uhost currently, points to [$handle...] array
my %CO_SLOT;  # number of open connections, and wait queue, per host

=item http_get $url, key => value..., $cb->($data, $headers)

Executes an HTTP-GET request. See the http_request function for details on
-additional parameters.
+additional parameters and the return value.

=item http_head $url, key => value..., $cb->($data, $headers)

-Executes an HTTP-HEAD request. See the http_request function for details on
-additional parameters.
+Executes an HTTP-HEAD request. See the http_request function for details
+on additional parameters and the return value.

=item http_post $url, $body, key => value..., $cb->($data, $headers)

-Executes an HTTP-POST request with a request body of C<$bod>. See the
-http_request function for details on additional parameters.
+Executes an HTTP-POST request with a request body of C<$body>. See the
+http_request function for details on additional parameters and the return
+value.

=item http_request $method => $url, key => value..., $cb->($data, $headers)

Executes a HTTP request of type C<$method> (e.g. C<GET>, C<POST>). The URL
must be an absolute http or https URL.

-The callback will be called with the response data as first argument
-(or C<undef> if it wasn't available due to errors), and a hash-ref with
-response headers as second argument.
+When called in void context, nothing is returned. In other contexts,
+C<http_request> returns a "cancellation guard" - you have to keep the
+object at least alive until the callback gets called. If the object gets
+destroyed before the callback is called, the request will be cancelled.
+
+The callback will be called with the response body data as first argument
+(or C<undef> if an error occurred), and a hash-ref with response headers
+(and trailers) as second argument.

All the headers in that hash are lowercased. In addition to the response
-headers, the three "pseudo-headers" C<HTTPVersion>, C<Status> and
-C<Reason> contain the three parts of the HTTP Status-Line of the same
-name. If the server sends a header multiple lines, then their contents
-will be joined together with C<\x00>.
+headers, the "pseudo-headers" (uppercase to avoid clashing with possible
+response headers) C<HTTPVersion>, C<Status> and C<Reason> contain the
+three parts of the HTTP Status-Line of the same name. If an error occurs
+during the body phase of a request, then the original C<Status> and
+C<Reason> values from the header are available as C<OrigStatus> and
+C<OrigReason>.
+
+The pseudo-header C<URL> contains the actual URL (which can differ from
+the requested URL when following redirects - for example, you might get
+an error that your URL scheme is not supported even though your URL is a
+valid http URL because it redirected to an ftp URL, in which case you can
+look at the URL pseudo header).
+
+The pseudo-header C<Redirect> only exists when the request was a result
+of an internal redirect. In that case it is an array reference with
+the C<($data, $headers)> from the redirect response. Note that this
+response could in turn be the result of a redirect itself, and C<<
+$headers->{Redirect}[1]{Redirect} >> will then contain the original
+response, and so on.
+
+If the server sends a header multiple times, then their contents will be
+joined together with a comma (C<,>), as per the HTTP spec.

If an internal error occurs, such as not being able to resolve a hostname,
-then C<$data> will be C<undef>, C<< $headers->{Status} >> will be C<599>
-and the C<Reason> pseudo-header will contain an error message.
+then C<$data> will be C<undef>, C<< $headers->{Status} >> will be
+C<590>-C<599> and the C<Reason> pseudo-header will contain an error
+message. Currently the following status codes are used:
+
+=over 4
+
+=item 595 - errors during connection establishment, proxy handshake.
+
+=item 596 - errors during TLS negotiation, request sending and header processing.
+
+=item 597 - errors during body receiving or processing.
+
+=item 598 - user aborted request via C<on_header> or C<on_body>.
+
+=item 599 - other, usually nonretryable, errors (garbled URL etc.).
+
+=back

A typical callback might look like this:

@@ -123,24 +161,40 @@

=item headers => hashref

-The request headers to use.
+The request headers to use. Currently, C<http_request> may provide its own
+C<Host:>, C<Content-Length:>, C<Connection:> and C<Cookie:> headers and
+will provide defaults at least for C<TE:>, C<Referer:> and C<User-Agent:>
+(this can be suppressed by using C<undef> for these headers in which case
+they won't be sent at all).
+
+You really should provide your own C<User-Agent:> header value that is
+appropriate for your program - I wouldn't be surprised if the default
+AnyEvent string gets blocked by webservers sooner or later.
+
+Also, make sure that your header names and values do not contain any
+embedded newlines.

=item timeout => $seconds

The time-out to use for various stages - each connect attempt will reset
-the timeout, as will read or write activity. Default timeout is 5 minutes.
+the timeout, as will read or write activity, i.e. this is not an overall
+timeout.
+
+Default timeout is 5 minutes.

=item proxy => [$host, $port[, $scheme]] or undef

-Use the given http proxy for all requests. If not specified, then the
-default proxy (as specified by C<$ENV{http_proxy}>) is used.
+Use the given http proxy for all requests, or no proxy if C<undef> is
+used.

-C<$scheme> must be either missing or C<http> for HTTP, or C<https> for
-HTTPS.
+C<$scheme> must be either missing or must be C<http> for HTTP.
+
+If not specified, then the default proxy is used (see
+C<AnyEvent::HTTP::set_proxy>).

=item body => $string

-The request body, usually empty. Will be-sent as-is (future versions of
+The request body, usually empty. Will be sent as-is (future versions of
this module might offer more options).
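As a small illustration (this example is not part of the original documentation; the URL is just a placeholder taken from the synopsis), a form-style POST that supplies both a request body and an explicit content type might look like this:

   http_post "http://www.nethype.de/", "name=AnyEvent&lang=Perl",
      headers => { "content-type" => "application/x-www-form-urlencoded" },
      sub {
         my ($body, $hdr) = @_;
         print "POST finished with status $hdr->{Status}\n";
      };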
=item cookie_jar => $hash_ref

@@ -148,31 +202,192 @@

Passing this parameter enables (simplified) cookie-processing, loosely
based on the original netscape specification.

-The C<$hash_ref> must be an (initially empty) hash reference which will
-get updated automatically. It is possible to save the cookie_jar to
-persistent storage with something like JSON or Storable, but this is not
-recommended, as expire times are currently being ignored.
-
-Note that this cookie implementation is not of very high quality, nor
-meant to be complete. If you want complete cookie management you have to
-do that on your own. C<cookie_jar> is meant as a quick fix to get some
-cookie-using sites working. Cookies are a privacy disaster, do not use
-them unless required to.
+The C<$hash_ref> must be an (initially empty) hash reference which
+will get updated automatically. It is possible to save the cookie jar
+to persistent storage with something like JSON or Storable - see the
+C<AnyEvent::HTTP::cookie_jar_expire> function if you wish to remove
+expired or session-only cookies, and also for documentation on the format
+of the cookie jar.
+
+Note that this cookie implementation is not meant to be complete. If
+you want complete cookie management you have to do that on your
+own. C<cookie_jar> is meant as a quick fix to get most cookie-using sites
+working. Cookies are a privacy disaster, do not use them unless required
+to.
+
+When cookie processing is enabled, the C<Cookie:> and C<Set-Cookie:>
+headers will be set and handled by this module, otherwise they will be
+left untouched.
+
+=item tls_ctx => $scheme | $tls_ctx
+
+Specifies the AnyEvent::TLS context to be used for https connections. This
+parameter follows the same rules as the C<tls_ctx> parameter to
+L<AnyEvent::Handle>, but additionally, the two strings C<low> or
+C<high> can be specified, which give you a predefined low-security (no
+verification, highest compatibility) and high-security (CA and common-name
+verification) TLS context.
+
+The default for this option is C<low>, which could be interpreted as "give
+me the page, no matter what".
+
+See also the C<session> parameter.
+
+=item session => $string
+
+The module might reuse connections to the same host internally. Sometimes
+(e.g. when using TLS), you do not want to reuse connections from other
+sessions. This can be achieved by setting this parameter to some unique
+ID (such as the address of an object storing your state data, or the TLS
+context) - only connections using the same unique ID will be reused.
+
+=item on_prepare => $callback->($fh)
+
+In rare cases you need to "tune" the socket before it is used to
+connect (for example, to bind it on a given IP address). This parameter
+overrides the prepare callback passed to C<AnyEvent::Socket::tcp_connect>
+and behaves exactly the same way (e.g. it has to provide a
+timeout). See the description for the C<$prepare_cb> argument of
+C<AnyEvent::Socket::tcp_connect> for details.
+
+=item tcp_connect => $callback->($host, $service, $connect_cb, $prepare_cb)
+
+In even rarer cases you want total control over how AnyEvent::HTTP
+establishes connections. Normally it uses L<AnyEvent::Socket/tcp_connect>
+to do this, but you can provide your own C<tcp_connect> function -
+obviously, it has to follow the same calling conventions, except that it
+may always return a connection guard object.
+
+There are probably lots of weird uses for this function, starting from
+tracing the hosts C<AnyEvent::HTTP> actually tries to connect, to (inexact
+but fast) host => IP address caching or even socks protocol support.
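For example (a sketch that is not part of the original documentation), a C<tcp_connect> replacement that merely logs each connection attempt and then delegates to L<AnyEvent::Socket> might look like this:

   use AnyEvent::Socket ();

   my $tracing_connect = sub {
      my ($host, $service, $connect_cb, $prepare_cb) = @_;

      warn "connecting to $host:$service\n";

      # delegate the real work and return the connection guard
      AnyEvent::Socket::tcp_connect $host, $service, $connect_cb, $prepare_cb
   };

   http_get "http://www.nethype.de/",
      tcp_connect => $tracing_connect,
      sub { print $_[1] };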
+ +=item on_header => $callback->($headers) + +When specified, this callback will be called with the header hash as soon +as headers have been successfully received from the remote server (not on +locally-generated errors). + +It has to return either true (in which case AnyEvent::HTTP will continue), +or false, in which case AnyEvent::HTTP will cancel the download (and call +the finish callback with an error code of C<598>). + +This callback is useful, among other things, to quickly reject unwanted +content, which, if it is supposed to be rare, can be faster than first +doing a C request. + +The downside is that cancelling the request makes it impossible to re-use +the connection. Also, the C callback will not receive any +trailer (headers sent after the response body). + +Example: cancel the request unless the content-type is "text/html". + + on_header => sub { + $_[0]{"content-type"} =~ /^text\/html\s*(?:;|$)/ + }, + +=item on_body => $callback->($partial_body, $headers) + +When specified, all body data will be passed to this callback instead of +to the completion callback. The completion callback will get the empty +string instead of the body data. + +It has to return either true (in which case AnyEvent::HTTP will continue), +or false, in which case AnyEvent::HTTP will cancel the download (and call +the completion callback with an error code of C<598>). + +The downside to cancelling the request is that it makes it impossible to +re-use the connection. + +This callback is useful when the data is too large to be held in memory +(so the callback writes it to a file) or when only some information should +be extracted, or when the body should be processed incrementally. + +It is usually preferred over doing your own body handling via +C, but in case of streaming APIs, where HTTP is +only used to create a connection, C is the better +alternative, as it allows you to install your own event handler, reducing +resource usage. + +=item want_body_handle => $enable + +When enabled (default is disabled), the behaviour of AnyEvent::HTTP +changes considerably: after parsing the headers, and instead of +downloading the body (if any), the completion callback will be +called. Instead of the C<$body> argument containing the body data, the +callback will receive the L object associated with the +connection. In error cases, C will be passed. When there is no body +(e.g. status C<304>), the empty string will be passed. + +The handle object might or might not be in TLS mode, might be connected +to a proxy, be a persistent connection, use chunked transfer encoding +etc., and configured in unspecified ways. The user is responsible for this +handle (it will not be used by this module anymore). + +This is useful with some push-type services, where, after the initial +headers, an interactive protocol is used (typical example would be the +push-style twitter API which starts a JSON/XML stream). + +If you think you need this, first have a look at C, to see if +that doesn't solve your problem in a better way. + +=item persistent => $boolean + +Try to create/reuse a persistent connection. When this flag is set +(default: true for idempotent requests, false for all others), then +C tries to re-use an existing (previously-created) +persistent connection to the host and, failing that, tries to create a new +one. + +Requests failing in certain ways will be automatically retried once, which +is dangerous for non-idempotent requests, which is why it defaults to off +for them. 
The reason for this is because the bozos who designed HTTP/1.1 +made it impossible to distinguish between a fatal error and a normal +connection timeout, so you never know whether there was a problem with +your request or not. + +When reusing an existent connection, many parameters (such as TLS context) +will be ignored. See the C parameter for a workaround. + +=item keepalive => $boolean + +Only used when C is also true. This parameter decides whether +C tries to handshake a HTTP/1.0-style keep-alive connection +(as opposed to only a HTTP/1.1 persistent connection). + +The default is true, except when using a proxy, in which case it defaults +to false, as HTTP/1.0 proxies cannot support this in a meaningful way. + +=item handle_params => { key => value ... } + +The key-value pairs in this hash will be passed to any L +constructor that is called - not all requests will create a handle, and +sometimes more than one is created, so this parameter is only good for +setting hints. + +Example: set the maximum read size to 4096, to potentially conserve memory +at the cost of speed. + + handle_params => { + max_read_size => 4096, + }, =back -Example: make a simple HTTP GET request for http://www.nethype.de/ +Example: do a simple HTTP GET request for http://www.nethype.de/ and print +the response body. http_request GET => "http://www.nethype.de/", sub { my ($body, $hdr) = @_; print "$body\n"; }; -Example: make a HTTP HEAD request on https://www.google.com/, use a +Example: do a HTTP HEAD request on https://www.google.com/, use a timeout of 30 seconds. http_request GET => "https://www.google.com", + headers => { "user-agent" => "MySearchClient 1.0" }, timeout => 30, sub { my ($body, $hdr) = @_; @@ -181,24 +396,39 @@ } ; +Example: do another simple HTTP GET request, but immediately try to +cancel it. 
+ + my $request = http_request GET => "http://www.nethype.de/", sub { + my ($body, $hdr) = @_; + print "$body\n"; + }; + + undef $request; + =cut +############################################################################# +# wait queue/slots + +sub _slot_schedule; sub _slot_schedule($) { my $host = shift; while ($CO_SLOT{$host}[0] < $MAX_PER_HOST) { if (my $cb = shift @{ $CO_SLOT{$host}[1] }) { - # somebody wnats that slot + # somebody wants that slot ++$CO_SLOT{$host}[0]; + ++$ACTIVE; $cb->(AnyEvent::Util::guard { + --$ACTIVE; --$CO_SLOT{$host}[0]; _slot_schedule $host; }); } else { # nobody wants the slot, maybe we can forget about it delete $CO_SLOT{$host} unless $CO_SLOT{$host}[0]; - warn "$host deleted" unless $CO_SLOT{$host}[0];#d# last; } } @@ -211,12 +441,265 @@ _slot_schedule $_[0]; } -sub http_request($$$;@) { +############################################################################# +# cookie handling + +# expire cookies +sub cookie_jar_expire($;$) { + my ($jar, $session_end) = @_; + + %$jar = () if $jar->{version} != 1; + + my $anow = AE::now; + + while (my ($chost, $paths) = each %$jar) { + next unless ref $paths; + + while (my ($cpath, $cookies) = each %$paths) { + while (my ($cookie, $kv) = each %$cookies) { + if (exists $kv->{_expires}) { + delete $cookies->{$cookie} + if $anow > $kv->{_expires}; + } elsif ($session_end) { + delete $cookies->{$cookie}; + } + } + + delete $paths->{$cpath} + unless %$cookies; + } + + delete $jar->{$chost} + unless %$paths; + } +} + +# extract cookies from jar +sub cookie_jar_extract($$$$) { + my ($jar, $scheme, $host, $path) = @_; + + %$jar = () if $jar->{version} != 1; + + my @cookies; + + while (my ($chost, $paths) = each %$jar) { + next unless ref $paths; + + if ($chost =~ /^\./) { + next unless $chost eq substr $host, -length $chost; + } elsif ($chost =~ /\./) { + next unless $chost eq $host; + } else { + next; + } + + while (my ($cpath, $cookies) = each %$paths) { + next unless $cpath eq substr $path, 0, length $cpath; + + while (my ($cookie, $kv) = each %$cookies) { + next if $scheme ne "https" && exists $kv->{secure}; + + if (exists $kv->{_expires} and AE::now > $kv->{_expires}) { + delete $cookies->{$cookie}; + next; + } + + my $value = $kv->{value}; + + if ($value =~ /[=;,[:space:]]/) { + $value =~ s/([\\"])/\\$1/g; + $value = "\"$value\""; + } + + push @cookies, "$cookie=$value"; + } + } + } + + \@cookies +} + +# parse set_cookie header into jar +sub cookie_jar_set_cookie($$$$) { + my ($jar, $set_cookie, $host, $date) = @_; + + my $anow = int AE::now; + my $snow; # server-now + + for ($set_cookie) { + # parse NAME=VALUE + my @kv; + + # expires is not http-compliant in the original cookie-spec, + # we support the official date format and some extensions + while ( + m{ + \G\s* + (?: + expires \s*=\s* ([A-Z][a-z][a-z]+,\ [^,;]+) + | ([^=;,[:space:]]+) (?: \s*=\s* (?: "((?:[^\\"]+|\\.)*)" | ([^;,[:space:]]*) ) )? + ) + }gcxsi + ) { + my $name = $2; + my $value = $4; + + if (defined $1) { + # expires + $name = "expires"; + $value = $1; + } elsif (defined $3) { + # quoted + $value = $3; + $value =~ s/\\(.)/$1/gs; + } + + push @kv, @kv ? 
lc $name : $name, $value; + + last unless /\G\s*;/gc; + } + + last unless @kv; + + my $name = shift @kv; + my %kv = (value => shift @kv, @kv); + + if (exists $kv{"max-age"}) { + $kv{_expires} = $anow + delete $kv{"max-age"}; + } elsif (exists $kv{expires}) { + $snow ||= parse_date ($date) || $anow; + $kv{_expires} = $anow + (parse_date (delete $kv{expires}) - $snow); + } else { + delete $kv{_expires}; + } + + my $cdom; + my $cpath = (delete $kv{path}) || "/"; + + if (exists $kv{domain}) { + $cdom = delete $kv{domain}; + + $cdom =~ s/^\.?/./; # make sure it starts with a "." + + next if $cdom =~ /\.$/; + + # this is not rfc-like and not netscape-like. go figure. + my $ndots = $cdom =~ y/.//; + next if $ndots < ($cdom =~ /\.[^.][^.]\.[^.][^.]$/ ? 3 : 2); + } else { + $cdom = $host; + } + + # store it + $jar->{version} = 1; + $jar->{lc $cdom}{$cpath}{$name} = \%kv; + + redo if /\G\s*,/gc; + } +} + +############################################################################# +# keepalive/persistent connection cache + +# fetch a connection from the keepalive cache +sub ka_fetch($) { + my $ka_key = shift; + + my $hdl = pop @{ $KA_CACHE{$ka_key} }; # currently we reuse the MOST RECENTLY USED connection + delete $KA_CACHE{$ka_key} + unless @{ $KA_CACHE{$ka_key} }; + + $hdl +} + +sub ka_store($$) { + my ($ka_key, $hdl) = @_; + + my $kaa = $KA_CACHE{$ka_key} ||= []; + + my $destroy = sub { + my @ka = grep $_ != $hdl, @{ $KA_CACHE{$ka_key} }; + + $hdl->destroy; + + @ka + ? $KA_CACHE{$ka_key} = \@ka + : delete $KA_CACHE{$ka_key}; + }; + + # on error etc., destroy + $hdl->on_error ($destroy); + $hdl->on_eof ($destroy); + $hdl->on_read ($destroy); + $hdl->timeout ($PERSISTENT_TIMEOUT); + + push @$kaa, $hdl; + shift @$kaa while @$kaa > $MAX_PER_HOST; +} + +############################################################################# +# utilities + +# continue to parse $_ for headers and place them into the arg +sub _parse_hdr() { + my %hdr; + + # things seen, not parsed: + # p3pP="NON CUR OTPi OUR NOR UNI" + + $hdr{lc $1} .= ",$2" + while /\G + ([^:\000-\037]*): + [\011\040]* + ((?: [^\012]+ | \012[\011\040] )*) + \012 + /gxc; + + /\G$/ + or return; + + # remove the "," prefix we added to all headers above + substr $_, 0, 1, "" + for values %hdr; + + \%hdr +} + +############################################################################# +# http_get + +our $qr_nlnl = qr{(? 1, sslv2 => 1 }; +our $TLS_CTX_HIGH = { cache => 1, verify => 1, verify_peername => "https" }; + +# maybe it should just become a normal object :/ + +sub _destroy_state(\%) { + my ($state) = @_; + + $state->{handle}->destroy if $state->{handle}; + %$state = (); +} + +sub _error(\%$$) { + my ($state, $cb, $hdr) = @_; + + &_destroy_state ($state); + + $cb->(undef, $hdr); + () +} + +sub http_request($$@) { my $cb = pop; my ($method, $url, %arg) = @_; my %hdr; + $arg{tls_ctx} = $TLS_CTX_LOW if $arg{tls_ctx} eq "low" || !exists $arg{tls_ctx}; + $arg{tls_ctx} = $TLS_CTX_HIGH if $arg{tls_ctx} eq "high"; + $method = uc $method; if (my $hdr = $arg{headers}) { @@ -225,239 +708,484 @@ } } - my $recurse = exists $arg{recurse} ? $arg{recurse} : $MAX_RECURSE; + # pseudo headers for all subsequent responses + my @pseudo = (URL => $url); + push @pseudo, Redirect => delete $arg{Redirect} if exists $arg{Redirect}; - return $cb->(undef, { Status => 599, Reason => "recursion limit reached" }) + my $recurse = exists $arg{recurse} ? 
delete $arg{recurse} : $MAX_RECURSE; + + return $cb->(undef, { @pseudo, Status => 599, Reason => "Too many redirections" }) if $recurse < 0; - my $proxy = $arg{proxy} || $PROXY; + my $proxy = exists $arg{proxy} ? $arg{proxy} : $PROXY; my $timeout = $arg{timeout} || $TIMEOUT; - $hdr{"user-agent"} ||= $USERAGENT; - - my ($scheme, $authority, $upath, $query, $fragment) = - $url =~ m|(?:([^:/?#]+):)?(?://([^/?#]*))?([^?#]*)(?:\?([^#]*))?(?:#(.*))?|; + my ($uscheme, $uauthority, $upath, $query, undef) = # ignore fragment + $url =~ m|^([^:]+):(?://([^/?#]*))?([^?#]*)(?:(\?[^#]*))?(?:#(.*))?$|; - $scheme = lc $scheme; + $uscheme = lc $uscheme; - my $uport = $scheme eq "http" ? 80 - : $scheme eq "https" ? 443 - : return $cb->(undef, { Status => 599, Reason => "only http and https URL schemes supported" }); + my $uport = $uscheme eq "http" ? 80 + : $uscheme eq "https" ? 443 + : return $cb->(undef, { @pseudo, Status => 599, Reason => "Only http and https URL schemes supported" }); - $authority =~ /^(?: .*\@ )? ([^\@:]+) (?: : (\d+) )?$/x - or return $cb->(undef, { Status => 599, Reason => "unparsable URL" }); + $uauthority =~ /^(?: .*\@ )? ([^\@:]+) (?: : (\d+) )?$/x + or return $cb->(undef, { @pseudo, Status => 599, Reason => "Unparsable URL" }); - my $uhost = $1; + my $uhost = lc $1; $uport = $2 if defined $2; + $hdr{host} = defined $2 ? "$uhost:$2" : "$uhost" + unless exists $hdr{host}; + $uhost =~ s/^\[(.*)\]$/$1/; - $upath .= "?$query" if length $query; + $upath .= $query if length $query; $upath =~ s%^/?%/%; # cookie processing if (my $jar = $arg{cookie_jar}) { - %$jar = () if $jar->{version} < 1; - - my @cookie; - - while (my ($chost, $v) = each %$jar) { - next unless $chost eq substr $uhost, -length $chost; - next unless $chost =~ /^\./; - - while (my ($cpath, $v) = each %$v) { - next unless $cpath eq substr $upath, 0, length $cpath; - - while (my ($k, $v) = each %$v) { - next if $scheme ne "https" && exists $v->{secure}; - push @cookie, "$k=$v->{value}"; - } - } - } - - $hdr{cookie} = join "; ", @cookie - if @cookie; + my $cookies = cookie_jar_extract $jar, $uscheme, $uhost, $upath; + + $hdr{cookie} = join "; ", @$cookies + if @$cookies; } - my ($rhost, $rport, $rpath); # request host, port, path + my ($rhost, $rport, $rscheme, $rpath); # request host, port, path if ($proxy) { - ($rhost, $rport, $scheme) = @$proxy; - $rpath = $url; + ($rpath, $rhost, $rport, $rscheme) = ($url, @$proxy); + + $rscheme = "http" unless defined $rscheme; + + # don't support https requests over https-proxy transport, + # can't be done with tls as spec'ed, unless you double-encrypt. + $rscheme = "http" if $uscheme eq "https" && $rscheme eq "https"; + + $rhost = lc $rhost; + $rscheme = lc $rscheme; } else { - ($rhost, $rport, $rpath) = ($uhost, $uport, $upath); - $hdr{host} = $uhost; + ($rhost, $rport, $rscheme, $rpath) = ($uhost, $uport, $uscheme, $upath); } - $hdr{"content-length"} = length $arg{body}; + # leave out fragment and query string, just a heuristic + $hdr{referer} = "$uscheme://$uauthority$upath" unless exists $hdr{referer}; + $hdr{"user-agent"} = $USERAGENT unless exists $hdr{"user-agent"}; + + $hdr{"content-length"} = length $arg{body} + if length $arg{body} || $method ne "GET"; + + my $idempotent = $method =~ /^(?:GET|HEAD|PUT|DELETE|OPTIONS|TRACE)$/; + + # default value for keepalive is true iff the request is for an idempotent method + my $persistent = exists $arg{persistent} ? !!$arg{persistent} : $idempotent; + my $keepalive = exists $arg{keepalive} ? 
!!$arg{keepalive} : !$proxy; + my $was_persistent; # true if this is actually a recycled connection + + # the key to use in the keepalive cache + my $ka_key = "$uscheme\x00$uhost\x00$uport\x00$arg{sessionid}"; + + $hdr{connection} = ($persistent ? $keepalive ? "keep-alive " : "" : "close ") . "Te"; #1.1 + $hdr{te} = "trailers" unless exists $hdr{te}; #1.1 my %state = (connect_guard => 1); - _get_slot $uhost, sub { - $state{slot_guard} = shift; + my $ae_error = 595; # connecting - return unless $state{connect_guard}; + # handle actual, non-tunneled, request + my $handle_actual_request = sub { + $ae_error = 596; # request phase + + my $hdl = $state{handle}; + + $hdl->starttls ("connect") if $uscheme eq "https" && !exists $hdl->{tls}; + + # send request + $hdl->push_write ( + "$method $rpath HTTP/1.1\015\012" + . (join "", map "\u$_: $hdr{$_}\015\012", grep defined $hdr{$_}, keys %hdr) + . "\015\012" + . (delete $arg{body}) + ); + + # return if error occured during push_write() + return unless %state; + + # reduce memory usage, save a kitten, also re-use it for the response headers. + %hdr = (); + + # status line and headers + $state{read_response} = sub { + return unless %state; + + for ("$_[1]") { + y/\015//d; # weed out any \015, as they show up in the weirdest of places. + + /^HTTP\/0*([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\012]*) )? \012/gxci + or return _error %state, $cb, { @pseudo, Status => 599, Reason => "Invalid server response" }; + + # 100 Continue handling + # should not happen as we don't send expect: 100-continue, + # but we handle it just in case. + # since we send the request body regardless, if we get an error + # we are out of-sync, which we currently do NOT handle correctly. + return $state{handle}->push_read (line => $qr_nlnl, $state{read_response}) + if $2 eq 100; + + push @pseudo, + HTTPVersion => $1, + Status => $2, + Reason => $3, + ; - $state{connect_guard} = AnyEvent::Socket::tcp_connect $rhost, $rport, sub { - $state{fh} = shift - or return $cb->(undef, { Status => 599, Reason => "$!" }); - - delete $state{connect_guard}; # reduce memory usage, save a tree - - # get handle - $state{handle} = new AnyEvent::Handle - fh => $state{fh}, - ($scheme eq "https" ? (tls => "connect") : ()); - - # limit the number of persistent connections - if ($KA_COUNT{$_[1]} < $MAX_PERSISTENT_PER_HOST) { - ++$KA_COUNT{$_[1]}; - $state{handle}{ka_count_guard} = AnyEvent::Util::guard { --$KA_COUNT{$_[1]} }; - $hdr{connection} = "keep-alive"; - delete $hdr{connection}; # keep-alive not yet supported - } else { - delete $hdr{connection}; + my $hdr = _parse_hdr + or return _error %state, $cb, { @pseudo, Status => 599, Reason => "Garbled response headers" }; + + %hdr = (%$hdr, @pseudo); } - # (re-)configure handle - $state{handle}->timeout ($timeout); - $state{handle}->on_error (sub { - %state = (); - $cb->(undef, { Status => 599, Reason => "$!" }); - }); - $state{handle}->on_eof (sub { - %state = (); - $cb->(undef, { Status => 599, Reason => "unexpected end-of-file" }); - }); + # redirect handling + # microsoft and other shitheads don't give a shit for following standards, + # try to support some common forms of broken Location headers. + if ($hdr{location} !~ /^(?: $ | [^:\/?\#]+ : )/x) { + $hdr{location} =~ s/^\.\/+//; + + my $url = "$rscheme://$uhost:$uport"; + + unless ($hdr{location} =~ s/^\///) { + $url .= $upath; + $url =~ s/\/[^\/]*$//; + } - # send request - $state{handle}->push_write ( - "$method $rpath HTTP/1.0\015\012" - . (join "", map "$_: $hdr{$_}\015\012", keys %hdr) - . 
"\015\012" - . (delete $arg{body}) - ); - - %hdr = (); # reduce memory usage, save a kitten - - # status line - $state{handle}->push_read (line => qr/\015?\012/, sub { - $_[1] =~ /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) \s+ ([^\015\012]+)/ix - or return (%state = (), $cb->(undef, { Status => 599, Reason => "invalid server response ($_[1])" })); - - my %hdr = ( # response headers - HTTPVersion => "\x00$1", - Status => "\x00$2", - Reason => "\x00$3", - ); - - # headers, could be optimized a bit - $state{handle}->unshift_read (line => qr/\015?\012\015?\012/, sub { - for ("$_[1]\012") { - # we support spaces in field names, as lotus domino - # creates them. - $hdr{lc $1} .= "\x00$2" - while /\G - ([^:\000-\037]+): - [\011\040]* - ((?: [^\015\012]+ | \015?\012[\011\040] )*) - \015?\012 - /gxc; + $hdr{location} = "$url/$hdr{location}"; + } - /\G$/ - or return (%state = (), $cb->(undef, { Status => 599, Reason => "garbled response headers" })); + my $redirect; + + if ($recurse) { + my $status = $hdr{Status}; + + # industry standard is to redirect POST as GET for + # 301, 302 and 303, in contrast to HTTP/1.0 and 1.1. + # also, the UA should ask the user for 301 and 307 and POST, + # industry standard seems to be to simply follow. + # we go with the industry standard. + if ($status == 301 or $status == 302 or $status == 303) { + # HTTP/1.1 is unclear on how to mutate the method + $method = "GET" unless $method eq "HEAD"; + $redirect = 1; + } elsif ($status == 307) { + $redirect = 1; + } + } + + my $finish = sub { # ($data, $err_status, $err_reason[, $persistent]) + if ($state{handle}) { + # handle keepalive + if ( + $persistent + && $_[3] + && ($hdr{HTTPVersion} < 1.1 + ? $hdr{connection} =~ /\bkeep-?alive\b/i + : $hdr{connection} !~ /\bclose\b/i) + ) { + ka_store $ka_key, delete $state{handle}; + } else { + # no keepalive, destroy the handle + $state{handle}->destroy; } + } - substr $_, 0, 1, "" - for values %hdr; + %state = (); - my $finish = sub { - %state = (); + if (defined $_[1]) { + $hdr{OrigStatus} = $hdr{Status}; $hdr{Status} = $_[1]; + $hdr{OrigReason} = $hdr{Reason}; $hdr{Reason} = $_[2]; + } - # set-cookie processing - if ($arg{cookie_jar} && exists $hdr{"set-cookie"}) { - for (split /\x00/, $hdr{"set-cookie"}) { - my ($cookie, @arg) = split /;\s*/; - my ($name, $value) = split /=/, $cookie, 2; - my %kv = (value => $value, map { split /=/, $_, 2 } @arg); - - my $cdom = (delete $kv{domain}) || $uhost; - my $cpath = (delete $kv{path}) || "/"; - - $cdom =~ s/^.?/./; # make sure it starts with a "." - - next if $cdom =~ /\.$/; - - # this is not rfc-like and not netscape-like. go figure. - my $ndots = $cdom =~ y/.//; - next if $ndots < ($cdom =~ /\.[^.][^.]\.[^.][^.]$/ ? 3 : 2); - - # store it - $arg{cookie_jar}{version} = 1; - $arg{cookie_jar}{$cdom}{$cpath}{$name} = \%kv; - } - } + # set-cookie processing + if ($arg{cookie_jar}) { + cookie_jar_set_cookie $arg{cookie_jar}, $hdr{"set-cookie"}, $uhost, $hdr{date}; + } - if ($_[1]{Status} =~ /^x30[12]$/ && $recurse) { - # microsoft and other assholes don't give a shit for following standards, - # try to support a common form of broken Location header. 
- $_[1]{location} =~ s%^/%$scheme://$uhost:$uport/%; - - http_request ($method, $_[1]{location}, %arg, recurse => $recurse - 1, $cb); - } else { - $cb->($_[0], $_[1]); - } - }; + if ($redirect && exists $hdr{location}) { + # we ignore any errors, as it is very common to receive + # Content-Length != 0 but no actual body + # we also access %hdr, as $_[1] might be an erro + $state{recurse} = + http_request ( + $method => $hdr{location}, + %arg, + recurse => $recurse - 1, + Redirect => [$_[0], \%hdr], + sub { + %state = (); + &$cb + }, + ); + } else { + $cb->($_[0], \%hdr); + } + }; - if ($hdr{Status} =~ /^(?:1..|204|304)$/ or $method eq "HEAD") { - $finish->(undef, \%hdr); - } else { - if (exists $hdr{"content-length"}) { - $_[0]->unshift_read (chunk => $hdr{"content-length"}, sub { - # could cache persistent connection now - if ($hdr{connection} =~ /\bkeep-alive\b/i) { - # but we don't, due to misdesigns, this is annoyingly complex - }; + $ae_error = 597; # body phase - $finish->($_[1], \%hdr); - }); - } else { - # too bad, need to read until we get an error or EOF, - # no way to detect winged data. - $_[0]->on_error (sub { - $finish->($_[0]{rbuf}, \%hdr); + my $chunked = $hdr{"transfer-encoding"} =~ /\bchunked\b/i; # not quite correct... + + my $len = $chunked ? undef : $hdr{"content-length"}; + + # body handling, many different code paths + # - no body expected + # - want_body_handle + # - te chunked + # - 2x length known (with or without on_body) + # - 2x length not known (with or without on_body) + if (!$redirect && $arg{on_header} && !$arg{on_header}(\%hdr)) { + $finish->(undef, 598 => "Request cancelled by on_header"); + } elsif ( + $hdr{Status} =~ /^(?:1..|204|205|304)$/ + or $method eq "HEAD" + or (defined $len && $len == 0) # == 0, not !, because "0 " is true + ) { + # no body + $finish->("", undef, undef, 1); + + } elsif (!$redirect && $arg{want_body_handle}) { + $_[0]->on_eof (undef); + $_[0]->on_error (undef); + $_[0]->on_read (undef); + + $finish->(delete $state{handle}); + + } elsif ($chunked) { + my $cl = 0; + my $body = ""; + my $on_body = $arg{on_body} || sub { $body .= shift; 1 }; + + $state{read_chunk} = sub { + $_[1] =~ /^([0-9a-fA-F]+)/ + or $finish->(undef, $ae_error => "Garbled chunked transfer encoding"); + + my $len = hex $1; + + if ($len) { + $cl += $len; + + $_[0]->push_read (chunk => $len, sub { + $on_body->($_[1], \%hdr) + or return $finish->(undef, 598 => "Request cancelled by on_body"); + + $_[0]->push_read (line => sub { + length $_[1] + and return $finish->(undef, $ae_error => "Garbled chunked transfer encoding"); + $_[0]->push_read (line => $state{read_chunk}); }); - $_[0]->on_eof (undef); - $_[0]->on_read (sub { }); - } + }); + } else { + $hdr{"content-length"} ||= $cl; + + $_[0]->push_read (line => $qr_nlnl, sub { + if (length $_[1]) { + for ("$_[1]") { + y/\015//d; # weed out any \015, as they show up in the weirdest of places. 
+ + my $hdr = _parse_hdr + or return $finish->(undef, $ae_error => "Garbled response trailers"); + + %hdr = (%hdr, %$hdr); + } + } + + $finish->($body, undef, undef, 1); + }); } - }); - }); - }, sub { - $timeout + }; + + $_[0]->push_read (line => $state{read_chunk}); + + } elsif ($arg{on_body}) { + if (defined $len) { + $_[0]->on_read (sub { + $len -= length $_[0]{rbuf}; + + $arg{on_body}(delete $_[0]{rbuf}, \%hdr) + or return $finish->(undef, 598 => "Request cancelled by on_body"); + + $len > 0 + or $finish->("", undef, undef, 1); + }); + } else { + $_[0]->on_eof (sub { + $finish->(""); + }); + $_[0]->on_read (sub { + $arg{on_body}(delete $_[0]{rbuf}, \%hdr) + or $finish->(undef, 598 => "Request cancelled by on_body"); + }); + } + } else { + $_[0]->on_eof (undef); + + if (defined $len) { + $_[0]->on_read (sub { + $finish->((substr delete $_[0]{rbuf}, 0, $len, ""), undef, undef, 1) + if $len <= length $_[0]{rbuf}; + }); + } else { + $_[0]->on_error (sub { + ($! == Errno::EPIPE || !$!) + ? $finish->(delete $_[0]{rbuf}) + : $finish->(undef, $ae_error => $_[2]); + }); + $_[0]->on_read (sub { }); + } + } }; + + # if keepalive is enabled, then the server closing the connection + # before a response can happen legally - we retry on idempotent methods. + if ($was_persistent && $idempotent) { + my $old_eof = $hdl->{on_eof}; + $hdl->{on_eof} = sub { + _destroy_state %state; + + %state = (); + $state{recurse} = + http_request ( + $method => $url, + %arg, + keepalive => 0, + sub { + %state = (); + &$cb + } + ); + }; + $hdl->on_read (sub { + return unless %state; + + # as soon as we receive something, a connection close + # once more becomes a hard error + $hdl->{on_eof} = $old_eof; + $hdl->push_read (line => $qr_nlnl, $state{read_response}); + }); + } else { + $hdl->push_read (line => $qr_nlnl, $state{read_response}); + } + }; + + my $prepare_handle = sub { + my ($hdl) = $state{handle}; + + $hdl->on_error (sub { + _error %state, $cb, { @pseudo, Status => $ae_error, Reason => $_[2] }; + }); + $hdl->on_eof (sub { + _error %state, $cb, { @pseudo, Status => $ae_error, Reason => "Unexpected end-of-file" }; + }); + $hdl->timeout_reset; + $hdl->timeout ($timeout); + }; + + # connected to proxy (or origin server) + my $connect_cb = sub { + my $fh = shift + or return _error %state, $cb, { @pseudo, Status => $ae_error, Reason => "$!" }; + + return unless delete $state{connect_guard}; + + # get handle + $state{handle} = new AnyEvent::Handle + %{ $arg{handle_params} }, + fh => $fh, + peername => $uhost, + tls_ctx => $arg{tls_ctx}, + ; + + $prepare_handle->(); + + #$state{handle}->starttls ("connect") if $rscheme eq "https"; + + # now handle proxy-CONNECT method + if ($proxy && $uscheme eq "https") { + # oh dear, we have to wrap it into a connect request + + # maybe re-use $uauthority with patched port? 
+ $state{handle}->push_write ("CONNECT $uhost:$uport HTTP/1.0\015\012\015\012"); + $state{handle}->push_read (line => $qr_nlnl, sub { + $_[1] =~ /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\015\012]*) )?/ix + or return _error %state, $cb, { @pseudo, Status => 599, Reason => "Invalid proxy connect response ($_[1])" }; + + if ($2 == 200) { + $rpath = $upath; + $handle_actual_request->(); + } else { + _error %state, $cb, { @pseudo, Status => $2, Reason => $3 }; + } + }); + } else { + $handle_actual_request->(); + } + }; + + _get_slot $uhost, sub { + $state{slot_guard} = shift; + + return unless $state{connect_guard}; + + # try to use an existing keepalive connection, but only if we, ourselves, plan + # on a keepalive request (in theory, this should be a separate config option). + if ($persistent && $KA_CACHE{$ka_key}) { + $was_persistent = 1; + + $state{handle} = ka_fetch $ka_key; + $state{handle}->destroyed + and die "got a destructed handle. pah\n";#d# + $prepare_handle->(); + $state{handle}->destroyed + and die "got a destructed handle. pa2\n";#d# + $handle_actual_request->(); + $state{handle}->destroyed + and die "got a destructed handle. pa3\n";#d# + + } else { + my $tcp_connect = $arg{tcp_connect} + || do { require AnyEvent::Socket; \&AnyEvent::Socket::tcp_connect }; + + $state{connect_guard} = $tcp_connect->($rhost, $rport, $connect_cb, $arg{on_prepare} || sub { $timeout }); + } }; - defined wantarray && AnyEvent::Util::guard { %state = () } + defined wantarray && AnyEvent::Util::guard { _destroy_state %state } } -sub http_get($$;@) { +sub http_get($@) { unshift @_, "GET"; &http_request } -sub http_head($$;@) { +sub http_head($@) { unshift @_, "HEAD"; &http_request } -sub http_post($$$;@) { - unshift @_, "POST", "body"; +sub http_post($$@) { + my $url = shift; + unshift @_, "POST", $url, "body"; &http_request } =back +=head2 DNS CACHING + +AnyEvent::HTTP uses the AnyEvent::Socket::tcp_connect function for +the actual connection, which in turn uses AnyEvent::DNS to resolve +hostnames. The latter is a simple stub resolver and does no caching +on its own. If you want DNS caching, you currently have to provide +your own default resolver (by storing a suitable resolver object in +C<$AnyEvent::DNS::RESOLVER>) or your own C callback. + =head2 GLOBAL FUNCTIONS AND VARIABLES =over 4 @@ -465,39 +1193,324 @@ =item AnyEvent::HTTP::set_proxy "proxy-url" Sets the default proxy server to use. The proxy-url must begin with a -string of the form C (optionally C). +string of the form C, croaks otherwise. + +To clear an already-set proxy, use C. + +When AnyEvent::HTTP is laoded for the first time it will query the +default proxy from the operating system, currently by looking at +C<$ENV{http_proxy>}. + +=item AnyEvent::HTTP::cookie_jar_expire $jar[, $session_end] + +Remove all cookies from the cookie jar that have been expired. If +C<$session_end> is given and true, then additionally remove all session +cookies. + +You should call this function (with a true C<$session_end>) before you +save cookies to disk, and you should call this function after loading them +again. If you have a long-running program you can additonally call this +function from time to time. + +A cookie jar is initially an empty hash-reference that is managed by this +module. It's format is subject to change, but currently it is like this: + +The key C has to contain C<1>, otherwise the hash gets +emptied. All other keys are hostnames or IP addresses pointing to +hash-references. 
The key for these inner hash references is the +server path for which this cookie is meant, and the values are again +hash-references. The keys of those hash-references is the cookie name, and +the value, you guessed it, is another hash-reference, this time with the +key-value pairs from the cookie, except for C and C, +which have been replaced by a C<_expires> key that contains the cookie +expiry timestamp. + +Here is an example of a cookie jar with a single cookie, so you have a +chance of understanding the above paragraph: + + { + version => 1, + "10.0.0.1" => { + "/" => { + "mythweb_id" => { + _expires => 1293917923, + value => "ooRung9dThee3ooyXooM1Ohm", + }, + }, + }, + } + +=item $date = AnyEvent::HTTP::format_date $timestamp + +Takes a POSIX timestamp (seconds since the epoch) and formats it as a HTTP +Date (RFC 2616). + +=item $timestamp = AnyEvent::HTTP::parse_date $date + +Takes a HTTP Date (RFC 2616) or a Cookie date (netscape cookie spec) or a +bunch of minor variations of those, and returns the corresponding POSIX +timestamp, or C if the date cannot be parsed. =item $AnyEvent::HTTP::MAX_RECURSE The default value for the C request parameter (default: C<10>). +=item $AnyEvent::HTTP::TIMEOUT + +The default timeout for conenction operations (default: C<300>). + =item $AnyEvent::HTTP::USERAGENT The default value for the C header (the default is -C). +C). -=item $AnyEvent::HTTP::MAX_PERSISTENT +=item $AnyEvent::HTTP::MAX_PER_HOST -The maximum number of persistent connections to keep open (default: 8). - -Not implemented currently. +The maximum number of concurrent connections to the same host (identified +by the hostname). If the limit is exceeded, then the additional requests +are queued until previous connections are closed. Both persistent and +non-persistent connections are counted in this limit. + +The default value for this is C<4>, and it is highly advisable to not +increase it much. + +For comparison: the RFC's recommend 4 non-persistent or 2 persistent +connections, older browsers used 2, newers (such as firefox 3) typically +use 6, and Opera uses 8 because like, they have the fastest browser and +give a shit for everybody else on the planet. =item $AnyEvent::HTTP::PERSISTENT_TIMEOUT -The maximum time to cache a persistent connection, in seconds (default: 2). +The time after which idle persistent conenctions get closed by +AnyEvent::HTTP (default: C<3>). + +=item $AnyEvent::HTTP::ACTIVE -Not implemented currently. +The number of active connections. This is not the number of currently +running requests, but the number of currently open and non-idle TCP +connections. This number can be useful for load-leveling. =back =cut +our @month = qw(Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec); +our @weekday = qw(Sun Mon Tue Wed Thu Fri Sat); + +sub format_date($) { + my ($time) = @_; + + # RFC 822/1123 format + my ($S, $M, $H, $mday, $mon, $year, $wday, $yday, undef) = gmtime $time; + + sprintf "%s, %02d %s %04d %02d:%02d:%02d GMT", + $weekday[$wday], $mday, $month[$mon], $year + 1900, + $H, $M, $S; +} + +sub parse_date($) { + my ($date) = @_; + + my ($d, $m, $y, $H, $M, $S); + + if ($date =~ /^[A-Z][a-z][a-z]+, ([0-9][0-9]?)[\- ]([A-Z][a-z][a-z])[\- ]([0-9][0-9][0-9][0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) 
GMT$/) { + # RFC 822/1123, required by RFC 2616 (with " ") + # cookie dates (with "-") + + ($d, $m, $y, $H, $M, $S) = ($1, $2, $3, $4, $5, $6); + + } elsif ($date =~ /^[A-Z][a-z][a-z]+, ([0-9][0-9]?)-([A-Z][a-z][a-z])-([0-9][0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) GMT$/) { + # RFC 850 + ($d, $m, $y, $H, $M, $S) = ($1, $2, $3 < 69 ? $3 + 2000 : $3 + 1900, $4, $5, $6); + + } elsif ($date =~ /^[A-Z][a-z][a-z]+ ([A-Z][a-z][a-z]) ([0-9 ]?[0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) ([0-9][0-9][0-9][0-9])$/) { + # ISO C's asctime + ($d, $m, $y, $H, $M, $S) = ($2, $1, $6, $3, $4, $5); + } + # other formats fail in the loop below + + for (0..11) { + if ($m eq $month[$_]) { + require Time::Local; + return Time::Local::timegm ($S, $M, $H, $d, $_, $y); + } + } + + undef +} + sub set_proxy($) { - $PROXY = [$2, $3 || 3128, $1] if $_[0] =~ m%^(https?):// ([^:/]+) (?: : (\d*) )?%ix; + if (length $_[0]) { + $_[0] =~ m%^(http):// ([^:/]+) (?: : (\d*) )?%ix + or Carp::croak "$_[0]: invalid proxy URL"; + $PROXY = [$2, $3 || 3128, $1] + } else { + undef $PROXY; + } } # initialise proxy from environment -set_proxy $ENV{http_proxy}; +eval { + set_proxy $ENV{http_proxy}; +}; + +=head2 SHOWCASE + +This section contaisn some more elaborate "real-world" examples or code +snippets. + +=head2 HTTP/1.1 FILE DOWNLOAD + +Downloading files with HTTP can be quite tricky, especially when something +goes wrong and you want to resume. + +Here is a function that initiates and resumes a download. It uses the +last modified time to check for file content changes, and works with many +HTTP/1.0 servers as well, and usually falls back to a complete re-download +on older servers. + +It calls the completion callback with either C, which means a +nonretryable error occured, C<0> when the download was partial and should +be retried, and C<1> if it was successful. + + use AnyEvent::HTTP; + + sub download($$$) { + my ($url, $file, $cb) = @_; + + open my $fh, "+<", $file + or die "$file: $!"; + + my %hdr; + my $ofs = 0; + + warn stat $fh; + warn -s _; + if (stat $fh and -s _) { + $ofs = -s _; + warn "-s is ", $ofs;#d# + $hdr{"if-unmodified-since"} = AnyEvent::HTTP::format_date +(stat _)[9]; + $hdr{"range"} = "bytes=$ofs-"; + } + + http_get $url, + headers => \%hdr, + on_header => sub { + my ($hdr) = @_; + + if ($hdr->{Status} == 200 && $ofs) { + # resume failed + truncate $fh, $ofs = 0; + } + + sysseek $fh, $ofs, 0; + + 1 + }, + on_body => sub { + my ($data, $hdr) = @_; + + if ($hdr->{Status} =~ /^2/) { + length $data == syswrite $fh, $data + or return; # abort on write errors + } + + 1 + }, + sub { + my (undef, $hdr) = @_; + + my $status = $hdr->{Status}; + + if (my $time = AnyEvent::HTTP::parse_date $hdr->{"last-modified"}) { + utime $fh, $time, $time; + } + + if ($status == 200 || $status == 206 || $status == 416) { + # download ok || resume ok || file already fully downloaded + $cb->(1, $hdr); + + } elsif ($status == 412) { + # file has changed while resuming, delete and retry + unlink $file; + $cb->(0, $hdr); + + } elsif ($status == 500 or $status == 503 or $status =~ /^59/) { + # retry later + $cb->(0, $hdr); + + } else { + $cb->(undef, $hdr); + } + } + ; + } + + download "http://server/somelargefile", "/tmp/somelargefile", sub { + if ($_[0]) { + print "OK!\n"; + } elsif (defined $_[0]) { + print "please retry later\n"; + } else { + print "ERROR\n"; + } + }; + +=head3 SOCKS PROXIES + +Socks proxies are not directly supported by AnyEvent::HTTP. 
You can
+compile your perl to support socks, or use an external program such as
+F<socksify> (dante) or F<tsocks> to make your program use a socks proxy
+transparently.
+
+Alternatively, for AnyEvent::HTTP only, you can use your own
+C<tcp_connect> function that does the proxy handshake - here is an example
+that works with socks4a proxies:
+
+   use Errno;
+   use AnyEvent::Util;
+   use AnyEvent::Socket;
+   use AnyEvent::Handle;
+
+   # host, port and username of/for your socks4a proxy
+   my $socks_host = "10.0.0.23";
+   my $socks_port = 9050;
+   my $socks_user = "";
+
+   sub socks4a_connect {
+      my ($host, $port, $connect_cb, $prepare_cb) = @_;
+
+      my $hdl = new AnyEvent::Handle
+         connect    => [$socks_host, $socks_port],
+         on_prepare => sub { $prepare_cb->($_[0]{fh}) },
+         on_error   => sub { $connect_cb->() },
+      ;
+
+      $hdl->push_write (pack "CCnNZ*Z*", 4, 1, $port, 1, $socks_user, $host);
+
+      $hdl->push_read (chunk => 8, sub {
+         my ($hdl, $chunk) = @_;
+         my ($status, $port, $ipn) = unpack "xCna4", $chunk;
+
+         if ($status == 0x5a) {
+            $connect_cb->($hdl->{fh}, (format_address $ipn) . ":$port");
+         } else {
+            $! = Errno::ENXIO; $connect_cb->();
+         }
+      });
+
+      $hdl
+   }
+
+Use C<socks4a_connect> instead of C<tcp_connect> when doing C<http_request>s,
+possibly after switching off other proxy types:
+
+   AnyEvent::HTTP::set_proxy undef; # usually you do not want other proxies
+
+   http_get 'http://www.google.com', tcp_connect => \&socks4a_connect, sub {
+      my ($data, $headers) = @_;
+      ...
+   };

=head1 SEE ALSO

L<AnyEvent>.

=head1 AUTHOR

-   Marc Lehmann
-   http://home.schmorp.de/
+   Marc Lehmann
+   http://home.schmorp.de/
+
+With many thanks to Дмитрий Шалашов, who provided countless
+testcases and bugreports.

=cut