--- AnyEvent-HTTP/HTTP.pm 2010/12/31 06:18:30 1.66 +++ AnyEvent-HTTP/HTTP.pm 2011/02/19 06:46:14 1.102 @@ -17,7 +17,7 @@ This module implements a simple, stateless and non-blocking HTTP client. It supports GET, POST and other request methods, cookies and more, -all on a very low level. It can follow redirects supports proxies and +all on a very low level. It can follow redirects, supports proxies, and automatically limits the number of connections to the values specified in the RFC. @@ -38,8 +38,7 @@ package AnyEvent::HTTP; -use strict; -no warnings; +use common::sense; use Errno (); @@ -49,24 +48,20 @@ use base Exporter::; -our $VERSION = '1.5'; +our $VERSION = '2.04'; our @EXPORT = qw(http_get http_post http_head http_request); our $USERAGENT = "Mozilla/5.0 (compatible; U; AnyEvent-HTTP/$VERSION; +http://software.schmorp.de/pkg/AnyEvent)"; our $MAX_RECURSE = 10; -our $MAX_PERSISTENT = 8; -our $PERSISTENT_TIMEOUT = 2; +our $PERSISTENT_TIMEOUT = 3; our $TIMEOUT = 300; - -# changing these is evil -our $MAX_PERSISTENT_PER_HOST = 0; -our $MAX_PER_HOST = 4; +our $MAX_PER_HOST = 4; # changing this is evil our $PROXY; our $ACTIVE = 0; -my %KA_COUNT; # number of open keep-alive connections per host +my %KA_CACHE; # indexed by uhost currently, points to [$handle...] array my %CO_SLOT; # number of open connections, and wait queue, per host =item http_get $url, key => value..., $cb->($data, $headers) @@ -96,8 +91,8 @@ destroyed before the callback is called, the request will be cancelled. The callback will be called with the response body data as first argument -(or C if an error occured), and a hash-ref with response headers as -second argument. +(or C if an error occured), and a hash-ref with response headers +(and trailers) as second argument. All the headers in that hash are lowercased. In addition to the response headers, the "pseudo-headers" (uppercase to avoid clashing with possible @@ -124,9 +119,23 @@ joined together with a comma (C<,>), as per the HTTP spec. 
If an internal error occurs, such as not being able to resolve a hostname, -then C<$data> will be C, C<< $headers->{Status} >> will be C<59x> -(usually C<599>) and the C pseudo-header will contain an error -message. +then C<$data> will be C, C<< $headers->{Status} >> will be +C<590>-C<599> and the C pseudo-header will contain an error +message. Currently the following status codes are used: + +=over 4 + +=item 595 - errors during connection establishment, proxy handshake. + +=item 596 - errors during TLS negotiation, request sending and header processing. + +=item 597 - errors during body receiving or processing. + +=item 598 - user aborted request via C or C. + +=item 599 - other, usually nonretryable, errors (garbled URL etc.). + +=back A typical callback might look like this: @@ -152,11 +161,15 @@ =item headers => hashref -The request headers to use. Currently, C may provide its -own C, C, C and C headers -and will provide defaults for C and C (this can be -suppressed by using C for these headers in which case they won't be -sent at all). +The request headers to use. Currently, C may provide its own +C, C, C and C headers and +will provide defaults at least for C, C and C +(this can be suppressed by using C for these headers in which case +they won't be sent at all). + +You really should provide your own C header value that is +appropriate for your program - I wouldn't be surprised if the default +AnyEvent string gets blocked by webservers sooner or later. =item timeout => $seconds @@ -168,15 +181,17 @@ =item proxy => [$host, $port[, $scheme]] or undef -Use the given http proxy for all requests. If not specified, then the -default proxy (as specified by C<$ENV{http_proxy}>) is used. +Use the given http proxy for all requests, or no proxy if C is +used. + +C<$scheme> must be either missing or must be C for HTTP. -C<$scheme> must be either missing, C for HTTP or C for -HTTPS. +If not specified, then the default proxy is used (see +C). 
=item body => $string -The request body, usually empty. Will be-sent as-is (future versions of +The request body, usually empty. Will be sent as-is (future versions of this module might offer more options). =item cookie_jar => $hash_ref @@ -184,16 +199,22 @@ Passing this parameter enables (simplified) cookie-processing, loosely based on the original netscape specification. -The C<$hash_ref> must be an (initially empty) hash reference which will -get updated automatically. It is possible to save the cookie_jar to -persistent storage with something like JSON or Storable, but this is not -recommended, as expiry times are currently being ignored. - -Note that this cookie implementation is not of very high quality, nor -meant to be complete. If you want complete cookie management you have to -do that on your own. C is meant as a quick fix to get some -cookie-using sites working. Cookies are a privacy disaster, do not use -them unless required to. +The C<$hash_ref> must be an (initially empty) hash reference which +will get updated automatically. It is possible to save the cookie jar +to persistent storage with something like JSON or Storable - see the +C function if you wish to remove +expired or session-only cookies, and also for documentation on the format +of the cookie jar. + +Note that this cookie implementation is not meant to be complete. If +you want complete cookie management you have to do that on your +own. C is meant as a quick fix to get most cookie-using sites +working. Cookies are a privacy disaster, do not use them unless required +to. + +When cookie processing is enabled, the C and C +headers will be set and handled by this module, otherwise they will be +left untouched. =item tls_ctx => $scheme | $tls_ctx @@ -207,6 +228,16 @@ The default for this option is C, which could be interpreted as "give me the page, no matter what". +See also the C parameter. + +=item session => $string + +The module might reuse connections to the same host internally. 
Sometimes +(e.g. when using TLS), you do not want to reuse connections from other +sessions. This can be achieved by setting this parameter to some unique +ID (such as the address of an object storing your state data, or the TLS +context) - only connections using the same unique ID will be reused. + =item on_prepare => $callback->($fh) In rare cases you need to "tune" the socket before it is used to @@ -242,6 +273,10 @@ content, which, if it is supposed to be rare, can be faster than first doing a C request. +The downside is that cancelling the request makes it impossible to re-use +the connection. Also, the C callback will not receive any +trailer (headers sent after the response body). + Example: cancel the request unless the content-type is "text/html". on_header => sub { @@ -258,6 +293,9 @@ or false, in which case AnyEvent::HTTP will cancel the download (and call the completion callback with an error code of C<598>). +The downside to cancelling the request is that it makes it impossible to +re-use the connection. + This callback is useful when the data is too large to be held in memory (so the callback writes it to a file) or when only some information should be extracted, or when the body should be processed incrementally. @@ -278,10 +316,10 @@ connection. In error cases, C will be passed. When there is no body (e.g. status C<304>), the empty string will be passed. -The handle object might or might not be in TLS mode, might be connected to -a proxy, be a persistent connection etc., and configured in unspecified -ways. The user is responsible for this handle (it will not be used by this -module anymore). +The handle object might or might not be in TLS mode, might be connected +to a proxy, be a persistent connection, use chunked transfer encoding +etc., and configured in unspecified ways. The user is responsible for this +handle (it will not be used by this module anymore). 
This is useful with some push-type services, where, after the initial headers, an interactive protocol is used (typical example would be the @@ -290,20 +328,63 @@ If you think you need this, first have a look at C, to see if that doesn't solve your problem in a better way. +=item persistent => $boolean + +Try to create/reuse a persistent connection. When this flag is set +(default: true for idempotent requests, false for all others), then +C tries to re-use an existing (previously-created) +persistent connection to the host and, failing that, tries to create a new +one. + +Requests failing in certain ways will be automatically retried once, which +is dangerous for non-idempotent requests, which is why it defaults to off +for them. The reason for this is because the bozos who designed HTTP/1.1 +made it impossible to distinguish between a fatal error and a normal +connection timeout, so you never know whether there was a problem with +your request or not. + +When reusing an existing connection, many parameters (such as TLS context) +will be ignored. See the C parameter for a workaround. + +=item keepalive => $boolean + +Only used when C is also true. This parameter decides whether +C tries to handshake a HTTP/1.0-style keep-alive connection +(as opposed to only a HTTP/1.1 persistent connection). + +The default is true, except when using a proxy, in which case it defaults +to false, as HTTP/1.0 proxies cannot support this in a meaningful way. + +=item handle_params => { key => value ... } + +The key-value pairs in this hash will be passed to any L +constructor that is called - not all requests will create a handle, and +sometimes more than one is created, so this parameter is only good for +setting hints. + +Example: set the maximum read size to 4096, to potentially conserve memory +at the cost of speed. 
+ + handle_params => { + max_read_size => 4096, + }, + =back -Example: make a simple HTTP GET request for http://www.nethype.de/ +Example: do a simple HTTP GET request for http://www.nethype.de/ and print +the response body. http_request GET => "http://www.nethype.de/", sub { my ($body, $hdr) = @_; print "$body\n"; }; -Example: make a HTTP HEAD request on https://www.google.com/, use a +Example: do a HTTP HEAD request on https://www.google.com/, use a timeout of 30 seconds. http_request GET => "https://www.google.com", + headers => { "user-agent" => "MySearchClient 1.0" }, timeout => 30, sub { my ($body, $hdr) = @_; @@ -312,7 +393,7 @@ } ; -Example: make another simple HTTP GET request, but immediately try to +Example: do another simple HTTP GET request, but immediately try to cancel it. my $request = http_request GET => "http://www.nethype.de/", sub { @@ -324,6 +405,9 @@ =cut +############################################################################# +# wait queue/slots + sub _slot_schedule; sub _slot_schedule($) { my $host = shift; @@ -354,8 +438,207 @@ _slot_schedule $_[0]; } +############################################################################# +# cookie handling + +# expire cookies +sub cookie_jar_expire($;$) { + my ($jar, $session_end) = @_; + + %$jar = () if $jar->{version} != 1; + + my $anow = AE::now; + + while (my ($chost, $paths) = each %$jar) { + next unless ref $paths; + + while (my ($cpath, $cookies) = each %$paths) { + while (my ($cookie, $kv) = each %$cookies) { + if (exists $kv->{_expires}) { + delete $cookies->{$cookie} + if $anow > $kv->{_expires}; + } elsif ($session_end) { + delete $cookies->{$cookie}; + } + } + + delete $paths->{$cpath} + unless %$cookies; + } + + delete $jar->{$chost} + unless %$paths; + } +} + +# extract cookies from jar +sub cookie_jar_extract($$$$) { + my ($jar, $scheme, $host, $path) = @_; + + %$jar = () if $jar->{version} != 1; + + my @cookies; + + while (my ($chost, $paths) = each %$jar) { + next unless ref 
$paths; + + if ($chost =~ /^\./) { + next unless $chost eq substr $host, -length $chost; + } elsif ($chost =~ /\./) { + next unless $chost eq $host; + } else { + next; + } + + while (my ($cpath, $cookies) = each %$paths) { + next unless $cpath eq substr $path, 0, length $cpath; + + while (my ($cookie, $kv) = each %$cookies) { + next if $scheme ne "https" && exists $kv->{secure}; + + if (exists $kv->{_expires} and AE::now > $kv->{_expires}) { + delete $cookies->{$cookie}; + next; + } + + my $value = $kv->{value}; + + if ($value =~ /[=;,[:space:]]/) { + $value =~ s/([\\"])/\\$1/g; + $value = "\"$value\""; + } + + push @cookies, "$cookie=$value"; + } + } + } + + \@cookies +} + +# parse set_cookie header into jar +sub cookie_jar_set_cookie($$$$) { + my ($jar, $set_cookie, $host, $date) = @_; + + my $anow = int AE::now; + my $snow; # server-now + + for ($set_cookie) { + # parse NAME=VALUE + my @kv; + + # expires is not http-compliant in the original cookie-spec, + # we support the official date format and some extensions + while ( + m{ + \G\s* + (?: + expires \s*=\s* ([A-Z][a-z][a-z]+,\ [^,;]+) + | ([^=;,[:space:]]+) (?: \s*=\s* (?: "((?:[^\\"]+|\\.)*)" | ([^;,[:space:]]*) ) )? + ) + }gcxsi + ) { + my $name = $2; + my $value = $4; + + if (defined $1) { + # expires + $name = "expires"; + $value = $1; + } elsif (defined $3) { + # quoted + $value = $3; + $value =~ s/\\(.)/$1/gs; + } + + push @kv, @kv ? lc $name : $name, $value; + + last unless /\G\s*;/gc; + } + + last unless @kv; + + my $name = shift @kv; + my %kv = (value => shift @kv, @kv); + + if (exists $kv{"max-age"}) { + $kv{_expires} = $anow + delete $kv{"max-age"}; + } elsif (exists $kv{expires}) { + $snow ||= parse_date ($date) || $anow; + $kv{_expires} = $anow + (parse_date (delete $kv{expires}) - $snow); + } else { + delete $kv{_expires}; + } + + my $cdom; + my $cpath = (delete $kv{path}) || "/"; + + if (exists $kv{domain}) { + $cdom = delete $kv{domain}; + + $cdom =~ s/^\.?/./; # make sure it starts with a "." 
+ + next if $cdom =~ /\.$/; + + # this is not rfc-like and not netscape-like. go figure. + my $ndots = $cdom =~ y/.//; + next if $ndots < ($cdom =~ /\.[^.][^.]\.[^.][^.]$/ ? 3 : 2); + } else { + $cdom = $host; + } + + # store it + $jar->{version} = 1; + $jar->{lc $cdom}{$cpath}{$name} = \%kv; + + redo if /\G\s*,/gc; + } +} + +############################################################################# +# keepalive/persistent connection cache + +# fetch a connection from the keepalive cache +sub ka_fetch($) { + my $ka_key = shift; + + my $hdl = pop @{ $KA_CACHE{$ka_key} }; # currently we reuse the MOST RECENTLY USED connection + delete $KA_CACHE{$ka_key} + unless @{ $KA_CACHE{$ka_key} }; + + $hdl +} + +sub ka_store($$) { + my ($ka_key, $hdl) = @_; + + my $kaa = $KA_CACHE{$ka_key} ||= []; + + my $destroy = sub { + my @ka = grep $_ != $hdl, @{ $KA_CACHE{$ka_key} }; + + $hdl->destroy; + + @ka + ? $KA_CACHE{$ka_key} = \@ka + : delete $KA_CACHE{$ka_key}; + }; + + # on error etc., destroy + $hdl->on_error ($destroy); + $hdl->on_eof ($destroy); + $hdl->on_read ($destroy); + $hdl->timeout ($PERSISTENT_TIMEOUT); + + push @$kaa, $hdl; + shift @$kaa while @$kaa > $MAX_PER_HOST; +} + +############################################################################# +# utilities + # continue to parse $_ for headers and place them into the arg -sub parse_hdr() { +sub _parse_hdr() { my %hdr; # things seen, not parsed: @@ -379,11 +662,32 @@ \%hdr } +############################################################################# +# http_get + our $qr_nlnl = qr{(? 
1, sslv2 => 1 }; our $TLS_CTX_HIGH = { cache => 1, verify => 1, verify_peername => "https" }; +# maybe it should just become a normal object :/ + +sub _destroy_state(\%) { + my ($state) = @_; + + $state->{handle}->destroy if $state->{handle}; + %$state = (); +} + +sub _error(\%$$) { + my ($state, $cb, $hdr) = @_; + + &_destroy_state ($state); + + $cb->(undef, $hdr); + () +} + sub http_request($$@) { my $cb = pop; my ($method, $url, %arg) = @_; @@ -410,11 +714,11 @@ return $cb->(undef, { @pseudo, Status => 599, Reason => "Too many redirections" }) if $recurse < 0; - my $proxy = $arg{proxy} || $PROXY; + my $proxy = exists $arg{proxy} ? $arg{proxy} : $PROXY; my $timeout = $arg{timeout} || $TIMEOUT; - my ($uscheme, $uauthority, $upath, $query, $fragment) = - $url =~ m|(?:([^:/?#]+):)?(?://([^/?#]*))?([^?#]*)(?:(\?[^#]*))?(?:#(.*))?|; + my ($uscheme, $uauthority, $upath, $query, undef) = # ignore fragment + $url =~ m|^([^:]+):(?://([^/?#]*))?([^?#]*)(?:(\?[^#]*))?(?:#(.*))?$|; $uscheme = lc $uscheme; @@ -425,7 +729,7 @@ $uauthority =~ /^(?: .*\@ )? ([^\@:]+) (?: : (\d+) )?$/x or return $cb->(undef, { @pseudo, Status => 599, Reason => "Unparsable URL" }); - my $uhost = $1; + my $uhost = lc $1; $uport = $2 if defined $2; $hdr{host} = defined $2 ? 
"$uhost:$2" : "$uhost" @@ -438,33 +742,10 @@ # cookie processing if (my $jar = $arg{cookie_jar}) { - %$jar = () if $jar->{version} != 1; - - my @cookie; - - while (my ($chost, $v) = each %$jar) { - if ($chost =~ /^\./) { - next unless $chost eq substr $uhost, -length $chost; - } elsif ($chost =~ /\./) { - next unless $chost eq $uhost; - } else { - next; - } - - while (my ($cpath, $v) = each %$v) { - next unless $cpath eq substr $upath, 0, length $cpath; - - while (my ($k, $v) = each %$v) { - next if $uscheme ne "https" && exists $v->{secure}; - my $value = $v->{value}; - $value =~ s/([\\"])/\\$1/g; - push @cookie, "$k=\"$value\""; - } - } - } - - $hdr{cookie} = join "; ", @cookie - if @cookie; + my $cookies = cookie_jar_extract $jar, $uscheme, $uhost, $upath; + + $hdr{cookie} = join "; ", @$cookies + if @$cookies; } my ($rhost, $rport, $rscheme, $rpath); # request host, port, path @@ -477,6 +758,9 @@ # don't support https requests over https-proxy transport, # can't be done with tls as spec'ed, unless you double-encrypt. $rscheme = "http" if $uscheme eq "https" && $rscheme eq "https"; + + $rhost = lc $rhost; + $rscheme = lc $rscheme; } else { ($rhost, $rport, $rscheme, $rpath) = ($uhost, $uport, $uscheme, $upath); } @@ -488,351 +772,371 @@ $hdr{"content-length"} = length $arg{body} if length $arg{body} || $method ne "GET"; - $hdr{connection} = "close TE"; - $hdr{te} = "trailers" unless exists $hdr{te}; + my $idempotent = $method =~ /^(?:GET|HEAD|PUT|DELETE|OPTIONS|TRACE)$/; - my %state = (connect_guard => 1); + # default value for keepalive is true iff the request is for an idempotent method + my $keepalive = exists $arg{keepalive} ? !!$arg{keepalive} : $idempotent; + my $keepalive10 = exists $arg{keepalive10} ? 
$arg{keepalive10} : !$proxy; + my $keptalive; # true if this is actually a recycled connection - _get_slot $uhost, sub { - $state{slot_guard} = shift; + # the key to use in the keepalive cache + my $ka_key = "$uhost\x00$arg{sessionid}"; - return unless $state{connect_guard}; + $hdr{connection} = ($keepalive ? $keepalive10 ? "keep-alive " : "" : "close ") . "Te"; #1.1 + $hdr{te} = "trailers" unless exists $hdr{te}; #1.1 - my $connect_cb = sub { - $state{fh} = shift - or do { - my $err = "$!"; - %state = (); - return $cb->(undef, { @pseudo, Status => 599, Reason => $err }); - }; + my %state = (connect_guard => 1); - pop; # free memory, save a tree + my $ae_error = 595; # connecting - return unless delete $state{connect_guard}; + # handle actual, non-tunneled, request + my $handle_actual_request = sub { + $ae_error = 596; # request phase + + my $hdl = $state{handle}; + + $hdl->starttls ("connect") if $uscheme eq "https" && !exists $hdl->{tls}; + + # send request + $hdl->push_write ( + "$method $rpath HTTP/1.1\015\012" + . (join "", map "\u$_: $hdr{$_}\015\012", grep defined $hdr{$_}, keys %hdr) + . "\015\012" + . (delete $arg{body}) + ); + + # return if error occured during push_write() + return unless %state; + + # reduce memory usage, save a kitten, also re-use it for the response headers. + %hdr = (); + + # status line and headers + $state{read_response} = sub { + return unless %state; + + for ("$_[1]") { + y/\015//d; # weed out any \015, as they show up in the weirdest of places. + + /^HTTP\/0*([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\012]*) )? \012/gxci + or return _error %state, $cb, { @pseudo, Status => 599, Reason => "Invalid server response" }; + + # 100 Continue handling + # should not happen as we don't send expect: 100-continue, + # but we handle it just in case. + # since we send the request body regardless, if we get an error + # we are out of-sync, which we currently do NOT handle correctly. 
+ return $state{handle}->push_read (line => $qr_nlnl, $state{read_response}) + if $2 eq 100; + + push @pseudo, + HTTPVersion => $1, + Status => $2, + Reason => $3, + ; - # get handle - $state{handle} = new AnyEvent::Handle - fh => $state{fh}, - peername => $rhost, - tls_ctx => $arg{tls_ctx}, - # these need to be reconfigured on keepalive handles - timeout => $timeout, - on_error => sub { - %state = (); - $cb->(undef, { @pseudo, Status => 599, Reason => $_[2] }); - }, - on_eof => sub { - %state = (); - $cb->(undef, { @pseudo, Status => 599, Reason => "Unexpected end-of-file" }); - }, - ; + my $hdr = _parse_hdr + or return _error %state, $cb, { @pseudo, Status => 599, Reason => "Garbled response headers" }; - # limit the number of persistent connections - # keepalive not yet supported -# if ($KA_COUNT{$_[1]} < $MAX_PERSISTENT_PER_HOST) { -# ++$KA_COUNT{$_[1]}; -# $state{handle}{ka_count_guard} = AnyEvent::Util::guard { -# --$KA_COUNT{$_[1]} -# }; -# $hdr{connection} = "keep-alive"; -# } else { -# delete $hdr{connection}; -# } - - $state{handle}->starttls ("connect") if $rscheme eq "https"; - - # handle actual, non-tunneled, request - my $handle_actual_request = sub { - $state{handle}->starttls ("connect") if $uscheme eq "https" && !exists $state{handle}{tls}; - - # send request - $state{handle}->push_write ( - "$method $rpath HTTP/1.1\015\012" - . (join "", map "\u$_: $hdr{$_}\015\012", grep defined $hdr{$_}, keys %hdr) - . "\015\012" - . (delete $arg{body}) - ); + %hdr = (%$hdr, @pseudo); + } - # return if error occured during push_write() - return unless %state; + # redirect handling + # microsoft and other shitheads don't give a shit for following standards, + # try to support some common forms of broken Location headers. 
+ if ($hdr{location} !~ /^(?: $ | [^:\/?\#]+ : )/x) { + $hdr{location} =~ s/^\.\/+//; + + my $url = "$rscheme://$uhost:$uport"; + + unless ($hdr{location} =~ s/^\///) { + $url .= $upath; + $url =~ s/\/[^\/]*$//; + } - %hdr = (); # reduce memory usage, save a kitten, also make it possible to re-use + $hdr{location} = "$url/$hdr{location}"; + } - # status line and headers - $state{handle}->push_read (line => $qr_nlnl, sub { - my $keepalive = pop; + my $redirect; - for ("$_[1]") { - y/\015//d; # weed out any \015, as they show up in the weirdest of places. + if ($recurse) { + my $status = $hdr{Status}; - /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\012]*) )? \012/igxc - or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Invalid server response" })); + # industry standard is to redirect POST as GET for + # 301, 302 and 303, in contrast to HTTP/1.0 and 1.1. + # also, the UA should ask the user for 301 and 307 and POST, + # industry standard seems to be to simply follow. + # we go with the industry standard. + if ($status == 301 or $status == 302 or $status == 303) { + # HTTP/1.1 is unclear on how to mutate the method + $method = "GET" unless $method eq "HEAD"; + $redirect = 1; + } elsif ($status == 307) { + $redirect = 1; + } + } - push @pseudo, - HTTPVersion => $1, - Status => $2, - Reason => $3, - ; + my $finish = sub { # ($data, $err_status, $err_reason[, $keepalive]) + if ($state{handle}) { + # handle keepalive + if ( + $keepalive + && $_[3] + && ($hdr{HTTPVersion} < 1.1 + ? 
$hdr{connection} =~ /\bkeep-?alive\b/i + : $hdr{connection} !~ /\bclose\b/i) + ) { + ka_store $ka_key, delete $state{handle}; + } else { + # no keepalive, destroy the handle + $state{handle}->destroy; + } + } - my $hdr = parse_hdr - or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Garbled response headers" })); + %state = (); - %hdr = (%$hdr, @pseudo); - } + if (defined $_[1]) { + $hdr{OrigStatus} = $hdr{Status}; $hdr{Status} = $_[1]; + $hdr{OrigReason} = $hdr{Reason}; $hdr{Reason} = $_[2]; + } - # redirect handling - # microsoft and other shitheads don't give a shit for following standards, - # try to support some common forms of broken Location headers. - if ($hdr{location} !~ /^(?: $ | [^:\/?\#]+ : )/x) { - $hdr{location} =~ s/^\.\/+//; - - my $url = "$rscheme://$uhost:$uport"; - - unless ($hdr{location} =~ s/^\///) { - $url .= $upath; - $url =~ s/\/[^\/]*$//; - } + # set-cookie processing + if ($arg{cookie_jar}) { + cookie_jar_set_cookie $arg{cookie_jar}, $hdr{"set-cookie"}, $uhost, $hdr{date}; + } - $hdr{location} = "$url/$hdr{location}"; - } + if ($redirect && exists $hdr{location}) { + # we ignore any errors, as it is very common to receive + # Content-Length != 0 but no actual body + # we also access %hdr, as $_[1] might be an erro + http_request ( + $method => $hdr{location}, + %arg, + recurse => $recurse - 1, + Redirect => [$_[0], \%hdr], + $cb + ); + } else { + $cb->($_[0], \%hdr); + } + }; - my $redirect; + $ae_error = 597; # body phase - if ($recurse) { - my $status = $hdr{Status}; + my $chunked = $hdr{"transfer-encoding"} =~ /\bchunked\b/i; # not quite correct... - # industry standard is to redirect POST as GET for - # 301, 302 and 303, in contrast to http/1.0 and 1.1. - # also, the UA should ask the user for 301 and 307 and POST, - # industry standard seems to be to simply follow. - # we go with the industry standard. 
- if ($status == 301 or $status == 302 or $status == 303) { - # HTTP/1.1 is unclear on how to mutate the method - $method = "GET" unless $method eq "HEAD"; - $redirect = 1; - } elsif ($status == 307) { - $redirect = 1; - } - } + my $len = $chunked ? undef : $hdr{"content-length"}; - my $finish = sub { # ($data, $err_status, $err_reason[, $keepalive]) - $state{handle}->destroy if $state{handle}; - %state = (); - - if (defined $_[1]) { - $hdr{OrigStatus} = $hdr{Status}; $hdr{Status} = $_[1]; - $hdr{OrigReason} = $hdr{Reason}; $hdr{Reason} = $_[2]; - } - - # set-cookie processing - if ($arg{cookie_jar}) { - for ($hdr{"set-cookie"}) { - # parse NAME=VALUE - my @kv; - - while (/\G\s* ([^=;,[:space:]]+) \s*=\s* (?: "((?:[^\\"]+|\\.)*)" | ([^=;,[:space:]]*) )/gcxs) { - my $name = $1; - my $value = $3; - - unless ($value) { - $value = $2; - $value =~ s/\\(.)/$1/gs; - } + # body handling, many different code paths + # - no body expected + # - want_body_handle + # - te chunked + # - 2x length known (with or without on_body) + # - 2x length not known (with or without on_body) + if (!$redirect && $arg{on_header} && !$arg{on_header}(\%hdr)) { + $finish->(undef, 598 => "Request cancelled by on_header"); + } elsif ( + $hdr{Status} =~ /^(?:1..|204|205|304)$/ + or $method eq "HEAD" + or (defined $len && $len == 0) # == 0, not !, because "0 " is true + ) { + # no body + $finish->("", undef, undef, 1); + + } elsif (!$redirect && $arg{want_body_handle}) { + $_[0]->on_eof (undef); + $_[0]->on_error (undef); + $_[0]->on_read (undef); + + $finish->(delete $state{handle}); + + } elsif ($chunked) { + my $cl = 0; + my $body = ""; + my $on_body = $arg{on_body} || sub { $body .= shift; 1 }; + + $state{read_chunk} = sub { + $_[1] =~ /^([0-9a-fA-F]+)/ + or $finish->(undef, $ae_error => "Garbled chunked transfer encoding"); + + my $len = hex $1; + + if ($len) { + $cl += $len; + + $_[0]->push_read (chunk => $len, sub { + $on_body->($_[1], \%hdr) + or return $finish->(undef, 598 => "Request 
cancelled by on_body"); + + $_[0]->push_read (line => sub { + length $_[1] + and return $finish->(undef, $ae_error => "Garbled chunked transfer encoding"); + $_[0]->push_read (line => $state{read_chunk}); + }); + }); + } else { + $hdr{"content-length"} ||= $cl; - push @kv, $name => $value; + $_[0]->push_read (line => $qr_nlnl, sub { + if (length $_[1]) { + for ("$_[1]") { + y/\015//d; # weed out any \015, as they show up in the weirdest of places. - last unless /\G\s*;/gc; - } + my $hdr = _parse_hdr + or return $finish->(undef, $ae_error => "Garbled response trailers"); - last unless @kv; + %hdr = (%hdr, %$hdr); + } + } - my $name = shift @kv; - my %kv = (value => shift @kv, @kv); + $finish->($body, undef, undef, 1); + }); + } + }; - my $cdom; - my $cpath = (delete $kv{path}) || "/"; + $_[0]->push_read (line => $state{read_chunk}); - if (exists $kv{domain}) { - $cdom = delete $kv{domain}; - - $cdom =~ s/^\.?/./; # make sure it starts with a "." - - next if $cdom =~ /\.$/; - - # this is not rfc-like and not netscape-like. go figure. - my $ndots = $cdom =~ y/.//; - next if $ndots < ($cdom =~ /\.[^.][^.]\.[^.][^.]$/ ? 
3 : 2); - } else { - $cdom = $uhost; - } - - # store it - $arg{cookie_jar}{version} = 1; - $arg{cookie_jar}{$cdom}{$cpath}{$name} = \%kv; + } elsif ($arg{on_body}) { + if (defined $len) { + $_[0]->on_read (sub { + $len -= length $_[0]{rbuf}; + + $arg{on_body}(delete $_[0]{rbuf}, \%hdr) + or return $finish->(undef, 598 => "Request cancelled by on_body"); + + $len > 0 + or $finish->("", undef, undef, 1); + }); + } else { + $_[0]->on_eof (sub { + $finish->(""); + }); + $_[0]->on_read (sub { + $arg{on_body}(delete $_[0]{rbuf}, \%hdr) + or $finish->(undef, 598 => "Request cancelled by on_body"); + }); + } + } else { + $_[0]->on_eof (undef); - redo if /\G\s*,/gc; - } - } + if (defined $len) { + $_[0]->on_read (sub { + $finish->((substr delete $_[0]{rbuf}, 0, $len, ""), undef, undef, 1) + if $len <= length $_[0]{rbuf}; + }); + } else { + $_[0]->on_error (sub { + ($! == Errno::EPIPE || !$!) + ? $finish->(delete $_[0]{rbuf}) + : $finish->(undef, $ae_error => $_[2]); + }); + $_[0]->on_read (sub { }); + } + } + }; - if ($redirect && exists $hdr{location}) { - # we ignore any errors, as it is very common to receive - # Content-Length != 0 but no actual body - # we also access %hdr, as $_[1] might be an erro - http_request ( - $method => $hdr{location}, - %arg, - recurse => $recurse - 1, - Redirect => [$_[0], \%hdr], - $cb); - } else { - $cb->($_[0], \%hdr); - } - }; - - my $len = $hdr{"content-length"}; - - if (!$redirect && $arg{on_header} && !$arg{on_header}(\%hdr)) { - $finish->(undef, 598 => "Request cancelled by on_header"); - } elsif ( - $hdr{Status} =~ /^(?:1..|204|205|304)$/ - or $method eq "HEAD" - or (defined $len && !$len) - ) { - # no body - $finish->("", undef, undef, 1); - } else { - # body handling, many different code paths - # - no body expected - # - want_body_handle - # - te chunked - # - 2x length known (with or without on_body) - # - 2x length not known (with or without on_body) - if (!$redirect && $arg{want_body_handle}) { - $_[0]->on_eof (undef); - 
$_[0]->on_error (undef); - $_[0]->on_read (undef); - - $finish->(delete $state{handle}); - - } elsif ($hdr{"transfer-encoding"} =~ /chunked/) { - my $body = undef; - my $on_body = $arg{on_body} || sub { $body .= shift; 1 }; - - $_[0]->on_error (sub { $finish->(undef, 599 => $_[2]) }); - - my $read_chunk; $read_chunk = sub { - warn $_[1];#d# - $_[1] =~ /^([0-9a-fA-F]+)/ - or $finish->(undef, 599 => "Garbled chunked transfer encoding"); - - my $len = hex $1; - - if ($len) { - $_[0]->push_read (chunk => hex $1, sub { - $on_body->($_[1], \%hdr) - or return $finish->(undef, 598 => "Request cancelled by on_body"); - - $_[0]->push_read (line => sub { - length $_[1] - and return $finish->(undef, 599 => "Garbled chunked transfer encoding"); - $_[0]->push_read (line => $read_chunk); - }); - }); - } else { - $_[0]->push_read (line => $qr_nlnl, sub { - if (length $_[1]) { - for ("$_[1]") { - y/\015//d; # weed out any \015, as they show up in the weirdest of places. - - my $hdr = parse_hdr - or return $finish->(undef, 599 => "Garbled response trailers"); - - %hdr = (%hdr, %$hdr); - } - } + # if keepalive is enabled, then the server closing the connection + # before a response can happen legally - we retry on idempotent methods. 
+ if ($keptalive && $idempotent) { + my $old_eof = $hdl->{on_eof}; + $hdl->{on_eof} = sub { + _destroy_state %state; + + http_request ( + $method => $url, + %arg, + keepalive => 0, + $cb + ); + }; + $hdl->on_read (sub { + return unless %state; - $finish->($body, undef, undef, 1); - }); - } - }; + # as soon as we receive something, a connection close + # once more becomes a hard error + $hdl->{on_eof} = $old_eof; + $hdl->push_read (line => $qr_nlnl, $state{read_response}); + }); + } else { + $hdl->push_read (line => $qr_nlnl, $state{read_response}); + } + }; - $_[0]->push_read (line => $read_chunk); + my $prepare_handle = sub { + my ($hdl) = $state{handle}; - } elsif ($arg{on_body}) { - $_[0]->on_error (sub { $finish->(undef, 599 => $_[2]) }); + $hdl->timeout ($timeout); + $hdl->on_error (sub { + _error %state, $cb, { @pseudo, Status => $ae_error, Reason => $_[2] }; + }); + $hdl->on_eof (sub { + _error %state, $cb, { @pseudo, Status => $ae_error, Reason => "Unexpected end-of-file" }; + }); + }; - if ($len) { - $_[0]->on_read (sub { - $len -= length $_[0]{rbuf}; - - $arg{on_body}(delete $_[0]{rbuf}, \%hdr) - or return $finish->(undef, 598 => "Request cancelled by on_body"); - - $len > 0 - or $finish->("", undef, undef, 1); - }); - } else { - $_[0]->on_eof (sub { - $finish->(""); - }); - $_[0]->on_read (sub { - $arg{on_body}(delete $_[0]{rbuf}, \%hdr) - or $finish->(undef, 598 => "Request cancelled by on_body"); - }); - } - } else { - $_[0]->on_eof (undef); + # connected to proxy (or origin server) + my $connect_cb = sub { + my $fh = shift + or return _error %state, $cb, { @pseudo, Status => $ae_error, Reason => "$!" 
}; + + return unless delete $state{connect_guard}; + + # get handle + $state{handle} = new AnyEvent::Handle + %{ $arg{handle_params} }, + fh => $fh, + peername => $uhost, + tls_ctx => $arg{tls_ctx}, + ; + + $prepare_handle->(); + + #$state{handle}->starttls ("connect") if $rscheme eq "https"; + + # now handle proxy-CONNECT method + if ($proxy && $uscheme eq "https") { + # oh dear, we have to wrap it into a connect request + + # maybe re-use $uauthority with patched port? + $state{handle}->push_write ("CONNECT $uhost:$uport HTTP/1.0\015\012\015\012"); + $state{handle}->push_read (line => $qr_nlnl, sub { + $_[1] =~ /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\015\012]*) )?/ix + or return _error %state, $cb, { @pseudo, Status => 599, Reason => "Invalid proxy connect response ($_[1])" }; + + if ($2 == 200) { + $rpath = $upath; + $handle_actual_request->(); + } else { + _error %state, $cb, { @pseudo, Status => $2, Reason => $3 }; + } + }); + } else { + $handle_actual_request->(); + } + }; - if ($len) { - $_[0]->on_error (sub { $finish->(undef, 599 => $_[2]) }); - $_[0]->on_read (sub { - $finish->((substr delete $_[0]{rbuf}, 0, $len, ""), undef, undef, 1) - if $len <= length $_[0]{rbuf}; - }); - } else { - $_[0]->on_error (sub { - ($! == Errno::EPIPE || !$!) - ? $finish->(delete $_[0]{rbuf}) - : $finish->(undef, 599 => $_[2]); - }); - $_[0]->on_read (sub { }); - } - } - } - }); - }; + _get_slot $uhost, sub { + $state{slot_guard} = shift; - # now handle proxy-CONNECT method - if ($proxy && $uscheme eq "https") { - # oh dear, we have to wrap it into a connect request - - # maybe re-use $uauthority with patched port? 
- $state{handle}->push_write ("CONNECT $uhost:$uport HTTP/1.0\015\012Host: $uhost\015\012\015\012"); - $state{handle}->push_read (line => $qr_nlnl, sub { - $_[1] =~ /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\015\012]*) )?/ix - or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Invalid proxy connect response ($_[1])" })); - - if ($2 == 200) { - $rpath = $upath; - &$handle_actual_request; - } else { - %state = (); - $cb->(undef, { @pseudo, Status => $2, Reason => $3 }); - } - }); - } else { - &$handle_actual_request; - } - }; + return unless $state{connect_guard}; - my $tcp_connect = $arg{tcp_connect} - || do { require AnyEvent::Socket; \&AnyEvent::Socket::tcp_connect }; + # try to use an existing keepalive connection, but only if we, ourselves, plan + # on a keepalive request (in theory, this should be a separate config option). + if ($keepalive && $KA_CACHE{$ka_key}) { + $keptalive = 1; + $state{handle} = ka_fetch $ka_key; + $prepare_handle->(); + $handle_actual_request->(); - $state{connect_guard} = $tcp_connect->($rhost, $rport, $connect_cb, $arg{on_prepare} || sub { $timeout }); + } else { + my $tcp_connect = $arg{tcp_connect} + || do { require AnyEvent::Socket; \&AnyEvent::Socket::tcp_connect }; + $state{connect_guard} = $tcp_connect->($rhost, $rport, $connect_cb, $arg{on_prepare} || sub { $timeout }); + } }; - defined wantarray && AnyEvent::Util::guard { %state = () } + defined wantarray && AnyEvent::Util::guard { _destroy_state %state } } sub http_get($@) { @@ -860,7 +1164,7 @@ hostnames. The latter is a simple stub resolver and does no caching on its own. If you want DNS caching, you currently have to provide your own default resolver (by storing a suitable resolver object in -C<$AnyEvent::DNS::RESOLVER>). +C<$AnyEvent::DNS::RESOLVER>) or your own C callback. =head2 GLOBAL FUNCTIONS AND VARIABLES @@ -869,11 +1173,53 @@ =item AnyEvent::HTTP::set_proxy "proxy-url" Sets the default proxy server to use. 
The proxy-url must begin with a
-string of the form C (optionally C), croaks
-otherwise.
+string of the form C, croaks otherwise.
 
 To clear an already-set proxy, use C.
 
+When AnyEvent::HTTP is loaded for the first time it will query the
+default proxy from the operating system, currently by looking at
+C<$ENV{http_proxy}>.
+
+=item AnyEvent::HTTP::cookie_jar_expire $jar[, $session_end]
+
+Remove all cookies from the cookie jar that have expired. If
+C<$session_end> is given and true, then additionally remove all session
+cookies.
+
+You should call this function (with a true C<$session_end>) before you
+save cookies to disk, and you should call this function after loading them
+again. If you have a long-running program you can additionally call this
+function from time to time.
+
+A cookie jar is initially an empty hash-reference that is managed by this
+module. Its format is subject to change, but currently it is like this:
+
+The key C has to contain C<1>, otherwise the hash gets
+emptied. All other keys are hostnames or IP addresses pointing to
+hash-references. The key for these inner hash references is the
+server path for which this cookie is meant, and the values are again
+hash-references. The keys of those hash-references are the cookie names, and
+the value, you guessed it, is another hash-reference, this time with the
+key-value pairs from the cookie, except for C and C,
+which have been replaced by a C<_expires> key that contains the cookie
+expiry timestamp.
+
+Here is an example of a cookie jar with a single cookie, so you have a
+chance of understanding the above paragraph:
+
+   {
+      version => 1,
+      "10.0.0.1" => {
+         "/" => {
+            "mythweb_id" => {
+               _expires => 1293917923,
+               value => "ooRung9dThee3ooyXooM1Ohm",
+            },
+         },
+      },
+   }
+
 =item $date = AnyEvent::HTTP::format_date $timestamp
 
 Takes a POSIX timestamp (seconds since the epoch) and formats it as a HTTP
@@ -881,13 +1227,18 @@
 
 =item $timestamp = AnyEvent::HTTP::parse_date $date
 
-Takes a HTTP Date (RFC 2616) and returns the corresponding POSIX
+Takes a HTTP Date (RFC 2616) or a Cookie date (netscape cookie spec) or a
+bunch of minor variations of those, and returns the corresponding POSIX
 timestamp, or C if the date cannot be parsed.
 
 =item $AnyEvent::HTTP::MAX_RECURSE
 
 The default value for the C request parameter (default: C<10>).
 
+=item $AnyEvent::HTTP::TIMEOUT
+
+The default timeout for connection operations (default: C<300>).
+
 =item $AnyEvent::HTTP::USERAGENT
 
 The default value for the C header (the default is
@@ -897,16 +1248,27 @@
 
 The maximum number of concurrent connections to the same host (identified
 by the hostname). If the limit is exceeded, then the additional requests
-are queued until previous connections are closed.
+are queued until previous connections are closed. Both persistent and
+non-persistent connections are counted in this limit.
 
 The default value for this is C<4>, and it is highly advisable to not
-increase it.
+increase it much.
+
+For comparison: the RFC's recommend 4 non-persistent or 2 persistent
+connections, older browsers used 2, newer ones (such as firefox 3) typically
+use 6, and Opera uses 8 because like, they have the fastest browser and
+give a shit for everybody else on the planet.
+
+=item $AnyEvent::HTTP::PERSISTENT_TIMEOUT
+
+The time after which idle persistent connections get closed by
+AnyEvent::HTTP (default: C<3>).
+
 =item $AnyEvent::HTTP::ACTIVE
 
 The number of active connections.
This is not the number of currently
 running requests, but the number of currently open and non-idle TCP
-connections. This number of can be useful for load-leveling.
+connections. This number can be useful for load-leveling.
 
 =back
 
@@ -931,15 +1293,17 @@
 
    my ($d, $m, $y, $H, $M, $S);
 
-   if ($date =~ /^[A-Z][a-z][a-z], ([0-9][0-9]) ([A-Z][a-z][a-z]) ([0-9][0-9][0-9][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) GMT$/) {
-      # RFC 822/1123, required by RFC 2616
+   if ($date =~ /^[A-Z][a-z][a-z]+, ([0-9][0-9]?)[\- ]([A-Z][a-z][a-z])[\- ]([0-9][0-9][0-9][0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) GMT$/) {
+      # RFC 822/1123, required by RFC 2616 (with " ")
+      # cookie dates (with "-")
+
       ($d, $m, $y, $H, $M, $S) = ($1, $2, $3, $4, $5, $6);
 
-   } elsif ($date =~ /^[A-Z][a-z]+, ([0-9][0-9])-([A-Z][a-z][a-z])-([0-9][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) GMT$/) {
+   } elsif ($date =~ /^[A-Z][a-z][a-z]+, ([0-9][0-9]?)-([A-Z][a-z][a-z])-([0-9][0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) GMT$/) {
       # RFC 850
       ($d, $m, $y, $H, $M, $S) = ($1, $2, $3 < 69 ? $3 + 2000 : $3 + 1900, $4, $5, $6);
 
-   } elsif ($date =~ /^[A-Z][a-z][a-z] ([A-Z][a-z][a-z]) ([0-9 ][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([0-9][0-9][0-9][0-9])$/) {
+   } elsif ($date =~ /^[A-Z][a-z][a-z]+ ([A-Z][a-z][a-z]) ([0-9 ]?[0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) ([0-9][0-9][0-9][0-9])$/) {
       # ISO C's asctime
       ($d, $m, $y, $H, $M, $S) = ($2, $1, $6, $3, $4, $5);
    }
 
@@ -957,7 +1321,7 @@
 
 sub set_proxy($) {
    if (length $_[0]) {
-      $_[0] =~ m%^(https?):// ([^:/]+) (?: : (\d*) )?%ix
+      $_[0] =~ m%^(http):// ([^:/]+) (?: : (\d*) )?%ix
         or Carp::croak "$_[0]: invalid proxy URL";
       $PROXY = [$2, $3 || 3128, $1]
    } else {
@@ -970,7 +1334,109 @@
    set_proxy $ENV{http_proxy};
 };
 
-=head2 SOCKS PROXIES
+=head2 SHOWCASE
+
+This section contains some more elaborate "real-world" examples or code
+snippets.
+ +=head2 HTTP/1.1 FILE DOWNLOAD + +Downloading files with HTTP can be quite tricky, especially when something +goes wrong and you want to resume. + +Here is a function that initiates and resumes a download. It uses the +last modified time to check for file content changes, and works with many +HTTP/1.0 servers as well, and usually falls back to a complete re-download +on older servers. + +It calls the completion callback with either C, which means a +nonretryable error occured, C<0> when the download was partial and should +be retried, and C<1> if it was successful. + + use AnyEvent::HTTP; + + sub download($$$) { + my ($url, $file, $cb) = @_; + + open my $fh, "+<", $file + or die "$file: $!"; + + my %hdr; + my $ofs = 0; + + warn stat $fh; + warn -s _; + if (stat $fh and -s _) { + $ofs = -s _; + warn "-s is ", $ofs;#d# + $hdr{"if-unmodified-since"} = AnyEvent::HTTP::format_date +(stat _)[9]; + $hdr{"range"} = "bytes=$ofs-"; + } + + http_get $url, + headers => \%hdr, + on_header => sub { + my ($hdr) = @_; + + if ($hdr->{Status} == 200 && $ofs) { + # resume failed + truncate $fh, $ofs = 0; + } + + sysseek $fh, $ofs, 0; + + 1 + }, + on_body => sub { + my ($data, $hdr) = @_; + + if ($hdr->{Status} =~ /^2/) { + length $data == syswrite $fh, $data + or return; # abort on write errors + } + + 1 + }, + sub { + my (undef, $hdr) = @_; + + my $status = $hdr->{Status}; + + if (my $time = AnyEvent::HTTP::parse_date $hdr->{"last-modified"}) { + utime $fh, $time, $time; + } + + if ($status == 200 || $status == 206 || $status == 416) { + # download ok || resume ok || file already fully downloaded + $cb->(1, $hdr); + + } elsif ($status == 412) { + # file has changed while resuming, delete and retry + unlink $file; + $cb->(0, $hdr); + + } elsif ($status == 500 or $status == 503 or $status =~ /^59/) { + # retry later + $cb->(0, $hdr); + + } else { + $cb->(undef, $hdr); + } + } + ; + } + + download "http://server/somelargefile", "/tmp/somelargefile", sub { + if ($_[0]) { + print 
"OK!\n"; + } elsif (defined $_[0]) { + print "please retry later\n"; + } else { + print "ERROR\n"; + } + }; + +=head3 SOCKS PROXIES Socks proxies are not directly supported by AnyEvent::HTTP. You can compile your perl to support socks, or use an external program such as