--- AnyEvent-HTTP/HTTP.pm 2010/12/31 06:18:54 1.67
+++ AnyEvent-HTTP/HTTP.pm 2011/01/02 08:51:53 1.87
@@ -38,8 +38,7 @@
 package AnyEvent::HTTP;
-use strict;
-no warnings;
+use common::sense;
 use Errno ();
@@ -96,8 +95,8 @@
 destroyed before the callback is called, the request will be cancelled.
 The callback will be called with the response body data as first argument
-(or C if an error occured), and a hash-ref with response headers as
-second argument.
+(or C<undef> if an error occurred), and a hash-ref with response headers
+(and trailers) as second argument.
 All the headers in that hash are lowercased. In addition to the response headers, the "pseudo-headers" (uppercase to avoid clashing with possible
@@ -124,9 +123,23 @@
 joined together with a comma (C<,>), as per the HTTP spec.
 If an internal error occurs, such as not being able to resolve a hostname,
-then C<$data> will be C, C<< $headers->{Status} >> will be C<59x>
-(usually C<599>) and the C pseudo-header will contain an error
-message.
+then C<$data> will be C<undef>, C<< $headers->{Status} >> will be
+C<590>-C<599> and the C<Reason> pseudo-header will contain an error
+message. Currently the following status codes are used:
+
+=over 4
+
+=item 595 - errors during connection establishment, proxy handshake.
+
+=item 596 - errors during TLS negotiation, request sending and header processing.
+
+=item 597 - errors during body receiving or processing.
+
+=item 598 - user aborted request via C<on_header> or C<on_body>.
+
+=item 599 - other, usually nonretryable, errors (garbled URL etc.).
+
+=back
 A typical callback might look like this:
@@ -152,11 +165,11 @@
 =item headers => hashref
-The request headers to use. Currently, C may provide its
-own C, C, C and C headers
-and will provide defaults for C and C (this can be
-suppressed by using C for these headers in which case they won't be
-sent at all).
+The request headers to use. Currently, C<http_request> may provide its own
+C<Host>, C<Content-Length>, C<Connection> and C<Cookie> headers and
+will provide defaults at least for C<TE>, C<Referer> and C<User-Agent>
+(this can be suppressed by using C<undef> for these headers in which case
+they won't be sent at all).
 =item timeout => $seconds
@@ -176,7 +189,7 @@
 =item body => $string
-The request body, usually empty. Will be-sent as-is (future versions of
+The request body, usually empty. Will be sent as-is (future versions of
 this module might offer more options).
 =item cookie_jar => $hash_ref
@@ -184,16 +197,22 @@
 Passing this parameter enables (simplified) cookie-processing, loosely based on the original netscape specification.
-The C<$hash_ref> must be an (initially empty) hash reference which will
-get updated automatically. It is possible to save the cookie_jar to
-persistent storage with something like JSON or Storable, but this is not
-recommended, as expiry times are currently being ignored.
-
-Note that this cookie implementation is not of very high quality, nor
-meant to be complete. If you want complete cookie management you have to
-do that on your own. C is meant as a quick fix to get some
-cookie-using sites working. Cookies are a privacy disaster, do not use
-them unless required to.
+The C<$hash_ref> must be an (initially empty) hash reference which
+will get updated automatically. It is possible to save the cookie jar
+to persistent storage with something like JSON or Storable - see the
+C<AnyEvent::HTTP::cookie_jar_expire> function if you wish to remove
+expired or session-only cookies, and also for documentation on the format
+of the cookie jar.
+
+Note that this cookie implementation is not meant to be complete. If
+you want complete cookie management you have to do that on your own. C<cookie_jar> is meant as a quick fix to get most cookie-using sites working. Cookies are a privacy disaster, do not use them unless required to.
+
+When cookie processing is enabled, the C<Cookie> and C<Set-Cookie>
+headers will be set and handled by this module, otherwise they will be
+left untouched.
 =item tls_ctx => $scheme | $tls_ctx
@@ -242,6 +261,10 @@
 content, which, if it is supposed to be rare, can be faster than first doing a C<HEAD> request.
+The downside is that cancelling the request makes it impossible to re-use
+the connection. Also, the C<on_header> callback will not receive any
+trailer (headers sent after the response body).
+
 Example: cancel the request unless the content-type is "text/html".
   on_header => sub {
@@ -258,6 +281,9 @@
 or false, in which case AnyEvent::HTTP will cancel the download (and call the completion callback with an error code of C<598>).
+The downside to cancelling the request is that it makes it impossible to
+re-use the connection.
+
 This callback is useful when the data is too large to be held in memory (so the callback writes it to a file) or when only some information should be extracted, or when the body should be processed incrementally.
@@ -292,14 +318,15 @@
 =back
-Example: make a simple HTTP GET request for http://www.nethype.de/
+Example: do a simple HTTP GET request for http://www.nethype.de/ and print
+the response body.
   http_request GET => "http://www.nethype.de/", sub {
      my ($body, $hdr) = @_;
     print "$body\n";
  };
-Example: make a HTTP HEAD request on https://www.google.com/, use a
+Example: do an HTTP HEAD request on https://www.google.com/, use a
 timeout of 30 seconds.
   http_request
@@ -312,7 +339,7 @@
      }
   ;
-Example: make another simple HTTP GET request, but immediately try to
+Example: do another simple HTTP GET request, but immediately try to
 cancel it.
my $request = http_request GET => "http://www.nethype.de/", sub { @@ -354,6 +381,162 @@ _slot_schedule $_[0]; } +############################################################################# + +# expire cookies +sub cookie_jar_expire($;$) { + my ($jar, $session_end) = @_; + + %$jar = () if $jar->{version} != 1; + + my $anow = AE::now; + + while (my ($chost, $paths) = each %$jar) { + next unless ref $paths; + + while (my ($cpath, $cookies) = each %$paths) { + while (my ($cookie, $kv) = each %$cookies) { + if (exists $kv->{_expires}) { + delete $cookies->{$cookie} + if $anow > $kv->{_expires}; + } elsif ($session_end) { + delete $cookies->{$cookie}; + } + } + + delete $paths->{$cpath} + unless %$cookies; + } + + delete $jar->{$chost} + unless %$paths; + } +} + +# extract cookies from jar +sub cookie_jar_extract($$$$) { + my ($jar, $uscheme, $uhost, $upath) = @_; + + %$jar = () if $jar->{version} != 1; + + my @cookies; + + while (my ($chost, $paths) = each %$jar) { + next unless ref $paths; + + if ($chost =~ /^\./) { + next unless $chost eq substr $uhost, -length $chost; + } elsif ($chost =~ /\./) { + next unless $chost eq $uhost; + } else { + next; + } + + while (my ($cpath, $cookies) = each %$paths) { + next unless $cpath eq substr $upath, 0, length $cpath; + + while (my ($cookie, $kv) = each %$cookies) { + next if $uscheme ne "https" && exists $kv->{secure}; + + if (exists $kv->{_expires} and AE::now > $kv->{_expires}) { + delete $cookies->{$cookie}; + next; + } + + my $value = $kv->{value}; + + if ($value =~ /[=;,[:space:]]/) { + $value =~ s/([\\"])/\\$1/g; + $value = "\"$value\""; + } + + push @cookies, "$cookie=$value"; + } + } + } + + \@cookies +} + +# parse set_cookie header into jar +sub cookie_jar_set_cookie($$$$) { + my ($jar, $set_cookie, $uhost, $date) = @_; + + my $anow = int AE::now; + my $snow; # server-now + + for ($set_cookie) { + # parse NAME=VALUE + my @kv; + + # expires is not http-compliant in the original cookie-spec, + # we support the official date format and some extensions + while ( + m{ + \G\s* + (?: + expires \s*=\s* ([A-Z][a-z][a-z]+,\ [^,;]+) + | ([^=;,[:space:]]+) (?: \s*=\s* (?: "((?:[^\\"]+|\\.)*)" | ([^=;,[:space:]]*) ) )? + ) + }gcxsi + ) { + my $name = $2; + my $value = $4; + + if (defined $1) { + # expires + $name = "expires"; + $value = $1; + } elsif (defined $3) { + # quoted + $value = $3; + $value =~ s/\\(.)/$1/gs; + } + + push @kv, lc $name, $value; + + last unless /\G\s*;/gc; + } + + last unless @kv; + + my $name = shift @kv; + my %kv = (value => shift @kv, @kv); + + if (exists $kv{"max-age"}) { + $kv{_expires} = $anow + delete $kv{"max-age"}; + } elsif (exists $kv{expires}) { + $snow ||= parse_date ($date) || $anow; + $kv{_expires} = $anow + (parse_date (delete $kv{expires}) - $snow); + } else { + delete $kv{_expires}; + } + + my $cdom; + my $cpath = (delete $kv{path}) || "/"; + + if (exists $kv{domain}) { + $cdom = delete $kv{domain}; + + $cdom =~ s/^\.?/./; # make sure it starts with a "." + + next if $cdom =~ /\.$/; + + # this is not rfc-like and not netscape-like. go figure. + my $ndots = $cdom =~ y/.//; + next if $ndots < ($cdom =~ /\.[^.][^.]\.[^.][^.]$/ ? 3 : 2); + } else { + $cdom = $uhost; + } + + # store it + $jar->{version} = 1; + $jar->{lc $cdom}{$cpath}{$name} = \%kv; + + redo if /\G\s*,/gc; + } +} + # continue to parse $_ for headers and place them into the arg sub parse_hdr() { my %hdr; @@ -425,7 +608,7 @@ $uauthority =~ /^(?: .*\@ )? 
([^\@:]+) (?: : (\d+) )?$/x or return $cb->(undef, { @pseudo, Status => 599, Reason => "Unparsable URL" }); - my $uhost = $1; + my $uhost = lc $1; $uport = $2 if defined $2; $hdr{host} = defined $2 ? "$uhost:$2" : "$uhost" @@ -438,33 +621,10 @@ # cookie processing if (my $jar = $arg{cookie_jar}) { - %$jar = () if $jar->{version} != 1; - - my @cookie; - - while (my ($chost, $v) = each %$jar) { - if ($chost =~ /^\./) { - next unless $chost eq substr $uhost, -length $chost; - } elsif ($chost =~ /\./) { - next unless $chost eq $uhost; - } else { - next; - } - - while (my ($cpath, $v) = each %$v) { - next unless $cpath eq substr $upath, 0, length $cpath; - - while (my ($k, $v) = each %$v) { - next if $uscheme ne "https" && exists $v->{secure}; - my $value = $v->{value}; - $value =~ s/([\\"])/\\$1/g; - push @cookie, "$k=\"$value\""; - } - } - } - - $hdr{cookie} = join "; ", @cookie - if @cookie; + my $cookies = cookie_jar_extract $jar, $uscheme, $uhost, $upath; + + $hdr{cookie} = join "; ", @$cookies + if @$cookies; } my ($rhost, $rport, $rscheme, $rpath); # request host, port, path @@ -477,6 +637,9 @@ # don't support https requests over https-proxy transport, # can't be done with tls as spec'ed, unless you double-encrypt. $rscheme = "http" if $uscheme eq "https" && $rscheme eq "https"; + + $rhost = lc $rhost; + $rscheme = lc $rscheme; } else { ($rhost, $rport, $rscheme, $rpath) = ($uhost, $uport, $uscheme, $upath); } @@ -488,8 +651,8 @@ $hdr{"content-length"} = length $arg{body} if length $arg{body} || $method ne "GET"; - $hdr{connection} = "close TE"; - $hdr{te} = "trailers" unless exists $hdr{te}; + $hdr{connection} = "close Te"; #1.1 + $hdr{te} = "trailers" unless exists $hdr{te}; #1.1 my %state = (connect_guard => 1); @@ -498,329 +661,295 @@ return unless $state{connect_guard}; - my $connect_cb = sub { - $state{fh} = shift - or do { - my $err = "$!"; - %state = (); - return $cb->(undef, { @pseudo, Status => 599, Reason => $err }); - }; - - pop; # free memory, save a tree - - return unless delete $state{connect_guard}; - - # get handle - $state{handle} = new AnyEvent::Handle - fh => $state{fh}, - peername => $rhost, - tls_ctx => $arg{tls_ctx}, - # these need to be reconfigured on keepalive handles - timeout => $timeout, - on_error => sub { - %state = (); - $cb->(undef, { @pseudo, Status => 599, Reason => $_[2] }); - }, - on_eof => sub { - %state = (); - $cb->(undef, { @pseudo, Status => 599, Reason => "Unexpected end-of-file" }); - }, - ; + my $ae_error = 595; # connecting - # limit the number of persistent connections - # keepalive not yet supported -# if ($KA_COUNT{$_[1]} < $MAX_PERSISTENT_PER_HOST) { -# ++$KA_COUNT{$_[1]}; -# $state{handle}{ka_count_guard} = AnyEvent::Util::guard { -# --$KA_COUNT{$_[1]} -# }; -# $hdr{connection} = "keep-alive"; -# } else { -# delete $hdr{connection}; -# } + # handle actual, non-tunneled, request + my $handle_actual_request = sub { + $ae_error = 596; # request phase + + $state{handle}->starttls ("connect") if $uscheme eq "https" && !exists $state{handle}{tls}; + + # send request + $state{handle}->push_write ( + "$method $rpath HTTP/1.1\015\012" + . (join "", map "\u$_: $hdr{$_}\015\012", grep defined $hdr{$_}, keys %hdr) + . "\015\012" + . 
(delete $arg{body}) + ); + + # return if error occured during push_write() + return unless %state; + + %hdr = (); # reduce memory usage, save a kitten, also make it possible to re-use + + # status line and headers + $state{read_response} = sub { + for ("$_[1]") { + y/\015//d; # weed out any \015, as they show up in the weirdest of places. + + /^HTTP\/0*([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\012]*) )? \012/gxci + or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Invalid server response" })); + + # 100 Continue handling + # should not happen as we don't send expect: 100-continue, + # but we handle it just in case. + # since we send the request body regardless, if we get an error + # we are out of-sync, which we currently do NOT handle correctly. + return $state{handle}->push_read (line => $qr_nlnl, $state{read_response}) + if $2 eq 100; + + push @pseudo, + HTTPVersion => $1, + Status => $2, + Reason => $3, + ; - $state{handle}->starttls ("connect") if $rscheme eq "https"; + my $hdr = parse_hdr + or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Garbled response headers" })); - # handle actual, non-tunneled, request - my $handle_actual_request = sub { - $state{handle}->starttls ("connect") if $uscheme eq "https" && !exists $state{handle}{tls}; - - # send request - $state{handle}->push_write ( - "$method $rpath HTTP/1.1\015\012" - . (join "", map "\u$_: $hdr{$_}\015\012", grep defined $hdr{$_}, keys %hdr) - . "\015\012" - . (delete $arg{body}) - ); + %hdr = (%$hdr, @pseudo); + } - # return if error occured during push_write() - return unless %state; + # redirect handling + # microsoft and other shitheads don't give a shit for following standards, + # try to support some common forms of broken Location headers. + if ($hdr{location} !~ /^(?: $ | [^:\/?\#]+ : )/x) { + $hdr{location} =~ s/^\.\/+//; + + my $url = "$rscheme://$uhost:$uport"; + + unless ($hdr{location} =~ s/^\///) { + $url .= $upath; + $url =~ s/\/[^\/]*$//; + } - %hdr = (); # reduce memory usage, save a kitten, also make it possible to re-use + $hdr{location} = "$url/$hdr{location}"; + } - # status line and headers - $state{handle}->push_read (line => $qr_nlnl, sub { - my $keepalive = pop; + my $redirect; - for ("$_[1]") { - y/\015//d; # weed out any \015, as they show up in the weirdest of places. + if ($recurse) { + my $status = $hdr{Status}; - /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\012]*) )? \012/igxc - or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Invalid server response" })); + # industry standard is to redirect POST as GET for + # 301, 302 and 303, in contrast to HTTP/1.0 and 1.1. + # also, the UA should ask the user for 301 and 307 and POST, + # industry standard seems to be to simply follow. + # we go with the industry standard. 
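            # note: $redirect only records the decision here; the actual
            # redirected request is issued later, inside $finish, after the
            # response body (if any) has been read, by calling http_request
            # again with recurse => $recurse - 1.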
+ if ($status == 301 or $status == 302 or $status == 303) { + # HTTP/1.1 is unclear on how to mutate the method + $method = "GET" unless $method eq "HEAD"; + $redirect = 1; + } elsif ($status == 307) { + $redirect = 1; + } + } - push @pseudo, - HTTPVersion => $1, - Status => $2, - Reason => $3, - ; + my $finish = sub { # ($data, $err_status, $err_reason[, $keepalive]) + my $may_keep_alive = $_[3]; - my $hdr = parse_hdr - or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Garbled response headers" })); + $state{handle}->destroy if $state{handle}; + %state = (); - %hdr = (%$hdr, @pseudo); + if (defined $_[1]) { + $hdr{OrigStatus} = $hdr{Status}; $hdr{Status} = $_[1]; + $hdr{OrigReason} = $hdr{Reason}; $hdr{Reason} = $_[2]; } - # redirect handling - # microsoft and other shitheads don't give a shit for following standards, - # try to support some common forms of broken Location headers. - if ($hdr{location} !~ /^(?: $ | [^:\/?\#]+ : )/x) { - $hdr{location} =~ s/^\.\/+//; - - my $url = "$rscheme://$uhost:$uport"; - - unless ($hdr{location} =~ s/^\///) { - $url .= $upath; - $url =~ s/\/[^\/]*$//; - } - - $hdr{location} = "$url/$hdr{location}"; + # set-cookie processing + if ($arg{cookie_jar}) { + cookie_jar_set_cookie $arg{cookie_jar}, $hdr{"set-cookie"}, $uhost, $hdr{date}; } - my $redirect; - - if ($recurse) { - my $status = $hdr{Status}; - - # industry standard is to redirect POST as GET for - # 301, 302 and 303, in contrast to http/1.0 and 1.1. - # also, the UA should ask the user for 301 and 307 and POST, - # industry standard seems to be to simply follow. - # we go with the industry standard. - if ($status == 301 or $status == 302 or $status == 303) { - # HTTP/1.1 is unclear on how to mutate the method - $method = "GET" unless $method eq "HEAD"; - $redirect = 1; - } elsif ($status == 307) { - $redirect = 1; - } + if ($redirect && exists $hdr{location}) { + # we ignore any errors, as it is very common to receive + # Content-Length != 0 but no actual body + # we also access %hdr, as $_[1] might be an erro + http_request ( + $method => $hdr{location}, + %arg, + recurse => $recurse - 1, + Redirect => [$_[0], \%hdr], + $cb); + } else { + $cb->($_[0], \%hdr); } + }; - my $finish = sub { # ($data, $err_status, $err_reason[, $keepalive]) - $state{handle}->destroy if $state{handle}; - %state = (); - - if (defined $_[1]) { - $hdr{OrigStatus} = $hdr{Status}; $hdr{Status} = $_[1]; - $hdr{OrigReason} = $hdr{Reason}; $hdr{Reason} = $_[2]; - } - - # set-cookie processing - if ($arg{cookie_jar}) { - for ($hdr{"set-cookie"}) { - # parse NAME=VALUE - my @kv; - - while (/\G\s* ([^=;,[:space:]]+) \s*=\s* (?: "((?:[^\\"]+|\\.)*)" | ([^=;,[:space:]]*) )/gcxs) { - my $name = $1; - my $value = $3; - - unless ($value) { - $value = $2; - $value =~ s/\\(.)/$1/gs; - } - - push @kv, $name => $value; + $ae_error = 597; # body phase - last unless /\G\s*;/gc; - } + my $len = $hdr{"content-length"}; - last unless @kv; + # body handling, many different code paths + # - no body expected + # - want_body_handle + # - te chunked + # - 2x length known (with or without on_body) + # - 2x length not known (with or without on_body) + if (!$redirect && $arg{on_header} && !$arg{on_header}(\%hdr)) { + $finish->(undef, 598 => "Request cancelled by on_header"); + } elsif ( + $hdr{Status} =~ /^(?:1..|204|205|304)$/ + or $method eq "HEAD" + or (defined $len && $len == 0) # == 0, not !, because "0 " is true + ) { + # no body + $finish->("", undef, undef, 1); + + } elsif (!$redirect && $arg{want_body_handle}) { + 
$_[0]->on_eof (undef); + $_[0]->on_error (undef); + $_[0]->on_read (undef); + + $finish->(delete $state{handle}); + + } elsif ($hdr{"transfer-encoding"} =~ /\bchunked\b/i) { + my $cl = 0; + my $body = undef; + my $on_body = $arg{on_body} || sub { $body .= shift; 1 }; + + $state{read_chunk} = sub { + $_[1] =~ /^([0-9a-fA-F]+)/ + or $finish->(undef, $ae_error => "Garbled chunked transfer encoding"); + + my $len = hex $1; + + if ($len) { + $cl += $len; + + $_[0]->push_read (chunk => $len, sub { + $on_body->($_[1], \%hdr) + or return $finish->(undef, 598 => "Request cancelled by on_body"); + + $_[0]->push_read (line => sub { + length $_[1] + and return $finish->(undef, $ae_error => "Garbled chunked transfer encoding"); + $_[0]->push_read (line => $state{read_chunk}); + }); + }); + } else { + $hdr{"content-length"} ||= $cl; - my $name = shift @kv; - my %kv = (value => shift @kv, @kv); + $_[0]->push_read (line => $qr_nlnl, sub { + if (length $_[1]) { + for ("$_[1]") { + y/\015//d; # weed out any \015, as they show up in the weirdest of places. - my $cdom; - my $cpath = (delete $kv{path}) || "/"; + my $hdr = parse_hdr + or return $finish->(undef, $ae_error => "Garbled response trailers"); - if (exists $kv{domain}) { - $cdom = delete $kv{domain}; - - $cdom =~ s/^\.?/./; # make sure it starts with a "." - - next if $cdom =~ /\.$/; - - # this is not rfc-like and not netscape-like. go figure. - my $ndots = $cdom =~ y/.//; - next if $ndots < ($cdom =~ /\.[^.][^.]\.[^.][^.]$/ ? 3 : 2); - } else { - $cdom = $uhost; + %hdr = (%hdr, %$hdr); + } } - - # store it - $arg{cookie_jar}{version} = 1; - $arg{cookie_jar}{$cdom}{$cpath}{$name} = \%kv; - - redo if /\G\s*,/gc; - } - } - if ($redirect && exists $hdr{location}) { - # we ignore any errors, as it is very common to receive - # Content-Length != 0 but no actual body - # we also access %hdr, as $_[1] might be an erro - http_request ( - $method => $hdr{location}, - %arg, - recurse => $recurse - 1, - Redirect => [$_[0], \%hdr], - $cb); - } else { - $cb->($_[0], \%hdr); + $finish->($body, undef, undef, 1); + }); } }; - my $len = $hdr{"content-length"}; + $_[0]->push_read (line => $state{read_chunk}); - if (!$redirect && $arg{on_header} && !$arg{on_header}(\%hdr)) { - $finish->(undef, 598 => "Request cancelled by on_header"); - } elsif ( - $hdr{Status} =~ /^(?:1..|204|205|304)$/ - or $method eq "HEAD" - or (defined $len && !$len) - ) { - # no body - $finish->("", undef, undef, 1); + } elsif ($arg{on_body}) { + if (defined $len) { + $_[0]->on_read (sub { + $len -= length $_[0]{rbuf}; + + $arg{on_body}(delete $_[0]{rbuf}, \%hdr) + or return $finish->(undef, 598 => "Request cancelled by on_body"); + + $len > 0 + or $finish->("", undef, undef, 1); + }); } else { - # body handling, many different code paths - # - no body expected - # - want_body_handle - # - te chunked - # - 2x length known (with or without on_body) - # - 2x length not known (with or without on_body) - if (!$redirect && $arg{want_body_handle}) { - $_[0]->on_eof (undef); - $_[0]->on_error (undef); - $_[0]->on_read (undef); - - $finish->(delete $state{handle}); - - } elsif ($hdr{"transfer-encoding"} =~ /chunked/) { - my $body = undef; - my $on_body = $arg{on_body} || sub { $body .= shift; 1 }; - - $_[0]->on_error (sub { $finish->(undef, 599 => $_[2]) }); - - my $read_chunk; $read_chunk = sub { - $_[1] =~ /^([0-9a-fA-F]+)/ - or $finish->(undef, 599 => "Garbled chunked transfer encoding"); - - my $len = hex $1; - - if ($len) { - $_[0]->push_read (chunk => hex $1, sub { - $on_body->($_[1], \%hdr) - or 
return $finish->(undef, 598 => "Request cancelled by on_body"); - - $_[0]->push_read (line => sub { - length $_[1] - and return $finish->(undef, 599 => "Garbled chunked transfer encoding"); - $_[0]->push_read (line => $read_chunk); - }); - }); - } else { - $_[0]->push_read (line => $qr_nlnl, sub { - if (length $_[1]) { - for ("$_[1]") { - y/\015//d; # weed out any \015, as they show up in the weirdest of places. - - my $hdr = parse_hdr - or return $finish->(undef, 599 => "Garbled response trailers"); - - %hdr = (%hdr, %$hdr); - } - } + $_[0]->on_eof (sub { + $finish->(""); + }); + $_[0]->on_read (sub { + $arg{on_body}(delete $_[0]{rbuf}, \%hdr) + or $finish->(undef, 598 => "Request cancelled by on_body"); + }); + } + } else { + $_[0]->on_eof (undef); - $finish->($body, undef, undef, 1); - }); - } - }; + if (defined $len) { + $_[0]->on_read (sub { + $finish->((substr delete $_[0]{rbuf}, 0, $len, ""), undef, undef, 1) + if $len <= length $_[0]{rbuf}; + }); + } else { + $_[0]->on_error (sub { + ($! == Errno::EPIPE || !$!) + ? $finish->(delete $_[0]{rbuf}) + : $finish->(undef, $ae_error => $_[2]); + }); + $_[0]->on_read (sub { }); + } + } + }; - $_[0]->push_read (line => $read_chunk); + $state{handle}->push_read (line => $qr_nlnl, $state{read_response}); + }; - } elsif ($arg{on_body}) { - $_[0]->on_error (sub { $finish->(undef, 599 => $_[2]) }); + my $connect_cb = sub { + $state{fh} = shift + or do { + my $err = "$!"; + %state = (); + return $cb->(undef, { @pseudo, Status => $ae_error, Reason => $err }); + }; - if ($len) { - $_[0]->on_read (sub { - $len -= length $_[0]{rbuf}; + return unless delete $state{connect_guard}; - $arg{on_body}(delete $_[0]{rbuf}, \%hdr) - or return $finish->(undef, 598 => "Request cancelled by on_body"); + # get handle + $state{handle} = new AnyEvent::Handle + fh => $state{fh}, + peername => $rhost, + tls_ctx => $arg{tls_ctx}, + # these need to be reconfigured on keepalive handles + timeout => $timeout, + on_error => sub { + %state = (); + $cb->(undef, { @pseudo, Status => $ae_error, Reason => $_[2] }); + }, + on_eof => sub { + %state = (); + $cb->(undef, { @pseudo, Status => $ae_error, Reason => "Unexpected end-of-file" }); + }, + ; - $len > 0 - or $finish->("", undef, undef, 1); - }); - } else { - $_[0]->on_eof (sub { - $finish->(""); - }); - $_[0]->on_read (sub { - $arg{on_body}(delete $_[0]{rbuf}, \%hdr) - or $finish->(undef, 598 => "Request cancelled by on_body"); - }); - } - } else { - $_[0]->on_eof (undef); + # limit the number of persistent connections + # keepalive not yet supported +# if ($KA_COUNT{$_[1]} < $MAX_PERSISTENT_PER_HOST) { +# ++$KA_COUNT{$_[1]}; +# $state{handle}{ka_count_guard} = AnyEvent::Util::guard { +# --$KA_COUNT{$_[1]} +# }; +# $hdr{connection} = "keep-alive"; +# } - if ($len) { - $_[0]->on_error (sub { $finish->(undef, 599 => $_[2]) }); - $_[0]->on_read (sub { - $finish->((substr delete $_[0]{rbuf}, 0, $len, ""), undef, undef, 1) - if $len <= length $_[0]{rbuf}; - }); - } else { - $_[0]->on_error (sub { - ($! == Errno::EPIPE || !$!) - ? $finish->(delete $_[0]{rbuf}) - : $finish->(undef, 599 => $_[2]); - }); - $_[0]->on_read (sub { }); - } - } - } - }); - }; + $state{handle}->starttls ("connect") if $rscheme eq "https"; # now handle proxy-CONNECT method if ($proxy && $uscheme eq "https") { # oh dear, we have to wrap it into a connect request # maybe re-use $uauthority with patched port? 
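         # the CONNECT request asks the proxy for a raw tunnel to $uhost:$uport;
         # once the proxy answers 200, $handle_actual_request talks to the origin
         # server through that tunnel, which is also where the TLS handshake for
         # https URLs happens - end-to-end with the server, not with the proxy.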
- $state{handle}->push_write ("CONNECT $uhost:$uport HTTP/1.0\015\012Host: $uhost\015\012\015\012"); + $state{handle}->push_write ("CONNECT $uhost:$uport HTTP/1.0\015\012\015\012"); $state{handle}->push_read (line => $qr_nlnl, sub { $_[1] =~ /^HTTP\/([0-9\.]+) \s+ ([0-9]{3}) (?: \s+ ([^\015\012]*) )?/ix or return (%state = (), $cb->(undef, { @pseudo, Status => 599, Reason => "Invalid proxy connect response ($_[1])" })); if ($2 == 200) { $rpath = $upath; - &$handle_actual_request; + $handle_actual_request->(); } else { %state = (); $cb->(undef, { @pseudo, Status => $2, Reason => $3 }); } }); } else { - &$handle_actual_request; + $handle_actual_request->(); } }; @@ -828,7 +957,6 @@ || do { require AnyEvent::Socket; \&AnyEvent::Socket::tcp_connect }; $state{connect_guard} = $tcp_connect->($rhost, $rport, $connect_cb, $arg{on_prepare} || sub { $timeout }); - }; defined wantarray && AnyEvent::Util::guard { %state = () } @@ -873,6 +1001,45 @@ To clear an already-set proxy, use C. +=item AnyEvent::HTTP::cookie_jar_expire $jar[, $session_end] + +Remove all cookies from the cookie jar that have been expired. If +C<$session_end> is given and true, then additionally remove all session +cookies. + +You should call this function (with a true C<$session_end>) before you +save cookies to disk, and you should call this function after loading them +again. If you have a long-running program you can additonally call this +function from time to time. + +A cookie jar is initially an empty hash-reference that is managed by this +module. It's format is subject to change, but currently it is like this: + +The key C has to contain C<1>, otherwise the hash gets +emptied. All other keys are hostnames or IP addresses pointing to +hash-references. The key for these inner hash references is the +server path for which this cookie is meant, and the values are again +hash-references. The keys of those hash-references is the cookie name, and +the value, you guessed it, is another hash-reference, this time with the +key-value pairs from the cookie, except for C and C, +which have been replaced by a C<_expires> key that contains the cookie +expiry timestamp. + +Here is an example of a cookie jar with a single cookie, so you have a +chance of understanding the above paragraph: + + { + version => 1, + "10.0.0.1" => { + "/" => { + "mythweb_id" => { + _expires => 1293917923, + value => "ooRung9dThee3ooyXooM1Ohm", + }, + }, + }, + } + =item $date = AnyEvent::HTTP::format_date $timestamp Takes a POSIX timestamp (seconds since the epoch) and formats it as a HTTP @@ -880,7 +1047,8 @@ =item $timestamp = AnyEvent::HTTP::parse_date $date -Takes a HTTP Date (RFC 2616) and returns the corresponding POSIX +Takes a HTTP Date (RFC 2616) or a Cookie date (netscape cookie spec) or a +bunch of minor variations of those, and returns the corresponding POSIX timestamp, or C if the date cannot be parsed. =item $AnyEvent::HTTP::MAX_RECURSE @@ -930,15 +1098,17 @@ my ($d, $m, $y, $H, $M, $S); - if ($date =~ /^[A-Z][a-z][a-z], ([0-9][0-9]) ([A-Z][a-z][a-z]) ([0-9][0-9][0-9][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) GMT$/) { - # RFC 822/1123, required by RFC 2616 + if ($date =~ /^[A-Z][a-z][a-z]+, ([0-9][0-9]?)[\- ]([A-Z][a-z][a-z])[\- ]([0-9][0-9][0-9][0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) 
GMT$/) { + # RFC 822/1123, required by RFC 2616 (with " ") + # cookie dates (with "-") + ($d, $m, $y, $H, $M, $S) = ($1, $2, $3, $4, $5, $6); - } elsif ($date =~ /^[A-Z][a-z]+, ([0-9][0-9])-([A-Z][a-z][a-z])-([0-9][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) GMT$/) { + } elsif ($date =~ /^[A-Z][a-z][a-z]+, ([0-9][0-9]?)-([A-Z][a-z][a-z])-([0-9][0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) GMT$/) { # RFC 850 ($d, $m, $y, $H, $M, $S) = ($1, $2, $3 < 69 ? $3 + 2000 : $3 + 1900, $4, $5, $6); - } elsif ($date =~ /^[A-Z][a-z][a-z] ([A-Z][a-z][a-z]) ([0-9 ][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([0-9][0-9][0-9][0-9])$/) { + } elsif ($date =~ /^[A-Z][a-z][a-z]+ ([A-Z][a-z][a-z]) ([0-9 ]?[0-9]) ([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?) ([0-9][0-9][0-9][0-9])$/) { # ISO C's asctime ($d, $m, $y, $H, $M, $S) = ($2, $1, $6, $3, $4, $5); }
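# A minimal illustrative sketch of how the cookie_jar parameter and the
# cookie_jar_expire function documented above might be combined to persist
# cookies across runs. The file name "cookies.json" is an assumption made
# for this example; JSON::PP is used here, but Storable or any other
# serializer that round-trips plain hashes would work the same way.

use AnyEvent;
use AnyEvent::HTTP;
use JSON::PP qw(decode_json encode_json);

# load a previously saved jar, or start with an empty one
my $jar = -e "cookies.json"
   ? decode_json (do { local (@ARGV, $/) = ("cookies.json"); <> })
   : {};

my $cv = AE::cv;

http_request GET => "http://www.nethype.de/",
   cookie_jar => $jar,
   sub {
      my ($body, $hdr) = @_;

      # drop expired and session-only cookies before saving the jar
      AnyEvent::HTTP::cookie_jar_expire ($jar, 1);

      open my $fh, ">", "cookies.json" or die "cookies.json: $!";
      print $fh encode_json ($jar);
      close $fh;

      $cv->send;
   };

$cv->recv;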