 -rw-r--r--   doc/http.n                  172
 -rw-r--r--   library/http/http.tcl      2508
 -rw-r--r--   tests/http11.test             5
 -rw-r--r--   tests/httpPipeline.test     866
 -rw-r--r--   tests/httpTest.tcl          505
 -rw-r--r--   tests/httpTestScript.tcl    509
6 files changed, 4251 insertions, 314 deletions
@@ -6,14 +6,14 @@ '\" See the file "license.terms" for information on usage and redistribution '\" of this file, and for a DISCLAIMER OF ALL WARRANTIES. '\" -.TH "http" n 2.7 http "Tcl Bundled Packages" +.TH "http" n 2.8 http "Tcl Bundled Packages" .so man.macros .BS '\" Note: do not modify the .SH NAME line immediately below! .SH NAME http \- Client-side implementation of the HTTP/1.1 protocol .SH SYNOPSIS -\fBpackage require http ?2.7?\fR +\fBpackage require http ?2.8?\fR .\" See Also -useragent option documentation in body! .sp \fB::http::config ?\fI\-option value\fR ...? @@ -44,12 +44,14 @@ http \- Client-side implementation of the HTTP/1.1 protocol .sp \fB::http::register \fIproto port command\fR .sp +\fB::http::registerError \fIport\fR ?\fImessage\fR? +.sp \fB::http::unregister \fIproto\fR .BE .SH DESCRIPTION .PP The \fBhttp\fR package provides the client side of the HTTP/1.1 -protocol, as defined in RFC 2616. +protocol, as defined in RFC 7230 to RFC 7235, which supersede RFC 2616. The package implements the GET, POST, and HEAD operations of HTTP/1.1. It allows configuration of a proxy host to get through firewalls. The package is compatible with the \fBSafesock\fR security @@ -95,6 +97,19 @@ comma-separated list of mime type patterns that you are willing to receive. For example, .QW "image/gif, image/jpeg, text/*" . .TP +\fB\-pipeline\fR \fIboolean\fR +. +Specifies whether HTTP/1.1 transactions on a persistent socket will be +pipelined. See the \fBPERSISTENT SOCKETS\fR section for details. The default +is 1. +.TP +\fB\-postfresh\fR \fIboolean\fR +. +Specifies whether requests that use the \fBPOST\fR method will always use a +fresh socket, overriding the \fB-keepalive\fR option of +command \fBhttp::geturl\fR. See the \fBPERSISTENT SOCKETS\fR section for details. +The default is 0. +.TP \fB\-proxyhost\fR \fIhostname\fR . The name of the proxy host, if any. If this value is the @@ -116,6 +131,18 @@ an empty list. The default filter returns the values of the \fB\-proxyhost\fR and \fB\-proxyport\fR settings if they are non-empty. .TP +\fB\-repost\fR \fIboolean\fR +. +Specifies what to do if a POST request over a persistent connection fails +because the server has half-closed the connection. If boolean \fBtrue\fR, the +request +will be automatically retried; if boolean \fBfalse\fR it will not, and the +application +that uses \fBhttp::geturl\fR is expected to seek user confirmation before +retrying the POST. The value \fBtrue\fR should be used only under certain +conditions. See the \fBPERSISTENT SOCKETS\fR section for details. The +default is 0. +.TP \fB\-urlencoding\fR \fIencoding\fR . The \fIencoding\fR used for creating the x-url-encoded URLs with @@ -128,8 +155,22 @@ characters. .TP \fB\-useragent\fR \fIstring\fR . -The value of the User-Agent header in the HTTP request. The default is -.QW "\fBTcl http client package 2.7\fR" . +The value of the User-Agent header in the HTTP request. In an unsafe +interpreter, the default value depends upon the operating system, and +the version numbers of \fBhttp\fR and \fBTcl\fR, and is (for example) +.QW "\fBMozilla/5.0 (Windows; U; Windows NT 10.0) http/2.8.12 Tcl/8.6.8\fR" . +A safe interpreter cannot determine its operating system, and so the default +in a safe interpreter is to use a Windows 10 value with the current version +numbers of \fBhttp\fR and \fBTcl\fR. +.TP +\fB\-zip\fR \fIboolean\fR +. +If the value is boolean \fBtrue\fR, then by default requests will send a header +.QW "\fBAccept-Encoding: gzip,deflate,compress\fR" . 
+If the value is boolean \fBfalse\fR, then by default this header will not be sent. +In either case the default can be overridden for an individual request by +supplying a custom \fBAccept-Encoding\fR header in the \fB-headers\fR option +of \fBhttp::geturl\fR. The default is 1. .RE .TP \fB::http::geturl\fR \fIurl\fR ?\fIoptions\fR? @@ -227,7 +268,7 @@ Pragma: no-cache .TP \fB\-keepalive\fR \fIboolean\fR . -If true, attempt to keep the connection open for servicing +If boolean \fBtrue\fR, attempt to keep the connection open for servicing multiple requests. Default is 0. .TP \fB\-method\fR \fItype\fR @@ -415,6 +456,17 @@ set token [::http::geturl https://my.secure.site/] .CE .RE .TP +\fB::http::registerError\fR \fIport\fR ?\fImessage\fR? +. +This procedure allows a registered protocol handler to deliver an error +message for use by \fBhttp\fR. Calling this command does not raise an +error. The command is useful when a registered protocol detects a problem +(for example, an invalid TLS certificate) that will cause an error to +propagate to \fBhttp\fR. The command allows \fBhttp\fR to provide a +precise error message rather than a general one. The command returns the +value provided by the last call with argument \fImessage\fR, or the empty +string if no such call has been made. +.TP \fB::http::unregister\fR \fIproto\fR . This procedure unregisters a protocol handler that was previously @@ -504,6 +556,14 @@ The following elements of the array are supported: .RS .TP +\fBbinary\fR +. +This is boolean \fBtrue\fR if (after decoding any compression specified +by the +.QW "Content-Encoding" +response header) the HTTP response is binary. It is boolean \fBfalse\fR +if the HTTP response is text. +.TP \fBbody\fR . The contents of the URL. This will be empty if the \fB\-channel\fR @@ -602,6 +662,106 @@ A copy of the \fBContent-Type\fR meta-data value. . The requested URL. .RE +.SH "PERSISTENT CONNECTIONS" +.PP +.SS "BASICS" +.PP +See RFC 7230 Sec 6, which supersedes RFC 2616 Sec 8.1. +.PP +A persistent connection allows multiple HTTP/1.1 transactions to be +carried over the same TCP connection. Pipelining allows a +client to make multiple requests over a persistent connection without +waiting for each response. The server sends responses in the same order +that the requests were received. +.PP +If a POST request fails to complete, typically user confirmation is +needed before sending the request again. The user may wish to verify +whether the server was modified by the failed POST request, before +sending the same request again. +.PP +A HTTP request will use a persistent socket if the call to +\fBhttp::geturl\fR has the option \fB-keepalive true\fR. It will use +pipelining where permitted if the \fBhttp::config\fR option +\fB-pipeline\fR is boolean \fBtrue\fR (its default value). +.PP +The http package maintains no more than one persistent connection to each +server (i.e. each value of +.QW "domain:port" ). +If \fBhttp::geturl\fR is called to make a request over a persistent +connection while the connection is busy with another request, the new +request will be held in a queue until the connection is free. +.PP +The http package does not support HTTP/1.0 persistent connections +controlled by the \fBKeep-Alive\fR header. +.SS "SPECIAL CASES" +.PP +This subsection discusses issues related to closure of the +persistent connection by the server, automatic retry of failed requests, +the special treatment necessary for POST requests, and the options for +dealing with these cases.
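The queued and pipelined behaviour described under BASICS above can be illustrated with a short script. This is only a minimal sketch: the host example.com, the paths, and the helper proc "show" are placeholders, and it simply combines the documented options (-pipeline for ::http::config, -keepalive and -command for ::http::geturl).

    package require http 2.8

    # Pipeline GET and HEAD requests on persistent connections (the default).
    ::http::config -pipeline 1

    # Hypothetical callback: report the result when each request completes.
    proc show {tok} {
        upvar #0 $tok state
        # Responses are delivered in the order the requests were sent.
        if {[::http::status $tok] eq "ok"} {
            puts "[::http::ncode $tok] for $state(url)"
        } else {
            puts "[::http::status $tok] for $state(url)"
        }
    }

    # At most one connection to example.com:80 is kept open; requests made
    # while that connection is busy are queued (and, for GET/HEAD, pipelined).
    set tokens {}
    foreach path {/a /b /c} {
        lappend tokens [::http::geturl http://example.com$path \
                -keepalive 1 -command show]
    }

    # Wait for all responses, then release the tokens.
    foreach tok $tokens {
        ::http::wait $tok
        ::http::cleanup $tok
    }

Because all three requests target the same "domain:port", the package uses a single connection and either pipelines the later requests or holds them in its queue until the connection is free.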
+.PP +In accordance with RFC 7230, \fBhttp::geturl\fR does not pipeline +requests that use the POST method. If a POST uses a persistent +connection and is not the first request on that connection, +\fBhttp::geturl\fR waits until it has received the response for the previous +request; or (if \fBhttp::config\fR option \fB-postfresh\fR is boolean \fBtrue\fR) it +uses a new connection for each POST. +.PP +If the server is processing a number of pipelined requests, and sends a +response header +.QW "\fBConnection: close\fR" +with one of the responses (other than the last), then subsequent responses +are unfulfilled. \fBhttp::geturl\fR will send the unfulfilled requests again +over a new connection. +.PP +A difficulty arises when a HTTP client sends a request over a persistent +connection that has been idle for a while. The HTTP server may +half-close an apparently idle connection while the client is sending a +request, but before the request arrives at the server: in this case (an +.QW "asynchronous close event" ) +the request will fail. The difficulty arises because the client cannot +be certain whether the POST modified the state of the server. For HEAD or +GET requests, \fBhttp::geturl\fR opens another connection and retransmits +the failed request. However, if the request was a POST, RFC 7230 forbids +automatic retry by default, suggesting either user confirmation, or +confirmation by user-agent software that has semantic understanding of +the application. The \fBhttp::config\fR option \fB-repost\fR allows for +either possibility. +.PP +Asynchronous close events can occur only in a short interval of time. The +\fBhttp\fR package monitors each persistent connection for closure by the +server. Upon detection, the connection is also closed at the client end, +and subsequent requests will use a fresh connection. +.PP +If the \fBhttp::geturl\fR command is called with option \fB-keepalive true\fR, +then it will both try to use an existing persistent connection +(if one is available), and it will send the server a +.QW "\fBConnection: keep-alive\fR" +request header asking to keep the connection open for future requests. +.PP +The \fBhttp::config\fR options \fB-pipeline\fR, \fB-postfresh\fR, and +\fB-repost\fR relate to persistent connections. +.PP +Option \fB-pipeline\fR, if boolean \fBtrue\fR, will pipeline GET and HEAD requests +made +over a persistent connection. POST requests will not be pipelined - if the +POST is not the first transaction on the connection, its request will not +be sent until the previous response has finished. GET and HEAD requests +made after a POST will not be sent until the POST response has been +delivered, and will not be sent if the POST fails. +.PP +Option \fB-postfresh\fR, if boolean \fBtrue\fR, will override the \fBhttp::geturl\fR option +\fB-keepalive\fR, and always open a fresh connection for a POST request. +.PP +Option \fB-repost\fR, if \fBtrue\fR, permits automatic retry of a POST request +that fails because it uses a persistent connection that the server has +half-closed (an +.QW "asynchronous close event" ). +Subsequent GET and HEAD requests in a failed pipeline will also be retried. +\fIThe -repost option should be used only if the application understands +that the retry is appropriate\fR - specifically, the application must know +that if the failed POST successfully modified the state of the server, a repeat POST +would have no adverse effect. 
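The interaction of -postfresh and -repost with POST requests, described above, can be made concrete with ::http::config. The sketch below is illustrative only: the URL and form data are placeholders, and -repost 1 is appropriate only on the assumption stated in the text, namely that repeating the POST would have no adverse effect on the server.

    package require http 2.8

    # Option 1: permit automatic retry of a POST that fails because the server
    # half-closed an idle persistent connection.  Safe only if the application
    # knows that repeating the POST is harmless.
    ::http::config -repost 1

    # Option 2 (alternative): give every POST a fresh connection, overriding
    # -keepalive for POST requests, and avoid the race altogether.
    # ::http::config -postfresh 1

    # Placeholder URL and form data.
    set query [::http::formatQuery q tcl]
    set tok [::http::geturl http://example.com/search -keepalive 1 -query $query]
    puts "[::http::ncode $tok] [::http::status $tok]"
    ::http::cleanup $tok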
.SH EXAMPLE .PP This example creates a procedure to copy a URL to a file while printing a diff --git a/library/http/http.tcl b/library/http/http.tcl index 186d067..c177374 100644 --- a/library/http/http.tcl +++ b/library/http/http.tcl @@ -20,26 +20,30 @@ namespace eval http { if {![info exists http]} { array set http { -accept */* + -pipeline 1 + -postfresh 0 -proxyhost {} -proxyport {} -proxyfilter http::ProxyRequired + -repost 0 -urlencoding utf-8 + -zip 1 } - # We need a useragent string of this style or various servers will refuse to - # send us compressed content even when we ask for it. This follows the - # de-facto layout of user-agent strings in current browsers. + # We need a useragent string of this style or various servers will + # refuse to send us compressed content even when we ask for it. This + # follows the de-facto layout of user-agent strings in current browsers. # Safe interpreters do not have ::tcl_platform(os) or # ::tcl_platform(osVersion). if {[interp issafe]} { set http(-useragent) "Mozilla/5.0\ - (Windows; U;\ - Windows NT 10.0)\ - http/[package provide http] Tcl/[package provide Tcl]" + (Windows; U;\ + Windows NT 10.0)\ + http/[package provide http] Tcl/[package provide Tcl]" } else { set http(-useragent) "Mozilla/5.0\ - ([string totitle $::tcl_platform(platform)]; U;\ - $::tcl_platform(os) $::tcl_platform(osVersion))\ - http/[package provide http] Tcl/[package provide Tcl]" + ([string totitle $::tcl_platform(platform)]; U;\ + $::tcl_platform(os) $::tcl_platform(osVersion))\ + http/[package provide http] Tcl/[package provide Tcl]" } } @@ -60,14 +64,43 @@ namespace eval http { variable formMap [array get map] # Create a map for HTTP/1.1 open sockets - variable socketmap - if {[info exists socketmap]} { - # Close but don't remove open sockets on re-init - foreach {url sock} [array get socketmap] { - catch {close $sock} + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + if {[info exists socketMapping]} { + # Close open sockets on re-init. Do not permit retries. + foreach {url sock} [array get socketMapping] { + unset -nocomplain socketClosing($url) + unset -nocomplain socketPlayCmd($url) + CloseSocket $sock } } - array set socketmap {} + + # CloseSocket should have unset the socket* arrays, one element at + # a time. Now unset anything that was overlooked. + # Traces on "unset socketRdState(*)" will call CancelReadPipeline and + # cancel any queued responses. + # Traces on "unset socketWrState(*)" will call CancelWritePipeline and + # cancel any queued requests. + array unset socketMapping + array unset socketRdState + array unset socketWrState + array unset socketRdQueue + array unset socketWrQueue + array unset socketClosing + array unset socketPlayCmd + array set socketMapping {} + array set socketRdState {} + array set socketWrState {} + array set socketRdQueue {} + array set socketWrQueue {} + array set socketClosing {} + array set socketPlayCmd {} + return } init @@ -95,8 +128,13 @@ namespace eval http { set defaultKeepalive 0 } - namespace export geturl config reset wait formatQuery register unregister - # Useful, but not exported: data size status code + namespace export geturl config reset wait formatQuery + namespace export register unregister registerError + # - Useful, but not exported: data, size, status, code, cleanup, error, + # meta, ncode, mapReply, init. 
Comments suggest that "init" can be used + # for re-initialisation, although the command is undocumented. + # - Not exported, probably should be upper-case initial letter as part + # of the internals: getTextLine, make-transformation-chunked. } # http::Log -- @@ -123,6 +161,7 @@ if {[info command http::Log] eq {}} {proc http::Log {args} {}} proc http::register {proto port command} { variable urlTypes set urlTypes([string tolower $proto]) [list $port $command] + # N.B. Implicit Return. } # http::unregister -- @@ -180,6 +219,7 @@ proc http::config {args} { } set http($flag) $value } + return } } @@ -190,40 +230,288 @@ proc http::config {args} { # Arguments: # token Connection token. # errormsg (optional) If set, forces status to error. -# skipCB (optional) If set, don't call the -command callback. This +# skipCB (optional) If set, don't call the -command callback. This # is useful when geturl wants to throw an exception instead # of calling the callback. That way, the same error isn't # reported to two places. # # Side Effects: -# Closes the socket +# May close the socket. proc http::Finish {token {errormsg ""} {skipCB 0}} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + variable $token upvar 0 $token state global errorInfo errorCode + set closeQueue 0 if {$errormsg ne ""} { set state(error) [list $errormsg $errorInfo $errorCode] set state(status) "error" } - if { ($state(status) eq "timeout") + if {[info commands ${token}EventCoroutine] ne {}} { + rename ${token}EventCoroutine {} + } + if { ($state(status) eq "timeout") || ($state(status) eq "error") + || ($state(status) eq "eof") || ([info exists state(-keepalive)] && !$state(-keepalive)) || ([info exists state(connection)] && ($state(connection) eq "close")) } { - CloseSocket $state(sock) $token + set closeQueue 1 + set connId $state(socketinfo) + set sock $state(sock) + CloseSocket $state(sock) $token + } elseif { + ([info exists state(-keepalive)] && $state(-keepalive)) + && ([info exists state(connection)] && ($state(connection) ne "close")) + } { + KeepSocket $token } if {[info exists state(after)]} { after cancel $state(after) + unset state(after) } - if {[info exists state(-command)] && !$skipCB - && ![info exists state(done-command-cb)]} { + if {[info exists state(-command)] && (!$skipCB) + && (![info exists state(done-command-cb)])} { set state(done-command-cb) yes if {[catch {eval $state(-command) {$token}} err] && $errormsg eq ""} { set state(error) [list $err $errorInfo $errorCode] set state(status) error } } + + if { $closeQueue + && [info exists socketMapping($connId)] + && ($socketMapping($connId) eq $sock) + } { + http::CloseQueuedQueries $connId $token + } + + return +} + +# http::KeepSocket - +# +# Keep a socket in the persistent sockets table and connect it to its next +# queued task if possible. Otherwise leave it idle and ready for its next +# use. +# +# If $socketClosing(*), then ($state(connection) eq "close") and therefore +# this command will not be called by Finish. +# +# Arguments: +# token Connection token. + +proc http::KeepSocket {token} { + variable http + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + variable $token + upvar 0 $token state + set tk [namespace tail $token] + + # Keep this socket open for another request ("Keep-Alive"). 
+ # React if the server half-closes the socket. + # Discussion is in http::geturl. + catch {fileevent $state(sock) readable [list http::CheckEof $state(sock)]} + + # The line below should not be changed in production code. + # It is edited by the test suite. + set TEST_EOF 0 + if {$TEST_EOF} { + # ONLY for testing reaction to server eof. + # No server timeouts will be caught. + catch {fileevent $state(sock) readable {}} + } else { + # Normal operation. + # Test constraint normalEof. + } + + if { [info exists state(socketinfo)] + && [info exists socketMapping($state(socketinfo))] + } { + set connId $state(socketinfo) + # The value "Rready" is set only here. + set socketRdState($connId) Rready + + if { $state(-pipeline) + && [info exists socketRdQueue($connId)] + && [llength $socketRdQueue($connId)] + } { + # The usual case for pipelined responses - if another response is + # queued, arrange to read it. + set token3 [lindex $socketRdQueue($connId) 0] + set socketRdQueue($connId) [lrange $socketRdQueue($connId) 1 end] + variable $token3 + upvar 0 $token3 state3 + set tk2 [namespace tail $token3] + + #Log pipelined, GRANT read access to $token3 in KeepSocket + set socketRdState($connId) $token3 + ReceiveResponse $token3 + + # Other pipelined cases. + # - The test above ensures that, for the pipelined cases in the two + # tests below, the read queue is empty. + # - In those two tests, check whether the next write will be + # nonpipeline. + } elseif { + $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "peNding") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![set token3 [lindex $socketWrQueue($connId) 0] + set ${token3}(-pipeline) + ] + ) + } { + # This case: + # - Now it the time to run the "pending" request. + # - The next token in the write queue is nonpipeline, and + # socketWrState has been marked "pending" (in + # http::NextPipelinedWrite or http::geturl) so a new pipelined + # request cannot jump the queue. + # + # Tests: + # - In this case the read queue (tested above) is empty and this + # "pending" write token is in front of the rest of the write + # queue. + # - The write state is not Wready and therefore appears to be busy, + # but because it is "pending" we know that it is reserved for the + # first item in the write queue, a non-pipelined request that is + # waiting for the read queue to empty. That has now happened: so + # give that request read and write access. + variable $token3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in KeepSocket + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (c) + + } elseif { + $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "peNding") + + } { + # Should not come here. The second block in the previous "elseif" + # test should be tautologous (but was needed in an earlier + # implementation) and will be removed after testing. + # If we get here, the value "pending" was assigned in error. + # This error would block the queue for ever. 
+ Log ^X$tk <<<<< Error in queueing of requests >>>>> - token $token + + } elseif { + $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![set token3 [lindex $socketWrQueue($connId) 0] + set ${token3}(-pipeline) + ] + ) + } { + # This case: + # - The next token in the write queue is nonpipeline, and + # socketWrState is Wready. Get the next event from socketWrQueue. + # Tests: + # - In this case the read state (tested above) is Rready and the + # write state (tested here) is Wready - there is no "pending" + # request. + # Code: + # - The code is the same as the code below for the nonpipelined + # case with a queued request. + variable $token3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in KeepSocket + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (c) + + } elseif { + (!$state(-pipeline)) + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && ($state(connection) ne "close") + } { + # If not pipelined, (socketRdState eq Rready) tells us that we are + # ready for the next write - there is no need to check + # socketWrState. Write the next request, if one is waiting. + # If the next request is pipelined, it receives premature read + # access to the socket. This is not a problem. + set token3 [lindex $socketWrQueue($connId) 0] + variable $token3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in KeepSocket + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (d) + + } elseif {(!$state(-pipeline))} { + set socketWrState($connId) Wready + # Rready and Wready and idle: nothing to do. + } else { + # Rready and idle: nothing to do. + } + + } else { + CloseSocket $state(sock) $token + # There is no socketMapping($state(socketinfo)), so it does not matter + # that CloseQueuedQueries is not called. + } + return +} + +# http::CheckEof - +# +# Read from a socket and close it if eof. +# The command is bound to "fileevent readable" on an idle socket, and +# "eof" is the only event that should trigger the binding, occurring when +# the server times out and half-closes the socket. +# +# A read is necessary so that [eof] gives a meaningful result. +# Any bytes sent are junk (or a bug). + +proc http::CheckEof {sock} { + set junk [read $sock] + set n [string length $junk] + if {$n} { + Log "WARNING: $n bytes received but no HTTP request sent" + } + + if {[catch {eof $sock} res] || $res} { + # The server has half-closed the socket. + # If a new write has started, its transaction will fail and + # will then be error-handled. + CloseSocket $sock + } + return } # http::CloseSocket - @@ -231,42 +519,144 @@ proc http::Finish {token {errormsg ""} {skipCB 0}} { # Close a socket and remove it from the persistent sockets table. 
If # possible an http token is included here but when we are called from a # fileevent on remote closure we need to find the correct entry - hence -# the second section. +# the "else" block of the first "if" command. + +proc http::CloseSocket {s {token {}}} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + set tk [namespace tail $token] -proc ::http::CloseSocket {s {token {}}} { - variable socketmap catch {fileevent $s readable {}} - set conn_id {} + set connId {} if {$token ne ""} { - variable $token - upvar 0 $token state - if {[info exists state(socketinfo)]} { - set conn_id $state(socketinfo) - } + variable $token + upvar 0 $token state + if {[info exists state(socketinfo)]} { + set connId $state(socketinfo) + } else { + } } else { - set map [array get socketmap] - set ndx [lsearch -exact $map $s] - if {$ndx != -1} { + set map [array get socketMapping] + set ndx [lsearch -exact $map $s] + if {$ndx != -1} { incr ndx -1 - set conn_id [lindex $map $ndx] - } + set connId [lindex $map $ndx] + } else { + } } - if {$conn_id eq {} || ![info exists socketmap($conn_id)]} { - Log "Closing socket $s (no connection info)" - if {[catch {close $s} err]} { - Log "Error: $err" + if { ($connId ne {}) + && [info exists socketMapping($connId)] + && ($socketMapping($connId) eq $s) + } { + Log "Closing connection $connId (sock $socketMapping($connId))" + if {[catch {close $socketMapping($connId)} err]} { + Log "Error closing connection: $err" + } else { + } + if {$token eq {}} { + # Cases with a non-empty token are handled by Finish, so the tokens + # are finished in connection order. + http::CloseQueuedQueries $connId + } else { } } else { - if {[info exists socketmap($conn_id)]} { - Log "Closing connection $conn_id (sock $socketmap($conn_id))" - if {[catch {close $socketmap($conn_id)} err]} { - Log "Error: $err" - } - unset socketmap($conn_id) + Log "Closing socket $s (no connection info)" + if {[catch {close $s} err]} { + Log "Error closing socket: $err" } else { - Log "Cannot close connection $conn_id - no socket in socket map" } } + return +} + +# http::CloseQueuedQueries +# +# connId - identifier "domain:port" for the connection +# token - (optional) used only for logging +# +# Called from http::CloseSocket and http::Finish, after a connection is closed, +# to clear the read and write queues if this has not already been done. + +proc http::CloseQueuedQueries {connId {token {}}} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + if {![info exists socketMapping($connId)]} { + # Command has already been called. + # Don't come here again - especially recursively. + return + } + + # Used only for logging. + if {$token eq {}} { + set tk {} + } else { + set tk [namespace tail $token] + } + + if { [info exists socketPlayCmd($connId)] + && ($socketPlayCmd($connId) ne {ReplayIfClose Wready {} {}}) + } { + # Before unsetting, there is some unfinished business. + # - If the server sent "Connection: close", we have stored the command + # for retrying any queued requests in socketPlayCmd, so copy that + # value for execution below. socketClosing(*) was also set. + # - Also clear the queues to prevent calls to Finish that would set the + # state for the requests that will be retried to "finished with error + # status". 
+ set unfinished $socketPlayCmd($connId) + set socketRdQueue($connId) {} + set socketWrQueue($connId) {} + } else { + set unfinished {} + } + + Unset $connId + + if {$unfinished ne {}} { + Log ^R$tk Any unfinished transactions (excluding $token) failed \ + - token $token + {*}$unfinished + } + return +} + +# http::Unset +# +# The trace on "unset socketRdState(*)" will call CancelReadPipeline +# and cancel any queued responses. +# The trace on "unset socketWrState(*)" will call CancelWritePipeline +# and cancel any queued requests. + +proc http::Unset {connId} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + unset socketMapping($connId) + unset socketRdState($connId) + unset socketWrState($connId) + unset -nocomplain socketRdQueue($connId) + unset -nocomplain socketWrQueue($connId) + unset -nocomplain socketClosing($connId) + unset -nocomplain socketPlayCmd($connId) + + return } # http::reset -- @@ -278,7 +668,7 @@ proc ::http::CloseSocket {s {token {}}} { # why Status info. # # Side Effects: -# See Finish +# See Finish proc http::reset {token {why reset}} { variable $token @@ -292,6 +682,7 @@ proc http::reset {token {why reset}} { unset state eval ::error $errorlist } + return } # http::geturl -- @@ -299,8 +690,8 @@ proc http::reset {token {why reset}} { # Establishes a connection to a remote url via http. # # Arguments: -# url The http URL to goget. -# args Option value pairs. Valid options include: +# url The http URL to goget. +# args Option value pairs. Valid options include: # -blocksize, -validate, -headers, -timeout # Results: # Returns a token for this connection. This token is the name of an @@ -320,9 +711,12 @@ proc http::geturl {url args} { set http(uid) 0 } set token [namespace current]::[incr http(uid)] + ##Log Starting http::geturl - token $token variable $token upvar 0 $token state + set tk [namespace tail $token] reset $token + Log ^A$tk URL $url - token $token # Process command options. @@ -337,8 +731,9 @@ proc http::geturl {url args} { -queryprogress {} -protocol 1.1 binary 0 - state connecting + state created meta {} + method {} coding {} currentsize 0 totalsize 0 @@ -555,14 +950,7 @@ proc http::geturl {url args} { # Don't append the fragment! set state(url) $url - # If a timeout is specified we set up the after event and arrange for an - # asynchronous socket connection. - set sockopts [list -async] - if {$state(-timeout) > 0} { - set state(after) [after $state(-timeout) \ - [list http::reset $token timeout]] - } # If we are using the proxy, we must pass in the full URL that includes # the server name. @@ -580,52 +968,298 @@ proc http::geturl {url args} { # c11a51c482] set state(accept-types) $http(-accept) + if {$isQuery || $isQueryChannel} { + # It's a POST. + # A client wishing to send a non-idempotent request SHOULD wait to send + # that request until it has received the response status for the + # previous request. + if {$http(-postfresh)} { + # Override -keepalive for a POST. Use a new connection, and thus + # avoid the small risk of a race against server timeout. + set state(-keepalive) 0 + } else { + # Allow -keepalive but do not -pipeline - wait for the previous + # transaction to finish. + # There is a small risk of a race against server timeout. + set state(-pipeline) 0 + } + } else { + # It's a GET or HEAD. + set state(-pipeline) $http(-pipeline) + } + # See if we are supposed to use a previously opened channel. 
+ # - In principle, ANY call to http::geturl could use a previously opened + # channel if it is available - the "Connection: keep-alive" header is a + # request to leave the channel open AFTER completion of this call. + # - In fact, we try to use an existing channel only if -keepalive 1 -- this + # means that at most one channel is left open for each value of + # $state(socketinfo). This property simplifies the mapping of open + # channels. + set reusing 0 + set alreadyQueued 0 if {$state(-keepalive)} { - variable socketmap - if {[info exists socketmap($state(socketinfo))]} { - if {[catch {fconfigure $socketmap($state(socketinfo))}]} { - Log "WARNING: socket for $state(socketinfo) was closed" - unset socketmap($state(socketinfo)) + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + if {[info exists socketMapping($state(socketinfo))]} { + # - If the connection is idle, it has a "fileevent readable" binding + # to http::CheckEof, in case the server times out and half-closes + # the socket (http::CheckEof closes the other half). + # - We leave this binding in place until just before the last + # puts+flush in http::Connected (GET/HEAD) or http::Write (POST), + # after which the HTTP response might be generated. + + if { [info exists socketClosing($state(socketinfo))] + && $socketClosing($state(socketinfo)) + } { + # socketClosing(*) is set because the server has sent a + # "Connection: close" header. + # Do not use the persistent socket again. + # Since we have only one persistent socket per server, and the + # old socket is not yet dead, add the request to the write queue + # of the dying socket, which will be replayed by ReplayIfClose. + # Also add it to socketWrQueue(*) which is used only if an error + # causes a call to Finish. + set reusing 1 + set sock $socketMapping($state(socketinfo)) + Log "reusing socket $sock for $state(socketinfo) - token $token" + + set alreadyQueued 1 + lassign $socketPlayCmd($state(socketinfo)) com0 com1 com2 com3 + lappend com3 $token + set socketPlayCmd($state(socketinfo)) [list $com0 $com1 $com2 $com3] + lappend socketWrQueue($state(socketinfo)) $token + } elseif {[catch {fconfigure $socketMapping($state(socketinfo))}]} { + # FIXME Is it still possible for this code to be executed? If + # so, this could be another place to call TestForReplay, + # rather than discarding the queued transactions. + Log "WARNING: socket for $state(socketinfo) was closed\ + - token $token" + Log "WARNING - if testing, pay special attention to this\ + case (GH) which is seldom executed - token $token" + + # This will call CancelReadPipeline, CancelWritePipeline, and + # cancel any queued requests, responses. + Unset $state(socketinfo) } else { - set sock $socketmap($state(socketinfo)) - Log "reusing socket $sock for $state(socketinfo)" - catch {fileevent $sock writable {}} - catch {fileevent $sock readable {}} + # Use the persistent socket. + # The socket may not be ready to write: an earlier request might + # still be still writing (in the pipelined case) or + # writing/reading (in the nonpipeline case). This possibility + # is handled by socketWrQueue later in this command. + set reusing 1 + set sock $socketMapping($state(socketinfo)) + Log "reusing socket $sock for $state(socketinfo) - token $token" + } + # Do not automatically close the connection socket. 
+ set state(connection) {} + } + } + + if {$reusing} { + # Define state(tmpState) and state(tmpOpenCmd) for use + # by http::ReplayIfDead if the persistent connection has died. + set state(tmpState) [array get state] + + # Pass -myaddr directly to the socket command + if {[info exists state(-myaddr)]} { + lappend sockopts -myaddr $state(-myaddr) } - # don't automatically close this connection socket - set state(connection) {} + + set state(tmpOpenCmd) [list {*}$defcmd {*}$sockopts {*}$targetAddr] } + + set state(reusing) $reusing + # Excluding ReplayIfDead and the decision whether to call it, there are four + # places outside http::geturl where state(reusing) is used: + # - Connected - if reusing and not pipelined, start the state(-timeout) + # timeout (when writing). + # - DoneRequest - if reusing and pipelined, send the next pipelined write + # - Event - if reusing and pipelined, start the state(-timeout) + # timeout (when reading). + # - Event - if (not reusing) and pipelined, send the next pipelined + # write + + # See comments above re the start of this timeout in other cases. + if {(!$state(reusing)) && ($state(-timeout) > 0)} { + set state(after) [after $state(-timeout) \ + [list http::reset $token timeout]] + } + if {![info exists sock]} { # Pass -myaddr directly to the socket command if {[info exists state(-myaddr)]} { lappend sockopts -myaddr $state(-myaddr) } - if {[catch {eval $defcmd $sockopts $targetAddr} sock errdict]} { - # something went wrong while trying to establish the connection. + set pre [clock milliseconds] + ##Log pre socket opened, - token $token + ##Log [concat $defcmd $sockopts $targetAddr] - token $token + if {[catch {eval $defcmd $sockopts $targetAddr} sock errdict]} { + # Something went wrong while trying to establish the connection. # Clean up after events and such, but DON'T call the command # callback (if available) because we're going to throw an # exception from here instead. - set state(sock) $sock - Finish $token "" 1 + set state(sock) NONE + Finish $token $sock 1 cleanup $token dict unset errdict -level return -options $errdict $sock - } + } else { + # Initialisation of a new socket. + ##Log post socket opened, - token $token + ##Log socket opened, now fconfigure - token $token + set delay [expr {[clock milliseconds] - $pre}] + if {$delay > 3000} { + Log socket delay $delay - token $token + } + fconfigure $sock -translation {auto crlf} \ + -buffersize $state(-blocksize) + ##Log socket opened, DONE fconfigure - token $token + } } + # Command [socket] is called with -async, but takes 5s to 5.1s to return, + # with probability of order 1 in 10,000. This may be a bizarre scheduling + # issue with my (KJN's) system (Fedora Linux). + # This does not cause a problem (unless the request times out when this + # command returns). + set state(sock) $sock - Log "Using $sock for $state(socketinfo)" \ - [expr {$state(-keepalive)?"keepalive":""}] - if {$state(-keepalive)} { - set socketmap($state(socketinfo)) $sock + Log "Using $sock for $state(socketinfo) - token $token" \ + [expr {$state(-keepalive)?"keepalive":""}] + + if { $state(-keepalive) + && (![info exists socketMapping($state(socketinfo))]) + } { + # Freshly-opened socket that we would like to become persistent. 
+ set socketMapping($state(socketinfo)) $sock + + if {![info exists socketRdState($state(socketinfo))]} { + set socketRdState($state(socketinfo)) {} + set varName ::http::socketRdState($state(socketinfo)) + trace add variable $varName unset ::http::CancelReadPipeline + } + if {![info exists socketWrState($state(socketinfo))]} { + set socketWrState($state(socketinfo)) {} + set varName ::http::socketWrState($state(socketinfo)) + trace add variable $varName unset ::http::CancelWritePipeline + } + + if {$state(-pipeline)} { + #Log new, init for pipelined, GRANT write access to $token in geturl + # Also grant premature read access to the socket. This is OK. + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } else { + # socketWrState is not used by this non-pipelined transaction. + # We cannot leave it as "Wready" because the next call to + # http::geturl with a pipelined transaction would conclude that the + # socket is available for writing. + #Log new, init for nonpipeline, GRANT r/w access to $token in geturl + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } + + set socketRdQueue($state(socketinfo)) {} + set socketWrQueue($state(socketinfo)) {} + set socketClosing($state(socketinfo)) 0 + set socketPlayCmd($state(socketinfo)) {ReplayIfClose Wready {} {}} } if {![info exists phost]} { set phost "" } - fileevent $sock writable [list http::Connect $token $proto $phost $srvurl] + if {$reusing} { + # For use by http::ReplayIfDead if the persistent connection has died. + # Also used by NextPipelinedWrite. + set state(tmpConnArgs) [list $proto $phost $srvurl] + } + + # The element socketWrState($connId) has a value which is either the name of + # the token that is permitted to write to the socket, or "Wready" if no + # token is permitted to write. + # + # The code that sets the value to Wready immediately calls + # http::NextPipelinedWrite, which examines socketWrQueue($connId) and + # processes the next request in the queue, if there is one. The value + # Wready is not found when the interpreter is in the event loop unless the + # socket is idle. + # + # The element socketRdState($connId) has a value which is either the name of + # the token that is permitted to read from the socket, or "Rready" if no + # token is permitted to read. + # + # The code that sets the value to Rready then examines + # socketRdQueue($connId) and processes the next request in the queue, if + # there is one. The value Rready is not found when the interpreter is in + # the event loop unless the socket is idle. + + if {$alreadyQueued} { + # A write may or may not be in progress. There is no need to set + # socketWrState to prevent another call stealing write access - all + # subsequent calls on this socket will come here because the socket + # will close after the current read, and its + # socketClosing($connId) is 1. + ##Log "HTTP request for token $token is queued" + + } elseif { $reusing + && $state(-pipeline) + && ($socketWrState($state(socketinfo)) ne "Wready") + } { + ##Log "HTTP request for token $token is queued for pipelined use" + lappend socketWrQueue($state(socketinfo)) $token + + } elseif { $reusing + && (!$state(-pipeline)) + && ($socketWrState($state(socketinfo)) ne "Wready") + } { + # A write is queued or in progress. Lappend to the write queue. 
+ ##Log "HTTP request for token $token is queued for nonpipeline use" + lappend socketWrQueue($state(socketinfo)) $token + + } elseif { $reusing + && (!$state(-pipeline)) + && ($socketWrState($state(socketinfo)) eq "Wready") + && ($socketRdState($state(socketinfo)) ne "Rready") + } { + # A read is queued or in progress, but not a write. Cannot start the + # nonpipeline transaction, but must set socketWrState to prevent a + # pipelined request jumping the queue. + ##Log "HTTP request for token $token is queued for nonpipeline use" + #Log re-use nonpipeline, GRANT delayed write access to $token in geturl + + set socketWrState($state(socketinfo)) peNding + lappend socketWrQueue($state(socketinfo)) $token + + } else { + if {$reusing && $state(-pipeline)} { + #Log re-use pipelined, GRANT write access to $token in geturl + set socketWrState($state(socketinfo)) $token + + } elseif {$reusing} { + # Cf tests above - both are ready. + #Log re-use nonpipeline, GRANT r/w access to $token in geturl + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + + } else { + # (!$reusing) + } + + # All (!$reusing) cases come here, and also some $reusing cases if the + # connection is ready. + #Log ---- $state(socketinfo) << conn to $token for HTTP request (a) + # Connect does its own fconfigure. + fileevent $sock writable \ + [list http::Connect $token $proto $phost $srvurl] + } # Wait for the connection to complete. if {![info exists state(-command)]} { @@ -648,7 +1282,7 @@ proc http::geturl {url args} { return -code error $err } } - + ##Log Leaving http::geturl - token $token return $token } @@ -658,8 +1292,8 @@ proc http::geturl {url args} { # established. # # Arguments: -# token State token. -# proto What protocol (http, https, etc.) was used to connect. +# token State token. +# proto What protocol (http, https, etc.) was used to connect. # phost Are we using keep-alive? Non-empty if yes. # srvurl Service-local URL that we're requesting # Results: @@ -668,11 +1302,24 @@ proc http::geturl {url args} { proc http::Connected {token proto phost srvurl} { variable http variable urlTypes + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd variable $token upvar 0 $token state + set tk [namespace tail $token] + + if {$state(reusing) && (!$state(-pipeline)) && ($state(-timeout) > 0)} { + set state(after) [after $state(-timeout) \ + [list http::reset $token timeout]] + } - # Set back the variables needed here + # Set back the variables needed here. set sock $state(sock) set isQueryChannel [info exists state(-querychannel)] set isQuery [info exists state(-query)] @@ -682,9 +1329,12 @@ proc http::Connected {token proto phost srvurl} { set lower [string tolower $proto] set defport [lindex $urlTypes($lower) 0] - # Send data in cr-lf format, but accept any line terminators - - fconfigure $sock -translation {auto crlf} -buffersize $state(-blocksize) + # Send data in cr-lf format, but accept any line terminators. + # Initialisation to {auto *} now done in geturl, KeepSocket and DoneRequest. + # We are concerned here with the request (write) not the response (read). + lassign [fconfigure $sock -translation] trRead trWrite + fconfigure $sock -translation [list $trRead crlf] \ + -buffersize $state(-blocksize) # The following is disallowed in safe interpreters, but the socket is # already in non-blocking mode in that case. 
@@ -707,10 +1357,12 @@ proc http::Connected {token proto phost srvurl} { set how POST # The query channel must be blocking for the async Write to # work properly. - fconfigure $state(-querychannel) -blocking 1 -translation binary + lassign [fconfigure $sock -translation] trRead trWrite + fconfigure $state(-querychannel) -blocking 1 \ + -translation [list $trRead binary] set contDone 0 } - if {[info exists state(-method)] && $state(-method) ne ""} { + if {[info exists state(-method)] && ($state(-method) ne "")} { set how $state(-method) } # We cannot handle chunked encodings with -handler, so force HTTP/1.0 @@ -719,7 +1371,11 @@ proc http::Connected {token proto phost srvurl} { set state(-protocol) 1.0 } set accept_types_seen 0 + + Log ^B$tk begin sending request - token $token + if {[catch { + set state(method) $how puts $sock "$how $srvurl HTTP/$state(-protocol)" if {[dict exists $state(-headers) Host]} { # Allow Host spoofing. [Bug 928154] @@ -732,16 +1388,18 @@ proc http::Connected {token proto phost srvurl} { puts $sock "Host: $host:$port" } puts $sock "User-Agent: $http(-useragent)" - if {$state(-protocol) == 1.0 && $state(-keepalive)} { + if {($state(-protocol) >= 1.0) && $state(-keepalive)} { + # Send this header, because a 1.1 server is not compelled to treat + # this as the default. puts $sock "Connection: keep-alive" - } - if {$state(-protocol) > 1.0 && !$state(-keepalive)} { + } + if {($state(-protocol) > 1.0) && !$state(-keepalive)} { puts $sock "Connection: close" ;# RFC2616 sec 8.1.2.1 - } - if {[info exists phost] && ($phost ne "") && $state(-keepalive)} { + } + if {[info exists phost] && ($phost ne "") && $state(-keepalive)} { puts $sock "Proxy-Connection: Keep-Alive" - } - set accept_encoding_seen 0 + } + set accept_encoding_seen 0 set content_type_seen 0 dict for {key value} $state(-headers) { set value [string map [list \n "" \r ""] $value] @@ -771,10 +1429,13 @@ proc http::Connected {token proto phost srvurl} { if {!$accept_types_seen} { puts $sock "Accept: $state(accept-types)" } - if {!$accept_encoding_seen && ![info exists state(-handler)]} { + if { (!$accept_encoding_seen) + && (![info exists state(-handler)]) + && $http(-zip) + } { puts $sock "Accept-Encoding: gzip,deflate,compress" - } - if {$isQueryChannel && $state(querylength) == 0} { + } + if {$isQueryChannel && ($state(querylength) == 0)} { # Try to determine size of data in channel. If we cannot seek, the # surrounding catch will trap us @@ -803,6 +1464,7 @@ proc http::Connected {token proto phost srvurl} { # response. if {$isQuery || $isQueryChannel} { + # POST method. if {!$content_type_seen} { puts $sock "Content-Type: $state(-type)" } @@ -810,29 +1472,776 @@ proc http::Connected {token proto phost srvurl} { puts $sock "Content-Length: $state(querylength)" } puts $sock "" - fconfigure $sock -translation {auto binary} + flush $sock + # Flush flushes the error in the https case with a bad handshake: + # else the socket never becomes writable again, and hangs until + # timeout (if any). + + lassign [fconfigure $sock -translation] trRead trWrite + fconfigure $sock -translation [list $trRead binary] fileevent $sock writable [list http::Write $token] + # The http::Write command decides when to make the socket readable, + # using the same test as the GET/HEAD case below. } else { + # GET or HEAD method. + if { (![catch {fileevent $sock readable} binding]) + && ($binding eq [list http::CheckEof $sock]) + } { + # Remove the "fileevent readable" binding of an idle persistent + # socket to http::CheckEof. 
We can no longer treat bytes + # received as junk. The server might still time out and + # half-close the socket if it has not yet received the first + # "puts". + fileevent $sock readable {} + } puts $sock "" flush $sock - fileevent $sock readable [list http::Event $sock $token] + Log ^C$tk end sending request - token $token + # End of writing (GET/HEAD methods). The request has been sent. + + DoneRequest $token } } err]} { - # The socket probably was never connected, or the connection dropped - # later. + # The socket probably was never connected, OR the connection dropped + # later, OR https handshake error, which may be discovered as late as + # the "flush" command above... + Log "WARNING - if testing, pay special attention to this\ + case (GI) which is seldom executed - token $token" + if {[info exists state(reusing)] && $state(reusing)} { + # The socket was closed at the server end, and closed at + # this end by http::CheckEof. + if {[TestForReplay $token write $err a]} { + return + } else { + Finish $token {failed to re-use socket} + } - # if state(status) is error, it means someone's already called Finish - # to do the above-described clean up. - if {$state(status) ne "error"} { + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they will + # be discarded. + } elseif {$state(status) eq ""} { + # ...https handshake errors come here. + set msg [registerError $sock] + registerError $sock {} + if {$msg eq {}} { + set msg {failed to use socket} + } else { + } + Finish $token $msg + } elseif {$state(status) ne "error"} { Finish $token $err + } else { + # if state(status) is error, it means someone's already called + # Finish to do the above-described clean up. + } + } + return +} + +# http::registerError +# +# Called (for example when processing TclTLS activity) to register +# an error for a connection on a specific socket. This helps +# http::Connected to deliver meaningful error messages, e.g. when a TLS +# certificate fails verification. +# +# Usage: http::registerError socket ?newValue? +# +# "set" semantics, except that a "get" (a call without a new value) for a +# non-existent socket returns {}, not an error. + +proc http::registerError {sock args} { + variable registeredErrors + + if { ([llength $args] == 0) + && (![info exists registeredErrors($sock)]) + } { + return + } elseif { ([llength $args] == 1) + && ([lindex $args 0] eq {}) + } { + unset -nocomplain registeredErrors($sock) + return + } + set registeredErrors($sock) {*}$args + # N.B. Implicit Return +} + +# http::DoneRequest -- +# +# Command called when a request has been sent. It will arrange the +# next request and/or response as appropriate. +# +# If this command is called when $socketClosing(*), the request $token +# that calls it must be pipelined and destined to fail. + +proc http::DoneRequest {token} { + variable http + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + variable $token + upvar 0 $token state + set tk [namespace tail $token] + set sock $state(sock) + + # If pipelined, connect the next HTTP request to the socket. + if {$state(reusing) && $state(-pipeline)} { + # Enable next token (if any) to write. + # The value "Wready" is set only here, and + # in http::Event after reading the response-headers of a + # non-reusing transaction. + # Previous value is $token. 
It cannot be pending. + set socketWrState($state(socketinfo)) Wready + + # Now ready to write the next pipelined request (if any). + http::NextPipelinedWrite $token + } else { + # If pipelined, this is the first transaction on this socket. We wait + # for the response headers to discover whether the connection is + # persistent. (If this is not done and the connection is not + # persistent, we SHOULD retry and then MUST NOT pipeline before knowing + # that we have a persistent connection + # (rfc2616 8.1.2.2)). + } + + # Connect to receive the response, unless the socket is pipelined + # and another response is being sent. + # This code block is separate from the code below because there are + # cases where socketRdState already has the value $token. + if { $state(-keepalive) + && $state(-pipeline) + && [info exists socketRdState($state(socketinfo))] + && ($socketRdState($state(socketinfo)) eq "Rready") + } { + #Log pipelined, GRANT read access to $token in Connected + set socketRdState($state(socketinfo)) $token + } + + if { $state(-keepalive) + && $state(-pipeline) + && [info exists socketRdState($state(socketinfo))] + && ($socketRdState($state(socketinfo)) ne $token) + } { + # Do not read from the socket until it is ready. + ##Log "HTTP response for token $token is queued for pipelined use" + # If $socketClosing(*), then the caller will be a pipelined write and + # execution will come here. + # This token has already been recorded as "in flight" for writing. + # When the socket is closed, the read queue will be cleared in + # CloseQueuedQueries and so the "lappend" here has no effect. + lappend socketRdQueue($state(socketinfo)) $token + } else { + # In the pipelined case, connection for reading depends on the + # value of socketRdState. + # In the nonpipeline case, connection for reading always occurs. + ReceiveResponse $token + } + return +} + +# http::ReceiveResponse +# +# Connects token to its socket for reading. + +proc http::ReceiveResponse {token} { + variable $token + upvar 0 $token state + set tk [namespace tail $token] + set sock $state(sock) + + #Log ---- $state(socketinfo) >> conn to $token for HTTP response + lassign [fconfigure $sock -translation] trRead trWrite + fconfigure $sock -translation [list auto $trWrite] \ + -buffersize $state(-blocksize) + Log ^D$tk begin receiving response - token $token + + coroutine ${token}EventCoroutine http::Event $sock $token + fileevent $sock readable ${token}EventCoroutine + return +} + +# http::NextPipelinedWrite +# +# - Connecting a socket to a token for writing is done by this command and by +# command KeepSocket. +# - If another request has a pipelined write scheduled for $token's socket, +# and if the socket is ready to accept it, connect the write and update +# the queue accordingly. +# - This command is called from http::DoneRequest and http::Event, +# IF $state(-pipeline) AND (the current transfer has reached the point at +# which the socket is ready for the next request to be written). +# - This command is called when a token has write access and is pipelined and +# keep-alive, and sets socketWrState to Wready. +# - The command need not consider the case where socketWrState is set to a token +# that does not yet have write access. Such a token is waiting for Rready, +# and the assignment of the connection to the token will be done elsewhere (in +# http::KeepSocket). 
+# - This command cannot be called after socketWrState has been set to a +# "pending" token value (that is then overwritten by the caller), because that +# value is set by this command when it is called by an earlier token when it +# relinquishes its write access, and the pending token is always the next in +# line to write. + +proc http::NextPipelinedWrite {token} { + variable http + variable socketRdState + variable socketWrState + variable socketWrQueue + variable socketClosing + variable $token + upvar 0 $token state + set connId $state(socketinfo) + + if { [info exists socketClosing($connId)] + && $socketClosing($connId) + } { + # socketClosing(*) is set because the server has sent a + # "Connection: close" header. + # Behave as if the queues are empty - so do nothing. + } elseif { $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && ([set token2 [lindex $socketWrQueue($connId) 0] + set ${token2}(-pipeline) + ] + ) + } { + # - The usual case for a pipelined connection, ready for a new request. + #Log pipelined, GRANT write access to $token2 in NextPipelinedWrite + set conn [set ${token2}(tmpConnArgs)] + set socketWrState($connId) $token2 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token2 {*}$conn] + #Log ---- $connId << conn to $token2 for HTTP request (b) + + # In the tests below, the next request will be nonpipeline. + } elseif { $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![ set token3 [lindex $socketWrQueue($connId) 0] + set ${token3}(-pipeline) + ] + ) + + && [info exists socketRdState($connId)] + && ($socketRdState($connId) eq "Rready") + } { + # The case in which the next request will be non-pipelined, and the read + # and write queues is ready: which is the condition for a non-pipelined + # write. + variable $token3 + upvar 0 $token3 state3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in NextPipelinedWrite + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (c) + + } elseif { $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![set token2 [lindex $socketWrQueue($connId) 0] + set ${token2}(-pipeline) + ] + ) + } { + # - The case in which the next request will be non-pipelined, but the + # read queue is NOT ready. + # - A read is queued or in progress, but not a write. Cannot start the + # nonpipeline transaction, but must set socketWrState to prevent a new + # pipelined request (in http::geturl) jumping the queue. + # - Because socketWrState($connId) is not set to Wready, the assignment + # of the connection to $token2 will be done elsewhere - by command + # http::KeepSocket when $socketRdState($connId) is set to "Rready". + + #Log re-use nonpipeline, GRANT delayed write access to $token in NextP.. 
+	set socketWrState($connId) peNding
+
+    } else {
+	# No requests in socketWrQueue. Nothing to do.
+    }
+
+    return
+}
+
+# http::CancelReadPipeline
+#
+#	Cancel pipelined responses on a closing "Keep-Alive" socket.
+#
+#	- Called by a variable trace on "unset socketRdState($connId)".
+#	- The variable relates to a Keep-Alive socket, which has been closed.
+#	- Cancels all pipelined responses. The requests have been sent,
+#	  the responses have not yet been received.
+#	- This is a hard cancel that ends each transaction with error status,
+#	  and closes the connection. Do not use it if you want to replay failed
+#	  transactions.
+#	- N.B. Always delete ::http::socketRdState($connId) before deleting
+#	  ::http::socketRdQueue($connId), or this command will do nothing.
+#
+# Arguments
+#	As for a trace command on a variable.
+
+proc http::CancelReadPipeline {name1 connId op} {
+    variable socketRdQueue
+    ##Log CancelReadPipeline $name1 $connId $op
+    if {[info exists socketRdQueue($connId)]} {
+	set msg {the connection was closed by CancelReadPipeline}
+	foreach token $socketRdQueue($connId) {
+	    set tk [namespace tail $token]
+	    Log ^X$tk end of response "($msg)" - token $token
+	    set ${token}(status) eof
+	    Finish $token ;#$msg
+	}
+	set socketRdQueue($connId) {}
+    }
+    return
+}
+
+# http::CancelWritePipeline
+#
+#	Cancel queued events on a closing "Keep-Alive" socket.
+#
+#	- Called by a variable trace on "unset socketWrState($connId)".
+#	- The variable relates to a Keep-Alive socket, which has been closed.
+#	- In pipelined or nonpipeline case: cancels all queued requests. The
+#	  requests have not yet been sent, the responses are not due.
+#	- This is a hard cancel that ends each transaction with error status,
+#	  and closes the connection. Do not use it if you want to replay failed
+#	  transactions.
+#	- N.B. Always delete ::http::socketWrState($connId) before deleting
+#	  ::http::socketWrQueue($connId), or this command will do nothing.
+#
+# Arguments
+#	As for a trace command on a variable.
+
+proc http::CancelWritePipeline {name1 connId op} {
+    variable socketWrQueue
+
+    ##Log CancelWritePipeline $name1 $connId $op
+    if {[info exists socketWrQueue($connId)]} {
+	set msg {the connection was closed by CancelWritePipeline}
+	foreach token $socketWrQueue($connId) {
+	    set tk [namespace tail $token]
+	    Log ^X$tk end of response "($msg)" - token $token
+	    set ${token}(status) eof
+	    Finish $token ;#$msg
+	}
+	set socketWrQueue($connId) {}
+    }
+    return
+}
+
+# http::ReplayIfDead --
+#
+# - A query on a re-used persistent socket failed at the earliest opportunity,
+#   because the socket had been closed by the server. Keep the token, tidy up,
+#   and try to connect on a fresh socket.
+# - The connection is monitored for eof by the command http::CheckEof. Thus
+#   http::ReplayIfDead is needed only when a server event (half-closing an
+#   apparently idle connection), and a client event (sending a request) occur at
+#   almost the same time, and neither client nor server detects the other's
+#   action before performing its own (an "asynchronous close event").
+# - To simplify testing of http::ReplayIfDead, set TEST_EOF 1 in
+#   http::KeepSocket, and then http::ReplayIfDead will be called if http::geturl
+#   is called at any time after the server timeout.
+#
+# Arguments:
+#	token	Connection token.
+#
+# Side Effects:
+#	Use the same token, but try to open a new socket.
+ +proc http::ReplayIfDead {tokenArg doing} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + variable $tokenArg + upvar 0 $tokenArg stateArg + + Log running http::ReplayIfDead for $tokenArg $doing + + # 1. Merge the tokens for transactions in flight, the read (response) queue, + # and the write (request) queue. + + set InFlightR {} + set InFlightW {} + + # Obtain the tokens for transactions in flight. + if {$stateArg(-pipeline)} { + # Two transactions may be in flight. The "read" transaction was first. + # It is unlikely that the server would close the socket if a response + # was pending; however, an earlier request (as well as the present + # request) may have been sent and ignored if the socket was half-closed + # by the server. + + if { [info exists socketRdState($stateArg(socketinfo))] + && ($socketRdState($stateArg(socketinfo)) ne "Rready") + } { + lappend InFlightR $socketRdState($stateArg(socketinfo)) + } elseif {($doing eq "read")} { + lappend InFlightR $tokenArg + } else { + } + + if { [info exists socketWrState($stateArg(socketinfo))] + && $socketWrState($stateArg(socketinfo)) ni {Wready peNding} + } { + lappend InFlightW $socketWrState($stateArg(socketinfo)) + } elseif {($doing eq "write")} { + lappend InFlightW $tokenArg + } else { + } + + # Report any inconsistency of $tokenArg with socket*state. + if { ($doing eq "read") + && [info exists socketRdState($stateArg(socketinfo))] + && ($tokenArg ne $socketRdState($stateArg(socketinfo))) + } { + Log WARNING - ReplayIfDead pipelined tokenArg $tokenArg $doing \ + ne socketRdState($stateArg(socketinfo)) \ + $socketRdState($stateArg(socketinfo)) + + } elseif { + ($doing eq "write") + && [info exists socketWrState($stateArg(socketinfo))] + && ($tokenArg ne $socketWrState($stateArg(socketinfo))) + } { + Log WARNING - ReplayIfDead pipelined tokenArg $tokenArg $doing \ + ne socketWrState($stateArg(socketinfo)) \ + $socketWrState($stateArg(socketinfo)) + } else { + } + } else { + # One transaction should be in flight. + # socketRdState, socketWrQueue are used. + # socketRdQueue should be empty. + + # Report any inconsistency of $tokenArg with socket*state. + if {$tokenArg ne $socketRdState($stateArg(socketinfo))} { + Log WARNING - ReplayIfDead nonpipeline tokenArg $tokenArg $doing \ + ne socketRdState($stateArg(socketinfo)) \ + $socketRdState($stateArg(socketinfo)) + } else { + } + + # Report the inconsistency that socketRdQueue is non-empty. + if { [info exists socketRdQueue($stateArg(socketinfo))] + && ($socketRdQueue($stateArg(socketinfo)) ne {}) + } { + Log WARNING - ReplayIfDead nonpipeline tokenArg $tokenArg $doing \ + has read queue socketRdQueue($stateArg(socketinfo)) \ + $socketRdQueue($stateArg(socketinfo)) ne {} + } else { + } + + lappend InFlightW $socketRdState($stateArg(socketinfo)) + set socketRdQueue($stateArg(socketinfo)) {} + } + + set newQueue {} + lappend newQueue {*}$InFlightR + lappend newQueue {*}$socketRdQueue($stateArg(socketinfo)) + lappend newQueue {*}$InFlightW + lappend newQueue {*}$socketWrQueue($stateArg(socketinfo)) + + + # 2. Tidy up tokenArg. This is a cut-down form of Finish/CloseSocket. + # Do not change state(status). + # No need to after cancel stateArg(after) - either this is done in + # ReplayCore/ReInit, or Finish is called. + + catch {close $stateArg(sock)} + + # 2a. Tidy the tokens in the queues - this is done in ReplayCore/ReInit. 
+ # - Transactions, if any, that are awaiting responses cannot be completed. + # They are listed for re-sending in newQueue. + # - All tokens are preserved for re-use by ReplayCore, and their variables + # will be re-initialised by calls to ReInit. + # - The relevant element of socketMapping, socketRdState, socketWrState, + # socketRdQueue, socketWrQueue, socketClosing, socketPlayCmd will be set + # to new values in ReplayCore. + + ReplayCore $newQueue + return +} + +# http::ReplayIfClose -- +# +# A request on a socket that was previously "Connection: keep-alive" has +# received a "Connection: close" response header. The server supplies +# that response correctly, but any later requests already queued on this +# connection will be lost when the socket closes. +# +# This command takes arguments that represent the socketWrState, +# socketRdQueue and socketWrQueue for this connection. The socketRdState +# is not needed because the server responds in full to the request that +# received the "Connection: close" response header. +# +# Existing request tokens $token (::http::$n) are preserved. The caller +# will be unaware that the request was processed this way. + +proc http::ReplayIfClose {Wstate Rqueue Wqueue} { + Log running http::ReplayIfClose for $Wstate $Rqueue $Wqueue + + if {$Wstate in $Rqueue || $Wstate in $Wqueue} { + Log WARNING duplicate token in http::ReplayIfClose - token $Wstate + set Wstate Wready + } + + # 1. Create newQueue + set InFlightW {} + if {$Wstate ni {Wready peNding}} { + lappend InFlightW $Wstate + } + + set newQueue {} + lappend newQueue {*}$Rqueue + lappend newQueue {*}$InFlightW + lappend newQueue {*}$Wqueue + + # 2. Cleanup - none needed, done by the caller. + + ReplayCore $newQueue + return +} + +# http::ReInit -- +# +# Command to restore a token's state to a condition that +# makes it ready to replay a request. +# +# Command http::geturl stores extra state in state(tmp*) so +# we don't need to do the argument processing again. +# +# The caller must: +# - Set state(reusing) and state(sock) to their new values after calling +# this command. +# - Unset state(tmpState), state(tmpOpenCmd) if future calls to ReplayCore +# or ReInit are inappropriate for this token. Typically only one retry +# is allowed. +# The caller may also unset state(tmpConnArgs) if this value (and the +# token) will be used immediately. The value is needed by tokens that +# will be stored in a queue. +# +# Arguments: +# token Connection token. +# +# Return Value: (boolean) true iff the re-initialisation was successful. + +proc http::ReInit {token} { + variable $token + upvar 0 $token state + + if {!( + [info exists state(tmpState)] + && [info exists state(tmpOpenCmd)] + && [info exists state(tmpConnArgs)] + ) + } { + Log FAILED in http::ReInit via ReplayCore - NO tmp vars for $token + return 0 + } + + if {[info exists state(after)]} { + after cancel $state(after) + unset state(after) + } + + # Don't alter state(status) - this would trigger http::wait if it is in use. + set tmpState $state(tmpState) + set tmpOpenCmd $state(tmpOpenCmd) + set tmpConnArgs $state(tmpConnArgs) + foreach name [array names state] { + if {$name ne "status"} { + unset state($name) + } + } + + # Don't alter state(status). + # Restore state(tmp*) - the caller may decide to unset them. + # Restore state(tmpConnArgs) which is needed for connection. + # state(tmpState), state(tmpOpenCmd) are needed only for retries. 
+ + dict unset tmpState status + array set state $tmpState + set state(tmpState) $tmpState + set state(tmpOpenCmd) $tmpOpenCmd + set state(tmpConnArgs) $tmpConnArgs + + return 1 +} + +# http::ReplayCore -- +# +# Command to replay a list of requests, using existing connection tokens. +# +# Abstracted from http::geturl which stores extra state in state(tmp*) so +# we don't need to do the argument processing again. +# +# Arguments: +# newQueue List of connection tokens. +# +# Side Effects: +# Use existing tokens, but try to open a new socket. + +proc http::ReplayCore {newQueue} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + if {[llength $newQueue] == 0} { + # Nothing to do. + return + } + + ##Log running ReplayCore for {*}$newQueue + set newToken [lindex $newQueue 0] + set newQueue [lrange $newQueue 1 end] + + # 3. Use newToken, and restore its values of state(*). Do not restore + # elements tmp* - we try again only once. + + set token $newToken + variable $token + upvar 0 $token state + + if {![ReInit $token]} { + Log FAILED in http::ReplayCore - NO tmp vars + Finish $token {cannot send this request again} + return + } + + set tmpState $state(tmpState) + set tmpOpenCmd $state(tmpOpenCmd) + set tmpConnArgs $state(tmpConnArgs) + unset state(tmpState) + unset state(tmpOpenCmd) + unset state(tmpConnArgs) + + set state(reusing) 0 + + if {$state(-timeout) > 0} { + set resetCmd [list http::reset $token timeout] + set state(after) [after $state(-timeout) $resetCmd] + } + + set pre [clock milliseconds] + ##Log pre socket opened, - token $token + ##Log $tmpOpenCmd - token $token + # 4. Open a socket. + if {[catch {eval $tmpOpenCmd} sock]} { + # Something went wrong while trying to establish the connection. + Log FAILED - $sock + set state(sock) NONE + Finish $token $sock + return + } + ##Log post socket opened, - token $token + set delay [expr {[clock milliseconds] - $pre}] + if {$delay > 3000} { + Log socket delay $delay - token $token + } + # Command [socket] is called with -async, but takes 5s to 5.1s to return, + # with probability of order 1 in 10,000. This may be a bizarre scheduling + # issue with my (KJN's) system (Fedora Linux). + # This does not cause a problem (unless the request times out when this + # command returns). + + # 5. Configure the persistent socket data. 
+ if {$state(-keepalive)} { + set socketMapping($state(socketinfo)) $sock + + if {![info exists socketRdState($state(socketinfo))]} { + set socketRdState($state(socketinfo)) {} + set varName ::http::socketRdState($state(socketinfo)) + trace add variable $varName unset ::http::CancelReadPipeline + } + + if {![info exists socketWrState($state(socketinfo))]} { + set socketWrState($state(socketinfo)) {} + set varName ::http::socketWrState($state(socketinfo)) + trace add variable $varName unset ::http::CancelWritePipeline + } + + if {$state(-pipeline)} { + #Log new, init for pipelined, GRANT write acc to $token ReplayCore + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } else { + #Log new, init for nonpipeline, GRANT r/w acc to $token ReplayCore + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } + + set socketRdQueue($state(socketinfo)) {} + set socketWrQueue($state(socketinfo)) $newQueue + set socketClosing($state(socketinfo)) 0 + set socketPlayCmd($state(socketinfo)) {ReplayIfClose Wready {} {}} + } + + ##Log pre newQueue ReInit, - token $token + # 6. Configure sockets in the queue. + foreach tok $newQueue { + if {[ReInit $tok]} { + set ${tok}(reusing) 1 + set ${tok}(sock) $sock + } else { + set ${tok}(reusing) 1 + set ${tok}(sock) NONE + Finish $token {cannot send this request again} + } + } + + # 7. Configure the socket for newToken to send a request. + set state(sock) $sock + Log "Using $sock for $state(socketinfo) - token $token" \ + [expr {$state(-keepalive)?"keepalive":""}] + + # Initialisation of a new socket. + ##Log socket opened, now fconfigure - token $token + fconfigure $sock -translation {auto crlf} -buffersize $state(-blocksize) + ##Log socket opened, DONE fconfigure - token $token + + # Connect does its own fconfigure. + fileevent $sock writable [list http::Connect $token {*}$tmpConnArgs] + #Log ---- $sock << conn to $token for HTTP request (e) + return } # Data access functions: # Data - the URL data -# Status - the transaction status: ok, reset, eof, timeout +# Status - the transaction status: ok, reset, eof, timeout, error # Code - the HTTP transaction code, e.g., 200 # Size - the size of the URL data @@ -895,9 +2304,17 @@ proc http::error {token} { proc http::cleanup {token} { variable $token upvar 0 $token state + if {[info commands ${token}EventCoroutine] ne {}} { + rename ${token}EventCoroutine {} + } + if {[info exists state(after)]} { + after cancel $state(after) + unset state(after) + } if {[info exists state]} { unset state } + return } # http::Connect @@ -914,13 +2331,30 @@ proc http::cleanup {token} { proc http::Connect {token proto phost srvurl} { variable $token upvar 0 $token state + set tk [namespace tail $token] set err "due to unexpected EOF" if { [eof $state(sock)] || [set err [fconfigure $state(sock) -error]] ne "" } { + Log "WARNING - if testing, pay special attention to this\ + case (GJ) which is seldom executed - token $token" + if {[info exists state(reusing)] && $state(reusing)} { + # The socket was closed at the server end, and closed at + # this end by http::CheckEof. + if {[TestForReplay $token write $err b]} { + return + } + + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they will + # be discarded. 
+ } Finish $token "connect failed $err" } else { + set state(state) connecting fileevent $state(sock) writable {} ::http::Connected $token $proto $phost $srvurl } @@ -938,8 +2372,18 @@ proc http::Connect {token proto phost srvurl} { # Write the socket and handle callbacks. proc http::Write {token} { + variable http + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + variable $token upvar 0 $token state + set tk [namespace tail $token] set sock $state(sock) # Output a block. Tcl will buffer this if the socket blocks @@ -950,7 +2394,21 @@ proc http::Write {token} { if {[info exists state(-query)]} { # Chop up large query strings so queryprogress callback can give # smooth feedback. - + if { $state(queryoffset) + $state(-queryblocksize) + >= $state(querylength) + } { + # This will be the last puts for the request-body. + if { (![catch {fileevent $sock readable} binding]) + && ($binding eq [list http::CheckEof $sock]) + } { + # Remove the "fileevent readable" binding of an idle + # persistent socket to http::CheckEof. We can no longer + # treat bytes received as junk. The server might still time + # out and half-close the socket if it has not yet received + # the first "puts". + fileevent $sock readable {} + } + } puts -nonewline $sock \ [string range $state(-query) $state(queryoffset) \ [expr {$state(queryoffset) + $state(-queryblocksize) - 1}]] @@ -963,6 +2421,19 @@ proc http::Write {token} { # Copy blocks from the query channel set outStr [read $state(-querychannel) $state(-queryblocksize)] + if {[eof $state(-querychannel)]} { + # This will be the last puts for the request-body. + if { (![catch {fileevent $sock readable} binding]) + && ($binding eq [list http::CheckEof $sock]) + } { + # Remove the "fileevent readable" binding of an idle + # persistent socket to http::CheckEof. We can no longer + # treat bytes received as junk. The server might still time + # out and half-close the socket if it has not yet received + # the first "puts". + fileevent $sock readable {} + } + } puts -nonewline $sock $outStr incr state(queryoffset) [string length $outStr] if {[eof $state(-querychannel)]} { @@ -976,10 +2447,14 @@ proc http::Write {token} { set state(posterror) $err set done 1 } + if {$done} { catch {flush $sock} fileevent $sock writable {} - fileevent $sock readable [list http::Event $sock $token] + Log ^C$tk end sending request - token $token + # End of writing (POST method). The request has been sent. + + DoneRequest $token } # Callback to the client after we've completely handled everything. @@ -988,11 +2463,14 @@ proc http::Write {token} { eval $state(-queryprogress) \ [list $token $state(querylength) $state(queryoffset)] } + return } # http::Event # -# Handle input on the socket +# Handle input on the socket. This command is the core of +# the coroutine commands ${token}EventCoroutine that are +# bound to "fileevent $sock readable" and process input. # # Arguments # sock The socket receiving input. @@ -1002,196 +2480,540 @@ proc http::Write {token} { # Read the socket and handle callbacks. 
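http::Event relies on the same coroutine-plus-fileevent pattern introduced by http::ReceiveResponse above: the coroutine yields after each pass over the available input and is resumed by the next readable event. As a minimal illustrative sketch only (not part of this patch; the namespace, command names and channel are hypothetical), the pattern looks like this:

    # Sketch only - hypothetical names, not from the http package.
    namespace eval demo {}

    proc demo::Reader {sock} {
        # Runs inside a coroutine; each "yield" returns control to the
        # event loop until the next readable event resumes the coroutine.
        while 1 {
            yield
            set data [read $sock]
            if {[eof $sock]} {
                fileevent $sock readable {}
                close $sock
                return
            }
            # ... process $data here ...
        }
    }

    proc demo::Watch {sock} {
        # The channel must be non-blocking for this event-driven pattern.
        fconfigure $sock -blocking 0 -translation binary
        coroutine demo::reader$sock demo::Reader $sock
        fileevent $sock readable demo::reader$sock
    }

http::Event differs in that it parses the status line, headers and body across many resumptions, but the yield discipline is the same.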
proc http::Event {sock token} { + variable http + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + variable $token upvar 0 $token state + set tk [namespace tail $token] + while 1 { + yield + ##Log Event call - token $token - if {![info exists state]} { - Log "Event $sock with invalid token '$token' - remote close?" - if {![eof $sock]} { - if {[set d [read $sock]] ne ""} { - Log "WARNING: additional data left on closed socket" + if {![info exists state]} { + Log "Event $sock with invalid token '$token' - remote close?" + if {![eof $sock]} { + if {[set d [read $sock]] ne ""} { + Log "WARNING: additional data left on closed socket\ + - token $token" + } } + Log ^X$tk end of response (token error) - token $token + CloseSocket $sock + return } - CloseSocket $sock - return - } - if {$state(state) eq "connecting"} { - if {[catch {gets $sock state(http)} n]} { - return [Finish $token $n] - } elseif {$n >= 0} { - set state(state) "header" - } - } elseif {$state(state) eq "header"} { - if {[catch {gets $sock line} n]} { - return [Finish $token $n] - } elseif {$n == 0} { - # We have now read all headers - # We ignore HTTP/1.1 100 Continue returns. RFC2616 sec 8.2.3 - if {$state(http) == "" || ([regexp {^\S+\s(\d+)} $state(http) {} x] && $x == 100)} { - set state(state) "connecting" - return + if {$state(state) eq "connecting"} { + ##Log - connecting - token $token + if { $state(reusing) + && $state(-pipeline) + && ($state(-timeout) > 0) + && (![info exists state(after)]) + } { + set state(after) [after $state(-timeout) \ + [list http::reset $token timeout]] } - set state(state) body + if {[catch {gets $sock state(http)} nsl]} { + Log "WARNING - if testing, pay special attention to this\ + case (GK) which is seldom executed - token $token" + if {[info exists state(reusing)] && $state(reusing)} { + # The socket was closed at the server end, and closed at + # this end by http::CheckEof. - # If doing a HEAD, then we won't get any body - if {$state(-validate)} { - Eof $token - return - } + if {[TestForReplay $token read $nsl c]} { + return + } - # For non-chunked transfer we may have no body - in this case we - # may get no further file event if the connection doesn't close - # and no more data is sent. We can tell and must finish up now - - # not later. - if { - !(([info exists state(connection)] - && ($state(connection) eq "close")) - || [info exists state(transfer)]) - && ($state(totalsize) == 0) + # else: + # This is NOT a persistent socket that has been closed since + # its last use. + # If any other requests are in flight or pipelined/queued, + # they will be discarded. + } else { + Log ^X$tk end of response (error) - token $token + Finish $token $nsl + return + } + } elseif {$nsl >= 0} { + ##Log - connecting 1 - token $token + set state(state) "header" + } elseif { [eof $sock] + && [info exists state(reusing)] + && $state(reusing) } { - Log "body size is 0 and no events likely - complete." - Eof $token - return - } + # The socket was closed at the server end, and we didn't notice. + # This is the first read - where the closure is usually first + # detected. - # We have to use binary translation to count bytes properly. 
- fconfigure $sock -translation binary + if {[TestForReplay $token read {} d]} { + return + } - if { - $state(-binary) || [IsBinaryContentType $state(type)] - } { - # Turn off conversions for non-text data - set state(binary) 1 + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they + # will be discarded. + } else { + ##Log - connecting 2 - token $token + # nsl is -1 so either fblocked (OK) or (eof and not reusing). + # Continue. Any eof is processed at the end of this proc. } - if {[info exists state(-channel)]} { - if {$state(binary) || [llength [ContentEncoding $token]]} { - fconfigure $state(-channel) -translation binary + } elseif {$state(state) eq "header"} { + if {[catch {gets $sock line} nhl]} { + ##Log header failed - token $token + Log ^X$tk end of response (error) - token $token + Finish $token $nhl + return + } elseif {$nhl == 0} { + ##Log header done - token $token + Log ^E$tk end of response headers - token $token + # We have now read all headers + # We ignore HTTP/1.1 100 Continue returns. RFC2616 sec 8.2.3 + if { ($state(http) == "") + || ([regexp {^\S+\s(\d+)} $state(http) {} x] && $x == 100) + } { + set state(state) "connecting" + continue + # This was a "return" in the pre-coroutine code. } - if {![info exists state(-handler)]} { - # Initiate a sequence of background fcopies - fileevent $sock readable {} - CopyStart $sock $token - return + + if { ([info exists state(connection)]) + && ([info exists socketMapping($state(socketinfo))]) + && ($state(connection) eq "keep-alive") + && ($state(-keepalive)) + && (!$state(reusing)) + && ($state(-pipeline)) + } { + # Response headers received for first request on a + # persistent socket. Now ready for pipelined writes (if + # any). + # Previous value is $token. It cannot be "pending". + set socketWrState($state(socketinfo)) Wready + http::NextPipelinedWrite $token } - } - } elseif {$n > 0} { - # Process header lines - if {[regexp -nocase {^([^:]+):(.+)$} $line x key value]} { - switch -- [string tolower $key] { - content-type { - set state(type) [string trim [string tolower $value]] - # grab the optional charset information - if {[regexp -nocase \ - {charset\s*=\s*\"((?:[^""]|\\\")*)\"} \ - $state(type) -> cs]} { - set state(charset) [string map {{\"} \"} $cs] + + # Once a "close" has been signaled, the client MUST NOT send any + # more requests on that connection. + # + # If either the client or the server sends the "close" token in + # the Connection header, that request becomes the last one for + # the connection. + + if { ([info exists state(connection)]) + && ([info exists socketMapping($state(socketinfo))]) + && ($state(connection) eq "close") + && ($state(-keepalive)) + } { + # The server warns that it will close the socket after this + # response. + ##Log WARNING - socket will close after response for $token + # Prepare data for a call to ReplayIfClose. 
+ if { ($socketRdQueue($state(socketinfo)) ne {}) + || ($socketWrQueue($state(socketinfo)) ne {}) + || ($socketWrState($state(socketinfo)) ni + [list Wready peNding $token]) + } { + set InFlightW $socketWrState($state(socketinfo)) + if {$InFlightW in [list Wready peNding $token]} { + set InFlightW Wready } else { - regexp -nocase {charset\s*=\s*(\S+?);?} \ - $state(type) -> state(charset) + set msg "token ${InFlightW} is InFlightW" + ##Log $msg - token $token } + + set socketPlayCmd($state(socketinfo)) \ + [list ReplayIfClose $InFlightW \ + $socketRdQueue($state(socketinfo)) \ + $socketWrQueue($state(socketinfo))] + + # - All tokens are preserved for re-use by ReplayCore. + # - Queues are preserved in case of Finish with error, + # but are not used for anything else because + # socketClosing(*) is set below. + # - Cancel the state(after) timeout events. + foreach tokenVal $socketRdQueue($state(socketinfo)) { + if {[info exists ${tokenVal}(after)]} { + after cancel [set ${tokenVal}(after)] + unset ${tokenVal}(after) + } + } + + } else { + set socketPlayCmd($state(socketinfo)) \ + {ReplayIfClose Wready {} {}} } - content-length { - set state(totalsize) [string trim $value] - } - content-encoding { - set state(coding) [string trim $value] + + # Do not allow further connections on this socket. + set socketClosing($state(socketinfo)) 1 + } + + set state(state) body + + # If doing a HEAD, then we won't get any body + if {$state(-validate)} { + Log ^F$tk end of response for HEAD request - token $token + set state(state) complete + Eot $token + return + } + + # - For non-chunked transfer we may have no body - in this case + # we may get no further file event if the connection doesn't + # close and no more data is sent. We can tell and must finish + # up now - not later - the alternative would be to wait until + # the server times out. + # - In this case, the server has NOT told the client it will + # close the connection, AND it has NOT indicated the resource + # length EITHER by setting the Content-Length (totalsize) OR + # by using chunked Transfer-Encoding. + # - Do not worry here about the case (Connection: close) because + # the server should close the connection. + # - IF (NOT Connection: close) AND (NOT chunked encoding) AND + # (totalsize == 0). + + if { (!( [info exists state(connection)] + && ($state(connection) eq "close") + ) + ) + && (![info exists state(transfer)]) + && ($state(totalsize) == 0) + } { + set msg {body size is 0 and no events likely - complete} + Log "$msg - token $token" + set msg {(length unknown, set to 0)} + Log ^F$tk end of response body {*}$msg - token $token + set state(state) complete + Eot $token + return + } + + # We have to use binary translation to count bytes properly. + lassign [fconfigure $sock -translation] trRead trWrite + fconfigure $sock -translation [list binary $trWrite] + + if { + $state(-binary) || [IsBinaryContentType $state(type)] + } { + # Turn off conversions for non-text data. + set state(binary) 1 + } + if {[info exists state(-channel)]} { + if {$state(binary) || [llength [ContentEncoding $token]]} { + fconfigure $state(-channel) -translation binary } - transfer-encoding { - set state(transfer) \ - [string trim [string tolower $value]] + if {![info exists state(-handler)]} { + # Initiate a sequence of background fcopies. 
+ fileevent $sock readable {} + rename ${token}EventCoroutine {} + CopyStart $sock $token + return } - proxy-connection - - connection { - set state(connection) \ - [string trim [string tolower $value]] + } + } elseif {$nhl > 0} { + # Process header lines. + ##Log header - token $token - $line + if {[regexp -nocase {^([^:]+):(.+)$} $line x key value]} { + switch -- [string tolower $key] { + content-type { + set state(type) [string trim [string tolower $value]] + # Grab the optional charset information. + if {[regexp -nocase \ + {charset\s*=\s*\"((?:[^""]|\\\")*)\"} \ + $state(type) -> cs]} { + set state(charset) [string map {{\"} \"} $cs] + } else { + regexp -nocase {charset\s*=\s*(\S+?);?} \ + $state(type) -> state(charset) + } + } + content-length { + set state(totalsize) [string trim $value] + } + content-encoding { + set state(coding) [string trim $value] + } + transfer-encoding { + set state(transfer) \ + [string trim [string tolower $value]] + } + proxy-connection - + connection { + set state(connection) \ + [string trim [string tolower $value]] + } } + lappend state(meta) $key [string trim $value] } - lappend state(meta) $key [string trim $value] } - } - } else { - # Now reading body - if {[catch { - if {[info exists state(-handler)]} { - set n [eval $state(-handler) [list $sock $token]] - } elseif {[info exists state(transfer_final)]} { - set line [getTextLine $sock] - set n [string length $line] - if {$n > 0} { - Log "found $n bytes following final chunk" - append state(transfer_final) $line - } else { - Log "final chunk part" - Eof $token - } - } elseif { - [info exists state(transfer)] - && $state(transfer) eq "chunked" - } { - set size 0 - set chunk [getTextLine $sock] - set n [string length $chunk] - if {[string trim $chunk] ne ""} { - scan $chunk %x size - if {$size != 0} { - set bl [fconfigure $sock -blocking] - fconfigure $sock -blocking 1 - set chunk [read $sock $size] - fconfigure $sock -blocking $bl - set n [string length $chunk] - if {$n >= 0} { - append state(body) $chunk + } else { + # Now reading body + ##Log body - token $token + if {[catch { + if {[info exists state(-handler)]} { + set n [eval $state(-handler) [list $sock $token]] + ##Log handler $n - token $token + # N.B. the protocol has been set to 1.0 because the -handler + # logic is not expected to handle chunked encoding. + # FIXME Allow -handler with 1.1 on dechunked stacked chan. + if {$state(totalsize) == 0} { + # We know the transfer is complete only when the server + # closes the connection - i.e. eof is not an error. + set state(state) complete + } + if {![string is integer -strict $n]} { + if 1 { + # Do not tolerate bad -handler - fail with error + # status. + set msg {the -handler command for http::geturl must\ + return an integer (the number of bytes\ + read)} + Log ^X$tk end of response (handler error) -\ + token $token + Eot $token $msg + } else { + # Tolerate the bad -handler, and continue. The + # penalty: + # (a) Because the handler returns nonsense, we know + # the transfer is complete only when the server + # closes the connection - i.e. eof is not an + # error. + # (b) http::size will not be accurate. + # (c) The transaction is already downgraded to 1.0 + # to avoid chunked transfer encoding. It MUST + # also be forced to "Connection: close" or the + # HTTP/1.0 equivalent; or it MUST fail (as + # above) if the server sends + # "Connection: keep-alive" or the HTTP/1.0 + # equivalent. 
+ set n 0 + set state(state) complete } - if {$size != [string length $chunk]} { - Log "WARNING: mis-sized chunk:\ - was [string length $chunk], should be $size" + } else { + } + } elseif {[info exists state(transfer_final)]} { + # This code forgives EOF in place of the final CRLF. + set line [getTextLine $sock] + set n [string length $line] + set state(state) complete + if {$n > 0} { + # - HTTP trailers (late response headers) are permitted + # by Chunked Transfer-Encoding, and can be safely + # ignored. + # - Do not count these bytes in the total received for + # the response body. + Log "trailer of $n bytes after final chunk -\ + token $token" + append state(transfer_final) $line + set n 0 + } else { + Log ^F$tk end of response body (chunked) - token $token + Log "final chunk part - token $token" + Eot $token + } + } elseif { [info exists state(transfer)] + && ($state(transfer) eq "chunked") + } { + ##Log chunked - token $token + set size 0 + set hexLenChunk [getTextLine $sock] + #set ntl [string length $hexLenChunk] + if {[string trim $hexLenChunk] ne ""} { + scan $hexLenChunk %x size + if {$size != 0} { + ##Log chunk-measure $size - token $token + set chunk [BlockingRead $sock $size] + set n [string length $chunk] + if {$n >= 0} { + append state(body) $chunk + incr state(log_size) [string length $chunk] + ##Log chunk $n cumul $state(log_size) -\ + token $token + } + if {$size != [string length $chunk]} { + Log "WARNING: mis-sized chunk:\ + was [string length $chunk], should be\ + $size - token $token" + set n 0 + set state(connection) close + Log ^X$tk end of response (chunk error) \ + - token $token + set msg {error in chunked encoding - fetch\ + terminated} + Eot $token $msg + } + # CRLF that follows chunk. + # If eof, this is handled at the end of this proc. + getTextLine $sock + } else { + set n 0 + set state(transfer_final) {} } - getTextLine $sock } else { - set state(transfer_final) {} + # Line expected to hold chunk length is empty, or eof. + ##Log bad-chunk-measure - token $token + set n 0 + set state(connection) close + Log ^X$tk end of response (chunk error) - token $token + Eot $token {error in chunked encoding -\ + fetch terminated} + } + } else { + ##Log unchunked - token $token + if {$state(totalsize) == 0} { + # We know the transfer is complete only when the server + # closes the connection. + set state(state) complete + set reqSize $state(-blocksize) + } else { + # Ask for the whole of the unserved response-body. + # This works around a problem with a tls::socket - for + # https in keep-alive mode, and a request for + # $state(-blocksize) bytes, the last part of the + # resource does not get read until the server times out. + set reqSize [expr { $state(totalsize) + - $state(currentsize)}] + + # The workaround fails if reqSize is + # capped at $state(-blocksize). + # set reqSize [expr {min($reqSize, $state(-blocksize))}] + } + set c $state(currentsize) + set t $state(totalsize) + ##Log non-chunk currentsize $c of totalsize $t -\ + token $token + set block [read $sock $reqSize] + set n [string length $block] + if {$n >= 0} { + append state(body) $block + ##Log non-chunk [string length $state(body)] -\ + token $token } } + # This calculation uses n from the -handler, chunked, or + # unchunked case as appropriate. + if {[info exists state]} { + if {$n >= 0} { + incr state(currentsize) $n + set c $state(currentsize) + set t $state(totalsize) + ##Log another $n currentsize $c totalsize $t -\ + token $token + } + # If Content-Length - check for end of data. 
+ if { + ($state(totalsize) > 0) + && ($state(currentsize) >= $state(totalsize)) + } { + Log ^F$tk end of response body (unchunked) -\ + token $token + set state(state) complete + Eot $token + } + } + } err]} { + Log ^X$tk end of response (error ${err}) - token $token + Finish $token $err + return } else { - #Log "read non-chunk $state(currentsize) of $state(totalsize)" - set block [read $sock $state(-blocksize)] - set n [string length $block] - if {$n >= 0} { - append state(body) $block + if {[info exists state(-progress)]} { + eval $state(-progress) \ + [list $token $state(totalsize) $state(currentsize)] } } - if {[info exists state]} { - if {$n >= 0} { - incr state(currentsize) $n - } - # If Content-Length - check for end of data. - if { - ($state(totalsize) > 0) - && ($state(currentsize) >= $state(totalsize)) - } { - Eof $token + } + + # catch as an Eot above may have closed the socket already + # $state(state) may be connecting, header, body, or complete + if {![set cc [catch {eof $sock} eof]] && $eof} { + ##Log eof - token $token + if {[info exists $token]} { + set state(connection) close + if {$state(state) eq "complete"} { + # This includes all cases in which the transaction + # can be completed by eof. + # The value "complete" is set only in http::Event, and it is + # used only in the test above. + Log ^F$tk end of response body (unchunked, eof) -\ + token $token + Eot $token + } else { + # Premature eof. + Log ^X$tk end of response (unexpected eof) - token $token + Eot $token eof } + } else { + # open connection closed on a token that has been cleaned up. + Log ^X$tk end of response (token error) - token $token + CloseSocket $sock } - } err]} { - return [Finish $token $err] + } elseif {$cc} { + return } else { - if {[info exists state(-progress)]} { - eval $state(-progress) \ - [list $token $state(totalsize) $state(currentsize)] - } + # Not eof, continue and yield. } } + return +} - # catch as an Eof above may have closed the socket already - if {![catch {eof $sock} eof] && $eof} { - if {[info exists $token]} { - set state(connection) close - Eof $token - } else { - # open connection closed on a token that has been cleaned up. - CloseSocket $sock - } - return +# http::TestForReplay +# +# Command called if eof is discovered when a socket is first used for a +# new transaction. Typically this occurs if a persistent socket is used +# after a period of idleness and the server has half-closed the socket. +# +# token - the connection token returned by http::geturl +# doing - "read" or "write" +# err - error message, if any +# caller - code to identify the caller - used only in logging +# +# Return Value: boolean, true iff the command calls http::ReplayIfDead. + +proc http::TestForReplay {token doing err caller} { + variable http + variable $token + upvar 0 $token state + set tk [namespace tail $token] + if {$doing eq "read"} { + set code Q + set action response + set ing reading + } else { + set code P + set action request + set ing writing + } + + if {$err eq {}} { + set err "detect eof when $ing (server timed out?)" + } + + if {$state(method) eq "POST" && !$http(-repost)} { + # No Replay. + # The present transaction will end when Finish is called. + # That call to Finish will abort any other transactions + # currently in the write queue. + # For calls from http::Event this occurs when execution + # reaches the code block at the end of that proc. 
+ set msg {no retry for POST with http::config -repost 0} + Log reusing socket failed "($caller)" - $msg - token $token + Log error - $err - token $token + Log ^X$tk end of $action (error) - token $token + return 0 + } else { + # Replay. + set msg {try a new socket} + Log reusing socket failed "($caller)" - $msg - token $token + Log error - $err - token $token + Log ^$code$tk Any unfinished (incl this one) failed - token $token + ReplayIfDead $token $doing + return 1 } } @@ -1229,7 +3051,10 @@ proc http::IsBinaryContentType {type} { # http::getTextLine -- # -# Get one line with the stream in blocking crlf mode +# Get one line with the stream in crlf mode. +# Used if Transfer-Encoding is chunked. +# Empty line is not distinguished from eof. The caller must +# be able to handle this. # # Arguments # sock The socket receiving input. @@ -1239,13 +3064,55 @@ proc http::IsBinaryContentType {type} { proc http::getTextLine {sock} { set tr [fconfigure $sock -translation] - set bl [fconfigure $sock -blocking] - fconfigure $sock -translation crlf -blocking 1 - set r [gets $sock] - fconfigure $sock -translation $tr -blocking $bl + lassign $tr trRead trWrite + fconfigure $sock -translation [list crlf $trWrite] + set r [BlockingGets $sock] + fconfigure $sock -translation $tr return $r } +# http::BlockingRead +# +# Replacement for a blocking read. +# The caller must be a coroutine. + +proc http::BlockingRead {sock size} { + if {$size < 1} { + return + } + set result {} + while 1 { + set need [expr {$size - [string length $result]}] + set block [read $sock $need] + set eof [eof $sock] + append result $block + if {[string length $result] >= $size || $eof} { + return $result + } else { + yield + } + } +} + +# http::BlockingGets +# +# Replacement for a blocking gets. +# The caller must be a coroutine. +# Empty line is not distinguished from eof. The caller must +# be able to handle this. + +proc http::BlockingGets {sock} { + while 1 { + set count [gets $sock line] + set eof [eof $sock] + if {$count > -1 || $eof} { + return $line + } else { + yield + } + } +} + # http::CopyStart # # Error handling wrapper around fcopy @@ -1271,12 +3138,17 @@ proc http::CopyStart {sock token {initial 1}} { } } if {[catch { + # FIXME Keep-Alive on https tls::socket with unchunked transfer + # hangs until the server times out. A workaround is possible, as for + # the case without -channel, but it does not use the neat "fcopy" + # solution. fcopy $sock $state(-channel) -size $state(-blocksize) -command \ [list http::CopyDone $token] } err]} { Finish $token $err } } + return } proc http::CopyChunk {token chunk} { @@ -1294,7 +3166,7 @@ proc http::CopyChunk {token chunk} { $token $state(totalsize) $state(currentsize)] } } else { - Log "CopyChunk Finish $token" + Log "CopyChunk Finish - token $token" if {[info exists state(zlib)]} { set excess "" foreach stream $state(zlib) { @@ -1304,8 +3176,9 @@ proc http::CopyChunk {token chunk} { foreach stream $state(zlib) { $stream close } unset state(zlib) } - Eof $token ;# FIX ME: pipelining. + Eot $token ;# FIX ME: pipelining. } + return } # http::CopyDone @@ -1328,33 +3201,54 @@ proc http::CopyDone {token count {error {}}} { eval $state(-progress) \ [list $token $state(totalsize) $state(currentsize)] } - # At this point the token may have been reset + # At this point the token may have been reset. 
     if {[string length $error]} {
         Finish $token $error
     } elseif {[catch {eof $sock} iseof] || $iseof} {
-        Eot $token
+        Eot $token
     } else {
         CopyStart $sock $token 0
     }
+    return
 }
 
-# http::Eof
+# http::Eot
 #
-#	Handle eof on the socket
+#	Called when either:
+#	a. An eof condition is detected on the socket.
+#	b. The client decides that the response is complete.
+#	c. The client detects an inconsistency and aborts the transaction.
+#
+#	Does:
+#	1. Set state(status)
+#	2. Reverse any Content-Encoding
+#	3. Convert charset encoding and line ends if necessary
+#	4. Call http::Finish
 #
 # Arguments
 #	token	The token returned from http::geturl
+#	force	(previously) optional, has no effect
+#	reason	- "eof" means premature EOF (not EOF as the natural end of
+#		  the response)
+#		- "" means completion of response, with or without EOF
+#		- anything else describes an error condition other than
+#		  premature EOF.
 #
 # Side Effects
 #	Clean up the socket
 
-proc http::Eof {token {force 0}} {
+proc http::Eot {token {reason {}}} {
     variable $token
     upvar 0 $token state
-    if {$state(state) eq "header"} {
-	# Premature eof
+
+    if {$reason eq "eof"} {
+	# Premature eof.
 	set state(status) eof
+	set reason {}
+    } elseif {$reason ne ""} {
+	# Abort the transaction.
+	set state(status) $reason
     } else {
+	# The response is complete.
 	set state(status) ok
     }
 
@@ -1364,14 +3258,15 @@ proc http::Eof {token {force 0}} {
 	    set state(body) [zlib $coding $state(body)]
 	}
     } err]} {
-	Log "error doing decompression: $err"
-	return [Finish $token $err]
+	Log "error doing decompression for token $token: $err"
+	Finish $token $err
+	return
     }
 
     if {!$state(binary)} {
 	# If we are getting text, set the incoming channel's encoding
-	# correctly. iso8859-1 is the RFC default, but this could be any IANA
-	# charset. However, we only know how to convert what we have
+	# correctly. iso8859-1 is the RFC default, but this could be any
+	# IANA charset. However, we only know how to convert what we have
 	# encodings for.
 	set enc [CharsetToEncoding $state(charset)]
@@ -1383,7 +3278,8 @@ proc http::Eof {token {force 0}} {
 	    set state(body) [string map {\r\n \n \r \n} $state(body)]
 	}
     }
-    Finish $token
+    Finish $token $reason
+    return
 }
 
 # http::wait --
@@ -1394,7 +3290,7 @@ proc http::Eof {token {force 0}} {
 #	token	Connection token.
 #
 # Results:
-#	The status after the wait.
+#	The status after the wait.
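For orientation, the status value assigned by Eot (ok, eof, timeout, reset, or an error reason) is what callers eventually examine. A minimal usage sketch, not taken from this patch, using a hypothetical URL and a no-op callback:

    # Sketch only - hypothetical URL; the callback does nothing because
    # the result is collected after http::wait.
    package require http

    set tok [::http::geturl http://example.com/index.html \
                 -keepalive 1 -timeout 10000 \
                 -command [list apply {{tok} {}}]]
    # ... other event-driven work can run here ...
    ::http::wait $tok        ;# block until the transaction completes
    switch -- [::http::status $tok] {
        ok      { puts "HTTP [::http::ncode $tok]: [string length [::http::data $tok]] bytes" }
        eof     { puts "connection closed before the response was complete" }
        timeout { puts "request timed out" }
        default { puts "failed: [::http::error $tok]" }
    }
    ::http::cleanup $tok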
proc http::wait {token} { variable $token @@ -1486,6 +3382,7 @@ proc http::ProxyRequired {host} { } return [list $http(-proxyhost) $http(-proxyport)] } + return } # http::CharsetToEncoding -- @@ -1539,7 +3436,8 @@ proc http::ContentEncoding {token} { compress - x-compress { lappend r decompress } identity {} default { - return -code error "unsupported content-encoding \"$coding\"" + set msg "unsupported content-encoding \"$coding\"" + return -code error $msg } } } @@ -1549,31 +3447,33 @@ proc http::ContentEncoding {token} { proc http::make-transformation-chunked {chan command} { set lambda {{chan command} { - set data "" - set size -1 - yield - while {1} { - chan configure $chan -translation {crlf binary} - while {[gets $chan line] < 1} { yield } - chan configure $chan -translation {binary binary} - if {[scan $line %x size] != 1} { return -code error "invalid size: \"$line\"" } - set chunk "" - while {$size && ![chan eof $chan]} { - set part [chan read $chan $size] - incr size -[string length $part] - append chunk $part - } - if {[catch { + set data "" + set size -1 + yield + while {1} { + chan configure $chan -translation {crlf binary} + while {[gets $chan line] < 1} { yield } + chan configure $chan -translation {binary binary} + if {[scan $line %x size] != 1} { + return -code error "invalid size: \"$line\"" + } + set chunk "" + while {$size && ![chan eof $chan]} { + set part [chan read $chan $size] + incr size -[string length $part] + append chunk $part + } + if {[catch { uplevel #0 [linsert $command end $chunk] }]} { http::Log "Error in callback: $::errorInfo" } - if {[string length $chunk] == 0} { + if {[string length $chunk] == 0} { # channel might have been closed in the callback - catch {chan event $chan readable {}} - return - } - } + catch {chan event $chan readable {}} + return + } + } }} coroutine dechunk$chan ::apply $lambda $chan $command chan event $chan readable [namespace origin dechunk$chan] diff --git a/tests/http11.test b/tests/http11.test index 2e50837..1e30802 100644 --- a/tests/http11.test +++ b/tests/http11.test @@ -515,10 +515,7 @@ proc handler {var sock token} { set chunk [read $sock] append data $chunk #::http::Log "handler read [string length $chunk] ([chan configure $sock -buffersize])" - if {[eof $sock]} { - #::http::Log "handler eof $sock" - chan event $sock readable {} - } + return [string length $chunk] } test http11-3.0 "-handler,close,identity" -setup { diff --git a/tests/httpPipeline.test b/tests/httpPipeline.test new file mode 100644 index 0000000..5eb02d3 --- /dev/null +++ b/tests/httpPipeline.test @@ -0,0 +1,866 @@ +# httpPipeline.test +# +# Test HTTP/1.1 concurrent requests including +# queueing, pipelining and retries. +# +# Copyright (C) 2018 Keith Nash <kjnash@users.sourceforge.net> +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. + +package require tcltest 2 +namespace import -force ::tcltest::* + +package require http 2.8 + +set sourcedir [file normalize [file dirname [info script]]] +source [file join $sourcedir httpTest.tcl] +source [file join $sourcedir httpTestScript.tcl] + +# ------------------------------------------------------------------------------ +# (1) Define the test scripts that will be used to generate logs for analysis - +# and also define the "correct" results. 
+# ------------------------------------------------------------------------------ + +proc ReturnTestScriptAndResult {ca cb delay te} { + + switch -- $ca { + 1 {set start { + START + KEEPALIVE 0 + PIPELINE 0 + }} + + 2 {set start { + START + KEEPALIVE 0 + PIPELINE 1 + }} + + 3 {set start { + START + KEEPALIVE 1 + PIPELINE 0 + }} + + 4 {set start { + START + KEEPALIVE 1 + PIPELINE 1 + }} + + default { + return -code error {no matching script} + } + } + + set middle " + [list DELAY $delay] + " + + switch -- $cb { + 1 {set end { + GET a + GET b + GET c + GET a + STOP + } + set resShort {1 ? ? ?} + set resLong {1 2 3 4} + } + + 2 {set end { + GET a + HEAD b + GET c + HEAD a + HEAD c + STOP + } + set resShort {1 ? ? ? ?} + set resLong {1 2 3 4 5} + } + + 3 {set end { + HEAD a + GET b + HEAD c + HEAD b + GET a + GET b + STOP + } + set resShort {1 ? ? ? ? ?} + set resLong {1 2 3 4 5 6} + } + + 4 {set end { + GET a + GET b + GET c + GET a + POST b address=home code=brief paid=yes + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? ? ? 5 ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 5 {set end { + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + STOP + } + set resShort {1 2 3 4 5 6 7 8 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 6 {set end { + POST a address=home code=brief paid=yes + GET b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + GET a address=home code=brief paid=yes + GET b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 ? 3 ? ? 6 7 ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 7 {set end { + GET b address=home code=brief paid=yes + POST a address=home code=brief paid=yes + GET a address=home code=brief paid=yes + POST c address=home code=brief paid=yes + GET b address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 2 ? 4 ? ? 7 8 ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 8 {set end { + # Telling the server to close the connection. + GET a + GET b close=y + GET c + GET a + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? 3 ? ? ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 9 {set end { + # Telling the server to close the connection. + GET a + POST b close=y address=home code=brief paid=yes + GET c + GET a + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 2 3 ? ? ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 10 {set end { + # Telling the server to close the connection. + GET a + GET b close=y + POST c address=home code=brief paid=yes + GET a + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? 3 ? ? ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 11 {set end { + # Telling the server to close the connection twice. + GET a + GET b close=y + GET c + GET a + GET b close=y + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? 3 ? ? 6 ? ? 
?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 12 {set end { + # Telling the server to delay before sending the response. + GET a + GET b delay=1 + GET c + GET a + GET b + STOP + } + set resShort {1 ? ? ? ?} + set resLong {1 2 3 4 5} + } + + 13 {set end { + # Making the server close the connection (time out). + GET a + WAIT 2000 + GET b + GET c + GET a + GET b + STOP + } + set resShort {1 2 ? ? ?} + set resLong {1 2 3 4 5} + } + + 14 {set end { + # Making the server close the connection (time out) twice. + GET a + WAIT 2000 + GET b + GET c + GET a + WAIT 2000 + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 2 ? ? 5 ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 15 {set end { + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes close=y delay=1 + POST c address=home code=brief paid=yes delay=1 + POST a address=home code=brief paid=yes close=y + WAIT 2000 + POST b address=home code=brief paid=yes delay=1 + POST c address=home code=brief paid=yes close=y + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes close=y + POST c address=home code=brief paid=yes + STOP + } + set resShort {1 2 3 4 5 6 7 8 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 16 {set end { + POST a address=home code=brief paid=yes + GET b address=home code=brief paid=yes + POST c address=home code=brief paid=yes close=y + GET a address=home code=brief paid=yes + GET b address=home code=brief paid=yes close=y + POST c address=home code=brief paid=yes + WAIT 2000 + POST a address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes close=y + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 ? 3 4 ? 6 7 ? 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 17 {set end { + GET b address=home code=brief paid=yes + POST a address=home code=brief paid=yes + GET a address=home code=brief paid=yes + POST c address=home code=brief paid=yes close=y + GET b address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes close=y + POST c address=home code=brief paid=yes + WAIT 2000 + POST a address=home code=brief paid=yes + WAIT 2000 + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 2 3 4 5 ? 7 8 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + + 18 {set end { + REPOST 0 + GET a + WAIT 2000 + POST b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 2 ? ?} + set resLong {1 2 3 4} + # resShort is overwritten below for the case ($te == 1). + } + + + 19 {set end { + REPOST 0 + GET a + WAIT 2000 + GET b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 2 ? ?} + set resLong {1 2 3 4} + } + + + 20 {set end { + POSTFRESH 1 + GET a + WAIT 2000 + POST b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 3 ?} + set resLong {1 3 4} + } + + + 21 {set end { + POSTFRESH 1 + GET a + WAIT 2000 + GET b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 2 ? ?} + set resLong {1 2 3 4} + } + + 22 {set end { + GET a + WAIT 2000 + KEEPALIVE 0 + POST b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 3 ?} + set resLong {1 3 4} + } + + + 23 {set end { + GET a + WAIT 2000 + KEEPALIVE 0 + GET b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 3 ?} + set resLong {1 3 4} + } + + 24 {set end { + GET a + KEEPALIVE 0 + POST b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 ? 
?} + set resLong {1 3 4} + } + + + 25 {set end { + GET a + KEEPALIVE 0 + GET b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 ? ?} + set resLong {1 3 4} + } + + default { + return -code error {no matching script} + } + } + + + if {$ca < 3} { + # Not Keep-Alive. + set result "Passed all sanity checks." + + } elseif {$ca == 3} { + # Keep-Alive, not pipelined. + set result {} + append result "Passed all sanity checks.\n" + append result "Have overlaps including response body:\n" + + } else { + # Keep-Alive, pipelined: ($ca == 4) + set result {} + append result "Passed all sanity checks.\n" + append result "Overlap-free without response body:\n" + append result "$resShort" + } + + # - The special case of test *.18*-testEof needs test results to be + # individually written. + # - These test -repost 0 when there is a POST to apply it to, and the server + # timeout has not been detected. + if {($cb == 18) && ($te == 1)} { + if {$ca < 3} { + # Not Keep-Alive. + set result "Passed all sanity checks." + + } elseif {$ca == 3 && $delay == 0} { + # Keep-Alive, not pipelined. + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + |Wrong sequence for token ::http::3 - {A X X} + |- and error(s) X + |Wrong sequence for token ::http::4 - {A X X X} + |- and error(s) X + | + |Have overlaps including response body: + | + }] + + } elseif {$ca == 3} { + # Keep-Alive, not pipelined. + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + | + |Have overlaps including response body: + | + }] + + } elseif {$delay == 0} { + # Keep-Alive, pipelined: ($ca == 4) + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + |Wrong sequence for token ::http::3 - {A X X} + |- and error(s) X + |Wrong sequence for token ::http::4 - {A X X X} + |- and error(s) X + | + |Overlap-free without response body: + | + }] + + } else { + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + | + |Overlap-free without response body: + | + }] + + } + } + + return [list "$start$middle$end" $result] +} + +# ------------------------------------------------------------------------------ +# Proc MakeMessage +# ------------------------------------------------------------------------------ +# WHD's one-line command to generate multi-line strings from readable code. +# +# Example: +# set blurb [MakeMessage { +# |This command allows multi-line strings to be created with readable +# |code, and without breaking the rules for indentation. +# | +# |The command shifts the entire block of text to the left, omitting +# |the pipe character and the spaces to its left. +# }] +# ------------------------------------------------------------------------------ + +proc MakeMessage {in} { + regsub -all -line {^\s*\|} [string trim $in] {} + # N.B. Implicit Return. +} + + +proc ReturnTestScript {ca cb delay te} { + lassign [ReturnTestScriptAndResult $ca $cb $delay $te] script result + return $script +} + +proc ReturnTestResult {ca cb delay te} { + lassign [ReturnTestScriptAndResult $ca $cb $delay $te] script result + return $result +} + + +# ------------------------------------------------------------------------------ +# (2) Command to run a test script and use httpTest to analyse the logs. 
+# ------------------------------------------------------------------------------ + +namespace import httpTestScript::runHttpTestScript +namespace import httpTestScript::cleanupHttpTestScript +namespace import httpTest::cleanupHttpTest +namespace import httpTest::logAnalyse +namespace import httpTest::setHttpTestOptions + +proc RunTest {header footer delay te} { + set num [runHttpTestScript [ReturnTestScript $header $footer $delay $te]] + set skipOverlaps 0 + set notPiped {} + set notIncluded {} + + # -------------------------------------------------------------------------- + # Custom code for specific tests + # -------------------------------------------------------------------------- + if {$header < 3} { + set skipOverlaps 1 + for {set i 1} {$i <= $num} {incr i} { + lappend notPiped $i + } + } elseif {$header > 2 && $footer == 18 && $te == 1} { + set skipOverlaps 1 + if {$delay == 0} { + # Transaction 1 is conventional. + # Check that transactions 2,3,4 are cancelled. + set notPiped {1} + set notIncluded $notPiped + } else { + # Transaction 1 is conventional. + # Check that transaction 2 is cancelled. + # The timing of transactions 3 and 4 is uncertain. + set notPiped {1 3 4} + set notIncluded $notPiped + } + } elseif {$footer in {20 22 23 24 25}} { + # Transaction 2 uses its own socket. + set notPiped 2 + set notIncluded $notPiped + } else { + } + # -------------------------------------------------------------------------- + # End of custom code for specific tests + # -------------------------------------------------------------------------- + + + set Results [logAnalyse $num $skipOverlaps $notIncluded $notPiped] + lassign $Results msg cleanE cleanF dirtyE dirtyF + if {$msg eq {}} { + set msg "Passed all sanity checks." + } else { + set msg "Problems with sanity checks:\n$msg" + } + + if 0 { + puts $msg + puts "Overlap-free including response body:\n$cleanF" + puts "Have overlaps including response body:\n$dirtyF" + puts "Overlap-free without response body:\n$cleanE" + puts "Have overlaps without response body:\n$dirtyE" + } + + if {$header < 3} { + # No ordering, just check that transactions all finish + set result $msg + } elseif {$header == 3} { + # Not pipelined - check overlaps with response body. + set result "$msg\nHave overlaps including response body:\n$dirtyF" + } else { + # Pipelined - check overlaps without response body. Check that the + # first request, the first requests after replay, and POSTs are clean. + set result "$msg\nOverlap-free without response body:\n$cleanE" + } + set ::nTokens $num + return $result +} + + +# ------------------------------------------------------------------------------ +# (3) VERBOSITY CONTROL +# ------------------------------------------------------------------------------ +# If tests fail, run an individual test with -verbose 1 or 2 for diagnosis. +# If still obscure, uncomment #Log and ##Log lines in the http package. +# ------------------------------------------------------------------------------ + +setHttpTestOptions -verbose 0 + +# ------------------------------------------------------------------------------ +# (4) Define the base URLs used for testing. Each must have a query string. +# ------------------------------------------------------------------------------ +# - A HTTP/1.1 server is required. It should be configured to provide +# persistent connections when requested to do so, and to close these +# connections if they are idle for one second. 
+# - The resource must be served with status 200 in response to a valid GET or +# POST. +# - The value of "page" is always specified in the query-string. Different +# resources for the three values of "page" allow testing of both chunked and +# unchunked transfer encoding. +# - The variables "close" and "delay" may be specified in the query-string (for +# a GET) or the request body (for a POST). +# - "delay" is a numerical value in seconds, and causes the server to delay +# the response, including headers. +# - "close", if it has the value "y", instructs the server to close the +# connection ater the current request. +# - Any other variables should be ignored. +# ------------------------------------------------------------------------------ + +namespace eval ::httpTestScript { + variable URL + array set URL { + a http://test-tcl-http.kerlin.org/index.html?page=privacy + b http://test-tcl-http.kerlin.org/index.html?page=conditions + c http://test-tcl-http.kerlin.org/index.html?page=welcome + } +} + + +# ------------------------------------------------------------------------------ +# (5) Define the tests +# ------------------------------------------------------------------------------ +# Constraints: +# - serverNeeded - the URLs defined at (4) must be available, and must have the +# properties specified there. +# - duplicate - the value of -pipeline does not matter if -keepalive 0 +# - timeout1s - tests that work correctly only if the server closes +# persistent connections after one second. +# +# Server timeout of persistent connections should be 1s. Delays of 2s are +# intended to cause timeout. +# Servers are usually configured to use a longer timeout: this will cause the +# tests to fail. The "2000" could be replaced with a larger number, but the +# tests will then be inconveniently slow. +# ------------------------------------------------------------------------------ + +#testConstraint serverNeeded 1 +#testConstraint timeout1s 1 +#testConstraint duplicate 1 + +# ------------------------------------------------------------------------------ +# Proc SetTestEof - to edit the command ::http::KeepSocket +# ------------------------------------------------------------------------------ +# The usual line in command ::http::KeepSocket is " set TEST_EOF 0". +# Whether the value set in the file is 0 or 1, change it here to the value +# specified by the argument. +# +# It is worth doing all tests for both values of the argument. +# +# test 0 - ::http::KeepSocket is unchanged, detects server eof where possible +# and closes the connection. +# test 1 - ::http::KeepSocket is edited, does not detect server eof, so the +# reaction to finding server eof can be tested without the difficulty +# of testing in the few milliseconds of an asynchronous close event. 
+# ------------------------------------------------------------------------------ + +proc SetTestEof {test} { + set body [info body ::http::KeepSocket] + set subs " set TEST_EOF $test" + set count [regsub -line -all -- {^\s*set TEST_EOF .*$} $body $subs newBody] + if {$count != 1} { + return -code error {proc ::http::KeepSocket has unexpected form} + } + proc ::http::KeepSocket {token} $newBody + return +} + +for {set header 1} {$header <= 4} {incr header} { + if {$header == 4} { + setHttpTestOptions -dotted 1 + set match glob + } else { + setHttpTestOptions -dotted 0 + set match exact + } + + if {$header == 2} { + set cons0 {serverNeeded duplicate} + } else { + set cons0 serverNeeded + } + + for {set footer 1} {$footer <= 25} {incr footer} { + foreach {delay label} { + 0 a + 1 b + 2 c + 3 d + 5 e + 8 f + 12 g + 100 h + 500 i + 2000 j + } { + foreach te {0 1} { + if {$te} { + set tag testEof + } else { + set tag normal + } + set suffix {} + set cons $cons0 + + # ------------------------------------------------------------------ + # Custom code for individual tests + # ------------------------------------------------------------------ + if {$footer in {18}} { + # Custom code: + if {($label eq "j") && ($te == 1)} { + continue + } + if {$te == 1} { + # The test (of REPOST 0) is useful if tag is "testEof" + # (server timeout without client reaction). The same test + # has a different result if tag is "normal". + + set suffix " - extra test for -repost 0 - ::http::2 must be" + append suffix " cancelled" + if {($delay == 0)} { + append suffix ", along with ::http::3 ::http::4 if" + append suffix " the test creates these before ::http::2" + append suffix " is cancelled" + } + } else { + } + } elseif {$footer in {19}} { + set suffix " - extra test for -repost 0" + } elseif {$footer in {20 21}} { + set suffix " - extra test for -postfresh 1" + if {($footer == 20)} { + append suffix " - ::http::2 uses a separate socket" + append suffix ", other requests use a persistent connection" + } + } elseif {$footer in {22 23 24 25}} { + append suffix " - ::http::2 uses a separate socket" + append suffix ", other requests use a persistent connection" + } else { + } + + if {($footer >= 13 && $footer <= 23)} { + # Test use WAIT and depend on server timeout before this time. + lappend cons timeout1s + } + # ------------------------------------------------------------------ + # End of custom code. + # ------------------------------------------------------------------ + + set name "pipeline test header $header footer $footer delay $delay $tag$suffix" + + + # Here's the test: + test httpPipeline-${header}.${footer}${label}-${tag} $name \ + -constraints $cons \ + -setup [string map [list TE $te] { + # Restore default values for tests: + http::config -pipeline 1 -postfresh 0 -repost 1 + http::init + set http::http(uid) 0 + SetTestEof {TE} + }] -body [list RunTest $header $footer $delay $te] -cleanup { + # Restore default values for tests: + http::config -pipeline 1 -postfresh 0 -repost 1 + cleanupHttpTestScript + SetTestEof 0 + cleanupHttpTest + after 2000 + # Wait for persistent sockets on the server to time out. 
+ } -result [ReturnTestResult $header $footer $delay $te] -match $match + + + } + + } + } +} + +# ------------------------------------------------------------------------------ +# (*) Notes on tests *.18*-testEof, *.19*-testEof - these test -repost 0 +# ------------------------------------------------------------------------------ +# These tests are a bit awkward because the main test kit analyses whether all +# requests are satisfied, with retries if necessary, and it has result analysis +# for processing retry logs. +# - *.18*-testEof tests that certain requests are NOT satisfied, so the analysis +# is a one-off. +# - Tests *.18a-testEof depend on client/server timing - the test needs to call +# http::geturl for all requests before the POST (request 2) is cancelled. +# We test that requests 2, 3, 4 are all cancelled. +# - Other tests *.18*-testEof may not request 3 and 4 in time for the to be +# added to the write queue before request 2 is completed. We simply check that +# request 2 is cancelled. +# - The behaviour is different if all connections are allowed to time out +# (label "j"). This case is not needed to test -repost 0, and is omitted. +# - Tests *.18*-normal and *.19* are conventional (-repost 0 should have no +# effect). +# ------------------------------------------------------------------------------ + + +unset header footer delay label suffix match cons name te +namespace delete ::httpTest +namespace delete ::httpTestScript + +::tcltest::cleanupTests diff --git a/tests/httpTest.tcl b/tests/httpTest.tcl new file mode 100644 index 0000000..9cd7a5d --- /dev/null +++ b/tests/httpTest.tcl @@ -0,0 +1,505 @@ +# httpTest.tcl +# +# Test HTTP/1.1 concurrent requests including +# queueing, pipelining and retries. +# +# Copyright (C) 2018 Keith Nash <kjnash@users.sourceforge.net> +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. + +# ------------------------------------------------------------------------------ +# "Package" httpTest for analysis of Log output of http requests. +# ------------------------------------------------------------------------------ +# This is a specialised test kit for examining the presence, ordering, and +# overlap of multiple HTTP transactions over a persistent ("Keep-Alive") +# connection; and also for testing reconnection in accordance with RFC 7230 when +# the connection is lost. +# +# This kit is probably not useful for other purposes. It depends on the +# presence of specific Log commands in the http library, and it interprets the +# logs that these commands create. +# ------------------------------------------------------------------------------ + +package require http + +namespace eval ::http { + variable TestStartTimeInMs [clock milliseconds] + catch {puts stdout "Start time (zero ms) is $TestStartTimeInMs"} +} + +namespace eval ::httpTest { + variable testResults {} + variable testOptions + array set testOptions { + -verbose 0 + -dotted 1 + } + # -verbose - 0 quiet 1 write to stdout 2 write more + # -dotted - (boolean) use dots for absences in lists of transactions +} + +proc httpTest::Puts {txt} { + variable testOptions + if {$testOptions(-verbose) > 0} { + puts stdout $txt + flush stdout + } + return +} + +# http::Log +# +# A special-purpose logger used for running tests. +# - Processes Log calls that have "^" in their arguments, and records them in +# variable ::httpTest::testResults. +# - Also writes them to stdout (using Puts) if ($testOptions(-verbose) > 0). 
+# - Also writes Log calls that do not have "^", if ($testOptions(-verbose) > 1). + +proc http::Log {args} { + variable TestStartTimeInMs + set time [expr {[clock milliseconds] - $TestStartTimeInMs}] + set txt [list $time {*}$args] + if {[string first ^ $txt] != -1} { + ::httpTest::LogRecord $txt + ::httpTest::Puts $txt + } elseif {$::httpTest::testOptions(-verbose) > 1} { + ::httpTest::Puts $txt + } + return +} + + +# Called by http::Log (the "testing" version) to record logs for later analysis. + +proc httpTest::LogRecord {txt} { + variable testResults + + set pos [string first ^ $txt] + set len [string length $txt] + if {$pos > $len - 3} { + puts stdout "Logging Error: $txt" + puts stdout "Fix this call to Log in http-*.tm so it has ^ then\ + a letter then a numeral." + flush stdout + } elseif {$pos == -1} { + # Called by mistake. + } else { + set letter [string index $txt [incr pos]] + set number [string index $txt [incr pos]] + # Max 9 requests! + lappend testResults [list $letter $number] + } + + return +} + + +# ------------------------------------------------------------------------------ +# Commands for analysing the logs recorded when calling http::geturl. +# ------------------------------------------------------------------------------ + +# httpTest::TestOverlaps -- +# +# The main test for correct behaviour of pipelined and sequential +# (non-pipelined) transactions. Other tests should be run first to detect +# any inconsistencies in the data (e.g. absence of the elements that are +# examined here). +# +# Examine the sequence $someResults for each transaction from 1 to $n, +# ignoring any that are listed in $badTrans. +# Determine whether the elements "B" to $term for one transaction overlap +# elements "B" to $term for the previous and following transactions. +# +# Transactions in the list $badTrans are not included in "clean" or +# "dirty", but their possible overlap with other transactions is noted. +# Transactions in the list $notPiped are a subset of $badTrans, and +# their possible overlap with other transactions is NOT noted. +# +# Arguments: +# someResults - list of results, each of the form {letter numeral} +# n - number of HTTP transactions +# term - letter that indicated end of search range. "E" for testing +# overlaps from start of request to end of response headers. +# "F" to extend to the end of the response body. +# msg - the cumulative message from sanity checks. Append to it only +# to report a test failure. +# badTrans - list of transaction numbers not to be assessed as "clean" or +# "dirty" +# notPiped - subset of badTrans. List of transaction numbers that cannot +# taint another transaction by overlapping with it, because it +# used a different socket. +# +# Return value: [list $msg $clean $dirty] +# msg - warning messages: nothing will be appended to argument $msg if there +# is an error with the test. 
+# clean - list of transactions that have no overlap with other transactions +# dirty - list of transactions that have YES overlap with other transactions + +proc httpTest::TestOverlaps {someResults n term msg badTrans notPiped} { + variable testOptions + + # Check whether transactions overlap: + set clean {} + set dirty {} + for {set i 1} {$i <= $n} {incr i} { + if {$i in $badTrans} { + continue + } + set myStart [lsearch -exact $someResults [list B $i]] + set myEnd [lsearch -exact $someResults [list $term $i]] + + if {($myStart == -1 || $myEnd == -1)} { + set res "Cannot find positions of transaction $i" + append msg $res \n + Puts $res + } + + set overlaps {} + for {set j $myStart} {$j <= $myEnd} {incr j} { + lassign [lindex $someResults $j] letter number + if {$number != $i && $letter ne "A" && $number ni $notPiped} { + lappend overlaps $number + } + } + + if {[llength $overlaps] == 0} { + set res "Transaction $i has no overlaps" + Puts $res + lappend clean $i + if {$testOptions(-dotted)} { + # N.B. results from different segments are concatenated. + lappend dirty . + } else { + } + } else { + set res "Transaction $i overlaps with [join $overlaps { }]" + Puts $res + lappend dirty $i + if {$testOptions(-dotted)} { + # N.B. results from different segments are concatenated. + lappend clean . + } else { + } + } + } + return [list $msg $clean $dirty] +} + +# httpTest::PipelineNext -- +# +# Test whether prevPair, pair are valid as consecutive elements of a pipelined +# sequence (Start 1), (End 1), (Start 2), (End 2) ... +# Numbers are integers increasing (by 1 if argument "any" is false), and need +# not begin with 1. +# The first element of the sequence has prevPair {} and is always passed as +# valid. +# +# Arguments; +# Start - string that labels the start of a segment +# End - string that labels the end of a segment +# prevPair - previous "pair" (list of string and number) element of a +# sequence, or {} if argument "pair" is the first in the +# sequence. +# pair - current "pair" (list of string and number) element of a +# sequence +# any - (boolean) iff true, accept any increasing sequence of integers. +# If false, integers must increase by 1. +# +# Return value - boolean, true iff the two pairs are valid consecutive elements. + +proc httpTest::PipelineNext {Start End prevPair pair any} { + if {$prevPair eq {}} { + return 1 + } + + lassign $prevPair letter number + lassign $pair newLetter newNumber + if {$letter eq $Start} { + return [expr {($newLetter eq $End) && ($newNumber == $number)}] + } elseif {$any} { + set nxt [list $Start [expr {$number + 1}]] + return [expr {($newLetter eq $Start) && ($newNumber > $number)}] + } else { + set nxt [list $Start [expr {$number + 1}]] + return [expr {($newLetter eq $Start) && ($newNumber == $number + 1)}] + } +} + +# httpTest::TestPipeline -- +# +# Given a sequence of "pair" elements, check that the elements whose string is +# $Start or $End form a valid pipeline. Ignore other elements. +# +# Return value: {} if valid pipeline, otherwise a non-empty error message. 
+ +proc httpTest::TestPipeline {someResults n Start End msg desc badTrans} { + set sequence {} + set prevPair {} + set ok 1 + set any [llength $badTrans] + foreach pair $someResults { + lassign $pair letter number + if {($letter in [list $Start $End]) && ($number ni $badTrans)} { + lappend sequence $pair + if {![PipelineNext $Start $End $prevPair $pair $any]} { + set ok 0 + break + } + set prevPair $pair + } + } + + if {!$ok} { + set res "$desc are not pipelined: {$sequence}" + append msg $res \n + Puts $res + } + return $msg +} + +# httpTest::TestSequence -- +# +# Examine each transaction from 1 to $n, ignoring any that are listed +# in $badTrans. +# Check that each transaction has elements A to F, in alphabetical order. + +proc httpTest::TestSequence {someResults n msg badTrans} { + variable testOptions + + for {set i 1} {$i <= $n} {incr i} { + if {$i in $badTrans} { + continue + } + set sequence {} + foreach pair $someResults { + lassign $pair letter number + if {$number == $i} { + lappend sequence $letter + } + } + if {$sequence eq {A B C D E F}} { + } else { + set res "Wrong sequence for token ::http::$i - {$sequence}" + append msg $res \n + Puts $res + if {"X" in $sequence} { + set res "- and error(s) X" + append msg $res \n + Puts $res + } + if {"Y" in $sequence} { + set res "- and warnings(s) Y" + append msg $res \n + Puts $res + } + } + } + return $msg +} + +# +# Arguments: +# someResults - list of elements, each a list of a letter and a number +# n - (positive integer) the number of HTTP requests +# msg - accumulated warning messages +# skipOverlaps - (boolean) whether to skip testing of transaction overlaps +# badTrans - list of transaction numbers not to be assessed as "clean" or +# "dirty" by their overlaps +# for 1/2 includes all transactions +# for 3/4 includes an increasing (with recursion) set that will not be included in the list because they are already handled. +# notPiped - subset of badTrans. List of transaction numbers that cannot +# taint another transaction by overlapping with it, because it +# used a different socket. +# +# Return value: [list $msg $cleanE $cleanF $dirtyE $dirtyF] +# msg - warning messages: nothing will be appended to argument $msg if there +# is no error with the test. 
+# cleanE - list of transactions that have no overlap with other transactions +# (not considering response body) +# dirtyE - list of transactions that have YES overlap with other transactions +# (not considering response body) +# cleanF - list of transactions that have no overlap with other transactions +# (including response body) +# dirtyF - list of transactions that have YES overlap with other transactions +# (including response body) + +proc httpTest::MostAnalysis {someResults n msg skipOverlaps badTrans notPiped} { + variable testOptions + + # Check that stages for "good" transactions are all present and correct: + set msg [TestSequence $someResults $n $msg $badTrans] + + # Check that requests are pipelined: + set msg [TestPipeline $someResults $n B C $msg Requests $notPiped] + + # Check that responses are pipelined: + set msg [TestPipeline $someResults $n D F $msg Responses $notPiped] + + if {$skipOverlaps} { + set cleanE {} + set dirtyE {} + set cleanF {} + set dirtyF {} + } else { + Puts "Overlaps including response body (test for non-pipelined case)" + lassign [TestOverlaps $someResults $n F $msg $badTrans $notPiped] msg cleanF dirtyF + + Puts "Overlaps without response body (test for pipelined case)" + lassign [TestOverlaps $someResults $n E $msg $badTrans $notPiped] msg cleanE dirtyE + } + + return [list $msg $cleanE $cleanF $dirtyE $dirtyF] +} + +# httpTest::ProcessRetries -- +# +# Command to examine results for socket-changing records [PQR], +# divide the results into segments for each connection, and analyse each segment +# individually. +# (Could add $sock to the logging to simplify this, but never mind.) +# +# In each segment, identify any transactions that are not included, and +# any that are aborted, to assist subsequent testing. +# +# Prepend A records (socket-independent) to each segment for transactions that +# were scheduled (by A) but not completed (by F). Pass each segment to +# MostAnalysis for processing. + +proc httpTest::ProcessRetries {someResults n msg skipOverlaps notIncluded notPiped} { + variable testOptions + + set nextRetry [lsearch -glob -index 0 $someResults {[PQR]}] + if {$nextRetry == -1} { + return [MostAnalysis $someResults $n $msg $skipOverlaps $notIncluded $notPiped] + } + set badTrans $notIncluded + set tryCount 0 + set try $nextRetry + incr tryCount + lassign [lindex $someResults $try] letter number + Puts "Processing retry [lindex $someResults $try]" + set beforeTry [lrange $someResults 0 $try-1] + Puts [join $beforeTry \n] + set afterTry [lrange $someResults $try+1 end] + + set dummyTry {} + for {set i 1} {$i <= $n} {incr i} { + set first [lsearch -exact $beforeTry [list A $i]] + set last [lsearch -exact $beforeTry [list F $i]] + if {$first == -1} { + set res "Transaction $i was not started in connection number $tryCount" + # So lappend it to badTrans and don't include it in the call below of MostAnalysis. + # append msg $res \n + Puts $res + if {$i ni $badTrans} { + lappend badTrans $i + } else { + } + } elseif {$last == -1} { + set res "Transaction $i was started but unfinished in connection number $tryCount" + # So lappend it to badTrans and don't include it in the call below of MostAnalysis. + # append msg $res \n + Puts $res + lappend badTrans $i + lappend dummyTry [list A $i] + } else { + set res "Transaction $i was started and finished in connection number $tryCount" + # So include it in the call below of MostAnalysis. 
+ # So lappend it to notIncluded and don't include it in the recursive call of + # ProcessRetries which handles the later connections. + # append msg $res \n + Puts $res + lappend notIncluded $i + } + } + + # Analyse the part of the results before the first replay: + set HeadResults [MostAnalysis $beforeTry $n $msg $skipOverlaps $badTrans $notPiped] + lassign $HeadResults msg cleanE1 cleanF1 dirtyE1 dirtyF1 + + # Pass the rest of the results to be processed recursively. + set afterTry [concat $dummyTry $afterTry] + set TailResults [ProcessRetries $afterTry $n $msg $skipOverlaps $notIncluded $notPiped] + lassign $TailResults msg cleanE2 cleanF2 dirtyE2 dirtyF2 + + set cleanE [concat $cleanE1 $cleanE2] + set cleanF [concat $cleanF1 $cleanF2] + set dirtyE [concat $dirtyE1 $dirtyE2] + set dirtyF [concat $dirtyF1 $dirtyF2] + return [list $msg $cleanE $cleanF $dirtyE $dirtyF] +} + +# httpTest::logAnalyse -- +# +# The main command called to analyse logs for a single test. +# +# Arguments: +# n - (positive integer) the number of HTTP requests +# skipOverlaps - (boolean) whether to skip testing of transaction overlaps +# notIncluded - list of transaction numbers not to be assessed as "clean" or +# "dirty" by their overlaps +# notPiped - subset of notIncluded. List of transaction numbers that cannot +# taint another transaction by overlapping with it, because it +# used a different socket. +# +# Return value: [list $msg $cleanE $cleanF $dirtyE $dirtyF] +# msg - warning messages: {} if there is no error with the test. +# cleanE - list of transactions that have no overlap with other transactions +# (not considering response body) +# dirtyE - list of transactions that have YES overlap with other transactions +# (not considering response body) +# cleanF - list of transactions that have no overlap with other transactions +# (including response body) +# dirtyF - list of transactions that have YES overlap with other transactions +# (including response body) + +proc httpTest::logAnalyse {n skipOverlaps notIncluded notPiped} { + variable testResults + variable testOptions + + # Check that each data item has the correct form {letter numeral}. + set ii 0 + set ok 1 + foreach pair $testResults { + lassign $pair letter number + if { [string match {[A-Z]} $letter] + && [string match {[0-9]} $number] + } { + # OK + } else { + set ok 0 + set res "Error: testResults has bad element {$pair} at position $ii" + append msg $res \n + Puts $res + } + incr ii + } + + if {!$ok} { + return $msg + } + set msg {} + + Puts [join $testResults \n] + ProcessRetries $testResults $n $msg $skipOverlaps $notIncluded $notPiped + # N.B. Implicit Return. +} + +proc httpTest::cleanupHttpTest {} { + variable testResults + set testResults {} + return +} + +proc httpTest::setHttpTestOptions {key args} { + variable testOptions + if {$key ni {-dotted -verbose}} { + return -code error {valid options are -dotted, -verbose} + } + set testOptions($key) {*}$args +} + +namespace eval httpTest { + namespace export cleanupHttpTest logAnalyse setHttpTestOptions +} diff --git a/tests/httpTestScript.tcl b/tests/httpTestScript.tcl new file mode 100644 index 0000000..a8ef9c8 --- /dev/null +++ b/tests/httpTestScript.tcl @@ -0,0 +1,509 @@ +# httpTestScript.tcl +# +# Test HTTP/1.1 concurrent requests including +# queueing, pipelining and retries. +# +# Copyright (C) 2018 Keith Nash <kjnash@users.sourceforge.net> +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. 
+ +# ------------------------------------------------------------------------------ +# "Package" httpTestScript for executing test scripts written in a convenient +# shorthand. +# ------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ +# Documentation for "package" httpTestScript. +# ------------------------------------------------------------------------------ +# To use the package: +# (a) define URLs as the values of elements in the array ::httpTestScript +# (b) define a script in terms of the commands +# START STOP DELAY KEEPALIVE WAIT PIPELINE GET HEAD POST +# referring to URLs by the name of the corresponding array element. The +# script can include any other Tcl commands, and evaluates in the +# httpTestScript namespace. +# (c) Use the command httpTestScript::runHttpTestScript to evaluate the script. +# (d) For tcltest tests, wrap the runHttpTestScript call in a suitable "test" +# command. +# ------------------------------------------------------------------------------ +# START +# Must be the first command of the script. +# +# STOP +# Must be present in the script to avoid waiting for client timeout. +# Usually the last command, but can be elsewhere to end a script prematurely. +# Subsequent httpTestScript commands will have no effect. +# +# DELAY ms +# If there are no WAIT commands, this sets the delay in ms between subsequent +# calls to http::geturl. Default 500ms. +# +# KEEPALIVE +# Set the value passed to http::geturl for the -keepalive option. The command +# applies to subsequent requests in the script. Default 1. +# +# WAIT ms +# Pause for a time in ms before sending subsequent requests. +# +# PIPELINE boolean +# Set the value of -pipeline using http::config. The last PIPELINE command +# in the script applies to every request. Default 1. +# +# POSTFRESH boolean +# Set the value of -postfresh using http::config. The last POSTFRESH command +# in the script applies to every request. Default 0. +# +# REPOST boolean +# Set the value of -repost using http::config. The last REPOST command +# in the script applies to every request. Default 1 for httpTestScript. +# (Default value in http is 0). +# +# GET uriCode ?arg ...? +# Send a HTTP request using the GET method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will be joined by "&" and appended to the query +# string with a preceding "&". +# +# HEAD uriCode ?arg ...? +# Send a HTTP request using the HEAD method. +# Arguments: as for GET +# +# POST uriCode ?arg ...? +# Send a HTTP request using the POST method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will be joined by "&" and used as the request body. +# ------------------------------------------------------------------------------ + +namespace eval ::httpTestScript { + namespace export runHttpTestScript cleanupHttpTestScript +} + +# httpTestScript::START -- +# Initialise, and create a long-stop timeout. 
+ +proc httpTestScript::START {} { + variable CountRequestedSoFar + variable RequestsWhenStopped + variable KeepAlive + variable Delay + variable TimeOutCode + variable TimeOutDone + variable StartDone + variable StopDone + variable CountFinishedSoFar + variable RequestList + variable RequestsMade + variable ExtraTime + variable ActualKeepAlive + + if {[info exists StartDone] && ($StartDone == 1)} { + set msg {START has been called twice without an intervening STOP} + return -code error $msg + } + + set StartDone 1 + set StopDone 0 + set TimeOutDone 0 + set CountFinishedSoFar 0 + set CountRequestedSoFar 0 + set RequestList {} + set RequestsMade {} + set ExtraTime 0 + set ActualKeepAlive 1 + + # Undefined until a STOP command: + unset -nocomplain RequestsWhenStopped + + # Default values: + set KeepAlive 1 + set Delay 500 + + # Default values for tests: + KEEPALIVE 1 + PIPELINE 1 + POSTFRESH 0 + REPOST 1 + + set TimeOutCode [after 30000 httpTestScript::TimeOutNow] +# set TimeOutCode [after 4000 httpTestScript::TimeOutNow] + return +} + +# httpTestScript::STOP -- +# Do not process any more commands. The commands will be executed but will +# silently do nothing. + +proc httpTestScript::STOP {} { + variable CountRequestedSoFar + variable CountFinishedSoFar + variable RequestsWhenStopped + variable TimeOutCode + variable StartDone + variable StopDone + variable RequestsMade + + if {$StopDone} { + # Don't do anything on a second call. + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + set StopDone 1 + set StartDone 0 + set RequestsWhenStopped $CountRequestedSoFar + unset -nocomplain StartDone + + if {$CountFinishedSoFar == $RequestsWhenStopped} { + if {[info exists TimeOutCode]} { + after cancel $TimeOutCode + } + set ::httpTestScript::FOREVER 0 + } + return +} + +# httpTestScript::DELAY -- +# If there are no WAIT commands, this sets the delay in ms between subsequent +# calls to http::geturl. Default 500ms. + +proc httpTestScript::DELAY {t} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + variable Delay + + set Delay $t + return +} + +# httpTestScript::KEEPALIVE -- +# Set the value passed to http::geturl for the -keepalive option. Default 1. + +proc httpTestScript::KEEPALIVE {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + variable KeepAlive + set KeepAlive $b + return +} + +# httpTestScript::WAIT -- +# Pause for a time in ms before processing any more commands. + +proc httpTestScript::WAIT {t} { + variable StartDone + variable StopDone + variable ExtraTime + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + if {(![string is integer -strict $t]) || $t < 0} { + return -code error {argument to WAIT must be a non-negative integer} + } + + incr ExtraTime $t + + return +} + +# httpTestScript::PIPELINE -- +# Pass a value to http::config -pipeline. 
+ +proc httpTestScript::PIPELINE {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + ::http::config -pipeline $b + ##::http::Log http(-pipeline) is now [::http::config -pipeline] + return +} + +# httpTestScript::POSTFRESH -- +# Pass a value to http::config -postfresh. + +proc httpTestScript::POSTFRESH {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + ::http::config -postfresh $b + ##::http::Log http(-postfresh) is now [::http::config -postfresh] + return +} + +# httpTestScript::REPOST -- +# Pass a value to http::config -repost. + +proc httpTestScript::REPOST {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + ::http::config -repost $b + ##::http::Log http(-repost) is now [::http::config -repost] + return +} + +# httpTestScript::GET -- +# Send a HTTP request using the GET method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will each be preceded by "&" and appended to the query +# string. + +proc httpTestScript::GET {uriCode args} { + variable RequestList + lappend RequestList GET + RequestAfter $uriCode 0 {} {*}$args + return +} + +# httpTestScript::HEAD -- +# Send a HTTP request using the HEAD method. +# Arguments: as for GET + +proc httpTestScript::HEAD {uriCode args} { + variable RequestList + lappend RequestList HEAD + RequestAfter $uriCode 1 {} {*}$args + return +} + +# httpTestScript::POST -- +# Send a HTTP request using the POST method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will be joined by "&" and used as the request body. + +proc httpTestScript::POST {uriCode args} { + variable RequestList + lappend RequestList POST + RequestAfter $uriCode 0 {use} {*}$args + return +} + + +proc httpTestScript::RequestAfter {uriCode validate query args} { + variable CountRequestedSoFar + variable Delay + variable ExtraTime + variable StartDone + variable StopDone + variable KeepAlive + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + incr CountRequestedSoFar + set idelay [expr {($CountRequestedSoFar - 1) * $Delay + 10 + $ExtraTime}] + + # Could pass values of -pipeline, -postfresh, -repost if it were + # useful to change these mid-script. 
+ after $idelay [list httpTestScript::Requester $uriCode $KeepAlive $validate $query {*}$args] + return +} + +proc httpTestScript::Requester {uriCode keepAlive validate query args} { + variable URL + + ::http::config -accept {*/*} + + set absUrl $URL($uriCode) + if {$query eq {}} { + if {$args ne {}} { + append absUrl & [join $args &] + } + set queryArgs {} + } elseif {$validate} { + return -code error {cannot have both -validate (HEAD) and -query (POST)} + } else { + set queryArgs [list -query [join $args &]] + } + + if {[catch { + ::http::geturl $absUrl \ + -validate $validate \ + -timeout 10000 \ + {*}$queryArgs \ + -keepalive $keepAlive \ + -command ::httpTestScript::WhenFinished + } token]} { + set msg $token + catch {puts stdout "Error: $msg"} + return + } else { + # Request will begin. + } + + return + +} + +proc httpTestScript::TimeOutNow {} { + variable TimeOutDone + + set TimeOutDone 1 + set ::httpTestScript::FOREVER 0 + return +} + +proc httpTestScript::WhenFinished {hToken} { + variable CountFinishedSoFar + variable RequestsWhenStopped + variable TimeOutCode + variable StopDone + variable RequestList + variable RequestsMade + variable ActualKeepAlive + + upvar #0 $hToken state + + if {[catch { + if { [info exists state(transfer)] + && ($state(transfer) eq "chunked") + } { + set Trans chunked + } else { + set Trans unchunked + } + + if { [info exists ::httpTest::testOptions(-verbose)] + && ($::httpTest::testOptions(-verbose) > 0) + } { + puts "Token $hToken +Response $state(http) +Status $state(status) +Method $state(method) +Transfer $Trans +Size $state(currentsize) +URL $state(url) +" + } + + if {!$state(-keepalive)} { + set ActualKeepAlive 0 + } + + if {[info exists state(method)]} { + lappend RequestsMade $state(method) + } else { + lappend RequestsMade UNKNOWN + } + set tk [namespace tail $hToken] + + if { ($state(http) != {HTTP/1.1 200 OK}) + || ($state(status) != {ok}) + || (($state(currentsize) == 0) && ($state(method) ne "HEAD")) + } { + ::http::Log ^X$tk unexpected result Response $state(http) Status $state(status) Size $state(currentsize) - token $hToken + } + } err]} { + ::http::Log ^X$tk httpTestScript::WhenFinished failed with error status: $err - token $hToken + } + + incr CountFinishedSoFar + if {$StopDone && ($CountFinishedSoFar == $RequestsWhenStopped)} { + if {[info exists TimeOutCode]} { + after cancel $TimeOutCode + } + if {$RequestsMade ne $RequestList && $ActualKeepAlive} { + ::http::Log ^X$tk unexpected result - Script asked for "{$RequestList}" but got "{$RequestsMade}" - token $hToken + } + set ::httpTestScript::FOREVER 0 + } + + return +} + + +proc httpTestScript::runHttpTestScript {scr} { + variable TimeOutDone + variable RequestsWhenStopped + + after idle [list namespace eval ::httpTestScript $scr] + vwait ::httpTestScript::FOREVER + # N.B. does not automatically execute in this namespace, unlike some other events. + # Release when all requests have been served or have timed out. + + if {$TimeOutDone} { + return -code error {test script timed out} + } + + return $RequestsWhenStopped +} + + +proc httpTestScript::cleanupHttpTestScript {} { + variable TimeOutDone + variable RequestsWhenStopped + + if {![info exists RequestsWhenStopped]} { + return -code error {Cleanup Failed: RequestsWhenStopped is undefined} + } + + for {set i 1} {$i <= $RequestsWhenStopped} {incr i} { + http::cleanup ::http::$i + } + + return +} |
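
A closing usage note (not part of the patch above): the following is a minimal sketch of how the new harness might be driven by hand, outside tcltest. It assumes the two files added by this commit are sourced from the tests directory, that the http package built from this patch (the version emitting the ^-marked Log calls) is loaded, and that the kerlin.org test server described in section (4) of httpPipeline.test is reachable and configured with a one-second persistent-connection timeout; the URL values are the same ones the tests use.

# Sketch only - mirrors the -setup/-body pattern of the httpPipeline tests.
package require http
source httpTest.tcl        ;# logging http::Log and the httpTest analysis commands
source httpTestScript.tcl  ;# the script interpreter (START, GET, POST, STOP, ...)

# Base URLs, as required by httpTestScript (each must carry a query string).
namespace eval ::httpTestScript {
    variable URL
    array set URL {
        a http://test-tcl-http.kerlin.org/index.html?page=privacy
        b http://test-tcl-http.kerlin.org/index.html?page=conditions
    }
}

# Mirror the -setup used by the tests: default config, fresh token numbering.
http::config -pipeline 1 -postfresh 0 -repost 1
http::init
set http::http(uid) 0

# Two GETs and a POST over a connection requested as persistent, then stop.
set nTokens [httpTestScript::runHttpTestScript {
    START
    GET a
    GET b
    POST a address=home code=brief paid=yes
    STOP
}]
puts "completed $nTokens requests"

# Release the tokens; the server-side sockets time out on their own.
httpTestScript::cleanupHttpTestScript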
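
Continuing that sketch, the ^-marked log records gathered during the run could then be checked with the analysis commands from tests/httpTest.tcl, much as proc RunTest does in httpPipeline.test; nTokens is the count returned by the block above.

# Sketch only - analyse the logs recorded by the run above.
httpTest::setHttpTestOptions -verbose 0

# Nothing is excluded from the overlap checks in this simple case.
set skipOverlaps 0
set notIncluded  {}
set notPiped     {}

lassign [httpTest::logAnalyse $nTokens $skipOverlaps $notIncluded $notPiped] \
        msg cleanE cleanF dirtyE dirtyF

if {$msg eq {}} {
    puts "Passed all sanity checks."
    puts "Overlap-free without response body: $cleanE"
} else {
    puts "Problems with sanity checks:\n$msg"
}

# Reset the recorded results before another run.
httpTest::cleanupHttpTest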
