From 70af5c2b8260845974300e98c2e4c464b787d94e Mon Sep 17 00:00:00 2001 From: kjnash Date: Tue, 27 Mar 2018 14:20:23 +0000 Subject: Implement queuing and pipelining for HTTP requests over a persistent connection. --- doc/http.n | 159 ++++- library/http/http.tcl | 1448 ++++++++++++++++++++++++++++++++++++++++++++-- tests/httpPipeline.test | 859 +++++++++++++++++++++++++++ tests/httpTest.tcl | 431 ++++++++++++++ tests/httpTestScript.tcl | 509 ++++++++++++++++ 5 files changed, 3356 insertions(+), 50 deletions(-) create mode 100644 tests/httpPipeline.test create mode 100644 tests/httpTest.tcl create mode 100644 tests/httpTestScript.tcl diff --git a/doc/http.n b/doc/http.n index 40ced23..2dae77e 100644 --- a/doc/http.n +++ b/doc/http.n @@ -6,14 +6,14 @@ '\" See the file "license.terms" for information on usage and redistribution '\" of this file, and for a DISCLAIMER OF ALL WARRANTIES. '\" -.TH "http" n 2.7 http "Tcl Bundled Packages" +.TH "http" n 2.8 http "Tcl Bundled Packages" .so man.macros .BS '\" Note: do not modify the .SH NAME line immediately below! .SH NAME http \- Client-side implementation of the HTTP/1.1 protocol .SH SYNOPSIS -\fBpackage require http ?2.7?\fR +\fBpackage require http ?2.8?\fR .\" See Also -useragent option documentation in body! .sp \fB::http::config ?\fI\-option value\fR ...? @@ -49,7 +49,7 @@ http \- Client-side implementation of the HTTP/1.1 protocol .SH DESCRIPTION .PP The \fBhttp\fR package provides the client side of the HTTP/1.1 -protocol, as defined in RFC 2616. +protocol, as defined in RFC 7230 to RFC 7235, which supersede RFC 2616. The package implements the GET, POST, and HEAD operations of HTTP/1.1. It allows configuration of a proxy host to get through firewalls. The package is compatible with the \fBSafesock\fR security @@ -95,6 +95,19 @@ comma-separated list of mime type patterns that you are willing to receive. For example, .QW "image/gif, image/jpeg, text/*" . .TP +\fB\-pipeline\fR \fIboolean\fR +. +Specifies whether HTTP/1.1 transactions on a persistent socket will be +pipelined. See the \fBPERSISTENT SOCKETS\fR section for details. The default +is 1. +.TP +\fB\-postfresh\fR \fIboolean\fR +. +Specifies whether requests that use the \fBPOST\fR method will always use a +fresh socket, overriding the \fB-keepalive\fR option of +command \fBhttp::geturl\fR. See the \fBPERSISTENT SOCKETS\fR section for details. +The default is 0. +.TP \fB\-proxyhost\fR \fIhostname\fR . The name of the proxy host, if any. If this value is the @@ -116,6 +129,18 @@ an empty list. The default filter returns the values of the \fB\-proxyhost\fR and \fB\-proxyport\fR settings if they are non-empty. .TP +\fB\-repost\fR \fIboolean\fR +. +Specifies what to do if a POST request over a persistent connection fails +because the server has half-closed the connection. If boolean \fBtrue\fR, the +request +will be automatically retried; if boolean \fBfalse\fR it will not, and the +application +that uses \fBhttp::geturl\fR is expected to seek user confirmation before +retrying the POST. The value \fBtrue\fR should be used only under certain +conditions. See the \fBPERSISTENT SOCKETS\fR section for details. The +default is 0. +.TP \fB\-urlencoding\fR \fIencoding\fR . The \fIencoding\fR used for creating the x-url-encoded URLs with @@ -128,8 +153,22 @@ characters. .TP \fB\-useragent\fR \fIstring\fR . -The value of the User-Agent header in the HTTP request. The default is -.QW "\fBTcl http client package 2.7\fR" . +The value of the User-Agent header in the HTTP request. 
In an unsafe +interpreter, the default value depends upon the operating system, and +the version numbers of \fBhttp\fR and \fBTcl\fR, and is (for example) +.QW "\fBMozilla/5.0 (Windows; U; Windows NT 10.0) http/2.8.12 Tcl/8.6.8\fR" . +A safe interpreter cannot determine its operating system, and so the default +in a safe interpreter is to use a Windows 10 value with the current version +numbers of \fBhttp\fR and \fBTcl\fR. +.TP +\fB\-zip\fR \fIboolean\fR +. +If the value is boolean \fBtrue\fR, then by default requests will send a header +.QW "\fBAccept-Encoding: gzip,deflate,compress\fR" . +If the value is boolean \fBfalse\fR, then by default this header will not be sent. +In either case the default can be overridden for an individual request by +supplying a custom \fBAccept-Encoding\fR header in the \fB-headers\fR option +of \fBhttp::geturl\fR. The default is 1. .RE .TP \fB::http::geturl\fR \fIurl\fR ?\fIoptions\fR? @@ -227,7 +266,7 @@ Pragma: no-cache .TP \fB\-keepalive\fR \fIboolean\fR . -If true, attempt to keep the connection open for servicing +If boolean \fBtrue\fR, attempt to keep the connection open for servicing multiple requests. Default is 0. .TP \fB\-method\fR \fItype\fR @@ -504,6 +543,14 @@ The following elements of the array are supported: .RS .TP +\fBbinary\fR +. +This is boolean \fBtrue\fR if (after decoding any compression specified +by the +.QW "Content-Encoding" +response header) the HTTP response is binary. It is boolean \fBfalse\fR +if the HTTP response is text. +.TP \fBbody\fR . The contents of the URL. This will be empty if the \fB\-channel\fR @@ -602,6 +649,106 @@ A copy of the \fBContent-Type\fR meta-data value. . The requested URL. .RE +.SH "PERSISTENT CONNECTIONS" +.PP +.SS "BASICS" +.PP +See RFC 7230 Sec 6, which supersedes RFC 2616 Sec 8.1. +.PP +A persistent connection allows multiple HTTP/1.1 transactions to be +carried over the same TCP connection. Pipelining allows a +client to make multiple requests over a persistent connection without +waiting for each response. The server sends responses in the same order +that the requests were received. +.PP +If a POST request fails to complete, typically user confirmation is +needed before sending the request again. The user may wish to verify +whether the server was modified by the failed POST request, before +sending the same request again. +.PP +A HTTP request will use a persistent socket if the call to +\fBhttp::geturl\fR has the option \fB-keepalive true\fR. It will use +pipelining where permitted if the \fBhttp::config\fR option +\fB-pipeline\fR is boolean \fBtrue\fR (its default value). +.PP +The http package maintains no more than one persistent connection to each +server (i.e. each value of +.QW "domain:port" ). +If \fBhttp::geturl\fR is called to make a request over a persistent +connection while the connection is busy with another request, the new +request will be held in a queue until the connection is free. +.PP +The http package does not support HTTP/1.0 persistent connections +controlled by the \fBKeep-Alive\fR header. +.SS "SPECIAL CASES" +.PP +This subsection discusses issues related to closure of the +persistent connection by the server, automatic retry of failed requests, +the special treatment necessary for POST requests, and the options for +dealing with these cases. +.PP +In accordance with RFC 7230, \fBhttp::geturl\fR does not pipeline +requests that use the POST method. 
If a POST uses a persistent +connection and is not the first request on that connection, +\fBhttp::geturl\fR waits until it has received the response for the previous +request; or (if \fBhttp::config\fR option \fB-postfresh\fR is boolean \fBtrue\fR) it +uses a new connection for each POST. +.PP +If the server is processing a number of pipelined requests, and sends a +response header +.QW "\fBConnection: close\fR" +with one of the responses (other than the last), then subsequent responses +are unfulfilled. \fBhttp::geturl\fR will send the unfulfilled requests again +over a new connection. +.PP +A difficulty arises when a HTTP client sends a request over a persistent +connection that has been idle for a while. The HTTP server may +half-close an apparently idle connection while the client is sending a +request, but before the request arrives at the server: in this case (an +.QW "asynchronous close event" ) +the request will fail. The difficulty arises because the client cannot +be certain whether the POST modified the state of the server. For HEAD or +GET requests, \fBhttp::geturl\fR opens another connection and retransmits +the failed request. However, if the request was a POST, RFC 7230 forbids +automatic retry by default, suggesting either user confirmation, or +confirmation by user-agent software that has semantic understanding of +the application. The \fBhttp::config\fR option \fB-repost\fR allows for +either possibility. +.PP +Asynchronous close events can occur only in a short interval of time. The +\fBhttp\fR package monitors each persistent connection for closure by the +server. Upon detection, the connection is also closed at the client end, +and subsequent requests will use a fresh connection. +.PP +If the \fBhttp::geturl\fR command is called with option \fB-keepalive true\fR, +then it will both try to use an existing persistent connection +(if one is available), and it will send the server a +.QW "\fBConnection: keep-alive\fR" +request header asking to keep the connection open for future requests. +.PP +The \fBhttp::config\fR options \fB-pipeline\fR, \fB-postfresh\fR, and +\fB-repost\fR relate to persistent connections. +.PP +Option \fB-pipeline\fR, if boolean \fBtrue\fR, will pipeline GET and HEAD requests +made +over a persistent connection. POST requests will not be pipelined - if the +POST is not the first transaction on the connection, its request will not +be sent until the previous response has finished. GET and HEAD requests +made after a POST will not be sent until the POST response has been +delivered, and will not be sent if the POST fails. +.PP +Option \fB-postfresh\fR, if boolean \fBtrue\fR, will override the \fBhttp::geturl\fR option +\fB-keepalive\fR, and always open a fresh connection for a POST request. +.PP +Option \fB-repost\fR, if \fBtrue\fR, permits automatic retry of a POST request +that fails because it uses a persistent connection that the server has +half-closed (an +.QW "asynchronous close event" ). +Subsequent GET and HEAD requests in a failed pipeline will also be retried. +\fIThe -repost option should be used only if the application understands +that the retry is appropriate\fR - specifically, the application must know +that if the failed POST successfully modified the state of the server, a repeat POST +would have no adverse effect. 
.SH EXAMPLE .PP This example creates a procedure to copy a URL to a file while printing a diff --git a/library/http/http.tcl b/library/http/http.tcl index 06f452d..f4f83c6 100644 --- a/library/http/http.tcl +++ b/library/http/http.tcl @@ -20,9 +20,12 @@ namespace eval http { if {![info exists http]} { array set http { -accept */* + -pipeline 1 + -postfresh 0 -proxyhost {} -proxyport {} -proxyfilter http::ProxyRequired + -repost 0 -urlencoding utf-8 -zip 1 } @@ -220,7 +223,7 @@ proc http::config {args} { # Arguments: # token Connection token. # errormsg (optional) If set, forces status to error. -# skipCB (optional) If set, don't call the -command callback. This +# skipCB (optional) If set, don't call the -command callback. This # is useful when geturl wants to throw an exception instead # of calling the callback. That way, the same error isn't # reported to two places. @@ -240,6 +243,7 @@ proc http::Finish {token {errormsg ""} {skipCB 0}} { variable $token upvar 0 $token state global errorInfo errorCode + set closeQueue 0 if {$errormsg ne ""} { set state(error) [list $errormsg $errorInfo $errorCode] set state(status) "error" @@ -251,6 +255,12 @@ proc http::Finish {token {errormsg ""} {skipCB 0}} { || ([info exists state(connection)] && ($state(connection) eq "close")) } { CloseSocket $state(sock) $token + set closeQueue 1 + } elseif { + ([info exists state(-keepalive)] && $state(-keepalive)) + && ([info exists state(connection)] && ($state(connection) ne "close")) + } { + KeepSocket $token } if {[info exists state(after)]} { after cancel $state(after) @@ -263,6 +273,233 @@ proc http::Finish {token {errormsg ""} {skipCB 0}} { set state(status) error } } + + if { $closeQueue + && [info exists state(socketinfo)] + && [info exists socketMapping($state(socketinfo))] + && ($socketMapping($state(socketinfo)) eq $state(sock)) + } { + http::CloseQueuedQueries $state(socketinfo) $token + } + + return +} + +# http::KeepSocket - +# +# Keep a socket in the persistent sockets table and connect it to its next +# queued task if possible. Otherwise leave it idle and ready for its next +# use. +# +# Arguments: +# token Connection token. + +proc http::KeepSocket {token} { + variable http + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + variable $token + upvar 0 $token state + set tk [namespace tail $token] + + # Keep this socket open for another request ("Keep-Alive"). + # React if the server half-closes the socket. + # Discussion is in http::geturl. + catch {fileevent $state(sock) readable [list http::CheckEof $state(sock)]} + + # The line below should not be changed in production code. + # It is edited by the test suite. + set TEST_EOF 0 + if {$TEST_EOF} { + # ONLY for testing reaction to server eof. + # No server timeouts will be caught. + catch {fileevent $state(sock) readable {}} + } else { + # Normal operation. + # Test constraint normalEof. + } + + if { [info exists state(socketinfo)] + && [info exists socketMapping($state(socketinfo))] + } { + set connId $state(socketinfo) + # The value "Rready" is set only here. + set socketRdState($connId) Rready + + if { $state(-pipeline) + && [info exists socketRdQueue($connId)] + && [llength $socketRdQueue($connId)] + } { + # The usual case for pipelined responses - if another response is + # queued, arrange to read it. 
+ set token3 [lindex $socketRdQueue($connId) 0] + set socketRdQueue($connId) [lrange $socketRdQueue($connId) 1 end] + variable $token3 + upvar 0 $token3 state3 + set tk2 [namespace tail $token3] + + #Log pipelined, GRANT read access to $token3 in KeepSocket + set socketRdState($connId) $token3 + lassign [fconfigure $state3(sock) -translation] trRead trWrite + fconfigure $state3(sock) -translation [list auto $trWrite] \ + -buffersize $state3(-blocksize) + Log ^D$tk2 begin receiving response - token $token3 + fileevent $state3(sock) readable \ + [list http::Event $state3(sock) $token3] + #Log ---- $state3(sock) >> conn to $token3 for HTTP response (a) + + # Other pipelined cases. + # - The test above ensures that, for the pipelined cases in the two + # tests below, the read queue is empty. + # - In those two tests, check whether the next write will be + # nonpipeline. + } elseif { + $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "peNding") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![set token3 [lindex $socketWrQueue($connId) 0] + set ${token3}(-pipeline) + ] + ) + } { + # This case: + # - Now it the time to run the "pending" request. + # - The next token in the write queue is nonpipeline, and + # socketWrState has been marked "pending" (in + # http::NextPipelinedWrite or http::geturl) so a new pipelined + # request cannot jump the queue. + # + # Tests: + # - In this case the read queue (tested above) is empty and this + # "pending" write token is in front of the rest of the write + # queue. + # - The write state is not Wready and therefore appears to be busy, + # but because it is "pending" we know that it is reserved for the + # first item in the write queue, a non-pipelined request that is + # waiting for the read queue to empty. That has now happened: so + # give that request read and write access. + variable $token3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in KeepSocket + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (c) + + } elseif { + $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "peNding") + + } { + # Should not come here. The second block in the previous "elseif" + # test should be tautologous (but was needed in an earlier + # implementation) and will be removed after testing. + # If we get here, the value "pending" was assigned in error. + # This error would block the queue for ever. + Log ^X$tk <<<<< Error in queueing of requests >>>>> - token $token + + } elseif { + $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![set token3 [lindex $socketWrQueue($connId) 0] + set ${token3}(-pipeline) + ] + ) + } { + # This case: + # - The next token in the write queue is nonpipeline, and + # socketWrState is Wready. Get the next event from socketWrQueue. + # Tests: + # - In this case the read state (tested above) is Rready and the + # write state (tested here) is Wready - there is no "pending" + # request. 
+ # Code: + # - The code is the same as the code below for the nonpipelined + # case with a queued request. + variable $token3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in KeepSocket + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (c) + + } elseif { + (!$state(-pipeline)) + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && ($state(connection) ne "close") + } { + # If not pipelined, (socketRdState eq Rready) tells us that we are + # ready for the next write - there is no need to check + # socketWrState. Write the next request, if one is waiting. + # If the next request is pipelined, it receives premature read + # access to the socket. This is not a problem. + set token3 [lindex $socketWrQueue($connId) 0] + variable $token3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in KeepSocket + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (d) + + } elseif {(!$state(-pipeline))} { + set socketWrState($connId) Wready + # Rready and Wready and idle: nothing to do. + } else { + # Rready and idle: nothing to do. + } + + } else { + CloseSocket $state(sock) $token + } + return +} + +# http::CheckEof - +# +# Read from a socket and close it if eof. +# The command is bound to "fileevent readable" on an idle socket, and +# "eof" is the only event that should trigger the binding, occurring when +# the server times out and half-closes the socket. +# +# A read is necessary so that [eof] gives a meaningful result. +# Any bytes sent are junk (or a bug). + +proc http::CheckEof {sock} { + set junk [read $sock] + set n [string length $junk] + if {$n} { + Log "WARNING: $n bytes received but no HTTP request sent" + } + + if {[catch {eof $sock} res] || $res} { + # The server has half-closed the socket. + # If a new write has started, its transaction will fail and + # will then be error-handled. + CloseSocket $sock + } return } @@ -302,23 +539,85 @@ proc http::CloseSocket {s {token {}}} { } else { } } - if {$connId eq {} || ![info exists socketMapping($connId)]} { + if { ($connId ne {}) + && [info exists socketMapping($connId)] + && ($socketMapping($connId) eq $s) + } { + Log "Closing connection $connId (sock $socketMapping($connId))" + if {[catch {close $socketMapping($connId)} err]} { + Log "Error closing connection: $err" + } else { + } + if {$token eq {}} { + # Cases with a non-empty token are handled by Finish, so the tokens + # are finished in connection order. + http::CloseQueuedQueries $connId $token + } else { + } + } else { Log "Closing socket $s (no connection info)" if {[catch {close $s} err]} { Log "Error closing socket: $err" } else { } + } + return +} + +# http::CloseQueuedQueries +# +# connId - identifier "domain:port" for the connection +# token - (optional) used only for logging +# +# Called from http::CloseSocket and http::Finish, after a connection is closed, +# to clear the read and write queues if this has not already been done. 
+ +proc http::CloseQueuedQueries {connId {token {}}} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + if {![info exists socketMapping($connId)]} { + # Command has already been called. + # Don't come here again - especially recursively. + return + } + + # Used only for logging. + if {$token eq {}} { + set tk {} } else { - if {[info exists socketMapping($connId)]} { - Log "Closing connection $connId (sock $socketMapping($connId))" - if {[catch {close $socketMapping($connId)} err]} { - Log "Error closing connection: $err" - } else { - } - unset socketMapping($connId) - } else { - Log "Cannot close connection $connId - no socket in socket map" - } + set tk [namespace tail $token] + } + + if { [info exists socketPlayCmd($connId)] + && ($socketPlayCmd($connId) ne {ReplayIfClose Wready {} {}}) + } { + set unfinished $socketPlayCmd($connId) + } else { + set unfinished {} + } + + # The trace on "unset socketRdState(*)" cancels any pipelined + # responses. + # The trace on "unset socketWrState(*)" cancels any pipelined + # requests. + unset socketMapping($connId) + unset socketRdState($connId) + unset socketWrState($connId) + unset -nocomplain socketRdQueue($connId) + unset -nocomplain socketWrQueue($connId) + unset -nocomplain socketClosing($connId) + unset -nocomplain socketPlayCmd($connId) + + if {$unfinished ne {}} { + Log ^R$tk Any unfinished transactions (excluding $token) failed \ + - token $token + {*}$unfinished } return } @@ -332,7 +631,7 @@ proc http::CloseSocket {s {token {}}} { # why Status info. # # Side Effects: -# See Finish +# See Finish proc http::reset {token {why reset}} { variable $token @@ -354,8 +653,8 @@ proc http::reset {token {why reset}} { # Establishes a connection to a remote url via http. # # Arguments: -# url The http URL to goget. -# args Option value pairs. Valid options include: +# url The http URL to goget. +# args Option value pairs. Valid options include: # -blocksize, -validate, -headers, -timeout # Results: # Returns a token for this connection. This token is the name of an @@ -375,10 +674,12 @@ proc http::geturl {url args} { set http(uid) 0 } set token [namespace current]::[incr http(uid)] + ##Log Starting http::geturl - token $token variable $token upvar 0 $token state set tk [namespace tail $token] reset $token + Log ^A$tk URL $url - token $token # Process command options. @@ -393,8 +694,9 @@ proc http::geturl {url args} { -queryprogress {} -protocol 1.1 binary 0 - state connecting + state created meta {} + method {} coding {} currentsize 0 totalsize 0 @@ -611,14 +913,7 @@ proc http::geturl {url args} { # Don't append the fragment! set state(url) $url - # If a timeout is specified we set up the after event and arrange for an - # asynchronous socket connection. - set sockopts [list -async] - if {$state(-timeout) > 0} { - set state(after) [after $state(-timeout) \ - [list http::reset $token timeout]] - } # If we are using the proxy, we must pass in the full URL that includes # the server name. @@ -636,7 +931,36 @@ proc http::geturl {url args} { # c11a51c482] set state(accept-types) $http(-accept) + if {$isQuery || $isQueryChannel} { + # It's a POST. + # A client wishing to send a non-idempotent request SHOULD wait to send + # that request until it has received the response status for the + # previous request. + if {$http(-postfresh)} { + # Override -keepalive for a POST. 
Use a new connection, and thus + # avoid the small risk of a race against server timeout. + set state(-keepalive) 0 + } else { + # Allow -keepalive but do not -pipeline - wait for the previous + # transaction to finish. + # There is a small risk of a race against server timeout. + set state(-pipeline) 0 + } + } else { + # It's a GET or HEAD. + set state(-pipeline) $http(-pipeline) + } + # See if we are supposed to use a previously opened channel. + # - In principle, ANY call to http::geturl could use a previously opened + # channel if it is available - the "Connection: keep-alive" header is a + # request to leave the channel open AFTER completion of this call. + # - In fact, we try to use an existing channel only if -keepalive 1 -- this + # means that at most one channel is left open for each value of + # $state(socketinfo). This property simplifies the mapping of open + # channels. + set reusing 0 + set alreadyQueued 0 if {$state(-keepalive)} { variable socketMapping variable socketRdState @@ -647,20 +971,97 @@ proc http::geturl {url args} { variable socketPlayCmd if {[info exists socketMapping($state(socketinfo))]} { + # - If the connection is idle, it has a "fileevent readable" binding + # to http::CheckEof, in case the server times out and half-closes + # the socket (http::CheckEof closes the other half). + # - We leave this binding in place until just before the last + # puts+flush in http::Connected (GET/HEAD) or http::Write (POST), + # after which the HTTP response might be generated. + # - Therefore we must be prepared for full closure of the socket, + # and catch errors on any socket operation. + if {[catch {fconfigure $socketMapping($state(socketinfo))}]} { Log "WARNING: socket for $state(socketinfo) was closed\ - - token $token" + - token $token" + + # The trace on "unset socketRdState(*)" cancels any pipelined + # responses. + # The trace on "`(*)" cancels any pipelined + # requests. unset socketMapping($state(socketinfo)) + unset socketRdState($state(socketinfo)) + unset socketWrState($state(socketinfo)) + unset -nocomplain socketRdQueue($state(socketinfo)) + unset -nocomplain socketWrQueue($state(socketinfo)) + unset -nocomplain socketClosing($state(socketinfo)) + unset -nocomplain socketPlayCmd($state(socketinfo)) + + # Do not automatically close the eventual connection socket. + set state(connection) {} + } elseif { [info exists socketClosing($state(socketinfo))] + && $socketClosing($state(socketinfo)) + } { + # The server has sent a "Connection: close" header. + # Do not use the persistent socket again. + # Since we have only one persistent socket per server, and the + # old socket is not yet dead, add the request to the write queue + # of the dying socket, which will be replayed by ReplayIfClose. + set reusing 1 + set sock $socketMapping($state(socketinfo)) + Log "reusing socket $sock for $state(socketinfo) - token $token" + + # Do not automatically close this connection socket. + set state(connection) {} + set alreadyQueued 1 + lassign $socketPlayCmd($state(socketinfo)) com0 com1 com2 com3 + lappend com3 $token + set socketPlayCmd($state(socketinfo)) [list $com0 $com1 $com2 $com3] } else { + # Use the persistent socket. + # The socket may not be ready to write: an earlier request might + # still be still writing (in the pipelined case) or + # writing/reading (in the nonpipeline case). This possibility + # is handled by socketWrQueue later in this command. 
+ set reusing 1 set sock $socketMapping($state(socketinfo)) Log "reusing socket $sock for $state(socketinfo) - token $token" - catch {fileevent $sock writable {}} - catch {fileevent $sock readable {}} + + # Do not automatically close this connection socket. + set state(connection) {} } } - # Do not automatically close this connection socket. - set state(connection) {} } + + if {$reusing} { + # Define state(tmpState) and state(tmpOpenCmd) for use + # by http::ReplayIfDead if the persistent connection has died. + set state(tmpState) [array get state] + + # Pass -myaddr directly to the socket command + if {[info exists state(-myaddr)]} { + lappend sockopts -myaddr $state(-myaddr) + } + + set state(tmpOpenCmd) [list {*}$defcmd {*}$sockopts {*}$targetAddr] + } + + set state(reusing) $reusing + # Excluding ReplayIfDead and the decision whether to call it, there are four + # places outside http::geturl where state(reusing) is used: + # - Connected - if reusing and not pipelined, start the state(-timeout) + # timeout (when writing). + # - DoneRequest - if reusing and pipelined, send the next pipelined write + # - Event - if reusing and pipelined, start the state(-timeout) + # timeout (when reading). + # - Event - if not reusing and pipelined, send the next pipelined + # write + + # See comments above re the start of this timeout in other cases. + if {(!$state(reusing)) && ($state(-timeout) > 0)} { + set state(after) [after $state(-timeout) \ + [list http::reset $token timeout]] + } + if {![info exists sock]} { # Pass -myaddr directly to the socket command if {[info exists state(-myaddr)]} { @@ -686,14 +1087,126 @@ proc http::geturl {url args} { set state(sock) $sock Log "Using $sock for $state(socketinfo) - token $token" \ [expr {$state(-keepalive)?"keepalive":""}] - if {$state(-keepalive)} { + + if { $state(-keepalive) + && (![info exists socketMapping($state(socketinfo))]) + } { + # Freshly-opened socket that we would like to become persistent. set socketMapping($state(socketinfo)) $sock + if {$state(-pipeline)} { + #Log new, init for pipelined, GRANT write access to $token in geturl + # Also grant premature read access to the socket. This is OK. + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } else { + # socketWrState is not used by this non-pipelined transaction. + # We cannot leave it as "Wready" because the next call to + # http::geturl with a pipelined transaction would conclude that the + # socket is available for writing. + #Log new, init for nonpipeline, GRANT r/w access to $token in geturl + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } + + if {![info exists socketRdQueue($state(socketinfo))]} { + set socketRdQueue($state(socketinfo)) {} + set varName ::http::socketRdState($state(socketinfo)) + trace add variable $varName unset ::http::CancelReadPipeline + } + if {![info exists socketWrQueue($state(socketinfo))]} { + set socketWrQueue($state(socketinfo)) {} + set varName ::http::socketWrState($state(socketinfo)) + trace add variable $varName unset ::http::CancelWritePipeline + } } if {![info exists phost]} { set phost "" } - fileevent $sock writable [list http::Connect $token $proto $phost $srvurl] + if {$reusing} { + # For use by http::ReplayIfDead if the persistent connection has died. + # Also used by NextPipelinedWrite. 
+ set state(tmpConnArgs) [list $proto $phost $srvurl] + } + + # The element socketWrState($connId) has a value which is either the name of + # the token that is permitted to write to the socket, or "Wready" if no + # token is permitted to write. + # + # The code that sets the value to Wready immediately calls + # http::NextPipelinedWrite, which examines socketWrQueue($connId) and + # processes the next request in the queue, if there is one. The value + # Wready is not found when the interpreter is in the event loop unless the + # socket is idle. + # + # The element socketRdState($connId) has a value which is either the name of + # the token that is permitted to read from the socket, or "Rready" if no + # token is permitted to read. + # + # The code that sets the value to Rready then examines + # socketRdQueue($connId) and processes the next request in the queue, if + # there is one. The value Rready is not found when the interpreter is in + # the event loop unless the socket is idle. + + if {$alreadyQueued} { + # A write may or may not be in progress. There is no need to set + # socketWrState to prevent another call stealing write access - all + # subsequent calls on this socket will come here because the socket + # will close after the current read, and its + # socketClosing($connId) is 1. + ##Log "HTTP request for token $token is queued" + + } elseif { $reusing + && $state(-pipeline) + && ($socketWrState($state(socketinfo)) ne "Wready") + } { + ##Log "HTTP request for token $token is queued for pipelined use" + lappend socketWrQueue($state(socketinfo)) $token + + } elseif { $reusing + && (!$state(-pipeline)) + && ($socketWrState($state(socketinfo)) ne "Wready") + } { + # A write is queued or in progress. Lappend to the write queue. + ##Log "HTTP request for token $token is queued for nonpipeline use" + lappend socketWrQueue($state(socketinfo)) $token + + } elseif { $reusing + && (!$state(-pipeline)) + && ($socketWrState($state(socketinfo)) eq "Wready") + && ($socketRdState($state(socketinfo)) ne "Rready") + } { + # A read is queued or in progress, but not a write. Cannot start the + # nonpipeline transaction, but must set socketWrState to prevent a + # pipelined request jumping the queue. + ##Log "HTTP request for token $token is queued for nonpipeline use" + #Log re-use nonpipeline, GRANT delayed write access to $token in geturl + + set socketWrState($state(socketinfo)) peNding + lappend socketWrQueue($state(socketinfo)) $token + + } else { + if {$reusing && $state(-pipeline)} { + #Log re-use pipelined, GRANT write access to $token in geturl + set socketWrState($state(socketinfo)) $token + + } elseif {$reusing} { + # Cf tests above - both are ready. + #Log re-use nonpipeline, GRANT r/w access to $token in geturl + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + + } else { + # (!$reusing) + } + + # All (!$reusing) cases come here, and also some $reusing cases if the + # connection is ready. + #Log ---- $state(socketinfo) << conn to $token for HTTP request (a) + # Connect does its own fconfigure. + fileevent $sock writable \ + [list http::Connect $token $proto $phost $srvurl] + } # Wait for the connection to complete. if {![info exists state(-command)]} { @@ -716,7 +1229,7 @@ proc http::geturl {url args} { return -code error $err } } - + ##Log Leaving http::geturl - token $token return $token } @@ -726,8 +1239,8 @@ proc http::geturl {url args} { # established. # # Arguments: -# token State token. -# proto What protocol (http, https, etc.) 
was used to connect. +# token State token. +# proto What protocol (http, https, etc.) was used to connect. # phost Are we using keep-alive? Non-empty if yes. # srvurl Service-local URL that we're requesting # Results: @@ -748,6 +1261,11 @@ proc http::Connected {token proto phost srvurl} { upvar 0 $token state set tk [namespace tail $token] + if {$state(reusing) && (!$state(-pipeline)) && ($state(-timeout) > 0)} { + set state(after) [after $state(-timeout) \ + [list http::reset $token timeout]] + } + # Set back the variables needed here. set sock $state(sock) set isQueryChannel [info exists state(-querychannel)] @@ -759,7 +1277,7 @@ proc http::Connected {token proto phost srvurl} { set defport [lindex $urlTypes($lower) 0] # Send data in cr-lf format, but accept any line terminators. - # Initialisation to {auto *} now done in geturl. + # Initialisation to {auto *} now done in geturl, KeepSocket and DoneRequest. # We are concerned here with the request (write) not the response (read). lassign [fconfigure $sock -translation] trRead trWrite fconfigure $sock -translation [list $trRead crlf] \ @@ -800,7 +1318,11 @@ proc http::Connected {token proto phost srvurl} { set state(-protocol) 1.0 } set accept_types_seen 0 + + Log ^B$tk begin sending request - token $token + if {[catch { + set state(method) $how puts $sock "$how $srvurl HTTP/$state(-protocol)" if {[dict exists $state(-headers) Host]} { # Allow Host spoofing. [Bug 928154] @@ -889,6 +1411,7 @@ proc http::Connected {token proto phost srvurl} { # response. if {$isQuery || $isQueryChannel} { + # POST method. if {!$content_type_seen} { puts $sock "Content-Type: $state(-type)" } @@ -899,25 +1422,624 @@ proc http::Connected {token proto phost srvurl} { lassign [fconfigure $sock -translation] trRead trWrite fconfigure $sock -translation [list $trRead binary] fileevent $sock writable [list http::Write $token] + # The http::Write command decides when to make the socket readable, + # using the same test as the GET/HEAD case below. } else { + # GET or HEAD method. + if { (![catch {fileevent $sock readable} binding]) + && ($binding eq [list http::CheckEof $sock]) + } { + # Remove the "fileevent readable" binding of an idle persistent + # socket to http::CheckEof. We can no longer treat bytes + # received as junk. The server might still time out and + # half-close the socket if it has not yet received the first + # "puts". + fileevent $sock readable {} + } puts $sock "" flush $sock - fileevent $sock readable [list http::Event $sock $token] + Log ^C$tk end sending request - token $token + # End of writing (GET/HEAD methods). The request has been sent. + + DoneRequest $token } } err]} { # The socket probably was never connected, or the connection dropped # later. + if {[info exists state(reusing)] && $state(reusing)} { + # The socket was closed at the server end, and closed at + # this end by http::CheckEof. + if {[TestForReplay $token write $err a]} { + return + } else { + Finish $token {failed to re-use socket} + } - # if state(status) is error, it means someone's already called - # Finish to do the above-described clean up. - if {$state(status) ne "error"} { + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they will + # be discarded. 
+ } elseif {$state(status) eq ""} { + Finish $token {failed to re-use socket} + } elseif {$state(status) ne "error"} { Finish $token $err + } else { + # if state(status) is error, it means someone's already called + # Finish to do the above-described clean up. } } return } +# http::DoneRequest -- +# +# Command called when a request has been sent. It will arrange the +# next request and/or response as appropriate. + +proc http::DoneRequest {token} { + variable http + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + variable $token + upvar 0 $token state + set tk [namespace tail $token] + set sock $state(sock) + + # If pipelined, connect the next HTTP request to the socket. + if {$state(reusing) && $state(-pipeline)} { + # Enable next token (if any) to write. + # The value "Wready" is set only here, and + # in http::Event after reading the response-headers of a + # non-reusing transaction. + # Previous value is $token. It cannot be pending. + set socketWrState($state(socketinfo)) Wready + + # Now ready to write the next pipelined request (if any). + http::NextPipelinedWrite $token + } else { + # If pipelined, this is the first transaction on this socket. We wait + # for the response headers to discover whether the connection is + # persistent. (If this is not done and the connection is not + # persistent, we SHOULD retry and then MUST NOT pipeline before knowing + # that we have a persistent connection + # (rfc2616 8.1.2.2)). + } + + # Connect to receive the response, unless the socket is pipelined + # and another response is being sent. + # This code block is separate from the code below because there are + # cases where socketRdState already has the value $token. + if { $state(-keepalive) + && $state(-pipeline) + && [info exists socketRdState($state(socketinfo))] + && ($socketRdState($state(socketinfo)) eq "Rready") + } { + #Log pipelined, GRANT read access to $token in Connected + set socketRdState($state(socketinfo)) $token + } + + if { $state(-keepalive) + && $state(-pipeline) + && [info exists socketRdState($state(socketinfo))] + && ($socketRdState($state(socketinfo)) ne $token) + } { + # Do not read from the socket until it is ready. + ##Log "HTTP response for token $token is queued for pipelined use" + lappend socketRdQueue($state(socketinfo)) $token + } else { + # In the pipelined case, connection for reading depends on the + # value of socketRdState. + # In the nonpipeline case, connection for reading always occurs. + #Log ---- $state(socketinfo) >> conn to $token for HTTP response (b) + lassign [fconfigure $sock -translation] trRead trWrite + fconfigure $sock -translation [list auto $trWrite] \ + -buffersize $state(-blocksize) + Log ^D$tk begin receiving response - token $token + fileevent $sock readable [list http::Event $sock $token] + } + return +} + +# http::NextPipelinedWrite +# +# - Connecting a socket to a token for writing is done by this command and by +# command KeepSocket. +# - If another request has a pipelined write scheduled for $token's socket, +# and if the socket is ready to accept it, connect the write and update +# the queue accordingly. +# - This command is called from http::DoneRequest and http::Event, +# IF $state(-pipeline) AND (the current transfer has reached the point at +# which the socket is ready for the next request to be written). 
+# - This command is called when a token has write access and is pipelined and +# keep-alive, and sets socketWrState to Wready. +# - The command need not consider the case where socketWrState is set to a token +# that does not yet have write access. Such a token is waiting for Rready, +# and the assignment of the connection to the token will be done elsewhere (in +# http::KeepSocket). +# - This command cannot be called after socketWrState has been set to a +# "pending" token value (that is then overwritten by the caller), because that +# value is set by this command when it is called by an earlier token when it +# relinquishes its write access, and the pending token is always the next in +# line to write. + +proc http::NextPipelinedWrite {token} { + variable http + variable socketRdState + variable socketWrState + variable socketWrQueue + + variable $token + upvar 0 $token state + set connId $state(socketinfo) + + if { $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && ([set token2 [lindex $socketWrQueue($connId) 0] + set ${token2}(-pipeline) + ] + ) + } { + # - The usual case for a pipelined connection, ready for a new request. + #Log pipelined, GRANT write access to $token2 in NextPipelinedWrite + set conn [set ${token2}(tmpConnArgs)] + set socketWrState($connId) $token2 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token2 {*}$conn] + #Log ---- $connId << conn to $token2 for HTTP request (b) + + # In the tests below, the next request will be nonpipeline. + } elseif { $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![ set token3 [lindex $socketWrQueue($connId) 0] + set ${token3}(-pipeline) + ] + ) + + && [info exists socketRdState($connId)] + && ($socketRdState($connId) eq "Rready") + } { + # The case in which the next request will be non-pipelined, and the read + # and write queues is ready: which is the condition for a non-pipelined + # write. + variable $token3 + upvar 0 $token3 state3 + set conn [set ${token3}(tmpConnArgs)] + #Log nonpipeline, GRANT r/w access to $token3 in NextPipelinedWrite + set socketRdState($connId) $token3 + set socketWrState($connId) $token3 + set socketWrQueue($connId) [lrange $socketWrQueue($connId) 1 end] + # Connect does its own fconfigure. + fileevent $state(sock) writable [list http::Connect $token3 {*}$conn] + #Log ---- $state(sock) << conn to $token3 for HTTP request (c) + + } elseif { $state(-pipeline) + && [info exists socketWrState($connId)] + && ($socketWrState($connId) eq "Wready") + + && [info exists socketWrQueue($connId)] + && [llength $socketWrQueue($connId)] + && (![set token2 [lindex $socketWrQueue($connId) 0] + set ${token2}(-pipeline) + ] + ) + } { + # - The case in which the next request will be non-pipelined, but the + # read queue is NOT ready. + # - A read is queued or in progress, but not a write. Cannot start the + # nonpipeline transaction, but must set socketWrState to prevent a new + # pipelined request (in http::geturl) jumping the queue. 
+ # - Because socketWrState($connId) is not set to Wready, the assignment + # of the connection to $token2 will be done elsewhere - by command + # http::KeepSocket when $socketRdState($connId) is set to "Rready". + + #Log re-use nonpipeline, GRANT delayed write access to $token in NextP.. + set socketWrState($connId) peNding + + } else { + # No requests in socketWrQueue. Nothing to do. + } + + return +} + +# http::CancelReadPipeline +# +# Cancel pipelined responses on a closing "Keep-Alive" socket. +# +# - Called by a trace when the variable ::http::socketRdState($connId) is +# unset (the trace itself is automatically removed). +# - The variable relates to a Keep-Alive socket, which has been closed. +# - Cancels all pipelined responses. The requests have been sent, +# the responses have not yet been received. +# - N.B. Always delete ::http::socketRdState($connId) before deleting +# ::http::socketRdQueue($connId), or this command will do nothing. +# +# Arguments +# As for a trace command on a variable. + +proc http::CancelReadPipeline {name1 connId op} { + variable socketRdQueue + + ##Log CancelReadPipeline $name1 $connId $op + if {[info exists socketRdQueue($connId)]} { + set msg {the connection was Closed} + foreach token $socketRdQueue($connId) { + set tk [namespace tail $token] + Log ^X$tk end of response "($msg)" - token $token + set ${token}(status) eof + Finish $token ;#$msg + } + set socketRdQueue($connId) {} + } + return +} + +# http::CancelWritePipeline +# +# Cancel queued events on a closing "Keep-Alive" socket. +# +# - Called by a trace when the variable ::http::socketWrState($connId) is +# unset (the trace itself is automatically removed). +# - The variable relates to a Keep-Alive socket, which has been closed. +# - In pipelined or nonpipeline case: cancels all queued requests. The +# requests have not yet been sent, the responses are not due and have +# no data to cancel. +# - N.B. Always delete ::http::socketWrState($connId) before deleting +# ::http::socketWrQueue($connId), or this command will do nothing. +# +# Arguments +# As for a trace command on a variable. + +proc http::CancelWritePipeline {name1 connId op} { + variable socketWrQueue + + ##Log CancelWritePipeline $name1 $connId $op + if {[info exists socketWrQueue($connId)]} { + set msg {the connection was Closed} + foreach token $socketWrQueue($connId) { + set tk [namespace tail $token] + Log ^X$tk end of response "($msg)" - token $token + set ${token}(status) eof + Finish $token ;#$msg + } + set socketWrQueue($connId) {} + } + return +} + +# http::ReplayIfDead -- +# +# - A query on a re-used persistent socket failed at the earliest opportunity, +# because the socket had been closed by the server. Keep the token, tidy up, +# and try to connect on a fresh socket. +# - The connection is monitored for eof by the command http::CheckEof. Thus +# http::ReplayIfDead is needed only when a server event (half-closing an +# apparently idle connection), and a client event (sending a request) occur at +# almost the same time, and neither client nor server detects the other's +# action before performing its own (an "asynchronous close event"). +# - To simplify testing of http::ReplayIfDead, set TEST_EOF 1 in +# http::KeepSocket, and then http::ReplayIfDead will be called if http::geturl +# is called at any time after the server timeout. +# +# Arguments: +# token Connection token. +# +# Side Effects: +# Use the same token, but try to open a new socket. 
+ +proc http::ReplayIfDead {tokenArg doing} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + variable $tokenArg + upvar 0 $tokenArg stateArg + + Log running http::ReplayIfDead for $tokenArg $doing + + # 1. Merge the tokens for transactions in flight, the read (response) queue, + # and the write (request) queue. + + set InFlightR {} + set InFlightW {} + + # Obtain the tokens for transactions in flight. + if {$stateArg(-pipeline)} { + # Two transactions may be in flight. The "read" transaction was first. + # It is unlikely that the server would close the socket if a response + # was pending; however, an earlier request (as well as the present + # request) may have been sent and ignored if the socket was half-closed + # by the server. + + if { [info exists socketRdState($stateArg(socketinfo))] + && ($socketRdState($stateArg(socketinfo)) ne "Rready") + } { + lappend InFlightR $socketRdState($stateArg(socketinfo)) + } elseif {($doing eq "read")} { + lappend InFlightR $tokenArg + } else { + } + + if { [info exists socketWrState($stateArg(socketinfo))] + && $socketWrState($stateArg(socketinfo)) ni {Wready peNding} + } { + lappend InFlightW $socketWrState($stateArg(socketinfo)) + } elseif {($doing eq "write")} { + lappend InFlightW $tokenArg + } else { + } + + # Report any inconsistency of $tokenArg with socket*state. + if { ($doing eq "read") + && [info exists socketRdState($stateArg(socketinfo))] + && ($tokenArg ne $socketRdState($stateArg(socketinfo))) + } { + Log WARNING - ReplayIfDead pipelined tokenArg $tokenArg $doing \ + ne socketRdState($stateArg(socketinfo)) \ + $socketRdState($stateArg(socketinfo)) + + } elseif { + ($doing eq "write") + && [info exists socketWrState($stateArg(socketinfo))] + && ($tokenArg ne $socketWrState($stateArg(socketinfo))) + } { + Log WARNING - ReplayIfDead pipelined tokenArg $tokenArg $doing \ + ne socketWrState($stateArg(socketinfo)) \ + $socketWrState($stateArg(socketinfo)) + } else { + } + } else { + # One transaction should be in flight. + # socketRdState, socketWrQueue are used. + # socketRdQueue should be empty. + + # Report any inconsistency of $tokenArg with socket*state. + if {$tokenArg ne $socketRdState($stateArg(socketinfo))} { + Log WARNING - ReplayIfDead nonpipeline tokenArg $tokenArg $doing \ + ne socketRdState($stateArg(socketinfo)) \ + $socketRdState($stateArg(socketinfo)) + } else { + } + + # Report the inconsistency that socketRdQueue is non-empty. + if { [info exists socketRdQueue($stateArg(socketinfo))] + && ($socketRdQueue($stateArg(socketinfo)) ne {}) + } { + Log WARNING - ReplayIfDead nonpipeline tokenArg $tokenArg $doing \ + has read queue socketRdQueue($stateArg(socketinfo)) \ + $socketRdQueue($stateArg(socketinfo)) ne {} + } else { + } + + lappend InFlightW $socketRdState($stateArg(socketinfo)) + set socketRdQueue($stateArg(socketinfo)) {} + } + + set newQueue {} + lappend newQueue {*}$InFlightR + lappend newQueue {*}$socketRdQueue($stateArg(socketinfo)) + lappend newQueue {*}$InFlightW + lappend newQueue {*}$socketWrQueue($stateArg(socketinfo)) + + + # 2. Tidy up tokenArg. This is a cut-down form of Finish/CloseSocket. + # CloseSocket cancels file events, closes the socket, and unsets the + # socketMapping. + # Finish calls CloseSocket, if called as below. + # Don't want Eot. + # Do not change state(status). + # Want to not unset socketWrState(*). 
+ + if {[info exists stateArg(after)]} { + after cancel $stateArg(after) + } + catch {close $stateArg(sock)} + + # The relevant element of socketMapping, socketRdState, socketWrState, + # socketRdQueue, socketWrQueue, socketClosing, socketPlayCmd will be set to + # new values in ReplayCore. + # The trace on "unset socketRdState(*)" cancels any pipelined responses. + # It also clears socketRdQueue(*). + # Transactions, if any, that are awaiting responses cannot be completed. + # They are listed for re-sending in newQueue. + # There is no need to unset socketWrState - the write queue transactions + # have not yet been sent, nor the state(-timeout) events. + # All tokens are preserved for re-use by ReplayCore. + + unset socketRdState($stateArg(socketinfo)) + + ReplayCore $newQueue + return +} + +# http::ReplayIfClose -- +# +# A request on a socket that was previously "Connection: keep-alive" has +# received a "Connection: close" response header. The server supplies +# that response correctly, but any later requests already queued on this +# connection will be lost when the socket closes. +# +# This command takes arguments that represent the socketWrState, +# socketRdQueue and socketWrQueue for this connection. The socketRdState +# is not needed because the server responds in full to the request that +# received the "Connection: close" response header. +# +# Existing request tokens $token (::http::$n) are preserved. The caller +# will be unaware that the request was processed this way. + +proc http::ReplayIfClose {Wstate Rqueue Wqueue} { + Log running http::ReplayIfClose for $Wstate $Rqueue $Wqueue + + if {$Wstate in $Rqueue || $Wstate in $Wqueue} { + Log WARNING duplicate token in http::ReplayIfClose - token $Wstate + set Wstate Wready + } + + # 1. Create newQueue + set InFlightW {} + if {$Wstate ni {Wready peNding}} { + lappend InFlightW $Wstate + } + + set newQueue {} + lappend newQueue {*}$Rqueue + lappend newQueue {*}$InFlightW + lappend newQueue {*}$Wqueue + + # 2. Cleanup - none needed, done by the caller. + + ReplayCore $newQueue + return +} + +# http::ReplayCore -- +# +# Command to replay a list of requests, using existing connection tokens. +# +# Abstracted from http::geturl which stores extra state in state(tmp*) so +# we don't need to do the argument processing again. +# +# Arguments: +# newQueue List of connection tokens. +# +# Side Effects: +# Use existing tokens, but try to open a new socket. + +proc http::ReplayCore {newQueue} { + variable socketMapping + variable socketRdState + variable socketWrState + variable socketRdQueue + variable socketWrQueue + variable socketClosing + variable socketPlayCmd + + if {[llength $newQueue] == 0} { + # Nothing to do. + return + } + + ##Log running ReplayCore for {*}$newQueue + set newToken [lindex $newQueue 0] + set newQueue [lrange $newQueue 1 end] + + # 3. Use newToken, and restore its values of state(*). Do not restore + # elements tmp* - we try again only once. + + set token $newToken + variable $token + upvar 0 $token state + + if {!( + [info exists state(tmpState)] + && [info exists state(tmpOpenCmd)] + && [info exists state(tmpConnArgs)] + ) + } { + Log FAILED in http::ReplayCore - NO tmp vars + Finish $token error 1 + return + } + + # Don't alter state(status) - this would trigger http::wait if it is in use. 
+ set tmpState $state(tmpState) + set tmpOpenCmd $state(tmpOpenCmd) + set tmpConnArgs $state(tmpConnArgs) + foreach name [array names state] { + if {$name ne "status"} { + unset state($name) + } + } + + # Don't alter state(status). + dict unset tmpState status + array set state $tmpState + set state(reusing) 0 + + if {$state(-timeout) > 0} { + set resetCmd [list http::reset $token timeout] + set state(after) [after $state(-timeout) $resetCmd] + } + + # 4. Open a socket. + if {[catch {eval $tmpOpenCmd} sock]} { + # Something went wrong while trying to establish the connection. + Log FAILED - $tmpOpenCmd + set state(sock) $sock + Finish $token error 1 + return + } + + # 5. Configure the persistent socket data. + if {$state(-keepalive)} { + set socketMapping($state(socketinfo)) $sock + if {$state(-pipeline)} { + #Log new, init for pipelined, GRANT write acc to $token ReplayCore + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } else { + #Log new, init for nonpipeline, GRANT r/w acc to $token ReplayCore + set socketRdState($state(socketinfo)) $token + set socketWrState($state(socketinfo)) $token + } + + if {![info exists socketRdQueue($state(socketinfo))]} { + set socketRdQueue($state(socketinfo)) {} + set varName ::http::socketRdState($state(socketinfo)) + trace add variable $varName unset ::http::CancelReadPipeline + } + set socketRdQueue($state(socketinfo)) {} + + if {![info exists socketWrQueue($state(socketinfo))]} { + set socketWrQueue($state(socketinfo)) {} + set varName ::http::socketWrState($state(socketinfo)) + trace add variable $varName unset ::http::CancelWritePipeline + } + set socketWrQueue($state(socketinfo)) $newQueue + set socketClosing($state(socketinfo)) 0 + set socketPlayCmd($state(socketinfo)) {} + } + + # 6. Configure sockets in the queue. + foreach tok $newQueue { + set ${tok}(sock) $sock + } + + # 7. Configure the socket for newToken to send a request. + set state(sock) $sock + Log "Using $sock for $state(socketinfo) - token $token" \ + [expr {$state(-keepalive)?"keepalive":""}] + + # Initialisation of a new socket. + fconfigure $sock -translation {auto crlf} -buffersize $state(-blocksize) + + # Connect does its own fconfigure. + fileevent $sock writable [list http::Connect $token {*}$tmpConnArgs] + #Log ---- $sock << conn to $token for HTTP request (e) + return +} + # Data access functions: # Data - the URL data # Status - the transaction status: ok, reset, eof, timeout, error @@ -1009,8 +2131,22 @@ proc http::Connect {token proto phost srvurl} { [eof $state(sock)] || [set err [fconfigure $state(sock) -error]] ne "" } { + if {[info exists state(reusing)] && $state(reusing)} { + # The socket was closed at the server end, and closed at + # this end by http::CheckEof. + if {[TestForReplay $token write $err b]} { + return + } + + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they will + # be discarded. + } Finish $token "connect failed $err" } else { + set state(state) connecting fileevent $state(sock) writable {} ::http::Connected $token $proto $phost $srvurl } @@ -1050,7 +2186,21 @@ proc http::Write {token} { if {[info exists state(-query)]} { # Chop up large query strings so queryprogress callback can give # smooth feedback. - + if { $state(queryoffset) + $state(-queryblocksize) + >= $state(querylength) + } { + # This will be the last puts for the request-body. 
+ if { (![catch {fileevent $sock readable} binding]) + && ($binding eq [list http::CheckEof $sock]) + } { + # Remove the "fileevent readable" binding of an idle + # persistent socket to http::CheckEof. We can no longer + # treat bytes received as junk. The server might still time + # out and half-close the socket if it has not yet received + # the first "puts". + fileevent $sock readable {} + } + } puts -nonewline $sock \ [string range $state(-query) $state(queryoffset) \ [expr {$state(queryoffset) + $state(-queryblocksize) - 1}]] @@ -1063,6 +2213,19 @@ proc http::Write {token} { # Copy blocks from the query channel set outStr [read $state(-querychannel) $state(-queryblocksize)] + if {[eof $state(-querychannel)]} { + # This will be the last puts for the request-body. + if { (![catch {fileevent $sock readable} binding]) + && ($binding eq [list http::CheckEof $sock]) + } { + # Remove the "fileevent readable" binding of an idle + # persistent socket to http::CheckEof. We can no longer + # treat bytes received as junk. The server might still time + # out and half-close the socket if it has not yet received + # the first "puts". + fileevent $sock readable {} + } + } puts -nonewline $sock $outStr incr state(queryoffset) [string length $outStr] if {[eof $state(-querychannel)]} { @@ -1076,10 +2239,14 @@ proc http::Write {token} { set state(posterror) $err set done 1 } + if {$done} { catch {flush $sock} fileevent $sock writable {} - fileevent $sock readable [list http::Event $sock $token] + Log ^C$tk end sending request - token $token + # End of writing (POST method). The request has been sent. + + DoneRequest $token } # Callback to the client after we've completely handled everything. @@ -1126,29 +2293,74 @@ proc http::Event {sock token} { - token $token" } } + Log ^X$tk end of response (token error) - token $token CloseSocket $sock return } if {$state(state) eq "connecting"} { ##Log - connecting - token $token + if { $state(reusing) + && $state(-pipeline) + && ($state(-timeout) > 0) + && (![info exists state(after)]) + } { + set state(after) [after $state(-timeout) \ + [list http::reset $token timeout]] + } + if {[catch {gets $sock state(http)} nsl]} { - Finish $token $nsl - return + if {[info exists state(reusing)] && $state(reusing)} { + # The socket was closed at the server end, and closed at + # this end by http::CheckEof. + + if {[TestForReplay $token read $nsl c]} { + return + } + + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they + # will be discarded. + } else { + Log ^X$tk end of response (error) - token $token + Finish $token $nsl + return + } } elseif {$nsl >= 0} { ##Log - connecting 1 - token $token set state(state) "header" + } elseif { [eof $sock] + && [info exists state(reusing)] + && $state(reusing) + } { + # The socket was closed at the server end, and we didn't notice. + # This is the first read - where the closure is usually first + # detected. + + if {[TestForReplay $token read {} d]} { + return + } + + # else: + # This is NOT a persistent socket that has been closed since its + # last use. + # If any other requests are in flight or pipelined/queued, they will + # be discarded. } else { ##Log - connecting 2 - token $token - # nsl is -1 so either fblocked (OK) or eof. + # nsl is -1 so either fblocked (OK) or (eof and not reusing). # Continue. Any eof is processed at the end of this proc. 
} } elseif {$state(state) eq "header"} { if {[catch {gets $sock line} nhl]} { ##Log header failed - token $token + Log ^X$tk end of response (error) - token $token Finish $token $nhl return } elseif {$nhl == 0} { ##Log header done - token $token + Log ^E$tk end of response headers - token $token # We have now read all headers # We ignore HTTP/1.1 100 Continue returns. RFC2616 sec 8.2.3 if { ($state(http) == "") @@ -1158,10 +2370,89 @@ proc http::Event {sock token} { return } + if { ([info exists state(connection)]) + && ([info exists socketMapping($state(socketinfo))]) + && ($state(connection) eq "keep-alive") + && ($state(-keepalive)) + && (!$state(reusing)) + && ($state(-pipeline)) + } { + # Response headers received for first request on a persistent + # socket. Now ready for pipelined writes (if any). + # Previous value is $token. It cannot be pending. + set socketWrState($state(socketinfo)) Wready + http::NextPipelinedWrite $token + } + + # Once a "close" has been signaled, the client MUST NOT send any + # more requests on that connection. + # + # If either the client or the server sends the "close" token in the + # Connection header, that request becomes the last one for the + # connection. + + if { ([info exists state(connection)]) + && ([info exists socketMapping($state(socketinfo))]) + && ($state(connection) eq "close") + && ($state(-keepalive)) + } { + # The server warns that it will close the socket after this + # response. + ##Log WARNING - socket will close after response for $token + # Prepare data for a call to ReplayIfClose. + if { ($socketRdQueue($state(socketinfo)) ne {}) + || ($socketWrQueue($state(socketinfo)) ne {}) + || ($socketWrState($state(socketinfo)) ni + [list Wready peNding $token]) + } { + set InFlightW $socketWrState($state(socketinfo)) + if {$InFlightW in [list Wready peNding $token]} { + set InFlightW Wready + } else { + set msg "token ${InFlightW} is InFlightW" + ##Log $msg - token $token + } + + set socketPlayCmd($state(socketinfo)) \ + [list ReplayIfClose $InFlightW \ + $socketRdQueue($state(socketinfo)) \ + $socketWrQueue($state(socketinfo))] + + # See discussion below. + foreach tokenElement $socketRdQueue($state(socketinfo)) { + if {[info exists ${tokenElement}(after)]} { + after cancel [set ${tokenElement}(after)] + } + } + + # - Clear the queues. By doing this here, the code for + # connecting the next token to the socket needs no + # modification. + # - Do not unset socketRdState and socketWrState and trigger + # their traces, because this will close the socket, which + # is still needed for the current read. + # - The only other thing that the traces would have done is + # cancel the state(after) timeout events. This is now + # done above. + # - All tokens are preserved for re-use by ReplayCore. + + set socketRdQueue($state(socketinfo)) {} + set socketWrQueue($state(socketinfo)) {} + + } else { + set socketPlayCmd($state(socketinfo)) \ + {ReplayIfClose Wready {} {}} + } + + # Do not allow further connections on this socket. 
+ set socketClosing($state(socketinfo)) 1 + } + set state(state) body # If doing a HEAD, then we won't get any body if {$state(-validate)} { + Log ^F$tk end of response for HEAD request - token $token set state(state) complete Eot $token return @@ -1190,6 +2481,8 @@ proc http::Event {sock token} { } { set msg {body size is 0 and no events likely - complete} Log "$msg - token $token" + set msg {(length unknown, set to 0)} + Log ^F$tk end of response body {*}$msg - token $token set state(state) complete Eot $token return @@ -1272,6 +2565,7 @@ proc http::Event {sock token} { # Do not tolerate bad -handler - fail with error status. set msg {the -handler command for http::geturl must\ return an integer (the number of bytes read)} + Log ^X$tk end of response (handler error) - token $token Eot $token $msg } else { # Tolerate the bad -handler, and continue. The penalty: @@ -1303,6 +2597,7 @@ proc http::Event {sock token} { append state(transfer_final) $line set n 0 } else { + Log ^F$tk end of response body (chunked) - token $token Log "final chunk part - token $token" Eot $token } @@ -1333,6 +2628,8 @@ proc http::Event {sock token} { token $token" set n 0 set state(connection) close + Log ^X$tk end of response (chunk error) \ + - token $token set msg {error in chunked encoding - fetch\ terminated} Eot $token $msg @@ -1348,6 +2645,7 @@ proc http::Event {sock token} { ##Log bad-chunk-measure - token $token set n 0 set state(connection) close + Log ^X$tk end of response (chunk error) - token $token Eot $token {error in chunked encoding - fetch terminated} } } else { @@ -1393,11 +2691,13 @@ proc http::Event {sock token} { ($state(totalsize) > 0) && ($state(currentsize) >= $state(totalsize)) } { + Log ^F$tk end of response body (unchunked) - token $token set state(state) complete Eot $token } } } err]} { + Log ^X$tk end of response (error ${err}) - token $token Finish $token $err return } else { @@ -1419,19 +2719,77 @@ proc http::Event {sock token} { # can be completed by eof. # The value "complete" is set only in http::Event, and it is # used only in the test above. + Log ^F$tk end of response body (unchunked, eof) - token $token Eot $token } else { # Premature eof. + Log ^X$tk end of response (unexpected eof) - token $token Eot $token eof } } else { # open connection closed on a token that has been cleaned up. + Log ^X$tk end of response (token error) - token $token CloseSocket $sock } } return } +# http::TestForReplay +# +# Command called if eof is discovered when a socket is first used for a +# new transaction. Typically this occurs if a persistent socket is used +# after a period of idleness and the server has half-closed the socket. +# +# token - the connection token returned by http::geturl +# doing - "read" or "write" +# err - error message, if any +# caller - code to identify the caller - used only in logging +# +# Return Value: boolean, true iff the command calls http::ReplayIfDead. + +proc http::TestForReplay {token doing err caller} { + variable http + variable $token + upvar 0 $token state + set tk [namespace tail $token] + if {$doing eq "read"} { + set code Q + set action response + set ing reading + } else { + set code P + set action request + set ing writing + } + + if {$err eq {}} { + set err "detect eof when $ing (server timed out?)" + } + + if {$state(method) eq "POST" && !$http(-repost)} { + # No Replay. + # The present transaction will end when Finish is called. + # That call to Finish will abort any other transactions + # currently in the write queue. 
+ # For calls from http::Event this occurs when execution + # reaches the code block at the end of that proc. + set msg {no retry for POST with http::config -repost 0} + Log reusing socket failed "($caller)" - $msg - token $token + Log error - $err - token $token + Log ^X$tk end of $action (error) - token $token + return 0 + } else { + # Replay. + set msg {try a new socket} + Log reusing socket failed "($caller)" - $msg - token $token + Log error - $err - token $token + Log ^$code$tk Any unfinished (incl this one) failed - token $token + ReplayIfDead $token $doing + return 1 + } +} + # http::IsBinaryContentType -- # # Determine if the content-type means that we should definitely transfer @@ -1475,6 +2833,8 @@ proc http::IsBinaryContentType {type} { # Results: # The line of text, without trailing newline +# FIXME get rid of blocking + proc http::getTextLine {sock} { set tr [fconfigure $sock -translation] lassign $tr trRead trWrite @@ -1662,7 +3022,7 @@ proc http::Eot {token {reason {}}} { # token Connection token. # # Results: -# The status after the wait. +# The status after the wait. proc http::wait {token} { variable $token diff --git a/tests/httpPipeline.test b/tests/httpPipeline.test new file mode 100644 index 0000000..017661d --- /dev/null +++ b/tests/httpPipeline.test @@ -0,0 +1,859 @@ +# httpPipeline.test +# +# Test HTTP/1.1 concurrent requests including +# queueing, pipelining and retries. +# +# Copyright (C) 2018 Keith Nash +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. + +package require tcltest 2 +namespace import -force ::tcltest::* + +package require http 2.8 + +set sourcedir [file normalize [file dirname [info script]]] +source [file join $sourcedir httpTest.tcl] +source [file join $sourcedir httpTestScript.tcl] + +# ------------------------------------------------------------------------------ +# (1) Define the test scripts that will be used to generate logs for analysis - +# and also define the "correct" results. +# ------------------------------------------------------------------------------ + +proc ReturnTestScriptAndResult {ca cb delay te} { + + switch -- $ca { + 1 {set start { + START + KEEPALIVE 0 + PIPELINE 0 + }} + + 2 {set start { + START + KEEPALIVE 0 + PIPELINE 1 + }} + + 3 {set start { + START + KEEPALIVE 1 + PIPELINE 0 + }} + + 4 {set start { + START + KEEPALIVE 1 + PIPELINE 1 + }} + + default { + return -code error {no matching script} + } + } + + set middle " + [list DELAY $delay] + " + + switch -- $cb { + 1 {set end { + GET a + GET b + GET c + GET a + STOP + } + set resShort {1 ? ? ?} + set resLong {1 2 3 4} + } + + 2 {set end { + GET a + HEAD b + GET c + HEAD a + HEAD c + STOP + } + set resShort {1 ? ? ? ?} + set resLong {1 2 3 4 5} + } + + 3 {set end { + HEAD a + GET b + HEAD c + HEAD b + GET a + GET b + STOP + } + set resShort {1 ? ? ? ? ?} + set resLong {1 2 3 4 5 6} + } + + 4 {set end { + GET a + GET b + GET c + GET a + POST b address=home code=brief paid=yes + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? ? ? 5 ? ? ? 
?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 5 {set end { + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + STOP + } + set resShort {1 2 3 4 5 6 7 8 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 6 {set end { + POST a address=home code=brief paid=yes + GET b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + GET a address=home code=brief paid=yes + GET b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 ? 3 ? ? 6 7 ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 7 {set end { + GET b address=home code=brief paid=yes + POST a address=home code=brief paid=yes + GET a address=home code=brief paid=yes + POST c address=home code=brief paid=yes + GET b address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes + POST c address=home code=brief paid=yes + POST a address=home code=brief paid=yes + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 2 ? 4 ? ? 7 8 ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 8 {set end { + # Telling the server to close the connection. + GET a + GET b close=y + GET c + GET a + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? 3 ? ? ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 9 {set end { + # Telling the server to close the connection. + GET a + POST b close=y address=home code=brief paid=yes + GET c + GET a + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 2 3 ? ? ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 10 {set end { + # Telling the server to close the connection. + GET a + GET b close=y + POST c address=home code=brief paid=yes + GET a + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? 3 ? ? ? ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 11 {set end { + # Telling the server to close the connection twice. + GET a + GET b close=y + GET c + GET a + GET b close=y + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 ? 3 ? ? 6 ? ? ?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 12 {set end { + # Telling the server to delay before sending the response. + GET a + GET b delay=1 + GET c + GET a + GET b + STOP + } + set resShort {1 ? ? ? ?} + set resLong {1 2 3 4 5} + } + + 13 {set end { + # Making the server close the connection (time out). + GET a + WAIT 2000 + GET b + GET c + GET a + GET b + STOP + } + set resShort {1 2 ? ? ?} + set resLong {1 2 3 4 5} + } + + 14 {set end { + # Making the server close the connection (time out) twice. + GET a + WAIT 2000 + GET b + GET c + GET a + WAIT 2000 + GET b + GET c + GET a + GET b + GET c + STOP + } + set resShort {1 2 ? ? 5 ? ? ? 
?} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 15 {set end { + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes close=y delay=1 + POST c address=home code=brief paid=yes delay=1 + POST a address=home code=brief paid=yes close=y + WAIT 2000 + POST b address=home code=brief paid=yes delay=1 + POST c address=home code=brief paid=yes close=y + POST a address=home code=brief paid=yes + POST b address=home code=brief paid=yes close=y + POST c address=home code=brief paid=yes + STOP + } + set resShort {1 2 3 4 5 6 7 8 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 16 {set end { + POST a address=home code=brief paid=yes + GET b address=home code=brief paid=yes + POST c address=home code=brief paid=yes close=y + GET a address=home code=brief paid=yes + GET b address=home code=brief paid=yes close=y + POST c address=home code=brief paid=yes + WAIT 2000 + POST a address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes close=y + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 ? 3 4 ? 6 7 ? 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + 17 {set end { + GET b address=home code=brief paid=yes + POST a address=home code=brief paid=yes + GET a address=home code=brief paid=yes + POST c address=home code=brief paid=yes close=y + GET b address=home code=brief paid=yes + HEAD b address=home code=brief paid=yes close=y + POST c address=home code=brief paid=yes + WAIT 2000 + POST a address=home code=brief paid=yes + WAIT 2000 + GET c address=home code=brief paid=yes + STOP + } + set resShort {1 2 3 4 5 ? 7 8 9} + set resLong {1 2 3 4 5 6 7 8 9} + } + + + 18 {set end { + REPOST 0 + GET a + WAIT 2000 + POST b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 2 ? ?} + set resLong {1 2 3 4} + # resShort is overwritten below for the case ($te == 1). + } + + + 19 {set end { + REPOST 0 + GET a + WAIT 2000 + GET b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 2 ? ?} + set resLong {1 2 3 4} + } + + + 20 {set end { + POSTFRESH 1 + GET a + WAIT 2000 + POST b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 3 ?} + set resLong {1 3 4} + } + + + 21 {set end { + POSTFRESH 1 + GET a + WAIT 2000 + GET b address=home code=brief paid=yes + GET c + GET a + STOP + } + set resShort {1 2 ? ?} + set resLong {1 2 3 4} + } + + 22 {set end { + GET a + WAIT 2000 + KEEPALIVE 0 + POST b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 3 ?} + set resLong {1 3 4} + } + + + 23 {set end { + GET a + WAIT 2000 + KEEPALIVE 0 + GET b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 3 ?} + set resLong {1 3 4} + } + + 24 {set end { + GET a + KEEPALIVE 0 + POST b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 ? ?} + set resLong {1 3 4} + } + + + 25 {set end { + GET a + KEEPALIVE 0 + GET b address=home code=brief paid=yes + KEEPALIVE 1 + GET c + GET a + STOP + } + set resShort {1 ? ?} + set resLong {1 3 4} + } + + default { + return -code error {no matching script} + } + } + + + if {$ca < 3} { + # Not Keep-Alive. + set result "Passed all sanity checks." + + } elseif {$ca == 3} { + # Keep-Alive, not pipelined. 
+ set result {} + append result "Passed all sanity checks.\n" + append result "Have overlaps including response body:\n" + + } else { + # Keep-Alive, pipelined: ($ca == 4) + set result {} + append result "Passed all sanity checks.\n" + append result "Overlap-free without response body:\n" + append result "$resShort" + } + + # - The special case of test *.18*-testEof needs test results to be + # individually written. + # - These test -repost 0 when there is a POST to apply it to, and the server + # timeout has not been detected. + if {($cb == 18) && ($te == 1)} { + if {$ca < 3} { + # Not Keep-Alive. + set result "Passed all sanity checks." + + } elseif {$ca == 3 && $delay == 0} { + # Keep-Alive, not pipelined. + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + |Wrong sequence for token ::http::3 - {A X X} + |- and error(s) X + |Wrong sequence for token ::http::4 - {A X X X} + |- and error(s) X + | + |Have overlaps including response body: + | + }] + + } elseif {$ca == 3} { + # Keep-Alive, not pipelined. + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + | + |Have overlaps including response body: + | + }] + + } elseif {$delay == 0} { + # Keep-Alive, pipelined: ($ca == 4) + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + |Wrong sequence for token ::http::3 - {A X X} + |- and error(s) X + |Wrong sequence for token ::http::4 - {A X X X} + |- and error(s) X + | + |Overlap-free without response body: + | + }] + + } else { + set result [MakeMessage { + |Problems with sanity checks: + |Wrong sequence for token ::http::2 - {A B C D X X X} + |- and error(s) X + | + |Overlap-free without response body: + | + }] + + } + } + + return [list "$start$middle$end" $result] +} + +# ------------------------------------------------------------------------------ +# Proc MakeMessage +# ------------------------------------------------------------------------------ +# WHD's one-line command to generate multi-line strings from readable code. +# +# Example: +# set blurb [MakeMessage { +# |This command allows multi-line strings to be created with readable +# |code, and without breaking the rules for indentation. +# | +# |The command shifts the entire block of text to the left, omitting +# |the pipe character and the spaces to its left. +# }] +# ------------------------------------------------------------------------------ + +proc MakeMessage {in} { + regsub -all -line {^\s*\|} [string trim $in] {} + # N.B. Implicit Return. +} + + +proc ReturnTestScript {ca cb delay te} { + lassign [ReturnTestScriptAndResult $ca $cb $delay $te] script result + return $script +} + +proc ReturnTestResult {ca cb delay te} { + lassign [ReturnTestScriptAndResult $ca $cb $delay $te] script result + return $result +} + + +# ------------------------------------------------------------------------------ +# (2) Command to run a test script and use httpTest to analyse the logs. 
+# ------------------------------------------------------------------------------ + +namespace import httpTestScript::runHttpTestScript +namespace import httpTestScript::cleanupHttpTestScript + +proc RunTest {header footer delay te} { + set num [runHttpTestScript [ReturnTestScript $header $footer $delay $te]] + set skipOverlaps 0 + set notIncluded {} + + # -------------------------------------------------------------------------- + # Custom code for specific tests + # -------------------------------------------------------------------------- + if {$header < 3} { + set skipOverlaps 1 + for {set i 1} {$i <= $num} {incr i} { + lappend notIncluded $i + } + } elseif {$header > 2 && $footer == 18 && $te == 1} { + set skipOverlaps 1 + if {$delay == 0} { + # Transaction 1 is conventional. + # Check that transactions 2,3,4 are cancelled. + set notIncluded {1} + } else { + # Transaction 1 is conventional. + # Check that transaction 2 is cancelled. + # The timing of transactions 3 and 4 is uncertain. + set notIncluded {1 3 4} + } + } elseif {$footer in {20 22 23 24 25}} { + # Transaction 2 uses its own socket. + set notIncluded 2 + } else { + } + # -------------------------------------------------------------------------- + # End of custom code for specific tests + # -------------------------------------------------------------------------- + + + set Results [httpTest::LogAnalyse $num $skipOverlaps $notIncluded $notIncluded] + lassign $Results msg cleanE cleanF dirtyE dirtyF + if {$msg eq {}} { + set msg "Passed all sanity checks." + } else { + set msg "Problems with sanity checks:\n$msg" + } + + if 0 { + puts $msg + puts "Overlap-free including response body:\n$cleanF" + puts "Have overlaps including response body:\n$dirtyF" + puts "Overlap-free without response body:\n$cleanE" + puts "Have overlaps without response body:\n$dirtyE" + } + + if {$header < 3} { + # No ordering, just check that transactions all finish + set result $msg + } elseif {$header == 3} { + # Not pipelined - check overlaps with response body. + set result "$msg\nHave overlaps including response body:\n$dirtyF" + } else { + # Pipelined - check overlaps without response body. Check that the + # first request, the first requests after replay, and POSTs are clean. + set result "$msg\nOverlap-free without response body:\n$cleanE" + } + set ::nTokens $num + return $result +} + + +# ------------------------------------------------------------------------------ +# (3) VERBOSITY CONTROL +# ------------------------------------------------------------------------------ +# If tests fail, run an individual test with -verbose 1 or 2 for diagnosis. +# If still obscure, uncomment #Log and ##Log lines in the http package. +# ------------------------------------------------------------------------------ + +set ::httpTest::testOptions(-verbose) 0 + + +# ------------------------------------------------------------------------------ +# (4) Define the base URLs used for testing. Each must have a query string. +# ------------------------------------------------------------------------------ +# - A HTTP/1.1 server is required. It should be configured to provide +# persistent connections when requested to do so, and to close these +# connections if they are idle for one second. +# - The resource must be served with status 200 in response to a valid GET or +# POST. +# - The value of "page" is always specified in the query-string. Different +# resources for the three values of "page" allow testing of both chunked and +# unchunked transfer encoding. 
+# - The variables "close" and "delay" may be specified in the query-string (for
+#   a GET) or the request body (for a POST).
+# - "delay" is a numerical value in seconds, and causes the server to delay
+#   the response, including headers.
+# - "close", if it has the value "y", instructs the server to close the
+#   connection after the current request.
+# - Any other variables should be ignored.
+# ------------------------------------------------------------------------------
+
+namespace eval ::httpTestScript {
+    variable URL
+    array set URL {
+        a   http://test-tcl-http.kerlin.org/index.html?page=privacy
+        b   http://test-tcl-http.kerlin.org/index.html?page=conditions
+        c   http://test-tcl-http.kerlin.org/index.html?page=welcome
+    }
+}
+
+
+# ------------------------------------------------------------------------------
+# (5) Define the tests
+# ------------------------------------------------------------------------------
+# Constraints:
+# - serverNeeded - the URLs defined at (4) must be available, and must have the
+#                  properties specified there.
+# - duplicate    - the value of -pipeline does not matter if -keepalive 0
+# - timeout1s    - tests that work correctly only if the server closes
+#                  persistent connections after one second.
+#
+# Server timeout of persistent connections should be 1s.  Delays of 2s are
+# intended to cause timeout.
+# Servers are usually configured to use a longer timeout: this will cause the
+# tests to fail.  The "2000" could be replaced with a larger number, but the
+# tests will then be inconveniently slow.
+# ------------------------------------------------------------------------------
+
+#testConstraint serverNeeded 1
+#testConstraint timeout1s 1
+#testConstraint duplicate 1
+
+# ------------------------------------------------------------------------------
+# Proc SetTestEof - to edit the command ::http::KeepSocket
+# ------------------------------------------------------------------------------
+# The usual line in command ::http::KeepSocket is "    set TEST_EOF 0".
+# Whether the value set in the file is 0 or 1, change it here to the value
+# specified by the argument.
+#
+# It is worth doing all tests for both values of the argument.
+#
+# test 0 - ::http::KeepSocket is unchanged, detects server eof where possible
+#          and closes the connection.
+# test 1 - ::http::KeepSocket is edited, does not detect server eof, so the
+#          reaction to finding server eof can be tested without the difficulty
+#          of testing in the few milliseconds of an asynchronous close event.
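+#
+# For example, "SetTestEof 1" rewrites the body of ::http::KeepSocket so that
+# its "set TEST_EOF 0" line becomes "set TEST_EOF 1"; "SetTestEof 0" restores
+# the usual behaviour.  (The tests below call it in their -setup and -cleanup
+# scripts.)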
+# ------------------------------------------------------------------------------ + +proc SetTestEof {test} { + set body [info body ::http::KeepSocket] + set subs " set TEST_EOF $test" + set count [regsub -line -all -- {^\s*set TEST_EOF .*$} $body $subs newBody] + if {$count != 1} { + return -code error {proc ::http::KeepSocket has unexpected form} + } + proc ::http::KeepSocket {token} $newBody + return +} + +for {set header 1} {$header <= 4} {incr header} { + if {$header == 4} { + set ::httpTest::testOptions(-dotted) 1 + set match glob + } else { + set ::httpTest::testOptions(-dotted) 0 + set match exact + } + + if {$header == 2} { + set cons0 {serverNeeded duplicate} + } else { + set cons0 serverNeeded + } + + for {set footer 1} {$footer <= 25} {incr footer} { + foreach {delay label} { + 0 a + 1 b + 2 c + 3 d + 5 e + 8 f + 12 g + 100 h + 500 i + 2000 j + } { + foreach te {0 1} { + if {$te} { + set tag testEof + } else { + set tag normal + } + set suffix {} + set cons $cons0 + + # ------------------------------------------------------------------ + # Custom code for individual tests + # ------------------------------------------------------------------ + if {$footer in {18}} { + # Custom code: + if {($label eq "j") && ($te == 1)} { + continue + } + if {$te == 1} { + # The test (of REPOST 0) is useful if tag is "testEof" + # (server timeout without client reaction). The same test + # has a different result if tag is "normal". + + set suffix " - extra test for -repost 0 - ::http::2 must be" + append suffix " cancelled" + if {($delay == 0)} { + append suffix ", along with ::http::3 ::http::4 if" + append suffix " the test creates these before ::http::2" + append suffix " is cancelled" + } + } else { + } + } elseif {$footer in {19}} { + set suffix " - extra test for -repost 0" + } elseif {$footer in {20 21}} { + set suffix " - extra test for -postfresh 1" + if {($footer == 20)} { + append suffix " - ::http::2 uses a separate socket" + append suffix ", other requests use a persistent connection" + } + } elseif {$footer in {22 23 24 25}} { + append suffix " - ::http::2 uses a separate socket" + append suffix ", other requests use a persistent connection" + } else { + } + + if {($footer >= 13 && $footer <= 23)} { + # Test use WAIT and depend on server timeout before this time. + lappend cons timeout1s + } + # ------------------------------------------------------------------ + # End of custom code. + # ------------------------------------------------------------------ + + set name "pipeline test header $header footer $footer delay $delay $tag$suffix" + + + # Here's the test: + test http11-${header}.${footer}${label}-${tag} $name -constraints $cons \ + -setup [string map [list TE $te] { + http::init + set http::http(uid) 0 + # Restore default values for tests: + http::config -pipeline 1 -postfresh 0 -repost 1 + SetTestEof {TE} + }] -body [list RunTest $header $footer $delay $te] -cleanup { + # Restore default values for tests: + http::config -pipeline 1 -postfresh 0 -repost 1 + cleanupHttpTestScript + SetTestEof 0 + set ::httpTest::testResults {} + after 2000 + # Wait for persistent sockets on the server to time out. 
+                } -result [ReturnTestResult $header $footer $delay $te] -match $match
+
+
+        }
+
+    }
+    }
+}
+
+# ------------------------------------------------------------------------------
+# (*) Notes on tests *.18*-testEof, *.19*-testEof - these test -repost 0
+# ------------------------------------------------------------------------------
+# These tests are a bit awkward because the main test kit analyses whether all
+# requests are satisfied, with retries if necessary, and it has result analysis
+# for processing retry logs.
+# - *.18*-testEof tests that certain requests are NOT satisfied, so the analysis
+#   is a one-off.
+# - Tests *.18a-testEof depend on client/server timing - the test needs to call
+#   http::geturl for all requests before the POST (request 2) is cancelled.
+#   We test that requests 2, 3, 4 are all cancelled.
+# - Other tests *.18*-testEof may not issue requests 3 and 4 in time for them
+#   to be added to the write queue before request 2 is completed. We simply
+#   check that request 2 is cancelled.
+# - The behaviour is different if all connections are allowed to time out
+#   (label "j"). This case is not needed to test -repost 0, and is omitted.
+# - Tests *.18*-normal and *.19* are conventional (-repost 0 should have no
+#   effect).
+# ------------------------------------------------------------------------------
+
+
+unset header footer delay label suffix match cons name te
+namespace delete ::httpTest
+namespace delete ::httpTestScript
+
+::tcltest::cleanupTests
diff --git a/tests/httpTest.tcl b/tests/httpTest.tcl
new file mode 100644
index 0000000..ad08048
--- /dev/null
+++ b/tests/httpTest.tcl
@@ -0,0 +1,431 @@
+# httpTest.tcl
+#
+# Test HTTP/1.1 concurrent requests including
+# queueing, pipelining and retries.
+#
+# Copyright (C) 2018 Keith Nash
+#
+# See the file "license.terms" for information on usage and redistribution
+# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+
+# ------------------------------------------------------------------------------
+# "Package" httpTest for analysis of Log output of http requests.
+# ------------------------------------------------------------------------------
+# This is a specialised test kit for examining the presence, ordering, and
+# overlap of multiple HTTP transactions over a persistent ("Keep-Alive")
+# connection; and also for testing reconnection in accordance with RFC 7230 when
+# the connection is lost.
+#
+# This kit is probably not useful for other purposes.  It depends on the
+# presence of specific Log commands in the http library, and it interprets the
+# logs that these commands create.
+# ------------------------------------------------------------------------------
+
+package require http
+
+namespace eval ::http {
+    variable TestStartTimeInMs [clock milliseconds]
+}
+
+namespace eval ::httpTest {
+    variable testResults {}
+    variable testOptions
+    array set testOptions {
+        -verbose 0
+        -dotted 1
+    }
+    # -verbose - 0 quiet 1 write to stderr 2 write more
+    # -dotted - (boolean) use dots for absences in lists of transactions
+}
+
+proc httpTest::Puts {txt} {
+    variable testOptions
+    if {$testOptions(-verbose) > 0} {
+        puts stderr $txt
+        flush stderr
+    }
+    return
+}
+
+# http::Log
+#
+# A special-purpose logger used for running tests.
+# - Processes Log calls that have "^" in their arguments, and records them in
+#   variable ::httpTest::testResults.
+# - Also writes them to stderr (using Puts) if ($testOptions(-verbose) > 0).
+# - Also writes Log calls that do not have "^", if ($testOptions(-verbose) > 1).
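+#
+# Illustration (the timestamp and token number here are invented): the http
+# package call
+#     Log ^E2 end of response headers - token ::http::2
+# is recorded by this logger as
+#     1234 ^E2 end of response headers - token ::http::2
+# and LogRecord (below) keeps only the {letter number} pair {E 2} in
+# ::httpTest::testResults for later analysis.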
+ +proc http::Log {args} { + variable TestStartTimeInMs + set time [expr {[clock milliseconds] - $TestStartTimeInMs}] + set txt [list $time {*}$args] + if {[string first ^ $txt] != -1} { + ::httpTest::LogRecord $txt + ::httpTest::Puts $txt + } elseif {$::httpTest::testOptions(-verbose) > 1} { + ::httpTest::Puts $txt + } + return +} + + +# Called by http::Log (the "testing" version) to record logs for later analysis. + +proc httpTest::LogRecord {txt} { + variable testResults + + set pos [string first ^ $txt] + set len [string length $txt] + if {$pos > $len - 3} { + puts stderr "Logging Error: $txt" + puts stderr "Fix this call to Log in http-*.tm so it has ^ then\ + a letter then a numeral." + flush stderr + } elseif {$pos == -1} { + # Called by mistake. + } else { + set letter [string index $txt [incr pos]] + set number [string index $txt [incr pos]] + # Max 9 requests! + lappend testResults [list $letter $number] + } + + return +} + + +# ------------------------------------------------------------------------------ +# Commands for analysing the logs recorded when calling http::geturl. +# ------------------------------------------------------------------------------ + +# httpTest::TestOverlaps -- +# +# The main test for correct behaviour of pipelined and sequential +# (non-pipelined) transactions. Other tests should be run first to detect +# any inconsistencies in the data (e.g. absence of the elements that are +# examined here). +# +# Examine the sequence $someResults for each transaction from 1 to $n, +# ignoring any that are listed in $badTrans. +# Determine whether the elements "B" to $term for one transaction overlap +# elements "B" to $term for the previous and following transactions. +# +# Transactions in the list $badTrans are not included in "clean" or +# "dirty", but their possible overlap with other transactions is noted. +# Transactions in the list $notPiped are a subset of $badTrans, and +# their possible overlap with other transactions is NOT noted. +# +# Arguments: +# someResults - list of results, each of the form {letter numeral} +# n - number of HTTP transactions +# term - letter that indicated end of search range. "E" for testing +# overlaps from start of request to end of response headers. +# "F" to extend to the end of the response body. +# msg - the cumulative message from sanity checks. Append to it only +# to report a test failure. +# badTrans - list of transaction numbers not to be assessed as "clean" or +# "dirty" +# notPiped - subset of badTrans. List of transaction numbers that cannot +# taint another transaction by overlapping with it, because it +# used a different socket. +# +# Return value: [list $msg $clean $dirty] +# msg - warning messages: nothing will be appended to argument $msg if there +# is an error with the test. 
+# clean - list of transactions that have no overlap with other transactions +# dirty - list of transactions that have YES overlap with other transactions + +proc httpTest::TestOverlaps {someResults n term msg badTrans notPiped} { + variable testOptions + + # Check whether transactions overlap: + set clean {} + set dirty {} + for {set i 1} {$i <= $n} {incr i} { + if {$i in $badTrans} { + continue + } + set myStart [lsearch -exact $someResults [list B $i]] + set myEnd [lsearch -exact $someResults [list $term $i]] + + if {($myStart == -1 || $myEnd == -1)} { + set res "Cannot find positions of transaction $i" + append msg $res \n + Puts $res + } + + set overlaps {} + for {set j $myStart} {$j <= $myEnd} {incr j} { + lassign [lindex $someResults $j] letter number + if {$number != $i && $letter ne "A" && $number ni $notPiped} { + lappend overlaps $number + } + } + + if {[llength $overlaps] == 0} { + set res "Transaction $i has no overlaps" + Puts $res + lappend clean $i + if {$testOptions(-dotted)} { + # N.B. results from different segments are concatenated. + lappend dirty . + } else { + } + } else { + set res "Transaction $i overlaps with [join $overlaps { }]" + Puts $res + lappend dirty $i + if {$testOptions(-dotted)} { + # N.B. results from different segments are concatenated. + lappend clean . + } else { + } + } + } + return [list $msg $clean $dirty] +} + +# httpTest::PipelineNext -- +# +# Test whether prevPair, pair are valid as consecutive elements of a pipelined +# sequence (Start 1), (End 1), (Start 2), (End 2) ... +# Numbers are integers increasing (by 1 if argument "any" is false), and need +# not begin with 1. +# The first element of the sequence has prevPair {} and is always passed as +# valid. +# +# Arguments; +# Start - string that labels the start of a segment +# End - string that labels the end of a segment +# prevPair - previous "pair" (list of string and number) element of a +# sequence, or {} if argument "pair" is the first in the +# sequence. +# pair - current "pair" (list of string and number) element of a +# sequence +# any - (boolean) iff true, accept any increasing sequence of integers. +# If false, integers must increase by 1. +# +# Return value - boolean, true iff the two pairs are valid consecutive elements. + +proc httpTest::PipelineNext {Start End prevPair pair any} { + if {$prevPair eq {}} { + return 1 + } + + lassign $prevPair letter number + lassign $pair newLetter newNumber + if {$letter eq $Start} { + return [expr {($newLetter eq $End) && ($newNumber == $number)}] + } elseif {$any} { + set nxt [list $Start [expr {$number + 1}]] + return [expr {($newLetter eq $Start) && ($newNumber > $number)}] + } else { + set nxt [list $Start [expr {$number + 1}]] + return [expr {($newLetter eq $Start) && ($newNumber == $number + 1)}] + } +} + +# httpTest::TestPipeline -- +# +# Given a sequence of "pair" elements, check that the elements whose string is +# $Start or $End form a valid pipeline. Ignore other elements. +# +# Return value: {} if valid pipeline, otherwise a non-empty error message. 
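+#
+# For example, with Start "B" and End "C", the retained pairs must read
+#     {B 1} {C 1} {B 2} {C 2} {B 3} {C 3} ...
+# (token numbers consecutive when badTrans is empty); an interleaved sequence
+# such as {B 1} {B 2} {C 1} {C 2} is reported as not pipelined.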
+ +proc httpTest::TestPipeline {someResults n Start End msg desc badTrans} { + set sequence {} + set prevPair {} + set ok 1 + set any [llength $badTrans] + foreach pair $someResults { + lassign $pair letter number + if {($letter in [list $Start $End]) && ($number ni $badTrans)} { + lappend sequence $pair + if {![PipelineNext $Start $End $prevPair $pair $any]} { + set ok 0 + break + } + set prevPair $pair + } + } + + if {!$ok} { + set res "$desc are not pipelined: {$sequence}" + append msg $res \n + Puts $res + } + return $msg +} + +# httpTest::TestSequence -- +# +# Examine each transaction from 1 to $n, ignoring any that are listed +# in $badTrans. +# Check that each transaction has elements A to F, in alphabetical order. + +proc httpTest::TestSequence {someResults n msg badTrans} { + variable testOptions + + for {set i 1} {$i <= $n} {incr i} { + if {$i in $badTrans} { + continue + } + set sequence {} + foreach pair $someResults { + lassign $pair letter number + if {$number == $i} { + lappend sequence $letter + } + } + if {$sequence eq {A B C D E F}} { + } else { + set res "Wrong sequence for token ::http::$i - {$sequence}" + append msg $res \n + Puts $res + if {"X" in $sequence} { + set res "- and error(s) X" + append msg $res \n + Puts $res + } + if {"Y" in $sequence} { + set res "- and warnings(s) Y" + append msg $res \n + Puts $res + } + } + } + return $msg +} + +proc httpTest::MostAnalysis {someResults n msg skipOverlaps badTrans notPiped} { + variable testOptions + + # Check that stages for "good" transactions are all present and correct: + set msg [TestSequence $someResults $n $msg $badTrans] + + # Check that requests are pipelined: + set msg [TestPipeline $someResults $n B C $msg Requests $notPiped] + + # Check that responses are pipelined: + set msg [TestPipeline $someResults $n D F $msg Responses $notPiped] + + if {$skipOverlaps} { + set cleanE {} + set dirtyE {} + set cleanF {} + set dirtyF {} + } else { + Puts "Overlaps including response body (test for non-pipelined case)" + lassign [TestOverlaps $someResults $n F $msg $badTrans $notPiped] msg cleanF dirtyF + + Puts "Overlaps without response body (test for pipelined case)" + lassign [TestOverlaps $someResults $n E $msg $badTrans $notPiped] msg cleanE dirtyE + } + + return [list $msg $cleanE $cleanF $dirtyE $dirtyF] +} + +# httpTest::ProcessRetries -- +# +# Command to examine results for socket-changing records [PQR], +# divide the results into segments for each connection, and analyse each segment +# individually. +# (Could add $sock to the logging to simplify this, but never mind.) +# +# In each segment, identify any transactions that are not included, and +# any that are aborted, to assist subsequent testing. +# +# Prepend A records (socket-independent) to each segment for transactions that +# were scheduled (by A) but not completed (by F). Pass each segment to +# MostAnalysis for processing. 
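+#
+# Schematic example (only the relevant pairs shown):
+#     {A 1} {B 1} ... {F 1} {A 2} {B 2} {P 2} {B 2} ... {F 2}
+# splits at the retry record {P 2}.  In the first segment token 1 is complete
+# (A..F) and token 2 is started (A) but not finished (no F), so a dummy {A 2}
+# is prepended to the second segment before that segment is analysed in turn.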
+ +proc httpTest::ProcessRetries {someResults n msg skipOverlaps notIncluded notPiped} { + variable testOptions + + set nextRetry [lsearch -glob -index 0 $someResults {[PQR]}] + if {$nextRetry == -1} { + return [MostAnalysis $someResults $n $msg $skipOverlaps $notIncluded $notPiped] + } + set badTrans $notIncluded + set tryCount 0 + set try $nextRetry + incr tryCount + lassign [lindex $someResults $try] letter number + Puts "Processing retry [lindex $someResults $try]" + set beforeTry [lrange $someResults 0 $try-1] + Puts [join $beforeTry \n] + set afterTry [lrange $someResults $try+1 end] + + set dummyTry {} + for {set i 1} {$i <= $n} {incr i} { + set first [lsearch -exact $beforeTry [list A $i]] + set last [lsearch -exact $beforeTry [list F $i]] + if {$first == -1} { + set res "Transaction $i was not started in connection number $tryCount" + # append msg $res \n + Puts $res + if {$i ni $badTrans} { + lappend badTrans $i + } else { + } + } elseif {$last == -1} { + set res "Transaction $i was started but unfinished in connection number $tryCount" + # append msg $res \n + Puts $res + lappend badTrans $i + lappend dummyTry [list A $i] + } else { + set res "Transaction $i was started and finished in connection number $tryCount" + # append msg $res \n + Puts $res + lappend notIncluded $i + } + } + + # Analyse the part of the results before the first replay: + set HeadResults [MostAnalysis $beforeTry $n $msg $skipOverlaps $badTrans $notPiped] + lassign $HeadResults msg cleanE1 cleanF1 dirtyE1 dirtyF1 + + # Pass the rest of the results to be processed recursively. + set afterTry [concat $dummyTry $afterTry] + set TailResults [ProcessRetries $afterTry $n $msg $skipOverlaps $notIncluded $notPiped] + lassign $TailResults msg cleanE2 cleanF2 dirtyE2 dirtyF2 + + set cleanE [concat $cleanE1 $cleanE2] + set cleanF [concat $cleanF1 $cleanF2] + set dirtyE [concat $dirtyE1 $dirtyE2] + set dirtyF [concat $dirtyF1 $dirtyF2] + return [list $msg $cleanE $cleanF $dirtyE $dirtyF] +} + +proc httpTest::LogAnalyse {n skipOverlaps notIncluded notPiped} { + variable testResults + variable testOptions + + # Check that each data item has the correct form {letter numeral}. + set ii 0 + set ok 1 + foreach pair $testResults { + lassign $pair letter number + if { [string match {[A-Z]} $letter] + && [string match {[0-9]} $number] + } { + # OK + } else { + set ok 0 + set res "Error: testResults has bad element {$pair} at position $ii" + append msg $res \n + Puts $res + } + incr ii + } + + if {!$ok} { + return $msg + } + set msg {} + + Puts [join $testResults \n] + ProcessRetries $testResults $n $msg $skipOverlaps $notIncluded $notPiped + # N.B. Implicit Return. +} diff --git a/tests/httpTestScript.tcl b/tests/httpTestScript.tcl new file mode 100644 index 0000000..a826c81 --- /dev/null +++ b/tests/httpTestScript.tcl @@ -0,0 +1,509 @@ +# httpTestScript.tcl +# +# Test HTTP/1.1 concurrent requests including +# queueing, pipelining and retries. +# +# Copyright (C) 2018 Keith Nash +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. + +# ------------------------------------------------------------------------------ +# "Package" httpTestScript for executing test scripts written in a convenient +# shorthand. +# ------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ +# Documentation for "package" httpTestScript. 
+# ------------------------------------------------------------------------------ +# To use the package: +# (a) define URLs as the values of elements in the array ::httpTestScript +# (b) define a script in terms of the commands +# START STOP DELAY KEEPALIVE WAIT PIPELINE GET HEAD POST +# referring to URLs by the name of the corresponding array element. The +# script can include any other Tcl commands, and evaluates in the +# httpTestScript namespace. +# (c) Use the command httpTestScript::runHttpTestScript to evaluate the script. +# (d) For tcltest tests, wrap the runHttpTestScript call in a suitable "test" +# command. +# ------------------------------------------------------------------------------ +# START +# Must be the first command of the script. +# +# STOP +# Must be present in the script to avoid waiting for client timeout. +# Usually the last command, but can be elsewhere to end a script prematurely. +# Subsequent httpTestScript commands will have no effect. +# +# DELAY ms +# If there are no WAIT commands, this sets the delay in ms between subsequent +# calls to http::geturl. Default 500ms. +# +# KEEPALIVE +# Set the value passed to http::geturl for the -keepalive option. The command +# applies to subsequent requests in the script. Default 1. +# +# WAIT ms +# Pause for a time in ms before sending subsequent requests. +# +# PIPELINE boolean +# Set the value of -pipeline using http::config. The last PIPELINE command +# in the script applies to every request. Default 1. +# +# POSTFRESH boolean +# Set the value of -postfresh using http::config. The last POSTFRESH command +# in the script applies to every request. Default 0. +# +# REPOST boolean +# Set the value of -repost using http::config. The last REPOST command +# in the script applies to every request. Default 1 for httpTestScript. +# (Default value in http is 0). +# +# GET uriCode ?arg ...? +# Send a HTTP request using the GET method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will be joined by "&" and appended to the query +# string with a preceding "&". +# +# HEAD uriCode ?arg ...? +# Send a HTTP request using the HEAD method. +# Arguments: as for GET +# +# POST uriCode ?arg ...? +# Send a HTTP request using the POST method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will be joined by "&" and used as the request body. +# ------------------------------------------------------------------------------ + +namespace eval ::httpTestScript { + namespace export runHttpTestScript cleanupHttpTestScript +} + +# httpTestScript::START -- +# Initialise, and create a long-stop timeout. 
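+#
+# A complete script in the shorthand documented above begins with START, for
+# example (assuming the codes a and b are defined in ::httpTestScript::URL):
+#
+#   START
+#   KEEPALIVE 1
+#   PIPELINE 1
+#   GET a
+#   POST b address=home code=brief paid=yes
+#   GET a close=y
+#   STOP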
+ +proc httpTestScript::START {} { + variable CountRequestedSoFar + variable RequestsWhenStopped + variable KeepAlive + variable Delay + variable TimeOutCode + variable TimeOutDone + variable StartDone + variable StopDone + variable CountFinishedSoFar + variable RequestList + variable RequestsMade + variable ExtraTime + variable ActualKeepAlive + + if {[info exists StartDone] && ($StartDone == 1)} { + set msg {START has been called twice without an intervening STOP} + return -code error $msg + } + + set StartDone 1 + set StopDone 0 + set TimeOutDone 0 + set CountFinishedSoFar 0 + set CountRequestedSoFar 0 + set RequestList {} + set RequestsMade {} + set ExtraTime 0 + set ActualKeepAlive 1 + + # Undefined until a STOP command: + unset -nocomplain RequestsWhenStopped + + # Default values: + set KeepAlive 1 + set Delay 500 + + # Default values for tests: + KEEPALIVE 1 + PIPELINE 1 + POSTFRESH 0 + REPOST 1 + + set TimeOutCode [after 30000 httpTestScript::TimeOutNow] +# set TimeOutCode [after 4000 httpTestScript::TimeOutNow] + return +} + +# httpTestScript::STOP -- +# Do not process any more commands. The commands will be executed but will +# silently do nothing. + +proc httpTestScript::STOP {} { + variable CountRequestedSoFar + variable CountFinishedSoFar + variable RequestsWhenStopped + variable TimeOutCode + variable StartDone + variable StopDone + variable RequestsMade + + if {$StopDone} { + # Don't do anything on a second call. + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + set StopDone 1 + set StartDone 0 + set RequestsWhenStopped $CountRequestedSoFar + unset -nocomplain StartDone + + if {$CountFinishedSoFar == $RequestsWhenStopped} { + if {[info exists TimeOutCode]} { + after cancel $TimeOutCode + } + set ::httpTestScript::FOREVER 0 + } + return +} + +# httpTestScript::DELAY -- +# If there are no WAIT commands, this sets the delay in ms between subsequent +# calls to http::geturl. Default 500ms. + +proc httpTestScript::DELAY {t} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + variable Delay + + set Delay $t + return +} + +# httpTestScript::KEEPALIVE -- +# Set the value passed to http::geturl for the -keepalive option. Default 1. + +proc httpTestScript::KEEPALIVE {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + variable KeepAlive + set KeepAlive $b + return +} + +# httpTestScript::WAIT -- +# Pause for a time in ms before processing any more commands. + +proc httpTestScript::WAIT {t} { + variable StartDone + variable StopDone + variable ExtraTime + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + if {(![string is integer -strict $t]) || $t < 0} { + return -code error {argument to WAIT must be a non-negative integer} + } + + incr ExtraTime $t + + return +} + +# httpTestScript::PIPELINE -- +# Pass a value to http::config -pipeline. 
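+#
+# For example, a script preamble of "KEEPALIVE 1" followed by "PIPELINE 0"
+# exercises the queued (non-pipelined) mode of a persistent connection, as in
+# the header-3 scripts of httpPipeline.test.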
+ +proc httpTestScript::PIPELINE {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + ::http::config -pipeline $b + ::http::Log http(-pipeline) is now [::http::config -pipeline] + return +} + +# httpTestScript::POSTFRESH -- +# Pass a value to http::config -postfresh. + +proc httpTestScript::POSTFRESH {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + ::http::config -postfresh $b + ::http::Log http(-postfresh) is now [::http::config -postfresh] + return +} + +# httpTestScript::REPOST -- +# Pass a value to http::config -repost. + +proc httpTestScript::REPOST {b} { + variable StartDone + variable StopDone + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + ::http::config -repost $b + ::http::Log http(-repost) is now [::http::config -repost] + return +} + +# httpTestScript::GET -- +# Send a HTTP request using the GET method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will each be preceded by "&" and appended to the query +# string. + +proc httpTestScript::GET {uriCode args} { + variable RequestList + lappend RequestList GET + RequestAfter $uriCode 0 {} {*}$args + return +} + +# httpTestScript::HEAD -- +# Send a HTTP request using the HEAD method. +# Arguments: as for GET + +proc httpTestScript::HEAD {uriCode args} { + variable RequestList + lappend RequestList HEAD + RequestAfter $uriCode 1 {} {*}$args + return +} + +# httpTestScript::POST -- +# Send a HTTP request using the POST method. +# Arguments: +# uriCode - the code for the base URI - the value must be stored in +# ::httpTestScript::URL($uriCode). +# args - strings that will be joined by "&" and used as the request body. + +proc httpTestScript::POST {uriCode args} { + variable RequestList + lappend RequestList POST + RequestAfter $uriCode 0 {use} {*}$args + return +} + + +proc httpTestScript::RequestAfter {uriCode validate query args} { + variable CountRequestedSoFar + variable Delay + variable ExtraTime + variable StartDone + variable StopDone + variable KeepAlive + + if {$StopDone} { + return + } + + if {![info exists StartDone]} { + return -code error {initialise the script by calling command START} + } + + incr CountRequestedSoFar + set idelay [expr {($CountRequestedSoFar - 1) * $Delay + 10 + $ExtraTime}] + + # Could pass values of -pipeline, -postfresh, -repost if it were + # useful to change these mid-script. 
+ after $idelay [list httpTestScript::Requester $uriCode $KeepAlive $validate $query {*}$args] + return +} + +proc httpTestScript::Requester {uriCode keepAlive validate query args} { + variable URL + + ::http::config -accept {*/*} + + set absUrl $URL($uriCode) + if {$query eq {}} { + if {$args ne {}} { + append absUrl & [join $args &] + } + set queryArgs {} + } elseif {$validate} { + return -code error {cannot have both -validate (HEAD) and -query (POST)} + } else { + set queryArgs [list -query [join $args &]] + } + + if {[catch { + ::http::geturl $absUrl \ + -validate $validate \ + -timeout 5000 \ + {*}$queryArgs \ + -keepalive $keepAlive \ + -command ::httpTestScript::WhenFinished + } token]} { + set msg $token + catch {puts stderr "Error: $msg"} + return + } else { + # Request will begin. + } + + return + +} + +proc httpTestScript::TimeOutNow {} { + variable TimeOutDone + + set TimeOutDone 1 + set ::httpTestScript::FOREVER 0 + return +} + +proc httpTestScript::WhenFinished {hToken} { + variable CountFinishedSoFar + variable RequestsWhenStopped + variable TimeOutCode + variable StopDone + variable RequestList + variable RequestsMade + variable ActualKeepAlive + + upvar #0 $hToken state + + if {[catch { + if { [info exists state(transfer)] + && ($state(transfer) eq "chunked") + } { + set Trans chunked + } else { + set Trans unchunked + } + + if { [info exists ::httpTest::testOptions(-verbose)] + && ($::httpTest::testOptions(-verbose) > 0) + } { + puts "Token $hToken +Response $state(http) +Status $state(status) +Method $state(method) +Transfer $Trans +Size $state(currentsize) +URL $state(url) +" + } + + if {!$state(-keepalive)} { + set ActualKeepAlive 0 + } + + if {[info exists state(method)]} { + lappend RequestsMade $state(method) + } else { + lappend RequestsMade UNKNOWN + } + set tk [namespace tail $hToken] + + if { ($state(http) != {HTTP/1.1 200 OK}) + || ($state(status) != {ok}) + || (($state(currentsize) == 0) && ($state(method) ne "HEAD")) + } { + ::http::Log ^X$tk unexpected result Response $state(http) Status $state(status) Size $state(currentsize) - token $hToken + } + } err]} { + ::http::Log ^X$tk httpTestScript::WhenFinished failed with error status: $err - token $hToken + } + + incr CountFinishedSoFar + if {$StopDone && ($CountFinishedSoFar == $RequestsWhenStopped)} { + if {[info exists TimeOutCode]} { + after cancel $TimeOutCode + } + if {$RequestsMade ne $RequestList && $ActualKeepAlive} { + ::http::Log ^X$tk unexpected result - Script asked for "{$RequestList}" but got "{$RequestsMade}" - token $hToken + } + set ::httpTestScript::FOREVER 0 + } + + return +} + + +proc httpTestScript::runHttpTestScript {scr} { + variable TimeOutDone + variable RequestsWhenStopped + + after idle [list namespace eval ::httpTestScript $scr] + vwait ::httpTestScript::FOREVER + # N.B. does not automatically execute in this namespace, unlike some other events. + # Release when all requests have been served or have timed out. + + if {$TimeOutDone} { + return -code error {test script timed out} + } + + return $RequestsWhenStopped +} + + +proc httpTestScript::cleanupHttpTestScript {} { + variable TimeOutDone + variable RequestsWhenStopped + + if {![info exists RequestsWhenStopped]} { + return -code error {Cleanup Failed: RequestsWhenStopped is undefined} + } + + for {set i 1} {$i <= $RequestsWhenStopped} {incr i} { + http::cleanup ::http::$i + } + + return +} -- cgit v0.12