author     Brad King <brad.king@kitware.com>   2015-08-12 19:43:52 (GMT)
committer  Brad King <brad.king@kitware.com>   2015-08-12 19:43:52 (GMT)
commit     91e8d35ab8ec2d62478a42eff10af88713497fad (patch)
tree       ff8cd1bd1ce3c42ef56c76b5ab471831a80c4665 /Utilities/cmcurl/lib/transfer.c
parent     602cdc06a01b7c5c0eb444111382b09040f677ee (diff)
parent     706542615828488a5ad197d0ef3dd5e42eb739c4 (diff)
Merge branch 'curl-upstream' into update-curl
Resolve conflicts by taking the upstream side when possible and otherwise integrating the changes from both sides. Be careful in CMakeLists.txt, where the OPENSSL code block that we modified previously has moved, and preserve our previous modifications in the new location.
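The page records only the result of the merge; the exact commands used are not part of the commit. A minimal sketch of the kind of workflow the message describes, assuming the branch names from the subject line and manual conflict resolution in an editor, might look like this:

    # merge the imported upstream curl sources into the integration branch
    git checkout update-curl
    git merge curl-upstream

    # for conflicted files where the upstream side can simply be taken,
    # "theirs" is curl-upstream when merging it into update-curl
    git checkout --theirs Utilities/cmcurl/lib/transfer.c   # example path; repeat per file
    git add Utilities/cmcurl/lib/transfer.c

    # CMakeLists.txt needs manual care: the previously modified OPENSSL block
    # moved upstream, so re-apply the local modifications in the new location
    # (assumed path of the CMakeLists.txt mentioned in the message)
    $EDITOR Utilities/cmcurl/CMakeLists.txt
    git add Utilities/cmcurl/CMakeLists.txt

    # record the merge commit
    git commit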
Diffstat (limited to 'Utilities/cmcurl/lib/transfer.c')
-rw-r--r--  Utilities/cmcurl/lib/transfer.c | 145
1 file changed, 79 insertions(+), 66 deletions(-)
diff --git a/Utilities/cmcurl/lib/transfer.c b/Utilities/cmcurl/lib/transfer.c
index dc817a6..718139b 100644
--- a/Utilities/cmcurl/lib/transfer.c
+++ b/Utilities/cmcurl/lib/transfer.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2014, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -75,16 +75,14 @@
#include "curl_ntlm.h"
#include "http_negotiate.h"
#include "share.h"
-#include "curl_memory.h"
#include "select.h"
#include "multiif.h"
#include "connect.h"
#include "non-ascii.h"
+#include "curl_printf.h"
-#define _MPRINTF_REPLACE /* use our functions only */
-#include <curl/mprintf.h>
-
-/* The last #include file should be: */
+/* The last #include files should be: */
+#include "curl_memory.h"
#include "memdebug.h"
/*
@@ -117,8 +115,8 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
/* this function returns a size_t, so we typecast to int to prevent warnings
with picky compilers */
- nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
- buffersize, conn->fread_in);
+ nread = (int)data->set.fread_func(data->req.upload_fromhere, 1,
+ buffersize, data->set.in);
if(nread == CURL_READFUNC_ABORT) {
failf(data, "operation aborted by callback");
@@ -203,7 +201,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
strlen(endofline_network));
#ifdef CURL_DOES_CONVERSIONS
- CURLcode res;
+ CURLcode result;
int length;
if(data->set.prefer_ascii) {
/* translate the protocol and data */
@@ -213,10 +211,10 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
/* just translate the protocol portion */
length = strlen(hexbuffer);
}
- res = Curl_convert_to_network(data, data->req.upload_fromhere, length);
+ result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
/* Curl_convert_to_network calls failf if unsuccessful */
- if(res)
- return(res);
+ if(result)
+ return result;
#endif /* CURL_DOES_CONVERSIONS */
if((nread - hexlen) == 0)
@@ -227,11 +225,11 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
}
#ifdef CURL_DOES_CONVERSIONS
else if((data->set.prefer_ascii) && (!sending_http_headers)) {
- CURLcode res;
- res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
+ CURLcode result;
+ result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
/* Curl_convert_to_network calls failf if unsuccessful */
- if(res != CURLE_OK)
- return(res);
+ if(result)
+ return result;
}
#endif /* CURL_DOES_CONVERSIONS */
@@ -319,8 +317,7 @@ static int data_pending(const struct connectdata *conn)
TRUE. The thing is if we read everything, then http2_recv won't
be called and we cannot signal the HTTP/2 stream has closed. As
a workaround, we return nonzero here to call http2_recv. */
- ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20 &&
- conn->proto.httpc.closed);
+ ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
#else
Curl_ssl_data_pending(conn, FIRSTSOCKET);
#endif
@@ -435,6 +432,7 @@ static CURLcode readwrite_data(struct SessionHandle *data,
else {
/* read nothing but since we wanted nothing we consider this an OK
situation to proceed from */
+ DEBUGF(infof(data, "readwrite_data: we're done!\n"));
nread = 0;
}
@@ -496,7 +494,7 @@ static CURLcode readwrite_data(struct SessionHandle *data,
/* We've stopped dealing with input, get out of the do-while loop */
if(nread > 0) {
- if(Curl_multi_pipeline_enabled(conn->data->multi)) {
+ if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
infof(data,
"Rewinding stream by : %zd"
" bytes on url %s (zero-length body)\n",
@@ -547,6 +545,18 @@ static CURLcode readwrite_data(struct SessionHandle *data,
if(data->state.resume_from && !k->content_range &&
(data->set.httpreq==HTTPREQ_GET) &&
!k->ignorebody) {
+
+ if(k->size == data->state.resume_from) {
+ /* The resume point is at the end of file, consider this fine
+ even if it doesn't allow resume from here. */
+ infof(data, "The entire document is already downloaded");
+ connclose(conn, "already downloaded");
+ /* Abort download */
+ k->keepon &= ~KEEP_RECV;
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
/* we wanted to resume a download, although the server doesn't
* seem to support this and we did this with a GET (if it
* wasn't a GET we did a POST or PUT resume) */
@@ -629,7 +639,7 @@ static CURLcode readwrite_data(struct SessionHandle *data,
if(dataleft != 0) {
infof(conn->data, "Leftovers after chunking: %zu bytes\n",
dataleft);
- if(Curl_multi_pipeline_enabled(conn->data->multi)) {
+ if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
/* only attempt the rewind if we truly are pipelining */
infof(conn->data, "Rewinding %zu bytes\n",dataleft);
read_rewind(conn, dataleft);
@@ -652,7 +662,7 @@ static CURLcode readwrite_data(struct SessionHandle *data,
excess = (size_t)(k->bytecount + nread - k->maxdownload);
if(excess > 0 && !k->ignorebody) {
- if(Curl_multi_pipeline_enabled(conn->data->multi)) {
+ if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
/* The 'excess' amount below can't be more than BUFSIZE which
always will fit in a size_t */
infof(data,
@@ -746,7 +756,6 @@ static CURLcode readwrite_data(struct SessionHandle *data,
result = Curl_unencode_gzip_write(conn, k, nread);
break;
- case COMPRESS:
default:
failf (data, "Unrecognized content encoding type. "
"libcurl understands `identity', `deflate' and `gzip' "
@@ -818,13 +827,6 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
*didwhat |= KEEP_SEND;
- /*
- * We loop here to do the READ and SEND loop until we run out of
- * data to send or until we get EWOULDBLOCK back
- *
- * FIXME: above comment is misleading. Currently no looping is
- * actually done in do-while loop below.
- */
do {
/* only read more data if there's no upload data already
@@ -891,15 +893,6 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
/* store number of bytes available for upload */
data->req.upload_present = nread;
-#ifndef CURL_DISABLE_SMTP
- if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
- result = Curl_smtp_escape_eob(conn, nread);
- if(result)
- return result;
- }
- else
-#endif /* CURL_DISABLE_SMTP */
-
/* convert LF to CRLF if so asked */
if((!sending_http_headers) && (
#ifdef CURL_DO_LINEEND_CONV
@@ -907,12 +900,16 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
(data->set.prefer_ascii) ||
#endif
(data->set.crlf))) {
- if(data->state.scratch == NULL)
- data->state.scratch = malloc(2*BUFSIZE);
- if(data->state.scratch == NULL) {
- failf (data, "Failed to alloc scratch buffer!");
- return CURLE_OUT_OF_MEMORY;
+ /* Do we need to allocate a scratch buffer? */
+ if(!data->state.scratch) {
+ data->state.scratch = malloc(2 * BUFSIZE);
+ if(!data->state.scratch) {
+ failf(data, "Failed to alloc scratch buffer!");
+
+ return CURLE_OUT_OF_MEMORY;
+ }
}
+
/*
* ASCII/EBCDIC Note: This is presumably a text (not binary)
* transfer so the data should already be in ASCII.
@@ -932,6 +929,7 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
else
data->state.scratch[si] = data->req.upload_fromhere[i];
}
+
if(si != nread) {
/* only perform the special operation if we really did replace
anything */
@@ -944,6 +942,14 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
data->req.upload_present = nread;
}
}
+
+#ifndef CURL_DISABLE_SMTP
+ if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
+ result = Curl_smtp_escape_eob(conn, nread);
+ if(result)
+ return result;
+ }
+#endif /* CURL_DISABLE_SMTP */
} /* if 0 == data->req.upload_present */
else {
/* We have a partial buffer left from a previous "round". Use
@@ -1006,9 +1012,9 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
* be read and written to/from the connection.
*/
CURLcode Curl_readwrite(struct connectdata *conn,
+ struct SessionHandle *data,
bool *done)
{
- struct SessionHandle *data = conn->data;
struct SingleRequest *k = &data->req;
CURLcode result;
int didwhat=0;
@@ -1032,6 +1038,11 @@ CURLcode Curl_readwrite(struct connectdata *conn,
else
fd_write = CURL_SOCKET_BAD;
+ if(conn->data->state.drain) {
+ select_res |= CURL_CSELECT_IN;
+ DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
+ }
+
if(!select_res) /* Call for select()/poll() only, if read/write/error
status is not known. */
select_res = Curl_socket_ready(fd_read, fd_write, 0);
@@ -1202,10 +1213,10 @@ int Curl_single_getsock(const struct connectdata *conn,
if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
if((conn->sockfd != conn->writesockfd) ||
- !(data->req.keepon & KEEP_RECV)) {
- /* only if they are not the same socket or we didn't have a readable
+ bitmap == GETSOCK_BLANK) {
+ /* only if they are not the same socket and we have a readable
one, we increase index */
- if(data->req.keepon & KEEP_RECV)
+ if(bitmap != GETSOCK_BLANK)
sockindex++; /* increase index if we need two entries */
DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
@@ -1256,7 +1267,7 @@ long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
* the next packet at the adjusted rate. We should wait
* longer when using larger packets, for instance.
*/
- rv = ((curl_off_t)((pkt_size * 8) * 1000) / rate_bps);
+ rv = ((curl_off_t)(pkt_size * 1000) / rate_bps);
/* Catch rounding errors and always slow down at least 1ms if
* we are running too fast.
@@ -1278,7 +1289,7 @@ long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
*/
CURLcode Curl_pretransfer(struct SessionHandle *data)
{
- CURLcode res;
+ CURLcode result;
if(!data->change.url) {
/* we can't do anything without URL */
failf(data, "No URL set!");
@@ -1288,32 +1299,35 @@ CURLcode Curl_pretransfer(struct SessionHandle *data)
/* Init the SSL session ID cache here. We do it here since we want to do it
after the *_setopt() calls (that could specify the size of the cache) but
before any transfer takes place. */
- res = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
- if(res)
- return res;
+ result = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
+ if(result)
+ return result;
data->set.followlocation=0; /* reset the location-follow counter */
data->state.this_is_a_follow = FALSE; /* reset this */
data->state.errorbuf = FALSE; /* no error has occurred */
data->state.httpversion = 0; /* don't assume any particular server version */
- data->state.ssl_connect_retry = FALSE;
-
data->state.authproblem = FALSE;
data->state.authhost.want = data->set.httpauth;
data->state.authproxy.want = data->set.proxyauth;
Curl_safefree(data->info.wouldredirect);
data->info.wouldredirect = NULL;
+ if(data->set.httpreq == HTTPREQ_PUT)
+ data->state.infilesize = data->set.filesize;
+ else
+ data->state.infilesize = data->set.postfieldsize;
+
/* If there is a list of cookie files to read, do it now! */
if(data->change.cookielist)
Curl_cookie_loadfiles(data);
/* If there is a list of host pairs to deal with */
if(data->change.resolve)
- res = Curl_loadhostpairs(data);
+ result = Curl_loadhostpairs(data);
- if(!res) {
+ if(!result) {
/* Allow data->set.use_port to set which port to use. This needs to be
* disabled for example when we follow Location: headers to URLs using
* different ports! */
@@ -1328,6 +1342,7 @@ CURLcode Curl_pretransfer(struct SessionHandle *data)
#endif
Curl_initinfo(data); /* reset session-specific information "variables" */
+ Curl_pgrsResetTimesSizes(data);
Curl_pgrsStartNow(data);
if(data->set.timeout)
@@ -1343,7 +1358,7 @@ CURLcode Curl_pretransfer(struct SessionHandle *data)
data->state.authproxy.picked &= data->state.authproxy.want;
}
- return res;
+ return result;
}
/*
@@ -1619,7 +1634,7 @@ CURLcode Curl_follow(struct SessionHandle *data,
if(type == FOLLOW_REDIR) {
if((data->set.maxredirs != -1) &&
(data->set.followlocation >= data->set.maxredirs)) {
- failf(data,"Maximum (%ld) redirects followed", data->set.maxredirs);
+ failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
return CURLE_TOO_MANY_REDIRECTS;
}
@@ -1828,13 +1843,13 @@ Curl_reconnect_request(struct connectdata **connp)
* (again). Slight Lack of feedback in the report, but I don't think this
* extra check can do much harm.
*/
- if((CURLE_OK == result) || (CURLE_SEND_ERROR == result)) {
+ if(!result || (CURLE_SEND_ERROR == result)) {
bool async;
bool protocol_done = TRUE;
/* Now, redo the connect and get a new connection */
result = Curl_connect(data, connp, &async, &protocol_done);
- if(CURLE_OK == result) {
+ if(!result) {
/* We have connected or sent away a name resolve query fine */
conn = *connp; /* setup conn to again point to something nice */
@@ -1872,12 +1887,10 @@ CURLcode Curl_retry_request(struct connectdata *conn,
!(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
return CURLE_OK;
- if(/* workaround for broken TLS servers */ data->state.ssl_connect_retry ||
- ((data->req.bytecount +
- data->req.headerbytecount == 0) &&
- conn->bits.reuse &&
- !data->set.opt_no_body &&
- data->set.rtspreq != RTSPREQ_RECEIVE)) {
+ if((data->req.bytecount + data->req.headerbytecount == 0) &&
+ conn->bits.reuse &&
+ !data->set.opt_no_body &&
+ (data->set.rtspreq != RTSPREQ_RECEIVE)) {
/* We got no data, we attempted to re-use a connection and yet we want a
"body". This might happen if the connection was left alive when we were
done using it before, but that was closed when we wanted to read from