Diffstat (limited to 'Utilities/cmcurl/lib/transfer.c')
-rw-r--r-- | Utilities/cmcurl/lib/transfer.c | 127
1 files changed, 64 insertions, 63 deletions
diff --git a/Utilities/cmcurl/lib/transfer.c b/Utilities/cmcurl/lib/transfer.c
index 1f6d26d..43e8f64 100644
--- a/Utilities/cmcurl/lib/transfer.c
+++ b/Utilities/cmcurl/lib/transfer.c
@@ -405,8 +405,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
   /* This is where we loop until we have read everything there is to read
      or we get a CURLE_AGAIN */
   do {
-    size_t buffersize = data->set.buffer_size?
-      data->set.buffer_size : BUFSIZE;
+    size_t buffersize = data->set.buffer_size;
     size_t bytestoread = buffersize;

     if(
@@ -681,8 +680,6 @@ static CURLcode readwrite_data(struct Curl_easy *data,
         excess = (size_t)(k->bytecount + nread - k->maxdownload);
         if(excess > 0 && !k->ignorebody) {
           if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
-            /* The 'excess' amount below can't be more than BUFSIZE which
-               always will fit in a size_t */
             infof(data,
                   "Rewinding stream by : %zu"
                   " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
@@ -853,7 +850,6 @@ static CURLcode done_sending(struct connectdata *conn,
  */
 static CURLcode readwrite_upload(struct Curl_easy *data,
                                  struct connectdata *conn,
-                                 struct SingleRequest *k,
                                  int *didwhat)
 {
   ssize_t i, si;
@@ -861,6 +857,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
   CURLcode result;
   ssize_t nread; /* number of bytes read */
   bool sending_http_headers = FALSE;
+  struct SingleRequest *k = &data->req;

   if((k->bytecount == 0) && (k->writebytecount == 0))
     Curl_pgrsTime(data, TIMER_STARTTRANSFER);
@@ -871,15 +868,15 @@ static CURLcode readwrite_upload(struct Curl_easy *data,

     /* only read more data if there's no upload data already
        present in the upload buffer */
-    if(0 == data->req.upload_present) {
+    if(0 == k->upload_present) {
       /* init the "upload from here" pointer */
-      data->req.upload_fromhere = k->uploadbuf;
+      k->upload_fromhere = data->state.uploadbuffer;

       if(!k->upload_done) {
         /* HTTP pollution, this should be written nicer to become more
            protocol agnostic. */
         int fillcount;
-        struct HTTP *http = data->req.protop;
+        struct HTTP *http = k->protop;

         if((k->exp100 == EXP100_SENDING_REQUEST) &&
            (http->sending == HTTPSEND_BODY)) {
@@ -892,7 +889,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
           *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */

           /* set a timeout for the multi interface */
-          Curl_expire(data, data->set.expect_100_timeout);
+          Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
           break;
         }

@@ -905,7 +902,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
           sending_http_headers = FALSE;
         }

-        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
+        result = Curl_fillreadbuffer(conn, UPLOAD_BUFSIZE, &fillcount);
         if(result)
           return result;

@@ -926,7 +923,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
       }

       /* store number of bytes available for upload */
-      data->req.upload_present = nread;
+      k->upload_present = nread;

       /* convert LF to CRLF if so asked */
       if((!sending_http_headers) && (
@@ -937,7 +934,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
          (data->set.crlf))) {
         /* Do we need to allocate a scratch buffer? */
         if(!data->state.scratch) {
-          data->state.scratch = malloc(2 * BUFSIZE);
+          data->state.scratch = malloc(2 * data->set.buffer_size);
           if(!data->state.scratch) {
             failf(data, "Failed to alloc scratch buffer!");

@@ -952,7 +949,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
          * must be used instead of the escape sequences \r & \n.
          */
         for(i = 0, si = 0; i < nread; i++, si++) {
-          if(data->req.upload_fromhere[i] == 0x0a) {
+          if(k->upload_fromhere[i] == 0x0a) {
             data->state.scratch[si++] = 0x0d;
             data->state.scratch[si] = 0x0a;
             if(!data->set.crlf) {
@@ -963,7 +960,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
             }
           }
           else
-            data->state.scratch[si] = data->req.upload_fromhere[i];
+            data->state.scratch[si] = k->upload_fromhere[i];
         }

         if(si != nread) {
@@ -972,10 +969,10 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
           nread = si;

           /* upload from the new (replaced) buffer instead */
-          data->req.upload_fromhere = data->state.scratch;
+          k->upload_fromhere = data->state.scratch;

           /* set the new amount too */
-          data->req.upload_present = nread;
+          k->upload_present = nread;
         }
       }

@@ -986,7 +983,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
           return result;
       }
 #endif /* CURL_DISABLE_SMTP */
-    } /* if 0 == data->req.upload_present */
+    } /* if 0 == k->upload_present */
     else {
       /* We have a partial buffer left from a previous "round". Use
          that instead of reading more data */
@@ -994,17 +991,17 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
     }

     /* write to socket (send away data) */
     result = Curl_write(conn,
-                        conn->writesockfd,     /* socket to send to */
-                        data->req.upload_fromhere, /* buffer pointer */
-                        data->req.upload_present,  /* buffer size */
-                        &bytes_written);           /* actually sent */
+                        conn->writesockfd,  /* socket to send to */
+                        k->upload_fromhere, /* buffer pointer */
+                        k->upload_present,  /* buffer size */
+                        &bytes_written);    /* actually sent */

     if(result)
       return result;

     if(data->set.verbose)
       /* show the data before we change the pointer upload_fromhere */
-      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
+      Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
                  (size_t)bytes_written, conn);

     k->writebytecount += bytes_written;
@@ -1015,20 +1012,20 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
       infof(data, "We are completely uploaded and fine\n");
     }

-    if(data->req.upload_present != bytes_written) {
+    if(k->upload_present != bytes_written) {
       /* we only wrote a part of the buffer (if anything), deal with it! */

       /* store the amount of bytes left in the buffer to write */
-      data->req.upload_present -= bytes_written;
+      k->upload_present -= bytes_written;

       /* advance the pointer where to find the buffer when the next send
          is to happen */
-      data->req.upload_fromhere += bytes_written;
+      k->upload_fromhere += bytes_written;
     }
     else {
       /* we've uploaded that buffer now */
-      data->req.upload_fromhere = k->uploadbuf;
-      data->req.upload_present = 0; /* no more bytes left */
+      k->upload_fromhere = data->state.uploadbuffer;
+      k->upload_present = 0; /* no more bytes left */

       if(k->upload_done) {
         result = done_sending(conn, k);
@@ -1108,7 +1105,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,

   if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
     /* write */
-    result = readwrite_upload(data, conn, k, &didwhat);
+    result = readwrite_upload(data, conn, &didwhat);
     if(result)
       return result;
   }
@@ -1142,6 +1139,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
         /* we've waited long enough, continue anyway */
         k->exp100 = EXP100_SEND_DATA;
         k->keepon |= KEEP_SEND;
+        Curl_expire_done(data, EXPIRE_100_TIMEOUT);
         infof(data, "Done waiting for 100-continue\n");
       }
     }
@@ -1186,15 +1184,13 @@ CURLcode Curl_readwrite(struct connectdata *conn,
        */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
 #endif /* CURL_DO_LINEEND_CONV */
-       !data->req.newurl) {
+       !k->newurl) {
       failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
-            " bytes remaining to read",
-            k->size - k->bytecount);
+            " bytes remaining to read", k->size - k->bytecount);
       return CURLE_PARTIAL_FILE;
     }
-    if(!(data->set.opt_no_body) &&
-       k->chunk &&
-       (conn->chunk.state != CHUNK_STOP)) {
+    if(!(data->set.opt_no_body) && k->chunk &&
+       (conn->chunk.state != CHUNK_STOP)) {
       /*
        * In chunked mode, return an error if the connection is closed prior to
        * the empty (terminating) chunk is read.
@@ -1313,8 +1309,11 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)

   if(data->set.httpreq == HTTPREQ_PUT)
     data->state.infilesize = data->set.filesize;
-  else
+  else {
     data->state.infilesize = data->set.postfieldsize;
+    if(data->set.postfields && (data->state.infilesize == -1))
+      data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
+  }

   /* If there is a list of cookie files to read, do it now! */
   if(data->change.cookielist)
@@ -1343,10 +1342,10 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
   Curl_pgrsStartNow(data);

   if(data->set.timeout)
-    Curl_expire(data, data->set.timeout);
+    Curl_expire(data, data->set.timeout, EXPIRE_TIMEOUT);

   if(data->set.connecttimeout)
-    Curl_expire(data, data->set.connecttimeout);
+    Curl_expire(data, data->set.connecttimeout, EXPIRE_CONNECTTIMEOUT);

   /* In case the handle is re-used and an authentication method was picked
      in the session we need to make sure we only use the one(s) we now
@@ -1628,9 +1627,7 @@ static char *concat_url(const char *base, const char *relurl)
  * as given by the remote server and set up the new URL to request.
  */
 CURLcode Curl_follow(struct Curl_easy *data,
-                     char *newurl, /* this 'newurl' is the Location: string,
-                                      and it must be malloc()ed before passed
-                                      here */
+                     char *newurl, /* the Location: string */
                      followtype type) /* see transfer.h */
 {
 #ifdef CURL_DISABLE_HTTP
@@ -1643,33 +1640,36 @@ CURLcode Curl_follow(struct Curl_easy *data,

   /* Location: redirect */
   bool disallowport = FALSE;
+  bool reachedmax = FALSE;

   if(type == FOLLOW_REDIR) {
     if((data->set.maxredirs != -1) &&
-       (data->set.followlocation >= data->set.maxredirs)) {
-      failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
-      return CURLE_TOO_MANY_REDIRECTS;
+      (data->set.followlocation >= data->set.maxredirs)) {
+      reachedmax = TRUE;
+      type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
+                             to URL */
     }
+    else {
+      /* mark the next request as a followed location: */
+      data->state.this_is_a_follow = TRUE;

-    /* mark the next request as a followed location: */
-    data->state.this_is_a_follow = TRUE;
+      data->set.followlocation++; /* count location-followers */

-    data->set.followlocation++; /* count location-followers */
+      if(data->set.http_auto_referer) {
+        /* We are asked to automatically set the previous URL as the referer
+           when we get the next URL. We pick the ->url field, which may or may
+           not be 100% correct */

-    if(data->set.http_auto_referer) {
-      /* We are asked to automatically set the previous URL as the referer
-         when we get the next URL. We pick the ->url field, which may or may
-         not be 100% correct */
+        if(data->change.referer_alloc) {
+          Curl_safefree(data->change.referer);
+          data->change.referer_alloc = FALSE;
+        }

-      if(data->change.referer_alloc) {
-        Curl_safefree(data->change.referer);
-        data->change.referer_alloc = FALSE;
+        data->change.referer = strdup(data->change.url);
+        if(!data->change.referer)
+          return CURLE_OUT_OF_MEMORY;
+        data->change.referer_alloc = TRUE; /* yes, free this later */
       }
-
-      data->change.referer = strdup(data->change.url);
-      if(!data->change.referer)
-        return CURLE_OUT_OF_MEMORY;
-      data->change.referer_alloc = TRUE; /* yes, free this later */
     }
   }

@@ -1681,7 +1681,6 @@ CURLcode Curl_follow(struct Curl_easy *data,
     char *absolute = concat_url(data->change.url, newurl);
     if(!absolute)
       return CURLE_OUT_OF_MEMORY;
-    free(newurl);
     newurl = absolute;
   }
   else {
@@ -1697,8 +1696,6 @@ CURLcode Curl_follow(struct Curl_easy *data,
     if(!newest)
       return CURLE_OUT_OF_MEMORY;
     strcpy_url(newest, newurl); /* create a space-free URL */
-
-    free(newurl); /* that was no good */
     newurl = newest; /* use this instead now */
   }

@@ -1707,6 +1704,11 @@ CURLcode Curl_follow(struct Curl_easy *data,
     /* we're only figuring out the new url if we would've followed locations
        but now we're done so we can get out! */
     data->info.wouldredirect = newurl;
+
+    if(reachedmax) {
+      failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
+      return CURLE_TOO_MANY_REDIRECTS;
+    }
     return CURLE_OK;
   }

@@ -1720,7 +1722,6 @@ CURLcode Curl_follow(struct Curl_easy *data,

   data->change.url = newurl;
   data->change.url_alloc = TRUE;
-  newurl = NULL; /* don't free! */

   infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

@@ -1947,7 +1948,7 @@ Curl_setup_transfer(

       /* Set a timeout for the multi interface. Add the inaccuracy margin so
          that we don't fire slightly too early and get denied to run. */
-      Curl_expire(data, data->set.expect_100_timeout);
+      Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
     }
     else {
       if(data->state.expect100header)
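Usage note (not part of the diff): the Curl_pretransfer hunk above means a request body set with CURLOPT_POSTFIELDS and no explicit CURLOPT_POSTFIELDSIZE gets its upload size derived via strlen() during pretransfer. A minimal, self-contained sketch of that calling pattern follows; the URL and payload are placeholders, not taken from this change.

/* Sketch only -- application-level illustration, not curl source.
   Sets CURLOPT_POSTFIELDS without CURLOPT_POSTFIELDSIZE, so libcurl
   derives the body length itself (the Curl_pretransfer change above). */
#include <curl/curl.h>

int main(void)
{
  CURLcode rc = CURLE_FAILED_INIT;
  CURL *easy;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  easy = curl_easy_init();
  if(easy) {
    /* placeholder URL */
    curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/post");
    /* NUL-terminated body; CURLOPT_POSTFIELDSIZE deliberately not set */
    curl_easy_setopt(easy, CURLOPT_POSTFIELDS, "name=value&mode=test");
    rc = curl_easy_perform(easy);
    curl_easy_cleanup(easy);
  }
  curl_global_cleanup();
  return (int)rc;
}

The Curl_expire()/Curl_expire_done() signature changes in this diff are internal to the library (each timeout is now keyed by an expire ID such as EXPIRE_100_TIMEOUT), so no application-side change is needed for them.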