Diffstat (limited to 'Utilities/cmcurl/lib/transfer.c')
-rw-r--r-- | Utilities/cmcurl/lib/transfer.c | 215
1 file changed, 160 insertions(+), 55 deletions(-)
diff --git a/Utilities/cmcurl/lib/transfer.c b/Utilities/cmcurl/lib/transfer.c
index 43e8f64..8e66d0d 100644
--- a/Utilities/cmcurl/lib/transfer.c
+++ b/Utilities/cmcurl/lib/transfer.c
@@ -73,12 +73,38 @@
 #include "connect.h"
 #include "non-ascii.h"
 #include "http2.h"
+#include "mime.h"
+#include "strcase.h"
 
 /* The last 3 #include files should be in this order */
 #include "curl_printf.h"
 #include "curl_memory.h"
 #include "memdebug.h"
 
+#if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
+    !defined(CURL_DISABLE_IMAP)
+/*
+ * checkheaders() checks the linked list of custom headers for a
+ * particular header (prefix).
+ *
+ * Returns a pointer to the first matching header or NULL if none matched.
+ */
+char *Curl_checkheaders(const struct connectdata *conn,
+                        const char *thisheader)
+{
+  struct curl_slist *head;
+  size_t thislen = strlen(thisheader);
+  struct Curl_easy *data = conn->data;
+
+  for(head = data->set.headers; head; head = head->next) {
+    if(strncasecompare(head->data, thisheader, thislen))
+      return head->data;
+  }
+
+  return NULL;
+}
+#endif
+
 /*
  * This function will call the read callback to fill our buffer with data
  * to upload.
@@ -195,27 +221,28 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
            strlen(endofline_network));
 
 #ifdef CURL_DOES_CONVERSIONS
-    CURLcode result;
-    int length;
-    if(data->set.prefer_ascii) {
-      /* translate the protocol and data */
-      length = nread;
-    }
-    else {
-      /* just translate the protocol portion */
-      length = strlen(hexbuffer);
+    {
+      CURLcode result;
+      int length;
+      if(data->set.prefer_ascii)
+        /* translate the protocol and data */
+        length = nread;
+      else
+        /* just translate the protocol portion */
+        length = (int)strlen(hexbuffer);
+      result = Curl_convert_to_network(data, data->req.upload_fromhere,
+                                       length);
+      /* Curl_convert_to_network calls failf if unsuccessful */
+      if(result)
+        return result;
     }
-    result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
-    /* Curl_convert_to_network calls failf if unsuccessful */
-    if(result)
-      return result;
 #endif /* CURL_DOES_CONVERSIONS */
 
     if((nread - hexlen) == 0)
       /* mark this as done once this chunk is transferred */
       data->req.upload_done = TRUE;
 
-    nread+=(int)strlen(endofline_native); /* for the added end of line */
+    nread += (int)strlen(endofline_native); /* for the added end of line */
   }
 #ifdef CURL_DOES_CONVERSIONS
   else if((data->set.prefer_ascii) && (!sending_http_headers)) {
@@ -241,6 +268,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
 CURLcode Curl_readrewind(struct connectdata *conn)
 {
   struct Curl_easy *data = conn->data;
+  curl_mimepart *mimepart = &data->set.mimepost;
 
   conn->bits.rewindaftersend = FALSE; /* we rewind now */
 
@@ -253,9 +281,21 @@ CURLcode Curl_readrewind(struct connectdata *conn)
   /* We have sent away data. If not using CURLOPT_POSTFIELDS or
      CURLOPT_HTTPPOST, call app to rewind
   */
-  if(data->set.postfields ||
-     (data->set.httpreq == HTTPREQ_POST_FORM))
+  if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
+    struct HTTP *http = data->req.protop;
+
+    if(http->sendit)
+      mimepart = http->sendit;
+  }
+  if(data->set.postfields)
     ; /* do nothing */
+  else if(data->set.httpreq == HTTPREQ_POST_MIME ||
+          data->set.httpreq == HTTPREQ_POST_FORM) {
+    if(Curl_mime_rewind(mimepart)) {
+      failf(data, "Cannot rewind mime/post data");
+      return CURLE_SEND_FAIL_REWIND;
+    }
+  }
   else {
     if(data->set.seek_func) {
       int err;
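
For illustration only: the Curl_checkheaders() helper added above does a case-insensitive prefix match against the caller-supplied header list. A minimal standalone sketch of that matching rule, using POSIX strncasecmp() in place of curl's internal strncasecompare(); the list type and the sample headers are invented for the demo:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>   /* strncasecmp() (POSIX) */

    struct slist { const char *data; struct slist *next; };

    /* return the first entry that starts with 'prefix', ignoring case */
    static const char *check_headers(const struct slist *head, const char *prefix)
    {
      size_t len = strlen(prefix);
      for(; head; head = head->next)
        if(!strncasecmp(head->data, prefix, len))
          return head->data;
      return NULL;
    }

    int main(void)
    {
      struct slist h2 = { "content-type: text/plain", NULL };
      struct slist h1 = { "Accept: application/json", &h2 };

      /* matches the second entry despite the case difference */
      printf("%s\n", check_headers(&h1, "Content-Type"));
      return 0;
    }
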
@@ -560,7 +600,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
         infof(data, "Ignoring the response-body\n");
       }
       if(data->state.resume_from && !k->content_range &&
-         (data->set.httpreq==HTTPREQ_GET) &&
+         (data->set.httpreq == HTTPREQ_GET) &&
          !k->ignorebody) {
 
         if(k->size == data->state.resume_from) {
@@ -915,7 +955,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
         /* this is a paused transfer */
         break;
       }
-      if(nread<=0) {
+      if(nread <= 0) {
         result = done_sending(conn, k);
         if(result)
           return result;
@@ -1055,7 +1095,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
 {
   struct SingleRequest *k = &data->req;
   CURLcode result;
-  int didwhat=0;
+  int didwhat = 0;
 
   curl_socket_t fd_read;
   curl_socket_t fd_write;
@@ -1288,6 +1328,13 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
     failf(data, "No URL set!");
     return CURLE_URL_MALFORMAT;
   }
+  /* since the URL may have been redirected in a previous use of this handle */
+  if(data->change.url_alloc) {
+    /* the already set URL is allocated, free it first! */
+    Curl_safefree(data->change.url);
+    data->change.url_alloc = FALSE;
+  }
+  data->change.url = data->set.str[STRING_SET_URL];
 
   /* Init the SSL session ID cache here. We do it here since we want to do it
      after the *_setopt() calls (that could specify the size of the cache) but
@@ -1296,7 +1343,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
   if(result)
     return result;
 
-  data->set.followlocation=0; /* reset the location-follow counter */
+  data->set.followlocation = 0; /* reset the location-follow counter */
   data->state.this_is_a_follow = FALSE; /* reset this */
   data->state.errorbuf = FALSE; /* no error has occurred */
   data->state.httpversion = 0; /* don't assume any particular server version */
@@ -1338,7 +1385,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
 #endif
 
     Curl_initinfo(data); /* reset session-specific information "variables" */
-    Curl_pgrsResetTimesSizes(data);
+    Curl_pgrsResetTransferSizes(data);
     Curl_pgrsStartNow(data);
 
     if(data->set.timeout)
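
A usage note, not part of the diff: the Curl_pretransfer() change above re-applies the CURLOPT_URL string at the start of every transfer, so a URL swapped in by a followed redirect does not stick to a reused handle. A minimal sketch of reusing one easy handle; the example.com URLs are placeholders:

    #include <curl/curl.h>

    int main(void)
    {
      CURLcode rc;
      CURL *curl = curl_easy_init();
      if(!curl)
        return 1;

      curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/old");
      rc = curl_easy_perform(curl);   /* may finish on a redirected URL */

      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/new");
      if(!rc)
        rc = curl_easy_perform(curl); /* restarts from the URL set above */

      curl_easy_cleanup(curl);
      return (int)rc;
    }
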
@@ -1384,19 +1431,59 @@ CURLcode Curl_posttransfer(struct Curl_easy *data)
 
 #ifndef CURL_DISABLE_HTTP
 /*
+ * Find the separator at the end of the host name, or the '?' in cases like
+ * http://www.url.com?id=2380
+ */
+static const char *find_host_sep(const char *url)
+{
+  const char *sep;
+  const char *query;
+
+  /* Find the start of the hostname */
+  sep = strstr(url, "//");
+  if(!sep)
+    sep = url;
+  else
+    sep += 2;
+
+  query = strchr(sep, '?');
+  sep = strchr(sep, '/');
+
+  if(!sep)
+    sep = url + strlen(url);
+
+  if(!query)
+    query = url + strlen(url);
+
+  return sep < query ? sep : query;
+}
+
+/*
  * strlen_url() returns the length of the given URL if the spaces within the
  * URL were properly URL encoded.
+ * URL encoding should be skipped for host names, otherwise IDN resolution
+ * will fail.
  */
-static size_t strlen_url(const char *url)
+static size_t strlen_url(const char *url, bool relative)
 {
   const unsigned char *ptr;
-  size_t newlen=0;
-  bool left=TRUE; /* left side of the ? */
+  size_t newlen = 0;
+  bool left = TRUE; /* left side of the ? */
+  const unsigned char *host_sep = (const unsigned char *) url;
+
+  if(!relative)
+    host_sep = (const unsigned char *) find_host_sep(url);
+
+  for(ptr = (unsigned char *)url; *ptr; ptr++) {
+
+    if(ptr < host_sep) {
+      ++newlen;
+      continue;
+    }
 
-  for(ptr=(unsigned char *)url; *ptr; ptr++) {
     switch(*ptr) {
     case '?':
-      left=FALSE;
+      left = FALSE;
       /* fall through */
     default:
       if(*ptr >= 0x80)
@@ -1405,7 +1492,7 @@ static size_t strlen_url(const char *url)
       break;
     case ' ':
       if(left)
-        newlen+=3;
+        newlen += 3;
       else
         newlen++;
       break;
@@ -1416,19 +1503,32 @@
 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
  * the source URL accordingly.
+ * URL encoding should be skipped for host names, otherwise IDN resolution
+ * will fail.
  */
-static void strcpy_url(char *output, const char *url)
+static void strcpy_url(char *output, const char *url, bool relative)
 {
   /* we must add this with whitespace-replacing */
-  bool left=TRUE;
+  bool left = TRUE;
   const unsigned char *iptr;
   char *optr = output;
+  const unsigned char *host_sep = (const unsigned char *) url;
+
+  if(!relative)
+    host_sep = (const unsigned char *) find_host_sep(url);
+
   for(iptr = (unsigned char *)url;    /* read from here */
       *iptr;         /* until zero byte */
       iptr++) {
+
+    if(iptr < host_sep) {
+      *optr++ = *iptr;
+      continue;
+    }
+
     switch(*iptr) {
     case '?':
-      left=FALSE;
+      left = FALSE;
       /* fall through */
     default:
       if(*iptr >= 0x80) {
@@ -1449,7 +1549,7 @@ static void strcpy_url(char *output, const char *url)
       break;
     }
   }
-  *optr=0; /* zero terminate output buffer */
+  *optr = 0; /* zero terminate output buffer */
 
 }
 
@@ -1481,32 +1581,33 @@
   char *protsep;
   char *pathsep;
   size_t newlen;
+  bool host_changed = FALSE;
 
   const char *useurl = relurl;
   size_t urllen;
 
   /* we must make our own copy of the URL to play with, as it may point to
      read-only data */
-  char *url_clone=strdup(base);
+  char *url_clone = strdup(base);
 
   if(!url_clone)
     return NULL; /* skip out of this NOW */
 
   /* protsep points to the start of the host name */
-  protsep=strstr(url_clone, "//");
+  protsep = strstr(url_clone, "//");
   if(!protsep)
-    protsep=url_clone;
+    protsep = url_clone;
   else
-    protsep+=2; /* pass the slashes */
+    protsep += 2; /* pass the slashes */
 
   if('/' != relurl[0]) {
-    int level=0;
+    int level = 0;
 
     /* First we need to find out if there's a ?-letter in the URL,
        and cut it and the right-side of that off */
     pathsep = strchr(protsep, '?');
     if(pathsep)
-      *pathsep=0;
+      *pathsep = 0;
 
     /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
@@ -1515,14 +1616,14 @@ static char *concat_url(const char *base, const char *relurl)
     if(useurl[0] != '?') {
       pathsep = strrchr(protsep, '/');
       if(pathsep)
-        *pathsep=0;
+        *pathsep = 0;
     }
 
     /* Check if there's any slash after the host name, and if so, remember
        that position instead */
     pathsep = strchr(protsep, '/');
     if(pathsep)
-      protsep = pathsep+1;
+      protsep = pathsep + 1;
     else
       protsep = NULL;
 
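
A side note, not part of the diff: the effect of find_host_sep() above is that everything up to the first '/' or '?' after the authority is copied through untouched, and only the path/query part is considered for space and high-byte encoding. A small self-contained sketch of the same logic; the example.com URLs are made up:

    #include <stdio.h>
    #include <string.h>

    /* same idea as find_host_sep() above: return a pointer to the first '/'
       or '?' after the authority, i.e. where URL-encoding may start */
    static const char *host_sep(const char *url)
    {
      const char *sep, *query;

      sep = strstr(url, "//");
      sep = sep ? sep + 2 : url;

      query = strchr(sep, '?');
      sep = strchr(sep, '/');

      if(!sep)
        sep = url + strlen(url);
      if(!query)
        query = url + strlen(url);

      return sep < query ? sep : query;
    }

    int main(void)
    {
      const char *a = "http://www.example.com/pa th?q=a b";
      const char *b = "http://www.example.com?id=2380";

      /* print the untouched host part and the encodable remainder */
      printf("%.*s | %s\n", (int)(host_sep(a) - a), a, host_sep(a));
      printf("%.*s | %s\n", (int)(host_sep(b) - b), b, host_sep(b));
      return 0;
    }
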
@@ -1530,13 +1631,13 @@ static char *concat_url(const char *base, const char *relurl)
        and act accordingly */
 
     if((useurl[0] == '.') && (useurl[1] == '/'))
-      useurl+=2; /* just skip the "./" */
+      useurl += 2; /* just skip the "./" */
 
     while((useurl[0] == '.') &&
           (useurl[1] == '.') &&
           (useurl[2] == '/')) {
       level++;
-      useurl+=3; /* pass the "../" */
+      useurl += 3; /* pass the "../" */
     }
 
     if(protsep) {
@@ -1544,9 +1645,9 @@
         /* cut off one more level from the right of the original URL */
         pathsep = strrchr(protsep, '/');
         if(pathsep)
-          *pathsep=0;
+          *pathsep = 0;
         else {
-          *protsep=0;
+          *protsep = 0;
           break;
         }
       }
@@ -1558,9 +1659,10 @@
     if((relurl[0] == '/') && (relurl[1] == '/')) {
       /* the new URL starts with //, just keep the protocol part from
          the original one */
-      *protsep=0;
+      *protsep = 0;
       useurl = &relurl[2]; /* we keep the slashes from the original, so we
                               skip the new ones */
+      host_changed = TRUE;
     }
     else {
       /* cut off the original URL from the first slash, or deal with URLs
@@ -1573,7 +1675,7 @@
         char *sep = strchr(protsep, '?');
         if(sep && (sep < pathsep))
           pathsep = sep;
-        *pathsep=0;
+        *pathsep = 0;
       }
       else {
         /* There was no slash. Now, since we might be operating on a badly
@@ -1582,7 +1684,7 @@
            ?-letter as well! */
         pathsep = strchr(protsep, '?');
         if(pathsep)
-          *pathsep=0;
+          *pathsep = 0;
       }
     }
   }
@@ -1592,7 +1694,7 @@
      letter we replace each space with %20 while it is replaced with '+'
      on the right side of the '?' letter.
   */
-  newlen = strlen_url(useurl);
+  newlen = strlen_url(useurl, !host_changed);
 
   urllen = strlen(url_clone);
 
@@ -1614,7 +1716,7 @@
     newest[urllen++]='/';
 
   /* then append the new piece on the right side */
-  strcpy_url(&newest[urllen], useurl);
+  strcpy_url(&newest[urllen], useurl, !host_changed);
 
   free(url_clone);
 
@@ -1673,7 +1775,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
     }
   }
 
-  if(!is_absolute_url(newurl))  {
+  if(!is_absolute_url(newurl)) {
     /***
      *DANG* this is an RFC 2068 violation. The URL is supposed
      to be absolute and this doesn't seem to be that!
@@ -1687,15 +1789,16 @@ CURLcode Curl_follow(struct Curl_easy *data,
     /* The new URL MAY contain space or high byte values, that means a mighty
        stupid redirect URL but we still make an effort to do "right". */
     char *newest;
-    size_t newlen = strlen_url(newurl);
+    size_t newlen = strlen_url(newurl, FALSE);
 
     /* This is an absolute URL, don't allow the custom port number */
     disallowport = TRUE;
 
-    newest = malloc(newlen+1); /* get memory for this */
+    newest = malloc(newlen + 1); /* get memory for this */
     if(!newest)
       return CURLE_OUT_OF_MEMORY;
-    strcpy_url(newest, newurl); /* create a space-free URL */
+
+    strcpy_url(newest, newurl, FALSE); /* create a space-free URL */
     newurl = newest; /* use this instead now */
   }
 
@@ -1763,7 +1866,8 @@ CURLcode Curl_follow(struct Curl_easy *data,
      * can be overridden with CURLOPT_POSTREDIR.
      */
     if((data->set.httpreq == HTTPREQ_POST
-        || data->set.httpreq == HTTPREQ_POST_FORM)
+        || data->set.httpreq == HTTPREQ_POST_FORM
+        || data->set.httpreq == HTTPREQ_POST_MIME)
        && !(data->set.keep_post & CURL_REDIR_POST_301)) {
       infof(data, "Switch from POST to GET\n");
       data->set.httpreq = HTTPREQ_GET;
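
For context, not part of the diff: the Curl_follow() hunk above (and its 302 twin below) turns a POST — form, mime or plain POSTFIELDS — into a GET when following a 301/302 unless CURLOPT_POSTREDIR says otherwise. A minimal sketch of keeping the method from the application side; the URL and body are placeholders:

    #include <curl/curl.h>

    int main(void)
    {
      CURLcode rc;
      CURL *curl = curl_easy_init();
      if(!curl)
        return 1;

      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/submit");
      curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel");
      curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

      /* keep POST (and its body) across 301, 302 and 303 redirects */
      curl_easy_setopt(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);

      rc = curl_easy_perform(curl);
      curl_easy_cleanup(curl);
      return (int)rc;
    }
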
@@ -1787,7 +1891,8 @@ CURLcode Curl_follow(struct Curl_easy *data,
      * can be overridden with CURLOPT_POSTREDIR.
      */
     if((data->set.httpreq == HTTPREQ_POST
-        || data->set.httpreq == HTTPREQ_POST_FORM)
+        || data->set.httpreq == HTTPREQ_POST_FORM
+        || data->set.httpreq == HTTPREQ_POST_MIME)
        && !(data->set.keep_post & CURL_REDIR_POST_302)) {
       infof(data, "Switch from POST to GET\n");
       data->set.httpreq = HTTPREQ_GET;
@@ -1820,7 +1925,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
     break;
   }
   Curl_pgrsTime(data, TIMER_REDIRECT);
-  Curl_pgrsResetTimesSizes(data);
+  Curl_pgrsResetTransferSizes(data);
 
   return CURLE_OK;
 #endif /* CURL_DISABLE_HTTP */
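
For reference, not part of the diff: the HTTPREQ_POST_MIME and Curl_mime_rewind() plumbing added here backs libcurl's public MIME API (curl_mime_*, added in curl 7.56.0). A minimal sketch of that API from the application side; the URL, part name and file name are placeholders:

    #include <curl/curl.h>

    int main(void)
    {
      CURLcode rc;
      CURL *curl = curl_easy_init();
      curl_mime *mime;
      curl_mimepart *part;

      if(!curl)
        return 1;

      mime = curl_mime_init(curl);
      part = curl_mime_addpart(mime);
      curl_mime_name(part, "upload");          /* form field name */
      curl_mime_filedata(part, "report.txt");  /* attach a local file */

      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload");
      curl_easy_setopt(curl, CURLOPT_MIMEPOST, mime); /* internally HTTPREQ_POST_MIME */

      rc = curl_easy_perform(curl);

      curl_mime_free(mime);
      curl_easy_cleanup(curl);
      return (int)rc;
    }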