Diffstat (limited to 'Source/CTest/Curl')
-rw-r--r--  Source/CTest/Curl/CMakeLists.txt  |    2
-rw-r--r--  Source/CTest/Curl/base64.c        |   98
-rw-r--r--  Source/CTest/Curl/connect.c       |   23
-rw-r--r--  Source/CTest/Curl/curl/curl.h     |  109
-rw-r--r--  Source/CTest/Curl/easy.c          |   14
-rw-r--r--  Source/CTest/Curl/escape.c        |   15
-rw-r--r--  Source/CTest/Curl/formdata.c      |    2
-rw-r--r--  Source/CTest/Curl/ftp.c           |  247
-rw-r--r--  Source/CTest/Curl/ftp.h           |    4
-rw-r--r--  Source/CTest/Curl/getinfo.c       |    3
-rw-r--r--  Source/CTest/Curl/hostip.c        |   42
-rw-r--r--  Source/CTest/Curl/hostip.h        |    5
-rw-r--r--  Source/CTest/Curl/http.c          |  301
-rw-r--r--  Source/CTest/Curl/http.h          |    4
-rw-r--r--  Source/CTest/Curl/if2ip.h         |   35
-rw-r--r--  Source/CTest/Curl/krb4.c          |   52
-rw-r--r--  Source/CTest/Curl/krb4.h          |    2
-rw-r--r--  Source/CTest/Curl/multi.c         |    5
-rw-r--r--  Source/CTest/Curl/security.c      |   88
-rw-r--r--  Source/CTest/Curl/sendf.c         |   42
-rw-r--r--  Source/CTest/Curl/sendf.h         |    7
-rw-r--r--  Source/CTest/Curl/setup.h         |    5
-rw-r--r--  Source/CTest/Curl/share.h         |   52
-rw-r--r--  Source/CTest/Curl/ssluse.c        |   56
-rw-r--r--  Source/CTest/Curl/transfer.c      | 1475
-rw-r--r--  Source/CTest/Curl/url.c           |   59
-rw-r--r--  Source/CTest/Curl/urldata.h       |   50
27 files changed, 1613 insertions, 1184 deletions
diff --git a/Source/CTest/Curl/CMakeLists.txt b/Source/CTest/Curl/CMakeLists.txt
index 9464e0d..16d28d2 100644
--- a/Source/CTest/Curl/CMakeLists.txt
+++ b/Source/CTest/Curl/CMakeLists.txt
@@ -2,7 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 1.5)
PROJECT(LIBCURL C)
SET(PACKAGE "curl")
-SET(VERSION "7.10.2")
+SET(VERSION "7.10.3")
SET(PACKAGE_TARNAME " ")
SET(OPERATING_SYSTEM ${CMAKE_SYSTEM_NAME})
diff --git a/Source/CTest/Curl/base64.c b/Source/CTest/Curl/base64.c
index 41c451a..183df27 100644
--- a/Source/CTest/Curl/base64.c
+++ b/Source/CTest/Curl/base64.c
@@ -61,6 +61,8 @@ static void decodeQuantum(unsigned char *dest, char *src)
x = (x << 6) + 62;
else if(src[i] == '/')
x = (x << 6) + 63;
+ else if(src[i] == '=')
+ x = (x << 6);
}
dest[2] = (unsigned char)(x & 255); x >>= 8;
@@ -78,6 +80,7 @@ static void base64Decode(unsigned char *dest, char *src, int *rawLength)
int length = 0;
int equalsTerm = 0;
int i;
+ int numQuantums;
unsigned char lastQuantum[3];
while((src[length] != '=') && src[length])
@@ -85,16 +88,18 @@ static void base64Decode(unsigned char *dest, char *src, int *rawLength)
while(src[length+equalsTerm] == '=')
equalsTerm++;
+ numQuantums = (length + equalsTerm) / 4;
if(rawLength)
- *rawLength = (length * 3 / 4) - equalsTerm;
+ *rawLength = (numQuantums * 3) - equalsTerm;
- for(i = 0; i < length/4 - 1; i++) {
+ for(i = 0; i < numQuantums - 1; i++) {
decodeQuantum(dest, src);
dest += 3; src += 4;
}
decodeQuantum(lastQuantum, src);
- for(i = 0; i < 3 - equalsTerm; i++) dest[i] = lastQuantum[i];
+ for(i = 0; i < 3 - equalsTerm; i++)
+ dest[i] = lastQuantum[i];
}
@@ -194,20 +199,21 @@ int Curl_base64_decode(const char *str, void *data)
#define TEST_NEED_SUCK
void *suck(int *);
-int main(int argc, char **argv, char **envp) {
- char *base64;
- int base64Len;
- unsigned char *data;
- int dataLen;
+int main(int argc, char **argv, char **envp)
+{
+ char *base64;
+ int base64Len;
+ unsigned char *data;
+ int dataLen;
- data = (unsigned char *)suck(&dataLen);
- base64Len = Curl_base64_encode(data, dataLen, &base64);
-
- fprintf(stderr, "%d\n", base64Len);
- fprintf(stdout, "%s", base64);
+ data = (unsigned char *)suck(&dataLen);
+ base64Len = Curl_base64_encode(data, dataLen, &base64);
- free(base64); free(data);
- return 0;
+ fprintf(stderr, "%d\n", base64Len);
+ fprintf(stdout, "%s", base64);
+
+ free(base64); free(data);
+ return 0;
}
#endif
@@ -220,47 +226,47 @@ int main(int argc, char **argv, char **envp) {
#define TEST_NEED_SUCK
void *suck(int *);
-int main(int argc, char **argv, char **envp) {
- char *base64;
- int base64Len;
- unsigned char *data;
- int dataLen;
-
- base64 = (char *)suck(&base64Len);
- data = (unsigned char *)malloc(base64Len * 3/4 + 8);
- dataLen = Curl_base64_decode(base64, data);
-
- fprintf(stderr, "%d\n", dataLen);
- fwrite(data,1,dataLen,stdout);
+int main(int argc, char **argv, char **envp)
+{
+ char *base64;
+ int base64Len;
+ unsigned char *data;
+ int dataLen;
-
- free(base64); free(data);
- return 0;
+ base64 = (char *)suck(&base64Len);
+ data = (unsigned char *)malloc(base64Len * 3/4 + 8);
+ dataLen = Curl_base64_decode(base64, data);
+
+ fprintf(stderr, "%d\n", dataLen);
+ fwrite(data,1,dataLen,stdout);
+
+ free(base64); free(data);
+ return 0;
}
#endif
#ifdef TEST_NEED_SUCK
/* this function 'sucks' in as much as possible from stdin */
-void *suck(int *lenptr) {
- int cursize = 8192;
- unsigned char *buf = NULL;
- int lastread;
- int len = 0;
-
- do {
- cursize *= 2;
- buf = (unsigned char *)realloc(buf, cursize);
- memset(buf + len, 0, cursize - len);
- lastread = fread(buf + len, 1, cursize - len, stdin);
- len += lastread;
- } while(!feof(stdin));
+void *suck(int *lenptr)
+{
+ int cursize = 8192;
+ unsigned char *buf = NULL;
+ int lastread;
+ int len = 0;
- lenptr[0] = len;
- return (void *)buf;
+ do {
+ cursize *= 2;
+ buf = (unsigned char *)realloc(buf, cursize);
+ memset(buf + len, 0, cursize - len);
+ lastread = fread(buf + len, 1, cursize - len, stdin);
+ len += lastread;
+ } while(!feof(stdin));
+
+ lenptr[0] = len;
+ return (void *)buf;
}
#endif
-
/*
* local variables:
* eval: (load-file "../curl-mode.el")
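
A quick worked check of the corrected length math above (illustration only, not part of the patch): for the padded input "SGVsbG8=", the scan stops after 7 characters at the '=', so length is 7 and equalsTerm is 1. The new code computes numQuantums = (7 + 1) / 4 = 2 and reports *rawLength = 2 * 3 - 1 = 5, matching the five decoded bytes of "Hello". The old formula, (7 * 3 / 4) - 1 = 4, lost a byte to integer truncation, and the old loop bound length/4 - 1 = 0 skipped a full quantum for the same reason.
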
diff --git a/Source/CTest/Curl/connect.c b/Source/CTest/Curl/connect.c
index eb6f4f3..5aa9f8b 100644
--- a/Source/CTest/Curl/connect.c
+++ b/Source/CTest/Curl/connect.c
@@ -176,10 +176,9 @@ int waitconnect(int sockfd, /* socket */
/* timeout, no connect today */
return 1;
- if(FD_ISSET(sockfd, &errfd)) {
+ if(FD_ISSET(sockfd, &errfd))
/* error condition caught */
return 2;
- }
/* we have a connect! */
return 0;
@@ -380,6 +379,11 @@ CURLcode Curl_is_connected(struct connectdata *conn,
return CURLE_OPERATION_TIMEOUTED;
}
}
+ if(conn->bits.tcpconnect) {
+ /* we are connected already! */
+ *connected = TRUE;
+ return CURLE_OK;
+ }
/* check for connect without timeout as we want to return immediately */
rc = waitconnect(sockfd, 0);
@@ -646,6 +650,15 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
}
}
+ /* The '1 == rc' comes from the waitconnect(), and not from connect().
+ We can be sure of this since connect() cannot return 1. */
+ if((1 == rc) && (data->state.used_interface == Curl_if_multi)) {
+ /* Timeout when running the multi interface, we return here with a
+ CURLE_OK return code. */
+ rc = 0;
+ break;
+ }
+
if(0 == rc) {
int err = socketerror(sockfd);
if ((0 == err) || (EISCONN == err)) {
@@ -658,12 +671,6 @@ CURLcode Curl_connecthost(struct connectdata *conn, /* context */
}
if(0 != rc) {
- if(data->state.used_interface == Curl_if_multi) {
- /* When running the multi interface, we bail out here */
- rc = 0;
- break;
- }
-
/* get a new timeout for next attempt */
after = Curl_tvnow();
timeout_ms -= Curl_tvdiff(after, before);
diff --git a/Source/CTest/Curl/curl/curl.h b/Source/CTest/Curl/curl/curl.h
index c56c965..689b585 100644
--- a/Source/CTest/Curl/curl/curl.h
+++ b/Source/CTest/Curl/curl/curl.h
@@ -66,9 +66,9 @@ struct curl_httppost {
char *contents; /* pointer to allocated data contents */
long contentslength; /* length of contents field */
- /* CMC: Added support for buffer uploads */
- char *buffer; /* pointer to allocated buffer contents */
- long bufferlength; /* length of buffer field */
+ /* CMC: Added support for buffer uploads */
+ char *buffer; /* pointer to allocated buffer contents */
+ long bufferlength; /* length of buffer field */
char *contenttype; /* Content-Type */
struct curl_slist* contentheader; /* list of extra headers for this form */
@@ -96,7 +96,9 @@ typedef int (*curl_progress_callback)(void *clientp,
double ultotal,
double ulnow);
-#define CURL_MAX_WRITE_SIZE 20480
+ /* Tests have proven that 20K is a very bad buffer size for uploads on
+ Windows, while 16K for some odd reason performed a lot better. */
+#define CURL_MAX_WRITE_SIZE 16384
typedef size_t (*curl_write_callback)(char *buffer,
size_t size,
@@ -160,7 +162,7 @@ typedef enum {
CURLE_FTP_COULDNT_RETR_FILE, /* 19 */
CURLE_FTP_WRITE_ERROR, /* 20 */
CURLE_FTP_QUOTE_ERROR, /* 21 */
- CURLE_HTTP_NOT_FOUND, /* 22 */
+ CURLE_HTTP_RETURNED_ERROR, /* 22 */
CURLE_WRITE_ERROR, /* 23 */
CURLE_MALFORMAT_USER, /* 24 - user name is illegally specified */
CURLE_FTP_COULDNT_STOR_FILE, /* 25 - failed FTP upload */
@@ -207,6 +209,7 @@ typedef enum {
/* Make a spelling correction for the operation timed-out define */
#define CURLE_OPERATION_TIMEDOUT CURLE_OPERATION_TIMEOUTED
+#define CURLE_HTTP_NOT_FOUND CURLE_HTTP_RETURNED_ERROR
typedef enum {
CURLPROXY_HTTP = 0,
@@ -610,6 +613,11 @@ typedef enum {
the response to be compressed. */
CINIT(ENCODING, OBJECTPOINT, 102),
+ /* Set pointer to private data */
+ CINIT(PRIVATE, OBJECTPOINT, 103),
+
+ /* Set aliases for HTTP 200 in the HTTP Response header */
+ CINIT(HTTP200ALIASES, OBJECTPOINT, 104),
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
@@ -803,8 +811,8 @@ CURLcode curl_global_init(long flags);
void curl_global_cleanup(void);
/* This is the version number */
-#define LIBCURL_VERSION "7.10.2"
-#define LIBCURL_VERSION_NUM 0x070a02
+#define LIBCURL_VERSION "7.10.3"
+#define LIBCURL_VERSION_NUM 0x070a03
/* linked-list structure for the CURLOPT_QUOTE option (and other) */
struct curl_slist {
@@ -861,16 +869,13 @@ typedef enum {
CURLINFO_REDIRECT_TIME = CURLINFO_DOUBLE + 19,
CURLINFO_REDIRECT_COUNT = CURLINFO_LONG + 20,
+ CURLINFO_PRIVATE = CURLINFO_STRING + 21,
+
/* Fill in new entries here! */
- CURLINFO_LASTONE = 21
+ CURLINFO_LASTONE = 22
} CURLINFO;
-/* unfortunately, the easy.h and multi.h include files need options and info
- stuff before they can be included! */
-#include "easy.h" /* nothing in curl is fun without the easy stuff */
-#include "multi.h"
-
typedef enum {
CURLCLOSEPOLICY_NONE, /* first, never use this */
@@ -894,35 +899,56 @@ typedef enum {
* Setup defines, protos etc for the sharing stuff.
*/
-/* Different types of locks that a share can aquire */
+/* Different data locks for a single share */
typedef enum {
- CURL_LOCK_TYPE_NONE = 0,
- CURL_LOCK_TYPE_COOKIE = 1<<0,
- CURL_LOCK_TYPE_DNS = 1<<1,
- CURL_LOCK_TYPE_SSL_SESSION = 2<<1,
- CURL_LOCK_TYPE_CONNECT = 2<<2,
- CURL_LOCK_TYPE_LAST
-} curl_lock_type;
+ CURL_LOCK_DATA_NONE = 0,
+ CURL_LOCK_DATA_COOKIE = 1,
+ CURL_LOCK_DATA_DNS = 2,
+ CURL_LOCK_DATA_SSL_SESSION = 3,
+ CURL_LOCK_DATA_CONNECT = 4,
+ CURL_LOCK_DATA_LAST
+} curl_lock_data;
+
+/* Different lock access types */
+typedef enum {
+ CURL_LOCK_ACCESS_NONE = 0, /* unspecified action */
+ CURL_LOCK_ACCESS_SHARED = 1, /* for read perhaps */
+ CURL_LOCK_ACCESS_SINGLE = 2, /* for write perhaps */
+ CURL_LOCK_ACCESS_LAST /* never use */
+} curl_lock_access;
+
+typedef void (*curl_lock_function)(CURL *handle,
+ curl_lock_data data,
+ curl_lock_access access,
+ void *userptr);
+typedef void (*curl_unlock_function)(CURL *handle,
+ curl_lock_data data,
+ void *userptr);
+
+typedef void CURLSH;
-typedef void (*curl_lock_function)(CURL *, curl_lock_type, void *);
-typedef void (*curl_unlock_function)(CURL *, curl_lock_type, void *);
+typedef enum {
+ CURLSHE_OK, /* all is fine */
+ CURLSHE_BAD_OPTION, /* 1 */
+ CURLSHE_IN_USE, /* 2 */
+ CURLSHE_INVALID, /* 3 */
+ CURLSHE_LAST /* never use */
+} CURLSHcode;
-typedef struct {
- unsigned int specifier;
- unsigned int locked;
- unsigned int dirty;
-
- curl_lock_function lockfunc;
- curl_unlock_function unlockfunc;
- void *clientdata;
-} curl_share;
-
-curl_share *curl_share_init (void);
-CURLcode curl_share_setopt (curl_share *, curl_lock_type, int);
-CURLcode curl_share_set_lock_function (curl_share *, curl_lock_function);
-CURLcode curl_share_set_unlock_function (curl_share *, curl_unlock_function);
-CURLcode curl_share_set_lock_data (curl_share *, void *);
-CURLcode curl_share_destroy (curl_share *);
+typedef enum {
+ CURLSHOPT_NONE, /* don't use */
+ CURLSHOPT_SHARE, /* specify a data type to share */
+ CURLSHOPT_UNSHARE, /* specify shich data type to stop sharing */
+ CURLSHOPT_LOCKFUNC, /* pass in a 'curl_lock_function' pointer */
+ CURLSHOPT_UNLOCKFUNC, /* pass in a 'curl_unlock_function' pointer */
+ CURLSHOPT_USERDATA, /* pass in a user data pointer used in the lock/unlock
+ callback functions */
+ CURLSHOPT_LAST /* never use */
+} CURLSHoption;
+
+CURLSH *curl_share_init(void);
+CURLSHcode curl_share_setopt(CURLSH *, CURLSHoption option, ...);
+CURLSHcode curl_share_cleanup(CURLSH *);
/****************************************************************************
* Structures for querying information about the curl library at runtime.
@@ -965,4 +991,9 @@ curl_version_info_data *curl_version_info(CURLversion);
}
#endif
+/* unfortunately, the easy.h and multi.h include files need options and info
+ stuff before they can be included! */
+#include "easy.h" /* nothing in curl is fun without the easy stuff */
+#include "multi.h"
+
#endif /* __CURL_CURL_H */
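
For orientation (not part of the patch): a minimal sketch of how the share interface declared above might be wired up, using only the symbols this header hunk introduces. The callback bodies are empty stand-ins for a real mutex, the function names are made up, and attaching the share to an easy handle (CURLOPT_SHARE) is assumed rather than shown in this hunk.

#include <curl/curl.h>

static void my_lock(CURL *handle, curl_lock_data data,
                    curl_lock_access access, void *userptr)
{
  /* illustrative stub: a real callback would take an application mutex
     guarding the shared 'data' (DNS cache, cookies, ...) */
  (void)handle; (void)data; (void)access; (void)userptr;
}

static void my_unlock(CURL *handle, curl_lock_data data, void *userptr)
{
  /* illustrative stub: release the matching mutex */
  (void)handle; (void)data; (void)userptr;
}

void share_example(void)
{
  CURLSH *share = curl_share_init();
  if(!share)
    return;

  /* share the DNS cache among the easy handles that use this object */
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
  curl_share_setopt(share, CURLSHOPT_LOCKFUNC, my_lock);
  curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, my_unlock);
  curl_share_setopt(share, CURLSHOPT_USERDATA, NULL);

  /* ... attach the share to easy handles and run transfers here ... */

  curl_share_cleanup(share);
}
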
diff --git a/Source/CTest/Curl/easy.c b/Source/CTest/Curl/easy.c
index b128180..145db5a 100644
--- a/Source/CTest/Curl/easy.c
+++ b/Source/CTest/Curl/easy.c
@@ -233,15 +233,17 @@ CURLcode curl_easy_perform(CURL *curl)
{
struct SessionHandle *data = (struct SessionHandle *)curl;
- if (!data->hostcache) {
- if (Curl_global_host_cache_use(data)) {
- data->hostcache = Curl_global_host_cache_get();
- }
- else {
- data->hostcache = Curl_hash_alloc(7, Curl_freeaddrinfo);
+ if (Curl_global_host_cache_use(data) && data->hostcache != Curl_global_host_cache_get()) {
+ if (data->hostcache) {
+ Curl_hash_destroy(data->hostcache);
}
+ data->hostcache = Curl_global_host_cache_get();
}
+ if (!data->hostcache) {
+ data->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
+ }
+
return Curl_perform(data);
}
diff --git a/Source/CTest/Curl/escape.c b/Source/CTest/Curl/escape.c
index 0ec7ae5..b35333d 100644
--- a/Source/CTest/Curl/escape.c
+++ b/Source/CTest/Curl/escape.c
@@ -41,6 +41,7 @@ char *curl_escape(const char *string, int length)
{
int alloc = (length?length:(int)strlen(string))+1;
char *ns = malloc(alloc);
+ char *testing_ptr = NULL;
unsigned char in;
int newlen = alloc;
int index=0;
@@ -55,9 +56,14 @@ char *curl_escape(const char *string, int length)
newlen += 2; /* the size grows with two, since this'll become a %XX */
if(newlen > alloc) {
alloc *= 2;
- ns = realloc(ns, alloc);
- if(!ns)
+ testing_ptr = realloc(ns, alloc);
+ if(!testing_ptr) {
+ free( ns );
return NULL;
+ }
+ else {
+ ns = testing_ptr;
+ }
}
sprintf(&ns[index], "%%%02X", in);
@@ -80,6 +86,10 @@ char *curl_unescape(const char *string, int length)
unsigned char in;
int index=0;
unsigned int hex;
+
+ if( !ns ) {
+ return NULL;
+ }
while(--alloc > 0) {
in = *string;
@@ -97,7 +107,6 @@ char *curl_unescape(const char *string, int length)
}
ns[index]=0; /* terminate it */
return ns;
-
}
void curl_free(void *p)
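
The escape.c change above is the leak-safe realloc idiom; a stand-alone sketch of the same pattern for reference (names are made up, not part of the patch):

#include <stdlib.h>

/* Grow a buffer without leaking it: keep the old pointer until realloc()
   is known to have succeeded, as the patched curl_escape() now does. */
char *grow_buffer(char *buf, int *alloc)
{
  char *bigger = realloc(buf, *alloc * 2);
  if(!bigger) {
    free(buf);    /* realloc failed: the old block is still valid, free it */
    return NULL;
  }
  *alloc *= 2;
  return bigger;
}
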
diff --git a/Source/CTest/Curl/formdata.c b/Source/CTest/Curl/formdata.c
index b3503f3..15cbea0 100644
--- a/Source/CTest/Curl/formdata.c
+++ b/Source/CTest/Curl/formdata.c
@@ -1319,7 +1319,7 @@ int Curl_FormReader(char *buffer,
wantedsize = size * nitems;
if(!form->data)
- return -1; /* nothing, error, empty */
+ return 0; /* nothing, error, empty */
do {
diff --git a/Source/CTest/Curl/ftp.c b/Source/CTest/Curl/ftp.c
index ac5f841..9d4af81 100644
--- a/Source/CTest/Curl/ftp.c
+++ b/Source/CTest/Curl/ftp.c
@@ -173,9 +173,9 @@ static CURLcode AllowServerConnect(struct SessionHandle *data,
* response and extract the relevant return code for the invoking function.
*/
-int Curl_GetFTPResponse(char *buf,
- struct connectdata *conn,
- int *ftpcode)
+CURLcode Curl_GetFTPResponse(int *nreadp, /* return number of bytes read */
+ struct connectdata *conn,
+ int *ftpcode) /* return the ftp-code */
{
/* Brand new implementation.
* We cannot read just one byte per read() and then go back to select()
@@ -185,28 +185,21 @@ int Curl_GetFTPResponse(char *buf,
* line in a response or continue reading. */
int sockfd = conn->firstsocket;
- int nread; /* total size read */
int perline; /* count bytes per line */
bool keepon=TRUE;
ssize_t gotbytes;
char *ptr;
- int timeout = 3600; /* default timeout in seconds */
+ int timeout; /* timeout in seconds */
struct timeval interval;
fd_set rkeepfd;
fd_set readfd;
struct SessionHandle *data = conn->data;
char *line_start;
- int code=0; /* default "error code" to return */
-
-#define SELECT_OK 0
-#define SELECT_ERROR 1 /* select() problems */
-#define SELECT_TIMEOUT 2 /* took too long */
-#define SELECT_MEMORY 3 /* no available memory */
-#define SELECT_CALLBACK 4 /* aborted by callback */
-
- int error = SELECT_OK;
-
+ int code=0; /* default ftp "error code" to return */
+ char *buf = data->state.buffer;
+ CURLcode result = CURLE_OK;
struct FTP *ftp = conn->proto.ftp;
+ struct timeval now = Curl_tvnow();
if (ftpcode)
*ftpcode = 0; /* 0 for errors */
@@ -221,20 +214,25 @@ int Curl_GetFTPResponse(char *buf,
ptr=buf;
line_start = buf;
- nread=0;
+ *nreadp=0;
perline=0;
keepon=TRUE;
- while((nread<BUFSIZE) && (keepon && !error)) {
+ while((*nreadp<BUFSIZE) && (keepon && !result)) {
/* check and reset timeout value every lap */
- if(data->set.timeout) {
+ if(data->set.timeout)
/* if timeout is requested, find out how much remaining time we have */
timeout = data->set.timeout - /* timeout time */
Curl_tvdiff(Curl_tvnow(), conn->now)/1000; /* spent time */
- if(timeout <=0 ) {
- failf(data, "Transfer aborted due to timeout");
- return -SELECT_TIMEOUT; /* already too little time */
- }
+ else
+ /* Even without a requested timeout, we only wait response_time
+ seconds for the full response to arrive before we bail out */
+ timeout = ftp->response_time -
+ Curl_tvdiff(Curl_tvnow(), now)/1000; /* spent time */
+
+ if(timeout <=0 ) {
+ failf(data, "Transfer aborted due to timeout");
+ return CURLE_OPERATION_TIMEDOUT; /* already too little time */
}
if(!ftp->cache) {
@@ -244,19 +242,18 @@ int Curl_GetFTPResponse(char *buf,
switch (select (sockfd+1, &readfd, NULL, NULL, &interval)) {
case -1: /* select() error, stop reading */
- error = SELECT_ERROR;
- failf(data, "Transfer aborted due to select() error");
+ result = CURLE_RECV_ERROR;
+ failf(data, "Transfer aborted due to select() error: %d", errno);
break;
case 0: /* timeout */
- error = SELECT_TIMEOUT;
+ result = CURLE_OPERATION_TIMEDOUT;
failf(data, "Transfer aborted due to timeout");
break;
default:
- error = SELECT_OK;
break;
}
}
- if(SELECT_OK == error) {
+ if(CURLE_OK == result) {
/*
* This code previously didn't use the kerberos sec_read() code
* to read, but when we use Curl_read() it may do so. Do confirm
@@ -272,8 +269,7 @@ int Curl_GetFTPResponse(char *buf,
ftp->cache_size = 0; /* zero the size just in case */
}
else {
- int res = Curl_read(conn, sockfd, ptr,
- BUFSIZE-nread, &gotbytes);
+ int res = Curl_read(conn, sockfd, ptr, BUFSIZE-*nreadp, &gotbytes);
if(res < 0)
/* EWOULDBLOCK */
continue; /* go looping again */
@@ -286,7 +282,7 @@ int Curl_GetFTPResponse(char *buf,
;
else if(gotbytes <= 0) {
keepon = FALSE;
- error = SELECT_ERROR;
+ result = CURLE_RECV_ERROR;
failf(data, "Connection aborted");
}
else {
@@ -295,7 +291,7 @@ int Curl_GetFTPResponse(char *buf,
* line */
int i;
- nread += gotbytes;
+ *nreadp += gotbytes;
for(i = 0; i < gotbytes; ptr++, i++) {
perline++;
if(*ptr=='\n') {
@@ -315,7 +311,7 @@ int Curl_GetFTPResponse(char *buf,
result = Curl_client_write(data, CLIENTWRITE_HEADER,
line_start, perline);
if(result)
- return -SELECT_CALLBACK;
+ return result;
#define lastline(line) (isdigit((int)line[0]) && isdigit((int)line[1]) && \
isdigit((int)line[2]) && (' ' == line[3]))
@@ -350,13 +346,13 @@ int Curl_GetFTPResponse(char *buf,
if(ftp->cache)
memcpy(ftp->cache, line_start, ftp->cache_size);
else
- return -SELECT_MEMORY; /**BANG**/
+ return CURLE_OUT_OF_MEMORY; /**BANG**/
}
} /* there was data */
} /* if(no error) */
} /* while there's buffer left and loop is requested */
- if(!error)
+ if(!result)
code = atoi(buf);
#ifdef KRB4
@@ -378,13 +374,10 @@ int Curl_GetFTPResponse(char *buf,
}
#endif
- if(error)
- return -error;
-
if(ftpcode)
*ftpcode=code; /* return the initial number like this */
- return nread; /* total amount of bytes read */
+ return result;
}
/*
@@ -417,6 +410,7 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* no need to duplicate them, the data struct won't change */
ftp->user = data->state.user;
ftp->passwd = data->state.passwd;
+ ftp->response_time = 3600; /* set default response time-out */
if (data->set.tunnel_thru_httpproxy) {
/* We want "seamless" FTP operations through HTTP proxy tunnel */
@@ -436,9 +430,9 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* The first thing we do is wait for the "220*" line: */
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode != 220) {
failf(data, "This doesn't seem like a nice ftp-server response");
@@ -467,9 +461,9 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
FTPSENDF(conn, "USER %s", ftp->user);
/* wait for feedback */
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode == 530) {
/* 530 User ... access denied
@@ -481,9 +475,9 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* 331 Password required for ...
(the server requires to send the user's password too) */
FTPSENDF(conn, "PASS %s", ftp->passwd);
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode == 530) {
/* 530 Login incorrect.
@@ -516,8 +510,11 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
/* we may need to issue a KAUTH here to have access to the files
* do it if user supplied a password
*/
- if(data->state.passwd && *data->state.passwd)
- Curl_krb_kauth(conn);
+ if(data->state.passwd && *data->state.passwd) {
+ result = Curl_krb_kauth(conn);
+ if(result)
+ return result;
+ }
#endif
}
else {
@@ -529,9 +526,9 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
FTPSENDF(conn, "PWD", NULL);
/* wait for feedback */
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode == 257) {
char *dir = (char *)malloc(nread+1);
@@ -544,7 +541,7 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
The directory name can contain any character; embedded double-quotes
should be escaped by double-quotes (the "quote-doubling" convention).
*/
- if('\"' == *ptr) {
+ if(dir && ('\"' == *ptr)) {
/* it started good */
ptr++;
while(ptr && *ptr) {
@@ -570,6 +567,8 @@ CURLcode Curl_ftp_connect(struct connectdata *conn)
}
else {
/* couldn't get the path */
+ free(dir);
+ infof(data, "Failed to figure out path\n");
}
}
@@ -594,7 +593,6 @@ CURLcode Curl_ftp_done(struct connectdata *conn)
struct SessionHandle *data = conn->data;
struct FTP *ftp = conn->proto.ftp;
ssize_t nread;
- char *buf = data->state.buffer; /* this is our buffer */
int ftpcode;
CURLcode result=CURLE_OK;
@@ -633,11 +631,24 @@ CURLcode Curl_ftp_done(struct connectdata *conn)
conn->secondarysocket = -1;
if(!ftp->no_transfer) {
- /* now let's see what the server says about the transfer we just
- performed: */
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ /* Let's see what the server says about the transfer we just performed,
+ but lower the timeout as sometimes this connection has died while
+ the data has been transfered. This happens when doing through NATs
+ etc that abandon old silent connections.
+ */
+ ftp->response_time = 60; /* give it only a minute for now */
+
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+
+ ftp->response_time = 3600; /* set this back to one hour waits */
+
+ if(!nread && (CURLE_OPERATION_TIMEDOUT == result)) {
+ failf(data, "control connection looks dead");
+ return result;
+ }
+
+ if(result)
+ return result;
if(!ftp->dont_check) {
/* 226 Transfer complete, 250 Requested file action okay, completed. */
@@ -680,9 +691,9 @@ CURLcode ftp_sendquote(struct connectdata *conn, struct curl_slist *quote)
if (item->data) {
FTPSENDF(conn, "%s", item->data);
- nread = Curl_GetFTPResponse(conn->data->state.buffer, conn, &ftpcode);
- if (nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if (result)
+ return result;
if (ftpcode >= 400) {
failf(conn->data, "QUOT string not accepted: %s", item->data);
@@ -711,9 +722,9 @@ CURLcode ftp_cwd(struct connectdata *conn, char *path)
CURLcode result;
FTPSENDF(conn, "CWD %s", path);
- nread = Curl_GetFTPResponse(conn->data->state.buffer, conn, &ftpcode);
- if (nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if (result)
+ return result;
if (ftpcode != 250) {
failf(conn->data, "Couldn't cd to %s", path);
@@ -741,26 +752,34 @@ CURLcode ftp_getfiletime(struct connectdata *conn, char *file)
again a grey area as the MDTM is not kosher RFC959 */
FTPSENDF(conn, "MDTM %s", file);
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
- if(ftpcode == 213) {
- /* we got a time. Format should be: "YYYYMMDDHHMMSS[.sss]" where the
- last .sss part is optional and means fractions of a second */
- int year, month, day, hour, minute, second;
- if(6 == sscanf(buf+4, "%04d%02d%02d%02d%02d%02d",
- &year, &month, &day, &hour, &minute, &second)) {
- /* we have a time, reformat it */
- time_t secs=time(NULL);
- sprintf(buf, "%04d%02d%02d %02d:%02d:%02d",
- year, month, day, hour, minute, second);
- /* now, convert this into a time() value: */
- conn->data->info.filetime = curl_getdate(buf, &secs);
- }
- else {
- infof(conn->data, "unsupported MDTM reply format\n");
+ switch(ftpcode) {
+ case 213:
+ {
+ /* we got a time. Format should be: "YYYYMMDDHHMMSS[.sss]" where the
+ last .sss part is optional and means fractions of a second */
+ int year, month, day, hour, minute, second;
+ if(6 == sscanf(buf+4, "%04d%02d%02d%02d%02d%02d",
+ &year, &month, &day, &hour, &minute, &second)) {
+ /* we have a time, reformat it */
+ time_t secs=time(NULL);
+ sprintf(buf, "%04d%02d%02d %02d:%02d:%02d",
+ year, month, day, hour, minute, second);
+ /* now, convert this into a time() value: */
+ conn->data->info.filetime = curl_getdate(buf, &secs);
+ }
}
+ break;
+ default:
+ infof(conn->data, "unsupported MDTM reply format\n");
+ break;
+ case 550: /* "No such file or directory" */
+ failf(conn->data, "Given file does not exist");
+ result = CURLE_FTP_COULDNT_RETR_FILE;
+ break;
}
return result;
}
@@ -778,14 +797,13 @@ static CURLcode ftp_transfertype(struct connectdata *conn,
struct SessionHandle *data = conn->data;
int ftpcode;
ssize_t nread;
- char *buf=data->state.buffer;
CURLcode result;
FTPSENDF(conn, "TYPE %s", ascii?"A":"I");
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode != 200) {
failf(data, "Couldn't set %s mode",
@@ -814,9 +832,9 @@ CURLcode ftp_getsize(struct connectdata *conn, char *file,
CURLcode result;
FTPSENDF(conn, "SIZE %s", file);
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode == 213) {
/* get the size from the ascii string: */
@@ -975,7 +993,6 @@ CURLcode ftp_use_port(struct connectdata *conn)
struct SessionHandle *data=conn->data;
int portsock=-1;
ssize_t nread;
- char *buf = data->state.buffer; /* this is our buffer */
int ftpcode; /* receive FTP response codes in this */
CURLcode result;
@@ -1155,9 +1172,9 @@ CURLcode ftp_use_port(struct connectdata *conn)
return result;
}
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if (ftpcode != 200) {
failf(data, "Server does not grok %s", *modep);
@@ -1301,9 +1318,9 @@ CURLcode ftp_use_port(struct connectdata *conn)
return result;
}
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode != 200) {
failf(data, "Server does not grok PORT, try without it!");
@@ -1375,9 +1392,9 @@ CURLcode ftp_use_pasv(struct connectdata *conn,
result = Curl_ftpsendf(conn, "%s", mode[modeoff]);
if(result)
return result;
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if (ftpcode == results[modeoff])
break;
}
@@ -1522,7 +1539,7 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
ssize_t nread;
int ftpcode; /* for ftp status */
- /* the ftp struct is already inited in ftp_connect() */
+ /* the ftp struct is already inited in Curl_ftp_connect() */
struct FTP *ftp = conn->proto.ftp;
long *bytecountp = ftp->bytecountp;
@@ -1582,8 +1599,8 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
readthisamountnow = BUFSIZE;
actuallyread =
- data->set.fread(data->state.buffer, 1, readthisamountnow,
- data->set.in);
+ conn->fread(data->state.buffer, 1, readthisamountnow,
+ conn->fread_in);
passed += actuallyread;
if(actuallyread != readthisamountnow) {
@@ -1614,7 +1631,7 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
}
}
- /* Send everything on data->set.in to the socket */
+ /* Send everything on data->state.in to the socket */
if(data->set.ftp_append) {
/* we append onto the file instead of rewriting it */
FTPSENDF(conn, "APPE %s", ftp->file);
@@ -1623,9 +1640,9 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
FTPSENDF(conn, "STOR %s", ftp->file);
}
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode>=400) {
failf(data, "Failed FTP upload:%s", buf+3);
@@ -1799,9 +1816,9 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
FTPSENDF(conn, "REST %d", conn->resume_from);
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if(ftpcode != 350) {
failf(data, "Couldn't use REST: %s", buf+4);
@@ -1812,9 +1829,9 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
FTPSENDF(conn, "RETR %s", ftp->file);
}
- nread = Curl_GetFTPResponse(buf, conn, &ftpcode);
- if(nread < 0)
- return CURLE_OPERATION_TIMEOUTED;
+ result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
+ if(result)
+ return result;
if((ftpcode == 150) || (ftpcode == 125)) {
@@ -1919,7 +1936,7 @@ CURLcode ftp_perform(struct connectdata *conn,
struct SessionHandle *data=conn->data;
char *buf = data->state.buffer; /* this is our buffer */
- /* the ftp struct is already inited in ftp_connect() */
+ /* the ftp struct is already inited in Curl_ftp_connect() */
struct FTP *ftp = conn->proto.ftp;
/* Send any QUOTE strings? */
@@ -1980,7 +1997,7 @@ CURLcode ftp_perform(struct connectdata *conn,
well, we "emulate" a HTTP-style header in our output. */
#ifdef HAVE_STRFTIME
- if(data->set.get_filetime && data->info.filetime) {
+ if(data->set.get_filetime && (data->info.filetime>=0) ) {
struct tm *tm;
#ifdef HAVE_LOCALTIME_R
struct tm buffer;
diff --git a/Source/CTest/Curl/ftp.h b/Source/CTest/Curl/ftp.h
index 2f8c15a..60440db 100644
--- a/Source/CTest/Curl/ftp.h
+++ b/Source/CTest/Curl/ftp.h
@@ -29,8 +29,8 @@ CURLcode Curl_ftp_done(struct connectdata *conn);
CURLcode Curl_ftp_connect(struct connectdata *conn);
CURLcode Curl_ftp_disconnect(struct connectdata *conn);
CURLcode Curl_ftpsendf(struct connectdata *, const char *fmt, ...);
-int Curl_GetFTPResponse(char *buf, struct connectdata *conn,
- int *ftpcode);
+CURLcode Curl_GetFTPResponse(int *nread, struct connectdata *conn,
+ int *ftpcode);
CURLcode Curl_ftp_nextconnect(struct connectdata *conn);
#endif
diff --git a/Source/CTest/Curl/getinfo.c b/Source/CTest/Curl/getinfo.c
index 6639833..f8a4a10 100644
--- a/Source/CTest/Curl/getinfo.c
+++ b/Source/CTest/Curl/getinfo.c
@@ -158,6 +158,9 @@ CURLcode Curl_getinfo(struct SessionHandle *data, CURLINFO info, ...)
case CURLINFO_CONTENT_TYPE:
*param_charp = data->info.contenttype;
break;
+ case CURLINFO_PRIVATE:
+ *param_charp = data->set.private?data->set.private:(char *)"";
+ break;
default:
return CURLE_BAD_FUNCTION_ARGUMENT;
}
diff --git a/Source/CTest/Curl/hostip.c b/Source/CTest/Curl/hostip.c
index 07fb1d2..d79e832 100644
--- a/Source/CTest/Curl/hostip.c
+++ b/Source/CTest/Curl/hostip.c
@@ -88,7 +88,7 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
void Curl_global_host_cache_init(void)
{
if (!host_cache_initialized) {
- Curl_hash_init(&hostname_cache, 7, Curl_freeaddrinfo);
+ Curl_hash_init(&hostname_cache, 7, Curl_freednsinfo);
host_cache_initialized = 1;
}
}
@@ -287,17 +287,25 @@ struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
/*
* This is a wrapper function for freeing name information in a protocol
* independent way. This takes care of using the appropriate underlaying
- * proper function.
+ * function.
*/
-void Curl_freeaddrinfo(void *freethis)
+void Curl_freeaddrinfo(Curl_addrinfo *p)
{
- struct Curl_dns_entry *p = (struct Curl_dns_entry *) freethis;
-
#ifdef ENABLE_IPV6
- freeaddrinfo(p->addr);
+ freeaddrinfo(p);
#else
- free(p->addr);
+ free(p);
#endif
+}
+
+/*
+ * Free a cache dns entry.
+ */
+void Curl_freednsinfo(void *freethis)
+{
+ struct Curl_dns_entry *p = (struct Curl_dns_entry *) freethis;
+
+ Curl_freeaddrinfo(p->addr);
free(p);
}
@@ -623,16 +631,28 @@ static Curl_addrinfo *my_getaddrinfo(struct SessionHandle *data,
&h, /* DIFFERENCE */
&h_errnop);
/* Redhat 8, using glibc 2.2.93 changed the behavior. Now all of a
- sudden this function seems to be setting EAGAIN if the given buffer
- size is too small. Previous versions are known to return ERANGE for
- the same. */
+ sudden this function returns EAGAIN if the given buffer size is too
+ small. Previous versions are known to return ERANGE for the same
+ problem.
+
+ This wouldn't be such a big problem if older versions wouldn't
+ sometimes return EAGAIN on a common failure case. Alas, we can't
+ assume that EAGAIN *or* ERANGE means ERANGE for any given version of
+ glibc.
+
+ For now, we do that and thus we may call the function repeatedly and
+ fail for older glibc versions that return EAGAIN, until we run out
+ of buffer size (step_size grows beyond CURL_NAMELOOKUP_SIZE).
+
+ If anyone has a better fix, please tell us!
+ */
if((ERANGE == res) || (EAGAIN == res)) {
step_size+=200;
continue;
}
break;
- } while(1);
+ } while(step_size <= CURL_NAMELOOKUP_SIZE);
if(!h) /* failure */
res=1;
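
For reference (not part of the patch): a self-contained sketch of the grow-and-retry pattern the comment above describes, assuming the six-argument glibc gethostbyname_r(). The function name is made up, the 200-byte step mirrors the hunk, and the 9000-byte ceiling merely stands in for the CURL_NAMELOOKUP_SIZE cap.

#include <errno.h>
#include <netdb.h>
#include <stdlib.h>

/* Resolve 'name', growing the scratch buffer whenever glibc reports it was
   too small (ERANGE, or EAGAIN on glibc 2.2.93 as noted above). Call with
   *bufp == NULL; on success 'result' points into *bufp, which the caller
   frees when done. */
static struct hostent *resolve_retry(const char *name, char **bufp)
{
  struct hostent hostbuf, *result = NULL;
  size_t size;
  int rc;
  int h_errnop = 0;

  for(size = 200; size <= 9000; size += 200) {
    char *newbuf = realloc(*bufp, size);
    if(!newbuf)
      break;
    *bufp = newbuf;

    rc = gethostbyname_r(name, &hostbuf, *bufp, size, &result, &h_errnop);
    if((rc != ERANGE) && (rc != EAGAIN))
      break;            /* resolved, or a genuine failure */
    result = NULL;      /* buffer too small: grow and try again */
  }
  return result;
}
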
diff --git a/Source/CTest/Curl/hostip.h b/Source/CTest/Curl/hostip.h
index 78a17e2..169ed17 100644
--- a/Source/CTest/Curl/hostip.h
+++ b/Source/CTest/Curl/hostip.h
@@ -65,7 +65,10 @@ struct Curl_dns_entry *Curl_resolv(struct SessionHandle *data,
void Curl_scan_cache_used(void *user, void *ptr);
/* free name info */
-void Curl_freeaddrinfo(void *freethis);
+void Curl_freeaddrinfo(Curl_addrinfo *freeaddr);
+
+/* free cached name info */
+void Curl_freednsinfo(void *freethis);
#ifdef MALLOCDEBUG
void curl_freeaddrinfo(struct addrinfo *freethis,
diff --git a/Source/CTest/Curl/http.c b/Source/CTest/Curl/http.c
index 24794e2..91ae720 100644
--- a/Source/CTest/Curl/http.c
+++ b/Source/CTest/Curl/http.c
@@ -98,12 +98,65 @@
#include "memdebug.h"
#endif
+/* fread() emulation to provide POST and/or request data */
+static int readmoredata(char *buffer,
+ size_t size,
+ size_t nitems,
+ void *userp)
+{
+ struct connectdata *conn = (struct connectdata *)userp;
+ struct HTTP *http = conn->proto.http;
+ int fullsize = size * nitems;
+
+ if(0 == http->postsize)
+ /* nothing to return */
+ return 0;
+
+ /* make sure that a HTTP request is never sent away chunked! */
+ conn->bits.forbidchunk= (http->sending == HTTPSEND_REQUEST)?TRUE:FALSE;
+
+ if(http->postsize <= fullsize) {
+ memcpy(buffer, http->postdata, http->postsize);
+ fullsize = http->postsize;
+
+ if(http->backup.postsize) {
+ /* move backup data into focus and continue on that */
+ http->postdata = http->backup.postdata;
+ http->postsize = http->backup.postsize;
+ conn->fread = http->backup.fread;
+ conn->fread_in = http->backup.fread_in;
+
+ http->sending++; /* move one step up */
+
+ http->backup.postsize=0;
+ }
+ else
+ http->postsize = 0;
+
+ return fullsize;
+ }
+
+ memcpy(buffer, http->postdata, fullsize);
+ http->postdata += fullsize;
+ http->postsize -= fullsize;
+
+ return fullsize;
+}
+
/* ------------------------------------------------------------------------- */
/*
* The add_buffer series of functions are used to build one large memory chunk
* from repeated function invokes. Used so that the entire HTTP request can
* be sent in one go.
*/
+
+struct send_buffer {
+ char *buffer;
+ size_t size_max;
+ size_t size_used;
+};
+typedef struct send_buffer send_buffer;
+
static CURLcode
add_buffer(send_buffer *in, const void *inptr, size_t size);
@@ -126,44 +179,66 @@ send_buffer *add_buffer_init(void)
* add_buffer_send() sends a buffer and frees all associated memory.
*/
static
-CURLcode add_buffer_send(int sockfd, struct connectdata *conn, send_buffer *in,
- long *bytes_written)
+CURLcode add_buffer_send(send_buffer *in,
+ int sockfd,
+ struct connectdata *conn,
+ long *bytes_written) /* add the number of sent
+ bytes to this counter */
{
ssize_t amount;
CURLcode res;
char *ptr;
int size;
+ struct HTTP *http = conn->proto.http;
/* The looping below is required since we use non-blocking sockets, but due
to the circumstances we will just loop and try again and again etc */
ptr = in->buffer;
size = in->size_used;
- do {
- res = Curl_write(conn, sockfd, ptr, size, &amount);
- if(CURLE_OK != res)
- break;
+ res = Curl_write(conn, sockfd, ptr, size, &amount);
+
+ if(CURLE_OK == res) {
if(conn->data->set.verbose)
/* this data _may_ contain binary stuff */
Curl_debug(conn->data, CURLINFO_HEADER_OUT, ptr, amount);
+ *bytes_written += amount;
+
if(amount != size) {
+ /* The whole request could not be sent in one system call. We must queue
+ it up and send it later when we get the chance. We must not loop here
+ and wait until it might work again. */
+
size -= amount;
ptr += amount;
+
+ /* backup the currently set pointers */
+ http->backup.fread = conn->fread;
+ http->backup.fread_in = conn->fread_in;
+ http->backup.postdata = http->postdata;
+ http->backup.postsize = http->postsize;
+
+ /* set the new pointers for the request-sending */
+ conn->fread = (curl_read_callback)readmoredata;
+ conn->fread_in = (void *)conn;
+ http->postdata = ptr;
+ http->postsize = size;
+
+ http->send_buffer = in;
+ http->sending = HTTPSEND_REQUEST;
+
+ return CURLE_OK;
}
- else
- break;
-
- } while(1);
+ /* the full buffer was sent, clean up and return */
+ }
if(in->buffer)
free(in->buffer);
free(in);
- *bytes_written += amount;
-
return res;
}
@@ -224,20 +299,74 @@ CURLcode add_buffer(send_buffer *in, const void *inptr, size_t size)
/* ------------------------------------------------------------------------- */
/*
+ * Curl_compareheader()
+ *
+ * Returns TRUE if 'headerline' contains the 'header' with given 'content'.
+ * Pass headers WITH the colon.
+ */
+bool
+Curl_compareheader(char *headerline, /* line to check */
+ const char *header, /* header keyword _with_ colon */
+ const char *content) /* content string to find */
+{
+ /* RFC2616, section 4.2 says: "Each header field consists of a name followed
+ * by a colon (":") and the field value. Field names are case-insensitive.
+ * The field value MAY be preceded by any amount of LWS, though a single SP
+ * is preferred." */
+
+ size_t hlen = strlen(header);
+ size_t clen;
+ size_t len;
+ char *start;
+ char *end;
+
+ if(!strnequal(headerline, header, hlen))
+ return FALSE; /* doesn't start with header */
+
+ /* pass the header */
+ start = &headerline[hlen];
+
+ /* pass all white spaces */
+ while(*start && isspace((int)*start))
+ start++;
+
+ /* find the end of the header line */
+ end = strchr(start, '\r'); /* lines end with CRLF */
+ if(!end) {
+ /* in case there's a non-standard compliant line here */
+ end = strchr(start, '\n');
+
+ if(!end)
+ /* hm, there's no line ending here, use the zero byte! */
+ end = strchr(start, '\0');
+ }
+
+ len = end-start; /* length of the content part of the input line */
+ clen = strlen(content); /* length of the word to find */
+
+ /* find the content string in the rest of the line */
+ for(;len>=clen;len--, start++) {
+ if(strnequal(start, content, clen))
+ return TRUE; /* match! */
+ }
+
+ return FALSE; /* no match */
+}
+
+/*
* This function checks the linked list of custom HTTP headers for a particular
* header (prefix).
*/
-static bool checkheaders(struct SessionHandle *data, const char *thisheader)
+static char *checkheaders(struct SessionHandle *data, const char *thisheader)
{
struct curl_slist *head;
size_t thislen = strlen(thisheader);
for(head = data->set.headers; head; head=head->next) {
- if(strnequal(head->data, thisheader, thislen)) {
- return TRUE;
- }
+ if(strnequal(head->data, thisheader, thislen))
+ return head->data;
}
- return FALSE;
+ return NULL;
}
/*
@@ -440,6 +569,10 @@ CURLcode Curl_http_connect(struct connectdata *conn)
if(conn->bits.user_passwd && !data->state.this_is_a_follow) {
/* Authorization: is requested, this is not a followed location, get the
original host name */
+ if (data->state.auth_host)
+ /* Free to avoid leaking memory on multiple requests*/
+ free(data->state.auth_host);
+
data->state.auth_host = strdup(conn->hostname);
}
@@ -454,13 +587,21 @@ CURLcode Curl_http_done(struct connectdata *conn)
data=conn->data;
http=conn->proto.http;
+ /* set the proper values (possibly modified on POST) */
+ conn->fread = data->set.fread; /* restore */
+ conn->fread_in = data->set.in; /* restore */
+
+ if(http->send_buffer) {
+ send_buffer *buff = http->send_buffer;
+
+ free(buff->buffer);
+ free(buff);
+ }
+
if(HTTPREQ_POST_FORM == data->set.httpreq) {
conn->bytecount = http->readbytecount + http->writebytecount;
Curl_formclean(http->sendit); /* Now free that whole lot */
-
- data->set.fread = http->storefread; /* restore */
- data->set.in = http->in; /* restore */
}
else if(HTTPREQ_PUT == data->set.httpreq)
conn->bytecount = http->readbytecount + http->writebytecount;
@@ -475,7 +616,6 @@ CURLcode Curl_http_done(struct connectdata *conn)
return CURLE_OK;
}
-
CURLcode Curl_http(struct connectdata *conn)
{
struct SessionHandle *data=conn->data;
@@ -523,7 +663,7 @@ CURLcode Curl_http(struct connectdata *conn)
host due to a location-follow, we do some weirdo checks here */
if(!data->state.this_is_a_follow ||
!data->state.auth_host ||
- strequal(data->state.auth_host, conn->hostname)) {
+ curl_strequal(data->state.auth_host, conn->hostname)) {
sprintf(data->state.buffer, "%s:%s",
data->state.user, data->state.passwd);
if(Curl_base64_encode(data->state.buffer, strlen(data->state.buffer),
@@ -547,12 +687,30 @@ CURLcode Curl_http(struct connectdata *conn)
conn->allocptr.cookie = aprintf("Cookie: %s\015\012", data->set.cookie);
}
+ if(!conn->bits.upload_chunky && (data->set.httpreq != HTTPREQ_GET)) {
+ /* not a chunky transfer but data is to be sent */
+ char *ptr = checkheaders(data, "Transfer-Encoding:");
+ if(ptr) {
+ /* Some kind of TE is requested, check if 'chunked' is chosen */
+ if(Curl_compareheader(ptr, "Transfer-Encoding:", "chunked"))
+ /* we have been told explicitly to upload chunky so deal with it! */
+ conn->bits.upload_chunky = TRUE;
+ }
+ }
+
if(conn->bits.upload_chunky) {
+ /* RFC2616 section 4.4:
+ Messages MUST NOT include both a Content-Length header field and a
+ non-identity transfer-coding. If the message does include a non-
+ identity transfer-coding, the Content-Length MUST be ignored. */
+
if(!checkheaders(data, "Transfer-Encoding:")) {
te = "Transfer-Encoding: chunked\r\n";
}
- /* else
- our header was already added, what to do now? */
+ else {
+ /* The "Transfer-Encoding:" header was already added. */
+ te = "";
+ }
}
if(data->cookies) {
@@ -847,16 +1005,16 @@ CURLcode Curl_http(struct connectdata *conn)
return CURLE_HTTP_POST_ERROR;
}
- http->storefread = data->set.fread; /* backup */
- http->in = data->set.in; /* backup */
-
- data->set.fread = (curl_read_callback)
- Curl_FormReader; /* set the read function to read from the
- generated form data */
- data->set.in = (FILE *)&http->form;
+ /* set the read function to read from the generated form data */
+ conn->fread = (curl_read_callback)Curl_FormReader;
+ conn->fread_in = &http->form;
- add_bufferf(req_buffer,
- "Content-Length: %d\r\n", http->postsize);
+ http->sending = HTTPSEND_BODY;
+
+ if(!conn->bits.upload_chunky)
+ /* only add Content-Length if not uploading chunked */
+ add_bufferf(req_buffer,
+ "Content-Length: %d\r\n", http->postsize);
if(!checkheaders(data, "Expect:")) {
/* if not disabled explicitly we add a Expect: 100-continue
@@ -896,7 +1054,7 @@ CURLcode Curl_http(struct connectdata *conn)
Curl_pgrsSetUploadSize(data, http->postsize);
/* fire away the whole request to the server */
- result = add_buffer_send(conn->firstsocket, conn, req_buffer,
+ result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
failf(data, "Failed sending POST request");
@@ -914,22 +1072,22 @@ CURLcode Curl_http(struct connectdata *conn)
case HTTPREQ_PUT: /* Let's PUT the data to the server! */
- if(data->set.infilesize>0) {
+ if((data->set.infilesize>0) && !conn->bits.upload_chunky)
+ /* only add Content-Length if not uploading chunked */
add_bufferf(req_buffer,
- "Content-Length: %d\r\n\r\n", /* file size */
+ "Content-Length: %d\r\n", /* file size */
data->set.infilesize );
- }
- else
- add_bufferf(req_buffer, "\015\012");
+
+ add_bufferf(req_buffer, "\r\n");
/* set the upload size to the progress meter */
Curl_pgrsSetUploadSize(data, data->set.infilesize);
/* this sends the buffer and frees all the buffer resources */
- result = add_buffer_send(conn->firstsocket, conn, req_buffer,
+ result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
- failf(data, "Faied sending POST request");
+ failf(data, "Failed sending POST request");
else
/* prepare for transfer */
result = Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
@@ -943,14 +1101,20 @@ CURLcode Curl_http(struct connectdata *conn)
case HTTPREQ_POST:
/* this is the simple POST, using x-www-form-urlencoded style */
- if(!checkheaders(data, "Content-Length:"))
- /* we allow replacing this header, although it isn't very wise to
- actually set your own */
- add_bufferf(req_buffer,
- "Content-Length: %d\r\n",
- data->set.postfieldsize?
- data->set.postfieldsize:
- (data->set.postfields?strlen(data->set.postfields):0) );
+ if(!conn->bits.upload_chunky) {
+ /* We only set Content-Length and allow a custom Content-Length if
+ we don't upload data chunked, as RFC2616 forbids us to set both
+ kinds of headers (Transfer-Encoding: chunked and Content-Length) */
+
+ if(!checkheaders(data, "Content-Length:"))
+ /* we allow replacing this header, although it isn't very wise to
+ actually set your own */
+ add_bufferf(req_buffer,
+ "Content-Length: %d\r\n",
+ data->set.postfieldsize?
+ data->set.postfieldsize:
+ (data->set.postfields?strlen(data->set.postfields):0) );
+ }
if(!checkheaders(data, "Content-Type:"))
add_bufferf(req_buffer,
@@ -958,18 +1122,28 @@ CURLcode Curl_http(struct connectdata *conn)
add_buffer(req_buffer, "\r\n", 2);
- /* and here comes the actual data */
- if(data->set.postfieldsize && data->set.postfields) {
- add_buffer(req_buffer, data->set.postfields,
- data->set.postfieldsize);
+ /* and here we setup the pointers to the actual data */
+ if(data->set.postfields) {
+ if(data->set.postfieldsize)
+ http->postsize = data->set.postfieldsize;
+ else
+ http->postsize = strlen(data->set.postfields);
+ http->postdata = data->set.postfields;
+
+ http->sending = HTTPSEND_BODY;
+
+ conn->fread = (curl_read_callback)readmoredata;
+ conn->fread_in = (void *)conn;
+
+ /* set the upload size to the progress meter */
+ Curl_pgrsSetUploadSize(data, http->postsize);
}
- else if(data->set.postfields)
- add_bufferf(req_buffer,
- "%s",
- data->set.postfields );
+ else
+ /* set the upload size to the progress meter */
+ Curl_pgrsSetUploadSize(data, data->set.infilesize);
- /* issue the request */
- result = add_buffer_send(conn->firstsocket, conn, req_buffer,
+ /* issue the request, headers-only */
+ result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
@@ -978,15 +1152,15 @@ CURLcode Curl_http(struct connectdata *conn)
result =
Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
&http->readbytecount,
- data->set.postfields?-1:conn->firstsocket,
- data->set.postfields?NULL:&http->writebytecount);
+ conn->firstsocket,
+ &http->writebytecount);
break;
default:
add_buffer(req_buffer, "\r\n", 2);
/* issue the request */
- result = add_buffer_send(conn->firstsocket, conn, req_buffer,
+ result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
@@ -995,7 +1169,8 @@ CURLcode Curl_http(struct connectdata *conn)
/* HTTP GET/HEAD download: */
result = Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
&http->readbytecount,
- -1, NULL); /* nothing to upload */
+ http->postdata?conn->firstsocket:-1,
+ http->postdata?&http->writebytecount:NULL);
}
if(result)
return result;
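
A small behavioural example of the new header-matching helper (illustration only, not part of the patch); it assumes curl's internal headers for bool and the prototype:

#include "setup.h"
#include "http.h"

static void compareheader_demo(void)
{
  /* Per the RFC2616 note in Curl_compareheader() above, the field name is
     matched case-insensitively and any whitespace before the value is
     skipped, so this evaluates to TRUE. */
  bool is_chunked =
    Curl_compareheader("Transfer-Encoding:   chunked\r\n",
                       "Transfer-Encoding:", "chunked");
  (void)is_chunked;
}
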
diff --git a/Source/CTest/Curl/http.h b/Source/CTest/Curl/http.h
index d78322c..2f40ea1 100644
--- a/Source/CTest/Curl/http.h
+++ b/Source/CTest/Curl/http.h
@@ -24,6 +24,10 @@
* $Id$
***************************************************************************/
#ifndef CURL_DISABLE_HTTP
+bool Curl_compareheader(char *headerline, /* line to check */
+ const char *header, /* header keyword _with_ colon */
+ const char *content); /* content string to find */
+
/* ftp can use this as well */
CURLcode Curl_ConnectHTTPProxyTunnel(struct connectdata *conn,
int tunnelsocket,
diff --git a/Source/CTest/Curl/if2ip.h b/Source/CTest/Curl/if2ip.h
index 2be0ea5..3cb0a46 100644
--- a/Source/CTest/Curl/if2ip.h
+++ b/Source/CTest/Curl/if2ip.h
@@ -29,5 +29,40 @@ extern char *Curl_if2ip(char *interface, char *buf, int buf_size);
#else
#define Curl_if2ip(a,b,c) NULL
#endif
+#ifdef __INTERIX
+/* Nedelcho Stanev's work-around for SFU 3.0 */
+struct ifreq {
+#define IFNAMSIZ 16
+#define IFHWADDRLEN 6
+ union {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ } ifr_ifru;
+};
+
+/* This define was added by Daniel to avoid an extra #ifdef INTERIX in the
+ C code. */
+#define ifr_dstaddr ifr_addr
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+
+#define SIOCGIFADDR _IOW('s', 102, struct ifreq) /* Get if addr */
+#endif /* interix */
#endif
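
As context (not part of the patch): the struct ifreq and SIOCGIFADDR definitions above exist so that an interface-to-address lookup of roughly this shape can compile on Interix. A hedged sketch of the usual ioctl pattern, with a made-up function name and abbreviated error handling:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>        /* on non-Interix systems, supplies struct ifreq */

/* Write the dotted-quad address bound to 'ifname' into 'buf', or return NULL. */
static char *if2ip_sketch(const char *ifname, char *buf, int buf_size)
{
  struct ifreq req;
  struct sockaddr_in *sin;
  int s = socket(AF_INET, SOCK_DGRAM, 0);

  if(s < 0)
    return NULL;

  memset(&req, 0, sizeof(req));
  strncpy(req.ifr_name, ifname, IFNAMSIZ - 1);

  if(ioctl(s, SIOCGIFADDR, &req) < 0) {
    close(s);
    return NULL;
  }
  close(s);

  sin = (struct sockaddr_in *)&req.ifr_addr;
  strncpy(buf, inet_ntoa(sin->sin_addr), buf_size - 1);
  buf[buf_size - 1] = '\0';
  return buf;
}
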
diff --git a/Source/CTest/Curl/krb4.c b/Source/CTest/Curl/krb4.c
index ab38d30..af8fc87 100644
--- a/Source/CTest/Curl/krb4.c
+++ b/Source/CTest/Curl/krb4.c
@@ -202,6 +202,7 @@ krb4_auth(void *app_data, struct connectdata *conn)
ssize_t nread;
int l = sizeof(conn->local_addr);
struct SessionHandle *data = conn->data;
+ CURLcode result;
if(getsockname(conn->firstsocket,
(struct sockaddr *)LOCAL_ADDR, &l) < 0)
@@ -246,13 +247,15 @@ krb4_auth(void *app_data, struct connectdata *conn)
return AUTH_CONTINUE;
}
- if(Curl_ftpsendf(conn, "ADAT %s", p))
+ result = Curl_ftpsendf(conn, "ADAT %s", p);
+
+ free(p);
+
+ if(result)
return -2;
- nread = Curl_GetFTPResponse(data->state.buffer, conn, NULL);
- if(nread < 0)
+ if(Curl_GetFTPResponse(&nread, conn, NULL))
return -1;
- free(p);
if(data->state.buffer[0] != '2'){
Curl_failf(data, "Server didn't accept auth data");
@@ -299,7 +302,7 @@ struct Curl_sec_client_mech Curl_krb4_client_mech = {
krb4_decode
};
-void Curl_krb_kauth(struct connectdata *conn)
+CURLcode Curl_krb_kauth(struct connectdata *conn)
{
des_cblock key;
des_key_schedule schedule;
@@ -309,18 +312,19 @@ void Curl_krb_kauth(struct connectdata *conn)
char passwd[100];
int tmp;
ssize_t nread;
-
int save;
+ CURLcode result;
save = Curl_set_command_prot(conn, prot_private);
- if(Curl_ftpsendf(conn, "SITE KAUTH %s", conn->data->state.user))
- return;
+ result = Curl_ftpsendf(conn, "SITE KAUTH %s", conn->data->state.user);
+
+ if(result)
+ return result;
- nread = Curl_GetFTPResponse(conn->data->state.buffer,
- conn, NULL);
- if(nread < 0)
- return /*CURLE_OPERATION_TIMEOUTED*/;
+ result = Curl_GetFTPResponse(&nread, conn, NULL);
+ if(result)
+ return result;
if(conn->data->state.buffer[0] != '3'){
Curl_set_command_prot(conn, save);
@@ -331,7 +335,7 @@ void Curl_krb_kauth(struct connectdata *conn)
if(!p) {
Curl_failf(conn->data, "Bad reply from server");
Curl_set_command_prot(conn, save);
- return;
+ return CURLE_FTP_WEIRD_SERVER_REPLY;
}
p += 2;
@@ -339,7 +343,7 @@ void Curl_krb_kauth(struct connectdata *conn)
if(tmp < 0) {
Curl_failf(conn->data, "Failed to decode base64 in reply.\n");
Curl_set_command_prot(conn, save);
- return;
+ return CURLE_FTP_WEIRD_SERVER_REPLY;
}
tkt.length = tmp;
tktcopy.length = tkt.length;
@@ -348,7 +352,7 @@ void Curl_krb_kauth(struct connectdata *conn)
if(!p) {
Curl_failf(conn->data, "Bad reply from server");
Curl_set_command_prot(conn, save);
- return;
+ return CURLE_FTP_WEIRD_SERVER_REPLY;
}
name = p + 2;
for(; *p && *p != ' ' && *p != '\r' && *p != '\n'; p++);
@@ -376,19 +380,21 @@ void Curl_krb_kauth(struct connectdata *conn)
if(Curl_base64_encode(tktcopy.dat, tktcopy.length, &p) < 0) {
failf(conn->data, "Out of memory base64-encoding.");
Curl_set_command_prot(conn, save);
- return;
+ return CURLE_OUT_OF_MEMORY;
}
memset (tktcopy.dat, 0, tktcopy.length);
- if(Curl_ftpsendf(conn, "SITE KAUTH %s %s", name, p))
- return;
-
- nread = Curl_GetFTPResponse(conn->data->state.buffer,
- conn, NULL);
- if(nread < 0)
- return /*CURLE_OPERATION_TIMEOUTED*/;
+ result = Curl_ftpsendf(conn, "SITE KAUTH %s %s", name, p);
free(p);
+ if(result)
+ return result;
+
+ result = Curl_GetFTPResponse(&nread, conn, NULL);
+ if(result)
+ return result;
Curl_set_command_prot(conn, save);
+
+ return CURLE_OK;
}
#endif /* KRB4 */
diff --git a/Source/CTest/Curl/krb4.h b/Source/CTest/Curl/krb4.h
index 8057058..ad314b5 100644
--- a/Source/CTest/Curl/krb4.h
+++ b/Source/CTest/Curl/krb4.h
@@ -22,6 +22,6 @@
*
* $Id$
***************************************************************************/
-void Curl_krb_kauth(struct connectdata *conn);
+CURLcode Curl_krb_kauth(struct connectdata *conn);
#endif
diff --git a/Source/CTest/Curl/multi.c b/Source/CTest/Curl/multi.c
index f901f3a..8548354 100644
--- a/Source/CTest/Curl/multi.c
+++ b/Source/CTest/Curl/multi.c
@@ -313,9 +313,8 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
easy->easy_handle->hostcache = Curl_global_host_cache_get();
}
else {
- if (multi->hostcache == NULL) {
- multi->hostcache = Curl_hash_alloc(7, Curl_freeaddrinfo);
- }
+ if (multi->hostcache == NULL)
+ multi->hostcache = Curl_hash_alloc(7, Curl_freednsinfo);
easy->easy_handle->hostcache = multi->hostcache;
}
diff --git a/Source/CTest/Curl/security.c b/Source/CTest/Curl/security.c
index aa11a8f..c8f3c22 100644
--- a/Source/CTest/Curl/security.c
+++ b/Source/CTest/Curl/security.c
@@ -279,32 +279,6 @@ Curl_sec_write(struct connectdata *conn, int fd, char *buffer, int length)
}
int
-Curl_sec_vfprintf2(struct connectdata *conn, FILE *f, const char *fmt, va_list ap)
-{
- char *buf;
- int ret;
- if(conn->data_prot == prot_clear)
- return vfprintf(f, fmt, ap);
- else {
- buf = aprintf(fmt, ap);
- ret = buffer_write(&conn->out_buffer, buf, strlen(buf));
- free(buf);
- return ret;
- }
-}
-
-int
-Curl_sec_fprintf2(struct connectdata *conn, FILE *f, const char *fmt, ...)
-{
- int ret;
- va_list ap;
- va_start(ap, fmt);
- ret = Curl_sec_vfprintf2(conn, f, fmt, ap);
- va_end(ap);
- return ret;
-}
-
-int
Curl_sec_putc(struct connectdata *conn, int c, FILE *F)
{
char ch = c;
@@ -313,7 +287,8 @@ Curl_sec_putc(struct connectdata *conn, int c, FILE *F)
buffer_write(&conn->out_buffer, &ch, 1);
if(c == '\n' || conn->out_buffer.index >= 1024 /* XXX */) {
- Curl_sec_write(conn, fileno(F), conn->out_buffer.data, conn->out_buffer.index);
+ Curl_sec_write(conn, fileno(F), conn->out_buffer.data,
+ conn->out_buffer.index);
conn->out_buffer.index = 0;
}
return c;
@@ -346,53 +321,6 @@ Curl_sec_read_msg(struct connectdata *conn, char *s, int level)
return code;
}
-/* modified to return how many bytes written, or -1 on error ***/
-int
-Curl_sec_vfprintf(struct connectdata *conn, FILE *f, const char *fmt, va_list ap)
-{
- int ret = 0;
- char *buf;
- void *enc;
- int len;
- if(!conn->sec_complete)
- return vfprintf(f, fmt, ap);
-
- buf = aprintf(fmt, ap);
- len = (conn->mech->encode)(conn->app_data, buf, strlen(buf),
- conn->command_prot, &enc,
- conn);
- free(buf);
- if(len < 0) {
- failf(conn->data, "Failed to encode command.");
- return -1;
- }
- if(Curl_base64_encode(enc, len, &buf) < 0){
- failf(conn->data, "Out of memory base64-encoding.");
- return -1;
- }
- if(conn->command_prot == prot_safe)
- ret = fprintf(f, "MIC %s", buf);
- else if(conn->command_prot == prot_private)
- ret = fprintf(f, "ENC %s", buf);
- else if(conn->command_prot == prot_confidential)
- ret = fprintf(f, "CONF %s", buf);
-
- free(buf);
- return ret;
-}
-
-int
-Curl_sec_fprintf(struct connectdata *conn, FILE *f, const char *fmt, ...)
-{
- va_list ap;
- int ret;
- va_start(ap, fmt);
- ret = Curl_sec_vfprintf(conn, f, fmt, ap);
- va_end(ap);
- return ret;
-}
-
-
enum protection_level
Curl_set_command_prot(struct connectdata *conn, enum protection_level level)
{
@@ -414,14 +342,14 @@ sec_prot_internal(struct connectdata *conn, int level)
}
if(level){
+ int code;
if(Curl_ftpsendf(conn, "PBSZ %u", s))
return -1;
- nread = Curl_GetFTPResponse(conn->data->state.buffer, conn, NULL);
- if(nread < 0)
+ if(Curl_GetFTPResponse(&nread, conn, &code))
return -1;
- if(conn->data->state.buffer[0] != '2'){
+ if(code/100 != 2){
failf(conn->data, "Failed to set protection buffer size.");
return -1;
}
@@ -437,8 +365,7 @@ sec_prot_internal(struct connectdata *conn, int level)
if(Curl_ftpsendf(conn, "PROT %c", level["CSEP"]))
return -1;
- nread = Curl_GetFTPResponse(conn->data->state.buffer, conn, NULL);
- if(nread < 0)
+ if(Curl_GetFTPResponse(&nread, conn, NULL))
return -1;
if(conn->data->state.buffer[0] != '2'){
@@ -496,8 +423,7 @@ Curl_sec_login(struct connectdata *conn)
if(Curl_ftpsendf(conn, "AUTH %s", (*m)->name))
return -1;
- nread = Curl_GetFTPResponse(conn->data->state.buffer, conn, &ftpcode);
- if(nread < 0)
+ if(Curl_GetFTPResponse(&nread, conn, &ftpcode))
return -1;
if(conn->data->state.buffer[0] != '3'){
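
sec_prot_internal() above drives the RFC 2228 command pair PBSZ (protection buffer size) and PROT (protection level), and the level["CSEP"] expression is plain index-swapped array access: it picks the letter C, S, E or P for protection levels 0 through 3. A stand-alone illustration of that idiom, assuming the enum keeps the apparent clear/safe/confidential/private ordering:

#include <stdio.h>

/* assumed ordering: 0 = clear, 1 = safe, 2 = confidential, 3 = private,
   matching the RFC 2228 PROT codes C, S, E and P */
enum protection_level {
  prot_clear = 0,
  prot_safe,
  prot_confidential,
  prot_private
};

int main(void)
{
  enum protection_level level;

  for(level = prot_clear; level <= prot_private; level++) {
    /* level["CSEP"] is identical to "CSEP"[level]: a[i] == *(a+i) == i[a] */
    printf("PROT %c\n", level["CSEP"]);
  }
  return 0;
}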
diff --git a/Source/CTest/Curl/sendf.c b/Source/CTest/Curl/sendf.c
index 6ebe014..0eef961 100644
--- a/Source/CTest/Curl/sendf.c
+++ b/Source/CTest/Curl/sendf.c
@@ -154,9 +154,19 @@ void Curl_failf(struct SessionHandle *data, const char *fmt, ...)
vsnprintf(data->set.errorbuffer, CURL_ERROR_SIZE, fmt, ap);
data->state.errorbuf = TRUE; /* wrote error string */
- if(data->set.verbose)
- Curl_debug(data, CURLINFO_TEXT, data->set.errorbuffer,
- strlen(data->set.errorbuffer));
+ if(data->set.verbose) {
+ int len = strlen(data->set.errorbuffer);
+ bool doneit=FALSE;
+ if(len < CURL_ERROR_SIZE) {
+ doneit = TRUE;
+ data->set.errorbuffer[len] = '\n';
+ data->set.errorbuffer[++len] = '\0';
+ }
+ Curl_debug(data, CURLINFO_TEXT, data->set.errorbuffer, len);
+ if(doneit)
+ /* cut off the newline again */
+ data->set.errorbuffer[--len]=0;
+ }
}
va_end(ap);
}
@@ -235,6 +245,9 @@ CURLcode Curl_write(struct connectdata *conn, int sockfd,
/* this is basicly the EWOULDBLOCK equivalent */
*written = 0;
return CURLE_OK;
+ case SSL_ERROR_SYSCALL:
+ failf(conn->data, "SSL_write() returned SYSCALL, errno = %d\n", errno);
+ return CURLE_SEND_ERROR;
}
/* a true error */
failf(conn->data, "SSL_write() return error %d\n", err);
@@ -328,36 +341,29 @@ int Curl_read(struct connectdata *conn,
ssize_t *n)
{
ssize_t nread;
+ *n=0; /* reset amount to zero */
#ifdef USE_SSLEAY
if (conn->ssl.use) {
- bool loop=TRUE;
- int err;
- do {
- nread = SSL_read(conn->ssl.handle, buf, buffersize);
+ nread = SSL_read(conn->ssl.handle, buf, buffersize);
- if(nread >= 0)
- /* successful read */
- break;
-
- err = SSL_get_error(conn->ssl.handle, nread);
+ if(nread < 0) {
+ /* failed SSL_read */
+ int err = SSL_get_error(conn->ssl.handle, nread);
switch(err) {
case SSL_ERROR_NONE: /* this is not an error */
case SSL_ERROR_ZERO_RETURN: /* no more data */
- loop=0; /* get out of loop */
break;
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
- /* if there's data pending, then we re-invoke SSL_read() */
- break;
+ /* there's data pending, re-invoke SSL_read() */
+ return -1; /* basically EWOULDBLOCK */
default:
failf(conn->data, "SSL read error: %d", err);
return CURLE_RECV_ERROR;
}
- } while(loop);
- if(loop && SSL_pending(conn->ssl.handle))
- return -1; /* basicly EWOULDBLOCK */
+ }
}
else {
#endif
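
The reworked Curl_read() above stops looping on SSL_read() and instead treats SSL_ERROR_WANT_READ and SSL_ERROR_WANT_WRITE as the SSL analogue of EWOULDBLOCK, leaving the retry to the caller's select() loop. A minimal sketch of that classification for an already connected non-blocking SSL handle; the -1/-2 return codes are placeholders, not libcurl's CURLcode values:

#include <openssl/ssl.h>
#include <openssl/err.h>

/* Return >=0 bytes read, -1 for "try again later" (EWOULDBLOCK-style),
   -2 for a hard error.  A sketch only; libcurl maps these onto CURLcode. */
static int nonblocking_ssl_read(SSL *ssl, char *buf, int buffersize)
{
  int nread = SSL_read(ssl, buf, buffersize);

  if(nread >= 0)
    return nread;                      /* data, or a clean EOF as zero */

  switch(SSL_get_error(ssl, nread)) {
  case SSL_ERROR_NONE:
  case SSL_ERROR_ZERO_RETURN:          /* orderly shutdown from the peer */
    return 0;
  case SSL_ERROR_WANT_READ:
  case SSL_ERROR_WANT_WRITE:           /* retry once select() says so */
    return -1;
  default:
    return -2;                         /* genuine read error */
  }
}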
diff --git a/Source/CTest/Curl/sendf.h b/Source/CTest/Curl/sendf.h
index 665a4c8..97e2fa9 100644
--- a/Source/CTest/Curl/sendf.h
+++ b/Source/CTest/Curl/sendf.h
@@ -30,13 +30,6 @@ void Curl_failf(struct SessionHandle *, const char *fmt, ...);
#define infof Curl_infof
#define failf Curl_failf
-struct send_buffer {
- char *buffer;
- size_t size_max;
- size_t size_used;
-};
-typedef struct send_buffer send_buffer;
-
#define CLIENTWRITE_BODY 1
#define CLIENTWRITE_HEADER 2
#define CLIENTWRITE_BOTH (CLIENTWRITE_BODY|CLIENTWRITE_HEADER)
diff --git a/Source/CTest/Curl/setup.h b/Source/CTest/Curl/setup.h
index 783fb83..0612df4 100644
--- a/Source/CTest/Curl/setup.h
+++ b/Source/CTest/Curl/setup.h
@@ -35,9 +35,8 @@
#define CURL_DISABLE_GOPHER
#endif
-#if !defined(WIN32) && defined(_WIN32)
-/* This _might_ be a good Borland fix. Please report whether this works or
- not! */
+#if !defined(WIN32) && defined(__WIN32__)
+/* This should be a good Borland fix. Alexander J. Oss told us! */
#define WIN32
#endif
diff --git a/Source/CTest/Curl/share.h b/Source/CTest/Curl/share.h
new file mode 100644
index 0000000..fdd6ec5
--- /dev/null
+++ b/Source/CTest/Curl/share.h
@@ -0,0 +1,52 @@
+#ifndef __CURL_SHARE_H
+#define __CURL_SHARE_H
+
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at http://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * $Id$
+ ***************************************************************************/
+
+#include "setup.h"
+#include <curl/curl.h>
+
+/* this struct is libcurl-private, don't export details */
+struct Curl_share {
+ unsigned int specifier;
+ unsigned int locked;
+ unsigned int dirty;
+
+ curl_lock_function lockfunc;
+ curl_unlock_function unlockfunc;
+ void *clientdata;
+};
+
+CURLSHcode Curl_share_aquire_lock (struct SessionHandle *, curl_lock_data);
+CURLSHcode Curl_share_release_lock (struct SessionHandle *, curl_lock_data);
+
+#endif /* __CURL_SHARE_H */
+
+/*
+ * local variables:
+ * eval: (load-file "../curl-mode.el")
+ * end:
+ * vim600: fdm=marker
+ * vim: et sw=2 ts=2 sts=2 tw=78
+ */
diff --git a/Source/CTest/Curl/ssluse.c b/Source/CTest/Curl/ssluse.c
index d3162f7..07dae8b 100644
--- a/Source/CTest/Curl/ssluse.c
+++ b/Source/CTest/Curl/ssluse.c
@@ -275,7 +275,8 @@ int cert_stuff(struct connectdata *conn,
if (SSL_CTX_use_PrivateKey_file(conn->ssl.ctx,
key_file,
file_type) != 1) {
- failf(data, "unable to set private key file\n");
+ failf(data, "unable to set private key file: '%s' type %s\n",
+ key_file, key_type?key_type:"PEM");
return 0;
}
break;
@@ -324,10 +325,15 @@ int cert_stuff(struct connectdata *conn,
ssl=SSL_new(conn->ssl.ctx);
x509=SSL_get_certificate(ssl);
-
- if (x509 != NULL)
- EVP_PKEY_copy_parameters(X509_get_pubkey(x509),
- SSL_get_privatekey(ssl));
+
+ /* This version was provided by Evan Jordan and is supposed to not
+ leak memory like the previous version did: */
+ if (x509 != NULL) {
+ EVP_PKEY *pktmp = X509_get_pubkey(x509);
+ EVP_PKEY_copy_parameters(pktmp,SSL_get_privatekey(ssl));
+ EVP_PKEY_free(pktmp);
+ }
+
SSL_free(ssl);
/* If we are using DSA, we can copy the parameters from
@@ -667,6 +673,44 @@ static int Curl_ASN1_UTCTIME_output(struct connectdata *conn,
#endif
/* ====================================================== */
+static int
+cert_hostcheck(const char *certname, const char *hostname)
+{
+ char *tmp;
+ const char *certdomain;
+
+ if(!certname ||
+ strlen(certname)<3 ||
+ !hostname ||
+ !strlen(hostname)) /* sanity check */
+ return 0;
+
+ if(strequal(certname, hostname)) /* trivial case */
+ return 1;
+
+ certdomain = certname + 1;
+
+ if((certname[0] != '*') || (certdomain[0] != '.'))
+ return 0; /* not a wildcard certificate, check failed */
+
+ if(!strchr(certdomain+1, '.'))
+ return 0; /* the certificate must have at least another dot in its name */
+
+ /* find 'certdomain' within 'hostname' */
+ tmp = strstr(hostname, certdomain);
+ if(tmp) {
+ /* ok the certname's domain matches the hostname, let's check that it's a
+ tail-match */
+ if(strequal(tmp, certdomain))
+ /* looks like a match. Just check we haven't swallowed a '.' */
+ return tmp == strchr(hostname, '.');
+ else
+ return 0;
+ }
+ return 0;
+}
+
+/* ====================================================== */
CURLcode
Curl_SSLConnect(struct connectdata *conn)
{
@@ -904,7 +948,7 @@ Curl_SSLConnect(struct connectdata *conn)
return CURLE_SSL_PEER_CERTIFICATE;
}
- if (!strequal(peer_CN, conn->hostname)) {
+ if (!cert_hostcheck(peer_CN, conn->hostname)) {
if (data->set.ssl.verifyhost > 1) {
failf(data, "SSL: certificate subject name '%s' does not match "
"target host name '%s'",
diff --git a/Source/CTest/Curl/transfer.c b/Source/CTest/Curl/transfer.c
index 5e4e4fb..75cdc69 100644
--- a/Source/CTest/Curl/transfer.c
+++ b/Source/CTest/Curl/transfer.c
@@ -114,65 +114,76 @@ enum {
KEEP_WRITE
};
+/* We keep this static and global since this is read-only and NEVER
+ changed. It should just remain a blanked-out timeout value. */
+static struct timeval notimeout={0,0};
/*
- * compareheader()
- *
- * Returns TRUE if 'headerline' contains the 'header' with given 'content'.
- * Pass headers WITH the colon.
+ * This function will call the read callback to fill our buffer with data
+ * to upload.
*/
-static bool
-compareheader(char *headerline, /* line to check */
- const char *header, /* header keyword _with_ colon */
- const char *content) /* content string to find */
+static int fillbuffer(struct connectdata *conn,
+ int bytes)
{
- /* RFC2616, section 4.2 says: "Each header field consists of a name followed
- * by a colon (":") and the field value. Field names are case-insensitive.
- * The field value MAY be preceded by any amount of LWS, though a single SP
- * is preferred." */
-
- size_t hlen = strlen(header);
- size_t clen;
- size_t len;
- char *start;
- char *end;
-
- if(!strnequal(headerline, header, hlen))
- return FALSE; /* doesn't start with header */
-
- /* pass the header */
- start = &headerline[hlen];
-
- /* pass all white spaces */
- while(*start && isspace((int)*start))
- start++;
-
- /* find the end of the header line */
- end = strchr(start, '\r'); /* lines end with CRLF */
- if(!end) {
- /* in case there's a non-standard compliant line here */
- end = strchr(start, '\n');
-
- if(!end)
- /* hm, there's no line ending here, return false and bail out! */
- return FALSE;
+ int buffersize = bytes;
+ int nread;
+
+ if(conn->bits.upload_chunky) {
+ /* if chunked Transfer-Encoding */
+ buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
+ conn->upload_fromhere += 10; /* 32bit hex + CRLF */
+ }
+
+ nread = conn->fread(conn->upload_fromhere, 1,
+ buffersize, conn->fread_in);
+
+ if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
+ /* if chunked Transfer-Encoding */
+ char hexbuffer[11];
+ int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
+ "%x\r\n", nread);
+ /* move buffer pointer */
+ conn->upload_fromhere -= hexlen;
+ nread += hexlen;
+
+ /* copy the prefix to the buffer */
+ memcpy(conn->upload_fromhere, hexbuffer, hexlen);
+ if(nread>hexlen) {
+ /* append CRLF to the data */
+ memcpy(conn->upload_fromhere +
+ nread, "\r\n", 2);
+ nread+=2;
+ }
+ else {
+ /* mark this as done once this chunk is transferred */
+ conn->keep.upload_done = TRUE;
+ }
}
+ return nread;
+}
- len = end-start; /* length of the content part of the input line */
- clen = strlen(content); /* length of the word to find */
+/*
+ * checkhttpprefix()
+ *
+ * Returns TRUE if member of the list matches prefix of string
+ */
+static bool
+checkhttpprefix(struct SessionHandle *data,
+ const char *s)
+{
+ struct curl_slist *head = data->set.http200aliases;
- /* find the content string in the rest of the line */
- for(;len>=clen;len--, start++) {
- if(strnequal(start, content, clen))
- return TRUE; /* match! */
+ while (head) {
+ if (checkprefix(head->data, s))
+ return TRUE;
+ head = head->next;
}
- return FALSE; /* no match */
-}
+ if(checkprefix("HTTP/", s))
+ return TRUE;
-/* We keep this static and global since this is read-only and NEVER
- changed. It should just remain a blanked-out timeout value. */
-static struct timeval notimeout={0,0};
+ return FALSE;
+}
CURLcode Curl_readwrite(struct connectdata *conn,
bool *done)
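
The fillbuffer() helper introduced above frames each read block for a chunked Transfer-Encoding upload: it leaves room at the front of the buffer, writes the block size as hex digits plus CRLF there, appends CRLF after the data, and a zero-length read becomes the last chunk that ends the upload. A self-contained sketch of that framing; the buffer names and sizes are illustrative, not libcurl's:

#include <stdio.h>
#include <string.h>

/* Wrap 'datalen' bytes sitting at data[] into one HTTP chunk:
   "<hex-size>\r\n" + data + "\r\n".  Returns the framed length, or -1 if
   'out' is too small.  A zero-length chunk marks the end of the upload. */
static int frame_chunk(const char *data, size_t datalen,
                       char *out, size_t outsize)
{
  int hexlen = snprintf(out, outsize, "%x\r\n", (unsigned int)datalen);

  if(hexlen < 0 || (size_t)hexlen + datalen + 2 > outsize)
    return -1;

  memcpy(out + hexlen, data, datalen);        /* the chunk payload */
  memcpy(out + hexlen + datalen, "\r\n", 2);  /* trailing CRLF */
  return hexlen + (int)datalen + 2;
}

int main(void)
{
  char out[128];
  int n = frame_chunk("hello", 5, out, sizeof(out));

  fwrite(out, 1, n, stdout);       /* prints "5\r\nhello\r\n" */

  n = frame_chunk("", 0, out, sizeof(out));
  fwrite(out, 1, n, stdout);       /* prints "0\r\n\r\n": end of chunks */
  return 0;
}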
@@ -220,61 +231,111 @@ CURLcode Curl_readwrite(struct connectdata *conn,
if((k->keepon & KEEP_READ) &&
(FD_ISSET(conn->sockfd, readfdp))) {
- /* read! */
- result = Curl_read(conn, conn->sockfd, k->buf,
- data->set.buffer_size?
- data->set.buffer_size:BUFSIZE -1,
- &nread);
+ bool readdone = FALSE;
- if(0>result)
- break; /* get out of loop */
- if(result>0)
- return result;
+ /* This is where we loop until we have read everything there is to
+ read or we get a EWOULDBLOCK */
+ do {
- if ((k->bytecount == 0) && (k->writebytecount == 0))
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
+ /* read! */
+ result = Curl_read(conn, conn->sockfd, k->buf,
+ data->set.buffer_size?
+ data->set.buffer_size:BUFSIZE -1,
+ &nread);
- didwhat |= KEEP_READ;
+ if(0>result)
+ break; /* get out of loop */
+ if(result>0)
+ return result;
- /* NULL terminate, allowing string ops to be used */
- if (0 < nread)
- k->buf[nread] = 0;
+ if ((k->bytecount == 0) && (k->writebytecount == 0))
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- /* if we receive 0 or less here, the server closed the connection and
- we bail out from this! */
- else if (0 >= nread) {
- k->keepon &= ~KEEP_READ;
- FD_ZERO(&k->rkeepfd);
- break;
- }
+ didwhat |= KEEP_READ;
+
+ /* NULL terminate, allowing string ops to be used */
+ if (0 < nread)
+ k->buf[nread] = 0;
+
+ /* if we receive 0 or less here, the server closed the connection and
+ we bail out from this! */
+ else if (0 >= nread) {
+ k->keepon &= ~KEEP_READ;
+ FD_ZERO(&k->rkeepfd);
+ readdone = TRUE;
+ break;
+ }
- /* Default buffer to use when we write the buffer, it may be changed
- in the flow below before the actual storing is done. */
- k->str = k->buf;
+ /* Default buffer to use when we write the buffer, it may be changed
+ in the flow below before the actual storing is done. */
+ k->str = k->buf;
- /* Since this is a two-state thing, we check if we are parsing
- headers at the moment or not. */
- if (k->header) {
- /* we are in parse-the-header-mode */
- bool stop_reading = FALSE;
+ /* Since this is a two-state thing, we check if we are parsing
+ headers at the moment or not. */
+ if (k->header) {
+ /* we are in parse-the-header-mode */
+ bool stop_reading = FALSE;
- /* header line within buffer loop */
- do {
- int hbufp_index;
+ /* header line within buffer loop */
+ do {
+ int hbufp_index;
- /* str_start is start of line within buf */
- k->str_start = k->str;
+ /* str_start is start of line within buf */
+ k->str_start = k->str;
- k->end_ptr = strchr (k->str_start, '\n');
+ k->end_ptr = strchr (k->str_start, '\n');
- if (!k->end_ptr) {
- /* Not a complete header line within buffer, append the data to
- the end of the headerbuff. */
+ if (!k->end_ptr) {
+ /* Not a complete header line within buffer, append the data to
+ the end of the headerbuff. */
+
+ if (k->hbuflen + nread >= data->state.headersize) {
+ /* We enlarge the header buffer as it is too small */
+ char *newbuff;
+ long newsize=MAX((k->hbuflen+nread)*3/2,
+ data->state.headersize*2);
+ hbufp_index = k->hbufp - data->state.headerbuff;
+ newbuff = (char *)realloc(data->state.headerbuff, newsize);
+ if(!newbuff) {
+ failf (data, "Failed to alloc memory for big header!");
+ return CURLE_OUT_OF_MEMORY;
+ }
+ data->state.headersize=newsize;
+ data->state.headerbuff = newbuff;
+ k->hbufp = data->state.headerbuff + hbufp_index;
+ }
+ memcpy(k->hbufp, k->str, nread);
+ k->hbufp += nread;
+ k->hbuflen += nread;
+ if (!k->headerline && (k->hbuflen>5)) {
+ /* make a first check that this looks like a HTTP header */
+ if(!checkhttpprefix(data, data->state.headerbuff)) {
+ /* this is not the beginning of a HTTP first header line */
+ k->header = FALSE;
+ k->badheader = HEADER_ALLBAD;
+ break;
+ }
+ }
+
+ break; /* read more and try again */
+ }
+
+ /* decrease the size of the remaining buffer */
+ nread -= (k->end_ptr - k->str)+1;
- if (k->hbuflen + nread >= data->state.headersize) {
- /* We enlarge the header buffer as it is too small */
+ k->str = k->end_ptr + 1; /* move past new line */
+
+ /*
+ * We're about to copy a chunk of data to the end of the
+ * already received header. We make sure that the full string
+ * fit in the allocated header buffer, or else we enlarge
+ * it.
+ */
+ if (k->hbuflen + (k->str - k->str_start) >=
+ data->state.headersize) {
char *newbuff;
- long newsize=MAX((k->hbuflen+nread)*3/2,
+ long newsize=MAX((k->hbuflen+
+ (k->str-k->str_start))*3/2,
data->state.headersize*2);
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = (char *)realloc(data->state.headerbuff, newsize);
@@ -282,602 +343,576 @@ CURLcode Curl_readwrite(struct connectdata *conn,
failf (data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
}
- data->state.headersize=newsize;
+ data->state.headersize= newsize;
data->state.headerbuff = newbuff;
k->hbufp = data->state.headerbuff + hbufp_index;
}
- memcpy(k->hbufp, k->str, nread);
- k->hbufp += nread;
- k->hbuflen += nread;
- if (!k->headerline && (k->hbuflen>5)) {
- /* make a first check that this looks like a HTTP header */
- if(!checkprefix("HTTP/", data->state.headerbuff)) {
+
+ /* copy to end of line */
+ strncpy (k->hbufp, k->str_start, k->str - k->str_start);
+ k->hbufp += k->str - k->str_start;
+ k->hbuflen += k->str - k->str_start;
+ *k->hbufp = 0;
+
+ k->p = data->state.headerbuff;
+
+ /****
+ * We now have a FULL header line that p points to
+ *****/
+
+ if(!k->headerline) {
+ /* the first read header */
+ if((k->hbuflen>5) &&
+ !checkhttpprefix(data, data->state.headerbuff)) {
/* this is not the beginning of a HTTP first header line */
k->header = FALSE;
- k->badheader = HEADER_ALLBAD;
+ k->badheader = HEADER_PARTHEADER;
break;
}
}
- break; /* read more and try again */
- }
+ if (('\n' == *k->p) || ('\r' == *k->p)) {
+ int headerlen;
+ /* Zero-length header line means end of headers! */
+
+ if ('\r' == *k->p)
+ k->p++; /* pass the \r byte */
+ if ('\n' == *k->p)
+ k->p++; /* pass the \n byte */
+
+ if(100 == k->httpcode) {
+ /*
+ * we have made a HTTP PUT or POST and this is 1.1-lingo
+ * that tells us that the server is OK with this and ready
+ * to receive our stuff.
+ * However, we'll get more headers now so we must get
+ * back into the header-parsing state!
+ */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+ /* if we did wait for this do enable write now! */
+ if (k->write_after_100_header) {
+
+ k->write_after_100_header = FALSE;
+ FD_SET (conn->writesockfd, &k->writefd); /* write */
+ k->keepon |= KEEP_WRITE;
+ k->wkeepfd = k->writefd;
+ }
+ }
+ else
+ k->header = FALSE; /* no more header to parse! */
+
+ if (417 == k->httpcode) {
+ /*
+ * we got: "417 Expectation Failed" this means:
+ * we have made a HTTP call and our Expect Header
+ * seems to cause a problem => abort the write operations
+ * (or prevent them from starting).
+ */
+ k->write_after_100_header = FALSE;
+ k->keepon &= ~KEEP_WRITE;
+ FD_ZERO(&k->wkeepfd);
+ }
- /* decrease the size of the remaining buffer */
- nread -= (k->end_ptr - k->str)+1;
+ /* now, only output this if the header AND body are requested:
+ */
+ k->writetype = CLIENTWRITE_HEADER;
+ if (data->set.http_include_header)
+ k->writetype |= CLIENTWRITE_BODY;
+
+ headerlen = k->p - data->state.headerbuff;
+
+ result = Curl_client_write(data, k->writetype,
+ data->state.headerbuff,
+ headerlen);
+ if(result)
+ return result;
+
+ data->info.header_size += headerlen;
+ conn->headerbytecount += headerlen;
+
+ if(!k->header) {
+ /*
+ * really end-of-headers.
+ *
+ * If we requested a "no body", this is a good time to get
+ * out and return home.
+ */
+ if(data->set.no_body)
+ stop_reading = TRUE;
+ else if(!conn->bits.close) {
+ /* If this is not the last request before a close, we must
+ set the maximum download size to the size of the
+ expected document or else, we won't know when to stop
+ reading! */
+ if(-1 != conn->size)
+ conn->maxdownload = conn->size;
+ }
+ /* If max download size is *zero* (nothing) we already
+ have nothing and can safely return ok now! */
+ if(0 == conn->maxdownload)
+ stop_reading = TRUE;
+
+ if(stop_reading) {
+ /* we make sure that this socket isn't read more now */
+ k->keepon &= ~KEEP_READ;
+ FD_ZERO(&k->rkeepfd);
+ }
- k->str = k->end_ptr + 1; /* move past new line */
+ break; /* exit header line loop */
+ }
- /*
- * We're about to copy a chunk of data to the end of the
- * already received header. We make sure that the full string
- * fit in the allocated header buffer, or else we enlarge
- * it.
- */
- if (k->hbuflen + (k->str - k->str_start) >=
- data->state.headersize) {
- char *newbuff;
- long newsize=MAX((k->hbuflen+
- (k->str-k->str_start))*3/2,
- data->state.headersize*2);
- hbufp_index = k->hbufp - data->state.headerbuff;
- newbuff = (char *)realloc(data->state.headerbuff, newsize);
- if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
- return CURLE_OUT_OF_MEMORY;
+ /* We continue reading headers, so reset the line-based
+ header parsing variables hbufp && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ continue;
}
- data->state.headersize= newsize;
- data->state.headerbuff = newbuff;
- k->hbufp = data->state.headerbuff + hbufp_index;
- }
- /* copy to end of line */
- strncpy (k->hbufp, k->str_start, k->str - k->str_start);
- k->hbufp += k->str - k->str_start;
- k->hbuflen += k->str - k->str_start;
- *k->hbufp = 0;
-
- k->p = data->state.headerbuff;
+ /*
+ * Checks for special headers coming up.
+ */
- /****
- * We now have a FULL header line that p points to
- *****/
-
- if(!k->headerline) {
- /* the first read header */
- if((k->hbuflen>5) &&
- !checkprefix("HTTP/", data->state.headerbuff)) {
- /* this is not the beginning of a HTTP first header line */
- k->header = FALSE;
- k->badheader = HEADER_PARTHEADER;
- break;
- }
- }
-
- if (('\n' == *k->p) || ('\r' == *k->p)) {
- int headerlen;
- /* Zero-length header line means end of headers! */
-
- if ('\r' == *k->p)
- k->p++; /* pass the \r byte */
- if ('\n' == *k->p)
- k->p++; /* pass the \n byte */
-
- if(100 == k->httpcode) {
- /*
- * we have made a HTTP PUT or POST and this is 1.1-lingo
- * that tells us that the server is OK with this and ready
- * to receive our stuff.
- * However, we'll get more headers now so we must get
- * back into the header-parsing state!
+ if (!k->headerline++) {
+ /* This is the first header, it MUST be the error code line
+ or else we consider this to be the body right away! */
+ int httpversion_major;
+ int nc=sscanf (k->p, " HTTP/%d.%d %3d",
+ &httpversion_major,
+ &k->httpversion,
+ &k->httpcode);
+ if (nc==3) {
+ k->httpversion += 10 * httpversion_major;
+ }
+ else {
+ /* this is the real world, not a Nirvana
+ NCSA 1.5.x returns this crap when asked for HTTP/1.1
+ */
+ nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
+ k->httpversion = 10;
+
+ /* If user has set option HTTP200ALIASES,
+ compare header line against list of aliases
*/
- k->header = TRUE;
- k->headerline = 0; /* restart the header line counter */
- /* if we did wait for this do enable write now! */
- if (k->write_after_100_header) {
+ if (!nc) {
+ if (checkhttpprefix(data, k->p)) {
+ nc = 1;
+ k->httpcode = 200;
+ k->httpversion =
+ (data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
+ }
+ }
+ }
- k->write_after_100_header = FALSE;
- FD_SET (conn->writesockfd, &k->writefd); /* write */
- k->keepon |= KEEP_WRITE;
- k->wkeepfd = k->writefd;
+ if (nc) {
+ data->info.httpcode = k->httpcode;
+ data->info.httpversion = k->httpversion;
+
+ /* 404 -> URL not found! */
+ if (data->set.http_fail_on_error &&
+ (k->httpcode >= 400)) {
+ /* If we have been told to fail hard on HTTP-errors,
+ here is the check for that: */
+ /* serious error, go home! */
+ failf (data, "The requested file was not found");
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+
+ if(k->httpversion == 10)
+ /* Default action for HTTP/1.0 must be to close, unless
+ we get one of those fancy headers that tell us the
+ server keeps it open for us! */
+ conn->bits.close = TRUE;
+
+ switch(k->httpcode) {
+ case 204:
+ /* (quote from RFC2616, section 10.2.5): The server has
+ * fulfilled the request but does not need to return an
+ * entity-body ... The 204 response MUST NOT include a
+ * message-body, and thus is always terminated by the first
+ * empty line after the header fields. */
+ /* FALLTHROUGH */
+ case 304:
+ /* (quote from RFC2616, section 10.3.5): The 304 response MUST
+ * NOT contain a message-body, and thus is always terminated
+ * by the first empty line after the header fields. */
+ conn->size=0;
+ conn->maxdownload=0;
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+ else {
+ k->header = FALSE; /* this is not a header line */
+ break;
}
}
- else
- k->header = FALSE; /* no more header to parse! */
- if (417 == k->httpcode) {
+ /* check for Content-Length: header lines to get size */
+ if (checkprefix("Content-Length:", k->p) &&
+ sscanf (k->p+15, " %ld", &k->contentlength)) {
+ conn->size = k->contentlength;
+ Curl_pgrsSetDownloadSize(data, k->contentlength);
+ }
+ /* check for Content-Type: header lines to get the mime-type */
+ else if (checkprefix("Content-Type:", k->p)) {
+ char *start;
+ char *end;
+ int len;
+
+ /* Find the first non-space letter */
+ for(start=k->p+14;
+ *start && isspace((int)*start);
+ start++);
+
+ /* count all non-space letters following */
+ for(end=start, len=0;
+ *end && !isspace((int)*end);
+ end++, len++);
+
+ /* allocate memory of a cloned copy */
+ data->info.contenttype = malloc(len + 1);
+ if (NULL == data->info.contenttype)
+ return CURLE_OUT_OF_MEMORY;
+
+ /* copy the content-type string */
+ memcpy(data->info.contenttype, start, len);
+ data->info.contenttype[len] = 0; /* zero terminate */
+ }
+ else if((k->httpversion == 10) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(k->p,
+ "Proxy-Connection:", "keep-alive")) {
/*
- * we got: "417 Expectation Failed" this means:
- * we have made a HTTP call and our Expect Header
- * seems to cause a problem => abort the write operations
- * (or prevent them from starting).
+ * When a HTTP/1.0 reply comes when using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
*/
- k->write_after_100_header = FALSE;
- k->keepon &= ~KEEP_WRITE;
- FD_ZERO(&k->wkeepfd);
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
}
-
- /* now, only output this if the header AND body are requested:
- */
- k->writetype = CLIENTWRITE_HEADER;
- if (data->set.http_include_header)
- k->writetype |= CLIENTWRITE_BODY;
-
- headerlen = k->p - data->state.headerbuff;
-
- result = Curl_client_write(data, k->writetype,
- data->state.headerbuff,
- headerlen);
- if(result)
- return result;
-
- data->info.header_size += headerlen;
- conn->headerbytecount += headerlen;
-
- if(!k->header) {
+ else if((k->httpversion == 10) &&
+ Curl_compareheader(k->p, "Connection:", "keep-alive")) {
/*
- * really end-of-headers.
+ * A HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
*
- * If we requested a "no body", this is a good time to get
- * out and return home.
+ * [RFC2068, section 19.7.1] */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 connection set to keep alive!\n");
+ }
+ else if (Curl_compareheader(k->p, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
*/
- if(data->set.no_body)
- stop_reading = TRUE;
- else if(!conn->bits.close) {
- /* If this is not the last request before a close, we must
- set the maximum download size to the size of the
- expected document or else, we won't know when to stop
- reading! */
- if(-1 != conn->size)
- conn->maxdownload = conn->size;
- }
- /* If max download size is *zero* (nothing) we already
- have nothing and can safely return ok now! */
- if(0 == conn->maxdownload)
- stop_reading = TRUE;
-
- if(stop_reading) {
- /* we make sure that this socket isn't read more now */
- k->keepon &= ~KEEP_READ;
- FD_ZERO(&k->rkeepfd);
- }
-
- break; /* exit header line loop */
+ conn->bits.close = TRUE; /* close when done */
}
-
- /* We continue reading headers, so reset the line-based
- header parsing variables hbufp && hbuflen */
- k->hbufp = data->state.headerbuff;
- k->hbuflen = 0;
- continue;
- }
-
- /*
- * Checks for special headers coming up.
- */
-
- if (!k->headerline++) {
- /* This is the first header, it MUST be the error code line
- or else we consiser this to be the body right away! */
- int httpversion_major;
- int nc=sscanf (k->p, " HTTP/%d.%d %3d",
- &httpversion_major,
- &k->httpversion,
- &k->httpcode);
- if (nc==3) {
- k->httpversion += 10 * httpversion_major;
+ else if (Curl_compareheader(k->p,
+ "Transfer-Encoding:", "chunked")) {
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+ * chunk starts with line with info (including size of the
+ * coming block) (terminated with CRLF), then a block of data
+ * with the previously mentioned size. There can be any amount
+ * of chunks, and a chunk-data set to zero signals the
+ * end-of-chunks. */
+ conn->bits.chunk = TRUE; /* chunks coming our way */
+
+ /* init our chunky engine */
+ Curl_httpchunk_init(conn);
}
- else {
- /* this is the real world, not a Nirvana
- NCSA 1.5.x returns this crap when asked for HTTP/1.1
- */
- nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
- k->httpversion = 10;
+ else if (checkprefix("Content-Encoding:", k->p) &&
+ data->set.encoding) {
+ /*
+ * Process Content-Encoding. Look for the values: identity, gzip,
+ * deflate, compress, x-gzip and x-compress. x-gzip and
+ * x-compress are the same as gzip and compress. (Sec 3.5 RFC
+ * 2616). zlib cannot handle compress, and gzip is not currently
+ * implemented. However, errors are handled further down when the
+ * response body is processed 08/27/02 jhrg */
+ char *start;
+
+ /* Find the first non-space letter */
+ for(start=k->p+17;
+ *start && isspace((int)*start);
+ start++);
+
+ /* Record the content-encoding for later use. 08/27/02 jhrg */
+ if (checkprefix("identity", start))
+ k->content_encoding = IDENTITY;
+ else if (checkprefix("deflate", start))
+ k->content_encoding = DEFLATE;
+ else if (checkprefix("gzip", start)
+ || checkprefix("x-gzip", start))
+ k->content_encoding = GZIP;
+ else if (checkprefix("compress", start)
+ || checkprefix("x-compress", start))
+ k->content_encoding = COMPRESS;
}
-
- if (nc) {
- data->info.httpcode = k->httpcode;
- data->info.httpversion = k->httpversion;
-
- /* 404 -> URL not found! */
- if (data->set.http_fail_on_error &&
- (k->httpcode >= 400)) {
- /* If we have been told to fail hard on HTTP-errors,
- here is the check for that: */
- /* serious error, go home! */
- failf (data, "The requested file was not found");
- return CURLE_HTTP_NOT_FOUND;
- }
-
- if(k->httpversion == 10)
- /* Default action for HTTP/1.0 must be to close, unless
- we get one of those fancy headers that tell us the
- server keeps it open for us! */
- conn->bits.close = TRUE;
-
- switch(k->httpcode) {
- case 204:
- /* (quote from RFC2616, section 10.2.5): The server has
- * fulfilled the request but does not need to return an
- * entity-body ... The 204 response MUST NOT include a
- * message-body, and thus is always terminated by the first
- * empty line after the header fields. */
- /* FALLTHROUGH */
- case 304:
- /* (quote from RFC2616, section 10.3.5): The 304 response MUST
- * NOT contain a message-body, and thus is always terminated
- * by the first empty line after the header fields. */
- conn->size=0;
- conn->maxdownload=0;
- break;
- default:
- /* nothing */
- break;
+ else if (checkprefix("Content-Range:", k->p)) {
+ if (sscanf (k->p+14, " bytes %d-", &k->offset) ||
+ sscanf (k->p+14, " bytes: %d-", &k->offset)) {
+ /* This second format was added August 1st 2000 by Igor
+ Khristophorov since Sun's webserver JavaWebServer/1.1.1
+ obviously sends the header this way! :-( */
+ if (conn->resume_from == k->offset) {
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
+ }
}
}
- else {
- k->header = FALSE; /* this is not a header line */
- break;
+ else if(data->cookies &&
+ checkprefix("Set-Cookie:", k->p)) {
+ Curl_cookie_add(data->cookies, TRUE, k->p+11, conn->name);
}
- }
-
- /* check for Content-Length: header lines to get size */
- if (checkprefix("Content-Length:", k->p) &&
- sscanf (k->p+15, " %ld", &k->contentlength)) {
- conn->size = k->contentlength;
- Curl_pgrsSetDownloadSize(data, k->contentlength);
+ else if(checkprefix("Last-Modified:", k->p) &&
+ (data->set.timecondition || data->set.get_filetime) ) {
+ time_t secs=time(NULL);
+ k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
+ &secs);
+ if(data->set.get_filetime)
+ data->info.filetime = k->timeofdoc;
}
- /* check for Content-Type: header lines to get the mime-type */
- else if (checkprefix("Content-Type:", k->p)) {
- char *start;
- char *end;
- int len;
-
- /* Find the first non-space letter */
- for(start=k->p+14;
- *start && isspace((int)*start);
- start++);
-
- /* count all non-space letters following */
- for(end=start, len=0;
- *end && !isspace((int)*end);
- end++, len++);
-
- /* allocate memory of a cloned copy */
- data->info.contenttype = malloc(len + 1);
- if (NULL == data->info.contenttype)
- return CURLE_OUT_OF_MEMORY;
-
- /* copy the content-type string */
- memcpy(data->info.contenttype, start, len);
- data->info.contenttype[len] = 0; /* zero terminate */
- }
- else if((k->httpversion == 10) &&
- conn->bits.httpproxy &&
- compareheader(k->p, "Proxy-Connection:", "keep-alive")) {
- /*
- * When a HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
- }
- else if((k->httpversion == 10) &&
- compareheader(k->p, "Connection:", "keep-alive")) {
- /*
- * A HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 connection set to keep alive!\n");
- }
- else if (compareheader(k->p, "Connection:", "close")) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
- */
- conn->bits.close = TRUE; /* close when done */
- }
- else if (compareheader(k->p, "Transfer-Encoding:", "chunked")) {
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
- conn->bits.chunk = TRUE; /* chunks coming our way */
-
- /* init our chunky engine */
- Curl_httpchunk_init(conn);
- }
- else if (checkprefix("Content-Encoding:", k->p) &&
- data->set.encoding) {
- /*
- * Process Content-Encoding. Look for the values: identity, gzip,
- * defalte, compress, x-gzip and x-compress. x-gzip and
- * x-compress are the same as gzip and compress. (Sec 3.5 RFC
- * 2616). zlib cannot handle compress, and gzip is not currently
- * implemented. However, errors are handled further down when the
- * response body is processed 08/27/02 jhrg */
- char *start;
-
- /* Find the first non-space letter */
- for(start=k->p+17;
- *start && isspace((int)*start);
- start++);
-
- /* Record the content-encoding for later use. 08/27/02 jhrg */
- if (checkprefix("identity", start))
- k->content_encoding = IDENTITY;
- else if (checkprefix("deflate", start))
- k->content_encoding = DEFLATE;
- else if (checkprefix("gzip", start)
- || checkprefix("x-gzip", start))
- k->content_encoding = GZIP;
- else if (checkprefix("compress", start)
- || checkprefix("x-compress", start))
- k->content_encoding = COMPRESS;
- }
- else if (checkprefix("Content-Range:", k->p)) {
- if (sscanf (k->p+14, " bytes %d-", &k->offset) ||
- sscanf (k->p+14, " bytes: %d-", &k->offset)) {
- /* This second format was added August 1st 2000 by Igor
- Khristophorov since Sun's webserver JavaWebServer/1.1.1
- obviously sends the header this way! :-( */
- if (conn->resume_from == k->offset) {
- /* we asked for a resume and we got it */
- k->content_range = TRUE;
+ else if ((k->httpcode >= 300 && k->httpcode < 400) &&
+ (data->set.http_follow_location) &&
+ checkprefix("Location:", k->p)) {
+ /* this is the URL that the server advises us to get instead */
+ char *ptr;
+ char *start=k->p;
+ char backup;
+
+ start += 9; /* pass "Location:" */
+
+ /* Skip spaces and tabs. We do this to support multiple
+ white spaces after the "Location:" keyword. */
+ while(*start && isspace((int)*start ))
+ start++;
+ ptr = start; /* start scanning here */
+
+ /* scan through the string to find the end */
+ while(*ptr && !isspace((int)*ptr))
+ ptr++;
+ backup = *ptr; /* store the ending letter */
+ if(ptr != start) {
+ *ptr = '\0'; /* zero terminate */
+ conn->newurl = strdup(start); /* clone string */
+ *ptr = backup; /* restore ending letter */
}
}
- }
- else if(data->cookies &&
- checkprefix("Set-Cookie:", k->p)) {
- Curl_cookie_add(data->cookies, TRUE, k->p+11, conn->name);
- }
- else if(checkprefix("Last-Modified:", k->p) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- time_t secs=time(NULL);
- k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
- &secs);
- if(data->set.get_filetime)
- data->info.filetime = k->timeofdoc;
- }
- else if ((k->httpcode >= 300 && k->httpcode < 400) &&
- (data->set.http_follow_location) &&
- checkprefix("Location:", k->p)) {
- /* this is the URL that the server advices us to get instead */
- char *ptr;
- char *start=k->p;
- char backup;
-
- start += 9; /* pass "Location:" */
-
- /* Skip spaces and tabs. We do this to support multiple
- white spaces after the "Location:" keyword. */
- while(*start && isspace((int)*start ))
- start++;
- ptr = start; /* start scanning here */
-
- /* scan through the string to find the end */
- while(*ptr && !isspace((int)*ptr))
- ptr++;
- backup = *ptr; /* store the ending letter */
- if(ptr != start) {
- *ptr = '\0'; /* zero terminate */
- conn->newurl = strdup(start); /* clone string */
- *ptr = backup; /* restore ending letter */
- }
- }
- /*
- * End of header-checks. Write them to the client.
- */
+ /*
+ * End of header-checks. Write them to the client.
+ */
- k->writetype = CLIENTWRITE_HEADER;
- if (data->set.http_include_header)
- k->writetype |= CLIENTWRITE_BODY;
+ k->writetype = CLIENTWRITE_HEADER;
+ if (data->set.http_include_header)
+ k->writetype |= CLIENTWRITE_BODY;
- if(data->set.verbose)
- Curl_debug(data, CURLINFO_HEADER_IN,
- k->p, k->hbuflen);
+ if(data->set.verbose)
+ Curl_debug(data, CURLINFO_HEADER_IN,
+ k->p, k->hbuflen);
- result = Curl_client_write(data, k->writetype, k->p,
- k->hbuflen);
- if(result)
- return result;
+ result = Curl_client_write(data, k->writetype, k->p,
+ k->hbuflen);
+ if(result)
+ return result;
- data->info.header_size += k->hbuflen;
- conn->headerbytecount += k->hbuflen;
+ data->info.header_size += k->hbuflen;
+ conn->headerbytecount += k->hbuflen;
- /* reset hbufp pointer && hbuflen */
- k->hbufp = data->state.headerbuff;
- k->hbuflen = 0;
- }
- while (!stop_reading && *k->str); /* header line within buffer */
+ /* reset hbufp pointer && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ }
+ while (!stop_reading && *k->str); /* header line within buffer */
- if(stop_reading)
- /* We've stopped dealing with input, get out of the do-while loop */
- break;
+ if(stop_reading)
+ /* We've stopped dealing with input, get out of the do-while loop */
+ break;
- /* We might have reached the end of the header part here, but
- there might be a non-header part left in the end of the read
- buffer. */
+ /* We might have reached the end of the header part here, but
+ there might be a non-header part left in the end of the read
+ buffer. */
- } /* end if header mode */
+ } /* end if header mode */
- /* This is not an 'else if' since it may be a rest from the header
- parsing, where the beginning of the buffer is headers and the end
- is non-headers. */
- if (k->str && !k->header && (nread > 0)) {
+ /* This is not an 'else if' since it may be a rest from the header
+ parsing, where the beginning of the buffer is headers and the end
+ is non-headers. */
+ if (k->str && !k->header && (nread > 0)) {
- if(0 == k->bodywrites) {
- /* These checks are only made the first time we are about to
- write a piece of the body */
- if(conn->protocol&PROT_HTTP) {
- /* HTTP-only checks */
- if (conn->newurl) {
- /* abort after the headers if "follow Location" is set */
- infof (data, "Follow to new URL: %s\n", conn->newurl);
- k->keepon &= ~KEEP_READ;
- FD_ZERO(&k->rkeepfd);
- *done = TRUE;
- return CURLE_OK;
- }
- else if (conn->resume_from &&
- !k->content_range &&
- (data->set.httpreq==HTTPREQ_GET)) {
- /* we wanted to resume a download, although the server
- doesn't seem to support this and we did this with a GET
- (if it wasn't a GET we did a POST or PUT resume) */
- failf (data, "HTTP server doesn't seem to support "
- "byte ranges. Cannot resume.");
- return CURLE_HTTP_RANGE_ERROR;
+ if(0 == k->bodywrites) {
+ /* These checks are only made the first time we are about to
+ write a piece of the body */
+ if(conn->protocol&PROT_HTTP) {
+ /* HTTP-only checks */
+ if (conn->newurl) {
+ /* abort after the headers if "follow Location" is set */
+ infof (data, "Follow to new URL: %s\n", conn->newurl);
+ k->keepon &= ~KEEP_READ;
+ FD_ZERO(&k->rkeepfd);
+ *done = TRUE;
+ return CURLE_OK;
+ }
+ else if (conn->resume_from &&
+ !k->content_range &&
+ (data->set.httpreq==HTTPREQ_GET)) {
+ /* we wanted to resume a download, although the server
+ doesn't seem to support this and we did this with a GET
+ (if it wasn't a GET we did a POST or PUT resume) */
+ failf (data, "HTTP server doesn't seem to support "
+ "byte ranges. Cannot resume.");
+ return CURLE_HTTP_RANGE_ERROR;
+ }
+ else if(data->set.timecondition && !conn->range) {
+ /* A time condition has been set AND no ranges have been
+ requested. This seems to be what chapter 13.3.4 of
+ RFC 2616 defines to be the correct action for a
+ HTTP/1.1 client */
+ if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
+ switch(data->set.timecondition) {
+ case TIMECOND_IFMODSINCE:
+ default:
+ if(k->timeofdoc < data->set.timevalue) {
+ infof(data,
+ "The requested document is not new enough\n");
+ *done = TRUE;
+ return CURLE_OK;
+ }
+ break;
+ case TIMECOND_IFUNMODSINCE:
+ if(k->timeofdoc > data->set.timevalue) {
+ infof(data,
+ "The requested document is not old enough\n");
+ *done = TRUE;
+ return CURLE_OK;
+ }
+ break;
+ } /* switch */
+ } /* two valid time strings */
+ } /* we have a time condition */
+
+ } /* this is HTTP */
+ } /* this is the first time we write a body part */
+ k->bodywrites++;
+
+ /* pass data to the debug function before it gets "dechunked" */
+ if(data->set.verbose) {
+ if(k->badheader) {
+ Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
+ k->hbuflen);
+ if(k->badheader == HEADER_PARTHEADER)
+ Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
}
- else if(data->set.timecondition && !conn->range) {
- /* A time condition has been set AND no ranges have been
- requested. This seems to be what chapter 13.3.4 of
- RFC 2616 defines to be the correct action for a
- HTTP/1.1 client */
- if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
- switch(data->set.timecondition) {
- case TIMECOND_IFMODSINCE:
- default:
- if(k->timeofdoc < data->set.timevalue) {
- infof(data,
- "The requested document is not new enough\n");
- *done = TRUE;
- return CURLE_OK;
- }
- break;
- case TIMECOND_IFUNMODSINCE:
- if(k->timeofdoc > data->set.timevalue) {
- infof(data,
- "The requested document is not old enough\n");
- *done = TRUE;
- return CURLE_OK;
- }
- break;
- } /* switch */
- } /* two valid time strings */
- } /* we have a time condition */
-
- } /* this is HTTP */
- } /* this is the first time we write a body part */
- k->bodywrites++;
-
- /* pass data to the debug function before it gets "dechunked" */
- if(data->set.verbose) {
- if(k->badheader) {
- Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
- k->hbuflen);
- if(k->badheader == HEADER_PARTHEADER)
+ else
Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
}
- else
- Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
- }
- if(conn->bits.chunk) {
- /*
- * Bless me father for I have sinned. Here comes a chunked
- * transfer flying and we need to decode this properly. While
- * the name says read, this function both reads and writes away
- * the data. The returned 'nread' holds the number of actual
- * data it wrote to the client. */
- CHUNKcode res =
- Curl_httpchunk_read(conn, k->str, nread, &nread);
-
- if(CHUNKE_OK < res) {
- if(CHUNKE_WRITE_ERROR == res) {
- failf(data, "Failed writing data");
- return CURLE_WRITE_ERROR;
+ if(conn->bits.chunk) {
+ /*
+ * Bless me father for I have sinned. Here comes a chunked
+ * transfer flying and we need to decode this properly. While
+ * the name says read, this function both reads and writes away
+ * the data. The returned 'nread' holds the number of actual
+ * data it wrote to the client. */
+ CHUNKcode res =
+ Curl_httpchunk_read(conn, k->str, nread, &nread);
+
+ if(CHUNKE_OK < res) {
+ if(CHUNKE_WRITE_ERROR == res) {
+ failf(data, "Failed writing data");
+ return CURLE_WRITE_ERROR;
+ }
+ failf(data, "Received problem in the chunky parser");
+ return CURLE_RECV_ERROR;
}
- failf(data, "Received problem in the chunky parser");
- return CURLE_RECV_ERROR;
- }
- else if(CHUNKE_STOP == res) {
- /* we're done reading chunks! */
- k->keepon &= ~KEEP_READ; /* read no more */
- FD_ZERO(&k->rkeepfd);
+ else if(CHUNKE_STOP == res) {
+ /* we're done reading chunks! */
+ k->keepon &= ~KEEP_READ; /* read no more */
+ FD_ZERO(&k->rkeepfd);
- /* There are now possibly N number of bytes at the end of the
- str buffer that weren't written to the client, but we don't
- care about them right now. */
+ /* There are now possibly N number of bytes at the end of the
+ str buffer that weren't written to the client, but we don't
+ care about them right now. */
+ }
+ /* If it returned OK, we just keep going */
}
- /* If it returned OK, we just keep going */
- }
- if((-1 != conn->maxdownload) &&
- (k->bytecount + nread >= conn->maxdownload)) {
- nread = conn->maxdownload - k->bytecount;
- if(nread < 0 ) /* this should be unusual */
- nread = 0;
+ if((-1 != conn->maxdownload) &&
+ (k->bytecount + nread >= conn->maxdownload)) {
+ nread = conn->maxdownload - k->bytecount;
+ if(nread < 0 ) /* this should be unusual */
+ nread = 0;
- k->keepon &= ~KEEP_READ; /* we're done reading */
- FD_ZERO(&k->rkeepfd);
- }
+ k->keepon &= ~KEEP_READ; /* we're done reading */
+ FD_ZERO(&k->rkeepfd);
+ }
- k->bytecount += nread;
+ k->bytecount += nread;
- Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
+ Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
- if(!conn->bits.chunk && (nread || k->badheader)) {
- /* If this is chunky transfer, it was already written */
-
- if(k->badheader) {
- /* we parsed a piece of data wrongly assuming it was a header
- and now we output it as body instead */
- result = Curl_client_write(data, CLIENTWRITE_BODY,
- data->state.headerbuff,
- k->hbuflen);
- }
- if(k->badheader < HEADER_ALLBAD) {
- /* This switch handles various content encodings. If there's an
- error here, be sure to check over the almost identical code in
- http_chunk.c. 08/29/02 jhrg */
+ if(!conn->bits.chunk && (nread || k->badheader)) {
+ /* If this is chunky transfer, it was already written */
+
+ if(k->badheader) {
+ /* we parsed a piece of data wrongly assuming it was a header
+ and now we output it as body instead */
+ result = Curl_client_write(data, CLIENTWRITE_BODY,
+ data->state.headerbuff,
+ k->hbuflen);
+ }
+ if(k->badheader < HEADER_ALLBAD) {
+ /* This switch handles various content encodings. If there's an
+ error here, be sure to check over the almost identical code
+ in http_chunk.c. 08/29/02 jhrg */
#ifdef HAVE_LIBZ
- switch (k->content_encoding) {
- case IDENTITY:
+ switch (k->content_encoding) {
+ case IDENTITY:
#endif
- /* This is the default when the server sends no
- Content-Encoding header. See Curl_readwrite_init; the
- memset() call initializes k->content_encoding to zero.
- 08/28/02 jhrg */
- result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
- nread);
+ /* This is the default when the server sends no
+ Content-Encoding header. See Curl_readwrite_init; the
+ memset() call initializes k->content_encoding to zero.
+ 08/28/02 jhrg */
+ result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
+ nread);
#ifdef HAVE_LIBZ
- break;
-
- case DEFLATE:
- /* Assume CLIENTWRITE_BODY; headers are not encoded. */
- result = Curl_unencode_deflate_write(data, k, nread);
- break;
-
- case GZIP: /* FIXME 08/27/02 jhrg */
- case COMPRESS:
- default:
- failf (data, "Unrecognized content encoding type. "
- "libcurl understands `identity' and `deflate' "
- "content encodings.");
- result = CURLE_BAD_CONTENT_ENCODING;
- break;
- }
+ break;
+
+ case DEFLATE:
+ /* Assume CLIENTWRITE_BODY; headers are not encoded. */
+ result = Curl_unencode_deflate_write(data, k, nread);
+ break;
+
+ case GZIP: /* FIXME 08/27/02 jhrg */
+ case COMPRESS:
+ default:
+ failf (data, "Unrecognized content encoding type. "
+ "libcurl understands `identity' and `deflate' "
+ "content encodings.");
+ result = CURLE_BAD_CONTENT_ENCODING;
+ break;
+ }
#endif
+ }
+ k->badheader = HEADER_NORMAL; /* taken care of now */
+
+ if(result)
+ return result;
}
- k->badheader = HEADER_NORMAL; /* taken care of now */
- if(result)
- return result;
- }
+ } /* if (! header and data to read ) */
+
+ } while(!readdone);
- } /* if (! header and data to read ) */
} /* if( read from socket ) */
/* If we still have writing to do, we check if we have a writable
@@ -890,134 +925,114 @@ CURLcode Curl_readwrite(struct connectdata *conn,
int i, si;
ssize_t bytes_written;
+ bool writedone=FALSE;
if ((k->bytecount == 0) && (k->writebytecount == 0))
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
didwhat |= KEEP_WRITE;
- /* only read more data if there's no upload data already
- present in the upload buffer */
- if(0 == conn->upload_present) {
- size_t buffersize = BUFSIZE;
- /* init the "upload from here" pointer */
- conn->upload_fromhere = k->uploadbuf;
-
- if(!k->upload_done) {
-
- if(conn->bits.upload_chunky) {
- /* if chunked Transfer-Encoding */
- buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
- conn->upload_fromhere += 10; /* 32bit hex + CRLF */
- }
-
- nread = data->set.fread(conn->upload_fromhere, 1,
- buffersize, data->set.in);
-
- if(conn->bits.upload_chunky) {
- /* if chunked Transfer-Encoding */
- char hexbuffer[9];
- int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
- "%x\r\n", nread);
- /* move buffer pointer */
- conn->upload_fromhere -= hexlen;
- nread += hexlen;
-
- /* copy the prefix to the buffer */
- memcpy(conn->upload_fromhere, hexbuffer, hexlen);
- if(nread>0) {
- /* append CRLF to the data */
- memcpy(conn->upload_fromhere +
- nread, "\r\n", 2);
- nread+=2;
- }
- else {
- /* mark this as done once this chunk is transfered */
- k->upload_done = TRUE;
- }
+ /*
+ * We loop here to do the READ and SEND loop until we run out of
+ * data to send or until we get EWOULDBLOCK back
+ */
+ do {
+
+ /* only read more data if there's no upload data already
+ present in the upload buffer */
+ if(0 == conn->upload_present) {
+ /* init the "upload from here" pointer */
+ conn->upload_fromhere = k->uploadbuf;
+
+ if(!k->upload_done)
+ nread = fillbuffer(conn, BUFSIZE);
+ else
+ nread = 0; /* we're done uploading/reading */
+
+ /* the signed int typecast of nread is for systems that have an
+ unsigned size_t */
+ if (nread<=0) {
+ /* done */
+ k->keepon &= ~KEEP_WRITE; /* we're done writing */
+ FD_ZERO(&k->wkeepfd);
+ writedone = TRUE;
+ break;
}
- }
- else
- nread = 0; /* we're done uploading/reading */
-
- /* the signed int typecase of nread of for systems that has
- unsigned size_t */
- if (nread<=0) {
- /* done */
- k->keepon &= ~KEEP_WRITE; /* we're done writing */
- FD_ZERO(&k->wkeepfd);
- break;
- }
- /* store number of bytes available for upload */
- conn->upload_present = nread;
+ /* store number of bytes available for upload */
+ conn->upload_present = nread;
- /* convert LF to CRLF if so asked */
- if (data->set.crlf) {
- for(i = 0, si = 0; i < nread; i++, si++) {
- if (conn->upload_fromhere[i] == 0x0a) {
- data->state.scratch[si++] = 0x0d;
- data->state.scratch[si] = 0x0a;
+ /* convert LF to CRLF if so asked */
+ if (data->set.crlf) {
+ for(i = 0, si = 0; i < nread; i++, si++) {
+ if (conn->upload_fromhere[i] == 0x0a) {
+ data->state.scratch[si++] = 0x0d;
+ data->state.scratch[si] = 0x0a;
+ }
+ else
+ data->state.scratch[si] = conn->upload_fromhere[i];
}
- else
- data->state.scratch[si] = conn->upload_fromhere[i];
- }
- if(si != nread) {
- /* only perform the special operation if we really did replace
- anything */
- nread = si;
+ if(si != nread) {
+ /* only perform the special operation if we really did replace
+ anything */
+ nread = si;
- /* upload from the new (replaced) buffer instead */
- conn->upload_fromhere = data->state.scratch;
+ /* upload from the new (replaced) buffer instead */
+ conn->upload_fromhere = data->state.scratch;
- /* set the new amount too */
- conn->upload_present = nread;
+ /* set the new amount too */
+ conn->upload_present = nread;
+ }
}
}
- }
- else {
- /* We have a partial buffer left from a previous "round". Use
- that instead of reading more data */
- }
+ else {
+ /* We have a partial buffer left from a previous "round". Use
+ that instead of reading more data */
+ }
- /* write to socket */
- result = Curl_write(conn,
- conn->writesockfd,
- conn->upload_fromhere,
- conn->upload_present,
- &bytes_written);
- if(result)
- return result;
- else if(conn->upload_present != bytes_written) {
- /* we only wrote a part of the buffer (if anything), deal with it! */
-
- /* store the amount of bytes left in the buffer to write */
- conn->upload_present -= bytes_written;
-
- /* advance the pointer where to find the buffer when the next send
- is to happen */
- conn->upload_fromhere += bytes_written;
- }
- else {
- /* we've uploaded that buffer now */
- conn->upload_fromhere = k->uploadbuf;
- conn->upload_present = 0; /* no more bytes left */
-
- if(k->upload_done) {
- /* switch off writing, we're done! */
- k->keepon &= ~KEEP_WRITE; /* we're done writing */
- FD_ZERO(&k->wkeepfd);
+ /* write to socket (send away data) */
+ result = Curl_write(conn,
+ conn->writesockfd, /* socket to send to */
+ conn->upload_fromhere, /* buffer pointer */
+ conn->upload_present, /* buffer size */
+ &bytes_written); /* actually send away */
+ if(result)
+ return result;
+ else if(conn->upload_present != bytes_written) {
+ /* we only wrote a part of the buffer (if anything), deal with it! */
+
+          /* store the number of bytes left in the buffer to write */
+ conn->upload_present -= bytes_written;
+
+          /* advance the pointer to where the next send should start
+             in the buffer */
+ conn->upload_fromhere += bytes_written;
+
+ writedone = TRUE; /* we are done, stop the loop */
+ }
+ else {
+ /* we've uploaded that buffer now */
+ conn->upload_fromhere = k->uploadbuf;
+ conn->upload_present = 0; /* no more bytes left */
+
+ if(k->upload_done) {
+ /* switch off writing, we're done! */
+ k->keepon &= ~KEEP_WRITE; /* we're done writing */
+ FD_ZERO(&k->wkeepfd);
+ writedone = TRUE;
+ }
}
- }
- if(data->set.verbose)
- Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
- bytes_written);
+ if(data->set.verbose)
+ Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
+ bytes_written);
- k->writebytecount += bytes_written;
- Curl_pgrsSetUploadCounter(data, (double)k->writebytecount);
+ k->writebytecount += bytes_written;
+ Curl_pgrsSetUploadCounter(data, (double)k->writebytecount);
+ } while(!writedone); /* loop until we're done writing! */
+
}
} while(0); /* just to break out from! */
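
The transfer.c hunk above replaces the single read-then-write pass with a
do/while loop that keeps reading from the application's callback and writing
to the socket until the data runs out, the write comes up short, or the
upload is finished; the chunked Transfer-Encoding framing that used to be
built inline now comes from fillbuffer(). As a reference for what that
framing looks like on the wire, here is a minimal, self-contained C sketch
(illustrative names only, not libcurl code): each chunk is the payload
length in hex, CRLF, the payload, CRLF, and a zero-length chunk terminates
the body.

/* Illustrative sketch only -- not libcurl code. Shows the chunked
   Transfer-Encoding frame layout that the removed inline code (and now
   fillbuffer()) produces: "<hex length>\r\n<payload>\r\n", ended by the
   terminating "0\r\n\r\n" chunk. */
#include <stdio.h>
#include <string.h>

/* Build one chunk frame into 'out' (assumed large enough) and return its
   length. A payload length of 0 produces the terminating chunk. */
static size_t build_chunk(char *out, const char *payload, size_t len)
{
  size_t off = (size_t)sprintf(out, "%zx\r\n", len); /* hex size + CRLF */
  memcpy(out + off, payload, len);                   /* payload bytes   */
  memcpy(out + off + len, "\r\n", 2);                /* trailing CRLF   */
  return off + len + 2;
}

int main(void)
{
  char frame[128];
  size_t n = build_chunk(frame, "hello", 5);  /* "5\r\nhello\r\n"        */
  fwrite(frame, 1, n, stdout);
  n = build_chunk(frame, "", 0);              /* "0\r\n\r\n" ends the body */
  fwrite(frame, 1, n, stdout);
  return 0;
}

Prepending the hex prefix directly in front of the payload is why the old
code reserved extra bytes ("32bit hex + CRLF") at the start of the upload
buffer before reading into it.
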
diff --git a/Source/CTest/Curl/url.c b/Source/CTest/Curl/url.c
index 7771752..db5c277 100644
--- a/Source/CTest/Curl/url.c
+++ b/Source/CTest/Curl/url.c
@@ -101,6 +101,7 @@
#include "strequal.h"
#include "escape.h"
#include "strtok.h"
+#include "share.h"
/* And now for the protocols */
#include "ftp.h"
@@ -1071,8 +1072,8 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...)
case CURLOPT_SHARE:
{
- curl_share *set;
- set = va_arg(param, curl_share *);
+ struct Curl_share *set;
+ set = va_arg(param, struct Curl_share *);
if(data->share)
data->share->dirty--;
@@ -1088,6 +1089,20 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...)
data->set.proxytype = va_arg(param, long);
break;
+ case CURLOPT_PRIVATE:
+ /*
+ * Set private data pointer.
+ */
+ data->set.private = va_arg(param, char *);
+ break;
+
+ case CURLOPT_HTTP200ALIASES:
+ /*
+ * Set a list of aliases for HTTP 200 in response header
+ */
+ data->set.http200aliases = va_arg(param, struct curl_slist *);
+ break;
+
default:
/* unknown tag and its companion, just ignore: */
return CURLE_FAILED_INIT; /* correct this */
@@ -1603,6 +1618,9 @@ static CURLcode ConnectPlease(struct connectdata *conn,
return result;
}
+/*
+ * ALERT! The 'dns' pointer being passed in here might be NULL at times.
+ */
static void verboseconnect(struct connectdata *conn,
struct Curl_dns_entry *dns)
{
@@ -1667,6 +1685,12 @@ CURLcode Curl_protocol_connect(struct connectdata *conn,
struct SessionHandle *data = conn->data;
CURLcode result=CURLE_OK;
+ if(conn->bits.tcpconnect)
+    /* We are already connected, just return. This may happen when the
+       connect worked fine on the first call, such as when we connect to a
+       local server or proxy. */
+ return CURLE_OK;
+
Curl_pgrsTime(data, TIMER_CONNECT); /* connect done */
if(data->set.verbose)
@@ -1779,6 +1803,9 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* else, no chunky upload */
FALSE;
+ conn->fread = data->set.fread;
+ conn->fread_in = data->set.in;
+
/***********************************************************
* We need to allocate memory to store the path in. We get the size of the
* full URL to be sure, and we need to make it at least 256 bytes since
@@ -2286,6 +2313,7 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* Setup a "faked" transfer that'll do nothing */
if(CURLE_OK == result) {
+    conn->bits.tcpconnect = TRUE; /* we are "connected" */
result = Curl_Transfer(conn, -1, -1, FALSE, NULL, /* no download */
-1, NULL); /* no upload */
}
@@ -2463,6 +2491,9 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* no name given, get the password only */
sscanf(userpass, ":%127[^@]", data->state.passwd);
+ /* we have set the password */
+ data->state.passwdgiven = TRUE;
+
if(data->state.user[0]) {
char *newname=curl_unescape(data->state.user, 0);
if(strlen(newname) < sizeof(data->state.user)) {
@@ -2498,14 +2529,17 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* the name is given, get user+password */
sscanf(data->set.userpwd, "%127[^:]:%127[^\n]",
data->state.user, data->state.passwd);
+ if(strchr(data->set.userpwd, ':'))
+ /* a colon means the password was given, even if blank */
+ data->state.passwdgiven = TRUE;
}
else
- /* no name given, get the password only */
+ /* no name given, starts with a colon, get the password only */
sscanf(data->set.userpwd+1, "%127[^\n]", data->state.passwd);
}
if (data->set.use_netrc != CURL_NETRC_IGNORED &&
- data->state.passwd[0] == '\0' ) { /* need passwd */
+ !data->state.passwdgiven) { /* need passwd */
if(Curl_parsenetrc(conn->hostname,
data->state.user,
data->state.passwd)) {
@@ -2516,8 +2550,7 @@ static CURLcode CreateConnection(struct SessionHandle *data,
}
/* if we have a user but no password, ask for one */
- if(conn->bits.user_passwd &&
- !data->state.passwd[0] ) {
+ if(conn->bits.user_passwd && !data->state.passwdgiven ) {
if(data->set.fpasswd(data->set.passwd_client,
"password:", data->state.passwd,
sizeof(data->state.passwd)))
@@ -2528,9 +2561,12 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* If our protocol needs a password and we have none, use the defaults */
if ( (conn->protocol & (PROT_FTP|PROT_HTTP)) &&
- !conn->bits.user_passwd) {
+ !conn->bits.user_passwd &&
+ !data->state.passwdgiven) {
+
strcpy(data->state.user, CURL_DEFAULT_USER);
strcpy(data->state.passwd, CURL_DEFAULT_PASSWORD);
+
/* This is the default password, so DON'T set conn->bits.user_passwd */
}
@@ -2782,14 +2818,21 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* Connect only if not already connected! */
result = ConnectPlease(conn, hostaddr, &connected);
- if(connected)
+ if(connected) {
result = Curl_protocol_connect(conn, hostaddr);
+ if(CURLE_OK == result)
+ conn->bits.tcpconnect = TRUE;
+ }
+ else
+ conn->bits.tcpconnect = FALSE;
+
if(CURLE_OK != result)
return result;
}
else {
Curl_pgrsTime(data, TIMER_CONNECT); /* we're connected already */
+ conn->bits.tcpconnect = TRUE;
if(data->set.verbose)
verboseconnect(conn, hostaddr);
}
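
The url.c hunks add two new setopt cases, CURLOPT_PRIVATE and
CURLOPT_HTTP200ALIASES, track TCP connection state in conn->bits.tcpconnect
so Curl_protocol_connect() can return early for already-connected reuse, and
introduce data->state.passwdgiven so an explicitly supplied empty password
no longer falls through to the netrc/prompt defaults. Below is a minimal
application-side usage sketch for the two new options; it assumes a libcurl
at least as new as this import (7.10.3), omits error handling, and the
CURLINFO_PRIVATE read-back mentioned in a comment is presumably the
getinfo.c counterpart listed in the diffstat.

/* Minimal usage sketch (application side), assuming curl >= 7.10.3 where
   CURLOPT_PRIVATE and CURLOPT_HTTP200ALIASES were introduced. Error
   handling is omitted for brevity. */
#include <curl/curl.h>

int main(void)
{
  CURL *easy;
  struct curl_slist *aliases = NULL;

  curl_global_init(CURL_GLOBAL_ALL);
  easy = curl_easy_init();

  /* Treat these non-standard status lines as a successful HTTP 200
     (the "ICY" alias is the classic streaming-server case). */
  aliases = curl_slist_append(aliases, "ICY 200 OK");
  aliases = curl_slist_append(aliases, "HTTP/1.0 200 FINE");

  curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/stream");
  curl_easy_setopt(easy, CURLOPT_HTTP200ALIASES, aliases);

  /* Stash an application pointer on the handle; it can be read back
     later with curl_easy_getinfo() and CURLINFO_PRIVATE. */
  curl_easy_setopt(easy, CURLOPT_PRIVATE, "my-transfer-tag");

  curl_easy_perform(easy);

  curl_slist_free_all(aliases);
  curl_easy_cleanup(easy);
  curl_global_cleanup();
  return 0;
}
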
diff --git a/Source/CTest/Curl/urldata.h b/Source/CTest/Curl/urldata.h
index c4cab1a..923da5c 100644
--- a/Source/CTest/Curl/urldata.h
+++ b/Source/CTest/Curl/urldata.h
@@ -157,6 +157,8 @@ struct ssl_config_data {
struct HTTP {
struct FormData *sendit;
int postsize;
+ char *postdata;
+
const char *p_pragma; /* Pragma: string */
const char *p_accept; /* Accept: string */
long readbytecount;
@@ -164,10 +166,24 @@ struct HTTP {
/* For FORM posting */
struct Form form;
- curl_read_callback storefread;
- FILE *in;
-
struct Curl_chunker chunk;
+
+ struct back {
+ curl_read_callback fread; /* backup storage for fread pointer */
+ void *fread_in; /* backup storage for fread_in pointer */
+ char *postdata;
+ int postsize;
+ } backup;
+
+ enum {
+ HTTPSEND_NADA, /* init */
+ HTTPSEND_REQUEST, /* sending a request */
+ HTTPSEND_BODY, /* sending body */
+ HTTPSEND_LAST /* never use this */
+ } sending;
+
+ void *send_buffer; /* used if the request couldn't be sent in one chunk,
+ points to an allocated send_buffer struct */
};
/****************************************************************************
@@ -190,7 +206,9 @@ struct FTP {
read the line, just ignore the result. */
bool no_transfer; /* nothing was transfered, (possibly because a resumed
transfer already was complete) */
-
+  long response_time; /* When no timeout is given, this is the number of
+                         seconds we wait for an FTP response. Initialized
+                         in Curl_ftp_connect() */
};
/****************************************************************************
@@ -220,8 +238,14 @@ struct ConnectBits {
bool upload_chunky; /* set TRUE if we are doing chunked transfer-encoding
on upload */
-
- bool getheader; /* TRUE if header parsing is wanted */
+ bool getheader; /* TRUE if header parsing is wanted */
+
+ bool forbidchunk; /* used only to explicitly forbid chunk-upload for
+ specific upload buffers. See readmoredata() in
+ http.c for details. */
+  bool tcpconnect;    /* the tcp stream (or similar) is connected; this
+                         is set on the first call to the connect function */
};
/*
@@ -456,6 +480,9 @@ struct connectdata {
and the 'upload_present' contains the number of bytes available at this
position */
char *upload_fromhere;
+
+ curl_read_callback fread; /* function that reads the input */
+ void *fread_in; /* pointer to pass to the fread() above */
};
/* The end of connectdata. 08/27/02 jhrg */
@@ -543,6 +570,9 @@ struct UrlState {
char proxyuser[MAX_CURL_USER_LENGTH];
char proxypasswd[MAX_CURL_PASSWORD_LENGTH];
+ bool passwdgiven; /* set TRUE if an application-provided password has been
+ set */
+
struct timeval keeps_speed; /* for the progress meter really */
/* 'connects' will be an allocated array with pointers. If the pointer is
@@ -631,7 +661,7 @@ struct UserDefined {
bool free_referer; /* set TRUE if 'referer' points to a string we
allocated */
char *useragent; /* User-Agent string */
- char *encoding; /* Accept-Encoding string 08/28/02 jhrg */
+ char *encoding; /* Accept-Encoding string */
char *postfields; /* if POST, set the fields' values here */
size_t postfieldsize; /* if POST, this might have a size to use instead of
strlen(), and then the data *may* be binary (contain
@@ -686,6 +716,10 @@ struct UserDefined {
int dns_cache_timeout; /* DNS cache timeout */
long buffer_size; /* size of receive buffer to use */
+
+ char *private; /* Private data */
+
+ struct curl_slist *http200aliases; /* linked list of aliases for http200 */
/* Here follows boolean settings that define how to behave during
this session. They are STATIC, set by libcurl users or at least initially
@@ -734,7 +768,7 @@ struct UserDefined {
struct SessionHandle {
curl_hash *hostcache;
- curl_share *share; /* Share, handles global variable mutexing */
+ struct Curl_share *share; /* Share, handles global variable mutexing */
struct UserDefined set; /* values set by the libcurl user */
struct DynamicStatic change; /* possibly modified userdefined data */
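
The urldata.h changes give struct HTTP a backup area for the read callback
and POST data plus a sending state (request vs. body), record the read
callback on the connectdata itself, and add the passwdgiven flag consumed by
the url.c logic above. The following standalone sketch shows that flag's
intent using hypothetical names rather than libcurl's: a colon in
"user:password" marks the password as given even when it is empty, so only a
genuinely absent password should trigger the netrc or prompt fallbacks.

/* Sketch of the passwdgiven idea with hypothetical names -- not libcurl
   code. A colon in "user:password" means a password was supplied, even if
   it is the empty string, so netrc/prompt fallbacks are skipped. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct creds {
  char user[128];
  char passwd[128];
  bool passwdgiven;   /* true if a password (possibly "") was supplied */
};

static void parse_userpwd(const char *userpwd, struct creds *c)
{
  memset(c, 0, sizeof(*c));
  sscanf(userpwd, "%127[^:]:%127[^\n]", c->user, c->passwd);
  if(strchr(userpwd, ':'))
    c->passwdgiven = true;  /* colon present: empty password still counts */
}

int main(void)
{
  struct creds c;
  parse_userpwd("alice:", &c);   /* empty but explicitly given password */
  printf("user=%s given=%d\n", c.user, c.passwdgiven);
  parse_userpwd("bob", &c);      /* no colon: fall back to netrc/prompt */
  printf("user=%s given=%d\n", c.user, c.passwdgiven);
  return 0;
}
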