Mirror of https://github.com/moparisthebest/curl (synced 2024-12-21 23:58:49 -05:00)
Major overhaul introducing http pipelining support and shared connection
cache within the multi handle.
This commit is contained in:
parent 7e4193b538
commit b7eeb6e67f

CHANGES | 19
@@ -6,6 +6,25 @@

Changelog

Daniel (6 September 2006)
- Ravi Pratap and I have implemented HTTP Pipelining support. Enable it for a
  multi handle using CURLMOPT_PIPELINING and all HTTP connections done on that
  handle will be attempted to get pipelined instead of done in parallel as
  they are performed otherwise.

  As a side-effect from this work, connections are now shared between all easy
  handles within a multi handle, so if you use N easy handles for transfers,
  each of them can pick up and re-use a connection that was previously used by
  any of the handles, be it the same or one of the others.

  This separation of the tight relationship between connections and easy
  handles is most noticeable when you close easy handles that have been used in
  a multi handle and check the amount of used memory or watch the debug output,
  as there are times when libcurl will keep the easy handle around for a while
  longer to be able to close it properly, like for sending QUIT to close down
  an FTP connection.

  This is a major change.

Daniel (4 September 2006)
- Dmitry Rechkin (http://curl.haxx.se/bug/view.cgi?id=1551412) provided a
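The changelog entry above describes the new CURLMOPT_PIPELINING option and the connection cache that is now shared by all easy handles inside a multi handle. As a minimal sketch (not part of the commit) of how an application might enable pipelining for two transfers, assuming a libcurl built from this tree; the URLs are made up and error checking is omitted:

    #include <curl/curl.h>

    int main(void)
    {
      CURLM *multi = curl_multi_init();
      CURL *first = curl_easy_init();
      CURL *second = curl_easy_init();
      int running = 0;

      /* ask this multi handle to pipeline HTTP requests when it can */
      curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);

      /* two requests to the same host may end up piped on one connection */
      curl_easy_setopt(first, CURLOPT_URL, "http://example.com/one");
      curl_easy_setopt(second, CURLOPT_URL, "http://example.com/two");

      curl_multi_add_handle(multi, first);
      curl_multi_add_handle(multi, second);

      /* drive both transfers; a real application would wait on the fds
         from curl_multi_fdset() instead of looping like this */
      do
        curl_multi_perform(multi, &running);
      while(running);

      curl_multi_remove_handle(multi, first);
      curl_multi_remove_handle(multi, second);
      curl_easy_cleanup(first);
      curl_easy_cleanup(second);
      curl_multi_cleanup(multi);
      return 0;
    }

With pipelining enabled, the second request is queued on the first request's connection instead of opening a new one, which is the turn-taking that the new WAITDO/WAITPERFORM states in lib/multi.c further down are there to coordinate.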
@@ -1,4 +1,4 @@
Curl and libcurl 7.15.6
Curl and libcurl 7.16.0

Public curl release number: 96
Releases counted from the very beginning: 123
@@ -11,6 +11,7 @@ Curl and libcurl 7.15.6

This release includes the following changes:

o CURLMOPT_PIPELINING added for enabling pipelined transfers
o Added support for other MS-DOS compilers (besides djgpp)
o CURLOPT_SOCKOPTFUNCTION and CURLOPT_SOCKOPTDATA were added
o (FTP) libcurl avoids sending TYPE if the desired type was already set
@@ -43,6 +44,6 @@ advice from friends like these:

Domenico Andreoli, Armel Asselin, Gisle Vanem, Yang Tse, Andrew Biggs,
Peter Sylvester, David McCreedy, Dmitriy Sergeyev, Dmitry Rechkin,
Jari Sundell
Jari Sundell, Ravi Pratap

Thanks! (and sorry if I forgot to mention someone)

@@ -32,6 +32,13 @@ details.
Pass a pointer to whatever you want passed to the curl_socket_callback's fourth
argument, the userp pointer. This is not used by libcurl but only passed-thru
as-is. Set the callback pointer with \fICURLMOPT_SOCKETFUNCTION\fP.
.IP CURLMOPT_PIPELINING
Pass a long set to 1 to enable or 0 to disable. Enabling pipelining on a multi
handle will make it attempt to perform HTTP Pipelining as far as possible for
transfers using this handle. This means that if you add a second request that
can use an already existing connection, the second request will be \&"piped"
on the same connection rather than being executed in parallel. (Added in
7.16.0)
.SH RETURNS
The standard CURLMcode for multi interface error codes. Note that it returns a
CURLM_UNKNOWN_OPTION if you try setting an option that this version of libcurl
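Because the RETURNS section says curl_multi_setopt() reports CURLM_UNKNOWN_OPTION for options the running libcurl does not know, a hedged sketch of probing for pipelining support at run time could look like this (assumes the program was built against 7.16.0-or-newer headers and that multi is an already created multi handle):

    CURLMcode rc = curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);
    if(rc == CURLM_UNKNOWN_OPTION) {
      /* the libcurl actually loaded predates 7.16.0: pipelining is not
         available, so the transfers will simply run in parallel instead */
    }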
@@ -28,13 +28,13 @@

/* This is the version number of the libcurl package from which this header
   file origins: */
#define LIBCURL_VERSION "7.15.6-CVS"
#define LIBCURL_VERSION "7.16.0-CVS"

/* The numeric version number is also available "in parts" by using these
   defines: */
#define LIBCURL_VERSION_MAJOR 7
#define LIBCURL_VERSION_MINOR 15
#define LIBCURL_VERSION_PATCH 6
#define LIBCURL_VERSION_MINOR 16
#define LIBCURL_VERSION_PATCH 0

/* This is the numeric version of the libcurl version number, meant for easier
   parsing and comparions by programs. The LIBCURL_VERSION_NUM define will
@@ -51,6 +51,6 @@
   and it is always a greater number in a more recent release. It makes
   comparisons with greater than and less than work.
*/
#define LIBCURL_VERSION_NUM 0x070f06
#define LIBCURL_VERSION_NUM 0x071000

#endif /* __CURL_CURLVER_H */
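The header comment above explains that LIBCURL_VERSION_NUM grows monotonically so programs can compare it numerically. A small sketch of using it to guard the new option at compile time; the helper name is made up for illustration, and 0x071000 is the 7.16.0 number introduced in this diff:

    #include <curl/curl.h>

    static void enable_pipelining_if_available(CURLM *multi)
    {
    /* only attempt the new option when built against 7.16.0 or newer headers */
    #if LIBCURL_VERSION_NUM >= 0x071000
      curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);
    #else
      (void)multi; /* headers older than 7.16.0: nothing to enable */
    #endif
    }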
@@ -270,6 +270,9 @@ typedef enum {
  /* This is the argument passed to the socket callback */
  CINIT(SOCKETDATA, OBJECTPOINT, 2),

  /* set to 1 to enable pipelining for this multi handle */
  CINIT(PIPELINING, LONG, 3),

  CURLMOPT_LASTENTRY /* the last unused */
} CURLMoption;

lib/dict.c | 10
@@ -134,8 +134,8 @@ CURLcode Curl_dict(struct connectdata *conn, bool *done)
  struct SessionHandle *data=conn->data;
  curl_socket_t sockfd = conn->sock[FIRSTSOCKET];

  char *path = conn->path;
  curl_off_t *bytecount = &conn->bytecount;
  char *path = data->reqdata.path;
  curl_off_t *bytecount = &data->reqdata.keep.bytecount;

  *done = TRUE; /* unconditionally */

@@ -196,7 +196,7 @@ CURLcode Curl_dict(struct connectdata *conn, bool *done)
    if(result)
      failf(data, "Failed sending DICT request");
    else
      result = Curl_Transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
      result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
                             -1, NULL); /* no upload */
    if(result)
      return result;
@@ -243,7 +243,7 @@ CURLcode Curl_dict(struct connectdata *conn, bool *done)
    if(result)
      failf(data, "Failed sending DICT request");
    else
      result = Curl_Transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
      result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
                             -1, NULL); /* no upload */

    if(result)
@@ -268,7 +268,7 @@ CURLcode Curl_dict(struct connectdata *conn, bool *done)
    if(result)
      failf(data, "Failed sending DICT request");
    else
      result = Curl_Transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
      result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
                             -1, NULL);
    if(result)
      return result;

lib/easy.c | 38
@ -469,6 +469,13 @@ CURLcode curl_easy_perform(CURL *curl)
|
||||
|
||||
}
|
||||
|
||||
if(!data->state.connc) {
|
||||
/* oops, no connection cache, make one up */
|
||||
data->state.connc = Curl_mk_connc(CONNCACHE_PRIVATE);
|
||||
if(!data->state.connc)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
return Curl_perform(data);
|
||||
}
|
||||
#endif
|
||||
@ -496,6 +503,13 @@ void Curl_easy_addmulti(struct SessionHandle *data,
|
||||
data->multi = multi;
|
||||
}
|
||||
|
||||
void Curl_easy_initHandleData(struct SessionHandle *data)
|
||||
{
|
||||
memset(&data->reqdata, 0, sizeof(struct HandleData));
|
||||
|
||||
data->reqdata.maxdownload = -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* curl_easy_getinfo() is an external interface that allows an app to retrieve
|
||||
* information from a performed transfer and similar.
|
||||
@ -543,16 +557,14 @@ CURL *curl_easy_duphandle(CURL *incurl)
|
||||
|
||||
/* copy all userdefined values */
|
||||
outcurl->set = data->set;
|
||||
outcurl->state.numconnects = data->state.numconnects;
|
||||
outcurl->state.connects = (struct connectdata **)
|
||||
malloc(sizeof(struct connectdata *) * outcurl->state.numconnects);
|
||||
|
||||
if(!outcurl->state.connects) {
|
||||
if(data->state.used_interface == Curl_if_multi)
|
||||
outcurl->state.connc = data->state.connc;
|
||||
else
|
||||
outcurl->state.connc = Curl_mk_connc(CONNCACHE_PRIVATE);
|
||||
|
||||
if(!outcurl->state.connc)
|
||||
break;
|
||||
}
|
||||
|
||||
memset(outcurl->state.connects, 0,
|
||||
sizeof(struct connectdata *)*outcurl->state.numconnects);
|
||||
|
||||
outcurl->state.lastconnect = -1;
|
||||
|
||||
@ -574,6 +586,7 @@ CURL *curl_easy_duphandle(CURL *incurl)
|
||||
#endif /* CURL_DISABLE_HTTP */
|
||||
|
||||
/* duplicate all values in 'change' */
|
||||
|
||||
if(data->change.url) {
|
||||
outcurl->change.url = strdup(data->change.url);
|
||||
if(!outcurl->change.url)
|
||||
@ -599,14 +612,16 @@ CURL *curl_easy_duphandle(CURL *incurl)
|
||||
break;
|
||||
#endif
|
||||
|
||||
Curl_easy_initHandleData(outcurl);
|
||||
|
||||
fail = FALSE; /* we reach this point and thus we are OK */
|
||||
|
||||
} while(0);
|
||||
|
||||
if(fail) {
|
||||
if(outcurl) {
|
||||
if(outcurl->state.connects)
|
||||
free(outcurl->state.connects);
|
||||
if(outcurl->state.connc->type == CONNCACHE_PRIVATE)
|
||||
Curl_rm_connc(outcurl->state.connc);
|
||||
if(outcurl->state.headerbuff)
|
||||
free(outcurl->state.headerbuff);
|
||||
if(outcurl->change.proxy)
|
||||
@ -637,6 +652,9 @@ void curl_easy_reset(CURL *curl)
|
||||
/* zero out Progress data: */
|
||||
memset(&data->progress, 0, sizeof(struct Progress));
|
||||
|
||||
/* init Handle data */
|
||||
Curl_easy_initHandleData(data);
|
||||
|
||||
/* The remainder of these calls have been taken from Curl_open() */
|
||||
|
||||
data->set.out = stdout; /* default output to stdout */
|
||||
|
@ -28,6 +28,8 @@
|
||||
*/
|
||||
void Curl_easy_addmulti(struct SessionHandle *data, void *multi);
|
||||
|
||||
void Curl_easy_initHandleData(struct SessionHandle *data);
|
||||
|
||||
CURLcode Curl_convert_to_network(struct SessionHandle *data,
|
||||
char *buffer, size_t length);
|
||||
CURLcode Curl_convert_from_network(struct SessionHandle *data,
|
||||
|
lib/file.c | 26
@ -102,7 +102,7 @@
|
||||
*/
|
||||
CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
{
|
||||
char *real_path = curl_easy_unescape(conn->data, conn->path, 0, NULL);
|
||||
char *real_path = curl_easy_unescape(conn->data, conn->data->reqdata.path, 0, NULL);
|
||||
struct FILEPROTO *file;
|
||||
int fd;
|
||||
#if defined(WIN32) || defined(MSDOS) || defined(__EMX__)
|
||||
@ -119,7 +119,11 @@ CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
conn->proto.file = file;
|
||||
if (conn->data->reqdata.proto.file) {
|
||||
free(conn->data->reqdata.proto.file);
|
||||
}
|
||||
|
||||
conn->data->reqdata.proto.file = file;
|
||||
|
||||
#if defined(WIN32) || defined(MSDOS) || defined(__EMX__)
|
||||
/* If the first character is a slash, and there's
|
||||
@ -160,7 +164,7 @@ CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
|
||||
file->fd = fd;
|
||||
if(!conn->data->set.upload && (fd == -1)) {
|
||||
failf(conn->data, "Couldn't open file %s", conn->path);
|
||||
failf(conn->data, "Couldn't open file %s", conn->data->reqdata.path);
|
||||
Curl_file_done(conn, CURLE_FILE_COULDNT_READ_FILE);
|
||||
return CURLE_FILE_COULDNT_READ_FILE;
|
||||
}
|
||||
@ -171,7 +175,7 @@ CURLcode Curl_file_connect(struct connectdata *conn)
|
||||
CURLcode Curl_file_done(struct connectdata *conn,
|
||||
CURLcode status)
|
||||
{
|
||||
struct FILEPROTO *file = conn->proto.file;
|
||||
struct FILEPROTO *file = conn->data->reqdata.proto.file;
|
||||
(void)status; /* not used */
|
||||
Curl_safefree(file->freepath);
|
||||
|
||||
@ -189,7 +193,7 @@ CURLcode Curl_file_done(struct connectdata *conn,
|
||||
|
||||
static CURLcode file_upload(struct connectdata *conn)
|
||||
{
|
||||
struct FILEPROTO *file = conn->proto.file;
|
||||
struct FILEPROTO *file = conn->data->reqdata.proto.file;
|
||||
char *dir = strchr(file->path, DIRSEP);
|
||||
FILE *fp;
|
||||
CURLcode res=CURLE_OK;
|
||||
@ -206,7 +210,7 @@ static CURLcode file_upload(struct connectdata *conn)
|
||||
*/
|
||||
conn->fread = data->set.fread;
|
||||
conn->fread_in = data->set.in;
|
||||
conn->upload_fromhere = buf;
|
||||
conn->data->reqdata.upload_fromhere = buf;
|
||||
|
||||
if(!dir)
|
||||
return CURLE_FILE_COULDNT_READ_FILE; /* fix: better error code */
|
||||
@ -297,7 +301,7 @@ CURLcode Curl_file(struct connectdata *conn, bool *done)
|
||||
return file_upload(conn);
|
||||
|
||||
/* get the fd from the connection phase */
|
||||
fd = conn->proto.file->fd;
|
||||
fd = conn->data->reqdata.proto.file->fd;
|
||||
|
||||
/* VMS: This only works reliable for STREAMLF files */
|
||||
if( -1 != fstat(fd, &statbuf)) {
|
||||
@ -346,8 +350,8 @@ CURLcode Curl_file(struct connectdata *conn, bool *done)
|
||||
return result;
|
||||
}
|
||||
|
||||
if (conn->resume_from <= expected_size)
|
||||
expected_size -= conn->resume_from;
|
||||
if (data->reqdata.resume_from <= expected_size)
|
||||
expected_size -= data->reqdata.resume_from;
|
||||
else {
|
||||
failf(data, "failed to resume file:// transfer");
|
||||
return CURLE_BAD_DOWNLOAD_RESUME;
|
||||
@ -363,8 +367,8 @@ CURLcode Curl_file(struct connectdata *conn, bool *done)
|
||||
if(fstated)
|
||||
Curl_pgrsSetDownloadSize(data, expected_size);
|
||||
|
||||
if(conn->resume_from)
|
||||
lseek(fd, conn->resume_from, SEEK_SET);
|
||||
if(data->reqdata.resume_from)
|
||||
lseek(fd, data->reqdata.resume_from, SEEK_SET);
|
||||
|
||||
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
|
||||
|
||||
|
@ -199,22 +199,22 @@ CURLcode Curl_getinfo(struct SessionHandle *data, CURLINFO info, ...)
|
||||
break;
|
||||
case CURLINFO_LASTSOCKET:
|
||||
if((data->state.lastconnect != -1) &&
|
||||
(data->state.connects[data->state.lastconnect] != NULL)) {
|
||||
*param_longp = data->state.connects[data->state.lastconnect]->
|
||||
sock[FIRSTSOCKET];
|
||||
(data->state.connc->connects[data->state.lastconnect] != NULL)) {
|
||||
struct connectdata *c = data->state.connc->connects
|
||||
[data->state.lastconnect];
|
||||
*param_longp = c->sock[FIRSTSOCKET];
|
||||
/* we have a socket connected, let's determine if the server shut down */
|
||||
/* determine if ssl */
|
||||
if(data->state.connects[data->state.lastconnect]->ssl[FIRSTSOCKET].use) {
|
||||
if(c->ssl[FIRSTSOCKET].use) {
|
||||
/* use the SSL context */
|
||||
if (!Curl_ssl_check_cxn(data->state.connects[data->state.lastconnect]))
|
||||
if (!Curl_ssl_check_cxn(c))
|
||||
*param_longp = -1; /* FIN received */
|
||||
}
|
||||
/* Minix 3.1 doesn't support any flags on recv; just assume socket is OK */
|
||||
#ifdef MSG_PEEK
|
||||
else {
|
||||
/* use the socket */
|
||||
if(recv((int)data->state.connects[data->state.lastconnect]->
|
||||
sock[FIRSTSOCKET], (void*)&buf, 1, MSG_PEEK) == 0)
|
||||
if(recv((int)c->sock[FIRSTSOCKET], (void*)&buf, 1, MSG_PEEK) == 0)
|
||||
*param_longp = -1; /* FIN received */
|
||||
}
|
||||
#endif
|
||||
|
@@ -87,7 +87,8 @@
#define CURL_ASYNC_SUCCESS ARES_SUCCESS
#else
#define CURL_ASYNC_SUCCESS CURLE_OK
#define ares_cancel(x)
#define ares_cancel(x) do {} while(0)
#define ares_destroy(x) do {} while (0);
#endif

/*
lib/http.c | 113
@ -220,8 +220,9 @@ static bool pickoneauth(struct auth *pick)
|
||||
*/
|
||||
static CURLcode perhapsrewind(struct connectdata *conn)
|
||||
{
|
||||
struct HTTP *http = conn->proto.http;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct HTTP *http = data->reqdata.proto.http;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
curl_off_t bytessent;
|
||||
curl_off_t expectsend = -1; /* default is unknown */
|
||||
|
||||
@ -283,7 +284,7 @@ static CURLcode perhapsrewind(struct connectdata *conn)
|
||||
/* This is not NTLM or NTLM with many bytes left to send: close
|
||||
*/
|
||||
conn->bits.close = TRUE;
|
||||
conn->size = 0; /* don't download any more than 0 bytes */
|
||||
k->size = 0; /* don't download any more than 0 bytes */
|
||||
}
|
||||
|
||||
if(bytessent)
|
||||
@ -306,7 +307,7 @@ CURLcode Curl_http_auth_act(struct connectdata *conn)
|
||||
bool pickproxy = FALSE;
|
||||
CURLcode code = CURLE_OK;
|
||||
|
||||
if(100 == conn->keep.httpcode)
|
||||
if(100 == data->reqdata.keep.httpcode)
|
||||
/* this is a transient response code, ignore */
|
||||
return CURLE_OK;
|
||||
|
||||
@ -314,22 +315,22 @@ CURLcode Curl_http_auth_act(struct connectdata *conn)
|
||||
return data->set.http_fail_on_error?CURLE_HTTP_RETURNED_ERROR:CURLE_OK;
|
||||
|
||||
if(conn->bits.user_passwd &&
|
||||
((conn->keep.httpcode == 401) ||
|
||||
(conn->bits.authneg && conn->keep.httpcode < 300))) {
|
||||
((data->reqdata.keep.httpcode == 401) ||
|
||||
(conn->bits.authneg && data->reqdata.keep.httpcode < 300))) {
|
||||
pickhost = pickoneauth(&data->state.authhost);
|
||||
if(!pickhost)
|
||||
data->state.authproblem = TRUE;
|
||||
}
|
||||
if(conn->bits.proxy_user_passwd &&
|
||||
((conn->keep.httpcode == 407) ||
|
||||
(conn->bits.authneg && conn->keep.httpcode < 300))) {
|
||||
((data->reqdata.keep.httpcode == 407) ||
|
||||
(conn->bits.authneg && data->reqdata.keep.httpcode < 300))) {
|
||||
pickproxy = pickoneauth(&data->state.authproxy);
|
||||
if(!pickproxy)
|
||||
data->state.authproblem = TRUE;
|
||||
}
|
||||
|
||||
if(pickhost || pickproxy) {
|
||||
conn->newurl = strdup(data->change.url); /* clone URL */
|
||||
data->reqdata.newurl = strdup(data->change.url); /* clone URL */
|
||||
|
||||
if((data->set.httpreq != HTTPREQ_GET) &&
|
||||
(data->set.httpreq != HTTPREQ_HEAD) &&
|
||||
@ -340,7 +341,7 @@ CURLcode Curl_http_auth_act(struct connectdata *conn)
|
||||
}
|
||||
}
|
||||
|
||||
else if((conn->keep.httpcode < 300) &&
|
||||
else if((data->reqdata.keep.httpcode < 300) &&
|
||||
(!data->state.authhost.done) &&
|
||||
conn->bits.authneg) {
|
||||
/* no (known) authentication available,
|
||||
@ -349,13 +350,13 @@ CURLcode Curl_http_auth_act(struct connectdata *conn)
|
||||
we didn't try HEAD or GET */
|
||||
if((data->set.httpreq != HTTPREQ_GET) &&
|
||||
(data->set.httpreq != HTTPREQ_HEAD)) {
|
||||
conn->newurl = strdup(data->change.url); /* clone URL */
|
||||
data->reqdata.newurl = strdup(data->change.url); /* clone URL */
|
||||
data->state.authhost.done = TRUE;
|
||||
}
|
||||
}
|
||||
if (Curl_http_should_fail(conn)) {
|
||||
failf (data, "The requested URL returned error: %d",
|
||||
conn->keep.httpcode);
|
||||
data->reqdata.keep.httpcode);
|
||||
code = CURLE_HTTP_RETURNED_ERROR;
|
||||
}
|
||||
|
||||
@ -589,8 +590,8 @@ CURLcode Curl_http_input_auth(struct connectdata *conn,
|
||||
/* if exactly this is wanted, go */
|
||||
int neg = Curl_input_negotiate(conn, start);
|
||||
if (neg == 0) {
|
||||
conn->newurl = strdup(data->change.url);
|
||||
data->state.authproblem = (conn->newurl == NULL);
|
||||
data->reqdata.newurl = strdup(data->change.url);
|
||||
data->state.authproblem = (data->reqdata.newurl == NULL);
|
||||
}
|
||||
else {
|
||||
infof(data, "Authentication problem. Ignoring this.\n");
|
||||
@ -681,7 +682,7 @@ int Curl_http_should_fail(struct connectdata *conn)
|
||||
/*
|
||||
** For readability
|
||||
*/
|
||||
k = &conn->keep;
|
||||
k = &data->reqdata.keep;
|
||||
|
||||
/*
|
||||
** If we haven't been asked to fail on error,
|
||||
@ -696,7 +697,7 @@ int Curl_http_should_fail(struct connectdata *conn)
|
||||
if (k->httpcode < 400)
|
||||
return 0;
|
||||
|
||||
if (conn->resume_from &&
|
||||
if (data->reqdata.resume_from &&
|
||||
(data->set.httpreq==HTTPREQ_GET) &&
|
||||
(k->httpcode == 416)) {
|
||||
/* "Requested Range Not Satisfiable", just proceed and
|
||||
@ -736,7 +737,7 @@ int Curl_http_should_fail(struct connectdata *conn)
|
||||
infof(data,"%s: authavail = 0x%08x\n",__FUNCTION__,data->state.authavail);
|
||||
infof(data,"%s: httpcode = %d\n",__FUNCTION__,k->httpcode);
|
||||
infof(data,"%s: authdone = %d\n",__FUNCTION__,data->state.authdone);
|
||||
infof(data,"%s: newurl = %s\n",__FUNCTION__,conn->newurl ? conn->newurl : "(null)");
|
||||
infof(data,"%s: newurl = %s\n",__FUNCTION__,data->reqdata.newurl ? data->reqdata.newurl : "(null)");
|
||||
infof(data,"%s: authproblem = %d\n",__FUNCTION__,data->state.authproblem);
|
||||
#endif
|
||||
|
||||
@ -766,7 +767,7 @@ static size_t readmoredata(char *buffer,
|
||||
void *userp)
|
||||
{
|
||||
struct connectdata *conn = (struct connectdata *)userp;
|
||||
struct HTTP *http = conn->proto.http;
|
||||
struct HTTP *http = conn->data->reqdata.proto.http;
|
||||
size_t fullsize = size * nitems;
|
||||
|
||||
if(0 == http->postsize)
|
||||
@ -854,7 +855,7 @@ CURLcode add_buffer_send(send_buffer *in,
|
||||
CURLcode res;
|
||||
char *ptr;
|
||||
size_t size;
|
||||
struct HTTP *http = conn->proto.http;
|
||||
struct HTTP *http = conn->data->reqdata.proto.http;
|
||||
size_t sendsize;
|
||||
curl_socket_t sockfd;
|
||||
|
||||
@ -939,6 +940,8 @@ CURLcode add_buffer_send(send_buffer *in,
|
||||
This needs FIXing.
|
||||
*/
|
||||
return CURLE_SEND_ERROR;
|
||||
else
|
||||
conn->writechannel_inuse = FALSE;
|
||||
}
|
||||
}
|
||||
if(in->buffer)
|
||||
@ -1082,7 +1085,7 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
|
||||
{
|
||||
int subversion=0;
|
||||
struct SessionHandle *data=conn->data;
|
||||
struct Curl_transfer_keeper *k = &conn->keep;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
CURLcode result;
|
||||
int res;
|
||||
size_t nread; /* total size read */
|
||||
@ -1106,12 +1109,12 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
|
||||
infof(data, "Establish HTTP proxy tunnel to %s:%d\n", hostname, remote_port);
|
||||
|
||||
do {
|
||||
if(conn->newurl) {
|
||||
if(data->reqdata.newurl) {
|
||||
/* This only happens if we've looped here due to authentication reasons,
|
||||
and we don't really use the newly cloned URL here then. Just free()
|
||||
it. */
|
||||
free(conn->newurl);
|
||||
conn->newurl = NULL;
|
||||
free(data->reqdata.newurl);
|
||||
data->reqdata.newurl = NULL;
|
||||
}
|
||||
|
||||
/* initialize a dynamic send-buffer */
|
||||
@ -1323,7 +1326,7 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
|
||||
headers. 'newurl' is set to a new URL if we must loop. */
|
||||
Curl_http_auth_act(conn);
|
||||
|
||||
} while(conn->newurl);
|
||||
} while(data->reqdata.newurl);
|
||||
|
||||
if(200 != k->httpcode) {
|
||||
failf(data, "Received HTTP code %d from proxy after CONNECT",
|
||||
@ -1462,11 +1465,9 @@ int Curl_https_getsock(struct connectdata *conn,
|
||||
CURLcode Curl_http_done(struct connectdata *conn,
|
||||
CURLcode status)
|
||||
{
|
||||
struct SessionHandle *data;
|
||||
struct HTTP *http;
|
||||
|
||||
data=conn->data;
|
||||
http=conn->proto.http;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct HTTP *http =data->reqdata.proto.http;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
|
||||
/* set the proper values (possibly modified on POST) */
|
||||
conn->fread = data->set.fread; /* restore */
|
||||
@ -1484,7 +1485,7 @@ CURLcode Curl_http_done(struct connectdata *conn,
|
||||
}
|
||||
|
||||
if(HTTPREQ_POST_FORM == data->set.httpreq) {
|
||||
conn->bytecount = http->readbytecount + http->writebytecount;
|
||||
k->bytecount = http->readbytecount + http->writebytecount;
|
||||
|
||||
Curl_formclean(http->sendit); /* Now free that whole lot */
|
||||
if(http->form.fp) {
|
||||
@ -1494,15 +1495,15 @@ CURLcode Curl_http_done(struct connectdata *conn,
|
||||
}
|
||||
}
|
||||
else if(HTTPREQ_PUT == data->set.httpreq)
|
||||
conn->bytecount = http->readbytecount + http->writebytecount;
|
||||
k->bytecount = http->readbytecount + http->writebytecount;
|
||||
|
||||
if (status != CURLE_OK)
|
||||
return (status);
|
||||
|
||||
if(!conn->bits.retry &&
|
||||
((http->readbytecount +
|
||||
conn->headerbytecount -
|
||||
conn->deductheadercount)) <= 0) {
|
||||
k->headerbytecount -
|
||||
k->deductheadercount)) <= 0) {
|
||||
/* If this connection isn't simply closed to be retried, AND nothing was
|
||||
read from the HTTP server (that counts), this can't be right so we
|
||||
return an error here */
|
||||
@ -1586,7 +1587,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
char *buf = data->state.buffer; /* this is a short cut to the buffer */
|
||||
CURLcode result=CURLE_OK;
|
||||
struct HTTP *http;
|
||||
char *ppath = conn->path;
|
||||
char *ppath = data->reqdata.path;
|
||||
char *host = conn->host.name;
|
||||
const char *te = ""; /* tranfer-encoding */
|
||||
char *ptr;
|
||||
@ -1599,17 +1600,17 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
the rest of the request in the PERFORM phase. */
|
||||
*done = TRUE;
|
||||
|
||||
if(!conn->proto.http) {
|
||||
if(!data->reqdata.proto.http) {
|
||||
/* Only allocate this struct if we don't already have it! */
|
||||
|
||||
http = (struct HTTP *)malloc(sizeof(struct HTTP));
|
||||
if(!http)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
memset(http, 0, sizeof(struct HTTP));
|
||||
conn->proto.http = http;
|
||||
data->reqdata.proto.http = http;
|
||||
}
|
||||
else
|
||||
http = conn->proto.http;
|
||||
http = data->reqdata.proto.http;
|
||||
|
||||
/* We default to persistant connections */
|
||||
conn->bits.close = FALSE;
|
||||
@ -1825,7 +1826,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
if(( (HTTPREQ_POST == httpreq) ||
|
||||
(HTTPREQ_POST_FORM == httpreq) ||
|
||||
(HTTPREQ_PUT == httpreq) ) &&
|
||||
conn->resume_from) {
|
||||
data->reqdata.resume_from) {
|
||||
/**********************************************************************
|
||||
* Resuming upload in HTTP means that we PUT or POST and that we have
|
||||
* got a resume_from value set. The resume value has already created
|
||||
@ -1834,15 +1835,15 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
* file size before we continue this venture in the dark lands of HTTP.
|
||||
*********************************************************************/
|
||||
|
||||
if(conn->resume_from < 0 ) {
|
||||
if(data->reqdata.resume_from < 0 ) {
|
||||
/*
|
||||
* This is meant to get the size of the present remote-file by itself.
|
||||
* We don't support this now. Bail out!
|
||||
*/
|
||||
conn->resume_from = 0;
|
||||
data->reqdata.resume_from = 0;
|
||||
}
|
||||
|
||||
if(conn->resume_from) {
|
||||
if(data->reqdata.resume_from) {
|
||||
/* do we still game? */
|
||||
curl_off_t passed=0;
|
||||
|
||||
@ -1850,7 +1851,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
input. If we knew it was a proper file we could've just
|
||||
fseek()ed but we only have a stream here */
|
||||
do {
|
||||
size_t readthisamountnow = (size_t)(conn->resume_from - passed);
|
||||
size_t readthisamountnow = (size_t)(data->reqdata.resume_from - passed);
|
||||
size_t actuallyread;
|
||||
|
||||
if(readthisamountnow > BUFSIZE)
|
||||
@ -1867,11 +1868,11 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
passed);
|
||||
return CURLE_READ_ERROR;
|
||||
}
|
||||
} while(passed != conn->resume_from); /* loop until done */
|
||||
} while(passed != data->reqdata.resume_from); /* loop until done */
|
||||
|
||||
/* now, decrease the size of the read */
|
||||
if(data->set.infilesize>0) {
|
||||
data->set.infilesize -= conn->resume_from;
|
||||
data->set.infilesize -= data->reqdata.resume_from;
|
||||
|
||||
if(data->set.infilesize <= 0) {
|
||||
failf(data, "File already completely uploaded");
|
||||
@ -1881,7 +1882,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
/* we've passed, proceed as normal */
|
||||
}
|
||||
}
|
||||
if(conn->bits.use_range) {
|
||||
if(data->reqdata.use_range) {
|
||||
/*
|
||||
* A range is selected. We use different headers whether we're downloading
|
||||
* or uploading and we always let customized headers override our internal
|
||||
@ -1892,19 +1893,19 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
/* if a line like this was already allocated, free the previous one */
|
||||
if(conn->allocptr.rangeline)
|
||||
free(conn->allocptr.rangeline);
|
||||
conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", conn->range);
|
||||
conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", data->reqdata.range);
|
||||
}
|
||||
else if((httpreq != HTTPREQ_GET) &&
|
||||
!checkheaders(data, "Content-Range:")) {
|
||||
|
||||
if(conn->resume_from) {
|
||||
if(data->reqdata.resume_from) {
|
||||
/* This is because "resume" was selected */
|
||||
curl_off_t total_expected_size=
|
||||
conn->resume_from + data->set.infilesize;
|
||||
data->reqdata.resume_from + data->set.infilesize;
|
||||
conn->allocptr.rangeline =
|
||||
aprintf("Content-Range: bytes %s%" FORMAT_OFF_T
|
||||
"/%" FORMAT_OFF_T "\r\n",
|
||||
conn->range, total_expected_size-1,
|
||||
data->reqdata.range, total_expected_size-1,
|
||||
total_expected_size);
|
||||
}
|
||||
else {
|
||||
@ -1912,7 +1913,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
append total size */
|
||||
conn->allocptr.rangeline =
|
||||
aprintf("Content-Range: bytes %s/%" FORMAT_OFF_T "\r\n",
|
||||
conn->range, data->set.infilesize);
|
||||
data->reqdata.range, data->set.infilesize);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1954,7 +1955,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
conn->allocptr.proxyuserpwd?
|
||||
conn->allocptr.proxyuserpwd:"",
|
||||
conn->allocptr.userpwd?conn->allocptr.userpwd:"",
|
||||
(conn->bits.use_range && conn->allocptr.rangeline)?
|
||||
(data->reqdata.use_range && conn->allocptr.rangeline)?
|
||||
conn->allocptr.rangeline:"",
|
||||
(data->set.useragent && *data->set.useragent && conn->allocptr.uagent)?
|
||||
conn->allocptr.uagent:"",
|
||||
@ -1983,7 +1984,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
|
||||
co = Curl_cookie_getlist(data->cookies,
|
||||
conn->allocptr.cookiehost?
|
||||
conn->allocptr.cookiehost:host, conn->path,
|
||||
conn->allocptr.cookiehost:host, data->reqdata.path,
|
||||
(bool)(conn->protocol&PROT_HTTPS?TRUE:FALSE));
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
|
||||
}
|
||||
@ -2100,7 +2101,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
failf(data, "Failed sending POST request");
|
||||
else
|
||||
/* setup variables for the upcoming transfer */
|
||||
result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
&http->readbytecount,
|
||||
-1, NULL);
|
||||
break;
|
||||
@ -2163,7 +2164,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
failf(data, "Failed sending POST request");
|
||||
else
|
||||
/* setup variables for the upcoming transfer */
|
||||
result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
&http->readbytecount,
|
||||
FIRSTSOCKET,
|
||||
&http->writebytecount);
|
||||
@ -2207,7 +2208,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
failf(data, "Failed sending PUT request");
|
||||
else
|
||||
/* prepare for transfer */
|
||||
result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
&http->readbytecount,
|
||||
postsize?FIRSTSOCKET:-1,
|
||||
postsize?&http->writebytecount:NULL);
|
||||
@ -2330,7 +2331,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
failf(data, "Failed sending HTTP POST request");
|
||||
else
|
||||
result =
|
||||
Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
&http->readbytecount,
|
||||
http->postdata?FIRSTSOCKET:-1,
|
||||
http->postdata?&http->writebytecount:NULL);
|
||||
@ -2347,7 +2348,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
|
||||
failf(data, "Failed sending HTTP request");
|
||||
else
|
||||
/* HTTP GET/HEAD download: */
|
||||
result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
|
||||
&http->readbytecount,
|
||||
http->postdata?FIRSTSOCKET:-1,
|
||||
http->postdata?&http->writebytecount:NULL);
|
||||
|
@ -83,7 +83,7 @@
|
||||
|
||||
void Curl_httpchunk_init(struct connectdata *conn)
|
||||
{
|
||||
struct Curl_chunker *chunk = &conn->proto.http->chunk;
|
||||
struct Curl_chunker *chunk = &conn->data->reqdata.proto.http->chunk;
|
||||
chunk->hexindex=0; /* start at 0 */
|
||||
chunk->dataleft=0; /* no data left yet! */
|
||||
chunk->state = CHUNK_HEX; /* we get hex first! */
|
||||
@ -103,8 +103,9 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
|
||||
ssize_t *wrotep)
|
||||
{
|
||||
CURLcode result=CURLE_OK;
|
||||
struct Curl_chunker *ch = &conn->proto.http->chunk;
|
||||
struct Curl_transfer_keeper *k = &conn->keep;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct Curl_chunker *ch = &data->reqdata.proto.http->chunk;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
size_t piece;
|
||||
size_t length = (size_t)datalen;
|
||||
size_t *wrote = (size_t *)wrotep;
|
||||
@ -186,7 +187,7 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
|
||||
|
||||
/* Write the data portion available */
|
||||
#ifdef HAVE_LIBZ
|
||||
switch (conn->keep.content_encoding) {
|
||||
switch (data->reqdata.keep.content_encoding) {
|
||||
case IDENTITY:
|
||||
#endif
|
||||
if(!k->ignorebody)
|
||||
@ -196,16 +197,16 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
|
||||
break;
|
||||
|
||||
case DEFLATE:
|
||||
/* update conn->keep.str to point to the chunk data. */
|
||||
conn->keep.str = datap;
|
||||
result = Curl_unencode_deflate_write(conn, &conn->keep,
|
||||
/* update data->reqdata.keep.str to point to the chunk data. */
|
||||
data->reqdata.keep.str = datap;
|
||||
result = Curl_unencode_deflate_write(conn, &data->reqdata.keep,
|
||||
(ssize_t)piece);
|
||||
break;
|
||||
|
||||
case GZIP:
|
||||
/* update conn->keep.str to point to the chunk data. */
|
||||
conn->keep.str = datap;
|
||||
result = Curl_unencode_gzip_write(conn, &conn->keep,
|
||||
/* update data->reqdata.keep.str to point to the chunk data. */
|
||||
data->reqdata.keep.str = datap;
|
||||
result = Curl_unencode_gzip_write(conn, &data->reqdata.keep,
|
||||
(ssize_t)piece);
|
||||
break;
|
||||
|
||||
|
@@ -7,7 +7,7 @@
 * | (__| |_| | _ <| |___
 *  \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2004, Daniel Stenberg, <daniel@haxx.se>, et al.
 * Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
@@ -47,7 +47,12 @@ CURLcode Curl_output_digest(struct connectdata *conn,
                            bool proxy,
                            unsigned char *request,
                            unsigned char *uripath);
void Curl_digest_cleanup(struct SessionHandle *data);
void Curl_digest_cleanup_one(struct digestdata *dig);

#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_CRYPTO_AUTH)
void Curl_digest_cleanup(struct SessionHandle *data);
#else
#define Curl_digest_cleanup(x) do {} while(0)
#endif

#endif
@@ -431,7 +431,7 @@ quit:
  DynaClose();

  /* no data to transfer */
  Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
  Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
  conn->bits.close = TRUE;

  return status;
@@ -130,3 +130,9 @@ Curl_llist_destroy(struct curl_llist *list, void *user)
    free(list);
  }
}

size_t
Curl_llist_count(struct curl_llist *list)
{
  return list->size;
}
lib/multi.c | 417
@ -64,13 +64,17 @@ typedef enum {
|
||||
CURLM_STATE_WAITCONNECT, /* awaiting the connect to finalize */
|
||||
CURLM_STATE_PROTOCONNECT, /* completing the protocol-specific connect
|
||||
phase */
|
||||
CURLM_STATE_WAITDO, /* wait for our turn to send the request */
|
||||
CURLM_STATE_DO, /* start send off the request (part 1) */
|
||||
CURLM_STATE_DOING, /* sending off the request (part 1) */
|
||||
CURLM_STATE_DO_MORE, /* send off the request (part 2) */
|
||||
CURLM_STATE_DO_DONE, /* done sending off request */
|
||||
CURLM_STATE_WAITPERFORM, /* wait for our turn to read the response */
|
||||
CURLM_STATE_PERFORM, /* transfer data */
|
||||
CURLM_STATE_TOOFAST, /* wait because limit-rate exceeded */
|
||||
CURLM_STATE_DONE, /* post data transfer operation */
|
||||
CURLM_STATE_COMPLETED, /* operation complete */
|
||||
CURLM_STATE_CANCELLED, /* cancelled */
|
||||
|
||||
CURLM_STATE_LAST /* not a true state, never use this */
|
||||
} CURLMstate;
|
||||
@ -86,6 +90,11 @@ struct socketstate {
|
||||
unsigned int action; /* socket action bitmap */
|
||||
};
|
||||
|
||||
struct closure {
|
||||
struct closure *next; /* a simple one-way list of structs */
|
||||
struct SessionHandle *easy_handle;
|
||||
};
|
||||
|
||||
struct Curl_one_easy {
|
||||
/* first, two fields for the linked list of these */
|
||||
struct Curl_one_easy *next;
|
||||
@ -142,8 +151,20 @@ struct Curl_multi {
|
||||
the pluralis form, there can be more than one easy handle waiting on the
|
||||
same actual socket) */
|
||||
struct curl_hash *sockhash;
|
||||
|
||||
/* Whether pipelining is enabled for this multi handle */
|
||||
bool pipelining_enabled;
|
||||
|
||||
/* shared connection cache */
|
||||
struct conncache *connc;
|
||||
|
||||
/* list of easy handles kept around for doing nice connection closures */
|
||||
struct closure *closure;
|
||||
};
|
||||
|
||||
static bool multi_conn_using(struct Curl_multi *multi,
|
||||
struct SessionHandle *data);
|
||||
|
||||
/* always use this function to change state, to make debugging easier */
|
||||
static void multistate(struct Curl_one_easy *easy, CURLMstate state)
|
||||
{
|
||||
@ -154,23 +175,33 @@ static void multistate(struct Curl_one_easy *easy, CURLMstate state)
|
||||
"WAITRESOLVE",
|
||||
"WAITCONNECT",
|
||||
"PROTOCONNECT",
|
||||
"WAITDO",
|
||||
"DO",
|
||||
"DOING",
|
||||
"DO_MORE",
|
||||
"DO_DONE",
|
||||
"WAITPERFORM",
|
||||
"PERFORM",
|
||||
"TOOFAST",
|
||||
"DONE",
|
||||
"COMPLETED",
|
||||
"CANCELLED"
|
||||
};
|
||||
CURLMstate oldstate = easy->state;
|
||||
int index = -1;
|
||||
#endif
|
||||
|
||||
easy->state = state;
|
||||
|
||||
if(easy->state > CURLM_STATE_CONNECT &&
|
||||
easy->state < CURLM_STATE_COMPLETED)
|
||||
index = easy->easy_conn->connectindex;
|
||||
|
||||
#ifdef CURLDEBUG
|
||||
infof(easy->easy_handle,
|
||||
"STATE: %s => %s handle %p: \n",
|
||||
statename[oldstate], statename[easy->state], (char *)easy);
|
||||
"STATE: %s => %s handle %p; (connection #%d) \n",
|
||||
statename[oldstate], statename[easy->state],
|
||||
(char *)easy, index);
|
||||
#endif
|
||||
if(state == CURLM_STATE_COMPLETED)
|
||||
/* changing to COMPLETED means there's one less easy handle 'alive' */
|
||||
@ -291,6 +322,13 @@ CURLM *curl_multi_init(void)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
multi->connc = Curl_mk_connc(CONNCACHE_MULTI);
|
||||
if(!multi->connc) {
|
||||
Curl_hash_destroy(multi->hostcache);
|
||||
free(multi);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (CURLM *) multi;
|
||||
}
|
||||
|
||||
@ -309,6 +347,9 @@ CURLMcode curl_multi_add_handle(CURLM *multi_handle,
|
||||
if(!GOOD_EASY_HANDLE(easy_handle))
|
||||
return CURLM_BAD_EASY_HANDLE;
|
||||
|
||||
/* TODO: add some kind of code that prevents a user from being able to
|
||||
add the same handle more than once! */
|
||||
|
||||
/* Now, time to add an easy handle to the multi stack */
|
||||
easy = (struct Curl_one_easy *)calloc(sizeof(struct Curl_one_easy), 1);
|
||||
if(!easy)
|
||||
@ -321,7 +362,7 @@ CURLMcode curl_multi_add_handle(CURLM *multi_handle,
|
||||
easy->easy_handle = easy_handle;
|
||||
multistate(easy, CURLM_STATE_INIT);
|
||||
|
||||
/* for multi interface connections, we share DNS cache automaticly if the
|
||||
/* for multi interface connections, we share DNS cache automatically if the
|
||||
easy handle's one is currently private. */
|
||||
if (easy->easy_handle->dns.hostcache &&
|
||||
(easy->easy_handle->dns.hostcachetype == HCACHE_PRIVATE)) {
|
||||
@ -336,6 +377,23 @@ CURLMcode curl_multi_add_handle(CURLM *multi_handle,
|
||||
easy->easy_handle->dns.hostcachetype = HCACHE_MULTI;
|
||||
}
|
||||
|
||||
if(easy->easy_handle->state.connc) {
|
||||
if(easy->easy_handle->state.connc->type == CONNCACHE_PRIVATE) {
|
||||
/* kill old private version */
|
||||
Curl_rm_connc(easy->easy_handle->state.connc);
|
||||
/* point out our shared one instead */
|
||||
easy->easy_handle->state.connc = multi->connc;
|
||||
}
|
||||
/* else it is already using multi? */
|
||||
}
|
||||
else
|
||||
/* point out our shared one */
|
||||
easy->easy_handle->state.connc = multi->connc;
|
||||
|
||||
/* Make sure the type is setup correctly */
|
||||
easy->easy_handle->state.connc->type = CONNCACHE_MULTI;
|
||||
|
||||
|
||||
/* We add this new entry first in the list. We make our 'next' point to the
|
||||
previous next and our 'prev' point back to the 'first' struct */
|
||||
easy->next = multi->easy.next;
|
||||
@ -383,6 +441,7 @@ CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
|
||||
break;
|
||||
easy=easy->next;
|
||||
}
|
||||
|
||||
if(easy) {
|
||||
/* If the 'state' is not INIT or COMPLETED, we might need to do something
|
||||
nice to put the easy_handle in a good known state when this returns. */
|
||||
@ -391,6 +450,15 @@ CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
|
||||
alive connections when this is removed */
|
||||
multi->num_alive--;
|
||||
|
||||
if (easy->easy_handle->state.is_in_pipeline &&
|
||||
easy->state > CURLM_STATE_DO) {
|
||||
/* If the handle is in a pipeline and has finished sending off its
|
||||
request, we need to remember the fact that we want to remove this
|
||||
handle but do the actual removal at a later time */
|
||||
easy->easy_handle->state.cancelled = TRUE;
|
||||
return CURLM_OK;
|
||||
}
|
||||
|
||||
/* The timer must be shut down before easy->multi is set to NULL,
|
||||
else the timenode will remain in the splay tree after
|
||||
curl_easy_cleanup is called. */
|
||||
@ -402,13 +470,44 @@ CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
|
||||
easy->easy_handle->dns.hostcachetype = HCACHE_NONE;
|
||||
}
|
||||
|
||||
Curl_easy_addmulti(easy->easy_handle, NULL); /* clear the association
|
||||
to this multi handle */
|
||||
|
||||
/* if we have a connection we must call Curl_done() here so that we
|
||||
don't leave a half-baked one around */
|
||||
if(easy->easy_conn)
|
||||
if(easy->easy_conn) {
|
||||
/* Set up the association right */
|
||||
easy->easy_conn->data = easy->easy_handle;
|
||||
Curl_done(&easy->easy_conn, easy->result);
|
||||
}
|
||||
|
||||
/* If this easy_handle was the last one in charge for one or more
|
||||
connections a the shared connection cache, we might need to keep this
|
||||
handle around until either A) the connection is closed and killed
|
||||
properly, or B) another easy_handle uses the connection.
|
||||
|
||||
The reason why we need to have a easy_handle associated with a live
|
||||
connection is simply that some connections will need a handle to get
|
||||
closed down properly. Currently, the only connections that need to keep
|
||||
a easy_handle handle around are using FTP(S). Such connections have
|
||||
the PROT_CLOSEACTION bit set.
|
||||
|
||||
Thus, we need to check for all connections in the shared cache that
|
||||
points to this handle and are using PROT_CLOSEACTION. If there's any,
|
||||
we need to add this handle to the list of "easy_handls kept around for
|
||||
nice closure".
|
||||
*/
|
||||
if(multi_conn_using(multi, easy->easy_handle))
|
||||
/* There's at least one connection using this handle so we must keep
|
||||
this handle around. We also keep the connection cache pointer
|
||||
pointing to the shared one since that will be used on close as
|
||||
well. */
|
||||
easy->easy_handle->state.shared_conn = multi;
|
||||
else
|
||||
if(easy->easy_handle->state.connc->type == CONNCACHE_MULTI)
|
||||
/* if this was using the shared connection cache we clear the pointer
|
||||
to that */
|
||||
easy->easy_handle->state.connc = NULL;
|
||||
|
||||
Curl_easy_addmulti(easy->easy_handle, NULL); /* clear the association
|
||||
to this multi handle */
|
||||
|
||||
/* make the previous node point to our next */
|
||||
if(easy->prev)
|
||||
@ -433,6 +532,11 @@ CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
|
||||
return CURLM_BAD_EASY_HANDLE; /* twasn't found */
|
||||
}
|
||||
|
||||
bool Curl_multi_canPipeline(struct Curl_multi* multi)
|
||||
{
|
||||
return multi->pipelining_enabled;
|
||||
}
|
||||
|
||||
static int waitconnect_getsock(struct connectdata *conn,
|
||||
curl_socket_t *sock,
|
||||
int numsocks)
|
||||
@ -467,6 +571,16 @@ static int multi_getsock(struct Curl_one_easy *easy,
|
||||
of sockets */
|
||||
int numsocks)
|
||||
{
|
||||
if (easy->easy_handle->state.pipe_broke) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (easy->state > CURLM_STATE_CONNECT &&
|
||||
easy->state < CURLM_STATE_COMPLETED) {
|
||||
/* Set up ownership correctly */
|
||||
easy->easy_conn->data = easy->easy_handle;
|
||||
}
|
||||
|
||||
switch(easy->state) {
|
||||
case CURLM_STATE_TOOFAST: /* returns 0, so will not select. */
|
||||
default:
|
||||
@ -488,6 +602,7 @@ static int multi_getsock(struct Curl_one_easy *easy,
|
||||
return domore_getsock(easy->easy_conn, socks, numsocks);
|
||||
|
||||
case CURLM_STATE_PERFORM:
|
||||
case CURLM_STATE_WAITPERFORM:
|
||||
return Curl_single_getsock(easy->easy_conn, socks, numsocks);
|
||||
}
|
||||
|
||||
@ -553,8 +668,33 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
bool dophase_done;
|
||||
bool done;
|
||||
CURLMcode result = CURLM_OK;
|
||||
struct Curl_transfer_keeper *k;
|
||||
|
||||
do {
|
||||
|
||||
if (easy->easy_handle->state.pipe_broke) {
|
||||
infof(easy->easy_handle, "Pipe broke: handle 0x%x\n", easy);
|
||||
if(easy->easy_handle->state.is_in_pipeline) {
|
||||
/* Head back to the CONNECT state */
|
||||
multistate(easy, CURLM_STATE_CONNECT);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
easy->result = CURLE_OK;
|
||||
} else {
|
||||
easy->result = CURLE_COULDNT_CONNECT;
|
||||
multistate(easy, CURLM_STATE_COMPLETED);
|
||||
}
|
||||
|
||||
easy->easy_handle->state.pipe_broke = FALSE;
|
||||
easy->easy_conn = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (easy->state > CURLM_STATE_CONNECT &&
|
||||
easy->state < CURLM_STATE_COMPLETED) {
|
||||
/* Make sure we set the connection's current owner */
|
||||
easy->easy_conn->data = easy->easy_handle;
|
||||
}
|
||||
|
||||
if (CURLM_STATE_WAITCONNECT <= easy->state &&
|
||||
easy->state <= CURLM_STATE_DO &&
|
||||
easy->easy_handle->change.url_changed) {
|
||||
@ -562,6 +702,9 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
Curl_posttransfer(easy->easy_handle);
|
||||
|
||||
easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
|
||||
/* We make sure that the pipe broken flag is reset
|
||||
because in this case, it isn't an actual break */
|
||||
easy->easy_handle->state.pipe_broke = FALSE;
|
||||
if(CURLE_OK == easy->result) {
|
||||
gotourl = strdup(easy->easy_handle->change.url);
|
||||
if(gotourl) {
|
||||
@ -603,21 +746,26 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
&async, &protocol_connect);
|
||||
|
||||
if(CURLE_OK == easy->result) {
|
||||
/* Add this handle to the send pipeline */
|
||||
Curl_addHandleToPipeline(easy->easy_handle,
|
||||
easy->easy_conn->send_pipe);
|
||||
|
||||
if(async)
|
||||
/* We're now waiting for an asynchronous name lookup */
|
||||
multistate(easy, CURLM_STATE_WAITRESOLVE);
|
||||
else {
|
||||
/* after the connect has been sent off, go WAITCONNECT unless the
|
||||
protocol connect is already done and we can go directly to
|
||||
DO! */
|
||||
WAITDO! */
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
|
||||
if(protocol_connect)
|
||||
multistate(easy, CURLM_STATE_DO);
|
||||
else
|
||||
if(protocol_connect) {
|
||||
multistate(easy, CURLM_STATE_WAITDO);
|
||||
} else {
|
||||
multistate(easy, CURLM_STATE_WAITCONNECT);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLM_STATE_WAITRESOLVE:
|
||||
@ -659,7 +807,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
|
||||
case CURLM_STATE_WAITCONNECT:
|
||||
/* awaiting a completion of an asynch connect */
|
||||
easy->result = Curl_is_connected(easy->easy_conn, FIRSTSOCKET,
|
||||
easy->result = Curl_is_connected(easy->easy_conn,
|
||||
FIRSTSOCKET,
|
||||
&connected);
|
||||
if(connected)
|
||||
easy->result = Curl_protocol_connect(easy->easy_conn,
|
||||
@ -680,8 +829,9 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
multistate(easy, CURLM_STATE_PROTOCONNECT);
|
||||
}
|
||||
else {
|
||||
/* after the connect has completed, go DO */
|
||||
multistate(easy, CURLM_STATE_DO);
|
||||
/* after the connect has completed, go WAITDO */
|
||||
multistate(easy, CURLM_STATE_WAITDO);
|
||||
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
}
|
||||
@ -692,8 +842,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
easy->result = Curl_protocol_connecting(easy->easy_conn,
|
||||
&protocol_connect);
|
||||
if(protocol_connect) {
|
||||
/* after the connect has completed, go DO */
|
||||
multistate(easy, CURLM_STATE_DO);
|
||||
/* after the connect has completed, go WAITDO */
|
||||
multistate(easy, CURLM_STATE_WAITDO);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
else if(easy->result) {
|
||||
@ -705,6 +855,21 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLM_STATE_WAITDO:
|
||||
/* Wait for our turn to DO when we're pipelining requests */
|
||||
infof(easy->easy_handle, "Connection #%d: send pipe size = %d\n",
|
||||
easy->easy_conn->connectindex,
|
||||
easy->easy_conn->send_pipe->size);
|
||||
if (!easy->easy_conn->writechannel_inuse &&
|
||||
Curl_isHandleAtHead(easy->easy_handle,
|
||||
easy->easy_conn->send_pipe)) {
|
||||
/* Grab the channel */
|
||||
easy->easy_conn->writechannel_inuse = TRUE;
|
||||
multistate(easy, CURLM_STATE_DO);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLM_STATE_DO:
|
||||
if(easy->easy_handle->set.connect_only) {
|
||||
/* keep connection open for application to use the socket */
|
||||
@ -715,7 +880,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
}
|
||||
else {
|
||||
/* Perform the protocol's DO action */
|
||||
easy->result = Curl_do(&easy->easy_conn, &dophase_done);
|
||||
easy->result = Curl_do(&easy->easy_conn,
|
||||
&dophase_done);
|
||||
|
||||
if(CURLE_OK == easy->result) {
|
||||
|
||||
@ -726,7 +892,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
result = CURLM_OK;
|
||||
}
|
||||
|
||||
/* after DO, go PERFORM... or DO_MORE */
|
||||
/* after DO, go DO_DONE... or DO_MORE */
|
||||
else if(easy->easy_conn->bits.do_more) {
|
||||
/* we're supposed to do more, but we need to sit down, relax
|
||||
and wait a little while first */
|
||||
@ -734,10 +900,10 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
result = CURLM_OK;
|
||||
}
|
||||
else {
|
||||
/* we're done with the DO, now PERFORM */
|
||||
/* we're done with the DO, now DO_DONE */
|
||||
easy->result = Curl_readwrite_init(easy->easy_conn);
|
||||
if(CURLE_OK == easy->result) {
|
||||
multistate(easy, CURLM_STATE_PERFORM);
|
||||
multistate(easy, CURLM_STATE_DO_DONE);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
}
|
||||
@ -754,7 +920,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
|
||||
case CURLM_STATE_DOING:
|
||||
/* we continue DOING until the DO phase is complete */
|
||||
easy->result = Curl_protocol_doing(easy->easy_conn, &dophase_done);
|
||||
easy->result = Curl_protocol_doing(easy->easy_conn,
|
||||
&dophase_done);
|
||||
if(CURLE_OK == easy->result) {
|
||||
if(dophase_done) {
|
||||
/* after DO, go PERFORM... or DO_MORE */
|
||||
@ -765,10 +932,10 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
result = CURLM_OK;
|
||||
}
|
||||
else {
|
||||
/* we're done with the DO, now PERFORM */
|
||||
/* we're done with the DO, now DO_DONE */
|
||||
easy->result = Curl_readwrite_init(easy->easy_conn);
|
||||
if(CURLE_OK == easy->result) {
|
||||
multistate(easy, CURLM_STATE_PERFORM);
|
||||
multistate(easy, CURLM_STATE_DO_DONE);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
}
|
||||
@ -785,11 +952,12 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
|
||||
case CURLM_STATE_DO_MORE:
|
||||
/* Ready to do more? */
|
||||
easy->result = Curl_is_connected(easy->easy_conn, SECONDARYSOCKET,
|
||||
easy->result = Curl_is_connected(easy->easy_conn,
|
||||
SECONDARYSOCKET,
|
||||
&connected);
|
||||
if(connected) {
|
||||
/*
|
||||
* When we are connected, DO MORE and then go PERFORM
|
||||
* When we are connected, DO MORE and then go DO_DONE
|
||||
*/
|
||||
easy->result = Curl_do_more(easy->easy_conn);
|
||||
|
||||
@ -797,12 +965,38 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
easy->result = Curl_readwrite_init(easy->easy_conn);
|
||||
|
||||
if(CURLE_OK == easy->result) {
|
||||
multistate(easy, CURLM_STATE_PERFORM);
|
||||
multistate(easy, CURLM_STATE_DO_DONE);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLM_STATE_DO_DONE:
|
||||
/* Remove ourselves from the send pipeline */
|
||||
Curl_removeHandleFromPipeline(easy->easy_handle,
|
||||
easy->easy_conn->send_pipe);
|
||||
/* Add ourselves to the recv pipeline */
|
||||
Curl_addHandleToPipeline(easy->easy_handle,
|
||||
easy->easy_conn->recv_pipe);
|
||||
multistate(easy, CURLM_STATE_WAITPERFORM);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
break;
|
||||
|
||||
case CURLM_STATE_WAITPERFORM:
|
||||
infof(easy->easy_handle, "Connection #%d: recv pipe size = %d\n",
|
||||
easy->easy_conn->connectindex,
|
||||
easy->easy_conn->recv_pipe->size);
|
||||
/* Wait for our turn to PERFORM */
|
||||
if (!easy->easy_conn->readchannel_inuse &&
|
||||
Curl_isHandleAtHead(easy->easy_handle,
|
||||
easy->easy_conn->recv_pipe)) {
|
||||
/* Grab the channel */
|
||||
easy->easy_conn->readchannel_inuse = TRUE;
|
||||
multistate(easy, CURLM_STATE_PERFORM);
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
break;
|
||||
|
||||
case CURLM_STATE_TOOFAST: /* limit-rate exceeded in either direction */
|
||||
/* if both rates are within spec, resume transfer */
|
||||
Curl_pgrsUpdate(easy->easy_conn);
|
||||
@ -814,11 +1008,9 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
easy->easy_handle->set.max_recv_speed ) )
|
||||
)
|
||||
multistate(easy, CURLM_STATE_PERFORM);
|
||||
|
||||
break;
|
||||
|
||||
case CURLM_STATE_PERFORM:
|
||||
|
||||
/* check if over speed */
|
||||
if ( ( ( easy->easy_handle->set.max_send_speed > 0 ) &&
|
||||
( easy->easy_handle->progress.ulspeed >
|
||||
@ -837,6 +1029,18 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
/* read/write data if it is ready to do so */
|
||||
easy->result = Curl_readwrite(easy->easy_conn, &done);
|
||||
|
||||
k = &easy->easy_handle->reqdata.keep;
|
||||
|
||||
if (!(k->keepon & KEEP_READ)) {
|
||||
/* We're done reading */
|
||||
easy->easy_conn->readchannel_inuse = FALSE;
|
||||
}
|
||||
|
||||
if (!(k->keepon & KEEP_WRITE)) {
|
||||
/* We're done writing */
|
||||
easy->easy_conn->writechannel_inuse = FALSE;
|
||||
}
|
||||
|
||||
if(easy->result) {
|
||||
/* The transfer phase returned error, we mark the connection to get
|
||||
* closed to prevent being re-used. This is becasue we can't
|
||||
@ -852,7 +1056,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
Curl_posttransfer(easy->easy_handle);
|
||||
Curl_done(&easy->easy_conn, easy->result);
|
||||
}
|
||||
|
||||
else if(TRUE == done) {
|
||||
char *newurl;
|
||||
bool retry = Curl_retry_request(easy->easy_conn, &newurl);
|
||||
@ -860,13 +1063,18 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
/* call this even if the readwrite function returned error */
|
||||
Curl_posttransfer(easy->easy_handle);
|
||||
|
||||
if (retry) {
|
||||
Curl_removeHandleFromPipeline(easy->easy_handle,
|
||||
easy->easy_conn->recv_pipe);
|
||||
}
|
||||
|
||||
/* When we follow redirects, must to go back to the CONNECT state */
|
||||
if(easy->easy_conn->newurl || retry) {
|
||||
if(easy->easy_handle->reqdata.newurl || retry) {
|
||||
if(!retry) {
|
||||
/* if the URL is a follow-location and not just a retried request
|
||||
then figure out the URL here */
|
||||
newurl = easy->easy_conn->newurl;
|
||||
easy->easy_conn->newurl = NULL;
|
||||
newurl = easy->easy_handle->reqdata.newurl;
|
||||
easy->easy_handle->reqdata.newurl = NULL;
|
||||
}
|
||||
easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
|
||||
if(easy->result == CURLE_OK)
|
||||
@ -886,23 +1094,49 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
case CURLM_STATE_DONE:
|
||||
/* Remove ourselves from the receive pipeline */
|
||||
Curl_removeHandleFromPipeline(easy->easy_handle,
|
||||
easy->easy_conn->recv_pipe);
|
||||
easy->easy_handle->state.is_in_pipeline = FALSE;
|
||||
|
||||
if (easy->easy_conn->bits.stream_was_rewound) {
|
||||
/* This request read past its response boundary so we quickly
|
||||
let the other requests consume those bytes since there is no
|
||||
guarantee that the socket will become active again */
|
||||
result = CURLM_CALL_MULTI_PERFORM;
|
||||
}
|
||||
|
||||
if (!easy->easy_handle->state.cancelled) {
|
||||
/* post-transfer command */
|
||||
easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
|
||||
|
||||
/* after we have DONE what we're supposed to do, go COMPLETED, and
|
||||
it doesn't matter what the Curl_done() returned! */
|
||||
multistate(easy, CURLM_STATE_COMPLETED);
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
case CURLM_STATE_COMPLETED:
|
||||
if (easy->easy_handle->state.cancelled) {
|
||||
/* Go into the CANCELLED state if we were cancelled */
|
||||
multistate(easy, CURLM_STATE_CANCELLED);
|
||||
}
|
||||
|
||||
/* this is a completed transfer, it is likely to still be connected */
|
||||
|
||||
/* This node should be delinked from the list now and we should post
|
||||
an information message that we are complete. */
|
||||
break;
|
||||
|
||||
case CURLM_STATE_CANCELLED:
|
||||
/* Cancelled transfer, wait to be cleaned up */
|
||||
break;
|
||||
|
||||
default:
|
||||
return CURLM_INTERNAL_ERROR;
|
||||
}
|
||||
@ -911,7 +1145,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
|
||||
if(CURLE_OK != easy->result) {
|
||||
/*
|
||||
* If an error was returned, and we aren't in completed state now,
|
||||
* then we go to completed and consider this transfer aborted. */
|
||||
* then we go to completed and consider this transfer aborted.
|
||||
*/
|
||||
multistate(easy, CURLM_STATE_COMPLETED);
|
||||
}
|
||||
}
|
||||
@ -958,7 +1193,17 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
|
||||
|
||||
easy=multi->easy.next;
|
||||
while(easy) {
|
||||
CURLMcode result = multi_runsingle(multi, easy);
|
||||
CURLMcode result;
|
||||
|
||||
if (easy->easy_handle->state.cancelled &&
|
||||
easy->state == CURLM_STATE_CANCELLED) {
|
||||
/* Remove cancelled handles once it's safe to do so */
|
||||
easy = easy->next;
|
||||
Curl_multi_rmeasy(multi_handle, easy->easy_handle);
|
||||
continue;
|
||||
}
|
||||
|
||||
result = multi_runsingle(multi, easy);
|
||||
if(result)
|
||||
returncode = result;
|
||||
|
||||
@ -975,7 +1220,6 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
|
||||
int key = now.tv_sec; /* drop the usec part */
|
||||
|
||||
multi->timetree = Curl_splaygetbest(key, multi->timetree, &t);
|
||||
|
||||
if (t) {
|
||||
struct SessionHandle *d = t->payload;
|
||||
struct timeval* tv = &d->state.expiretime;
|
||||
@ -1000,17 +1244,42 @@ void Curl_multi_rmeasy(void *multi_handle, CURL *easy_handle)
|
||||
curl_multi_remove_handle(multi_handle, easy_handle);
|
||||
}
|
||||
|
||||
|
||||
CURLMcode curl_multi_cleanup(CURLM *multi_handle)
|
||||
{
|
||||
struct Curl_multi *multi=(struct Curl_multi *)multi_handle;
|
||||
struct Curl_one_easy *easy;
|
||||
struct Curl_one_easy *nexteasy;
|
||||
int i;
|
||||
struct closure *cl;
|
||||
struct closure *n;
|
||||
|
||||
if(GOOD_MULTI_HANDLE(multi)) {
|
||||
multi->type = 0; /* not good anymore */
|
||||
Curl_hash_destroy(multi->hostcache);
|
||||
Curl_hash_destroy(multi->sockhash);
|
||||
|
||||
#if 1
|
||||
/* go over all connections that have close actions */
|
||||
for(i=0; i< multi->connc->num; i++) {
|
||||
if(multi->connc->connects[i] &&
|
||||
multi->connc->connects[i]->protocol & PROT_CLOSEACTION)
|
||||
Curl_disconnect(multi->connc->connects[i]);
|
||||
}
|
||||
/* now walk through the list of handles we kept around only to be
|
||||
able to close connections "properly" */
|
||||
cl = multi->closure;
|
||||
while(cl) {
|
||||
cl->easy_handle->state.shared_conn = NULL; /* no more shared */
|
||||
Curl_close(cl->easy_handle); /* close handle */
|
||||
n = cl->next;
|
||||
free(cl);
|
||||
cl= n;
|
||||
}
|
||||
#endif
|
||||
|
||||
Curl_rm_connc(multi->connc);
|
||||
|
||||
/* remove all easy handles */
|
||||
easy = multi->easy.next;
|
||||
while(easy) {
|
||||
@ -1020,6 +1289,10 @@ CURLMcode curl_multi_cleanup(CURLM *multi_handle)
|
||||
easy->easy_handle->dns.hostcache = NULL;
|
||||
easy->easy_handle->dns.hostcachetype = HCACHE_NONE;
|
||||
}
|
||||
|
||||
/* Clear the pointer to the connection cache */
|
||||
easy->easy_handle->state.connc = NULL;
|
||||
|
||||
Curl_easy_addmulti(easy->easy_handle, NULL); /* clear the association */
|
||||
|
||||
if (easy->msg)
|
||||
@ -1280,6 +1553,9 @@ CURLMcode curl_multi_setopt(CURLM *multi_handle,
  case CURLMOPT_SOCKETDATA:
    multi->socket_userp = va_arg(param, void *);
    break;
  case CURLMOPT_PIPELINING:
    multi->pipelining_enabled = va_arg(param, long);
    break;
  default:
    res = CURLM_UNKNOWN_OPTION;
  }
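CURLMOPT_PIPELINING is read here as a plain long (non-zero enables pipelining on the multi handle). A minimal, hedged usage sketch from the application side follows; the URLs are placeholders, error checking is trimmed, and a real program would wait on curl_multi_fdset() instead of spinning:

  #include <curl/curl.h>

  int main(void)
  {
    CURLM *multi = curl_multi_init();
    CURL *one = curl_easy_init();
    CURL *two = curl_easy_init();
    int running = 0;

    /* ask the multi handle to pipeline HTTP requests where possible */
    curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);

    curl_easy_setopt(one, CURLOPT_URL, "http://example.com/a");
    curl_easy_setopt(two, CURLOPT_URL, "http://example.com/b");
    curl_multi_add_handle(multi, one);
    curl_multi_add_handle(multi, two);

    /* drive both transfers; with pipelining they can share one connection */
    do {
      while(curl_multi_perform(multi, &running) == CURLM_CALL_MULTI_PERFORM)
        ;
    } while(running);

    curl_multi_remove_handle(multi, one);
    curl_multi_remove_handle(multi, two);
    curl_easy_cleanup(one);
    curl_easy_cleanup(two);
    curl_multi_cleanup(multi);
    return 0;
  }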
|
||||
@ -1422,3 +1698,74 @@ CURLMcode curl_multi_assign(CURLM *multi_handle,
|
||||
|
||||
return CURLM_OK;
|
||||
}
|
||||
|
||||
static bool multi_conn_using(struct Curl_multi *multi,
|
||||
struct SessionHandle *data)
|
||||
{
|
||||
/* any live CLOSEACTION-connections pointing to the given 'data'? */
|
||||
int i;
|
||||
|
||||
for(i=0; i< multi->connc->num; i++) {
|
||||
if(multi->connc->connects[i] &&
|
||||
(multi->connc->connects[i]->data == data) &&
|
||||
multi->connc->connects[i]->protocol & PROT_CLOSEACTION)
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
/* add the given data pointer to the list of 'closure handles' that are
|
||||
kept around only to be able to close some connections nicely */
|
||||
void Curl_multi_add_closure(struct Curl_multi *multi,
|
||||
struct SessionHandle *data)
|
||||
{
|
||||
int i;
|
||||
struct closure *cl = (struct closure *)calloc(sizeof(struct closure), 1);
|
||||
struct closure *p=NULL;
|
||||
struct closure *n;
|
||||
if(cl) {
|
||||
cl->easy_handle = data;
|
||||
cl->next = multi->closure;
|
||||
multi->closure = cl;
|
||||
}
|
||||
|
||||
p = multi->closure;
|
||||
  cl = p->next; /* start immediately on the second entry since the first is
                   the one we just added and it is _very_ likely to still be
                   used by a connection in the cache - that is the whole
                   purpose of adding it to this list! */
|
||||
|
||||
/* When adding, scan through all the other currently kept handles and see if
|
||||
there are any connections still referring to them and kill them if not. */
|
||||
while(cl) {
|
||||
bool inuse = FALSE;
|
||||
for(i=0; i< multi->connc->num; i++) {
|
||||
if(multi->connc->connects[i] &&
|
||||
(multi->connc->connects[i]->data == cl->easy_handle)) {
|
||||
inuse = TRUE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
n = cl->next;
|
||||
|
||||
if(!inuse) {
|
||||
/* cl->easy_handle is now killable */
|
||||
infof(data, "Delayed kill of easy handle %p\n", cl->easy_handle);
|
||||
/* mark it as no longer having a connection around that uses it */
|
||||
cl->easy_handle->state.shared_conn= NULL;
|
||||
Curl_close(cl->easy_handle);
|
||||
if(p)
|
||||
p->next = n;
|
||||
else
|
||||
multi->closure = n;
|
||||
free(cl);
|
||||
}
|
||||
else
|
||||
p = cl;
|
||||
|
||||
cl = n;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -7,7 +7,7 @@
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* Copyright (C) 1998 - 2005, Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
* Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
*
|
||||
* This software is licensed as described in the file COPYING, which
|
||||
* you should have received as part of this distribution. The terms
|
||||
@ -30,6 +30,11 @@ void Curl_expire(struct SessionHandle *data, long milli);
|
||||
|
||||
void Curl_multi_rmeasy(void *multi, CURL *data);
|
||||
|
||||
bool Curl_multi_canPipeline(struct Curl_multi* multi);
|
||||
|
||||
void Curl_multi_add_closure(struct Curl_multi *multi,
|
||||
struct SessionHandle *data);
|
||||
|
||||
/* the write bits start at bit 16 for the *getsock() bitmap */
|
||||
#define GETSOCK_WRITEBITSTART 16
|
||||
|
||||
|
@ -252,11 +252,11 @@ int Curl_pgrsUpdate(struct connectdata *conn)
|
||||
even when not displayed! */
|
||||
else if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
|
||||
if (!data->progress.callback) {
|
||||
if(conn->resume_from)
|
||||
if(data->reqdata.resume_from)
|
||||
fprintf(data->set.err,
|
||||
"** Resuming transfer from byte position %" FORMAT_OFF_T
|
||||
"\n",
|
||||
conn->resume_from);
|
||||
data->reqdata.resume_from);
|
||||
fprintf(data->set.err,
|
||||
" %% Total %% Received %% Xferd Average Speed Time Time Time Current\n"
|
||||
" Dload Upload Total Spent Left Speed\n");
|
||||
|
62
lib/sendf.c
@ -375,11 +375,16 @@ CURLcode Curl_client_write(struct connectdata *conn,
|
||||
struct SessionHandle *data = conn->data;
|
||||
size_t wrote;
|
||||
|
||||
if (data->state.cancelled) {
|
||||
/* We just suck everything into a black hole */
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
if(0 == len)
|
||||
len = strlen(ptr);
|
||||
|
||||
if(type & CLIENTWRITE_BODY) {
|
||||
if((conn->protocol&PROT_FTP) && conn->proto.ftp->transfertype == 'A') {
|
||||
if((conn->protocol&PROT_FTP) && conn->proto.ftpc.transfertype == 'A') {
|
||||
#ifdef CURL_DOES_CONVERSIONS
|
||||
/* convert from the network encoding */
|
||||
size_t rc;
|
||||
@ -431,6 +436,15 @@ CURLcode Curl_client_write(struct connectdata *conn,
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
void Curl_read_rewind(struct connectdata *conn,
|
||||
size_t extraBytesRead)
|
||||
{
|
||||
conn->read_pos -= extraBytesRead;
|
||||
conn->bits.stream_was_rewound = TRUE;
|
||||
}
|
||||
|
||||
#define MIN(a,b) (a < b ? a : b)
|
||||
|
||||
/*
|
||||
* Internal read-from-socket function. This is meant to deal with plain
|
||||
* sockets, SSL sockets and kerberos sockets.
|
||||
@ -445,6 +459,8 @@ int Curl_read(struct connectdata *conn, /* connection data */
|
||||
ssize_t *n) /* amount bytes read */
|
||||
{
|
||||
ssize_t nread;
|
||||
size_t bytestocopy = MIN(conn->buf_len - conn->read_pos, buffersize);
|
||||
size_t bytesremaining = buffersize - bytestocopy;
|
||||
|
||||
/* Set 'num' to 0 or 1, depending on which socket that has been sent here.
|
||||
If it is the second socket, we set num to 1. Otherwise to 0. This lets
|
||||
@ -453,20 +469,34 @@ int Curl_read(struct connectdata *conn, /* connection data */
|
||||
|
||||
*n=0; /* reset amount to zero */
|
||||
|
||||
if(conn->ssl[num].use) {
|
||||
nread = Curl_ssl_recv(conn, num, buf, buffersize);
|
||||
bytesremaining = MIN(bytesremaining, sizeof(conn->master_buffer));
|
||||
|
||||
if(nread == -1)
|
||||
/* Copy from our master buffer first */
|
||||
memcpy(buf, conn->master_buffer + conn->read_pos, bytestocopy);
|
||||
conn->read_pos += bytestocopy;
|
||||
|
||||
conn->bits.stream_was_rewound = FALSE;
|
||||
|
||||
*n = bytestocopy;
|
||||
|
||||
if (bytesremaining == 0) {
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
if(conn->ssl[num].use) {
|
||||
nread = Curl_ssl_recv(conn, num, conn->master_buffer, bytesremaining);
|
||||
|
||||
if(nread == -1 && bytestocopy == 0) {
|
||||
return -1; /* -1 from Curl_ssl_recv() means EWOULDBLOCK */
|
||||
}
|
||||
else {
|
||||
*n=0; /* reset amount to zero */
|
||||
if(conn->sec_complete)
|
||||
nread = Curl_sec_read(conn, sockfd, buf, buffersize);
|
||||
else
|
||||
nread = sread(sockfd, buf, buffersize);
|
||||
|
||||
if(-1 == nread) {
|
||||
} else {
|
||||
if(conn->sec_complete)
|
||||
nread = Curl_sec_read(conn, sockfd, conn->master_buffer, bytesremaining);
|
||||
else
|
||||
nread = sread(sockfd, conn->master_buffer, bytesremaining);
|
||||
|
||||
if(-1 == nread && bytestocopy == 0) {
|
||||
int err = Curl_sockerrno();
|
||||
#ifdef WIN32
|
||||
if(WSAEWOULDBLOCK == err)
|
||||
@ -476,7 +506,15 @@ int Curl_read(struct connectdata *conn, /* connection data */
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
*n = nread;
|
||||
|
||||
if (nread > 0) {
|
||||
memcpy(buf, conn->master_buffer, nread);
|
||||
|
||||
conn->buf_len = nread;
|
||||
conn->read_pos = nread;
|
||||
*n += nread;
|
||||
}
|
||||
|
||||
return CURLE_OK;
|
||||
}
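The reworked Curl_read() above first drains any bytes left in the connection's master buffer (replayed after a rewind) before asking the socket for more, so pipelined responses that were read past their boundary can be handed back to the next request. Below is a rough, hedged sketch of that replay-buffer idea, with invented names and a fixed-size buffer standing in for the real connection state:

  #include <string.h>
  #include <stdio.h>
  #include <stdbool.h>

  #define MASTER_BUF_SIZE 16384

  /* Hypothetical per-connection read state, loosely mirroring the
     master_buffer, buf_len and read_pos fields used in the patch above. */
  struct readstate {
    char master[MASTER_BUF_SIZE];
    size_t len;   /* valid bytes in master */
    size_t pos;   /* next byte to hand out */
    bool rewound; /* set when extra bytes were pushed back */
  };

  /* Give back 'extra' bytes that belonged to the next response. */
  static void read_rewind(struct readstate *rs, size_t extra)
  {
    rs->pos -= extra;
    rs->rewound = true;
  }

  /* Copy buffered bytes first; the caller only hits the socket when the
     buffer is empty. Returns the number of bytes copied into 'dst'. */
  static size_t read_buffered(struct readstate *rs, char *dst, size_t want)
  {
    size_t have = rs->len - rs->pos;
    size_t take = have < want ? have : want;
    memcpy(dst, rs->master + rs->pos, take);
    rs->pos += take;
    rs->rewound = false;
    return take;
  }

  int main(void)
  {
    struct readstate rs = { "HTTP/1.1 200 OK\r\n\r\nbodyNEXT", 27, 0, false };
    char out[64];
    size_t n = read_buffered(&rs, out, sizeof(out)); /* reads all 27 bytes */
    read_rewind(&rs, 4);           /* "NEXT" belongs to the next request */
    (void)n;
    printf("pushed back %zu bytes\n", rs.len - rs.pos);
    return 0;
  }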
|
||||
|
||||
|
@ -50,6 +50,9 @@ void Curl_failf(struct SessionHandle *, const char *fmt, ...);
|
||||
CURLcode Curl_client_write(struct connectdata *conn, int type, char *ptr,
|
||||
size_t len);
|
||||
|
||||
void Curl_read_rewind(struct connectdata *conn,
|
||||
size_t extraBytesRead);
|
||||
|
||||
/* internal read-function, does plain socket, SSL and krb4 */
|
||||
int Curl_read(struct connectdata *conn, curl_socket_t sockfd,
|
||||
char *buf, size_t buffersize,
|
||||
|
32
lib/telnet.c
@ -219,7 +219,7 @@ CURLcode init_telnet(struct connectdata *conn)
|
||||
if(!tn)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
|
||||
conn->proto.telnet = (void *)tn; /* make us known */
|
||||
conn->data->reqdata.proto.telnet = (void *)tn; /* make us known */
|
||||
|
||||
tn->telrcv_state = CURL_TS_DATA;
|
||||
|
||||
@ -238,7 +238,7 @@ CURLcode init_telnet(struct connectdata *conn)
|
||||
static void negotiate(struct connectdata *conn)
|
||||
{
|
||||
int i;
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *) conn->data->reqdata.proto.telnet;
|
||||
|
||||
for(i = 0;i < CURL_NTELOPTS;i++)
|
||||
{
|
||||
@ -312,7 +312,7 @@ static void send_negotiation(struct connectdata *conn, int cmd, int option)
|
||||
static
|
||||
void set_remote_option(struct connectdata *conn, int option, int newstate)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
if(newstate == CURL_YES)
|
||||
{
|
||||
switch(tn->him[option])
|
||||
@ -394,7 +394,7 @@ void set_remote_option(struct connectdata *conn, int option, int newstate)
|
||||
static
|
||||
void rec_will(struct connectdata *conn, int option)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
switch(tn->him[option])
|
||||
{
|
||||
case CURL_NO:
|
||||
@ -447,7 +447,7 @@ void rec_will(struct connectdata *conn, int option)
|
||||
static
|
||||
void rec_wont(struct connectdata *conn, int option)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
switch(tn->him[option])
|
||||
{
|
||||
case CURL_NO:
|
||||
@ -492,7 +492,7 @@ void rec_wont(struct connectdata *conn, int option)
|
||||
static void
|
||||
set_local_option(struct connectdata *conn, int option, int newstate)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
if(newstate == CURL_YES)
|
||||
{
|
||||
switch(tn->us[option])
|
||||
@ -574,7 +574,7 @@ set_local_option(struct connectdata *conn, int option, int newstate)
|
||||
static
|
||||
void rec_do(struct connectdata *conn, int option)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
switch(tn->us[option])
|
||||
{
|
||||
case CURL_NO:
|
||||
@ -627,7 +627,7 @@ void rec_do(struct connectdata *conn, int option)
|
||||
static
|
||||
void rec_dont(struct connectdata *conn, int option)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
switch(tn->us[option])
|
||||
{
|
||||
case CURL_NO:
|
||||
@ -789,7 +789,7 @@ static CURLcode check_telnet_options(struct connectdata *conn)
|
||||
char option_arg[256];
|
||||
char *buf;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
|
||||
/* Add the user name as an environment variable if it
|
||||
was given on the command line */
|
||||
@ -860,7 +860,7 @@ static void suboption(struct connectdata *conn)
|
||||
char varname[128];
|
||||
char varval[128];
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)data->reqdata.proto.telnet;
|
||||
|
||||
printsub(data, '<', (unsigned char *)tn->subbuffer, CURL_SB_LEN(tn)+2);
|
||||
switch (CURL_SB_GET(tn)) {
|
||||
@ -927,7 +927,7 @@ void telrcv(struct connectdata *conn,
|
||||
unsigned char c;
|
||||
int in = 0;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)data->reqdata.proto.telnet;
|
||||
|
||||
while(count--)
|
||||
{
|
||||
@ -1074,13 +1074,13 @@ void telrcv(struct connectdata *conn,
|
||||
|
||||
CURLcode Curl_telnet_done(struct connectdata *conn, CURLcode status)
|
||||
{
|
||||
struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
|
||||
struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
|
||||
(void)status; /* unused */
|
||||
|
||||
curl_slist_free_all(tn->telnet_vars);
|
||||
|
||||
free(conn->proto.telnet);
|
||||
conn->proto.telnet = NULL;
|
||||
free(conn->data->reqdata.proto.telnet);
|
||||
conn->data->reqdata.proto.telnet = NULL;
|
||||
|
||||
return CURLE_OK;
|
||||
}
|
||||
@ -1119,7 +1119,7 @@ CURLcode Curl_telnet(struct connectdata *conn, bool *done)
|
||||
if(code)
|
||||
return code;
|
||||
|
||||
tn = (struct TELNET *)conn->proto.telnet;
|
||||
tn = (struct TELNET *)data->reqdata.proto.telnet;
|
||||
|
||||
code = check_telnet_options(conn);
|
||||
if(code)
|
||||
@ -1394,7 +1394,7 @@ CURLcode Curl_telnet(struct connectdata *conn, bool *done)
|
||||
}
|
||||
#endif
|
||||
/* mark this as "no further transfer wanted" */
|
||||
Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
|
||||
Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
14
lib/tftp.c
@ -258,7 +258,7 @@ static CURLcode tftp_send_first(tftp_state_data_t *state, tftp_event_t event)
|
||||
|
||||
/* As RFC3617 describes the separator slash is not actually part of the file
|
||||
name so we skip the always-present first letter of the path string. */
|
||||
char *filename = &state->conn->path[1];
|
||||
char *filename = &state->conn->data->reqdata.path[1];
|
||||
struct SessionHandle *data = state->conn->data;
|
||||
CURLcode res = CURLE_OK;
|
||||
|
||||
@ -282,7 +282,7 @@ static CURLcode tftp_send_first(tftp_state_data_t *state, tftp_event_t event)
|
||||
/* If we are uploading, send an WRQ */
|
||||
setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
|
||||
filename = curl_easy_unescape(data, filename, 0, NULL);
|
||||
state->conn->upload_fromhere = (char *)&state->spacket.data[4];
|
||||
state->conn->data->reqdata.upload_fromhere = (char *)&state->spacket.data[4];
|
||||
if(data->set.infilesize != -1)
|
||||
Curl_pgrsSetUploadSize(data, data->set.infilesize);
|
||||
}
|
||||
@ -569,7 +569,7 @@ CURLcode Curl_tftp_connect(struct connectdata *conn, bool *done)
|
||||
tftp_state_data_t *state;
|
||||
int rc;
|
||||
|
||||
state = conn->proto.tftp = calloc(sizeof(tftp_state_data_t), 1);
|
||||
state = conn->data->reqdata.proto.tftp = calloc(sizeof(tftp_state_data_t), 1);
|
||||
if(!state)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
|
||||
@ -620,8 +620,8 @@ CURLcode Curl_tftp_done(struct connectdata *conn, CURLcode status)
|
||||
{
|
||||
(void)status; /* unused */
|
||||
|
||||
free(conn->proto.tftp);
|
||||
conn->proto.tftp = NULL;
|
||||
free(conn->data->reqdata.proto.tftp);
|
||||
conn->data->reqdata.proto.tftp = NULL;
|
||||
Curl_pgrsDone(conn);
|
||||
|
||||
return CURLE_OK;
|
||||
@ -641,7 +641,7 @@ CURLcode Curl_tftp_done(struct connectdata *conn, CURLcode status)
|
||||
CURLcode Curl_tftp(struct connectdata *conn, bool *done)
|
||||
{
|
||||
struct SessionHandle *data = conn->data;
|
||||
tftp_state_data_t *state = (tftp_state_data_t *)(conn->proto.tftp);
|
||||
tftp_state_data_t *state = (tftp_state_data_t *)(conn->data->reqdata.proto.tftp);
|
||||
tftp_event_t event;
|
||||
CURLcode code;
|
||||
int rc;
|
||||
@ -742,7 +742,7 @@ CURLcode Curl_tftp(struct connectdata *conn, bool *done)
|
||||
}
|
||||
|
||||
/* Tell curl we're done */
|
||||
code = Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
|
||||
code = Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
|
||||
if(code)
|
||||
return code;
|
||||
|
||||
|
228
lib/transfer.c
@ -114,12 +114,6 @@
|
||||
|
||||
#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
|
||||
|
||||
enum {
|
||||
KEEP_NONE,
|
||||
KEEP_READ,
|
||||
KEEP_WRITE
|
||||
};
|
||||
|
||||
/*
|
||||
* This function will call the read callback to fill our buffer with data
|
||||
* to upload.
|
||||
@ -133,12 +127,12 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
|
||||
if(conn->bits.upload_chunky) {
|
||||
/* if chunked Transfer-Encoding */
|
||||
buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
|
||||
conn->upload_fromhere += 10; /* 32bit hex + CRLF */
|
||||
data->reqdata.upload_fromhere += 10; /* 32bit hex + CRLF */
|
||||
}
|
||||
|
||||
/* this function returns a size_t, so we typecast to int to prevent warnings
|
||||
with picky compilers */
|
||||
nread = (int)conn->fread(conn->upload_fromhere, 1,
|
||||
nread = (int)conn->fread(data->reqdata.upload_fromhere, 1,
|
||||
buffersize, conn->fread_in);
|
||||
|
||||
if(nread == CURL_READFUNC_ABORT) {
|
||||
@ -152,18 +146,18 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
|
||||
int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
|
||||
"%x\r\n", nread);
|
||||
/* move buffer pointer */
|
||||
conn->upload_fromhere -= hexlen;
|
||||
data->reqdata.upload_fromhere -= hexlen;
|
||||
nread += hexlen;
|
||||
|
||||
/* copy the prefix to the buffer */
|
||||
memcpy(conn->upload_fromhere, hexbuffer, hexlen);
|
||||
memcpy(data->reqdata.upload_fromhere, hexbuffer, hexlen);
|
||||
|
||||
/* always append CRLF to the data */
|
||||
memcpy(conn->upload_fromhere + nread, "\r\n", 2);
|
||||
memcpy(data->reqdata.upload_fromhere + nread, "\r\n", 2);
|
||||
|
||||
if((nread - hexlen) == 0) {
|
||||
/* mark this as done once this chunk is transferred */
|
||||
conn->keep.upload_done = TRUE;
|
||||
data->reqdata.keep.upload_done = TRUE;
|
||||
}
|
||||
|
||||
nread+=2; /* for the added CRLF */
|
||||
@ -174,7 +168,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
|
||||
#ifdef CURL_DOES_CONVERSIONS
|
||||
if(data->set.prefer_ascii) {
|
||||
CURLcode res;
|
||||
res = Curl_convert_to_network(data, conn->upload_fromhere, nread);
|
||||
res = Curl_convert_to_network(data, data->reqdata.upload_fromhere, nread);
|
||||
/* Curl_convert_to_network calls failf if unsuccessful */
|
||||
if(res != CURLE_OK) {
|
||||
return(res);
|
||||
@ -279,8 +273,8 @@ static int data_pending(struct connectdata *conn)
|
||||
CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
bool *done)
|
||||
{
|
||||
struct Curl_transfer_keeper *k = &conn->keep;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
CURLcode result;
|
||||
ssize_t nread; /* number of bytes read */
|
||||
int didwhat=0;
|
||||
@ -308,10 +302,12 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
}
|
||||
|
||||
do {
|
||||
/* If we still have reading to do, we check if we have a readable
|
||||
socket. */
|
||||
if((k->keepon & KEEP_READ) && (select_res & CSELECT_IN)) {
|
||||
|
||||
/* We go ahead and do a read if we have a readable socket or if
|
||||
the stream was rewound (in which case we have data in a
|
||||
buffer) */
|
||||
if((k->keepon & KEEP_READ) &&
|
||||
((select_res & CSELECT_IN) || conn->bits.stream_was_rewound)) {
|
||||
/* read */
|
||||
bool is_empty_data = FALSE;
|
||||
|
||||
/* This is where we loop until we have read everything there is to
|
||||
@ -319,9 +315,14 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
do {
|
||||
size_t buffersize = data->set.buffer_size?
|
||||
data->set.buffer_size : BUFSIZE;
|
||||
size_t bytestoread = buffersize;
|
||||
int readrc;
|
||||
|
||||
if (k->size != -1 && !k->header)
|
||||
bytestoread = k->size - k->bytecount;
|
||||
|
||||
/* receive data from the network! */
|
||||
int readrc = Curl_read(conn, conn->sockfd, k->buf, buffersize, &nread);
|
||||
readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
|
||||
|
||||
/* subzero, this would've blocked */
|
||||
if(0 > readrc)
|
||||
@ -345,12 +346,13 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
is_empty_data = (nread == 0 && k->bodywrites == 0);
|
||||
|
||||
/* NULL terminate, allowing string ops to be used */
|
||||
if (0 < nread || is_empty_data)
|
||||
if (0 < nread || is_empty_data) {
|
||||
k->buf[nread] = 0;
|
||||
|
||||
/* if we receive 0 or less here, the server closed the connection and
|
||||
we bail out from this! */
|
||||
}
|
||||
else if (0 >= nread) {
|
||||
/* if we receive 0 or less here, the server closed the connection
|
||||
and we bail out from this! */
|
||||
|
||||
k->keepon &= ~KEEP_READ;
|
||||
break;
|
||||
}
|
||||
@ -540,12 +542,12 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
return result;
|
||||
|
||||
data->info.header_size += (long)headerlen;
|
||||
conn->headerbytecount += (long)headerlen;
|
||||
k->headerbytecount += (long)headerlen;
|
||||
|
||||
conn->deductheadercount =
|
||||
(100 == k->httpcode)?conn->headerbytecount:0;
|
||||
k->deductheadercount =
|
||||
(100 == k->httpcode)?k->headerbytecount:0;
|
||||
|
||||
if (conn->resume_from &&
|
||||
if (data->reqdata.resume_from &&
|
||||
(data->set.httpreq==HTTPREQ_GET) &&
|
||||
(k->httpcode == 416)) {
|
||||
/* "Requested Range Not Satisfiable" */
|
||||
@ -595,20 +597,20 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
using chunked Transfer-Encoding.
|
||||
*/
|
||||
if(conn->bits.chunk)
|
||||
conn->size=-1;
|
||||
k->size=-1;
|
||||
|
||||
}
|
||||
if(-1 != conn->size) {
|
||||
if(-1 != k->size) {
|
||||
/* We do this operation even if no_body is true, since this
|
||||
data might be retrieved later with curl_easy_getinfo()
|
||||
and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
|
||||
|
||||
Curl_pgrsSetDownloadSize(data, conn->size);
|
||||
conn->maxdownload = conn->size;
|
||||
Curl_pgrsSetDownloadSize(data, k->size);
|
||||
k->maxdownload = k->size;
|
||||
}
|
||||
/* If max download size is *zero* (nothing) we already
|
||||
have nothing and can safely return ok now! */
|
||||
if(0 == conn->maxdownload)
|
||||
if(0 == k->maxdownload)
|
||||
stop_reading = TRUE;
|
||||
|
||||
if(stop_reading) {
|
||||
@ -678,7 +680,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
(k->httpcode != 401) &&
|
||||
(k->httpcode != 407)) {
|
||||
|
||||
if (conn->resume_from &&
|
||||
if (data->reqdata.resume_from &&
|
||||
(data->set.httpreq==HTTPREQ_GET) &&
|
||||
(k->httpcode == 416)) {
|
||||
/* "Requested Range Not Satisfiable", just proceed and
|
||||
@ -714,8 +716,8 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
* MUST NOT contain a message-body, and thus is always
|
||||
* terminated by the first empty line after the header
|
||||
* fields. */
|
||||
conn->size=0;
|
||||
conn->maxdownload=0;
|
||||
k->size=0;
|
||||
k->maxdownload=0;
|
||||
k->ignorecl = TRUE; /* ignore Content-Length headers */
|
||||
break;
|
||||
default:
|
||||
@ -742,7 +744,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
return CURLE_FILESIZE_EXCEEDED;
|
||||
}
|
||||
if(contentlength >= 0)
|
||||
conn->size = contentlength;
|
||||
k->size = contentlength;
|
||||
else {
|
||||
/* Negative Content-Length is really odd, and we know it
|
||||
happens for example when older Apache servers send large
|
||||
@ -908,7 +910,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
|
||||
k->offset = curlx_strtoofft(ptr, NULL, 10);
|
||||
|
||||
if (conn->resume_from == k->offset)
|
||||
if (data->reqdata.resume_from == k->offset)
|
||||
/* we asked for a resume and we got it */
|
||||
k->content_range = TRUE;
|
||||
}
|
||||
@ -923,7 +925,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
here, or else use real peer host name. */
|
||||
conn->allocptr.cookiehost?
|
||||
conn->allocptr.cookiehost:conn->host.name,
|
||||
conn->path);
|
||||
data->reqdata.path);
|
||||
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
|
||||
}
|
||||
#endif
|
||||
@ -971,9 +973,9 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
backup = *ptr; /* store the ending letter */
|
||||
if(ptr != start) {
|
||||
*ptr = '\0'; /* zero terminate */
|
||||
conn->newurl = strdup(start); /* clone string */
|
||||
data->reqdata.newurl = strdup(start); /* clone string */
|
||||
*ptr = backup; /* restore ending letter */
|
||||
if(!conn->newurl)
|
||||
if(!data->reqdata.newurl)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
@ -997,7 +999,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
return result;
|
||||
|
||||
data->info.header_size += (long)k->hbuflen;
|
||||
conn->headerbytecount += (long)k->hbuflen;
|
||||
k->headerbytecount += (long)k->hbuflen;
|
||||
|
||||
/* reset hbufp pointer && hbuflen */
|
||||
k->hbufp = data->state.headerbuff;
|
||||
@ -1026,7 +1028,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
if(conn->protocol&PROT_HTTP) {
|
||||
/* HTTP-only checks */
|
||||
|
||||
if (conn->newurl) {
|
||||
if (data->reqdata.newurl) {
|
||||
if(conn->bits.close) {
|
||||
/* Abort after the headers if "follow Location" is set
|
||||
and we're set to close anyway. */
|
||||
@ -1040,7 +1042,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
k->ignorebody = TRUE;
|
||||
infof(data, "Ignoring the response-body\n");
|
||||
}
|
||||
if (conn->resume_from && !k->content_range &&
|
||||
if (data->reqdata.resume_from && !k->content_range &&
|
||||
(data->set.httpreq==HTTPREQ_GET) &&
|
||||
!k->ignorebody) {
|
||||
/* we wanted to resume a download, although the server doesn't
|
||||
@ -1051,7 +1053,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
return CURLE_HTTP_RANGE_ERROR;
|
||||
}
|
||||
|
||||
if(data->set.timecondition && !conn->range) {
|
||||
if(data->set.timecondition && !data->reqdata.range) {
|
||||
/* A time condition has been set AND no ranges have been
|
||||
requested. This seems to be what chapter 13.3.4 of
|
||||
RFC 2616 defines to be the correct action for a
|
||||
@ -1127,9 +1129,17 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
}
|
||||
#endif /* CURL_DISABLE_HTTP */
|
||||
|
||||
if((-1 != conn->maxdownload) &&
|
||||
(k->bytecount + nread >= conn->maxdownload)) {
|
||||
nread = (ssize_t) (conn->maxdownload - k->bytecount);
|
||||
if((-1 != k->maxdownload) &&
|
||||
(k->bytecount + nread >= k->maxdownload)) {
|
||||
size_t excess = k->bytecount + nread - k->maxdownload;
|
||||
|
||||
if (excess > 0) {
|
||||
infof(data, "Rewinding stream by : %d bytes\n", excess);
|
||||
Curl_read_rewind(conn, excess);
|
||||
conn->bits.stream_was_rewound = TRUE;
|
||||
}
|
||||
|
||||
nread = (ssize_t) (k->maxdownload - k->bytecount);
|
||||
if(nread < 0 ) /* this should be unusual */
|
||||
nread = 0;
|
||||
|
||||
@ -1233,9 +1243,9 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
|
||||
/* only read more data if there's no upload data already
|
||||
present in the upload buffer */
|
||||
if(0 == conn->upload_present) {
|
||||
if(0 == data->reqdata.upload_present) {
|
||||
/* init the "upload from here" pointer */
|
||||
conn->upload_fromhere = k->uploadbuf;
|
||||
data->reqdata.upload_fromhere = k->uploadbuf;
|
||||
|
||||
if(!k->upload_done) {
|
||||
/* HTTP pollution, this should be written nicer to become more
|
||||
@ -1243,7 +1253,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
int fillcount;
|
||||
|
||||
if(k->wait100_after_headers &&
|
||||
(conn->proto.http->sending == HTTPSEND_BODY)) {
|
||||
(data->reqdata.proto.http->sending == HTTPSEND_BODY)) {
|
||||
/* If this call is to send body data, we must take some action:
|
||||
We have sent off the full HTTP 1.1 request, and we shall now
|
||||
go into the Expect: 100 state and await such a header */
|
||||
@ -1280,7 +1290,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
}
|
||||
|
||||
/* store number of bytes available for upload */
|
||||
conn->upload_present = nread;
|
||||
data->reqdata.upload_present = nread;
|
||||
|
||||
/* convert LF to CRLF if so asked */
|
||||
#ifdef CURL_DO_LINEEND_CONV
|
||||
@ -1302,7 +1312,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
* must be used instead of the escape sequences \r & \n.
|
||||
*/
|
||||
for(i = 0, si = 0; i < nread; i++, si++) {
|
||||
if (conn->upload_fromhere[i] == 0x0a) {
|
||||
if (data->reqdata.upload_fromhere[i] == 0x0a) {
|
||||
data->state.scratch[si++] = 0x0d;
|
||||
data->state.scratch[si] = 0x0a;
|
||||
if (!data->set.crlf) {
|
||||
@ -1312,7 +1322,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
}
|
||||
}
|
||||
else
|
||||
data->state.scratch[si] = conn->upload_fromhere[i];
|
||||
data->state.scratch[si] = data->reqdata.upload_fromhere[i];
|
||||
}
|
||||
if(si != nread) {
|
||||
/* only perform the special operation if we really did replace
|
||||
@ -1320,10 +1330,10 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
nread = si;
|
||||
|
||||
/* upload from the new (replaced) buffer instead */
|
||||
conn->upload_fromhere = data->state.scratch;
|
||||
data->reqdata.upload_fromhere = data->state.scratch;
|
||||
|
||||
/* set the new amount too */
|
||||
conn->upload_present = nread;
|
||||
data->reqdata.upload_present = nread;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1335,33 +1345,33 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
/* write to socket (send away data) */
|
||||
result = Curl_write(conn,
|
||||
conn->writesockfd, /* socket to send to */
|
||||
conn->upload_fromhere, /* buffer pointer */
|
||||
conn->upload_present, /* buffer size */
|
||||
data->reqdata.upload_fromhere, /* buffer pointer */
|
||||
data->reqdata.upload_present, /* buffer size */
|
||||
&bytes_written); /* actually send away */
|
||||
if(result)
|
||||
return result;
|
||||
|
||||
if(data->set.verbose)
|
||||
/* show the data before we change the pointer upload_fromhere */
|
||||
Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
|
||||
Curl_debug(data, CURLINFO_DATA_OUT, data->reqdata.upload_fromhere,
|
||||
bytes_written, conn);
|
||||
|
||||
if(conn->upload_present != bytes_written) {
|
||||
if(data->reqdata.upload_present != bytes_written) {
|
||||
/* we only wrote a part of the buffer (if anything), deal with it! */
|
||||
|
||||
/* store the amount of bytes left in the buffer to write */
|
||||
conn->upload_present -= bytes_written;
|
||||
data->reqdata.upload_present -= bytes_written;
|
||||
|
||||
/* advance the pointer where to find the buffer when the next send
|
||||
is to happen */
|
||||
conn->upload_fromhere += bytes_written;
|
||||
data->reqdata.upload_fromhere += bytes_written;
|
||||
|
||||
writedone = TRUE; /* we are done, stop the loop */
|
||||
}
|
||||
else {
|
||||
/* we've uploaded that buffer now */
|
||||
conn->upload_fromhere = k->uploadbuf;
|
||||
conn->upload_present = 0; /* no more bytes left */
|
||||
data->reqdata.upload_fromhere = k->uploadbuf;
|
||||
data->reqdata.upload_present = 0; /* no more bytes left */
|
||||
|
||||
if(k->upload_done) {
|
||||
/* switch off writing, we're done! */
|
||||
@ -1382,10 +1392,10 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
k->now = Curl_tvnow();
|
||||
if(didwhat) {
|
||||
/* Update read/write counters */
|
||||
if(conn->bytecountp)
|
||||
*conn->bytecountp = k->bytecount; /* read count */
|
||||
if(conn->writebytecountp)
|
||||
*conn->writebytecountp = k->writebytecount; /* write count */
|
||||
if(k->bytecountp)
|
||||
*k->bytecountp = k->bytecount; /* read count */
|
||||
if(k->writebytecountp)
|
||||
*k->writebytecountp = k->writebytecount; /* write count */
|
||||
}
|
||||
else {
|
||||
/* no read no write, this is a timeout? */
|
||||
@ -1421,10 +1431,10 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
|
||||
if (data->set.timeout &&
|
||||
((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
|
||||
if (conn->size != -1) {
|
||||
if (k->size != -1) {
|
||||
failf(data, "Operation timed out after %d seconds with %"
|
||||
FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
|
||||
data->set.timeout, k->bytecount, conn->size);
|
||||
data->set.timeout, k->bytecount, k->size);
|
||||
} else {
|
||||
failf(data, "Operation timed out after %d seconds with %"
|
||||
FORMAT_OFF_T " bytes received",
|
||||
@ -1439,24 +1449,24 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
* returning.
|
||||
*/
|
||||
|
||||
if(!(conn->bits.no_body) && (conn->size != -1) &&
|
||||
(k->bytecount != conn->size) &&
|
||||
if(!(conn->bits.no_body) && (k->size != -1) &&
|
||||
(k->bytecount != k->size) &&
|
||||
#ifdef CURL_DO_LINEEND_CONV
|
||||
/* Most FTP servers don't adjust their file SIZE response for CRLFs,
|
||||
so we'll check to see if the discrepancy can be explained
|
||||
by the number of CRLFs we've changed to LFs.
|
||||
*/
|
||||
(k->bytecount != (conn->size + data->state.crlf_conversions)) &&
|
||||
(k->bytecount != (k->size + data->state.crlf_conversions)) &&
|
||||
#endif /* CURL_DO_LINEEND_CONV */
|
||||
!conn->newurl) {
|
||||
!data->reqdata.newurl) {
|
||||
failf(data, "transfer closed with %" FORMAT_OFF_T
|
||||
" bytes remaining to read",
|
||||
conn->size - k->bytecount);
|
||||
k->size - k->bytecount);
|
||||
return CURLE_PARTIAL_FILE;
|
||||
}
|
||||
else if(!(conn->bits.no_body) &&
|
||||
conn->bits.chunk &&
|
||||
(conn->proto.http->chunk.state != CHUNK_STOP)) {
|
||||
(data->reqdata.proto.http->chunk.state != CHUNK_STOP)) {
|
||||
/*
|
||||
* In chunked mode, return an error if the connection is closed prior to
|
||||
* the empty (terminating) chunk is read.
|
||||
@ -1481,13 +1491,14 @@ CURLcode Curl_readwrite(struct connectdata *conn,
|
||||
|
||||
|
||||
/*
|
||||
* Curl_readwrite_init() inits the readwrite session.
|
||||
* Curl_readwrite_init() inits the readwrite session. This is inited each time for a
|
||||
* transfer, sometimes multiple times on the same SessionHandle
|
||||
*/
|
||||
|
||||
CURLcode Curl_readwrite_init(struct connectdata *conn)
|
||||
{
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct Curl_transfer_keeper *k = &conn->keep;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
|
||||
/* NB: the content encoding software depends on this initialization of
|
||||
Curl_transfer_keeper.*/
|
||||
@ -1498,7 +1509,14 @@ CURLcode Curl_readwrite_init(struct connectdata *conn)
|
||||
k->header = TRUE; /* assume header */
|
||||
k->httpversion = -1; /* unknown at this point */
|
||||
|
||||
data = conn->data; /* there's the root struct */
|
||||
k->size = data->reqdata.size;
|
||||
k->maxdownload = data->reqdata.maxdownload;
|
||||
k->bytecountp = data->reqdata.bytecountp;
|
||||
k->writebytecountp = data->reqdata.writebytecountp;
|
||||
|
||||
k->bytecount = 0;
|
||||
k->headerbytecount = 0;
|
||||
|
||||
k->buf = data->state.buffer;
|
||||
k->uploadbuf = data->state.uploadbuffer;
|
||||
k->maxfd = (conn->sockfd>conn->writesockfd?
|
||||
@ -1514,8 +1532,8 @@ CURLcode Curl_readwrite_init(struct connectdata *conn)
|
||||
|
||||
if (!conn->bits.getheader) {
|
||||
k->header = FALSE;
|
||||
if(conn->size > 0)
|
||||
Curl_pgrsSetDownloadSize(data, conn->size);
|
||||
if(k->size > 0)
|
||||
Curl_pgrsSetDownloadSize(data, k->size);
|
||||
}
|
||||
/* we want header and/or body, if neither then don't do this! */
|
||||
if(conn->bits.getheader || !conn->bits.no_body) {
|
||||
@ -1535,7 +1553,7 @@ CURLcode Curl_readwrite_init(struct connectdata *conn)
|
||||
state info where we wait for the 100-return code
|
||||
*/
|
||||
if (data->state.expect100header &&
|
||||
(conn->proto.http->sending == HTTPSEND_BODY)) {
|
||||
(data->reqdata.proto.http->sending == HTTPSEND_BODY)) {
|
||||
/* wait with write until we either got 100-continue or a timeout */
|
||||
k->write_after_100_header = TRUE;
|
||||
k->start100 = k->start;
|
||||
@ -1565,6 +1583,7 @@ int Curl_single_getsock(struct connectdata *conn,
|
||||
of sockets */
|
||||
int numsocks)
|
||||
{
|
||||
struct SessionHandle *data = conn->data;
|
||||
int bitmap = GETSOCK_BLANK;
|
||||
int index = 0;
|
||||
|
||||
@ -1572,17 +1591,18 @@ int Curl_single_getsock(struct connectdata *conn,
|
||||
/* simple check but we might need two slots */
|
||||
return GETSOCK_BLANK;
|
||||
|
||||
if(conn->keep.keepon & KEEP_READ) {
|
||||
if(data->reqdata.keep.keepon & KEEP_READ) {
|
||||
bitmap |= GETSOCK_READSOCK(index);
|
||||
sock[index] = conn->sockfd;
|
||||
}
|
||||
if(conn->keep.keepon & KEEP_WRITE) {
|
||||
|
||||
if(data->reqdata.keep.keepon & KEEP_WRITE) {
|
||||
|
||||
if((conn->sockfd != conn->writesockfd) ||
|
||||
!(conn->keep.keepon & KEEP_READ)) {
|
||||
!(data->reqdata.keep.keepon & KEEP_READ)) {
|
||||
/* only if they are not the same socket or we didn't have a readable
|
||||
one, we increase index */
|
||||
if(conn->keep.keepon & KEEP_READ)
|
||||
if(data->reqdata.keep.keepon & KEEP_READ)
|
||||
index++; /* increase index if we need two entries */
|
||||
sock[index] = conn->writesockfd;
|
||||
}
|
||||
@ -1599,7 +1619,7 @@ int Curl_single_getsock(struct connectdata *conn,
|
||||
*
|
||||
* This function is what performs the actual transfer. It is capable of
|
||||
* doing both ways simultaneously.
|
||||
* The transfer must already have been setup by a call to Curl_Transfer().
|
||||
* The transfer must already have been setup by a call to Curl_setup_transfer().
|
||||
*
|
||||
* Note that headers are created in a preallocated buffer of a default size.
|
||||
* That buffer can be enlarged on demand, but it is never shrunken again.
|
||||
@ -1612,7 +1632,8 @@ static CURLcode
|
||||
Transfer(struct connectdata *conn)
|
||||
{
|
||||
CURLcode result;
|
||||
struct Curl_transfer_keeper *k = &conn->keep;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
bool done=FALSE;
|
||||
|
||||
if(!(conn->protocol & PROT_FILE))
|
||||
@ -2171,8 +2192,10 @@ bool Curl_retry_request(struct connectdata *conn,
|
||||
char **url)
|
||||
{
|
||||
bool retry = FALSE;
|
||||
struct SessionHandle *data = conn->data;
|
||||
struct Curl_transfer_keeper *k = &data->reqdata.keep;
|
||||
|
||||
if((conn->keep.bytecount+conn->headerbytecount == 0) &&
|
||||
if((data->reqdata.keep.bytecount+k->headerbytecount == 0) &&
|
||||
conn->bits.reuse &&
|
||||
!conn->bits.no_body) {
|
||||
/* We got no data, we attempted to re-use a connection and yet we want a
|
||||
@ -2251,7 +2274,7 @@ CURLcode Curl_perform(struct SessionHandle *data)
|
||||
* We must duplicate the new URL here as the connection data may
|
||||
* be free()ed in the Curl_done() function.
|
||||
*/
|
||||
newurl = conn->newurl?strdup(conn->newurl):NULL;
|
||||
newurl = data->reqdata.newurl?strdup(data->reqdata.newurl):NULL;
|
||||
}
|
||||
else {
|
||||
/* The transfer phase returned error, we mark the connection to get
|
||||
@ -2321,11 +2344,12 @@ CURLcode Curl_perform(struct SessionHandle *data)
|
||||
}
|
||||
|
||||
/*
|
||||
* Curl_Transfer() is called to setup some basic properties for the upcoming
|
||||
* Curl_setup_transfer() is called to setup some basic properties for the upcoming
|
||||
* transfer.
|
||||
*/
|
||||
CURLcode
|
||||
Curl_Transfer(struct connectdata *c_conn, /* connection data */
|
||||
Curl_setup_transfer(
|
||||
struct connectdata *c_conn, /* connection data */
|
||||
int sockindex, /* socket index to read from or -1 */
|
||||
curl_off_t size, /* -1 if unknown at this point */
|
||||
bool getheader, /* TRUE if header parsing is wanted */
|
||||
@ -2338,6 +2362,8 @@ Curl_Transfer(struct connectdata *c_conn, /* connection data */
|
||||
)
|
||||
{
|
||||
struct connectdata *conn = (struct connectdata *)c_conn;
|
||||
struct SessionHandle *data = conn->data;
|
||||
|
||||
if(!conn)
|
||||
return CURLE_BAD_FUNCTION_ARGUMENT;
|
||||
|
||||
@ -2346,15 +2372,15 @@ Curl_Transfer(struct connectdata *c_conn, /* connection data */
|
||||
/* now copy all input parameters */
|
||||
conn->sockfd = sockindex == -1 ?
|
||||
CURL_SOCKET_BAD : conn->sock[sockindex];
|
||||
conn->size = size;
|
||||
conn->bits.getheader = getheader;
|
||||
conn->bytecountp = bytecountp;
|
||||
conn->writesockfd = writesockindex == -1 ?
|
||||
CURL_SOCKET_BAD:conn->sock[writesockindex];
|
||||
conn->writebytecountp = writecountp;
|
||||
conn->bits.getheader = getheader;
|
||||
|
||||
data->reqdata.size = size;
|
||||
data->reqdata.bytecountp = bytecountp;
|
||||
data->reqdata.writebytecountp = writecountp;
|
||||
|
||||
return CURLE_OK;
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2368,6 +2394,8 @@ CURLcode Curl_second_connect(struct connectdata *conn)
|
||||
struct connectdata *sec_conn = NULL; /* secondary connection */
|
||||
bool backup_reuse_fresh = data->set.reuse_fresh;
|
||||
char *backup_userpwd = data->set.userpwd;
|
||||
char *backup_path = data->reqdata.path;
|
||||
char *backup_pathbuffer = data->reqdata.pathbuffer;
|
||||
|
||||
if(data->change.url_alloc)
|
||||
free(data->change.url);
|
||||
@ -2398,5 +2426,13 @@ CURLcode Curl_second_connect(struct connectdata *conn)
|
||||
data->set.reuse_fresh = backup_reuse_fresh;
|
||||
data->set.userpwd = backup_userpwd;
|
||||
|
||||
/* Copy the source path into a separate place */
|
||||
sec_conn->sec_path = data->reqdata.path;
|
||||
sec_conn->sec_pathbuffer = data->reqdata.pathbuffer;
|
||||
|
||||
/* Restore the original */
|
||||
data->reqdata.path = backup_path;
|
||||
data->reqdata.pathbuffer = backup_pathbuffer;
|
||||
|
||||
return status;
|
||||
}
|
||||
|
@ -35,9 +35,10 @@ CURLcode Curl_readwrite_init(struct connectdata *conn);
|
||||
CURLcode Curl_readrewind(struct connectdata *conn);
|
||||
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp);
|
||||
bool Curl_retry_request(struct connectdata *conn, char **url);
|
||||
|
||||
/* This sets up a forthcoming transfer */
|
||||
CURLcode
|
||||
Curl_Transfer (struct connectdata *data,
|
||||
Curl_setup_transfer (struct connectdata *data,
|
||||
int sockindex, /* socket index to read from or -1 */
|
||||
curl_off_t size, /* -1 if unknown at this point */
|
||||
bool getheader, /* TRUE if header parsing is wanted */
|
||||
|
16
lib/url.h
@ -7,7 +7,7 @@
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* Copyright (C) 1998 - 2005, Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
* Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
*
|
||||
* This software is licensed as described in the file COPYING, which
|
||||
* you should have received as part of this distribution. The terms
|
||||
@ -46,6 +46,10 @@ CURLcode Curl_protocol_connecting(struct connectdata *conn, bool *done);
|
||||
CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done);
|
||||
void Curl_safefree(void *ptr);
|
||||
|
||||
/* create a connection cache */
|
||||
struct conncache *Curl_mk_connc(int type);
|
||||
/* free a connection cache */
|
||||
void Curl_rm_connc(struct conncache *c);
|
||||
|
||||
int Curl_protocol_getsock(struct connectdata *conn,
|
||||
curl_socket_t *socks,
|
||||
@ -54,6 +58,16 @@ int Curl_doing_getsock(struct connectdata *conn,
|
||||
curl_socket_t *socks,
|
||||
int numsocks);
|
||||
|
||||
void Curl_addHandleToPipeline(struct SessionHandle *handle,
|
||||
struct curl_llist *pipe);
|
||||
void Curl_removeHandleFromPipeline(struct SessionHandle *handle,
|
||||
struct curl_llist *pipe);
|
||||
bool Curl_isHandleAtHead(struct SessionHandle *handle,
|
||||
struct curl_llist *pipe);
|
||||
void Curl_signalPipeClose(struct curl_llist *pipe);
|
||||
|
||||
void Curl_close_connections(struct SessionHandle *data);
|
||||
|
||||
#if 0
|
||||
CURLcode Curl_protocol_fdset(struct connectdata *conn,
|
||||
fd_set *read_fd_set,
|
||||
|
234
lib/urldata.h
@ -333,25 +333,33 @@ typedef enum {
|
||||
FTPFILE_SINGLECWD = 3 /* make one CWD, then SIZE / RETR / STOR on the file */
|
||||
} curl_ftpfile;
|
||||
|
||||
/* This FTP struct is used in the SessionHandle. All FTP data that is
|
||||
connection-oriented must be in FTP_conn to properly deal with the fact that
|
||||
perhaps the SessionHandle is changed between the times the connection is
|
||||
used. */
|
||||
struct FTP {
|
||||
curl_off_t *bytecountp;
|
||||
char *user; /* user name string */
|
||||
char *passwd; /* password string */
|
||||
char *urlpath; /* the originally given path part of the URL */
|
||||
char *file; /* decoded file */
|
||||
  bool no_transfer; /* nothing was transferred (possibly because a resumed
                       transfer was already complete) */
|
||||
curl_off_t downloadsize;
|
||||
};
|
||||
|
||||
/* ftp_conn is used for connection-oriented data in the connectdata
   struct */
|
||||
struct ftp_conn {
|
||||
char *entrypath; /* the PWD reply when we logged on */
|
||||
char **dirs; /* realloc()ed array for path components */
|
||||
int dirdepth; /* number of entries used in the 'dirs' array */
|
||||
int diralloc; /* number of entries allocated for the 'dirs' array */
|
||||
char *file; /* decoded file */
|
||||
|
||||
char *entrypath; /* the PWD reply when we logged on */
|
||||
|
||||
char *cache; /* data cache between getresponse()-calls */
|
||||
curl_off_t cache_size; /* size of cache in bytes */
|
||||
bool dont_check; /* Set to TRUE to prevent the final (post-transfer)
|
||||
file size and 226/250 status check. It should still
|
||||
read the line, just ignore the result. */
|
||||
  bool no_transfer; /* nothing was transferred (possibly because a resumed
                       transfer was already complete) */
|
||||
long response_time; /* When no timeout is given, this is the amount of
|
||||
seconds we await for an FTP response. Initialized
|
||||
in Curl_ftp_connect() */
|
||||
@ -365,7 +373,6 @@ struct FTP {
|
||||
char *prevpath; /* conn->path from the previous transfer */
|
||||
char transfertype; /* set by ftp_transfertype for use by Curl_client_write()a
|
||||
and others (A/I or zero) */
|
||||
|
||||
size_t nread_resp; /* number of bytes currently read of a server response */
|
||||
char *linestart_resp; /* line start pointer for the FTP server response
|
||||
reader function */
|
||||
@ -380,7 +387,6 @@ struct FTP {
|
||||
struct timeval response; /* set to Curl_tvnow() when a command has been sent
|
||||
off, used to time-out response reading */
|
||||
ftpstate state; /* always use ftp.c:state() to change state! */
|
||||
curl_off_t downloadsize;
|
||||
};
|
||||
|
||||
/****************************************************************************
|
||||
@ -406,8 +412,6 @@ struct ConnectBits {
|
||||
bool ipv6_ip; /* we communicate with a remote site specified with pure IPv6
|
||||
IP address */
|
||||
bool ipv6; /* we communicate with a site using an IPv6 address */
|
||||
bool use_range;
|
||||
bool rangestringalloc; /* the range string is malloc()'ed */
|
||||
|
||||
bool do_more; /* this is set TRUE if the ->curl_do_more() function is
|
||||
supposed to be called, after ->curl_do() */
|
||||
@ -456,6 +460,8 @@ struct ConnectBits {
|
||||
when Curl_done() is called, to prevent Curl_done() to
|
||||
get invoked twice when the multi interface is
|
||||
used. */
|
||||
bool stream_was_rewound; /* Indicates that the stream was rewound after a request
|
||||
read past the end of its response byte boundary */
|
||||
};
|
||||
|
||||
struct hostname {
|
||||
@ -465,6 +471,16 @@ struct hostname {
|
||||
char *dispname; /* name to display, as 'name' might be encoded */
|
||||
};
|
||||
|
||||
/*
|
||||
* Flags on the keepon member of the Curl_transfer_keeper
|
||||
*/
|
||||
enum {
|
||||
KEEP_NONE,
|
||||
KEEP_READ,
|
||||
KEEP_WRITE
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* This struct is all the previously local variables from Curl_perform() moved
|
||||
* to struct to allow the function to return and get re-invoked better without
|
||||
@ -472,8 +488,28 @@ struct hostname {
|
||||
*/
|
||||
|
||||
struct Curl_transfer_keeper {
|
||||
|
||||
/** Values copied over from the HandleData struct each time on init **/
|
||||
|
||||
curl_off_t size; /* -1 if unknown at this point */
|
||||
curl_off_t *bytecountp; /* return number of bytes read or NULL */
|
||||
|
||||
curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch, 0
|
||||
means unlimited */
|
||||
curl_off_t *writebytecountp; /* return number of bytes written or NULL */
|
||||
|
||||
/** End of HandleData struct copies **/
|
||||
|
||||
curl_off_t bytecount; /* total number of bytes read */
|
||||
curl_off_t writebytecount; /* number of bytes written */
|
||||
|
||||
long headerbytecount; /* only count received headers */
|
||||
long deductheadercount; /* this amount of bytes doesn't count when we check
|
||||
if anything has been transfered at the end of
|
||||
a connection. We use this counter to make only
|
||||
a 100 reply (without a following second response
|
||||
code) result in a CURLE_GOT_NOTHING error code */
|
||||
|
||||
struct timeval start; /* transfer started at this time */
|
||||
struct timeval now; /* current time */
|
||||
bool header; /* incoming data has HTTP header */
|
||||
@ -555,18 +591,83 @@ struct Curl_async {
|
||||
typedef CURLcode (*Curl_do_more_func)(struct connectdata *);
|
||||
typedef CURLcode (*Curl_done_func)(struct connectdata *, CURLcode);
|
||||
|
||||
|
||||
/*
|
||||
 * Stores request-specific data in the easy handle (SessionHandle).
 * Previously, these members were on the connectdata struct but since
 * a conn struct may now be shared between different SessionHandles,
 * we store the request-specific data here instead.
 *
|
||||
*/
|
||||
struct HandleData {
|
||||
char *pathbuffer;/* allocated buffer to store the URL's path part in */
|
||||
char *path; /* path to use, points to somewhere within the pathbuffer
|
||||
area */
|
||||
|
||||
char *newurl; /* This can only be set if a Location: was in the
|
||||
document headers */
|
||||
|
||||
/* This struct is inited when needed */
|
||||
struct Curl_transfer_keeper keep;
|
||||
|
||||
/* 'upload_present' is used to keep a byte counter of how much data there is
|
||||
still left in the buffer, aimed for upload. */
|
||||
ssize_t upload_present;
|
||||
|
||||
/* 'upload_fromhere' is used as a read-pointer when we uploaded parts of a
|
||||
buffer, so the next read should read from where this pointer points to,
|
||||
and the 'upload_present' contains the number of bytes available at this
|
||||
position */
|
||||
char *upload_fromhere;
|
||||
|
||||
curl_off_t size; /* -1 if unknown at this point */
|
||||
curl_off_t *bytecountp; /* return number of bytes read or NULL */
|
||||
|
||||
curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch, 0
|
||||
means unlimited */
|
||||
curl_off_t *writebytecountp; /* return number of bytes written or NULL */
|
||||
|
||||
bool use_range;
|
||||
bool rangestringalloc; /* the range string is malloc()'ed */
|
||||
|
||||
char *range; /* range, if used. See README for detailed specification on
|
||||
this syntax. */
|
||||
curl_off_t resume_from; /* continue [ftp] transfer from here */
|
||||
|
||||
/* Protocol specific data */
|
||||
|
||||
union {
|
||||
struct HTTP *http;
|
||||
struct HTTP *https; /* alias, just for the sake of being more readable */
|
||||
struct FTP *ftp;
|
||||
void *tftp; /* private for tftp.c-eyes only */
|
||||
struct FILEPROTO *file;
|
||||
void *telnet; /* private for telnet.c-eyes only */
|
||||
void *generic;
|
||||
} proto;
|
||||
};
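The new HandleData struct is the core of the separation: everything that describes the request in flight now travels with the easy handle, while connectdata keeps only connection-scoped state. The following is a hedged, schematic sketch of that ownership split with invented types (not the real structs), just to show why a shared connection must not carry request state of its own:

  #include <stdio.h>

  /* request-scoped state: follows the easy handle, as HandleData does */
  struct request_state {
    const char *path;      /* URL path for this request */
    long long resume_from; /* offset to continue from, if any */
  };

  /* connection-scoped state: may be used by several easy handles in turn */
  struct connection_state {
    int sockfd;
    struct request_state *current; /* whoever is using the connection now */
  };

  /* attaching only swaps a pointer; the request data stays with the handle,
     so a later handle reusing this connection does not inherit the previous
     request's path or offsets */
  static void attach(struct connection_state *c, struct request_state *r)
  {
    c->current = r;
  }

  int main(void)
  {
    struct request_state a = { "/first", 0 }, b = { "/second", 0 };
    struct connection_state conn = { 3, NULL };
    attach(&conn, &a);
    attach(&conn, &b);
    printf("connection now serves %s\n", conn.current->path);
    return 0;
  }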
|
||||
|
||||
/*
|
||||
* The connectdata struct contains all fields and variables that should be
|
||||
* unique for an entire connection.
|
||||
*/
|
||||
struct connectdata {
|
||||
/**** Fields set when inited and not modified again */
|
||||
struct SessionHandle *data; /* link to the root CURL struct */
|
||||
long connectindex; /* what index in the connects index this particular
|
||||
struct has */
|
||||
/* 'data' is the CURRENT SessionHandle using this connection -- take great
|
||||
caution that this might very well vary between different times this
|
||||
connection is used! */
|
||||
struct SessionHandle *data;
|
||||
|
||||
bool inuse; /* This is a marker for the connection cache logic. If this is
|
||||
TRUE this handle is being used by an easy handle and cannot
|
||||
be used by any other easy handle without careful
|
||||
consideration (== only for pipelining). */
|
||||
|
||||
/**** Fields set when inited and not modified again */
|
||||
long connectindex; /* what index in the connection cache connects index this
|
||||
particular struct has */
|
||||
long protocol; /* PROT_* flags concerning the protocol set */
|
||||
#define PROT_MISSING (1<<0)
|
||||
#define PROT_CLOSEACTION (1<<1) /* needs action before socket close */
|
||||
#define PROT_HTTP (1<<2)
|
||||
#define PROT_HTTPS (1<<3)
|
||||
#define PROT_FTP (1<<4)
|
||||
@ -574,9 +675,9 @@ struct connectdata {
|
||||
#define PROT_DICT (1<<6)
|
||||
#define PROT_LDAP (1<<7)
|
||||
#define PROT_FILE (1<<8)
|
||||
#define PROT_TFTP (1<<11)
|
||||
#define PROT_FTPS (1<<9)
|
||||
#define PROT_SSL (1<<10) /* protocol requires SSL */
|
||||
#define PROT_TFTP (1<<11)
|
||||
|
||||
/* 'dns_entry' is the particular host we use. This points to an entry in the
|
||||
DNS cache and it will not get pruned while locked. It gets unlocked in
|
||||
@ -600,23 +701,9 @@ struct connectdata {
|
||||
struct hostname host;
|
||||
struct hostname proxy;
|
||||
|
||||
char *pathbuffer;/* allocated buffer to store the URL's path part in */
|
||||
char *path; /* path to use, points to somewhere within the pathbuffer
|
||||
area */
|
||||
long port; /* which port to use locally */
|
||||
unsigned short remote_port; /* what remote port to connect to,
|
||||
not the proxy port! */
|
||||
curl_off_t bytecount;
|
||||
long headerbytecount; /* only count received headers */
|
||||
long deductheadercount; /* this amount of bytes doesn't count when we check
|
||||
if anything has been transfered at the end of
|
||||
a connection. We use this counter to make only
|
||||
a 100 reply (without a following second response
|
||||
code) result in a CURLE_GOT_NOTHING error code */
|
||||
|
||||
char *range; /* range, if used. See README for detailed specification on
|
||||
this syntax. */
|
||||
curl_off_t resume_from; /* continue [ftp] transfer from here */
|
||||
|
||||
char *user; /* user name string, allocated */
|
||||
char *passwd; /* password string, allocated */
|
||||
@ -628,8 +715,6 @@ struct connectdata {
|
||||
struct timeval created; /* creation time */
|
||||
curl_socket_t sock[2]; /* two sockets, the second is used for the data
|
||||
transfer when doing FTP */
|
||||
curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch, 0
|
||||
means unlimited */
|
||||
|
||||
struct ssl_connect_data ssl[2]; /* this is for ssl-stuff */
|
||||
struct ssl_config_data ssl_config;
|
||||
@ -683,16 +768,10 @@ struct connectdata {
|
||||
|
||||
/**** curl_get() phase fields */
|
||||
|
||||
/* READ stuff */
|
||||
curl_socket_t sockfd; /* socket to read from or CURL_SOCKET_BAD */
|
||||
curl_off_t size; /* -1 if unknown at this point */
|
||||
curl_off_t *bytecountp; /* return number of bytes read or NULL */
|
||||
|
||||
/* WRITE stuff */
|
||||
curl_socket_t writesockfd; /* socket to write to, it may very
|
||||
well be the same we read from.
|
||||
CURL_SOCKET_BAD disables */
|
||||
curl_off_t *writebytecountp; /* return number of bytes written or NULL */
|
||||
|
||||
/** Dynamicly allocated strings, may need to be freed before this **/
|
||||
/** struct is killed. **/
|
||||
@ -707,9 +786,6 @@ struct connectdata {
|
||||
char *cookiehost; /* free later if not NULL */
|
||||
} allocptr;
|
||||
|
||||
char *newurl; /* This can only be set if a Location: was in the
|
||||
document headers */
|
||||
|
||||
int sec_complete; /* if krb4 is enabled for this connection */
|
||||
#ifdef HAVE_KRB4
|
||||
enum protection_level command_prot;
|
||||
@ -722,31 +798,23 @@ struct connectdata {
|
||||
struct sockaddr_in local_addr;
|
||||
#endif
|
||||
|
||||
bool readchannel_inuse; /* whether the read channel is in use by an easy handle */
|
||||
bool writechannel_inuse; /* whether the write channel is in use by an easy handle */
|
||||
bool is_in_pipeline; /* TRUE if this connection is in a pipeline */
|
||||
|
||||
struct curl_llist *send_pipe; /* List of handles waiting to
|
||||
send on this pipeline */
|
||||
struct curl_llist *recv_pipe; /* List of handles waiting to read
|
||||
their responses on this pipeline */
|
||||
|
||||
char master_buffer[BUFSIZE]; /* The master buffer for this connection. */
|
||||
size_t read_pos;
|
||||
size_t buf_len;
|
||||
|
||||
|
||||
/*************** Request - specific items ************/
|
||||
|
||||
/* previously this was in the urldata struct */
|
||||
union {
|
||||
struct HTTP *http;
|
||||
struct HTTP *https; /* alias, just for the sake of being more readable */
|
||||
struct FTP *ftp;
|
||||
void *tftp; /* private for tftp.c-eyes only */
|
||||
struct FILEPROTO *file;
|
||||
void *telnet; /* private for telnet.c-eyes only */
|
||||
void *generic;
|
||||
} proto;
|
||||
|
||||
/* This struct is inited when needed */
|
||||
struct Curl_transfer_keeper keep;
|
||||
|
||||
/* 'upload_present' is used to keep a byte counter of how much data there is
|
||||
still left in the buffer, aimed for upload. */
|
||||
ssize_t upload_present;
|
||||
|
||||
/* 'upload_fromhere' is used as a read-pointer when we uploaded parts of a
|
||||
buffer, so the next read should read from where this pointer points to,
|
||||
and the 'upload_present' contains the number of bytes available at this
|
||||
position */
|
||||
char *upload_fromhere;
|
||||
|
||||
curl_read_callback fread; /* function that reads the input */
|
||||
void *fread_in; /* pointer to pass to the fread() above */
|
||||
|
||||
@ -762,8 +830,11 @@ struct connectdata {
|
||||
/* data used for the asynch name resolve callback */
|
||||
struct Curl_async async;
|
||||
#endif
|
||||
|
||||
struct connectdata *sec_conn; /* secondary connection for 3rd party
|
||||
transfer */
|
||||
char *sec_path; /* The source path for FTP 3rd party */
|
||||
char *sec_pathbuffer;
|
||||
|
||||
enum { NORMAL, SOURCE3RD, TARGET3RD } xfertype;
|
||||
|
||||
@ -772,6 +843,9 @@ struct connectdata {
|
||||
int trlMax; /* allocated buffer size */
|
||||
int trlPos; /* index of where to store data */
|
||||
|
||||
union {
|
||||
struct ftp_conn ftpc;
|
||||
} proto;
|
||||
};
|
||||
|
||||
/* The end of connectdata. */
|
||||
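
The send_pipe and recv_pipe lists above are the per-connection queues that the new
pipelining feature feeds. For orientation, here is a minimal, hypothetical
application-side sketch (not part of this commit) of how a multi handle is switched
into pipelining mode so that a second request added to it is queued on the first
request's connection rather than opening a parallel one; error checking is omitted:

#include <curl/curl.h>

static void two_pipelined_requests(const char *url)
{
  CURLM *multi = curl_multi_init();
  CURL *first = curl_easy_init();
  CURL *second = curl_easy_init();
  int running;

  /* ask the multi handle to pipeline HTTP requests where possible */
  curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);

  curl_easy_setopt(first, CURLOPT_URL, url);
  curl_easy_setopt(second, CURLOPT_URL, url);

  curl_multi_add_handle(multi, first);
  curl_multi_add_handle(multi, second);

  /* drive both transfers; a real application would select() on the fds
     between calls instead of spinning like this */
  while(curl_multi_perform(multi, &running) == CURLM_CALL_MULTI_PERFORM ||
        running)
    ;

  curl_multi_remove_handle(multi, first);
  curl_multi_remove_handle(multi, second);
  curl_easy_cleanup(first);
  curl_easy_cleanup(second);
  curl_multi_cleanup(multi);
}
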
@ -870,6 +944,18 @@ struct auth {

};

struct conncache {
  /* 'connects' will be an allocated array with pointers. If the pointer is
     set, it holds an allocated connection. */
  struct connectdata **connects;
  long num; /* size of the 'connects' array */
  enum {
    CONNCACHE_PRIVATE, /* used for an easy handle alone */
    CONNCACHE_MULTI    /* shared within a multi handle */
  } type;
};

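The conncache type above is what connections are now allocated from; with
CONNCACHE_MULTI a single cache is shared by every easy handle added to the multi
handle. The following fragment is an illustrative sketch only, using hypothetical
simplified names rather than libcurl's internals, of how such a slot array can be
probed for an idle, matching connection before a new one is created:

#include <stddef.h>

/* simplified stand-ins for the real structs; names are hypothetical */
struct demo_conn {
  int inuse;   /* mirrors connectdata::inuse above */
  int matches; /* stand-in for the host/port/protocol comparison */
};

struct demo_cache {
  struct demo_conn **connects; /* 'num' slots, NULL when a slot is empty */
  long num;
};

/* return the first idle, matching connection, or NULL to force a new one */
static struct demo_conn *demo_find_reusable(struct demo_cache *cache)
{
  long i;
  for(i = 0; i < cache->num; i++) {
    struct demo_conn *conn = cache->connects[i];
    if(conn && !conn->inuse && conn->matches)
      return conn;
  }
  return NULL;
}
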
struct UrlState {
  enum {
    Curl_if_none,
@ -877,13 +963,12 @@ struct UrlState {
    Curl_if_multi
  } used_interface;

  struct conncache *connc; /* points to the connection cache this handle
                              uses */

  /* buffers to store authentication data in, as parsed from input options */
  struct timeval keeps_speed; /* for the progress meter really */

  /* 'connects' will be an allocated array with pointers. If the pointer is
     set, it holds an allocated connection. */
  struct connectdata **connects;
  long numconnects; /* size of the 'connects' array */
  long lastconnect; /* index of most recent connect or -1 if undefined */

  char *headerbuff; /* allocated buffer to store headers in */
@ -895,6 +980,8 @@ struct UrlState {
                      bytes / second */
  bool this_is_a_follow; /* this is a followed Location: request */

  bool is_in_pipeline; /* Indicates whether this handle is part of a pipeline */

  char *first_host; /* if set, this should be the host name that we will
                       sent authorization to, no else. Used to make Location:
                       following not keep sending user+password... This is
@ -927,6 +1014,7 @@ struct UrlState {
  struct auth authproxy;

  bool authproblem; /* TRUE if there's some problem authenticating */

#ifdef USE_ARES
  ares_channel areschannel; /* for name resolves */
#endif
@ -945,6 +1033,10 @@ struct UrlState {

  bool expect100header;  /* TRUE if we added Expect: 100-continue */

  bool pipe_broke; /* TRUE if the connection we were pipelined on broke
                      and we need to restart from the beginning */
  bool cancelled; /* TRUE if the request was cancelled */

#ifndef WIN32
/* do FTP line-end conversions on most platforms */
#define CURL_DO_LINEEND_CONV
@ -953,6 +1045,11 @@ struct UrlState {
  /* for FTP downloads: how many CRLFs did we converted to LFs? */
  curl_off_t crlf_conversions;
#endif
  /* If set to non-NULL, there's a connection in a shared connection cache
     that uses this handle so we can't kill this SessionHandle just yet but
     must keep it around and add it to the list of handles to kill once all
     its connections are gone */
  void *shared_conn;
};


@ -1179,8 +1276,9 @@ struct Names {
struct SessionHandle {
  struct Names dns;
  struct Curl_multi *multi; /* if non-NULL, points to the multi handle
                               struct of which this "belongs" */
                               struct to which this "belongs" */
  struct Curl_share *share; /* Share, handles global variable mutexing */
  struct HandleData reqdata; /* Request-specific data */
  struct UserDefined set; /* values set by the libcurl user */
  struct DynamicStatic change; /* possibly modified userdefined data */

@ -34,5 +34,5 @@ EXTRA_DIST = test1 test108 test117 test127 test20 test27 test34 test46 \
test250 test251 test252 test253 test254 test255 test521 test522 test523 \
test256 test257 test258 test259 test260 test261 test262 test263 test264 \
test265 test266 test267 test268 test269 test270 test271 test272 test273 \
test274 test275 test524 test525 test276 test277
test274 test275 test524 test525 test276 test277 test526 test527 test528

tests/data/test526 (new file)
@ -0,0 +1,61 @@
<info>
<keywords>
FTP
PASV
RETR
</keywords>
</info>
# Server-side
<reply>
<data>
file contents should appear once for each file
</data>
<datacheck>
file contents should appear once for each file
file contents should appear once for each file
file contents should appear once for each file
file contents should appear once for each file
</datacheck>
</reply>

# Client-side
<client>
<server>
ftp
</server>
<tool>
lib526
</tool>
<name>
FTP RETR same file using different handles but same connection
</name>
<command>
ftp://%HOSTIP:%FTPPORT/path/526
</command>
</client>

# Verify data after the test has been "shot"
<verify>
<strip>
</strip>
<protocol>
USER anonymous
PASS curl_by_daniel@haxx.se
PWD
CWD path
EPSV
TYPE I
SIZE 526
RETR 526
EPSV
SIZE 526
RETR 526
EPSV
SIZE 526
RETR 526
EPSV
SIZE 526
RETR 526
QUIT
</protocol>
</verify>
tests/data/test527 (new file)
@ -0,0 +1,61 @@
<info>
<keywords>
FTP
PASV
RETR
</keywords>
</info>
# Server-side
<reply>
<data>
file contents should appear once for each file
</data>
<datacheck>
file contents should appear once for each file
file contents should appear once for each file
file contents should appear once for each file
file contents should appear once for each file
</datacheck>
</reply>

# Client-side
<client>
<server>
ftp
</server>
<tool>
lib527
</tool>
<name>
FTP RETR same file using different handles but same connection
</name>
<command>
ftp://%HOSTIP:%FTPPORT/path/527
</command>
</client>

# Verify data after the test has been "shot"
<verify>
<strip>
</strip>
<protocol>
USER anonymous
PASS curl_by_daniel@haxx.se
PWD
CWD path
EPSV
TYPE I
SIZE 527
RETR 527
EPSV
SIZE 527
RETR 527
EPSV
SIZE 527
RETR 527
EPSV
SIZE 527
RETR 527
QUIT
</protocol>
</verify>
tests/data/test528 (new file)
@ -0,0 +1,62 @@
<info>
<keywords>
HTTP
</keywords>
</info>
# Server-side
<reply>
<data>
HTTP/1.1 200 OK
Date: Thu, 09 Nov 2010 14:49:00 GMT
Server: test-server/fake
Content-Length: 47

file contents should appear once for each file
</data>
<datacheck>
file contents should appear once for each file
file contents should appear once for each file
file contents should appear once for each file
file contents should appear once for each file
</datacheck>
</reply>

# Client-side
<client>
<server>
http
</server>
<tool>
lib526
</tool>
<name>
HTTP GET same file using different handles but same connection
</name>
<command>
http://%HOSTIP:%HTTPPORT/path/528
</command>
</client>

# Verify data after the test has been "shot"
<verify>
<strip>
</strip>
<protocol>
GET /path/528 HTTP/1.1
Host: %HOSTIP:%HTTPPORT
Accept: */*

GET /path/528 HTTP/1.1
Host: %HOSTIP:%HTTPPORT
Accept: */*

GET /path/528 HTTP/1.1
Host: %HOSTIP:%HTTPPORT
Accept: */*

GET /path/528 HTTP/1.1
Host: %HOSTIP:%HTTPPORT
Accept: */*

</protocol>
</verify>
@ -41,7 +41,7 @@ SUPPORTFILES = first.c test.h

# These are all libcurl test programs
noinst_PROGRAMS = lib500 lib501 lib502 lib503 lib504 lib505 lib506 lib507 \
lib508 lib509 lib510 lib511 lib512 lib513 lib514 lib515 lib516 lib517 \
lib518 lib519 lib520 lib521 lib523 lib524 lib525
lib518 lib519 lib520 lib521 lib523 lib524 lib525 lib526 lib527

lib500_SOURCES = lib500.c $(SUPPORTFILES)
lib500_LDADD = $(LIBDIR)/libcurl.la
@ -142,3 +142,13 @@ lib524_DEPENDENCIES = $(LIBDIR)/libcurl.la
lib525_SOURCES = lib525.c $(SUPPORTFILES)
lib525_LDADD = $(LIBDIR)/libcurl.la
lib525_DEPENDENCIES = $(LIBDIR)/libcurl.la

lib526_SOURCES = lib526.c $(SUPPORTFILES)
lib526_LDADD = $(LIBDIR)/libcurl.la
lib526_DEPENDENCIES = $(LIBDIR)/libcurl.la

lib527_SOURCES = lib526.c $(SUPPORTFILES)
lib527_CFLAGS = -DLIB527
lib527_LDADD = $(LIBDIR)/libcurl.la
lib527_DEPENDENCIES = $(LIBDIR)/libcurl.la

tests/libtest/lib526.c (new file)
@ -0,0 +1,129 @@
/*****************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * $Id$
 */

/*
 * This code sets up multiple easy handles that transfer a single file from
 * the same URL, in a serial manner after each other. Due to the connection
 * sharing within the multi handle all transfers are performed on the same
 * persistent connection.
 *
 * This source code is used for lib526 _and_ lib527 with only #ifdefs
 * controlling the small differences. lib526 closes all easy handles after
 * they all have transfered the file over the single connection, while lib527
 * closes each easy handle after each single transfer. 526 and 527 use FTP,
 * while 528 uses the lib526 tool but use HTTP.
 */

#include "test.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define NUM_HANDLES 4

int test(char *URL)
{
  int res = 0;
  CURL *curl[NUM_HANDLES];
  int running;
  char done=FALSE;
  CURLM *m;
  int current=0;
  int i;

  /* In windows, this will init the winsock stuff */
  curl_global_init(CURL_GLOBAL_ALL);

  /* get NUM_HANDLES easy handles */
  for(i=0; i < NUM_HANDLES; i++) {
    curl[i] = curl_easy_init();
    if(!curl[i])
      return 100 + i; /* major bad */
    curl_easy_setopt(curl[i], CURLOPT_URL, URL);

    /* go verbose */
    curl_easy_setopt(curl[i], CURLOPT_VERBOSE, 1);
  }

  m = curl_multi_init();

  res = (int)curl_multi_add_handle(m, curl[current]);

  fprintf(stderr, "Start at URL 0\n");

  while(!done) {
    fd_set rd, wr, exc;
    int max_fd;
    struct timeval interval;

    interval.tv_sec = 1;
    interval.tv_usec = 0;

    while (res == CURLM_CALL_MULTI_PERFORM) {
      res = (int)curl_multi_perform(m, &running);
      if (running <= 0) {
#ifdef LIB527
        curl_easy_cleanup(curl[current]);
#endif
        if(++current < NUM_HANDLES) {
          fprintf(stderr, "Advancing to URL %d\n", current);
          res = (int)curl_multi_add_handle(m, curl[current]);
          if(res) {
            fprintf(stderr, "add handle failed: %d.\n", res);
            res = 243;
            break;
          }
        }
        else
          done = TRUE; /* bail out */
        break;
      }
    }
    if(done)
      break;

    if (res != CURLM_OK) {
      fprintf(stderr, "not okay???\n");
      break;
    }

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&exc);
    max_fd = 0;

    if (curl_multi_fdset(m, &rd, &wr, &exc, &max_fd) != CURLM_OK) {
      fprintf(stderr, "unexpected failure of fdset.\n");
      res = 189;
      break;
    }

    if (select(max_fd+1, &rd, &wr, &exc, &interval) == -1) {
      fprintf(stderr, "bad select??\n");
      res = 195;
      break;
    }

    res = CURLM_CALL_MULTI_PERFORM;
  }

#ifndef LIB527
  /* remove and free the NUM_HANDLES easy handles */
  for(i=0; i < NUM_HANDLES; i++) {
    curl_multi_remove_handle(m, curl[i]);
    curl_easy_cleanup(curl[i]);
  }
#endif
  curl_multi_cleanup(m);

  curl_global_cleanup();
  return res;
}
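
As the Makefile.am change above shows, lib527 is built from this same source with
-DLIB527, and test 528 drives the lib526 binary against the HTTP test server
instead of FTP. Assuming the standard curl test harness, the three new cases can
be run from the tests directory with: ./runtests.pl 526 527 528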