- Dmitry Kurochkin moved several struct fields from the connectdata struct to
  the SingleRequest one to make pipelining better. It is a bit tricky to keep
  them in the right place, to keep things related to the actual request or to
  the actual connection in the right place.
parent b3186dee17
commit b620e62f0f
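In short: flags that describe the single request in flight (chunked download and upload, header parsing, the Trailer: marker) move out of the per-connection ConnectBits and into SingleRequest, which hangs off the SessionHandle as data->req, while the old no_body bit is dropped in favour of reading data->set.opt_no_body directly. That way several requests pipelined over one connection each keep their own per-request state. The sketch below is assembled from the lib/urldata.h and lib/url.c hunks further down in this diff; the struct definitions are heavily trimmed stand-ins (the real structs have many more members) and request_is_chunked() is a hypothetical helper shown only to illustrate the new access path, not code from the patch.

#include <stdbool.h>

/* Trimmed stand-ins for the real structs in lib/urldata.h. */
struct SingleRequest {        /* per-request state, reached as data->req */
  bool chunk;                 /* if set, this is a chunked transfer-encoding */
  bool upload_chunky;         /* TRUE if doing chunked transfer-encoding on upload */
  bool getheader;             /* TRUE if header parsing is wanted */
  bool forbidchunk;           /* forbid chunk-upload for specific upload buffers */
  bool trailerhdrpresent;     /* Trailer: header found in the HTTP response */
};

struct UserDefined {          /* options set by the application */
  bool opt_no_body;           /* a "no body" transfer was requested */
};

struct SessionHandle {
  struct UserDefined set;
  struct SingleRequest req;   /* the single request this handle runs right now */
};

struct ConnectBits {          /* what remains here is connection-oriented only */
  bool close;
  bool reuse;
  /* chunk, upload_chunky, getheader, forbidchunk, trailerhdrpresent and
     no_body no longer live here */
};

struct connectdata {
  struct SessionHandle *data; /* the handle this connection currently serves */
  struct ConnectBits bits;
};

/* Hypothetical helper, not part of the patch: the new access path replaces
   conn->bits.chunk with conn->data->req.chunk (or k->chunk where
   k = &data->req, as in the lib/transfer.c hunks below). */
static bool request_is_chunked(struct connectdata *conn)
{
  struct SingleRequest *k = &conn->data->req;
  return k->chunk;
}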
CHANGES | 6
@@ -6,6 +6,12 @@
 
 Changelog
 
+Daniel S (31 Jan 2008)
+- Dmitry Kurochkin moved several struct fields from the connectdata struct to
+  the SingleRequest one to make pipelining better. It is a bit tricky to keep
+  them in the right place, to keep things related to the actual request or to
+  the actual connection in the right place.
+
 Daniel S (29 Jan 2008)
 - Dmitry Kurochkin fixed Curl_done() for pipelining, as it could previously
   crash!
@@ -454,7 +454,7 @@ static CURLcode file_do(struct connectdata *conn, bool *done)
   /* If we have selected NOBODY and HEADER, it means that we only want file
      information. Which for FILE can't be much more than the file size and
      date. */
-  if(conn->bits.no_body && data->set.include_header && fstated) {
+  if(data->set.opt_no_body && data->set.include_header && fstated) {
     CURLcode result;
     snprintf(buf, sizeof(data->state.buffer),
              "Content-Length: %" FORMAT_OFF_T "\r\n", expected_size);
@@ -1473,7 +1473,7 @@ static CURLcode ftp_state_post_mdtm(struct connectdata *conn)
   /* If we have selected NOBODY and HEADER, it means that we only want file
      information. Which in FTP can't be much more than the file size and
      date. */
-  if(conn->bits.no_body && ftpc->file &&
+  if(data->set.opt_no_body && ftpc->file &&
      ftp_need_type(conn, data->set.prefer_ascii)) {
     /* The SIZE command is _not_ RFC 959 specified, and therefor many servers
        may not support it! It is however the only way we have to get a file's
@@ -2007,7 +2007,7 @@ static CURLcode ftp_state_mdtm_resp(struct connectdata *conn,
   /* If we asked for a time of the file and we actually got one as well,
      we "emulate" a HTTP-style header in our output. */

-  if(conn->bits.no_body &&
+  if(data->set.opt_no_body &&
      ftpc->file &&
      data->set.get_filetime &&
      (data->info.filetime>=0) ) {
@@ -3575,7 +3575,7 @@ CURLcode ftp_perform(struct connectdata *conn,

   DEBUGF(infof(conn->data, "DO phase starts\n"));

-  if(conn->bits.no_body) {
+  if(conn->data->set.opt_no_body) {
     /* requested no body means no transfer... */
     struct FTP *ftp = conn->data->state.proto.ftp;
     ftp->transfer = FTPTRANSFER_INFO;
lib/http.c | 30
@@ -842,7 +842,7 @@ static size_t readmoredata(char *buffer,
     return 0;

   /* make sure that a HTTP request is never sent away chunked! */
-  conn->bits.forbidchunk = (bool)(http->sending == HTTPSEND_REQUEST);
+  conn->data->req.forbidchunk = (bool)(http->sending == HTTPSEND_REQUEST);

   if(http->postsize <= (curl_off_t)fullsize) {
     memcpy(buffer, http->postdata, (size_t)http->postsize);
@@ -1957,7 +1957,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
   if(data->set.str[STRING_CUSTOMREQUEST])
     request = data->set.str[STRING_CUSTOMREQUEST];
   else {
-    if(conn->bits.no_body)
+    if(data->set.opt_no_body)
       request = (char *)"HEAD";
     else {
       DEBUGASSERT((httpreq > HTTPREQ_NONE) && (httpreq < HTTPREQ_LAST));
@@ -2025,13 +2025,23 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
   ptr = checkheaders(data, "Transfer-Encoding:");
   if(ptr) {
     /* Some kind of TE is requested, check if 'chunked' is chosen */
-    conn->bits.upload_chunky =
+    data->req.upload_chunky =
       Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
   }
   else {
-    if(httpreq == HTTPREQ_GET)
-      conn->bits.upload_chunky = FALSE;
-    if(conn->bits.upload_chunky)
+    if((conn->protocol&PROT_HTTP) &&
+       data->set.upload &&
+       (data->set.infilesize == -1) &&
+       (data->set.httpversion != CURL_HTTP_VERSION_1_0)) {
+      /* HTTP, upload, unknown file size and not HTTP 1.0 */
+      data->req.upload_chunky = TRUE;
+    }
+    else {
+      /* else, no chunky upload */
+      data->req.upload_chunky = FALSE;
+    }
+
+    if(data->req.upload_chunky)
       te = "Transfer-Encoding: chunked\r\n";
   }

@@ -2494,7 +2504,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)

     http->sending = HTTPSEND_BODY;

-    if(!conn->bits.upload_chunky) {
+    if(!data->req.upload_chunky) {
       /* only add Content-Length if not uploading chunked */
       result = add_bufferf(req_buffer,
                            "Content-Length: %" FORMAT_OFF_T "\r\n",
@@ -2566,7 +2576,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
     else
       postsize = data->set.infilesize;

-    if((postsize != -1) && !conn->bits.upload_chunky) {
+    if((postsize != -1) && !data->req.upload_chunky) {
       /* only add Content-Length if not uploading chunked */
       result = add_bufferf(req_buffer,
                            "Content-Length: %" FORMAT_OFF_T "\r\n",
@@ -2612,7 +2622,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
       data->set.postfieldsize:
       (data->set.postfields? (curl_off_t)strlen(data->set.postfields):0);

-    if(!conn->bits.upload_chunky) {
+    if(!data->req.upload_chunky) {
       /* We only set Content-Length and allow a custom Content-Length if
          we don't upload data chunked, as RFC2616 forbids us to set both
          kinds of headers (Transfer-Encoding: chunked and Content-Length) */
@@ -2662,7 +2672,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
       if(result)
         return result;

-      if(!conn->bits.upload_chunky) {
+      if(!data->req.upload_chunky) {
         /* We're not sending it 'chunked', append it to the request
            already now to reduce the number if send() calls */
         result = add_buffer(req_buffer, data->set.postfields,
@@ -181,7 +181,7 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
     if(*datap == 0x0a) {
       /* we're now expecting data to come, unless size was zero! */
       if(0 == ch->datasize) {
-        if(conn->bits.trailerhdrpresent!=TRUE) {
+        if(k->trailerhdrpresent!=TRUE) {
           /* No Trailer: header found - revert to original Curl processing */
           ch->state = CHUNK_STOPCR;

@@ -119,7 +119,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
   size_t buffersize = (size_t)bytes;
   int nread;

-  if(conn->bits.upload_chunky) {
+  if(data->req.upload_chunky) {
     /* if chunked Transfer-Encoding */
     buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
     data->req.upload_fromhere += 10; /* 32bit hex + CRLF */
@@ -143,7 +143,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
     /* the read function returned a too large value */
     return CURLE_READ_ERROR;

-  if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
+  if(!data->req.forbidchunk && data->req.upload_chunky) {
     /* if chunked Transfer-Encoding */
     char hexbuffer[11];
     int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
@@ -594,7 +594,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
       else {
         k->header = FALSE; /* no more header to parse! */

-        if((k->size == -1) && !conn->bits.chunk && !conn->bits.close &&
+        if((k->size == -1) && !k->chunk && !conn->bits.close &&
            (k->httpversion >= 11) ) {
           /* On HTTP 1.1, when connection is not to get closed, but no
              Content-Length nor Content-Encoding chunked have been
@@ -683,7 +683,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
          * If we requested a "no body", this is a good time to get
          * out and return home.
          */
-        if(conn->bits.no_body)
+        if(data->set.opt_no_body)
           stop_reading = TRUE;
         else {
           /* If we know the expected size of this document, we set the
@@ -699,7 +699,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
             Content-Length: headers if we are now receiving data
             using chunked Transfer-Encoding.
           */
-          if(conn->bits.chunk)
+          if(k->chunk)
             k->size=-1;

         }
@@ -1002,7 +1002,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
            * with the previously mentioned size. There can be any amount
            * of chunks, and a chunk-data set to zero signals the
            * end-of-chunks. */
-          conn->bits.chunk = TRUE; /* chunks coming our way */
+          k->chunk = TRUE; /* chunks coming our way */

           /* init our chunky engine */
           Curl_httpchunk_init(conn);
@@ -1018,7 +1018,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
            *
            * It seems both Trailer: and Trailers: occur in the wild.
            */
-          conn->bits.trailerhdrpresent = TRUE;
+          k->trailerhdrpresent = TRUE;
         }

         else if(checkprefix("Content-Encoding:", k->p) &&
@@ -1258,7 +1258,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
       }

 #ifndef CURL_DISABLE_HTTP
-      if(conn->bits.chunk) {
+      if(k->chunk) {
         /*
          * Here comes a chunked transfer flying and we need to decode this
          * properly. While the name says read, this function both reads
@@ -1326,7 +1326,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,

       Curl_pgrsSetDownloadCounter(data, k->bytecount);

-      if(!conn->bits.chunk && (nread || k->badheader || is_empty_data)) {
+      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
         /* If this is chunky transfer, it was already written */

         if(k->badheader && !k->ignorebody) {
|
||||
* returning.
|
||||
*/
|
||||
|
||||
if(!(conn->bits.no_body) && (k->size != -1) &&
|
||||
if(!(data->set.opt_no_body) && (k->size != -1) &&
|
||||
(k->bytecount != k->size) &&
|
||||
#ifdef CURL_DO_LINEEND_CONV
|
||||
/* Most FTP servers don't adjust their file SIZE response for CRLFs,
|
||||
@@ -1643,8 +1643,8 @@ CURLcode Curl_readwrite(struct connectdata *conn,
             k->size - k->bytecount);
       return CURLE_PARTIAL_FILE;
     }
-    else if(!(conn->bits.no_body) &&
-            conn->bits.chunk &&
+    else if(!(data->set.opt_no_body) &&
+            k->chunk &&
             (conn->chunk.state != CHUNK_STOP)) {
       /*
        * In chunked mode, return an error if the connection is closed prior to
@@ -1747,7 +1747,7 @@ Transfer(struct connectdata *conn)
     return CURLE_OK;

   /* we want header and/or body, if neither then don't do this! */
-  if(!conn->bits.getheader && conn->bits.no_body)
+  if(!k->getheader && data->set.opt_no_body)
     return CURLE_OK;

   while(!done) {
@@ -2317,7 +2317,7 @@ bool Curl_retry_request(struct connectdata *conn,
   if((data->req.bytecount +
       data->req.headerbytecount == 0) &&
      conn->bits.reuse &&
-     !conn->bits.no_body) {
+     !data->set.opt_no_body) {
     /* We got no data, we attempted to re-use a connection and yet we want a
        "body". This might happen if the connection was left alive when we were
        done using it before, but that was closed when we wanted to read from
@@ -2486,7 +2486,7 @@ Curl_setup_transfer(
     CURL_SOCKET_BAD : conn->sock[sockindex];
   conn->writesockfd = writesockindex == -1 ?
     CURL_SOCKET_BAD:conn->sock[writesockindex];
-  conn->bits.getheader = getheader;
+  k->getheader = getheader;

   k->size = size;
   k->bytecountp = bytecountp;
@@ -2496,13 +2496,13 @@ Curl_setup_transfer(
      necessary input is not always known in do_complete() as this function may
      be called after that */

-  if(!conn->bits.getheader) {
+  if(!k->getheader) {
     k->header = FALSE;
     if(size > 0)
       Curl_pgrsSetDownloadSize(data, size);
   }
   /* we want header and/or body, if neither then don't do this! */
-  if(conn->bits.getheader || !conn->bits.no_body) {
+  if(k->getheader || !data->set.opt_no_body) {

     if(conn->sockfd != CURL_SOCKET_BAD) {
       k->keepon |= KEEP_READ;
lib/url.c | 20
@@ -3538,7 +3538,6 @@ static CURLcode CreateConnection(struct SessionHandle *data,

   conn->bits.user_passwd = (bool)(NULL != data->set.str[STRING_USERPWD]);
   conn->bits.proxy_user_passwd = (bool)(NULL != data->set.str[STRING_PROXYUSERPWD]);
-  conn->bits.no_body = data->set.opt_no_body;
   conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy;
   conn->bits.ftp_use_epsv = data->set.ftp_use_epsv;
   conn->bits.ftp_use_eprt = data->set.ftp_use_eprt;
@@ -4007,9 +4006,6 @@ static CURLcode CreateConnection(struct SessionHandle *data,
     else
       free(old_conn->host.rawalloc); /* free the newly allocated name buffer */

-    /* get the newly set value, not the old one */
-    conn->bits.no_body = old_conn->bits.no_body;
-
     /* re-use init */
     conn->bits.reuse = TRUE; /* yes, we're re-using here */

@@ -4053,18 +4049,6 @@ static CURLcode CreateConnection(struct SessionHandle *data,
   conn->seek_func = data->set.seek_func;
   conn->seek_client = data->set.seek_client;

-  if((conn->protocol&PROT_HTTP) &&
-     data->set.upload &&
-     (data->set.infilesize == -1) &&
-     (data->set.httpversion != CURL_HTTP_VERSION_1_0)) {
-    /* HTTP, upload, unknown file size and not HTTP 1.0 */
-    conn->bits.upload_chunky = TRUE;
-  }
-  else {
-    /* else, no chunky upload */
-    conn->bits.upload_chunky = FALSE;
-  }
-
 #ifndef USE_ARES
   /*************************************************************
    * Set timeout if that is being used, and we're not using an asynchronous
@@ -4542,8 +4526,8 @@ static CURLcode do_init(struct connectdata *conn)
  */
 static void do_complete(struct connectdata *conn)
 {
-  conn->bits.chunk=FALSE;
-  conn->bits.trailerhdrpresent=FALSE;
+  conn->data->req.chunk=FALSE;
+  conn->data->req.trailerhdrpresent=FALSE;

   conn->data->req.maxfd = (conn->sockfd>conn->writesockfd?
                            conn->sockfd:conn->writesockfd)+1;
@@ -552,7 +552,6 @@ struct FILEPROTO {
 struct ConnectBits {
   bool close; /* if set, we close the connection after this request */
   bool reuse; /* if set, this is a re-used connection */
-  bool chunk; /* if set, this is a chunked transfer-encoding */
   bool proxy; /* if set, this transfer is done through a proxy - any type */
   bool httpproxy; /* if set, this transfer is done through a http proxy */
   bool user_passwd; /* do we use user+password for this connection? */
@@ -564,14 +563,6 @@ struct ConnectBits {
   bool do_more; /* this is set TRUE if the ->curl_do_more() function is
                    supposed to be called, after ->curl_do() */

-  bool upload_chunky; /* set TRUE if we are doing chunked transfer-encoding
-                         on upload */
-  bool getheader; /* TRUE if header parsing is wanted */
-
-  bool forbidchunk; /* used only to explicitly forbid chunk-upload for
-                       specific upload buffers. See readmoredata() in
-                       http.c for details. */
-
   bool tcpconnect; /* the TCP layer (or simimlar) is connected, this is set
                       the first time on the first connect function call */
   bool protoconnstart;/* the protocol layer has STARTED its operation after
@@ -579,7 +570,6 @@ struct ConnectBits {

   bool retry; /* this connection is about to get closed and then
                  re-attempted at another connection. */
-  bool no_body; /* CURLOPT_NO_BODY (or similar) was set */
   bool tunnel_proxy; /* if CONNECT is used to "tunnel" through the proxy.
                         This is implicit when SSL-protocols are used through
                         proxies, but can also be enabled explicitly by
@@ -603,9 +593,6 @@ struct ConnectBits {
                  requests */
   bool netrc; /* name+password provided by netrc */

-  bool trailerhdrpresent; /* Set when Trailer: header found in HTTP response.
-                             Required to determine whether to look for trailers
-                             in case of Transfer-Encoding: chunking */
   bool done; /* set to FALSE when Curl_do() is called and set to TRUE
                 when Curl_done() is called, to prevent Curl_done() to
                 get invoked twice when the multi interface is
@@ -773,6 +760,18 @@ struct SingleRequest {
      and the 'upload_present' contains the number of bytes available at this
      position */
   char *upload_fromhere;
+
+  bool chunk; /* if set, this is a chunked transfer-encoding */
+  bool upload_chunky; /* set TRUE if we are doing chunked transfer-encoding
+                         on upload */
+  bool getheader; /* TRUE if header parsing is wanted */
+
+  bool forbidchunk; /* used only to explicitly forbid chunk-upload for
+                       specific upload buffers. See readmoredata() in
+                       http.c for details. */
+  bool trailerhdrpresent; /* Set when Trailer: header found in HTTP response.
+                             Required to determine whether to look for trailers
+                             in case of Transfer-Encoding: chunking */
 };

 /*