Improved the logic that decides whether to use HTTP 1.1 features or not in a
request. Detect cases where an upload must be sent chunked but the server
supports only HTTP 1.0, and return CURLE_UPLOAD_FAILED.
This commit is contained in:
parent 3acd1146f9
commit 91ff938035
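In practice, the second change means that an upload whose size libcurl does not
know (so it has to be sent with chunked transfer-encoding) now fails early when
the connection is known to speak only HTTP 1.0, instead of sending a request
the server cannot parse. A minimal sketch of how an application could run into
and check for that error follows; the URL and the read callback are
placeholders and not part of this commit.

    /* A minimal sketch, not part of this commit: an upload with unknown size
       forces chunked transfer-encoding, which HTTP 1.0 cannot carry.  The URL
       and the read callback are placeholders. */
    #include <stdio.h>
    #include <curl/curl.h>

    static size_t read_cb(char *buf, size_t size, size_t nitems, void *userdata)
    {
      return fread(buf, size, nitems, (FILE *)userdata);
    }

    int main(void)
    {
      CURL *curl = curl_easy_init();
      CURLcode rc;
      if(!curl)
        return 1;
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload");
      curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);            /* PUT */
      curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb);
      curl_easy_setopt(curl, CURLOPT_READDATA, stdin);
      /* no CURLOPT_INFILESIZE set, so libcurl must use chunked encoding */
      rc = curl_easy_perform(curl);
      if(rc == CURLE_UPLOAD_FAILED)
        fprintf(stderr, "upload failed; one possible cause after this change "
                "is a chunked upload to an HTTP 1.0-only server\n");
      curl_easy_cleanup(curl);
      return (int)rc;
    }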
CHANGES (13 lines changed)
@@ -6,6 +6,19 @@

 Changelog

+Daniel Fandrich (5 Sep 2008)
+- Improved the logic the decides whether to use HTTP 1.1 features or not in a
+  request. Setting a specific version with CURLOPT_HTTP_VERSION overrides
+  all other checks, but otherwise, a 1.0 request will be made if the server
+  is known to support only 1.0 because it previously responded so and the
+  connection was kept alive, or a response to a previous request on this handle
+  came back as 1.0. The latter could take place in cases like redirection or
+  authentication where several requests have to be made before the operation
+  is complete.
+
+- Detect cases where an upload must be sent chunked and the server supports
+  only HTTP 1.0 and return CURLE_UPLOAD_FAILED.
+
 Daniel Stenberg (5 Sep 2008)
 - Martin Drasar provided the CURLOPT_POSTREDIR patch. It renames
   CURLOPT_POST301 (but adds a define for backwards compatibility for you who
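As the entry above notes, an explicit CURLOPT_HTTP_VERSION setting overrides
the automatic choice. A small sketch of how a caller pins or releases the
version, using only the standard libcurl option and its existing constants
(the helper name is made up):

    #include <curl/curl.h>

    /* Sketch only: pin the HTTP version on an easy handle, or hand the choice
       back to libcurl.  'version' is CURL_HTTP_VERSION_1_0,
       CURL_HTTP_VERSION_1_1 or CURL_HTTP_VERSION_NONE (the default, meaning
       the automatic 1.0/1.1 logic described above applies). */
    static void pin_http_version(CURL *curl, long version)
    {
      curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, version);
    }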
@@ -12,6 +12,7 @@ This release includes the following changes:
 o pkg-config can now show supported_protocols and supported_features
 o Added CURLOPT_CERTINFO and CURLINFO_CERTINFO
 o Added CURLOPT_POSTREDIR
+o Better detect HTTP 1.0 servers and don't do HTTP 1.1 requests on them

 This release includes the following bugfixes:

@@ -4,12 +4,6 @@ To be addressed before 7.19.1 (planned release: October/November 2008)
 162 - Craig Perras' note "http upload: how to stop on error"
       http://curl.haxx.se/mail/archive-2008-08/0138.html

-163 - Detecting illegal attempts at chunked transfers on HTTP 1.0
-      (tests 1069, 1072, 1073)
-      http://curl.haxx.se/mail/archive-2008-08/0435.html
-
-164 - Automatic downgrading to HTTP 1.0 (tests 1071 through 1074)
-
 165 - "Problem with CURLOPT_RESUME_FROM and CURLOPT_APPEND" by Daniele Pinau,
       recipe: http://curl.haxx.se/mail/lib-2008-08/0439.html

lib/http.c (42 lines changed)
@@ -1950,15 +1950,30 @@ CURLcode Curl_http_done(struct connectdata *conn,
   return CURLE_OK;
 }

+/* Determine if we should use HTTP 1.1 for this request. Reasons to avoid it
+   are if the user specifically requested HTTP 1.0, if the server we are
+   connected to only supports 1.0, or if any server previously contacted to
+   handle this request only supports 1.0. */
+static bool use_http_1_1(const struct SessionHandle *data,
+                         const struct connectdata *conn)
+{
+  return (data->set.httpversion == CURL_HTTP_VERSION_1_1) ||
+         ((data->set.httpversion != CURL_HTTP_VERSION_1_0) &&
+          ((conn->httpversion == 11) ||
+           ((conn->httpversion != 10) &&
+            (data->state.httpversion != 10))));
+}
+
 /* check and possibly add an Expect: header */
 static CURLcode expect100(struct SessionHandle *data,
+                          struct connectdata *conn,
                           send_buffer *req_buffer)
 {
   CURLcode result = CURLE_OK;
   data->state.expect100header = FALSE; /* default to false unless it is set
                                           to TRUE below */
-  if((data->set.httpversion != CURL_HTTP_VERSION_1_0) &&
-     !checkheaders(data, "Expect:")) {
+  if(use_http_1_1(data, conn) && !checkheaders(data, "Expect:")) {
     /* if not doing HTTP 1.0 or disabled explicitly, we add a Expect:
        100-continue to the headers which actually speeds up post
        operations (as there is one packet coming back from the web
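The nested boolean in use_http_1_1() packs the whole decision into a single
expression. An equivalent step-by-step reading, shown here only as an
illustration and not as part of the patch:

    static bool use_http_1_1_explained(const struct SessionHandle *data,
                                       const struct connectdata *conn)
    {
      if(data->set.httpversion == CURL_HTTP_VERSION_1_1)
        return TRUE;   /* the user forced 1.1 */
      if(data->set.httpversion == CURL_HTTP_VERSION_1_0)
        return FALSE;  /* the user forced 1.0 */
      if(conn->httpversion == 11)
        return TRUE;   /* this connection already got a 1.1 response */
      if(conn->httpversion == 10)
        return FALSE;  /* this connection already got a 1.0 response */
      if(data->state.httpversion == 10)
        return FALSE;  /* an earlier server in this request answered 1.0 */
      return TRUE;     /* nothing known yet, default to 1.1 */
    }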
@@ -2139,10 +2154,14 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
   else {
     if((conn->protocol&PROT_HTTP) &&
        data->set.upload &&
-       (data->set.infilesize == -1) &&
-       (data->set.httpversion != CURL_HTTP_VERSION_1_0)) {
-      /* HTTP, upload, unknown file size and not HTTP 1.0 */
-      data->req.upload_chunky = TRUE;
+       (data->set.infilesize == -1)) {
+      if (use_http_1_1(data, conn)) {
+        /* HTTP, upload, unknown file size and not HTTP 1.0 */
+        data->req.upload_chunky = TRUE;
+      } else {
+        failf(data, "Chunky upload is not supported by HTTP 1.0");
+        return CURLE_UPLOAD_FAILED;
+      }
     }
     else {
       /* else, no chunky upload */
@@ -2410,8 +2429,9 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
     }
   }

-  /* Use 1.1 unless the use specificly asked for 1.0 */
-  httpstring= data->set.httpversion==CURL_HTTP_VERSION_1_0?"1.0":"1.1";
+  /* Use 1.1 unless the user specifically asked for 1.0 or the server only
+     supports 1.0 */
+  httpstring= use_http_1_1(data, conn)?"1.1":"1.0";

   /* initialize a dynamic send-buffer */
   req_buffer = add_buffer_init();
@@ -2635,7 +2655,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
       return result;
     }

-    result = expect100(data, req_buffer);
+    result = expect100(data, conn, req_buffer);
     if(result)
       return result;

@@ -2707,7 +2727,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
       return result;
     }

-    result = expect100(data, req_buffer);
+    result = expect100(data, conn, req_buffer);
     if(result)
       return result;

@@ -2772,7 +2792,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
          sure that the expect100header is always set to the preferred value
          here. */
       if(postsize > TINY_INITIAL_POST_SIZE) {
-        result = expect100(data, req_buffer);
+        result = expect100(data, conn, req_buffer);
         if(result)
           return result;
       }
@@ -832,7 +832,7 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
         k->header = FALSE; /* no more header to parse! */

         if((k->size == -1) && !k->chunk && !conn->bits.close &&
-           (k->httpversion >= 11) ) {
+           (conn->httpversion >= 11) ) {
           /* On HTTP 1.1, when connection is not to get closed, but no
              Content-Length nor Content-Encoding chunked have been
              received, according to RFC2616 section 4.4 point 5, we
@@ -1006,17 +1006,17 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
         nc = sscanf(HEADER1,
                     " HTTP/%d.%d %3d",
                     &httpversion_major,
-                    &k->httpversion,
+                    &conn->httpversion,
                     &k->httpcode);
         if(nc==3) {
-          k->httpversion += 10 * httpversion_major;
+          conn->httpversion += 10 * httpversion_major;
         }
         else {
           /* this is the real world, not a Nirvana
              NCSA 1.5.x returns this crap when asked for HTTP/1.1
            */
           nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
-          k->httpversion = 10;
+          conn->httpversion = 10;

           /* If user has set option HTTP200ALIASES,
              compare header line against list of aliases
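For reference, the parse above stores the version as major*10 + minor, so
"HTTP/1.1" becomes 11 and "HTTP/1.0" becomes 10, with a fallback to 10 for
servers that do not even send a proper status line. A standalone sketch of the
same parse on a made-up status line:

    #include <stdio.h>

    /* Illustration only: the same " HTTP/%d.%d %3d" parse as above, run on a
       made-up status line. */
    int main(void)
    {
      const char *line = "HTTP/1.1 200 OK";
      int major = 0, version = 0, code = 0;
      int nc;

      nc = sscanf(line, " HTTP/%d.%d %3d", &major, &version, &code);
      if(nc == 3)
        version += 10 * major;        /* "HTTP/1.1" -> 11, "HTTP/1.0" -> 10 */
      else {
        /* fallback, as above, for servers that answer just "HTTP 200" */
        nc = sscanf(line, " HTTP %3d", &code);
        version = 10;
      }
      printf("version=%d code=%d\n", version, code);  /* version=11 code=200 */
      return 0;
    }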
@@ -1025,14 +1025,18 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
             if(checkhttpprefix(data, k->p)) {
               nc = 1;
               k->httpcode = 200;
-              k->httpversion = 10;
+              conn->httpversion = 10;
             }
           }
         }

         if(nc) {
           data->info.httpcode = k->httpcode;
-          data->info.httpversion = k->httpversion;
+          data->info.httpversion = conn->httpversion;
+          if (!data->state.httpversion ||
+              data->state.httpversion > conn->httpversion)
+            /* store the lowest server version we encounter */
+            data->state.httpversion = conn->httpversion;

           /*
            * This code executes as part of processing the header. As a
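The four added lines keep data->state.httpversion at the lowest version
reported by any server involved in the request, so a single HTTP 1.0 hop in a
redirect or authentication chain is enough for use_http_1_1() to fall back to
1.0 afterwards. A standalone illustration of that rule, with made-up version
numbers:

    #include <stdio.h>

    /* Illustration only: the "lowest version wins" rule, run on a made-up
       redirect chain whose middle hop is an HTTP/1.0 server. */
    int main(void)
    {
      int responses[] = { 11, 10, 11 };
      int lowest = 0;                     /* 0 means no response seen yet */
      size_t i;

      for(i = 0; i < sizeof(responses)/sizeof(responses[0]); i++) {
        if(!lowest || lowest > responses[i])
          lowest = responses[i];          /* same comparison as the patch above */
      }
      printf("lowest=%d\n", lowest);      /* prints 10 */
      return 0;
    }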
@@ -1060,14 +1064,14 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
           }
         }

-        if(k->httpversion == 10) {
+        if(conn->httpversion == 10) {
           /* Default action for HTTP/1.0 must be to close, unless
              we get one of those fancy headers that tell us the
              server keeps it open for us! */
           infof(data, "HTTP 1.0, assume close after body\n");
           conn->bits.close = TRUE;
         }
-        else if(k->httpversion >= 11 &&
+        else if(conn->httpversion >= 11 &&
                 !conn->bits.close) {
           /* If HTTP version is >= 1.1 and connection is persistent
              server supports pipelining. */
@@ -1161,7 +1165,7 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
           data->info.contenttype = contenttype;
         }
       }
-      else if((k->httpversion == 10) &&
+      else if((conn->httpversion == 10) &&
               conn->bits.httpproxy &&
               Curl_compareheader(k->p,
                                  "Proxy-Connection:", "keep-alive")) {
@@ -1174,7 +1178,7 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
        conn->bits.close = FALSE; /* don't close when done */
        infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
      }
-     else if((k->httpversion == 11) &&
+     else if((conn->httpversion == 11) &&
              conn->bits.httpproxy &&
              Curl_compareheader(k->p,
                                 "Proxy-Connection:", "close")) {
@@ -1185,7 +1189,7 @@ static CURLcode readwrite_headers(struct SessionHandle *data,
        conn->bits.close = TRUE; /* close when done */
        infof(data, "HTTP/1.1 proxy connection set close!\n");
      }
-     else if((k->httpversion == 10) &&
+     else if((conn->httpversion == 10) &&
              Curl_compareheader(k->p, "Connection:", "keep-alive")) {
        /*
        * A HTTP/1.0 reply with the 'Connection: keep-alive' line
@@ -1886,6 +1890,7 @@ CURLcode Curl_pretransfer(struct SessionHandle *data)
   data->set.followlocation=0; /* reset the location-follow counter */
   data->state.this_is_a_follow = FALSE; /* reset this */
   data->state.errorbuf = FALSE; /* no error has occurred */
+  data->state.httpversion = 0; /* don't assume any particular server version */

   data->state.authproblem = FALSE;
   data->state.authhost.want = data->set.httpauth;
@@ -4689,7 +4689,8 @@ CURLcode Curl_done(struct connectdata **connp,
  * do_init() inits the readwrite session. This is inited each time (in the DO
  * function before the protocol-specific DO functions are invoked) for a
  * transfer, sometimes multiple times on the same SessionHandle. Make sure
- * nothing in here depends on stuff that are setup dynamicly for the transfer.
+ * nothing in here depends on stuff that are setup dynamically for the
+ * transfer.
  */

 static CURLcode do_init(struct connectdata *conn)
@@ -4706,7 +4707,6 @@ static CURLcode do_init(struct connectdata *conn)
   k->start = Curl_tvnow(); /* start time */
   k->now = k->start;   /* current time is now */
   k->header = TRUE; /* assume header */
-  k->httpversion = -1; /* unknown at this point */

   k->bytecount = 0;

@@ -694,7 +694,7 @@ enum expect100 {
 /*
  * Request specific data in the easy handle (SessionHandle). Previously,
  * these members were on the connectdata struct but since a conn struct may
- * now be shared between different SessionHandles, we store connection-specifc
+ * now be shared between different SessionHandles, we store connection-specific
  * data here. This struct only keeps stuff that's interesting for *this*
  * request, as it will be cleared between multiple ones
  */
@@ -738,7 +738,6 @@ struct SingleRequest {
   curl_off_t offset;          /* possible resume offset read from the
                                  Content-Range: header */
   int httpcode;               /* error code from the 'HTTP/1.? XXX' line */
-  int httpversion;            /* the HTTP version*10 */
   struct timeval start100;    /* time stamp to wait for the 100 code from */
   enum expect100 exp100;      /* expect 100 continue state */

@@ -929,6 +928,8 @@ struct connectdata {
   char *proxypasswd;  /* proxy password string, allocated */
   curl_proxytype proxytype; /* what kind of proxy that is in use */

+  int httpversion;        /* the HTTP version*10 reported by the server */
+
   struct timeval now;     /* "current" time */
   struct timeval created; /* creation time */
   curl_socket_t sock[2]; /* two sockets, the second is used for the data
@@ -1218,6 +1219,8 @@ struct UrlState {
   /* set after initial USER failure, to prevent an authentication loop */
   bool ftp_trying_alternative;

+  int httpversion;       /* the lowest HTTP version*10 reported by any server
+                            involved in this request */
   bool expect100header;  /* TRUE if we added Expect: 100-continue */

   bool pipe_broke; /* TRUE if the connection we were pipelined on broke
@@ -1519,7 +1522,7 @@ struct SessionHandle {
   struct Names dns;
   struct Curl_multi *multi;    /* if non-NULL, points to the multi handle
                                   struct to which this "belongs" */
-  struct Curl_one_easy *multi_pos; /* if non-NULL, points to the its position
+  struct Curl_one_easy *multi_pos; /* if non-NULL, points to its position
                                       in multi controlling structure to assist
                                       in removal. */
   struct Curl_share *share;    /* Share, handles global variable mutexing */
@@ -3,8 +3,3 @@
 # test cases are run by runtests.pl. Just add the plain test case numbers, one
 # per line.
 # Lines starting with '#' letters are treated as comments.
-1069
-1071
-1072
-1073
-1074
@@ -12,7 +12,7 @@ followlocation
 # Server-side
 <reply>
 <data>
-HTTP/1.0 307 Redirect swsclose
+HTTP/1.1 307 Redirect swsclose
 Date: Thu, 29 Jul 2008 14:49:00 GMT
 Server: test-server/fake
 Location: data/10530002.txt?coolsite=yes
@@ -21,24 +21,28 @@ Connection: close

 </data>
 <data2>
-HTTP/1.0 200 OK swsclose
+HTTP/1.1 200 OK swsclose
 Date: Thu, 09 Nov 2010 14:49:00 GMT
 Server: test-server/fake
 Content-Length: 11
 Connection: close

 blablabla

 </data2>
 <datacheck>
-HTTP/1.0 307 Redirect swsclose
+HTTP/1.1 307 Redirect swsclose
 Date: Thu, 29 Jul 2008 14:49:00 GMT
 Server: test-server/fake
 Location: data/10530002.txt?coolsite=yes
 Content-Length: 0
 Connection: close

-HTTP/1.0 200 OK swsclose
+HTTP/1.1 200 OK swsclose
 Date: Thu, 09 Nov 2010 14:49:00 GMT
 Server: test-server/fake
 Content-Length: 11
 Connection: close

 blablabla

@@ -9,14 +9,6 @@ HTTP/1.0

 # Server-side
 <reply>
-<data>
-HTTP/1.0 500 Impossible swsclose
-Date: Thu, 09 Nov 2010 14:49:00 GMT
-Server: test-server/fake
-
-blablabla
-
-</data>
 </reply>

 # Client-side
@@ -22,10 +22,9 @@ WWW-Authenticate: Blackmagic realm="gimme all yer s3cr3ts"
 WWW-Authenticate: Basic realm="gimme all yer s3cr3ts"
 WWW-Authenticate: Digest realm="gimme all yer s3cr3ts", nonce="11223344"
 Content-Type: text/plain
-Content-Length: 35
+Content-Length: 0
 Connection: close

-Try again on this HTTP 1.0 server!
 </data>
 </reply>

@@ -71,6 +70,7 @@ This is data we upload with PUT
 it comes from stdin so MUST be sent
 with chunked encoding
+which is impossible in HTTP/1.0

 0

 </protocol>
@@ -64,6 +64,7 @@ This is data we upload with PUT
 it comes from stdin so MUST be sent
 with chunked encoding
+which is impossible in HTTP/1.0

 0

 </protocol>