diff --git a/CHANGES b/CHANGES
index 99f5ee58e..69ce8d84a 100644
--- a/CHANGES
+++ b/CHANGES
@@ -7,6 +7,17 @@ Changelog
 
 Daniel (29 January 2005)
+- Using the multi interface, a request over a re-used connection that got
+  closed just after the request was sent failed and did not re-issue the
+  request on a fresh reconnect the way the easy interface does. Now it does!
+
+- Define CURL_MULTIEASY when building libcurl (lib/easy.c to be exact), to use
+  my new curl_easy_perform() that uses the multi interface to run the
+  request. It is a great testbed for the multi interface and I believe we
+  shall do it this way for real in the future when we have a successor to
+  curl_multi_fdset(). I've used this approach to detect and fix several of the
+  recent multi interface issues.
+
 - Adjusted the KNOWN_BUGS #17 fix a bit more since the FTP code also did some
   bad assumptions.
 
diff --git a/lib/multi.c b/lib/multi.c
index f569c281a..e4773d06d 100644
--- a/lib/multi.c
+++ b/lib/multi.c
@@ -506,19 +506,24 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
         Curl_done(&easy->easy_conn, easy->result);
       }
 
-      /* after the transfer is done, go DONE */
       else if(TRUE == done) {
+        char *newurl;
+        bool retry = Curl_retry_request(easy->easy_conn, &newurl);
 
         /* call this even if the readwrite function returned error */
         Curl_posttransfer(easy->easy_handle);
 
         /* When we follow redirects, must to go back to the CONNECT state */
-        if(easy->easy_conn->newurl) {
-          char *newurl = easy->easy_conn->newurl;
-          easy->easy_conn->newurl = NULL;
+        if(easy->easy_conn->newurl || retry) {
+          if(!retry) {
+            /* if the URL is a follow-location and not just a retried
+               request, figure out the URL here */
+            newurl = easy->easy_conn->newurl;
+            easy->easy_conn->newurl = NULL;
+          }
           easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
           if(easy->result == CURLE_OK)
-            easy->result = Curl_follow(easy->easy_handle, newurl, FALSE);
+            easy->result = Curl_follow(easy->easy_handle, newurl, retry);
           if(CURLE_OK == easy->result) {
             easy->state = CURLM_STATE_CONNECT;
             result = CURLM_CALL_MULTI_PERFORM;
@@ -529,6 +534,7 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
             free(newurl);
         }
         else {
+          /* after the transfer is done, go DONE */
           easy->state = CURLM_STATE_DONE;
           result = CURLM_CALL_MULTI_PERFORM;
         }
diff --git a/lib/transfer.c b/lib/transfer.c
index 71338977d..aed230a17 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -2060,7 +2060,32 @@ Curl_connect_host(struct SessionHandle *data,
 
   return res;
 }
 
+/* Returns TRUE and sets '*url' if a request retry is wanted */
+bool Curl_retry_request(struct connectdata *conn,
+                        char **url)
+{
+  bool retry = FALSE;
+  if((conn->keep.bytecount+conn->headerbytecount == 0) &&
+     conn->bits.reuse) {
+    /* We got no data and we attempted to re-use a connection. This might
+       happen if the connection was left alive when we were done using it
+       before, but that was closed when we wanted to read from it again. Bad
+       luck. Retry the same request on a fresh connect! */
+    infof(conn->data, "Connection died, retrying a fresh connect\n");
+    *url = strdup(conn->data->change.url);
+
+    conn->bits.close = TRUE; /* close this connection */
+    conn->bits.retry = TRUE; /* mark this as a connection we're about to
+                                retry. Marking it this way should prevent
+                                e.g. HTTP transfers from returning an
+                                error just because nothing has been
+                                transferred! */
+    retry = TRUE;
+  }
+
+  return retry;
+}
 
 /*
  * Curl_perform() is the internal high-level function that gets called by the
@@ -2106,31 +2131,12 @@ CURLcode Curl_perform(struct SessionHandle *data)
     if(res == CURLE_OK && !data->set.source_url) {
       res = Transfer(conn); /* now fetch that URL please */
       if(res == CURLE_OK) {
+        retry = Curl_retry_request(conn, &newurl);
-        retry = FALSE;
-
-        if((conn->keep.bytecount+conn->headerbytecount == 0) &&
-            conn->bits.reuse) {
-          /* We got no data and we attempted to re-use a connection. This
-             might happen if the connection was left alive when we were done
-             using it before, but that was closed when we wanted to read
-             from it again. Bad luck. Retry the same request on a fresh
-             connect! */
-          infof(data, "Connection died, retrying a fresh connect\n");
-          newurl = strdup(conn->data->change.url);
-
-          conn->bits.close = TRUE; /* close this connection */
-          conn->bits.retry = TRUE; /* mark this as a connection we're about
-                                      to retry. Marking it this way should
-                                      prevent i.e HTTP transfers to return
-                                      error just because nothing has been
-                                      transfered! */
-          retry = TRUE;
-        }
-        else
+        if(!retry)
          /*
-          * We must duplicate the new URL here as the connection data
-          * may be free()ed in the Curl_done() function.
+          * We must duplicate the new URL here as the connection data may
+          * be free()ed in the Curl_done() function.
          */
          newurl = conn->newurl?strdup(conn->newurl):NULL;
       }
 
diff --git a/lib/transfer.h b/lib/transfer.h
index 86301d60a..6607b0915 100644
--- a/lib/transfer.h
+++ b/lib/transfer.h
@@ -36,7 +36,7 @@ void Curl_single_fdset(struct connectdata *conn,
 
 CURLcode Curl_readwrite_init(struct connectdata *conn);
 CURLcode Curl_readrewind(struct connectdata *conn);
 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp);
-
+bool Curl_retry_request(struct connectdata *conn, char **url);
 /* This sets up a forthcoming transfer */
 CURLcode Curl_Transfer (struct connectdata *data,
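
For context, the CURL_MULTIEASY curl_easy_perform() mentioned in the CHANGES
entry above drives a single easy handle through the multi interface. Below is
a minimal sketch of that general pattern, for illustration only: the helper
name easy_perform_via_multi and the fixed 1-second select() timeout are
invented here, error checking is trimmed, and it is not the actual code in
lib/easy.c.

#include <curl/curl.h>
#include <sys/select.h>

static CURLcode easy_perform_via_multi(CURL *easy)
{
  CURLM *multi = curl_multi_init();
  CURLMcode mcode;
  CURLcode result = CURLE_OK;
  int still_running = 0;

  curl_multi_add_handle(multi, easy);

  /* drive the transfer until no handle is running anymore */
  do {
    while((mcode = curl_multi_perform(multi, &still_running)) ==
          CURLM_CALL_MULTI_PERFORM)
      ; /* call again at once, as requested by libcurl */

    if(!still_running)
      break;

    /* wait for activity on the transfer's socket(s); if maxfd is -1 the
       select() simply acts as a short sleep before we try again */
    {
      fd_set rd, wr, exc;
      int maxfd = -1;
      struct timeval timeout = { 1, 0 }; /* one second */

      FD_ZERO(&rd);
      FD_ZERO(&wr);
      FD_ZERO(&exc);
      curl_multi_fdset(multi, &rd, &wr, &exc, &maxfd);
      select(maxfd + 1, &rd, &wr, &exc, &timeout);
    }
  } while(still_running);

  /* pick up the per-transfer result code */
  {
    int msgs_left;
    CURLMsg *msg;
    while((msg = curl_multi_info_read(multi, &msgs_left))) {
      if(msg->msg == CURLMSG_DONE && msg->easy_handle == easy)
        result = msg->data.result;
    }
  }

  curl_multi_remove_handle(multi, easy);
  curl_multi_cleanup(multi);
  return result;
}

With the multi.c fix above, a dead re-used connection detected by
Curl_retry_request() makes the state machine go back to CURLM_STATE_CONNECT,
so a loop like this re-issues the request on a fresh connection just like the
easy interface already did.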