Peter Silva introduced CURLOPT_MAX_SEND_SPEED_LARGE and
CURLOPT_MAX_RECV_SPEED_LARGE that limit the maximum rate libcurl is allowed
to send or receive data. This effectively adds the command line tool's
option --limit-rate to the library.

The rate limiting logic in the curl app is now removed and is instead
provided by libcurl itself. Transfer rate limiting will now also work for -d
and -F, which it didn't before.
Daniel Stenberg 2006-06-22 21:36:53 +00:00
parent 3e5dcc8bcd
commit dfe1884c25
10 changed files with 112 additions and 124 deletions
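
For reference, a minimal sketch (not part of this commit) of how an application
would set the two new options through the easy interface. The URL and the
100 KB/s figure are placeholders, and the curl_off_t casts matter because
curl_easy_setopt() takes variadic arguments:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/big.bin");
    /* cap the download at roughly 100 KB/s on cumulative average */
    curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)102400);
    /* cap uploads the same way; 0 (the default) means unlimited */
    curl_easy_setopt(curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)102400);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}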

CHANGES

@ -6,6 +6,16 @@
Changelog
Daniel (20 June 2006)
- Peter Silva introduced CURLOPT_MAX_SEND_SPEED_LARGE and
CURLOPT_MAX_RECV_SPEED_LARGE that limit the maximum rate libcurl is allowed
to send or receive data. This effectively adds the command line tool's
option --limit-rate to the library.
The rate limiting logic in the curl app is now removed and is instead
provided by libcurl itself. Transfer rate limiting will now also work for -d
and -F, which it didn't before.
Daniel (19 June 2006)
- Made -K on a file that couldn't be read cause a warning to be displayed.


@ -11,6 +11,7 @@ Curl and libcurl 7.15.5
This release includes the following changes:
o added CURLOPT_MAX_SEND_SPEED_LARGE and CURLOPT_MAX_RECV_SPEED_LARGE
o configure --enable-hidden-symbols
This release includes the following bugfixes:
@ -19,6 +20,7 @@ This release includes the following bugfixes:
Other curl-related news:
o cURLpp 0.6.0 was released: http://rrette.com/curlpp.html
o pycurl-7.15.4 was released: http://pycurl.sf.net
New curl mirrors:
@ -28,6 +30,6 @@ New curl mirrors:
This release would not have looked like this without help, code, reports and
advice from friends like these:
Dan Fandrich
Dan Fandrich, Peter Silva
Thanks! (and sorry if I forgot to mention someone)


@ -103,10 +103,6 @@ may have been fixed since this was written!
http://curl.haxx.se/bug/view.cgi?id=1004841. How?
http://curl.haxx.se/mail/lib-2004-08/0182.html
9. --limit-rate using -d or -F does not work. This is because the limit logic
is provided by the curl app in its read/write callbacks, and when doing
-d/-F the callbacks aren't used! http://curl.haxx.se/bug/view.cgi?id=921395
8. Doing resumed upload over HTTP does not work with '-C -', because curl
doesn't do a HEAD first to get the initial size. This needs to be done
manually for HTTP PUT resume to work, and then '-C [index]'.


@ -1051,6 +1051,14 @@ for the library to consider it too slow and abort.
Pass a long as parameter. It contains the time in seconds that the transfer
should be below the \fICURLOPT_LOW_SPEED_LIMIT\fP for the library to consider
it too slow and abort.
.IP CURLOPT_MAX_SEND_SPEED_LARGE
Pass a curl_off_t as parameter. If an upload exceeds this speed on cumulative
average during the transfer, the transfer will pause to keep the average rate
less than or equal to the parameter value. (default: 0, unlimited)
.IP CURLOPT_MAX_RECV_SPEED_LARGE
Pass a curl_off_t as parameter. If a download exceeds this speed on cumulative
average during the transfer, the transfer will pause to keep the average rate
less than or equal to the parameter value. (default: 0, unlimited)
.IP CURLOPT_MAXCONNECTS
Pass a long. The set number will be the persistent connection cache size. The
set amount will be the maximum amount of simultaneously open connections that


@ -973,6 +973,11 @@ typedef enum {
Note that this is used only for SSL certificate processing */
CINIT(CONV_FROM_UTF8_FUNCTION, FUNCTIONPOINT, 144),
/* if the connection proceeds too quickly then need to slow it down */
/* limit-rate: maximum number of bytes per second to send or receive */
CINIT(MAX_SEND_SPEED_LARGE, OFF_T, 145),
CINIT(MAX_RECV_SPEED_LARGE, OFF_T, 146),
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;


@ -68,6 +68,7 @@ typedef enum {
CURLM_STATE_DOING, /* sending off the request (part 1) */
CURLM_STATE_DO_MORE, /* send off the request (part 2) */
CURLM_STATE_PERFORM, /* transfer data */
CURLM_STATE_TOOFAST, /* wait because limit-rate exceeded */
CURLM_STATE_DONE, /* post data transfer operation */
CURLM_STATE_COMPLETED, /* operation complete */
@ -156,6 +157,7 @@ static void multistate(struct Curl_one_easy *easy, CURLMstate state)
"DOING",
"DO_MORE",
"PERFORM",
"TOOFAST",
"DONE",
"COMPLETED",
};
@ -440,6 +442,7 @@ static int multi_getsock(struct Curl_one_easy *easy,
int numsocks)
{
switch(easy->state) {
case CURLM_STATE_TOOFAST: /* returns 0, so will not select. */
default:
return 0;
@ -771,7 +774,37 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
break;
case CURLM_STATE_TOOFAST: /* limit-rate exceeded in either direction */
/* if both rates are within spec, resume transfer */
Curl_pgrsUpdate(easy->easy_conn);
if ( ( ( easy->easy_handle->set.max_send_speed == 0 ) ||
( easy->easy_handle->progress.ulspeed <
easy->easy_handle->set.max_send_speed ) ) &&
( ( easy->easy_handle->set.max_recv_speed == 0 ) ||
( easy->easy_handle->progress.dlspeed <
easy->easy_handle->set.max_recv_speed ) )
)
multistate(easy, CURLM_STATE_PERFORM);
break;
case CURLM_STATE_PERFORM:
/* check if over speed */
if ( ( ( easy->easy_handle->set.max_send_speed > 0 ) &&
( easy->easy_handle->progress.ulspeed >
easy->easy_handle->set.max_send_speed ) ) ||
( ( easy->easy_handle->set.max_recv_speed > 0 ) &&
( easy->easy_handle->progress.dlspeed >
easy->easy_handle->set.max_recv_speed ) )
) {
/* Transfer is over the speed limit. Change state. TODO: Call
* Curl_expire() with the time left until we're targeted to be below
* the speed limit again. */
multistate(easy, CURLM_STATE_TOOFAST );
break;
}
/* read/write data if it is ready to do so */
easy->result = Curl_readwrite(easy->easy_conn, &done);
@ -825,6 +858,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
}
break;
case CURLM_STATE_DONE:
/* post-transfer command */
easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
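
With the multi interface the throttling is just as transparent to the
application: while a handle is parked in the new TOOFAST state, multi_getsock()
returns no sockets (see the case above), so the application's fd sets stay
empty and its select() simply times out until the cumulative average falls back
under the limit. A rough sketch of such a loop, again not part of the commit
and with placeholder URL and limit:

#include <sys/select.h>
#include <curl/curl.h>

int main(void)
{
  CURL *easy = curl_easy_init();
  CURLM *multi = curl_multi_init();
  int still_running = 0;

  curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/big.bin");
  curl_easy_setopt(easy, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)51200);
  curl_multi_add_handle(multi, easy);

  do {
    fd_set fdread, fdwrite, fdexcep;
    int maxfd = -1;
    struct timeval timeout = { 1, 0 };

    /* transfer data, or let libcurl notice it must hold back */
    while(curl_multi_perform(multi, &still_running) ==
          CURLM_CALL_MULTI_PERFORM)
      ;

    FD_ZERO(&fdread);
    FD_ZERO(&fdwrite);
    FD_ZERO(&fdexcep);
    curl_multi_fdset(multi, &fdread, &fdwrite, &fdexcep, &maxfd);

    /* maxfd stays -1 while the handle is in TOOFAST; just wait out the
       timeout before calling curl_multi_perform() again */
    select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout);
  } while(still_running);

  curl_multi_remove_handle(multi, easy);
  curl_multi_cleanup(multi);
  curl_easy_cleanup(easy);
  return 0;
}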


@ -1628,15 +1628,31 @@ Transfer(struct connectdata *conn)
interval_ms = 1 * 1000;
if(k->keepon & KEEP_READ)
fd_read = conn->sockfd;
else
fd_read = CURL_SOCKET_BAD;
if(k->keepon & KEEP_WRITE)
fd_write = conn->writesockfd;
else
/* limit-rate logic: if speed exceeds threshold, then do not include fd in
select set */
if ( (conn->data->set.max_send_speed > 0) &&
(conn->data->progress.ulspeed > conn->data->set.max_send_speed) ) {
fd_write = CURL_SOCKET_BAD;
Curl_pgrsUpdate(conn);
}
else {
if(k->keepon & KEEP_WRITE)
fd_write = conn->writesockfd;
else
fd_write = CURL_SOCKET_BAD;
}
if ( (conn->data->set.max_recv_speed > 0) &&
(conn->data->progress.dlspeed > conn->data->set.max_recv_speed) ) {
fd_read = CURL_SOCKET_BAD;
Curl_pgrsUpdate(conn);
}
else {
if(k->keepon & KEEP_READ)
fd_read = conn->sockfd;
else
fd_read = CURL_SOCKET_BAD;
}
switch (Curl_select(fd_read, fd_write, interval_ms)) {
case -1: /* select() error, stop reading */
@ -1651,6 +1667,7 @@ Transfer(struct connectdata *conn)
continue;
case 0: /* timeout */
default: /* readable descriptors */
result = Curl_readwrite(conn, &done);
break;
}


@ -1039,6 +1039,22 @@ CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option,
*/
data->set.low_speed_limit=va_arg(param, long);
break;
case CURLOPT_MAX_SEND_SPEED_LARGE:
/*
* The max send speed limit: if the upload goes faster than
* CURLOPT_MAX_SEND_SPEED_LARGE bytes per second, the transfer is
* throttled.
*/
data->set.max_send_speed=va_arg(param, curl_off_t);
break;
case CURLOPT_MAX_RECV_SPEED_LARGE:
/*
* The max receive speed limit: if the download goes faster than
* CURLOPT_MAX_RECV_SPEED_LARGE bytes per second, the transfer is
* throttled.
*/
data->set.max_recv_speed=va_arg(param, curl_off_t);
break;
case CURLOPT_LOW_SPEED_TIME:
/*
* The low speed time that if transfers are below the set


@ -1039,6 +1039,8 @@ struct UserDefined {
curl_off_t infilesize; /* size of file to upload, -1 means unknown */
long low_speed_limit; /* bytes/second */
long low_speed_time; /* number of seconds */
curl_off_t max_send_speed; /* high speed limit in bytes/second for upload */
curl_off_t max_recv_speed; /* high speed limit in bytes/second for download */
curl_off_t set_resume_from; /* continue [ftp] transfer from here */
char *cookie; /* HTTP cookie string to send */
struct curl_slist *headers; /* linked list of extra headers */


@ -2660,9 +2660,7 @@ static size_t my_fwrite(void *buffer, size_t sz, size_t nmemb, void *stream)
size_t rc;
struct OutStruct *out=(struct OutStruct *)stream;
struct Configurable *config = out->config;
curl_off_t size = (curl_off_t)(sz * nmemb); /* typecast to prevent
warnings when converting from
unsigned to signed */
if(out && !out->stream) {
/* open file for writing */
out->stream=fopen(out->filename, "wb");
@ -2679,55 +2677,6 @@ static size_t my_fwrite(void *buffer, size_t sz, size_t nmemb, void *stream)
}
}
if(config->recvpersecond) {
/*
* We know when we received data the previous time. We know how much data
* we get now. Make sure that this is not faster than we are told to run.
* If we're faster, sleep a while *before* doing the fwrite() here.
*/
struct timeval now;
long timediff;
long sleep_time;
static curl_off_t addit = 0;
now = curlx_tvnow();
timediff = curlx_tvdiff(now, config->lastrecvtime); /* milliseconds */
if((config->recvpersecond > CURL_MAX_WRITE_SIZE) && (timediff < 100) ) {
/* If we allow a rather speedy transfer, add this amount for later
* checking. Also, do not modify the lastrecvtime as we will use a
* longer scope due to this addition. We wait for at least 100 ms to
* pass to get better values to do better math for the sleep. */
addit += size;
}
else {
size += addit; /* add up the possibly added bonus rounds from the
zero timediff calls */
addit = 0; /* clear the addition pool */
if( size*1000 > config->recvpersecond*timediff) {
/* figure out how many milliseconds to rest */
sleep_time = (long)(size*1000/config->recvpersecond - timediff);
/*
* Make sure we don't sleep for so long that we trigger the speed
* limit. This won't limit the bandwidth quite the way we've been
* asked to, but at least the transfer has a chance.
*/
if (config->low_speed_time > 0)
sleep_time = MIN(sleep_time,(config->low_speed_time * 1000) / 2);
if(sleep_time > 0) {
go_sleep(sleep_time);
now = curlx_tvnow();
}
}
config->lastrecvtime = now;
}
}
rc = fwrite(buffer, sz, nmemb, out->stream);
if((sz * nmemb) == rc) {
@ -2772,62 +2721,6 @@ static size_t my_fread(void *buffer, size_t sz, size_t nmemb, void *userp)
{
size_t rc;
struct InStruct *in=(struct InStruct *)userp;
struct Configurable *config = in->config;
curl_off_t size = (curl_off_t)(sz * nmemb); /* typecast to prevent warnings
when converting from
unsigned to signed */
if(config->sendpersecond) {
/*
* We know when we sent data the previous time. We know how much data
* we sent. Make sure that this was not faster than we are told to run.
* If we're faster, sleep a while *before* doing the fread() here.
* Also, make no larger fread() than should be sent this second!
*/
struct timeval now;
long timediff;
long sleep_time;
static curl_off_t addit = 0;
now = curlx_tvnow();
timediff = curlx_tvdiff(now, config->lastsendtime); /* milliseconds */
if((config->sendpersecond > CURL_MAX_WRITE_SIZE) &&
(timediff < 100)) {
/*
* We allow very fast transfers, then allow at least 100 ms between
* each sleeping mile-stone to create more accurate long-term rates.
*/
addit += size;
}
else {
/* If 'addit' is non-zero, it contains the total amount of bytes
uploaded during the last 'timediff' milliseconds. If it is zero,
we use the stored previous size. */
curl_off_t xfered = addit?addit:(curl_off_t)config->lastsendsize;
addit = 0; /* clear it for the next round */
if( xfered*1000 > config->sendpersecond*timediff) {
/* figure out how many milliseconds to rest */
sleep_time = (long)(xfered*1000/config->sendpersecond - timediff);
if(sleep_time > 0) {
go_sleep (sleep_time);
now = curlx_tvnow();
}
}
config->lastsendtime = now;
if(size > config->sendpersecond) {
/* lower the size to actually read */
nmemb = (size_t)config->sendpersecond;
sz = 1;
}
}
config->lastsendsize = sz*nmemb;
}
rc = fread(buffer, sz, nmemb, in->stream);
#if 0
@ -3890,11 +3783,10 @@ operate(struct Configurable *config, int argc, char *argv[])
curl_easy_setopt(curl, CURLOPT_IOCTLDATA, &input);
curl_easy_setopt(curl, CURLOPT_IOCTLFUNCTION, my_ioctl);
if(config->recvpersecond) {
if(config->recvpersecond)
/* tell libcurl to use a smaller sized buffer as it allows us to
make better sleeps! 7.9.9 stuff! */
curl_easy_setopt(curl, CURLOPT_BUFFERSIZE, config->recvpersecond);
}
/* size of uploaded file: */
curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, uploadfilesize);
@ -3944,8 +3836,14 @@ operate(struct Configurable *config, int argc, char *argv[])
config->conf&CONF_AUTO_REFERER);
curl_easy_setopt(curl, CURLOPT_USERAGENT, config->useragent);
curl_easy_setopt(curl, CURLOPT_FTPPORT, config->ftpport);
curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, config->low_speed_limit);
curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT,
config->low_speed_limit);
curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, config->low_speed_time);
curl_easy_setopt(curl, CURLOPT_MAX_SEND_SPEED_LARGE,
config->sendpersecond);
curl_easy_setopt(curl, CURLOPT_MAX_RECV_SPEED_LARGE,
config->recvpersecond);
curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE,
config->use_resume?config->resume_from:0);
curl_easy_setopt(curl, CURLOPT_COOKIE, config->cookie);