2002-09-03 07:52:59 -04:00
|
|
|
/***************************************************************************
|
2004-05-20 16:35:42 -04:00
|
|
|
* _ _ ____ _
|
|
|
|
* Project ___| | | | _ \| |
|
|
|
|
* / __| | | | |_) | |
|
|
|
|
* | (__| |_| | _ <| |___
|
1999-12-29 09:20:26 -05:00
|
|
|
* \___|\___/|_| \_\_____|
|
|
|
|
*
|
2021-01-18 05:56:50 -05:00
|
|
|
* Copyright (C) 1998 - 2021, Daniel Stenberg, <daniel@haxx.se>, et al.
|
1999-12-29 09:20:26 -05:00
|
|
|
*
|
2002-09-03 07:52:59 -04:00
|
|
|
* This software is licensed as described in the file COPYING, which
|
|
|
|
* you should have received as part of this distribution. The terms
|
2020-11-04 08:02:01 -05:00
|
|
|
* are also available at https://curl.se/docs/copyright.html.
|
2004-05-20 16:35:42 -04:00
|
|
|
*
|
2001-01-03 04:29:33 -05:00
|
|
|
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
|
|
|
* copies of the Software, and permit persons to whom the Software is
|
2002-09-03 07:52:59 -04:00
|
|
|
* furnished to do so, under the terms of the COPYING file.
|
1999-12-29 09:20:26 -05:00
|
|
|
*
|
2001-01-03 04:29:33 -05:00
|
|
|
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
|
|
|
* KIND, either express or implied.
|
1999-12-29 09:20:26 -05:00
|
|
|
*
|
2002-09-03 07:52:59 -04:00
|
|
|
***************************************************************************/
|
1999-12-29 09:20:26 -05:00
|
|
|
|
2013-01-06 13:06:49 -05:00
|
|
|
#include "curl_setup.h"
|
1999-12-29 09:20:26 -05:00
|
|
|
|
2013-01-03 20:50:28 -05:00
|
|
|
#include "urldata.h"
|
|
|
|
#include "sendf.h"
|
2018-02-10 09:13:15 -05:00
|
|
|
#include "multiif.h"
|
2013-01-03 20:50:28 -05:00
|
|
|
#include "progress.h"
|
2019-07-31 09:30:31 -04:00
|
|
|
#include "timeval.h"
|
2015-03-03 06:36:18 -05:00
|
|
|
#include "curl_printf.h"
|
2002-05-03 08:06:04 -04:00
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
/* check rate limits within this many recent milliseconds, at minimum. */
|
|
|
|
#define MIN_RATE_LIMIT_PERIOD 3000
|
|
|
|
|
2019-02-11 10:38:19 -05:00
|
|
|
#ifndef CURL_DISABLE_PROGRESS_METER
|
2004-03-23 06:43:34 -05:00
|
|
|
/* Provide a string that is 2 + 1 + 2 + 1 + 2 = 8 letters long (plus the zero
   byte) */
/* Render 'seconds' into the 9-byte buffer 'r' as "HH:MM:SS". Non-positive
   input yields dashes; durations of 100 hours or more switch to day-based
   formats so the result always fits in 8 visible characters. */
static void time2str(char *r, curl_off_t seconds)
{
  curl_off_t h;
  if(seconds <= 0) {
    /* unknown or zero duration: show a placeholder */
    strcpy(r, "--:--:--");
    return;
  }
  h = seconds / CURL_OFF_T_C(3600);
  if(h <= CURL_OFF_T_C(99)) {
    curl_off_t m = (seconds - (h*CURL_OFF_T_C(3600))) / CURL_OFF_T_C(60);
    curl_off_t s = (seconds - (h*CURL_OFF_T_C(3600))) - (m*CURL_OFF_T_C(60));
    msnprintf(r, 9, "%2" CURL_FORMAT_CURL_OFF_T ":%02" CURL_FORMAT_CURL_OFF_T
              ":%02" CURL_FORMAT_CURL_OFF_T, h, m, s);
  }
  else {
    /* this equals to more than 99 hours, switch to a more suitable output
       format to fit within the limits. */
    curl_off_t d = seconds / CURL_OFF_T_C(86400);
    h = (seconds - (d*CURL_OFF_T_C(86400))) / CURL_OFF_T_C(3600);
    if(d <= CURL_OFF_T_C(999))
      /* "DDDd HHh" - days plus hours */
      msnprintf(r, 9, "%3" CURL_FORMAT_CURL_OFF_T
                "d %02" CURL_FORMAT_CURL_OFF_T "h", d, h);
    else
      /* 1000 days or more: days only */
      msnprintf(r, 9, "%7" CURL_FORMAT_CURL_OFF_T "d", d);
  }
}
|
|
|
|
|
|
|
|
/* The point of this function would be to return a string of the input data,
   but never longer than 5 columns (+ one zero byte).
   Add suffix k, M, G when suitable...

   'max5' must point to a buffer of at least 6 bytes (msnprintf is bounded to
   6 below). Returns 'max5' for convenient inline use in printf argument
   lists. */
static char *max5data(curl_off_t bytes, char *max5)
{
#define ONE_KILOBYTE CURL_OFF_T_C(1024)
#define ONE_MEGABYTE (CURL_OFF_T_C(1024) * ONE_KILOBYTE)
#define ONE_GIGABYTE (CURL_OFF_T_C(1024) * ONE_MEGABYTE)
#define ONE_TERABYTE (CURL_OFF_T_C(1024) * ONE_GIGABYTE)
#define ONE_PETABYTE (CURL_OFF_T_C(1024) * ONE_TERABYTE)

  if(bytes < CURL_OFF_T_C(100000))
    /* less than 100000 bytes: print the raw number */
    msnprintf(max5, 6, "%5" CURL_FORMAT_CURL_OFF_T, bytes);

  else if(bytes < CURL_OFF_T_C(10000) * ONE_KILOBYTE)
    /* 'XXXXk' up to (but not including) 10000 kilobytes */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "k", bytes/ONE_KILOBYTE);

  else if(bytes < CURL_OFF_T_C(100) * ONE_MEGABYTE)
    /* 'XX.XM' is good as long as we're less than 100 megs */
    msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
              CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE,
              (bytes%ONE_MEGABYTE) / (ONE_MEGABYTE/CURL_OFF_T_C(10)) );

#if (SIZEOF_CURL_OFF_T > 4)

  else if(bytes < CURL_OFF_T_C(10000) * ONE_MEGABYTE)
    /* 'XXXXM' is good until we're at 10000MB or above */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);

  else if(bytes < CURL_OFF_T_C(100) * ONE_GIGABYTE)
    /* 10000 MB - 100 GB, we show it as XX.XG */
    msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
              CURL_FORMAT_CURL_OFF_T "G", bytes/ONE_GIGABYTE,
              (bytes%ONE_GIGABYTE) / (ONE_GIGABYTE/CURL_OFF_T_C(10)) );

  else if(bytes < CURL_OFF_T_C(10000) * ONE_GIGABYTE)
    /* up to 10000GB, display without decimal: XXXXG */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "G", bytes/ONE_GIGABYTE);

  else if(bytes < CURL_OFF_T_C(10000) * ONE_TERABYTE)
    /* up to 10000TB, display without decimal: XXXXT */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "T", bytes/ONE_TERABYTE);

  else
    /* up to 10000PB, display without decimal: XXXXP */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "P", bytes/ONE_PETABYTE);

  /* 16384 petabytes (16 exabytes) is the maximum a 64 bit unsigned number
     can hold, but our data type is signed so 8192PB will be the maximum. */

#else

  else
    /* with a 32 bit curl_off_t, 'M' is the largest suffix ever needed */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);

#endif

  return max5;
}
|
2019-02-11 10:38:19 -05:00
|
|
|
#endif
|
2000-02-14 18:15:08 -05:00
|
|
|
|
2004-05-20 16:35:42 -04:00
|
|
|
/*
|
2000-02-14 18:15:08 -05:00
|
|
|
|
|
|
|
New proposed interface, 9th of February 2000:
|
|
|
|
|
|
|
|
pgrsStartNow() - sets start time
|
|
|
|
pgrsSetDownloadSize(x) - known expected download size
|
|
|
|
pgrsSetUploadSize(x) - known expected upload size
|
|
|
|
pgrsSetDownloadCounter() - amount of data currently downloaded
|
|
|
|
pgrsSetUploadCounter() - amount of data currently uploaded
|
|
|
|
pgrsUpdate() - show progress
|
|
|
|
pgrsDone() - transfer complete
|
|
|
|
|
|
|
|
*/
|
2000-06-16 09:15:36 -04:00
|
|
|
|
2021-01-18 05:56:50 -05:00
|
|
|
int Curl_pgrsDone(struct Curl_easy *data)
|
2000-02-14 18:15:08 -05:00
|
|
|
{
|
2012-06-10 17:39:04 -04:00
|
|
|
int rc;
|
2017-09-09 17:09:06 -04:00
|
|
|
data->progress.lastshow = 0;
|
2021-01-18 05:56:50 -05:00
|
|
|
rc = Curl_pgrsUpdate(data); /* the final (forced) update */
|
2012-06-10 17:39:04 -04:00
|
|
|
if(rc)
|
|
|
|
return rc;
|
2004-11-26 09:33:13 -05:00
|
|
|
|
2011-03-20 21:00:29 -04:00
|
|
|
if(!(data->progress.flags & PGRS_HIDE) &&
|
|
|
|
!data->progress.callback)
|
|
|
|
/* only output if we don't use a progress callback and we're not
|
|
|
|
* hidden */
|
|
|
|
fprintf(data->set.err, "\n");
|
|
|
|
|
2004-11-26 09:33:13 -05:00
|
|
|
data->progress.speeder_c = 0; /* reset the progress meter display */
|
2012-06-10 17:39:04 -04:00
|
|
|
return 0;
|
2000-02-14 18:15:08 -05:00
|
|
|
}
|
|
|
|
|
2017-06-21 13:15:46 -04:00
|
|
|
/* Forget the previously known expected transfer sizes in both directions.
   A negative size flags the value as unknown. */
void Curl_pgrsResetTransferSizes(struct Curl_easy *data)
{
  Curl_pgrsSetDownloadSize(data, -1);
  Curl_pgrsSetUploadSize(data, -1);
}
|
|
|
|
|
2017-06-26 12:51:05 -04:00
|
|
|
/*
 * Curl_pgrsTime(). Store the current time at the given label. This fetches a
 * fresh "now" and returns it.
 *
 * For the TIMER_NAMELOOKUP/CONNECT/APPCONNECT/PRETRANSFER/STARTTRANSFER
 * labels the elapsed time since the start of the current single transfer is
 * accumulated (added, not assigned) into the corresponding t_* field.
 *
 * @unittest: 1399
 */
struct curltime Curl_pgrsTime(struct Curl_easy *data, timerid timer)
{
  struct curltime now = Curl_now();
  timediff_t *delta = NULL;

  switch(timer) {
  default:
  case TIMER_NONE:
    /* mistake filter */
    break;
  case TIMER_STARTOP:
    /* This is set at the start of a transfer */
    data->progress.t_startop = now;
    break;
  case TIMER_STARTSINGLE:
    /* This is set at the start of each single fetch */
    data->progress.t_startsingle = now;
    data->progress.is_t_startransfer_set = false;
    break;
  case TIMER_STARTACCEPT:
    data->progress.t_acceptdata = now;
    break;
  case TIMER_NAMELOOKUP:
    delta = &data->progress.t_nslookup;
    break;
  case TIMER_CONNECT:
    delta = &data->progress.t_connect;
    break;
  case TIMER_APPCONNECT:
    delta = &data->progress.t_appconnect;
    break;
  case TIMER_PRETRANSFER:
    delta = &data->progress.t_pretransfer;
    break;
  case TIMER_STARTTRANSFER:
    delta = &data->progress.t_starttransfer;
    /* prevent updating t_starttransfer unless:
     * 1) this is the first time we're setting t_starttransfer
     * 2) a redirect has occurred since the last time t_starttransfer was set
     * This prevents repeated invocations of the function from incorrectly
     * changing the t_starttransfer time.
     */
    if(data->progress.is_t_startransfer_set) {
      /* already set for this single transfer: skip the delta update below */
      return now;
    }
    else {
      data->progress.is_t_startransfer_set = true;
      break;
    }
  case TIMER_POSTRANSFER:
    /* this is the normal end-of-transfer thing */
    break;
  case TIMER_REDIRECT:
    data->progress.t_redirect = Curl_timediff_us(now, data->progress.start);
    break;
  }
  if(delta) {
    /* accumulate the microseconds since the start of this single transfer */
    timediff_t us = Curl_timediff_us(now, data->progress.t_startsingle);
    if(us < 1)
      us = 1; /* make sure at least one microsecond passed */
    *delta += us;
  }
  return now;
}
|
|
|
|
|
2016-06-21 09:47:12 -04:00
|
|
|
void Curl_pgrsStartNow(struct Curl_easy *data)
|
2000-02-14 18:15:08 -05:00
|
|
|
{
|
2001-04-17 11:00:17 -04:00
|
|
|
data->progress.speeder_c = 0; /* reset the progress meter display */
|
2017-10-25 05:59:43 -04:00
|
|
|
data->progress.start = Curl_now();
|
2017-06-21 13:15:46 -04:00
|
|
|
data->progress.is_t_startransfer_set = false;
|
2020-11-07 17:53:24 -05:00
|
|
|
data->progress.ul_limit_start = data->progress.start;
|
|
|
|
data->progress.dl_limit_start = data->progress.start;
|
2021-05-11 03:09:11 -04:00
|
|
|
data->progress.ul_limit_size = 0;
|
|
|
|
data->progress.dl_limit_size = 0;
|
2019-07-29 06:16:43 -04:00
|
|
|
data->progress.downloaded = 0;
|
|
|
|
data->progress.uploaded = 0;
|
2011-09-22 16:34:54 -04:00
|
|
|
/* clear all bits except HIDE and HEADERS_OUT */
|
|
|
|
data->progress.flags &= PGRS_HIDE|PGRS_HEADERS_OUT;
|
2018-03-15 11:43:00 -04:00
|
|
|
Curl_ratelimit(data, data->progress.start);
|
2000-02-14 18:15:08 -05:00
|
|
|
}
|
|
|
|
|
2016-08-16 14:32:02 -04:00
|
|
|
/*
|
2018-03-10 08:07:38 -05:00
|
|
|
* This is used to handle speed limits, calculating how many milliseconds to
|
|
|
|
* wait until we're back under the speed limit, if needed.
|
2016-08-16 14:32:02 -04:00
|
|
|
*
|
|
|
|
* The way it works is by having a "starting point" (time & amount of data
|
2017-03-26 11:02:22 -04:00
|
|
|
* transferred by then) used in the speed computation, to be used instead of
|
|
|
|
* the start of the transfer. This starting point is regularly moved as
|
|
|
|
* transfer goes on, to keep getting accurate values (instead of average over
|
|
|
|
* the entire transfer).
|
2016-08-16 14:32:02 -04:00
|
|
|
*
|
2017-03-26 11:02:22 -04:00
|
|
|
* This function takes the current amount of data transferred, the amount at
|
|
|
|
* the starting point, the limit (in bytes/s), the time of the starting point
|
|
|
|
* and the current time.
|
2016-08-16 14:32:02 -04:00
|
|
|
*
|
2018-03-10 08:07:38 -05:00
|
|
|
* Returns 0 if no waiting is needed or when no waiting is needed but the
|
|
|
|
* starting point should be reset (to current); or the number of milliseconds
|
|
|
|
* to wait to get back under the speed limit.
|
2016-08-16 14:32:02 -04:00
|
|
|
*/
|
2018-03-10 08:07:38 -05:00
|
|
|
timediff_t Curl_pgrsLimitWaitTime(curl_off_t cursize,
|
|
|
|
curl_off_t startsize,
|
|
|
|
curl_off_t limit,
|
|
|
|
struct curltime start,
|
|
|
|
struct curltime now)
|
2016-08-16 14:32:02 -04:00
|
|
|
{
|
2016-11-11 04:19:22 -05:00
|
|
|
curl_off_t size = cursize - startsize;
|
2019-07-31 09:30:31 -04:00
|
|
|
timediff_t minimum;
|
|
|
|
timediff_t actual;
|
2016-08-16 14:32:02 -04:00
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
if(!limit || !size)
|
2018-03-10 08:07:38 -05:00
|
|
|
return 0;
|
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
/*
|
|
|
|
* 'minimum' is the number of milliseconds 'size' should take to download to
|
|
|
|
* stay below 'limit'.
|
|
|
|
*/
|
2018-03-10 08:07:38 -05:00
|
|
|
if(size < CURL_OFF_T_MAX/1000)
|
2020-05-28 18:08:03 -04:00
|
|
|
minimum = (timediff_t) (CURL_OFF_T_C(1000) * size / limit);
|
2018-03-10 08:07:38 -05:00
|
|
|
else {
|
2020-05-28 18:08:03 -04:00
|
|
|
minimum = (timediff_t) (size / limit);
|
2019-07-31 09:30:31 -04:00
|
|
|
if(minimum < TIMEDIFF_T_MAX/1000)
|
2018-03-12 16:26:31 -04:00
|
|
|
minimum *= 1000;
|
2018-03-10 08:07:38 -05:00
|
|
|
else
|
2019-07-31 09:30:31 -04:00
|
|
|
minimum = TIMEDIFF_T_MAX;
|
2018-03-10 08:07:38 -05:00
|
|
|
}
|
2016-08-16 14:32:02 -04:00
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
/*
|
|
|
|
* 'actual' is the time in milliseconds it took to actually download the
|
|
|
|
* last 'size' bytes.
|
|
|
|
*/
|
2017-10-23 06:05:49 -04:00
|
|
|
actual = Curl_timediff(now, start);
|
2018-03-15 11:43:00 -04:00
|
|
|
if(actual < minimum) {
|
|
|
|
/* if it downloaded the data faster than the limit, make it wait the
|
|
|
|
difference */
|
2018-03-10 08:07:38 -05:00
|
|
|
return (minimum - actual);
|
2018-03-15 11:43:00 -04:00
|
|
|
}
|
2017-03-10 08:28:37 -05:00
|
|
|
|
|
|
|
return 0;
|
2016-08-16 14:32:02 -04:00
|
|
|
}
|
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
/*
 * Set the number of downloaded bytes so far.
 */
void Curl_pgrsSetDownloadCounter(struct Curl_easy *data, curl_off_t size)
{
  /* 'size' is the absolute total for this transfer, not an increment */
  data->progress.downloaded = size;
}
|
2016-08-16 14:32:02 -04:00
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
/*
|
|
|
|
* Update the timestamp and sizestamp to use for rate limit calculations.
|
|
|
|
*/
|
|
|
|
void Curl_ratelimit(struct Curl_easy *data, struct curltime now)
|
|
|
|
{
|
|
|
|
/* don't set a new stamp unless the time since last update is long enough */
|
2021-03-26 08:08:44 -04:00
|
|
|
if(data->set.max_recv_speed) {
|
2018-03-15 11:43:00 -04:00
|
|
|
if(Curl_timediff(now, data->progress.dl_limit_start) >=
|
|
|
|
MIN_RATE_LIMIT_PERIOD) {
|
|
|
|
data->progress.dl_limit_start = now;
|
|
|
|
data->progress.dl_limit_size = data->progress.downloaded;
|
|
|
|
}
|
|
|
|
}
|
2021-03-26 08:08:44 -04:00
|
|
|
if(data->set.max_send_speed) {
|
2018-03-15 11:43:00 -04:00
|
|
|
if(Curl_timediff(now, data->progress.ul_limit_start) >=
|
|
|
|
MIN_RATE_LIMIT_PERIOD) {
|
|
|
|
data->progress.ul_limit_start = now;
|
|
|
|
data->progress.ul_limit_size = data->progress.uploaded;
|
|
|
|
}
|
2016-08-16 14:32:02 -04:00
|
|
|
}
|
2000-02-14 18:15:08 -05:00
|
|
|
}
|
|
|
|
|
2018-03-15 11:43:00 -04:00
|
|
|
/*
 * Set the number of uploaded bytes so far.
 */
void Curl_pgrsSetUploadCounter(struct Curl_easy *data, curl_off_t size)
{
  /* 'size' is the absolute total for this transfer, not an increment */
  data->progress.uploaded = size;
}
|
|
|
|
|
2016-06-21 09:47:12 -04:00
|
|
|
/*
 * Store the expected total download size. A negative value means the size
 * is unknown, which zeroes the stored size and clears the "known" flag.
 */
void Curl_pgrsSetDownloadSize(struct Curl_easy *data, curl_off_t size)
{
  if(size < 0) {
    /* unknown expected size */
    data->progress.size_dl = 0;
    data->progress.flags &= ~PGRS_DL_SIZE_KNOWN;
  }
  else {
    data->progress.size_dl = size;
    data->progress.flags |= PGRS_DL_SIZE_KNOWN;
  }
}
|
|
|
|
|
2016-06-21 09:47:12 -04:00
|
|
|
/*
 * Store the expected total upload size. A negative value means the size
 * is unknown, which zeroes the stored size and clears the "known" flag.
 */
void Curl_pgrsSetUploadSize(struct Curl_easy *data, curl_off_t size)
{
  if(size < 0) {
    /* unknown expected size */
    data->progress.size_ul = 0;
    data->progress.flags &= ~PGRS_UL_SIZE_KNOWN;
  }
  else {
    data->progress.size_ul = size;
    data->progress.flags |= PGRS_UL_SIZE_KNOWN;
  }
}
|
|
|
|
|
2021-05-08 07:10:06 -04:00
|
|
|
/* returns the average speed in bytes / second */
|
|
|
|
static curl_off_t trspeed(curl_off_t size, /* number of bytes */
|
|
|
|
curl_off_t us) /* microseconds */
|
2021-05-08 06:28:38 -04:00
|
|
|
{
|
2021-05-08 07:10:06 -04:00
|
|
|
if(us < 1)
|
|
|
|
return size * 1000000;
|
|
|
|
return (curl_off_t)((long double)size/us * 1000000);
|
2021-05-08 06:28:38 -04:00
|
|
|
}
|
|
|
|
|
2019-07-18 17:23:35 -04:00
|
|
|
/* returns TRUE if it's time to show the progress meter */
/* Updates data->progress with the elapsed time, the average dl/ul speeds and
   - at most once per second - the "current speed" derived from a circular
   buffer of the last CURR_TIME per-second samples. */
static bool progress_calc(struct Curl_easy *data, struct curltime now)
{
  bool timetoshow = FALSE;
  struct Progress * const p = &data->progress;

  /* The time spent so far (from the start) in microseconds */
  p->timespent = Curl_timediff_us(now, p->start);
  p->dlspeed = trspeed(p->downloaded, p->timespent);
  p->ulspeed = trspeed(p->uploaded, p->timespent);

  /* Calculations done at most once a second, unless end is reached */
  if(p->lastshow != now.tv_sec) {
    int countindex; /* amount of seconds stored in the speeder array */
    int nowindex = p->speeder_c% CURR_TIME;
    p->lastshow = now.tv_sec;
    timetoshow = TRUE;

    /* Let's do the "current speed" thing, with the dl + ul speeds
       combined. Store the speed at entry 'nowindex'. */
    p->speeder[ nowindex ] = p->downloaded + p->uploaded;

    /* remember the exact time for this moment */
    p->speeder_time [ nowindex ] = now;

    /* advance our speeder_c counter, which is increased every time we get
       here and we expect it to never wrap as 2^32 is a lot of seconds! */
    p->speeder_c++;

    /* figure out how many index entries of data we have stored in our speeder
       array. With N_ENTRIES filled in, we have about N_ENTRIES-1 seconds of
       transfer. Imagine, after one second we have filled in two entries,
       after two seconds we've filled in three entries etc. */
    countindex = ((p->speeder_c >= CURR_TIME)? CURR_TIME:p->speeder_c) - 1;

    /* first of all, we don't do this if there's no counted seconds yet */
    if(countindex) {
      int checkindex;
      timediff_t span_ms;
      curl_off_t amount;

      /* Get the index position to compare with the 'nowindex' position.
         Get the oldest entry possible. While we have less than CURR_TIME
         entries, the first entry will remain the oldest. */
      checkindex = (p->speeder_c >= CURR_TIME)? p->speeder_c%CURR_TIME:0;

      /* Figure out the exact time for the time span */
      span_ms = Curl_timediff(now, p->speeder_time[checkindex]);
      if(0 == span_ms)
        span_ms = 1; /* at least one millisecond MUST have passed */

      /* Calculate the average speed the last 'span_ms' milliseconds */
      amount = p->speeder[nowindex]- p->speeder[checkindex];

      if(amount > CURL_OFF_T_C(4294967) /* 0xffffffff/1000 */)
        /* the 'amount' value is bigger than would fit in 32 bits if
           multiplied with 1000, so we use the double math for this */
        p->current_speed = (curl_off_t)
          ((double)amount/((double)span_ms/1000.0));
      else
        /* the 'amount' value is small enough to fit within 32 bits even
           when multiplied with 1000 */
        p->current_speed = amount*CURL_OFF_T_C(1000)/span_ms;
    }
    else
      /* the first second we use the average */
      p->current_speed = p->ulspeed + p->dlspeed;

  } /* Calculations end */
  return timetoshow;
}
|
2019-05-22 17:15:34 -04:00
|
|
|
|
2019-02-11 10:38:19 -05:00
|
|
|
#ifndef CURL_DISABLE_PROGRESS_METER
|
2021-01-18 05:56:50 -05:00
|
|
|
/* Render one refresh of the classic curl progress meter onto data->set.err:
   a header block the first time, then a single \r-rewritten status line with
   percentages, sizes, speeds and time estimates. Assumes progress_calc() has
   already refreshed the progress state. */
static void progress_meter(struct Curl_easy *data)
{
  char max5[6][10];
  curl_off_t dlpercen = 0;
  curl_off_t ulpercen = 0;
  curl_off_t total_percen = 0;
  curl_off_t total_transfer;
  curl_off_t total_expected_transfer;
  char time_left[10];
  char time_total[10];
  char time_spent[10];
  curl_off_t ulestimate = 0;
  curl_off_t dlestimate = 0;
  curl_off_t total_estimate;
  curl_off_t timespent =
    (curl_off_t)data->progress.timespent/1000000; /* seconds */

  if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
    if(data->state.resume_from) {
      fprintf(data->set.err,
              "** Resuming transfer from byte position %"
              CURL_FORMAT_CURL_OFF_T "\n", data->state.resume_from);
    }
    fprintf(data->set.err,
            "  %% Total    %% Received %% Xferd  Average Speed   "
            "Time    Time     Time  Current\n"
            "                                 Dload  Upload   "
            "Total   Spent    Left  Speed\n");
    data->progress.flags |= PGRS_HEADERS_OUT; /* headers are shown */
  }

  /* Figure out the estimated time of arrival for the upload */
  if((data->progress.flags & PGRS_UL_SIZE_KNOWN) &&
     (data->progress.ulspeed > CURL_OFF_T_C(0))) {
    ulestimate = data->progress.size_ul / data->progress.ulspeed;

    /* divide the size first for large values to avoid overflowing the
       percentage multiplication */
    if(data->progress.size_ul > CURL_OFF_T_C(10000))
      ulpercen = data->progress.uploaded /
        (data->progress.size_ul/CURL_OFF_T_C(100));
    else if(data->progress.size_ul > CURL_OFF_T_C(0))
      ulpercen = (data->progress.uploaded*100) /
        data->progress.size_ul;
  }

  /* ... and the download */
  if((data->progress.flags & PGRS_DL_SIZE_KNOWN) &&
     (data->progress.dlspeed > CURL_OFF_T_C(0))) {
    dlestimate = data->progress.size_dl / data->progress.dlspeed;

    if(data->progress.size_dl > CURL_OFF_T_C(10000))
      dlpercen = data->progress.downloaded /
        (data->progress.size_dl/CURL_OFF_T_C(100));
    else if(data->progress.size_dl > CURL_OFF_T_C(0))
      dlpercen = (data->progress.downloaded*100) /
        data->progress.size_dl;
  }

  /* Now figure out which of them is slower and use that one for the
     total estimate! */
  total_estimate = ulestimate>dlestimate?ulestimate:dlestimate;

  /* create the three time strings */
  time2str(time_left, total_estimate > 0?(total_estimate - timespent):0);
  time2str(time_total, total_estimate);
  time2str(time_spent, timespent);

  /* Get the total amount of data expected to get transferred */
  total_expected_transfer =
    ((data->progress.flags & PGRS_UL_SIZE_KNOWN)?
     data->progress.size_ul:data->progress.uploaded)+
    ((data->progress.flags & PGRS_DL_SIZE_KNOWN)?
     data->progress.size_dl:data->progress.downloaded);

  /* We have transferred this much so far */
  total_transfer = data->progress.downloaded + data->progress.uploaded;

  /* Get the percentage of data transferred so far */
  if(total_expected_transfer > CURL_OFF_T_C(10000))
    total_percen = total_transfer /
      (total_expected_transfer/CURL_OFF_T_C(100));
  else if(total_expected_transfer > CURL_OFF_T_C(0))
    total_percen = (total_transfer*100) / total_expected_transfer;

  fprintf(data->set.err,
          "\r"
          "%3" CURL_FORMAT_CURL_OFF_T " %s  "
          "%3" CURL_FORMAT_CURL_OFF_T " %s  "
          "%3" CURL_FORMAT_CURL_OFF_T " %s  %s  %s %s %s %s %s",
          total_percen,  /* 3 letters */                /* total % */
          max5data(total_expected_transfer, max5[2]),   /* total size */
          dlpercen,  /* 3 letters */                    /* rcvd % */
          max5data(data->progress.downloaded, max5[0]), /* rcvd size */
          ulpercen,  /* 3 letters */                    /* xfer % */
          max5data(data->progress.uploaded, max5[1]),   /* xfer size */
          max5data(data->progress.dlspeed, max5[3]),    /* avrg dl speed */
          max5data(data->progress.ulspeed, max5[4]),    /* avrg ul speed */
          time_total,    /* 8 letters */                /* total time */
          time_spent,    /* 8 letters */                /* time spent */
          time_left,     /* 8 letters */                /* time left */
          max5data(data->progress.current_speed, max5[5])
    );

  /* we flush the output stream to make it appear as soon as possible */
  fflush(data->set.err);
}
|
|
|
|
#else
|
|
|
|
/* progress bar disabled */
|
2019-07-18 17:23:35 -04:00
|
|
|
#define progress_meter(x) Curl_nop_stmt
|
2019-02-11 10:38:19 -05:00
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Curl_pgrsUpdate() returns 0 for success or the value returned by the
 * progress callback!
 *
 * Refreshes the internal progress state, invokes the application's xferinfo
 * (or the older, deprecated progress) callback if one is set, and draws the
 * built-in meter at most once a second when it is not hidden.
 */
int Curl_pgrsUpdate(struct Curl_easy *data)
{
  struct curltime now = Curl_now(); /* what time is it */
  bool showprogress = progress_calc(data, now);
  if(!(data->progress.flags & PGRS_HIDE)) {
    if(data->set.fxferinfo) {
      int result;
      /* There's a callback set, call that */
      Curl_set_in_callback(data, true);
      result = data->set.fxferinfo(data->set.progress_client,
                                   data->progress.size_dl,
                                   data->progress.downloaded,
                                   data->progress.size_ul,
                                   data->progress.uploaded);
      Curl_set_in_callback(data, false);
      /* any result other than CONTINUE is passed back to the caller; a
         non-zero value means the callback asked to abort */
      if(result != CURL_PROGRESSFUNC_CONTINUE) {
        if(result)
          failf(data, "Callback aborted");
        return result;
      }
    }
    else if(data->set.fprogress) {
      int result;
      /* The older deprecated callback is set, call that */
      Curl_set_in_callback(data, true);
      result = data->set.fprogress(data->set.progress_client,
                                   (double)data->progress.size_dl,
                                   (double)data->progress.downloaded,
                                   (double)data->progress.size_ul,
                                   (double)data->progress.uploaded);
      Curl_set_in_callback(data, false);
      if(result != CURL_PROGRESSFUNC_CONTINUE) {
        if(result)
          failf(data, "Callback aborted");
        return result;
      }
    }

    /* the meter is drawn at most once per second (see progress_calc) */
    if(showprogress)
      progress_meter(data);
  }

  return 0;
}
|