curl/tests/libtest/lib1900.c

Multiple pipelines and limiting the number of connections.

Introducing a number of options to the multi interface that allow
multiple pipelines to the same host, in order to optimize the balance
between the penalty for opening new connections and the potential
pipelining latency.

Two new options for limiting the number of connections:

CURLMOPT_MAX_HOST_CONNECTIONS - Limits the number of running connections
to the same host. When adding a handle that exceeds this limit, that
handle will be put in a pending state until another handle is finished,
so we can reuse the connection.

CURLMOPT_MAX_TOTAL_CONNECTIONS - Limits the number of connections in
total. When adding a handle that exceeds this limit, that handle will be
put in a pending state until another handle is finished. The free
connection will then be reused, if possible, or closed if the pending
handle can't reuse it.

Several new options for pipelining:

CURLMOPT_MAX_PIPELINE_LENGTH - Limits the pipelining length. If a
pipeline is "full" when a connection is to be reused, a new connection
will be opened if the CURLMOPT_MAX_xxx_CONNECTIONS limits allow it. If
not, the handle will be put in a pending state until a connection is
ready (either free or a pipe got shorter).

CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE - A pipelined connection will not
be reused if it is currently processing a transfer with a content
length that is larger than this.

CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE - A pipelined connection will not be
reused if it is currently processing a chunk larger than this.

CURLMOPT_PIPELINING_SITE_BL - A blacklist of hosts that don't allow
pipelining.

CURLMOPT_PIPELINING_SERVER_BL - A blacklist of server types that don't
allow pipelining.

See the curl_multi_setopt() man page for details.
2013-02-15 05:50:45 -05:00
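The options described above are set with curl_multi_setopt(). As a rough
standalone sketch (the limit values, the host name and the server string
below are made-up examples for illustration, not values taken from this
test), configuring a multi handle could look like this:

#include <curl/curl.h>

int main(void)
{
  CURLM *multi;
  /* NULL-terminated example blacklists; these names are placeholders */
  char *site_bl[] = { "never-pipeline.example.com", NULL };
  char *server_bl[] = { "Example-Server/1.0", NULL };

  curl_global_init(CURL_GLOBAL_ALL);
  multi = curl_multi_init();

  /* enable HTTP pipelining on this multi handle */
  curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);

  /* connection limits: at most two connections per host and four in total;
     easy handles that would exceed the limits wait in a pending state */
  curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 2L);
  curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 4L);

  /* pipelining tuning: at most three requests on one pipeline, and do not
     pipeline on a connection busy with a response or chunk larger than the
     given sizes */
  curl_multi_setopt(multi, CURLMOPT_MAX_PIPELINE_LENGTH, 3L);
  curl_multi_setopt(multi, CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE, 15000L);
  curl_multi_setopt(multi, CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE, 10000L);

  /* hosts and server types that must never be pipelined with */
  curl_multi_setopt(multi, CURLMOPT_PIPELINING_SITE_BL, site_bl);
  curl_multi_setopt(multi, CURLMOPT_PIPELINING_SERVER_BL, server_bl);

  /* ... create easy handles, add them with curl_multi_add_handle() and
     drive the transfers with curl_multi_perform(), much as the test
     below does ... */

  curl_multi_cleanup(multi);
  curl_global_cleanup();
  return 0;
}

The test source below sets most of the same options against the test
harness server.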
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
*
* Copyright (C) 2013, Linus Nielsen Feltzing, <linus@haxx.se>
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include "test.h"
#include "testutil.h"
#include "warnless.h"
#include "memdebug.h"
#define TEST_HANG_TIMEOUT 60 * 1000
#define MAX_URLS 200
#define MAX_BLACKLIST 20
int urltime[MAX_URLS];
char *urlstring[MAX_URLS];
CURL *handles[MAX_URLS];
char *site_blacklist[MAX_BLACKLIST];
char *server_blacklist[MAX_BLACKLIST];
int num_handles;
int blacklist_num_servers;
int blacklist_num_sites;
int parse_url_file(const char *filename);
void free_urls(void);
int create_handles(void);
void setup_handle(char *base_url, CURLM *m, int handlenum);
void remove_handles(void);
/* Write callback that discards the received data and reports it as
   consumed; this test only exercises connection reuse and pipelining,
   not the payloads. */
static size_t
write_callback(void *contents, size_t size, size_t nmemb, void *userp)
{
  size_t realsize = size * nmemb;
  (void)contents;
  (void)userp;

  return realsize;
}

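/* Parse the url file used by the test case. Each line is either
   "<delay in ms> <url suffix>", scheduling one transfer, or
   "blacklist_site <host>", naming a host that must never be pipelined. */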
int parse_url_file(const char *filename)
{
  FILE *f;
  int time;
  char buf[200];

  num_handles = 0;
  blacklist_num_sites = 0;
  blacklist_num_servers = 0;

  f = fopen(filename, "rb");
  if(!f)
    return 0;

  while(!feof(f)) {
    /* check the fscanf() return codes explicitly so that a partial match
       or EOF does not reuse stale values from the previous line */
    if(fscanf(f, "%d %199s\n", &time, buf) == 2) {
      urltime[num_handles] = time;
      urlstring[num_handles] = strdup(buf);
      num_handles++;
      continue;
    }

    if(fscanf(f, "blacklist_site %199s\n", buf) == 1) {
      site_blacklist[blacklist_num_sites] = strdup(buf);
      blacklist_num_sites++;
      continue;
    }

    break;
  }
  fclose(f);

  site_blacklist[blacklist_num_sites] = NULL;
  server_blacklist[blacklist_num_servers] = NULL;

  return num_handles;
}

void free_urls(void)
{
  int i;

  for(i = 0; i < num_handles; i++) {
    free(urlstring[i]);
  }
  for(i = 0; i < blacklist_num_servers; i++) {
    free(server_blacklist[i]);
  }
  for(i = 0; i < blacklist_num_sites; i++) {
    free(site_blacklist[i]);
  }
}

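/* Create one easy handle per URL read from the url file. */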
int create_handles(void)
{
  int i;

  for(i = 0; i < num_handles; i++) {
    handles[i] = curl_easy_init();
  }
  return 0;
}

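/* Build the full URL from the test server base URL and the per-handle
   suffix, set up the easy handle and add it to the multi handle. */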
void setup_handle(char *base_url, CURLM *m, int handlenum)
{
  char urlbuf[256];

  sprintf(urlbuf, "%s%s", base_url, urlstring[handlenum]);
  curl_easy_setopt(handles[handlenum], CURLOPT_URL, urlbuf);
  curl_easy_setopt(handles[handlenum], CURLOPT_VERBOSE, 1L);
  curl_easy_setopt(handles[handlenum], CURLOPT_FAILONERROR, 1L);
  curl_easy_setopt(handles[handlenum], CURLOPT_WRITEFUNCTION, write_callback);
  curl_easy_setopt(handles[handlenum], CURLOPT_WRITEDATA, NULL);
  curl_multi_add_handle(m, handles[handlenum]);
}

void remove_handles(void)
{
  int i;

  for(i = 0; i < num_handles; i++) {
    if(handles[i])
      curl_easy_cleanup(handles[i]);
  }
}

int test(char *URL)
{
  int res = 0;
  CURLM *m = NULL;
  CURLMsg *msg; /* for picking up messages with the transfer status */
  int msgs_left; /* how many messages are left */
  int running;
  int handlenum = 0;
  struct timeval last_handle_add;

  if(parse_url_file("log/urls.txt") <= 0)
    goto test_cleanup;

  start_test_timing();

  curl_global_init(CURL_GLOBAL_ALL);

  m = curl_multi_init();

  create_handles();

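  /* Configure the pipelining and connection-limit options under test: enable
     pipelining, allow at most two connections per host and three requests per
     pipeline, avoid pipelining on connections busy with large responses or
     chunks, and install the site blacklist read from the url file plus the
     (empty) server blacklist. */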
  multi_setopt(m, CURLMOPT_PIPELINING, 1L);
  multi_setopt(m, CURLMOPT_MAX_HOST_CONNECTIONS, 2L);
  multi_setopt(m, CURLMOPT_MAX_PIPELINE_LENGTH, 3L);
  multi_setopt(m, CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE, 15000L);
  multi_setopt(m, CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE, 10000L);
  multi_setopt(m, CURLMOPT_PIPELINING_SITE_BL, site_blacklist);
  multi_setopt(m, CURLMOPT_PIPELINING_SERVER_BL, server_blacklist);

  gettimeofday(&last_handle_add, NULL);

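  /* Main loop: add each handle once its scheduled time arrives, drive the
     multi handle and pick up completed transfers, until all handles have
     been added and have finished. */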
  for(;;) {
    struct timeval interval;
    struct timeval now;
    long int msnow, mslast;
    fd_set rd, wr, exc;
    int maxfd = -99;
    long timeout;

    interval.tv_sec = 1;
    interval.tv_usec = 0;

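    /* Add the next handle once its delay (in milliseconds, relative to the
       previous add) has elapsed. */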
    if(handlenum < num_handles) {
      gettimeofday(&now, NULL);
      msnow = now.tv_sec * 1000 + now.tv_usec / 1000;
      mslast = last_handle_add.tv_sec * 1000 +
        last_handle_add.tv_usec / 1000;
      if(msnow - mslast >= urltime[handlenum]) {
        fprintf(stdout, "Adding handle %d\n", handlenum);
        setup_handle(URL, m, handlenum);
        last_handle_add = now;
        handlenum++;
      }
    }

    curl_multi_perform(m, &running);

    abort_on_test_timeout();

    /* See how the transfers went */
    while((msg = curl_multi_info_read(m, &msgs_left))) {
      if(msg->msg == CURLMSG_DONE) {
        int i, found = 0;

        /* Find out which handle this message is about */
        for(i = 0; i < num_handles; i++) {
          found = (msg->easy_handle == handles[i]);
          if(found)
            break;
        }

        /* only act on the message if it maps to one of our handles */
        if(found) {
          printf("Handle %d Completed with status %d\n", i, msg->data.result);
          curl_multi_remove_handle(m, handles[i]);
        }
      }
    }

    if(handlenum == num_handles && !running) {
      break; /* done */
    }

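    /* Wait for activity on the multi handle's sockets before the next
       iteration. */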
    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&exc);

    curl_multi_fdset(m, &rd, &wr, &exc, &maxfd);

    /* At this point, maxfd is guaranteed to be greater than or equal
       to -1. */

    curl_multi_timeout(m, &timeout);
    if(timeout < 0)
      timeout = 1;

    interval.tv_sec = timeout / 1000;
    interval.tv_usec = (timeout % 1000) * 1000;

    /* Override the computed timeout with a 1 ms interval so that the loop
       iterates roughly once per millisecond and can add the remaining
       handles close to their scheduled times. */
    interval.tv_sec = 0;
    interval.tv_usec = 1000;

    select_test(maxfd + 1, &rd, &wr, &exc, &interval);

    abort_on_test_timeout();
  }

test_cleanup:

  remove_handles();

  /* undocumented cleanup sequence - type UB */

  curl_multi_cleanup(m);
  curl_global_cleanup();

  free_urls();

  return res;
}