/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include "curl_setup.h"
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef __VMS
#include <in.h>
#include <inet.h>
#endif
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifdef USE_LIBIDN2
#include <idn2.h>
#elif defined(USE_WIN32_IDN)
/* prototype for curl_win32_idn_to_ascii() */
bool curl_win32_idn_to_ascii(const char *in, char **out);
#endif /* USE_LIBIDN2 */
#include "urldata.h"
#include "netrc.h"
#include "formdata.h"
#include "vtls/vtls.h"
#include "hostip.h"
#include "transfer.h"
#include "sendf.h"
#include "progress.h"
#include "cookie.h"
#include "strcase.h"
#include "strerror.h"
#include "escape.h"
#include "strtok.h"
#include "share.h"
#include "content_encoding.h"
#include "http_digest.h"
#include "http_negotiate.h"
#include "select.h"
#include "multiif.h"
#include "easyif.h"
#include "speedcheck.h"
#include "warnless.h"
#include "non-ascii.h"
#include "inet_pton.h"
#include "getinfo.h"
/* And now for the protocols */
#include "ftp.h"
#include "dict.h"
#include "telnet.h"
#include "tftp.h"
#include "http.h"
#include "http2.h"
#include "file.h"
#include "curl_ldap.h"
#include "ssh.h"
#include "imap.h"
#include "url.h"
#include "connect.h"
#include "inet_ntop.h"
#include "http_ntlm.h"
#include "curl_ntlm_wb.h"
#include "socks.h"
#include "curl_rtmp.h"
#include "gopher.h"
#include "http_proxy.h"
#include "conncache.h"
#include "multihandle.h"
#include "pipeline.h"
#include "dotdot.h"
#include "strdup.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
/* Local static prototypes */
static struct connectdata *
find_oldest_idle_connection_in_bundle(struct Curl_easy *data,
struct connectbundle *bundle);
static void conn_free(struct connectdata *conn);
static void free_fixed_hostname(struct hostname *host);
static void signalPipeClose(struct curl_llist *pipeline, bool pipe_broke);
static CURLcode parse_url_login(struct Curl_easy *data,
struct connectdata *conn,
char **userptr, char **passwdptr,
char **optionsptr);
static CURLcode parse_login_details(const char *login, const size_t len,
char **userptr, char **passwdptr,
char **optionsptr);
static unsigned int get_protocol_family(unsigned int protocol);
/*
* Protocol table.
*/
static const struct Curl_handler * const protocols[] = {
#ifndef CURL_DISABLE_HTTP
&Curl_handler_http,
#endif
#if defined(USE_SSL) && !defined(CURL_DISABLE_HTTP)
&Curl_handler_https,
#endif
#ifndef CURL_DISABLE_FTP
&Curl_handler_ftp,
#endif
#if defined(USE_SSL) && !defined(CURL_DISABLE_FTP)
&Curl_handler_ftps,
#endif
#ifndef CURL_DISABLE_TELNET
&Curl_handler_telnet,
#endif
#ifndef CURL_DISABLE_DICT
&Curl_handler_dict,
#endif
#ifndef CURL_DISABLE_LDAP
&Curl_handler_ldap,
#if !defined(CURL_DISABLE_LDAPS) && \
((defined(USE_OPENLDAP) && defined(USE_SSL)) || \
(!defined(USE_OPENLDAP) && defined(HAVE_LDAP_SSL)))
&Curl_handler_ldaps,
#endif
#endif
#ifndef CURL_DISABLE_FILE
&Curl_handler_file,
#endif
#ifndef CURL_DISABLE_TFTP
&Curl_handler_tftp,
#endif
#ifdef USE_LIBSSH2
&Curl_handler_scp,
&Curl_handler_sftp,
#endif
#ifndef CURL_DISABLE_IMAP
&Curl_handler_imap,
#ifdef USE_SSL
&Curl_handler_imaps,
#endif
#endif
#ifndef CURL_DISABLE_POP3
&Curl_handler_pop3,
#ifdef USE_SSL
&Curl_handler_pop3s,
#endif
#endif
#if !defined(CURL_DISABLE_SMB) && defined(USE_NTLM) && \
(CURL_SIZEOF_CURL_OFF_T > 4) && \
(!defined(USE_WINDOWS_SSPI) || defined(USE_WIN32_CRYPTO))
&Curl_handler_smb,
#ifdef USE_SSL
&Curl_handler_smbs,
#endif
#endif
#ifndef CURL_DISABLE_SMTP
&Curl_handler_smtp,
#ifdef USE_SSL
&Curl_handler_smtps,
#endif
#endif
#ifndef CURL_DISABLE_RTSP
&Curl_handler_rtsp,
#endif
#ifndef CURL_DISABLE_GOPHER
&Curl_handler_gopher,
#endif
#ifdef USE_LIBRTMP
&Curl_handler_rtmp,
&Curl_handler_rtmpt,
&Curl_handler_rtmpe,
&Curl_handler_rtmpte,
&Curl_handler_rtmps,
&Curl_handler_rtmpts,
#endif
(struct Curl_handler *) NULL
};
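/*
 * Illustrative sketch of how this table is used: a URL scheme is mapped to
 * its handler by a plain linear scan, conceptually along these lines:
 *
 *   const struct Curl_handler * const *pp;
 *   const struct Curl_handler *p = NULL;
 *   for(pp = protocols; *pp; pp++)
 *     if(strcasecompare((*pp)->scheme, "http")) {
 *       p = *pp;       (Curl_handler_http, when HTTP is compiled in)
 *       break;
 *     }
 */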
/*
* Dummy handler for undefined protocol schemes.
*/
static const struct Curl_handler Curl_handler_dummy = {
"<no protocol>", /* scheme */
ZERO_NULL, /* setup_connection */
ZERO_NULL, /* do_it */
ZERO_NULL, /* done */
ZERO_NULL, /* do_more */
ZERO_NULL, /* connect_it */
ZERO_NULL, /* connecting */
ZERO_NULL, /* doing */
ZERO_NULL, /* proto_getsock */
ZERO_NULL, /* doing_getsock */
ZERO_NULL, /* domore_getsock */
ZERO_NULL, /* perform_getsock */
ZERO_NULL, /* disconnect */
ZERO_NULL, /* readwrite */
0, /* defport */
0, /* protocol */
PROTOPT_NONE /* flags */
};
void Curl_freeset(struct Curl_easy *data)
{
/* Free all dynamic strings stored in the data->set substructure. */
enum dupstring i;
for(i=(enum dupstring)0; i < STRING_LAST; i++) {
Curl_safefree(data->set.str[i]);
}
if(data->change.referer_alloc) {
Curl_safefree(data->change.referer);
data->change.referer_alloc = FALSE;
}
data->change.referer = NULL;
if(data->change.url_alloc) {
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
data->change.url = NULL;
}
static CURLcode setstropt(char **charp, const char *s)
{
/* Release the previous storage at `charp' and replace by a dynamic storage
copy of `s'. Return CURLE_OK or CURLE_OUT_OF_MEMORY. */
Curl_safefree(*charp);
if(s) {
char *str = strdup(s);
if(!str)
return CURLE_OUT_OF_MEMORY;
*charp = str;
}
return CURLE_OK;
}
static CURLcode setstropt_userpwd(char *option, char **userp, char **passwdp)
{
CURLcode result = CURLE_OK;
char *user = NULL;
char *passwd = NULL;
/* Parse the login details if specified. If not, then we treat NULL as a
hint to clear the existing data */
if(option) {
result = parse_login_details(option, strlen(option),
(userp ? &user : NULL),
(passwdp ? &passwd : NULL),
NULL);
}
if(!result) {
/* Store the username part of option if required */
if(userp) {
if(!user && option && option[0] == ':') {
/* Allocate an empty string instead of returning NULL as user name */
user = strdup("");
if(!user)
result = CURLE_OUT_OF_MEMORY;
}
Curl_safefree(*userp);
*userp = user;
}
/* Store the password part of option if required */
if(passwdp) {
Curl_safefree(*passwdp);
*passwdp = passwd;
}
}
return result;
}
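/*
 * Illustrative use of the helper above, with the string slots the
 * CURLOPT_USERPWD handler is assumed to pass:
 *
 *   setstropt_userpwd("alice:s3cret",
 *                     &data->set.str[STRING_USERNAME],
 *                     &data->set.str[STRING_PASSWORD]);
 *
 * which leaves "alice" in the username slot and "s3cret" in the password
 * slot.
 */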
CURLcode Curl_dupset(struct Curl_easy *dst, struct Curl_easy *src)
{
CURLcode result = CURLE_OK;
enum dupstring i;
/* Copy src->set into dst->set first, then deal with the strings
afterwards */
dst->set = src->set;
/* clear all string pointers first */
memset(dst->set.str, 0, STRING_LAST * sizeof(char *));
/* duplicate all strings */
for(i=(enum dupstring)0; i< STRING_LASTZEROTERMINATED; i++) {
result = setstropt(&dst->set.str[i], src->set.str[i]);
if(result)
return result;
}
/* duplicate memory areas pointed to */
i = STRING_COPYPOSTFIELDS;
if(src->set.postfieldsize && src->set.str[i]) {
/* postfieldsize is curl_off_t, Curl_memdup() takes a size_t ... */
dst->set.str[i] = Curl_memdup(src->set.str[i],
curlx_sotouz(src->set.postfieldsize));
if(!dst->set.str[i])
return CURLE_OUT_OF_MEMORY;
/* point to the new copy */
dst->set.postfields = dst->set.str[i];
}
return CURLE_OK;
}
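/*
 * Usage note: Curl_dupset() backs handle duplication through the public
 * API ("original" below is just a placeholder handle name), e.g.
 *
 *   CURL *copy = curl_easy_duphandle(original);
 *
 * The strings are deep-copied above so the two handles can later be
 * cleaned up independently.
 */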
/*
* This is the internal function curl_easy_cleanup() calls. This should
* cleanup and free all resources associated with this sessionhandle.
*
* NOTE: if we ever add something that attempts to write to a socket or
* similar here, we must ignore SIGPIPE first. It is currently only done
* when curl_easy_perform() is invoked.
*/
CURLcode Curl_close(struct Curl_easy *data)
{
struct Curl_multi *m;
if(!data)
return CURLE_OK;
Curl_expire_clear(data); /* shut off timers */
m = data->multi;
if(m)
/* This handle is still part of a multi handle, take care of this first
and detach this handle from there. */
curl_multi_remove_handle(data->multi, data);
if(data->multi_easy)
/* when curl_easy_perform() is used, it creates its own multi handle to
use and this is the one */
curl_multi_cleanup(data->multi_easy);
/* Destroy the timeout list that is held in the easy handle. It is
/normally/ done by curl_multi_remove_handle() but this is "just in
case" */
if(data->state.timeoutlist) {
Curl_llist_destroy(data->state.timeoutlist, NULL);
data->state.timeoutlist = NULL;
}
data->magic = 0; /* force a clear AFTER the possibly enforced removal from
the multi handle, since that function uses the magic
field! */
if(data->state.rangestringalloc)
free(data->state.range);
/* Free the pathbuffer */
Curl_safefree(data->state.pathbuffer);
data->state.path = NULL;
/* freed here just in case DONE wasn't called */
Curl_free_request_state(data);
/* Close down all open SSL info and sessions */
Curl_ssl_close_all(data);
Curl_safefree(data->state.first_host);
Curl_safefree(data->state.scratch);
Curl_ssl_free_certinfo(data);
/* Cleanup possible redirect junk */
free(data->req.newurl);
data->req.newurl = NULL;
if(data->change.referer_alloc) {
Curl_safefree(data->change.referer);
data->change.referer_alloc = FALSE;
}
data->change.referer = NULL;
if(data->change.url_alloc) {
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
data->change.url = NULL;
Curl_safefree(data->state.buffer);
Curl_safefree(data->state.headerbuff);
Curl_flush_cookies(data, 1);
Curl_digest_cleanup(data);
Curl_safefree(data->info.contenttype);
Curl_safefree(data->info.wouldredirect);
/* this destroys the channel and we cannot use it anymore after this */
Curl_resolver_cleanup(data->state.resolver);
Curl_http2_cleanup_dependencies(data);
Curl_convert_close(data);
/* No longer a dirty share, if it exists */
if(data->share) {
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
data->share->dirty--;
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
}
if(data->set.wildcardmatch) {
/* destruct wildcard structures if it is needed */
struct WildcardData *wc = &data->wildcard;
Curl_wildcard_dtor(wc);
}
Curl_freeset(data);
free(data);
return CURLE_OK;
}
/*
* Initialize the UserDefined fields within a Curl_easy.
* This may be safely called on a new or existing Curl_easy.
*/
CURLcode Curl_init_userdefined(struct UserDefined *set)
{
CURLcode result = CURLE_OK;
set->out = stdout; /* default output to stdout */
set->in_set = stdin; /* default input from stdin */
set->err = stderr; /* default stderr to stderr */
/* use fwrite as default function to store output */
set->fwrite_func = (curl_write_callback)fwrite;
/* use fread as default function to read input */
set->fread_func_set = (curl_read_callback)fread;
set->is_fread_set = 0;
set->is_fwrite_set = 0;
set->seek_func = ZERO_NULL;
set->seek_client = ZERO_NULL;
/* conversion callbacks for non-ASCII hosts */
set->convfromnetwork = ZERO_NULL;
set->convtonetwork = ZERO_NULL;
set->convfromutf8 = ZERO_NULL;
set->filesize = -1; /* we don't know the size */
set->postfieldsize = -1; /* unknown size */
set->maxredirs = -1; /* allow any amount by default */
set->httpreq = HTTPREQ_GET; /* Default HTTP request */
set->rtspreq = RTSPREQ_OPTIONS; /* Default RTSP request */
set->ftp_use_epsv = TRUE; /* FTP defaults to EPSV operations */
set->ftp_use_eprt = TRUE; /* FTP defaults to EPRT operations */
set->ftp_use_pret = FALSE; /* mainly useful for drftpd servers */
set->ftp_filemethod = FTPFILE_MULTICWD;
set->dns_cache_timeout = 60; /* Timeout every 60 seconds by default */
/* Set the default size of the SSL session ID cache */
set->general_ssl.max_ssl_sessions = 5;
set->proxyport = 0;
set->proxytype = CURLPROXY_HTTP; /* defaults to HTTP proxy */
set->httpauth = CURLAUTH_BASIC; /* defaults to basic */
set->proxyauth = CURLAUTH_BASIC; /* defaults to basic */
/* make libcurl quiet by default: */
set->hide_progress = TRUE; /* CURLOPT_NOPROGRESS changes these */
/*
* libcurl 7.10 introduced SSL verification *by default*! This needs to be
* switched off unless wanted.
*/
set->ssl.primary.verifypeer = TRUE;
set->ssl.primary.verifyhost = TRUE;
#ifdef USE_TLS_SRP
set->ssl.authtype = CURL_TLSAUTH_NONE;
#endif
set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth
type */
set->general_ssl.sessionid = TRUE; /* session ID caching enabled by
default */
set->proxy_ssl = set->ssl;
set->new_file_perms = 0644; /* Default permissions */
set->new_directory_perms = 0755; /* Default permissions */
/* for the *protocols fields we don't use the CURLPROTO_ALL convenience
define since we internally only use the lower 16 bits for the passed
in bitmask to not conflict with the private bits */
set->allowed_protocols = CURLPROTO_ALL;
set->redir_protocols = CURLPROTO_ALL & /* All except FILE, SCP and SMB */
~(CURLPROTO_FILE | CURLPROTO_SCP | CURLPROTO_SMB |
CURLPROTO_SMBS);
#if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI)
/*
* disallow unprotected protection negotiation; the NEC reference
* implementation seems not to follow RFC 1961 sections 4.3/4.4
*/
set->socks5_gssapi_nec = FALSE;
#endif
/* This is our preferred CA cert bundle/path since install time */
#if defined(CURL_CA_BUNDLE)
result = setstropt(&set->str[STRING_SSL_CAFILE_ORIG], CURL_CA_BUNDLE);
if(result)
return result;
#endif
#if defined(CURL_CA_PATH)
result = setstropt(&set->str[STRING_SSL_CAPATH_ORIG], CURL_CA_PATH);
if(result)
return result;
result = setstropt(&set->str[STRING_SSL_CAPATH_PROXY], CURL_CA_PATH);
if(result)
return result;
#endif
set->wildcardmatch = FALSE;
set->chunk_bgn = ZERO_NULL;
set->chunk_end = ZERO_NULL;
/* tcp keepalives are disabled by default, but provide reasonable values for
* the interval and idle times.
*/
set->tcp_keepalive = FALSE;
set->tcp_keepintvl = 60;
set->tcp_keepidle = 60;
set->tcp_fastopen = FALSE;
set->tcp_nodelay = TRUE;
set->ssl_enable_npn = TRUE;
set->ssl_enable_alpn = TRUE;
set->expect_100_timeout = 1000L; /* Wait for a second by default. */
set->sep_headers = TRUE; /* separated header lists by default */
Curl_http2_init_userset(set);
return result;
}
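/*
 * These defaults are what an application observes right after
 * curl_easy_init(); peer verification, for instance, stays on unless the
 * application explicitly switches it off (illustrative, not recommended):
 *
 *   curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L);
 */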
/**
* Curl_open()
*
* @param curl is a pointer to a sessionhandle pointer that gets set by this
* function.
* @return CURLcode
*/
CURLcode Curl_open(struct Curl_easy **curl)
{
CURLcode result;
struct Curl_easy *data;
/* Very simple start-up: alloc the struct, init it with zeroes and return */
data = calloc(1, sizeof(struct Curl_easy));
if(!data) {
/* this is a very serious error */
DEBUGF(fprintf(stderr, "Error: calloc of Curl_easy failed\n"));
return CURLE_OUT_OF_MEMORY;
}
data->magic = CURLEASY_MAGIC_NUMBER;
result = Curl_resolver_init(&data->state.resolver);
if(result) {
DEBUGF(fprintf(stderr, "Error: resolver_init failed\n"));
free(data);
return result;
}
/* We do some initial setup here, all those fields that can't be just 0 */
data->state.buffer = malloc(BUFSIZE + 1);
if(!data->state.buffer) {
DEBUGF(fprintf(stderr, "Error: malloc of buffer failed\n"));
result = CURLE_OUT_OF_MEMORY;
}
data->state.headerbuff = malloc(HEADERSIZE);
if(!data->state.headerbuff) {
DEBUGF(fprintf(stderr, "Error: malloc of headerbuff failed\n"));
result = CURLE_OUT_OF_MEMORY;
}
else {
result = Curl_init_userdefined(&data->set);
data->state.headersize=HEADERSIZE;
Curl_convert_init(data);
Curl_initinfo(data);
/* most recent connection is not yet defined */
data->state.lastconnect = NULL;
data->progress.flags |= PGRS_HIDE;
data->state.current_speed = -1; /* init to negative == impossible */
data->wildcard.state = CURLWC_INIT;
data->wildcard.filelist = NULL;
data->set.fnmatch = ZERO_NULL;
data->set.maxconnects = DEFAULT_CONNCACHE_SIZE; /* for easy handles */
Curl_http2_init_state(&data->state);
}
if(result) {
Curl_resolver_cleanup(data->state.resolver);
free(data->state.buffer);
free(data->state.headerbuff);
Curl_freeset(data);
free(data);
data = NULL;
}
else
*curl = data;
return result;
}
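/*
 * Minimal sketch of how the public API reaches the functions above:
 *
 *   CURL *handle = curl_easy_init();       (ends up in Curl_open())
 *   if(handle) {
 *     curl_easy_setopt(handle, CURLOPT_URL, "https://example.com/");
 *     curl_easy_perform(handle);
 *     curl_easy_cleanup(handle);           (ends up in Curl_close())
 *   }
 */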
CURLcode Curl_setopt(struct Curl_easy *data, CURLoption option,
va_list param)
{
char *argptr;
CURLcode result = CURLE_OK;
long arg;
#ifndef CURL_DISABLE_HTTP
curl_off_t bigsize;
#endif
switch(option) {
case CURLOPT_DNS_CACHE_TIMEOUT:
data->set.dns_cache_timeout = va_arg(param, long);
break;
case CURLOPT_DNS_USE_GLOBAL_CACHE:
/* remember we want this enabled */
arg = va_arg(param, long);
data->set.global_dns_cache = (0 != arg) ? TRUE : FALSE;
break;
case CURLOPT_SSL_CIPHER_LIST:
/* set a list of cipher we want to use in the SSL connection */
result = setstropt(&data->set.str[STRING_SSL_CIPHER_LIST_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_SSL_CIPHER_LIST:
/* set a list of cipher we want to use in the SSL connection for proxy */
result = setstropt(&data->set.str[STRING_SSL_CIPHER_LIST_PROXY],
va_arg(param, char *));
break;
case CURLOPT_RANDOM_FILE:
/*
* This is the path name to a file that contains random data to seed
* the random SSL stuff with. The file is only used for reading.
*/
result = setstropt(&data->set.str[STRING_SSL_RANDOM_FILE],
va_arg(param, char *));
break;
case CURLOPT_EGDSOCKET:
/*
* The Entropy Gathering Daemon socket pathname
*/
result = setstropt(&data->set.str[STRING_SSL_EGDSOCKET],
va_arg(param, char *));
break;
case CURLOPT_MAXCONNECTS:
/*
* Set the absolute number of maximum simultaneous alive connection that
* libcurl is allowed to have.
*/
data->set.maxconnects = va_arg(param, long);
break;
case CURLOPT_FORBID_REUSE:
/*
* When this transfer is done, it must not be left to be reused by a
* subsequent transfer but shall be closed immediately.
*/
data->set.reuse_forbid = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FRESH_CONNECT:
/*
* This transfer shall not use a previously cached connection but
* should be made with a fresh new connect!
*/
data->set.reuse_fresh = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_VERBOSE:
/*
* Verbose means infof() calls that give a lot of information about
* the connection and transfer procedures as well as internal choices.
*/
data->set.verbose = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_HEADER:
/*
* Set to include the header in the general data output stream.
*/
data->set.include_header = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_NOPROGRESS:
/*
* Shut off the internal supported progress meter
*/
data->set.hide_progress = (0 != va_arg(param, long)) ? TRUE : FALSE;
if(data->set.hide_progress)
data->progress.flags |= PGRS_HIDE;
else
data->progress.flags &= ~PGRS_HIDE;
break;
case CURLOPT_NOBODY:
/*
* Do not include the body part in the output data stream.
*/
data->set.opt_no_body = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FAILONERROR:
/*
* Don't output the HTML page for >= 400 response codes, but instead
* only return an error.
*/
data->set.http_fail_on_error = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_KEEP_SENDING_ON_ERROR:
data->set.http_keep_sending_on_error = (0 != va_arg(param, long)) ?
TRUE : FALSE;
break;
case CURLOPT_UPLOAD:
case CURLOPT_PUT:
/*
* We want to send data to the remote host. If this is HTTP, that equals
* using the PUT request.
*/
data->set.upload = (0 != va_arg(param, long)) ? TRUE : FALSE;
if(data->set.upload) {
/* If this is HTTP, PUT is what's needed to "upload" */
data->set.httpreq = HTTPREQ_PUT;
data->set.opt_no_body = FALSE; /* this is implied */
}
else
/* In HTTP, the opposite of upload is GET (unless NOBODY is true as
then this can be changed to HEAD later on) */
data->set.httpreq = HTTPREQ_GET;
break;
case CURLOPT_FILETIME:
/*
* Try to get the file time of the remote document. The time will
* later (possibly) become available using curl_easy_getinfo().
*/
data->set.get_filetime = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FTP_CREATE_MISSING_DIRS:
/*
* An FTP option that modifies an upload to create missing directories on
* the server.
*/
switch(va_arg(param, long)) {
case 0:
data->set.ftp_create_missing_dirs = 0;
break;
case 1:
data->set.ftp_create_missing_dirs = 1;
break;
case 2:
data->set.ftp_create_missing_dirs = 2;
break;
default:
/* reserve other values for future use */
result = CURLE_UNKNOWN_OPTION;
break;
}
break;
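/*
 * Illustrative application-side use: a value of 2 additionally retries CWD
 * when MKD fails because the directory already exists, e.g.
 *
 *   curl_easy_setopt(handle, CURLOPT_FTP_CREATE_MISSING_DIRS, 2L);
 */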
case CURLOPT_SERVER_RESPONSE_TIMEOUT:
/*
* Option that specifies how quickly a server response must be obtained
* before it is considered failure. For pingpong protocols.
*/
data->set.server_response_timeout = va_arg(param, long) * 1000;
break;
case CURLOPT_TFTP_NO_OPTIONS:
/*
* Option that prevents libcurl from sending TFTP option requests to the
* server.
*/
data->set.tftp_no_options = va_arg(param, long) != 0;
break;
case CURLOPT_TFTP_BLKSIZE:
/*
* TFTP option that specifies the block size to use for data transmission.
*/
data->set.tftp_blksize = va_arg(param, long);
break;
case CURLOPT_DIRLISTONLY:
/*
* An option that changes the command to one that asks for a list
* only, no file info details.
*/
data->set.ftp_list_only = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_APPEND:
/*
* We want to upload and append to an existing file.
*/
data->set.ftp_append = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FTP_FILEMETHOD:
/*
* How to access files over FTP.
*/
data->set.ftp_filemethod = (curl_ftpfile)va_arg(param, long);
break;
case CURLOPT_NETRC:
/*
* Parse the $HOME/.netrc file
*/
data->set.use_netrc = (enum CURL_NETRC_OPTION)va_arg(param, long);
break;
case CURLOPT_NETRC_FILE:
/*
* Use this file instead of the $HOME/.netrc file
*/
result = setstropt(&data->set.str[STRING_NETRC_FILE],
va_arg(param, char *));
break;
case CURLOPT_TRANSFERTEXT:
/*
* This option was previously named 'FTPASCII'. Renamed to work with
* more protocols than merely FTP.
*
* Transfer using ASCII (instead of BINARY).
*/
data->set.prefer_ascii = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_TIMECONDITION:
/*
* Set HTTP time condition. This must be one of the defines in the
* curl/curl.h header file.
*/
data->set.timecondition = (curl_TimeCond)va_arg(param, long);
break;
case CURLOPT_TIMEVALUE:
/*
* This is the value to compare with the remote document with the
* method set with CURLOPT_TIMECONDITION
*/
data->set.timevalue = (time_t)va_arg(param, long);
break;
case CURLOPT_SSLVERSION:
/*
* Set explicit SSL version to try to connect with, as some SSL
* implementations are lame.
*/
#ifdef USE_SSL
data->set.ssl.primary.version = va_arg(param, long);
#else
result = CURLE_UNKNOWN_OPTION;
#endif
break;
case CURLOPT_PROXY_SSLVERSION:
/*
* Set explicit SSL version to try to connect with for proxy, as some SSL
* implementations are lame.
*/
#ifdef USE_SSL
data->set.proxy_ssl.primary.version = va_arg(param, long);
#else
result = CURLE_UNKNOWN_OPTION;
#endif
break;
#ifndef CURL_DISABLE_HTTP
case CURLOPT_AUTOREFERER:
/*
* Switch on automatic referer that gets set if curl follows locations.
*/
data->set.http_auto_referer = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_ACCEPT_ENCODING:
/*
* String to use at the value of Accept-Encoding header.
*
* If the encoding is set to "" we use an Accept-Encoding header that
* encompasses all the encodings we support.
* If the encoding is set to NULL we don't send an Accept-Encoding header
* and ignore a received Content-Encoding header.
*
*/
argptr = va_arg(param, char *);
result = setstropt(&data->set.str[STRING_ENCODING],
(argptr && !*argptr)?
ALL_CONTENT_ENCODINGS: argptr);
break;
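/*
 * Illustrative application-side use: an empty string asks for all the
 * encodings this build supports, e.g.
 *
 *   curl_easy_setopt(handle, CURLOPT_ACCEPT_ENCODING, "");
 */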
case CURLOPT_TRANSFER_ENCODING:
data->set.http_transfer_encoding = (0 != va_arg(param, long)) ?
TRUE : FALSE;
break;
case CURLOPT_FOLLOWLOCATION:
/*
* Follow Location: header hints from an HTTP server.
*/
data->set.http_follow_location = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_UNRESTRICTED_AUTH:
/*
* Send authentication (user+password) when following locations, even when
* the hostname changes.
*/
data->set.http_disable_hostname_check_before_authentication =
(0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_MAXREDIRS:
/*
* The maximum amount of hops you allow curl to follow Location:
* headers. This should mostly be used to detect never-ending loops.
*/
data->set.maxredirs = va_arg(param, long);
break;
case CURLOPT_POSTREDIR:
{
/*
* Set the behaviour of POST when redirecting
* CURL_REDIR_GET_ALL - POST is changed to GET after 301 and 302
* CURL_REDIR_POST_301 - POST is kept as POST after 301
* CURL_REDIR_POST_302 - POST is kept as POST after 302
* CURL_REDIR_POST_303 - POST is kept as POST after 303
* CURL_REDIR_POST_ALL - POST is kept as POST after 301, 302 and 303
* other - POST is kept as POST after 301 and 302
*/
int postRedir = curlx_sltosi(va_arg(param, long));
data->set.keep_post = postRedir & CURL_REDIR_POST_ALL;
}
break;
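/*
 * Illustrative application-side use: keep POST as POST across 301, 302 and
 * 303 redirects, e.g.
 *
 *   curl_easy_setopt(handle, CURLOPT_POSTREDIR, (long)CURL_REDIR_POST_ALL);
 */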
case CURLOPT_POST:
/* Does this option serve a purpose anymore? Yes it does, when
CURLOPT_POSTFIELDS isn't used and the POST data is read off the
callback! */
if(va_arg(param, long)) {
data->set.httpreq = HTTPREQ_POST;
data->set.opt_no_body = FALSE; /* this is implied */
}
else
data->set.httpreq = HTTPREQ_GET;
break;
case CURLOPT_COPYPOSTFIELDS:
/*
* A string with POST data. Makes curl HTTP POST. Even if it is NULL.
* If needed, CURLOPT_POSTFIELDSIZE must have been set prior to
* CURLOPT_COPYPOSTFIELDS and not altered later.
*/
argptr = va_arg(param, char *);
if(!argptr || data->set.postfieldsize == -1)
result = setstropt(&data->set.str[STRING_COPYPOSTFIELDS], argptr);
else {
/*
* Check that requested length does not overflow the size_t type.
*/
if((data->set.postfieldsize < 0) ||
((sizeof(curl_off_t) != sizeof(size_t)) &&
(data->set.postfieldsize > (curl_off_t)((size_t)-1))))
result = CURLE_OUT_OF_MEMORY;
else {
char *p;
(void) setstropt(&data->set.str[STRING_COPYPOSTFIELDS], NULL);
/* Allocate even when size == 0. This satisfies the need of possible
later address compare to detect the COPYPOSTFIELDS mode, and
to mark that postfields is used rather than read function or
form data.
*/
p = malloc((size_t)(data->set.postfieldsize?
data->set.postfieldsize:1));
if(!p)
result = CURLE_OUT_OF_MEMORY;
else {
if(data->set.postfieldsize)
memcpy(p, argptr, (size_t)data->set.postfieldsize);
data->set.str[STRING_COPYPOSTFIELDS] = p;
}
}
}
data->set.postfields = data->set.str[STRING_COPYPOSTFIELDS];
data->set.httpreq = HTTPREQ_POST;
break;
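/*
 * Illustrative application-side use (blob/blob_len being placeholder
 * names): for binary data the size must be set before
 * CURLOPT_COPYPOSTFIELDS, e.g.
 *
 *   curl_easy_setopt(handle, CURLOPT_POSTFIELDSIZE, (long)blob_len);
 *   curl_easy_setopt(handle, CURLOPT_COPYPOSTFIELDS, blob);
 */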
case CURLOPT_POSTFIELDS:
/*
* Like above, but use static data instead of copying it.
*/
data->set.postfields = va_arg(param, void *);
/* Release old copied data. */
(void) setstropt(&data->set.str[STRING_COPYPOSTFIELDS], NULL);
data->set.httpreq = HTTPREQ_POST;
break;
case CURLOPT_POSTFIELDSIZE:
/*
* The size of the POSTFIELD data, to prevent libcurl from doing strlen() to
* figure it out. Enables binary posts.
*/
bigsize = va_arg(param, long);
if(data->set.postfieldsize < bigsize &&
data->set.postfields == data->set.str[STRING_COPYPOSTFIELDS]) {
/* Previous CURLOPT_COPYPOSTFIELDS is no longer valid. */
(void) setstropt(&data->set.str[STRING_COPYPOSTFIELDS], NULL);
data->set.postfields = NULL;
}
data->set.postfieldsize = bigsize;
break;
case CURLOPT_POSTFIELDSIZE_LARGE:
/*
* The size of the POSTFIELD data, to prevent libcurl from doing strlen() to
* figure it out. Enables binary posts.
*/
bigsize = va_arg(param, curl_off_t);
if(data->set.postfieldsize < bigsize &&
data->set.postfields == data->set.str[STRING_COPYPOSTFIELDS]) {
/* Previous CURLOPT_COPYPOSTFIELDS is no longer valid. */
(void) setstropt(&data->set.str[STRING_COPYPOSTFIELDS], NULL);
data->set.postfields = NULL;
}
data->set.postfieldsize = bigsize;
break;
case CURLOPT_HTTPPOST:
/*
* Set to make us do HTTP POST
*/
data->set.httppost = va_arg(param, struct curl_httppost *);
data->set.httpreq = HTTPREQ_POST_FORM;
data->set.opt_no_body = FALSE; /* this is implied */
break;
case CURLOPT_REFERER:
/*
* String to set in the HTTP Referer: field.
*/
if(data->change.referer_alloc) {
Curl_safefree(data->change.referer);
data->change.referer_alloc = FALSE;
}
result = setstropt(&data->set.str[STRING_SET_REFERER],
va_arg(param, char *));
data->change.referer = data->set.str[STRING_SET_REFERER];
break;
case CURLOPT_USERAGENT:
/*
* String to use in the HTTP User-Agent field
*/
result = setstropt(&data->set.str[STRING_USERAGENT],
va_arg(param, char *));
break;
case CURLOPT_HTTPHEADER:
/*
* Set a list with HTTP headers to use (or replace internals with)
*/
data->set.headers = va_arg(param, struct curl_slist *);
break;
case CURLOPT_PROXYHEADER:
/*
* Set a list with proxy headers to use (or replace internals with)
*
* Since CURLOPT_HTTPHEADER was the only way to set HTTP headers for a
* long time we remain doing it this way until CURLOPT_PROXYHEADER is
* used. As soon as this option has been used, if set to anything but
* NULL, custom headers for proxies are only picked from this list.
*
* Set this option to NULL to restore the previous behavior.
*/
data->set.proxyheaders = va_arg(param, struct curl_slist *);
break;
case CURLOPT_HEADEROPT:
/*
* Set header option.
*/
arg = va_arg(param, long);
data->set.sep_headers = (arg & CURLHEADER_SEPARATE)? TRUE: FALSE;
break;
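  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle; the header values are placeholders): send one
   * header list to the server and a separate list to the proxy by enabling
   * CURLHEADER_SEPARATE.
   *
   *   struct curl_slist *hdrs = curl_slist_append(NULL, "X-Example: yes");
   *   struct curl_slist *phdrs = curl_slist_append(NULL, "X-Proxy-Note: 1");
   *   curl_easy_setopt(easy, CURLOPT_HTTPHEADER, hdrs);
   *   curl_easy_setopt(easy, CURLOPT_PROXYHEADER, phdrs);
   *   curl_easy_setopt(easy, CURLOPT_HEADEROPT, (long)CURLHEADER_SEPARATE);
   *   curl_easy_perform(easy);
   *   curl_slist_free_all(hdrs);
   *   curl_slist_free_all(phdrs);
   *
   * The lists are not copied, so they must not be freed before the transfer
   * is done.
   */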
case CURLOPT_HTTP200ALIASES:
/*
* Set a list of aliases for HTTP 200 in response header
*/
data->set.http200aliases = va_arg(param, struct curl_slist *);
break;
#if !defined(CURL_DISABLE_COOKIES)
case CURLOPT_COOKIE:
/*
* Cookie string to send to the remote server in the request.
*/
result = setstropt(&data->set.str[STRING_COOKIE],
va_arg(param, char *));
break;
case CURLOPT_COOKIEFILE:
/*
* Set cookie file to read and parse. Can be used multiple times.
*/
argptr = (char *)va_arg(param, void *);
if(argptr) {
struct curl_slist *cl;
/* append the cookie file name to the list of file names, and deal with
them later */
cl = curl_slist_append(data->change.cookielist, argptr);
if(!cl) {
curl_slist_free_all(data->change.cookielist);
data->change.cookielist = NULL;
return CURLE_OUT_OF_MEMORY;
}
data->change.cookielist = cl; /* store the list for later use */
}
break;
case CURLOPT_COOKIEJAR:
/*
* Set cookie file name to dump all cookies to when we're done.
*/
{
struct CookieInfo *newcookies;
result = setstropt(&data->set.str[STRING_COOKIEJAR],
va_arg(param, char *));
/*
       * Activate the cookie parser. This may already have been done.
*/
newcookies = Curl_cookie_init(data, NULL, data->cookies,
data->set.cookiesession);
if(!newcookies)
result = CURLE_OUT_OF_MEMORY;
data->cookies = newcookies;
}
break;
case CURLOPT_COOKIESESSION:
/*
* Set this option to TRUE to start a new "cookie session". It will
     * prevent the forthcoming read-cookies-from-file actions from accepting
     * cookies that are marked as session cookies, as they belong to a
* previous session.
*
* In the original Netscape cookie spec, "session cookies" are cookies
* with no expire date set. RFC2109 describes the same action if no
* 'Max-Age' is set and RFC2965 includes the RFC2109 description and adds
* a 'Discard' action that can enforce the discard even for cookies that
* have a Max-Age.
*
* We run mostly with the original cookie spec, as hardly anyone implements
* anything else.
*/
data->set.cookiesession = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_COOKIELIST:
argptr = va_arg(param, char *);
if(argptr == NULL)
break;
if(strcasecompare(argptr, "ALL")) {
/* clear all cookies */
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
Curl_cookie_clearall(data->cookies);
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
else if(strcasecompare(argptr, "SESS")) {
/* clear session cookies */
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
Curl_cookie_clearsess(data->cookies);
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
else if(strcasecompare(argptr, "FLUSH")) {
/* flush cookies to file, takes care of the locking */
Curl_flush_cookies(data, 0);
}
else if(strcasecompare(argptr, "RELOAD")) {
/* reload cookies from file */
Curl_cookie_loadfiles(data);
break;
}
else {
if(!data->cookies)
/* if cookie engine was not running, activate it */
data->cookies = Curl_cookie_init(data, NULL, NULL, TRUE);
argptr = strdup(argptr);
if(!argptr || !data->cookies) {
result = CURLE_OUT_OF_MEMORY;
free(argptr);
}
else {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
if(checkprefix("Set-Cookie:", argptr))
/* HTTP Header format line */
Curl_cookie_add(data, data->cookies, TRUE, argptr + 11, NULL, NULL);
else
/* Netscape format line */
Curl_cookie_add(data, data->cookies, FALSE, argptr, NULL, NULL);
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
free(argptr);
}
}
break;
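  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle; cookie values and file names are
   * placeholders): enable the cookie engine without reading any file,
   * inject a cookie, flush to the jar and finally wipe all cookies.
   *
   *   curl_easy_setopt(easy, CURLOPT_COOKIEFILE, "");
   *   curl_easy_setopt(easy, CURLOPT_COOKIEJAR, "/tmp/cookies.txt");
   *   curl_easy_setopt(easy, CURLOPT_COOKIELIST,
   *                    "Set-Cookie: session=abc123; Domain=example.com");
   *   curl_easy_perform(easy);
   *   curl_easy_setopt(easy, CURLOPT_COOKIELIST, "FLUSH");
   *   curl_easy_setopt(easy, CURLOPT_COOKIELIST, "ALL");
   */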
#endif /* CURL_DISABLE_COOKIES */
case CURLOPT_HTTPGET:
/*
* Set to force us do HTTP GET
*/
if(va_arg(param, long)) {
data->set.httpreq = HTTPREQ_GET;
data->set.upload = FALSE; /* switch off upload */
data->set.opt_no_body = FALSE; /* this is implied */
}
break;
case CURLOPT_HTTP_VERSION:
/*
* This sets a requested HTTP version to be used. The value is one of
* the listed enums in curl/curl.h.
*/
arg = va_arg(param, long);
#ifndef USE_NGHTTP2
if(arg >= CURL_HTTP_VERSION_2)
return CURLE_UNSUPPORTED_PROTOCOL;
#endif
data->set.httpversion = arg;
break;
case CURLOPT_HTTPAUTH:
/*
* Set HTTP Authentication type BITMASK.
*/
{
int bitcheck;
bool authbits;
unsigned long auth = va_arg(param, unsigned long);
if(auth == CURLAUTH_NONE) {
data->set.httpauth = auth;
break;
}
/* the DIGEST_IE bit is only used to set a special marker, for all the
rest we need to handle it as normal DIGEST */
data->state.authhost.iestyle = (auth & CURLAUTH_DIGEST_IE) ? TRUE : FALSE;
if(auth & CURLAUTH_DIGEST_IE) {
auth |= CURLAUTH_DIGEST; /* set standard digest bit */
auth &= ~CURLAUTH_DIGEST_IE; /* unset ie digest bit */
}
/* switch off bits we can't support */
#ifndef USE_NTLM
auth &= ~CURLAUTH_NTLM; /* no NTLM support */
auth &= ~CURLAUTH_NTLM_WB; /* no NTLM_WB support */
#elif !defined(NTLM_WB_ENABLED)
auth &= ~CURLAUTH_NTLM_WB; /* no NTLM_WB support */
#endif
#ifndef USE_SPNEGO
auth &= ~CURLAUTH_NEGOTIATE; /* no Negotiate (SPNEGO) auth without
GSS-API or SSPI */
#endif
/* check if any auth bit lower than CURLAUTH_ONLY is still set */
bitcheck = 0;
authbits = FALSE;
while(bitcheck < 31) {
if(auth & (1UL << bitcheck++)) {
authbits = TRUE;
break;
}
}
if(!authbits)
return CURLE_NOT_BUILT_IN; /* no supported types left! */
data->set.httpauth = auth;
}
break;
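  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle; the credentials are placeholders): allow
   * Basic and Digest and let libcurl pick whichever the server offers.
   *
   *   curl_easy_setopt(easy, CURLOPT_HTTPAUTH,
   *                    (long)(CURLAUTH_BASIC | CURLAUTH_DIGEST));
   *   curl_easy_setopt(easy, CURLOPT_USERPWD, "alice:secret");
   */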
case CURLOPT_EXPECT_100_TIMEOUT_MS:
/*
     * Time to wait for a response to an HTTP request containing an
* Expect: 100-continue header before sending the data anyway.
*/
data->set.expect_100_timeout = va_arg(param, long);
break;
#endif /* CURL_DISABLE_HTTP */
case CURLOPT_CUSTOMREQUEST:
/*
* Set a custom string to use as request
*/
result = setstropt(&data->set.str[STRING_CUSTOMREQUEST],
va_arg(param, char *));
/* we don't set
data->set.httpreq = HTTPREQ_CUSTOM;
here, we continue as if we were using the already set type
and this just changes the actual request keyword */
break;
#ifndef CURL_DISABLE_PROXY
case CURLOPT_HTTPPROXYTUNNEL:
/*
* Tunnel operations through the proxy instead of normal proxy use
*/
data->set.tunnel_thru_httpproxy = (0 != va_arg(param, long)) ?
TRUE : FALSE;
break;
case CURLOPT_PROXYPORT:
/*
* Explicitly set HTTP proxy port number.
*/
data->set.proxyport = va_arg(param, long);
break;
case CURLOPT_PROXYAUTH:
/*
* Set HTTP Authentication type BITMASK.
*/
{
int bitcheck;
bool authbits;
unsigned long auth = va_arg(param, unsigned long);
if(auth == CURLAUTH_NONE) {
data->set.proxyauth = auth;
break;
}
/* the DIGEST_IE bit is only used to set a special marker, for all the
rest we need to handle it as normal DIGEST */
data->state.authproxy.iestyle = (auth & CURLAUTH_DIGEST_IE) ? TRUE : FALSE;
if(auth & CURLAUTH_DIGEST_IE) {
auth |= CURLAUTH_DIGEST; /* set standard digest bit */
auth &= ~CURLAUTH_DIGEST_IE; /* unset ie digest bit */
}
/* switch off bits we can't support */
#ifndef USE_NTLM
auth &= ~CURLAUTH_NTLM; /* no NTLM support */
auth &= ~CURLAUTH_NTLM_WB; /* no NTLM_WB support */
#elif !defined(NTLM_WB_ENABLED)
auth &= ~CURLAUTH_NTLM_WB; /* no NTLM_WB support */
#endif
#ifndef USE_SPNEGO
auth &= ~CURLAUTH_NEGOTIATE; /* no Negotiate (SPNEGO) auth without
GSS-API or SSPI */
#endif
/* check if any auth bit lower than CURLAUTH_ONLY is still set */
bitcheck = 0;
authbits = FALSE;
while(bitcheck < 31) {
if(auth & (1UL << bitcheck++)) {
authbits = TRUE;
break;
}
}
if(!authbits)
return CURLE_NOT_BUILT_IN; /* no supported types left! */
data->set.proxyauth = auth;
}
break;
case CURLOPT_PROXY:
/*
* Set proxy server:port to use as proxy.
*
* If the proxy is set to "" (and CURLOPT_SOCKS_PROXY is set to "" or NULL)
* we explicitly say that we don't want to use a proxy
* (even though there might be environment variables saying so).
*
     * Setting it to NULL means no proxy but allows the environment variables
     * to decide for us (if CURLOPT_SOCKS_PROXY is set to NULL as well).
*/
result = setstropt(&data->set.str[STRING_PROXY],
va_arg(param, char *));
break;
case CURLOPT_PRE_PROXY:
/*
* Set proxy server:port to use as SOCKS proxy.
*
* If the proxy is set to "" or NULL we explicitly say that we don't want
* to use the socks proxy.
*/
result = setstropt(&data->set.str[STRING_PRE_PROXY],
va_arg(param, char *));
break;
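  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle; host names are placeholders): reach an HTTP
   * proxy through a SOCKS5 pre-proxy.
   *
   *   curl_easy_setopt(easy, CURLOPT_PROXY, "http://proxy.example.com:3128");
   *   curl_easy_setopt(easy, CURLOPT_PRE_PROXY,
   *                    "socks5://gateway.example.com:1080");
   *
   * Passing "" for CURLOPT_PROXY instead disables proxying even when proxy
   * environment variables are set.
   */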
case CURLOPT_PROXYTYPE:
/*
* Set proxy type. HTTP/HTTP_1_0/SOCKS4/SOCKS4a/SOCKS5/SOCKS5_HOSTNAME
*/
data->set.proxytype = (curl_proxytype)va_arg(param, long);
break;
case CURLOPT_PROXY_TRANSFER_MODE:
/*
* set transfer mode (;type=<a|i>) when doing FTP via an HTTP proxy
*/
switch(va_arg(param, long)) {
case 0:
data->set.proxy_transfer_mode = FALSE;
break;
case 1:
data->set.proxy_transfer_mode = TRUE;
break;
default:
/* reserve other values for future use */
result = CURLE_UNKNOWN_OPTION;
break;
}
break;
#endif /* CURL_DISABLE_PROXY */
#if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI)
case CURLOPT_SOCKS5_GSSAPI_NEC:
/*
     * Set flag for NEC SOCKS5 support
*/
data->set.socks5_gssapi_nec = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_SOCKS5_GSSAPI_SERVICE:
case CURLOPT_PROXY_SERVICE_NAME:
/*
* Set proxy authentication service name for Kerberos 5 and SPNEGO
*/
result = setstropt(&data->set.str[STRING_PROXY_SERVICE_NAME],
va_arg(param, char *));
break;
#endif
#if !defined(CURL_DISABLE_CRYPTO_AUTH) || defined(USE_KERBEROS5) || \
defined(USE_SPNEGO)
case CURLOPT_SERVICE_NAME:
/*
* Set authentication service name for DIGEST-MD5, Kerberos 5 and SPNEGO
*/
result = setstropt(&data->set.str[STRING_SERVICE_NAME],
va_arg(param, char *));
break;
#endif
case CURLOPT_HEADERDATA:
/*
     * Custom pointer to pass to the header write callback function
*/
data->set.writeheader = (void *)va_arg(param, void *);
break;
case CURLOPT_ERRORBUFFER:
/*
* Error buffer provided by the caller to get the human readable
* error string in.
*/
data->set.errorbuffer = va_arg(param, char *);
break;
case CURLOPT_WRITEDATA:
/*
* FILE pointer to write to. Or possibly
* used as argument to the write callback.
*/
data->set.out = va_arg(param, void *);
break;
case CURLOPT_FTPPORT:
/*
* Use FTP PORT, this also specifies which IP address to use
*/
result = setstropt(&data->set.str[STRING_FTPPORT],
va_arg(param, char *));
data->set.ftp_use_port = (data->set.str[STRING_FTPPORT]) ? TRUE : FALSE;
break;
case CURLOPT_FTP_USE_EPRT:
data->set.ftp_use_eprt = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FTP_USE_EPSV:
data->set.ftp_use_epsv = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FTP_USE_PRET:
data->set.ftp_use_pret = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FTP_SSL_CCC:
data->set.ftp_ccc = (curl_ftpccc)va_arg(param, long);
break;
case CURLOPT_FTP_SKIP_PASV_IP:
/*
     * Enable or disable FTP_SKIP_PASV_IP: when enabled, the IP address in
     * PASV responses is ignored and the control connection's address is
     * used instead.
*/
data->set.ftp_skip_ip = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_READDATA:
/*
* FILE pointer to read the file to be uploaded from. Or possibly
* used as argument to the read callback.
*/
data->set.in_set = va_arg(param, void *);
break;
case CURLOPT_INFILESIZE:
/*
* If known, this should inform curl about the file size of the
* to-be-uploaded file.
*/
data->set.filesize = va_arg(param, long);
break;
case CURLOPT_INFILESIZE_LARGE:
/*
* If known, this should inform curl about the file size of the
* to-be-uploaded file.
*/
data->set.filesize = va_arg(param, curl_off_t);
break;
case CURLOPT_LOW_SPEED_LIMIT:
/*
     * The low speed limit: if the transfer speed stays below this rate for
     * CURLOPT_LOW_SPEED_TIME, the transfer is aborted.
*/
data->set.low_speed_limit=va_arg(param, long);
break;
case CURLOPT_MAX_SEND_SPEED_LARGE:
/*
     * When uploads are faster than CURLOPT_MAX_SEND_SPEED_LARGE bytes per
     * second, the transfer is throttled.
*/
data->set.max_send_speed=va_arg(param, curl_off_t);
break;
case CURLOPT_MAX_RECV_SPEED_LARGE:
/*
     * When receiving data faster than CURLOPT_MAX_RECV_SPEED_LARGE bytes per
     * second, the transfer is throttled.
*/
data->set.max_recv_speed=va_arg(param, curl_off_t);
break;
case CURLOPT_LOW_SPEED_TIME:
/*
     * The low speed time: if the transfer speed stays below the set
     * CURLOPT_LOW_SPEED_LIMIT for this amount of time, the transfer is
     * aborted.
*/
data->set.low_speed_time=va_arg(param, long);
break;
case CURLOPT_URL:
/*
* The URL to fetch.
*/
if(data->change.url_alloc) {
/* the already set URL is allocated, free it first! */
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
result = setstropt(&data->set.str[STRING_SET_URL],
va_arg(param, char *));
data->change.url = data->set.str[STRING_SET_URL];
break;
case CURLOPT_PORT:
/*
* The port number to use when getting the URL
*/
data->set.use_port = va_arg(param, long);
break;
case CURLOPT_TIMEOUT:
/*
* The maximum time you allow curl to use for a single transfer
* operation.
*/
data->set.timeout = va_arg(param, long) * 1000L;
break;
case CURLOPT_TIMEOUT_MS:
data->set.timeout = va_arg(param, long);
break;
case CURLOPT_CONNECTTIMEOUT:
/*
* The maximum time you allow curl to use to connect.
*/
data->set.connecttimeout = va_arg(param, long) * 1000L;
break;
case CURLOPT_CONNECTTIMEOUT_MS:
data->set.connecttimeout = va_arg(param, long);
break;
case CURLOPT_ACCEPTTIMEOUT_MS:
/*
* The maximum time you allow curl to wait for server connect
*/
data->set.accepttimeout = va_arg(param, long);
break;
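  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle): give the connect phase three seconds and the
   * whole transfer fifteen, using the millisecond variants.
   *
   *   curl_easy_setopt(easy, CURLOPT_CONNECTTIMEOUT_MS, 3000L);
   *   curl_easy_setopt(easy, CURLOPT_TIMEOUT_MS, 15000L);
   */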
case CURLOPT_USERPWD:
/*
* user:password to use in the operation
*/
result = setstropt_userpwd(va_arg(param, char *),
&data->set.str[STRING_USERNAME],
&data->set.str[STRING_PASSWORD]);
break;
case CURLOPT_USERNAME:
/*
* authentication user name to use in the operation
*/
result = setstropt(&data->set.str[STRING_USERNAME],
va_arg(param, char *));
break;
case CURLOPT_PASSWORD:
/*
* authentication password to use in the operation
*/
result = setstropt(&data->set.str[STRING_PASSWORD],
va_arg(param, char *));
break;
case CURLOPT_LOGIN_OPTIONS:
/*
* authentication options to use in the operation
*/
result = setstropt(&data->set.str[STRING_OPTIONS],
va_arg(param, char *));
break;
case CURLOPT_XOAUTH2_BEARER:
/*
* OAuth 2.0 bearer token to use in the operation
*/
result = setstropt(&data->set.str[STRING_BEARER],
va_arg(param, char *));
break;
case CURLOPT_POSTQUOTE:
/*
* List of RAW FTP commands to use after a transfer
*/
data->set.postquote = va_arg(param, struct curl_slist *);
break;
case CURLOPT_PREQUOTE:
/*
* List of RAW FTP commands to use prior to RETR (Wesley Laxton)
*/
data->set.prequote = va_arg(param, struct curl_slist *);
break;
case CURLOPT_QUOTE:
/*
* List of RAW FTP commands to use before a transfer
*/
data->set.quote = va_arg(param, struct curl_slist *);
break;
case CURLOPT_RESOLVE:
/*
* List of NAME:[address] names to populate the DNS cache with
* Prefix the NAME with dash (-) to _remove_ the name from the cache.
*
* Names added with this API will remain in the cache until explicitly
* removed or the handle is cleaned up.
*
* This API can remove any name from the DNS cache, but only entries
* that aren't actually in use right now will be pruned immediately.
*/
data->set.resolve = va_arg(param, struct curl_slist *);
data->change.resolve = data->set.resolve;
break;
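  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle; the host name and address are placeholders):
   * pre-populate the DNS cache so example.com:443 resolves to a fixed
   * address, and later remove that entry again with a leading dash.
   *
   *   struct curl_slist *host = NULL;
   *   host = curl_slist_append(host, "example.com:443:203.0.113.10");
   *   curl_easy_setopt(easy, CURLOPT_RESOLVE, host);
   *   curl_easy_perform(easy);
   *
   *   host = curl_slist_append(host, "-example.com:443");
   *   curl_easy_setopt(easy, CURLOPT_RESOLVE, host);
   *
   * The list must stay valid until the handle is done with it.
   */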
case CURLOPT_PROGRESSFUNCTION:
/*
* Progress callback function
*/
data->set.fprogress = va_arg(param, curl_progress_callback);
if(data->set.fprogress)
data->progress.callback = TRUE; /* no longer internal */
else
data->progress.callback = FALSE; /* NULL enforces internal */
break;
case CURLOPT_XFERINFOFUNCTION:
/*
* Transfer info callback function
*/
data->set.fxferinfo = va_arg(param, curl_xferinfo_callback);
if(data->set.fxferinfo)
data->progress.callback = TRUE; /* no longer internal */
else
data->progress.callback = FALSE; /* NULL enforces internal */
break;
case CURLOPT_PROGRESSDATA:
/*
* Custom client data to pass to the progress callback
*/
data->set.progress_client = va_arg(param, void *);
break;
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXYUSERPWD:
/*
* user:password needed to use the proxy
*/
result = setstropt_userpwd(va_arg(param, char *),
&data->set.str[STRING_PROXYUSERNAME],
&data->set.str[STRING_PROXYPASSWORD]);
break;
case CURLOPT_PROXYUSERNAME:
/*
* authentication user name to use in the operation
*/
result = setstropt(&data->set.str[STRING_PROXYUSERNAME],
va_arg(param, char *));
break;
case CURLOPT_PROXYPASSWORD:
/*
* authentication password to use in the operation
*/
result = setstropt(&data->set.str[STRING_PROXYPASSWORD],
va_arg(param, char *));
break;
case CURLOPT_NOPROXY:
/*
* proxy exception list
*/
result = setstropt(&data->set.str[STRING_NOPROXY],
va_arg(param, char *));
break;
#endif
case CURLOPT_RANGE:
/*
* What range of the file you want to transfer
*/
result = setstropt(&data->set.str[STRING_SET_RANGE],
va_arg(param, char *));
break;
case CURLOPT_RESUME_FROM:
/*
     * Resume transfer at the given file position
*/
data->set.set_resume_from = va_arg(param, long);
break;
case CURLOPT_RESUME_FROM_LARGE:
/*
     * Resume transfer at the given file position
*/
data->set.set_resume_from = va_arg(param, curl_off_t);
break;
case CURLOPT_DEBUGFUNCTION:
/*
* stderr write callback.
*/
data->set.fdebug = va_arg(param, curl_debug_callback);
/*
* if the callback provided is NULL, it'll use the default callback
*/
break;
case CURLOPT_DEBUGDATA:
/*
     * Custom pointer to pass to the debug callback set with
     * CURLOPT_DEBUGFUNCTION.
*/
data->set.debugdata = va_arg(param, void *);
break;
case CURLOPT_STDERR:
/*
* Set to a FILE * that should receive all error writes. This
* defaults to stderr for normal operations.
*/
data->set.err = va_arg(param, FILE *);
if(!data->set.err)
data->set.err = stderr;
break;
case CURLOPT_HEADERFUNCTION:
/*
* Set header write callback
*/
data->set.fwrite_header = va_arg(param, curl_write_callback);
break;
case CURLOPT_WRITEFUNCTION:
/*
* Set data write callback
*/
data->set.fwrite_func = va_arg(param, curl_write_callback);
if(!data->set.fwrite_func) {
data->set.is_fwrite_set = 0;
/* When set to NULL, reset to our internal default function */
data->set.fwrite_func = (curl_write_callback)fwrite;
}
else
data->set.is_fwrite_set = 1;
break;
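  /*
   * Example (illustrative application-side sketch; the callback name and
   * the out_file FILE * are placeholders): a write callback that dumps the
   * received body into the stream handed in through CURLOPT_WRITEDATA. It
   * must return the number of bytes it actually handled.
   *
   *   static size_t write_cb(char *ptr, size_t size, size_t nmemb,
   *                          void *userp)
   *   {
   *     FILE *out = (FILE *)userp;
   *     size_t written = fwrite(ptr, size, nmemb, out);
   *     return written * size;
   *   }
   *
   *   curl_easy_setopt(easy, CURLOPT_WRITEFUNCTION, write_cb);
   *   curl_easy_setopt(easy, CURLOPT_WRITEDATA, out_file);
   */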
case CURLOPT_READFUNCTION:
/*
* Read data callback
*/
data->set.fread_func_set = va_arg(param, curl_read_callback);
if(!data->set.fread_func_set) {
data->set.is_fread_set = 0;
/* When set to NULL, reset to our internal default function */
data->set.fread_func_set = (curl_read_callback)fread;
}
else
data->set.is_fread_set = 1;
break;
case CURLOPT_SEEKFUNCTION:
/*
* Seek callback. Might be NULL.
*/
data->set.seek_func = va_arg(param, curl_seek_callback);
break;
case CURLOPT_SEEKDATA:
/*
     * Custom client data to pass to the seek callback. Might be NULL.
*/
data->set.seek_client = va_arg(param, void *);
break;
case CURLOPT_CONV_FROM_NETWORK_FUNCTION:
/*
* "Convert from network encoding" callback
*/
data->set.convfromnetwork = va_arg(param, curl_conv_callback);
break;
case CURLOPT_CONV_TO_NETWORK_FUNCTION:
/*
* "Convert to network encoding" callback
*/
data->set.convtonetwork = va_arg(param, curl_conv_callback);
break;
case CURLOPT_CONV_FROM_UTF8_FUNCTION:
/*
* "Convert from UTF-8 encoding" callback
*/
data->set.convfromutf8 = va_arg(param, curl_conv_callback);
break;
case CURLOPT_IOCTLFUNCTION:
/*
* I/O control callback. Might be NULL.
*/
data->set.ioctl_func = va_arg(param, curl_ioctl_callback);
break;
case CURLOPT_IOCTLDATA:
/*
* I/O control data pointer. Might be NULL.
*/
data->set.ioctl_client = va_arg(param, void *);
break;
case CURLOPT_SSLCERT:
/*
* String that holds file name of the SSL certificate to use
*/
result = setstropt(&data->set.str[STRING_CERT_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_SSLCERT:
/*
* String that holds file name of the SSL certificate to use for proxy
*/
result = setstropt(&data->set.str[STRING_CERT_PROXY],
va_arg(param, char *));
break;
case CURLOPT_SSLCERTTYPE:
/*
* String that holds file type of the SSL certificate to use
*/
result = setstropt(&data->set.str[STRING_CERT_TYPE_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_SSLCERTTYPE:
/*
* String that holds file type of the SSL certificate to use for proxy
*/
result = setstropt(&data->set.str[STRING_CERT_TYPE_PROXY],
va_arg(param, char *));
break;
case CURLOPT_SSLKEY:
/*
* String that holds file name of the SSL key to use
*/
result = setstropt(&data->set.str[STRING_KEY_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_SSLKEY:
/*
* String that holds file name of the SSL key to use for proxy
*/
result = setstropt(&data->set.str[STRING_KEY_PROXY],
va_arg(param, char *));
break;
case CURLOPT_SSLKEYTYPE:
/*
* String that holds file type of the SSL key to use
*/
result = setstropt(&data->set.str[STRING_KEY_TYPE_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_SSLKEYTYPE:
/*
* String that holds file type of the SSL key to use for proxy
*/
result = setstropt(&data->set.str[STRING_KEY_TYPE_PROXY],
va_arg(param, char *));
break;
case CURLOPT_KEYPASSWD:
/*
* String that holds the SSL or SSH private key password.
*/
result = setstropt(&data->set.str[STRING_KEY_PASSWD_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_KEYPASSWD:
/*
* String that holds the SSL private key password for proxy.
*/
result = setstropt(&data->set.str[STRING_KEY_PASSWD_PROXY],
va_arg(param, char *));
break;
case CURLOPT_SSLENGINE:
/*
* String that holds the SSL crypto engine.
*/
argptr = va_arg(param, char *);
if(argptr && argptr[0])
result = Curl_ssl_set_engine(data, argptr);
break;
case CURLOPT_SSLENGINE_DEFAULT:
/*
* flag to set engine as default.
*/
result = Curl_ssl_set_engine_default(data);
break;
case CURLOPT_CRLF:
/*
* Kludgy option to enable CRLF conversions. Subject for removal.
*/
data->set.crlf = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_INTERFACE:
/*
* Set what interface or address/hostname to bind the socket to when
* performing an operation and thus what from-IP your connection will use.
*/
result = setstropt(&data->set.str[STRING_DEVICE],
va_arg(param, char *));
break;
case CURLOPT_LOCALPORT:
/*
* Set what local port to bind the socket to when performing an operation.
*/
data->set.localport = curlx_sltous(va_arg(param, long));
break;
case CURLOPT_LOCALPORTRANGE:
/*
* Set number of local ports to try, starting with CURLOPT_LOCALPORT.
*/
data->set.localportrange = curlx_sltosi(va_arg(param, long));
break;
case CURLOPT_KRBLEVEL:
/*
* A string that defines the kerberos security level.
*/
result = setstropt(&data->set.str[STRING_KRB_LEVEL],
va_arg(param, char *));
data->set.krb = (data->set.str[STRING_KRB_LEVEL]) ? TRUE : FALSE;
break;
case CURLOPT_GSSAPI_DELEGATION:
/*
* GSS-API credential delegation
*/
data->set.gssapi_delegation = va_arg(param, long);
break;
case CURLOPT_SSL_VERIFYPEER:
/*
* Enable peer SSL verifying.
*/
data->set.ssl.primary.verifypeer = (0 != va_arg(param, long)) ?
TRUE : FALSE;
break;
case CURLOPT_PROXY_SSL_VERIFYPEER:
/*
* Enable peer SSL verifying for proxy.
*/
data->set.proxy_ssl.primary.verifypeer =
(0 != va_arg(param, long))?TRUE:FALSE;
break;
case CURLOPT_SSL_VERIFYHOST:
/*
* Enable verification of the host name in the peer certificate
*/
arg = va_arg(param, long);
/* Obviously people are not reading documentation and too many thought
this argument took a boolean when it wasn't and misused it. We thus ban
1 as a sensible input and we warn about its use. Then we only have the
2 action internally stored as TRUE. */
if(1 == arg) {
failf(data, "CURLOPT_SSL_VERIFYHOST no longer supports 1 as value!");
return CURLE_BAD_FUNCTION_ARGUMENT;
}
data->set.ssl.primary.verifyhost = (0 != arg) ? TRUE : FALSE;
break;
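  /*
   * Example (illustrative application-side sketch, assuming an already
   * initialised easy handle; the CA bundle path is a placeholder): keep
   * full certificate verification enabled. Note that 2L is the only
   * accepted non-zero value for CURLOPT_SSL_VERIFYHOST.
   *
   *   curl_easy_setopt(easy, CURLOPT_SSL_VERIFYPEER, 1L);
   *   curl_easy_setopt(easy, CURLOPT_SSL_VERIFYHOST, 2L);
   *   curl_easy_setopt(easy, CURLOPT_CAINFO, "/etc/ssl/certs/ca-bundle.crt");
   */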
case CURLOPT_PROXY_SSL_VERIFYHOST:
/*
* Enable verification of the host name in the peer certificate for proxy
*/
arg = va_arg(param, long);
/* Obviously people are not reading documentation and too many thought
this argument took a boolean when it wasn't and misused it. We thus ban
1 as a sensible input and we warn about its use. Then we only have the
2 action internally stored as TRUE. */
if(1 == arg) {
failf(data, "CURLOPT_SSL_VERIFYHOST no longer supports 1 as value!");
return CURLE_BAD_FUNCTION_ARGUMENT;
}
data->set.proxy_ssl.primary.verifyhost = (0 != arg)?TRUE:FALSE;
break;
case CURLOPT_SSL_VERIFYSTATUS:
/*
* Enable certificate status verifying.
*/
if(!Curl_ssl_cert_status_request()) {
result = CURLE_NOT_BUILT_IN;
break;
}
data->set.ssl.primary.verifystatus = (0 != va_arg(param, long)) ?
TRUE : FALSE;
break;
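  /* Illustrative usage sketch (assumes a CURL handle "curl"):

       curl_easy_setopt(curl, CURLOPT_SSL_VERIFYSTATUS, 1L);

     This requests certificate status (OCSP stapling) verification and is only
     honored by backends for which Curl_ssl_cert_status_request() returns
     true, as checked above. */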
case CURLOPT_SSL_CTX_FUNCTION:
#ifdef have_curlssl_ssl_ctx
/*
* Set a SSL_CTX callback
*/
data->set.ssl.fsslctx = va_arg(param, curl_ssl_ctx_callback);
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
case CURLOPT_SSL_CTX_DATA:
#ifdef have_curlssl_ssl_ctx
/*
* Set a SSL_CTX callback parameter pointer
*/
data->set.ssl.fsslctxp = va_arg(param, void *);
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
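  /* Illustrative usage sketch for the two SSL_CTX options above (the callback
     name is hypothetical; only meaningful with backends that expose an
     SSL_CTX):

       static CURLcode my_sslctx_cb(CURL *curl, void *sslctx, void *userptr)
       {
         (void)curl; (void)sslctx; (void)userptr;
         return CURLE_OK;
       }

       curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, my_sslctx_cb);
       curl_easy_setopt(curl, CURLOPT_SSL_CTX_DATA, NULL);
  */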
case CURLOPT_SSL_FALSESTART:
/*
* Enable TLS false start.
*/
if(!Curl_ssl_false_start()) {
result = CURLE_NOT_BUILT_IN;
break;
}
data->set.ssl.falsestart = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_CERTINFO:
#ifdef have_curlssl_certinfo
data->set.ssl.certinfo = (0 != va_arg(param, long)) ? TRUE : FALSE;
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
case CURLOPT_PINNEDPUBLICKEY:
#ifdef have_curlssl_pinnedpubkey /* only by supported backends */
/*
* Set pinned public key for SSL connection.
* Specify file name of the public key in DER format.
*/
result = setstropt(&data->set.str[STRING_SSL_PINNEDPUBLICKEY_ORIG],
va_arg(param, char *));
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
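  /* Illustrative usage sketch (the file name is hypothetical):

       curl_easy_setopt(curl, CURLOPT_PINNEDPUBLICKEY, "/path/to/pubkey.der");

     Only backends built with have_curlssl_pinnedpubkey support this option. */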
case CURLOPT_PROXY_PINNEDPUBLICKEY:
#ifdef have_curlssl_pinnedpubkey /* only by supported backends */
/*
* Set pinned public key for SSL connection.
* Specify file name of the public key in DER format.
*/
result = setstropt(&data->set.str[STRING_SSL_PINNEDPUBLICKEY_PROXY],
va_arg(param, char *));
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
case CURLOPT_CAINFO:
/*
* Set CA info for SSL connection. Specify file name of the CA certificate
*/
result = setstropt(&data->set.str[STRING_SSL_CAFILE_ORIG],
va_arg(param, char *));
break;
case CURLOPT_PROXY_CAINFO:
/*
   * Set CA info for the proxy SSL connection. Specify file name of the
* CA certificate
*/
result = setstropt(&data->set.str[STRING_SSL_CAFILE_PROXY],
va_arg(param, char *));
break;
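  /* Illustrative usage sketch for the two CA-info options above (file names
     are hypothetical):

       curl_easy_setopt(curl, CURLOPT_CAINFO, "/etc/ssl/certs/ca-bundle.crt");
       curl_easy_setopt(curl, CURLOPT_PROXY_CAINFO,
                        "/etc/ssl/certs/proxy-ca.crt");

     The proxy variant only affects the TLS connection to an HTTPS proxy. */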
case CURLOPT_CAPATH:
#ifdef have_curlssl_ca_path /* not supported by all backends */
/*
* Set CA path info for SSL connection. Specify directory name of the CA
* certificates which have been prepared using openssl c_rehash utility.
*/
/* This does not work on windows. */
result = setstropt(&data->set.str[STRING_SSL_CAPATH_ORIG],
va_arg(param, char *));
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
case CURLOPT_PROXY_CAPATH:
#ifdef have_curlssl_ca_path /* not supported by all backends */
/*
   * Set CA path info for the proxy SSL connection. Specify directory name of
* CA certificates which have been prepared using openssl c_rehash utility.
*/
/* This does not work on windows. */
result = setstropt(&data->set.str[STRING_SSL_CAPATH_PROXY],
va_arg(param, char *));
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
case CURLOPT_CRLFILE:
/*
* Set CRL file info for SSL connection. Specify file name of the CRL
* to check certificates revocation
*/
result = setstropt(&data->set.str[STRING_SSL_CRLFILE_ORIG],
va_arg(param, char *));
break;
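  /* Illustrative usage sketch (the PEM CRL file name is hypothetical):

       curl_easy_setopt(curl, CURLOPT_CRLFILE, "/etc/ssl/crl/revoked.pem");
  */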
case CURLOPT_PROXY_CRLFILE:
/*
* Set CRL file info for SSL connection for proxy. Specify file name of the
* CRL to check certificates revocation
*/
result = setstropt(&data->set.str[STRING_SSL_CRLFILE_PROXY],
va_arg(param, char *));
break;
case CURLOPT_ISSUERCERT:
/*
* Set Issuer certificate file
* to check certificates issuer
*/
result = setstropt(&data->set.str[STRING_SSL_ISSUERCERT_ORIG],
va_arg(param, char *));
break;
case CURLOPT_TELNETOPTIONS:
/*
* Set a linked list of telnet options
*/
data->set.telnet_options = va_arg(param, struct curl_slist *);
break;
case CURLOPT_BUFFERSIZE:
/*
* The application kindly asks for a differently sized receive buffer.
* If it seems reasonable, we'll use it.
*/
data->set.buffer_size = va_arg(param, long);
if(data->set.buffer_size > MAX_BUFSIZE)
data->set.buffer_size = MAX_BUFSIZE; /* huge internal default */
else if(data->set.buffer_size < 1)
data->set.buffer_size = BUFSIZE;
    /* Resize only if larger than default buffer size. */
    if(data->set.buffer_size > BUFSIZE) {
      char *newbuff = realloc(data->state.buffer,
                              data->set.buffer_size + 1);
      if(!newbuff) {
        DEBUGF(fprintf(stderr, "Error: realloc of buffer failed\n"));
        result = CURLE_OUT_OF_MEMORY;
      }
      else
        data->state.buffer = newbuff; /* only replace the pointer on success,
                                         so the old buffer is not leaked if
                                         realloc fails */
    }
break;
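  /* Illustrative usage sketch:

       curl_easy_setopt(curl, CURLOPT_BUFFERSIZE, 102400L);

     As implemented above, requests above MAX_BUFSIZE are capped, values below
     1 fall back to BUFSIZE, and the receive buffer is only reallocated when
     the requested size exceeds the default. */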
case CURLOPT_NOSIGNAL:
/*
* The application asks not to set any signal() or alarm() handlers,
* even when using a timeout.
*/
data->set.no_signal = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
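  /* Illustrative usage sketch, typical for multi-threaded applications that
     must avoid signal-based DNS timeouts:

       curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);
  */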
case CURLOPT_SHARE:
{
struct Curl_share *set;
set = va_arg(param, struct Curl_share *);
/* disconnect from old share, if any */
if(data->share) {
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
if(data->dns.hostcachetype == HCACHE_SHARED) {
data->dns.hostcache = NULL;
data->dns.hostcachetype = HCACHE_NONE;
}
#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_COOKIES)
if(data->share->cookies == data->cookies)
data->cookies = NULL;
#endif
if(data->share->sslsession == data->state.session)
data->state.session = NULL;
data->share->dirty--;
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
data->share = NULL;
}
    /* use the new share if one is set */
data->share = set;
if(data->share) {
Curl_share_lock(data, CURL_LOCK_DATA_SHARE, CURL_LOCK_ACCESS_SINGLE);
data->share->dirty++;
if(data->share->specifier & (1<< CURL_LOCK_DATA_DNS)) {
/* use shared host cache */
data->dns.hostcache = &data->share->hostcache;
data->dns.hostcachetype = HCACHE_SHARED;
}
#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_COOKIES)
if(data->share->cookies) {
/* use shared cookie list, first free own one if any */
Curl_cookie_cleanup(data->cookies);
/* enable cookies since we now use a share that uses cookies! */
data->cookies = data->share->cookies;
}
#endif /* CURL_DISABLE_HTTP */
if(data->share->sslsession) {
data->set.general_ssl.max_ssl_sessions = data->share->max_ssl_sessions;
data->state.session = data->share->sslsession;
}
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
}
    /* checking the host cache here is not needed,
     * it will be done by curl_easy_perform */
}
break;
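  /* Illustrative usage sketch for sharing a DNS cache between easy handles
     (error checking omitted; "share" and "curl" are hypothetical):

       CURLSH *share = curl_share_init();
       curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
       curl_easy_setopt(curl, CURLOPT_SHARE, share);

     Passing a NULL share pointer detaches the handle from its current share,
     as the code above shows. */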
case CURLOPT_PRIVATE:
/*
* Set private data pointer.
*/
data->set.private_data = va_arg(param, void *);
break;
case CURLOPT_MAXFILESIZE:
/*
* Set the maximum size of a file to download.
*/
data->set.max_filesize = va_arg(param, long);
break;
#ifdef USE_SSL
case CURLOPT_USE_SSL:
/*
* Make transfers attempt to use SSL/TLS.
*/
data->set.use_ssl = (curl_usessl)va_arg(param, long);
break;
case CURLOPT_SSL_OPTIONS:
arg = va_arg(param, long);
data->set.ssl.enable_beast = arg&CURLSSLOPT_ALLOW_BEAST?TRUE:FALSE;
data->set.ssl.no_revoke = !!(arg & CURLSSLOPT_NO_REVOKE);
break;
case CURLOPT_PROXY_SSL_OPTIONS:
arg = va_arg(param, long);
data->set.proxy_ssl.enable_beast = arg&CURLSSLOPT_ALLOW_BEAST?TRUE:FALSE;
data->set.proxy_ssl.no_revoke = !!(arg & CURLSSLOPT_NO_REVOKE);
break;
#endif
case CURLOPT_FTPSSLAUTH:
/*
* Set a specific auth for FTP-SSL transfers.
*/
data->set.ftpsslauth = (curl_ftpauth)va_arg(param, long);
break;
case CURLOPT_IPRESOLVE:
data->set.ipver = va_arg(param, long);
break;
case CURLOPT_MAXFILESIZE_LARGE:
/*
* Set the maximum size of a file to download.
*/
data->set.max_filesize = va_arg(param, curl_off_t);
break;
case CURLOPT_TCP_NODELAY:
/*
* Enable or disable TCP_NODELAY, which will disable/enable the Nagle
* algorithm
*/
data->set.tcp_nodelay = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_FTP_ACCOUNT:
result = setstropt(&data->set.str[STRING_FTP_ACCOUNT],
va_arg(param, char *));
break;
case CURLOPT_IGNORE_CONTENT_LENGTH:
data->set.ignorecl = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_CONNECT_ONLY:
/*
* No data transfer, set up connection and let application use the socket
*/
data->set.connect_only = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
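  /* Illustrative usage sketch: establish the connection only and then drive
     the socket directly (error checking omitted):

       curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1L);
       curl_easy_perform(curl);
       curl_socket_t sockfd;
       curl_easy_getinfo(curl, CURLINFO_ACTIVESOCKET, &sockfd);
  */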
case CURLOPT_FTP_ALTERNATIVE_TO_USER:
result = setstropt(&data->set.str[STRING_FTP_ALTERNATIVE_TO_USER],
va_arg(param, char *));
break;
case CURLOPT_SOCKOPTFUNCTION:
/*
* socket callback function: called after socket() but before connect()
*/
data->set.fsockopt = va_arg(param, curl_sockopt_callback);
break;
case CURLOPT_SOCKOPTDATA:
/*
* socket callback data pointer. Might be NULL.
*/
data->set.sockopt_client = va_arg(param, void *);
break;
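  /* Illustrative usage sketch for the socket options callback above (the
     callback name is hypothetical; it runs after socket() but before
     connect()):

       static int my_sockopt_cb(void *clientp, curl_socket_t curlfd,
                                curlsocktype purpose)
       {
         (void)clientp; (void)curlfd; (void)purpose;
         return CURL_SOCKOPT_OK;
       }

       curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, my_sockopt_cb);
       curl_easy_setopt(curl, CURLOPT_SOCKOPTDATA, NULL);
  */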
case CURLOPT_OPENSOCKETFUNCTION:
/*
* open/create socket callback function: called instead of socket(),
* before connect()
*/
data->set.fopensocket = va_arg(param, curl_opensocket_callback);
break;
case CURLOPT_OPENSOCKETDATA:
/*
* socket callback data pointer. Might be NULL.
*/
data->set.opensocket_client = va_arg(param, void *);
break;
case CURLOPT_CLOSESOCKETFUNCTION:
/*
* close socket callback function: called instead of close()
* when shutting down a connection
*/
data->set.fclosesocket = va_arg(param, curl_closesocket_callback);
break;
case CURLOPT_CLOSESOCKETDATA:
/*
* socket callback data pointer. Might be NULL.
*/
data->set.closesocket_client = va_arg(param, void *);
break;
case CURLOPT_SSL_SESSIONID_CACHE:
data->set.general_ssl.sessionid = (0 != va_arg(param, long)) ?
TRUE : FALSE;
break;
#ifdef USE_LIBSSH2
/* we only include SSH options if explicitly built to support SSH */
case CURLOPT_SSH_AUTH_TYPES:
data->set.ssh_auth_types = va_arg(param, long);
break;
case CURLOPT_SSH_PUBLIC_KEYFILE:
/*
* Use this file instead of the $HOME/.ssh/id_dsa.pub file
*/
result = setstropt(&data->set.str[STRING_SSH_PUBLIC_KEY],
va_arg(param, char *));
break;
case CURLOPT_SSH_PRIVATE_KEYFILE:
/*
* Use this file instead of the $HOME/.ssh/id_dsa file
*/
result = setstropt(&data->set.str[STRING_SSH_PRIVATE_KEY],
va_arg(param, char *));
break;
case CURLOPT_SSH_HOST_PUBLIC_KEY_MD5:
/*
* Option to allow for the MD5 of the host public key to be checked
* for validation purposes.
*/
result = setstropt(&data->set.str[STRING_SSH_HOST_PUBLIC_KEY_MD5],
va_arg(param, char *));
break;
#ifdef HAVE_LIBSSH2_KNOWNHOST_API
case CURLOPT_SSH_KNOWNHOSTS:
/*
* Store the file name to read known hosts from.
*/
result = setstropt(&data->set.str[STRING_SSH_KNOWNHOSTS],
va_arg(param, char *));
break;
case CURLOPT_SSH_KEYFUNCTION:
    /* setting to NULL is fine since the ssh.c functions themselves will
       then revert to using the internal default */
data->set.ssh_keyfunc = va_arg(param, curl_sshkeycallback);
break;
case CURLOPT_SSH_KEYDATA:
/*
* Custom client data to pass to the SSH keyfunc callback
*/
data->set.ssh_keyfunc_userp = va_arg(param, void *);
break;
#endif /* HAVE_LIBSSH2_KNOWNHOST_API */
#endif /* USE_LIBSSH2 */
case CURLOPT_HTTP_TRANSFER_DECODING:
    /*
     * Disable libcurl's decoding of transfer-encoded content when set to zero
     */
data->set.http_te_skip = (0 == va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_HTTP_CONTENT_DECODING:
    /*
     * When set to zero, content decoding is disabled and the raw (still
     * content-encoded) data is passed to the application
     */
data->set.http_ce_skip = (0 == va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_NEW_FILE_PERMS:
/*
* Uses these permissions instead of 0644
*/
data->set.new_file_perms = va_arg(param, long);
break;
case CURLOPT_NEW_DIRECTORY_PERMS:
/*
* Uses these permissions instead of 0755
*/
data->set.new_directory_perms = va_arg(param, long);
break;
case CURLOPT_ADDRESS_SCOPE:
/*
* We always get longs when passed plain numericals, but for this value we
* know that an unsigned int will always hold the value so we blindly
* typecast to this type
*/
data->set.scope_id = curlx_sltoui(va_arg(param, long));
break;
case CURLOPT_PROTOCOLS:
    /* set the bitmask for the protocols that are allowed to be used for the
       transfer, which helps an application that takes URLs from users or
       other external inputs and wants to restrict which protocol(s) to deal
       with. Defaults to CURLPROTO_ALL. */
data->set.allowed_protocols = va_arg(param, long);
break;
case CURLOPT_REDIR_PROTOCOLS:
/* set the bitmask for the protocols that libcurl is allowed to follow to,
as a subset of the CURLOPT_PROTOCOLS ones. That means the protocol needs
to be set in both bitmasks to be allowed to get redirected to. Defaults
to all protocols except FILE and SCP. */
data->set.redir_protocols = va_arg(param, long);
break;
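  /* Illustrative usage sketch: restrict both the initial transfer and any
     redirects to HTTP(S):

       curl_easy_setopt(curl, CURLOPT_PROTOCOLS,
                        CURLPROTO_HTTP | CURLPROTO_HTTPS);
       curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTPS);
  */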
case CURLOPT_DEFAULT_PROTOCOL:
/* Set the protocol to use when the URL doesn't include any protocol */
result = setstropt(&data->set.str[STRING_DEFAULT_PROTOCOL],
va_arg(param, char *));
break;
case CURLOPT_MAIL_FROM:
/* Set the SMTP mail originator */
result = setstropt(&data->set.str[STRING_MAIL_FROM],
va_arg(param, char *));
break;
case CURLOPT_MAIL_AUTH:
/* Set the SMTP auth originator */
result = setstropt(&data->set.str[STRING_MAIL_AUTH],
va_arg(param, char *));
break;
case CURLOPT_MAIL_RCPT:
/* Set the list of mail recipients */
data->set.mail_rcpt = va_arg(param, struct curl_slist *);
break;
case CURLOPT_SASL_IR:
/* Enable/disable SASL initial response */
data->set.sasl_ir = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_RTSP_REQUEST:
{
/*
* Set the RTSP request method (OPTIONS, SETUP, PLAY, etc...)
* Would this be better if the RTSPREQ_* were just moved into here?
*/
long curl_rtspreq = va_arg(param, long);
Curl_RtspReq rtspreq = RTSPREQ_NONE;
switch(curl_rtspreq) {
case CURL_RTSPREQ_OPTIONS:
rtspreq = RTSPREQ_OPTIONS;
break;
case CURL_RTSPREQ_DESCRIBE:
rtspreq = RTSPREQ_DESCRIBE;
break;
case CURL_RTSPREQ_ANNOUNCE:
rtspreq = RTSPREQ_ANNOUNCE;
break;
case CURL_RTSPREQ_SETUP:
rtspreq = RTSPREQ_SETUP;
break;
case CURL_RTSPREQ_PLAY:
rtspreq = RTSPREQ_PLAY;
break;
case CURL_RTSPREQ_PAUSE:
rtspreq = RTSPREQ_PAUSE;
break;
case CURL_RTSPREQ_TEARDOWN:
rtspreq = RTSPREQ_TEARDOWN;
break;
case CURL_RTSPREQ_GET_PARAMETER:
rtspreq = RTSPREQ_GET_PARAMETER;
break;
case CURL_RTSPREQ_SET_PARAMETER:
rtspreq = RTSPREQ_SET_PARAMETER;
break;
case CURL_RTSPREQ_RECORD:
rtspreq = RTSPREQ_RECORD;
break;
case CURL_RTSPREQ_RECEIVE:
rtspreq = RTSPREQ_RECEIVE;
break;
default:
rtspreq = RTSPREQ_NONE;
}
data->set.rtspreq = rtspreq;
break;
}
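  /* Illustrative usage sketch (the URL is hypothetical): issue an RTSP
     OPTIONS request:

       curl_easy_setopt(curl, CURLOPT_URL, "rtsp://example.com/media");
       curl_easy_setopt(curl, CURLOPT_RTSP_STREAM_URI,
                        "rtsp://example.com/media");
       curl_easy_setopt(curl, CURLOPT_RTSP_REQUEST, (long)CURL_RTSPREQ_OPTIONS);
  */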
case CURLOPT_RTSP_SESSION_ID:
/*
* Set the RTSP Session ID manually. Useful if the application is
* resuming a previously established RTSP session
*/
result = setstropt(&data->set.str[STRING_RTSP_SESSION_ID],
va_arg(param, char *));
break;
case CURLOPT_RTSP_STREAM_URI:
/*
* Set the Stream URI for the RTSP request. Unless the request is
* for generic server options, the application will need to set this.
*/
result = setstropt(&data->set.str[STRING_RTSP_STREAM_URI],
va_arg(param, char *));
break;
case CURLOPT_RTSP_TRANSPORT:
/*
* The content of the Transport: header for the RTSP request
*/
result = setstropt(&data->set.str[STRING_RTSP_TRANSPORT],
va_arg(param, char *));
break;
case CURLOPT_RTSP_CLIENT_CSEQ:
/*
* Set the CSEQ number to issue for the next RTSP request. Useful if the
* application is resuming a previously broken connection. The CSEQ
* will increment from this new number henceforth.
*/
data->state.rtsp_next_client_CSeq = va_arg(param, long);
break;
case CURLOPT_RTSP_SERVER_CSEQ:
/* Same as the above, but for server-initiated requests */
    data->state.rtsp_next_server_CSeq = va_arg(param, long);
break;
case CURLOPT_INTERLEAVEDATA:
data->set.rtp_out = va_arg(param, void *);
break;
case CURLOPT_INTERLEAVEFUNCTION:
/* Set the user defined RTP write function */
data->set.fwrite_rtp = va_arg(param, curl_write_callback);
break;
case CURLOPT_WILDCARDMATCH:
data->set.wildcardmatch = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_CHUNK_BGN_FUNCTION:
data->set.chunk_bgn = va_arg(param, curl_chunk_bgn_callback);
break;
case CURLOPT_CHUNK_END_FUNCTION:
data->set.chunk_end = va_arg(param, curl_chunk_end_callback);
break;
case CURLOPT_FNMATCH_FUNCTION:
data->set.fnmatch = va_arg(param, curl_fnmatch_callback);
break;
case CURLOPT_CHUNK_DATA:
data->wildcard.customptr = va_arg(param, void *);
break;
case CURLOPT_FNMATCH_DATA:
data->set.fnmatch_data = va_arg(param, void *);
break;
#ifdef USE_TLS_SRP
case CURLOPT_TLSAUTH_USERNAME:
result = setstropt(&data->set.str[STRING_TLSAUTH_USERNAME_ORIG],
va_arg(param, char *));
if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] && !data->set.ssl.authtype)
data->set.ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
break;
case CURLOPT_PROXY_TLSAUTH_USERNAME:
result = setstropt(&data->set.str[STRING_TLSAUTH_USERNAME_PROXY],
va_arg(param, char *));
if(data->set.str[STRING_TLSAUTH_USERNAME_PROXY] &&
!data->set.proxy_ssl.authtype)
data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
break;
case CURLOPT_TLSAUTH_PASSWORD:
result = setstropt(&data->set.str[STRING_TLSAUTH_PASSWORD_ORIG],
va_arg(param, char *));
if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] && !data->set.ssl.authtype)
data->set.ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
break;
case CURLOPT_PROXY_TLSAUTH_PASSWORD:
result = setstropt(&data->set.str[STRING_TLSAUTH_PASSWORD_PROXY],
va_arg(param, char *));
if(data->set.str[STRING_TLSAUTH_USERNAME_PROXY] &&
!data->set.proxy_ssl.authtype)
data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
break;
case CURLOPT_TLSAUTH_TYPE:
if(strncasecompare((char *)va_arg(param, char *), "SRP", strlen("SRP")))
data->set.ssl.authtype = CURL_TLSAUTH_SRP;
else
data->set.ssl.authtype = CURL_TLSAUTH_NONE;
break;
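  /* Illustrative usage sketch for TLS-SRP authentication (credentials are
     hypothetical; these options are only compiled in when USE_TLS_SRP is
     defined):

       curl_easy_setopt(curl, CURLOPT_TLSAUTH_TYPE, "SRP");
       curl_easy_setopt(curl, CURLOPT_TLSAUTH_USERNAME, "user");
       curl_easy_setopt(curl, CURLOPT_TLSAUTH_PASSWORD, "secret");
  */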
case CURLOPT_PROXY_TLSAUTH_TYPE:
if(strncasecompare((char *)va_arg(param, char *), "SRP", strlen("SRP")))
data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP;
else
data->set.proxy_ssl.authtype = CURL_TLSAUTH_NONE;
break;
#endif
case CURLOPT_DNS_SERVERS:
result = Curl_set_dns_servers(data, va_arg(param, char *));
break;
case CURLOPT_DNS_INTERFACE:
result = Curl_set_dns_interface(data, va_arg(param, char *));
break;
case CURLOPT_DNS_LOCAL_IP4:
result = Curl_set_dns_local_ip4(data, va_arg(param, char *));
break;
case CURLOPT_DNS_LOCAL_IP6:
result = Curl_set_dns_local_ip6(data, va_arg(param, char *));
break;
case CURLOPT_TCP_KEEPALIVE:
data->set.tcp_keepalive = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_TCP_KEEPIDLE:
data->set.tcp_keepidle = va_arg(param, long);
break;
case CURLOPT_TCP_KEEPINTVL:
data->set.tcp_keepintvl = va_arg(param, long);
break;
case CURLOPT_TCP_FASTOPEN:
#if defined(CONNECT_DATA_IDEMPOTENT) || defined(MSG_FASTOPEN)
data->set.tcp_fastopen = (0 != va_arg(param, long))?TRUE:FALSE;
#else
result = CURLE_NOT_BUILT_IN;
#endif
break;
case CURLOPT_SSL_ENABLE_NPN:
data->set.ssl_enable_npn = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_SSL_ENABLE_ALPN:
data->set.ssl_enable_alpn = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
#ifdef USE_UNIX_SOCKETS
case CURLOPT_UNIX_SOCKET_PATH:
data->set.abstract_unix_socket = FALSE;
result = setstropt(&data->set.str[STRING_UNIX_SOCKET_PATH],
va_arg(param, char *));
break;
case CURLOPT_ABSTRACT_UNIX_SOCKET:
data->set.abstract_unix_socket = TRUE;
result = setstropt(&data->set.str[STRING_UNIX_SOCKET_PATH],
va_arg(param, char *));
break;
#endif
case CURLOPT_PATH_AS_IS:
data->set.path_as_is = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_PIPEWAIT:
data->set.pipewait = (0 != va_arg(param, long)) ? TRUE : FALSE;
break;
case CURLOPT_STREAM_WEIGHT:
#ifndef USE_NGHTTP2
return CURLE_NOT_BUILT_IN;
#else
arg = va_arg(param, long);
if((arg>=1) && (arg <= 256))
data->set.stream_weight = (int)arg;
break;
#endif
case CURLOPT_STREAM_DEPENDS:
case CURLOPT_STREAM_DEPENDS_E:
{
#ifndef USE_NGHTTP2
return CURLE_NOT_BUILT_IN;
#else
struct Curl_easy *dep = va_arg(param, struct Curl_easy *);
if(!dep || GOOD_EASY_HANDLE(dep)) {
if(data->set.stream_depends_on) {
Curl_http2_remove_child(data->set.stream_depends_on, data);
}
Curl_http2_add_child(dep, data, (option == CURLOPT_STREAM_DEPENDS_E));
}
break;
#endif
}
case CURLOPT_CONNECT_TO:
data->set.connect_to = va_arg(param, struct curl_slist *);
break;
default:
    /* unknown option and its argument: report it as unknown */
result = CURLE_UNKNOWN_OPTION;
break;
}
return result;
}
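
#if 0
/* Illustrative sketch only (kept out of the build, following the file's own
   "#if 0" convention for saved example code): a caller-side view of a few of
   the options handled in the switch above, driven through the public
   curl_easy_setopt() API. The socket path, URL and credentials are made-up
   example values, not anything this file defines. */
#include <curl/curl.h>

static CURLcode sketch_easy_options(void)
{
  CURLcode result;
  CURL *easy = curl_easy_init();
  if(!easy)
    return CURLE_FAILED_INIT;

  /* talk to the server over a local Unix domain socket (example path) */
  curl_easy_setopt(easy, CURLOPT_UNIX_SOCKET_PATH, "/var/run/example.sock");

  /* enable TCP keep-alive probes with explicit idle time and interval */
  curl_easy_setopt(easy, CURLOPT_TCP_KEEPALIVE, 1L);
  curl_easy_setopt(easy, CURLOPT_TCP_KEEPIDLE, 120L);
  curl_easy_setopt(easy, CURLOPT_TCP_KEEPINTVL, 60L);

  /* request TLS-SRP; "SRP" is the only type the switch above maps to SRP */
  curl_easy_setopt(easy, CURLOPT_TLSAUTH_TYPE, "SRP");
  curl_easy_setopt(easy, CURLOPT_TLSAUTH_USERNAME, "user");
  curl_easy_setopt(easy, CURLOPT_TLSAUTH_PASSWORD, "secret");

  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
  result = curl_easy_perform(easy);
  curl_easy_cleanup(easy);
  return result;
}
#endif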
#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
static void conn_reset_postponed_data(struct connectdata *conn, int num)
{
struct postponed_data * const psnd = &(conn->postponed[num]);
if(psnd->buffer) {
DEBUGASSERT(psnd->allocated_size > 0);
DEBUGASSERT(psnd->recv_size <= psnd->allocated_size);
DEBUGASSERT(psnd->recv_size ?
(psnd->recv_processed < psnd->recv_size) :
(psnd->recv_processed == 0));
DEBUGASSERT(psnd->bindsock != CURL_SOCKET_BAD);
free(psnd->buffer);
psnd->buffer = NULL;
psnd->allocated_size = 0;
psnd->recv_size = 0;
psnd->recv_processed = 0;
#ifdef DEBUGBUILD
psnd->bindsock = CURL_SOCKET_BAD; /* used only for DEBUGASSERT */
#endif /* DEBUGBUILD */
}
else {
DEBUGASSERT(psnd->allocated_size == 0);
DEBUGASSERT(psnd->recv_size == 0);
DEBUGASSERT(psnd->recv_processed == 0);
DEBUGASSERT(psnd->bindsock == CURL_SOCKET_BAD);
}
}
static void conn_reset_all_postponed_data(struct connectdata *conn)
{
conn_reset_postponed_data(conn, 0);
conn_reset_postponed_data(conn, 1);
}
#else /* ! USE_RECV_BEFORE_SEND_WORKAROUND */
/* Use "do-nothing" macros instead of functions when workaround not used */
#define conn_reset_postponed_data(c,n) do {} WHILE_FALSE
#define conn_reset_all_postponed_data(c) do {} WHILE_FALSE
#endif /* ! USE_RECV_BEFORE_SEND_WORKAROUND */
static void conn_free(struct connectdata *conn)
{
if(!conn)
return;
/* possible left-overs from the async name resolvers */
Curl_resolver_cancel(conn);
/* close the SSL stuff before we close any sockets since they may
write to the sockets */
Curl_ssl_close(conn, FIRSTSOCKET);
Curl_ssl_close(conn, SECONDARYSOCKET);
/* close possibly still open sockets */
if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET])
Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]);
if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET])
Curl_closesocket(conn, conn->sock[FIRSTSOCKET]);
if(CURL_SOCKET_BAD != conn->tempsock[0])
Curl_closesocket(conn, conn->tempsock[0]);
if(CURL_SOCKET_BAD != conn->tempsock[1])
Curl_closesocket(conn, conn->tempsock[1]);
#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \
defined(NTLM_WB_ENABLED)
Curl_ntlm_wb_cleanup(conn);
#endif
Curl_safefree(conn->user);
Curl_safefree(conn->passwd);
Curl_safefree(conn->oauth_bearer);
Curl_safefree(conn->options);
Curl_safefree(conn->http_proxy.user);
Curl_safefree(conn->socks_proxy.user);
Curl_safefree(conn->http_proxy.passwd);
Curl_safefree(conn->socks_proxy.passwd);
Curl_safefree(conn->allocptr.proxyuserpwd);
Curl_safefree(conn->allocptr.uagent);
Curl_safefree(conn->allocptr.userpwd);
Curl_safefree(conn->allocptr.accept_encoding);
Curl_safefree(conn->allocptr.te);
Curl_safefree(conn->allocptr.rangeline);
Curl_safefree(conn->allocptr.ref);
Curl_safefree(conn->allocptr.host);
Curl_safefree(conn->allocptr.cookiehost);
Curl_safefree(conn->allocptr.rtsp_transport);
Curl_safefree(conn->trailer);
Curl_safefree(conn->host.rawalloc); /* host name buffer */
Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
Curl_safefree(conn->secondaryhostname);
Curl_safefree(conn->http_proxy.host.rawalloc); /* http proxy name buffer */
Curl_safefree(conn->socks_proxy.host.rawalloc); /* socks proxy name buffer */
Curl_safefree(conn->master_buffer);
conn_reset_all_postponed_data(conn);
Curl_llist_destroy(conn->send_pipe, NULL);
Curl_llist_destroy(conn->recv_pipe, NULL);
conn->send_pipe = NULL;
conn->recv_pipe = NULL;
Curl_safefree(conn->localdev);
Curl_free_primary_ssl_config(&conn->ssl_config);
Curl_free_primary_ssl_config(&conn->proxy_ssl_config);
#ifdef USE_UNIX_SOCKETS
Curl_safefree(conn->unix_domain_socket);
#endif
free(conn); /* free all the connection oriented data */
}
/*
* Disconnects the given connection. Note the connection may not be the
* primary connection, like when freeing room in the connection cache or
 * killing off a dead old connection.
*
* This function MUST NOT reset state in the Curl_easy struct if that
* isn't strictly bound to the life-time of *this* particular connection.
*
*/
CURLcode Curl_disconnect(struct connectdata *conn, bool dead_connection)
{
struct Curl_easy *data;
if(!conn)
return CURLE_OK; /* this is closed and fine already */
data = conn->data;
if(!data) {
DEBUGF(fprintf(stderr, "DISCONNECT without easy handle, ignoring\n"));
return CURLE_OK;
}
/*
* If this connection isn't marked to force-close, leave it open if there
* are other users of it
*/
if(!conn->bits.close &&
(conn->send_pipe->size + conn->recv_pipe->size)) {
DEBUGF(infof(data, "Curl_disconnect, usecounter: %d\n",
conn->send_pipe->size + conn->recv_pipe->size));
return CURLE_OK;
}
if(conn->dns_entry != NULL) {
Curl_resolv_unlock(data, conn->dns_entry);
conn->dns_entry = NULL;
}
Curl_hostcache_prune(data); /* kill old DNS cache entries */
#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM)
/* Cleanup NTLM connection-related data */
Curl_http_ntlm_cleanup(conn);
#endif
if(conn->handler->disconnect)
/* This is set if protocol-specific cleanups should be made */
conn->handler->disconnect(conn, dead_connection);
/* unlink ourselves! */
infof(data, "Closing connection %ld\n", conn->connection_id);
Curl_conncache_remove_conn(data->state.conn_cache, conn);
free_fixed_hostname(&conn->host);
free_fixed_hostname(&conn->conn_to_host);
free_fixed_hostname(&conn->http_proxy.host);
free_fixed_hostname(&conn->socks_proxy.host);
Curl_ssl_close(conn, FIRSTSOCKET);
/* Indicate to all handles on the pipe that we're dead */
if(Curl_pipeline_wanted(data->multi, CURLPIPE_ANY)) {
signalPipeClose(conn->send_pipe, TRUE);
signalPipeClose(conn->recv_pipe, TRUE);
}
conn_free(conn);
return CURLE_OK;
}
/*
* This function should return TRUE if the socket is to be assumed to
* be dead. Most commonly this happens when the server has closed the
* connection due to inactivity.
*/
static bool SocketIsDead(curl_socket_t sock)
{
int sval;
bool ret_val = TRUE;
sval = SOCKET_READABLE(sock, 0);
if(sval == 0)
/* timeout */
ret_val = FALSE;
return ret_val;
}
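
#if 0
/* Illustrative sketch only (kept out of the build): the idea behind
   SocketIsDead() expressed with plain poll(). An idle connection that the
   peer has closed shows up as readable (EOF) or in error, while a healthy
   idle connection has nothing pending, so the zero-timeout poll returns 0. */
#include <poll.h>

static int sketch_socket_is_dead(int fd)
{
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;
  pfd.revents = 0;

  if(poll(&pfd, 1, 0) == 0)
    return 0; /* nothing to report -> assume the connection is still alive */

  return 1; /* readable, hung up or in error -> treat it as dead */
}
#endif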
/*
* IsPipeliningPossible() returns TRUE if the options set would allow
 * pipelining/multiplexing and the connection is using an HTTP protocol.
*/
static bool IsPipeliningPossible(const struct Curl_easy *handle,
const struct connectdata *conn)
{
/* If an HTTP protocol is used and pipelining is enabled */
if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
(!conn->bits.protoconnstart || !conn->bits.close)) {
if(Curl_pipeline_wanted(handle->multi, CURLPIPE_HTTP1) &&
(handle->set.httpversion != CURL_HTTP_VERSION_1_0) &&
(handle->set.httpreq == HTTPREQ_GET ||
handle->set.httpreq == HTTPREQ_HEAD))
/* didn't ask for HTTP/1.0 and the request is a GET or HEAD */
return TRUE;
if(Curl_pipeline_wanted(handle->multi, CURLPIPE_MULTIPLEX) &&
(handle->set.httpversion >= CURL_HTTP_VERSION_2))
/* allows HTTP/2 */
return TRUE;
}
return FALSE;
}
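
#if 0
/* Illustrative sketch only (kept out of the build): the conditions tested in
   IsPipeliningPossible() correspond to what an application asks for through
   the public multi/easy options. The handle names and the weight value are
   arbitrary examples. */
#include <curl/curl.h>

static void sketch_enable_multiuse(CURLM *multi, CURL *parent, CURL *child)
{
  /* allow both HTTP/1.1 pipelining and HTTP/2 multiplexing on this multi
     handle; Curl_pipeline_wanted() checks exactly these bits */
  curl_multi_setopt(multi, CURLMOPT_PIPELINING,
                    CURLPIPE_HTTP1 | CURLPIPE_MULTIPLEX);

  /* ask for HTTP/2 so the multiplexing branch above can apply */
  curl_easy_setopt(child, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2);

  /* HTTP/2 stream priority: the weight must be within 1..256, as enforced
     by the CURLOPT_STREAM_WEIGHT case earlier in this file */
  curl_easy_setopt(child, CURLOPT_STREAM_WEIGHT, 32L);
  curl_easy_setopt(child, CURLOPT_STREAM_DEPENDS, parent);

  /* rather than opening a new connection right away, wait and reuse an
     existing connection once it is known to support multiplexing */
  curl_easy_setopt(child, CURLOPT_PIPEWAIT, 1L);
}
#endif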
int Curl_removeHandleFromPipeline(struct Curl_easy *handle,
struct curl_llist *pipeline)
{
if(pipeline) {
struct curl_llist_element *curr;
curr = pipeline->head;
while(curr) {
if(curr->ptr == handle) {
Curl_llist_remove(pipeline, curr, NULL);
return 1; /* we removed a handle */
}
curr = curr->next;
}
}
return 0;
}
#if 0 /* this code is saved here as it is useful for debugging purposes */
static void Curl_printPipeline(struct curl_llist *pipeline)
{
struct curl_llist_element *curr;
curr = pipeline->head;
while(curr) {
struct Curl_easy *data = (struct Curl_easy *) curr->ptr;
infof(data, "Handle in pipeline: %s\n", data->state.path);
curr = curr->next;
}
}
#endif
static struct Curl_easy* gethandleathead(struct curl_llist *pipeline)
{
struct curl_llist_element *curr = pipeline->head;
if(curr) {
return (struct Curl_easy *) curr->ptr;
}
return NULL;
}
/* remove the specified connection from all (possible) pipelines and related
queues */
void Curl_getoff_all_pipelines(struct Curl_easy *data,
struct connectdata *conn)
{
bool recv_head = (conn->readchannel_inuse &&
Curl_recvpipe_head(data, conn));
bool send_head = (conn->writechannel_inuse &&
Curl_sendpipe_head(data, conn));
if(Curl_removeHandleFromPipeline(data, conn->recv_pipe) && recv_head)
Curl_pipeline_leave_read(conn);
if(Curl_removeHandleFromPipeline(data, conn->send_pipe) && send_head)
Curl_pipeline_leave_write(conn);
}
static void signalPipeClose(struct curl_llist *pipeline, bool pipe_broke)
{
struct curl_llist_element *curr;
if(!pipeline)
return;
curr = pipeline->head;
while(curr) {
struct curl_llist_element *next = curr->next;
struct Curl_easy *data = (struct Curl_easy *) curr->ptr;
#ifdef DEBUGBUILD /* debug-only code */
if(data->magic != CURLEASY_MAGIC_NUMBER) {
/* MAJOR BADNESS */
infof(data, "signalPipeClose() found BAAD easy handle\n");
}
#endif
if(pipe_broke)
data->state.pipe_broke = TRUE;
Curl_multi_handlePipeBreak(data);
Curl_llist_remove(pipeline, curr, NULL);
curr = next;
}
}
/*
* This function finds the connection in the connection
* cache that has been unused for the longest time.
*
* Returns the pointer to the oldest idle connection, or NULL if none was
* found.
*/
struct connectdata *
Curl_oldest_idle_connection(struct Curl_easy *data)
{
struct conncache *bc = data->state.conn_cache;
struct curl_hash_iterator iter;
struct curl_llist_element *curr;
struct curl_hash_element *he;
time_t highscore=-1;
time_t score;
struct timeval now;
struct connectdata *conn_candidate = NULL;
struct connectbundle *bundle;
now = Curl_tvnow();
Curl_hash_start_iterate(&bc->hash, &iter);
he = Curl_hash_next_element(&iter);
while(he) {
struct connectdata *conn;
bundle = he->ptr;
curr = bundle->conn_list->head;
while(curr) {
conn = curr->ptr;
if(!conn->inuse) {
/* Set higher score for the age passed since the connection was used */
score = Curl_tvdiff(now, conn->now);
if(score > highscore) {
highscore = score;
conn_candidate = conn;
}
}
curr = curr->next;
}
he = Curl_hash_next_element(&iter);
}
return conn_candidate;
}
static bool
proxy_info_matches(const struct proxy_info* data,
const struct proxy_info* needle)
{
if((data->proxytype == needle->proxytype) &&
(data->port == needle->port) &&
Curl_safe_strcasecompare(data->host.name, needle->host.name) &&
Curl_safe_strcasecompare(data->user, needle->user) &&
Curl_safe_strcasecompare(data->passwd, needle->passwd))
return TRUE;
return FALSE;
}
/*
* This function finds the connection in the connection
* bundle that has been unused for the longest time.
*
* Returns the pointer to the oldest idle connection, or NULL if none was
* found.
*/
static struct connectdata *
find_oldest_idle_connection_in_bundle(struct Curl_easy *data,
struct connectbundle *bundle)
{
struct curl_llist_element *curr;
time_t highscore=-1;
time_t score;
struct timeval now;
struct connectdata *conn_candidate = NULL;
struct connectdata *conn;
(void)data;
now = Curl_tvnow();
curr = bundle->conn_list->head;
while(curr) {
conn = curr->ptr;
if(!conn->inuse) {
/* Set higher score for the age passed since the connection was used */
score = Curl_tvdiff(now, conn->now);
if(score > highscore) {
highscore = score;
conn_candidate = conn;
}
}
curr = curr->next;
}
return conn_candidate;
}
/*
* This function checks if given connection is dead and disconnects if so.
* (That also removes it from the connection cache.)
*
* Returns TRUE if the connection actually was dead and disconnected.
*/
static bool disconnect_if_dead(struct connectdata *conn,
struct Curl_easy *data)
{
size_t pipeLen = conn->send_pipe->size + conn->recv_pipe->size;
if(!pipeLen && !conn->inuse) {
/* The check for a dead socket makes sense only if there are no
handles in the pipeline and the connection isn't already marked as
in use */
bool dead;
if(conn->handler->protocol & CURLPROTO_RTSP)
/* RTSP is a special case due to RTP interleaving */
dead = Curl_rtsp_connisdead(conn);
else
dead = SocketIsDead(conn->sock[FIRSTSOCKET]);
if(dead) {
conn->data = data;
infof(data, "Connection %ld seems to be dead!\n", conn->connection_id);
/* disconnect resources */
Curl_disconnect(conn, /* dead_connection */TRUE);
return TRUE;
}
}
return FALSE;
}
/*
* Wrapper to use disconnect_if_dead() function in Curl_conncache_foreach()
*
 * Always returns 0.
*/
static int call_disconnect_if_dead(struct connectdata *conn,
void *param)
{
struct Curl_easy* data = (struct Curl_easy*)param;
disconnect_if_dead(conn, data);
return 0; /* continue iteration */
}
/*
* This function scans the connection cache for half-open/dead connections,
* closes and removes them.
* The cleanup is done at most once per second.
*/
static void prune_dead_connections(struct Curl_easy *data)
{
struct timeval now = Curl_tvnow();
time_t elapsed = Curl_tvdiff(now, data->state.conn_cache->last_cleanup);
if(elapsed >= 1000L) {
Curl_conncache_foreach(data->state.conn_cache, data,
call_disconnect_if_dead);
data->state.conn_cache->last_cleanup = now;
}
}
static size_t max_pipeline_length(struct Curl_multi *multi)
{
return multi ? multi->max_pipeline_length : 0;
}
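
#if 0
/* Illustrative sketch only (kept out of the build): the per-connection pipe
   length consulted via max_pipeline_length() above, and the related
   connection caps, are configured by the application on the multi handle.
   The numeric values here are arbitrary examples. */
#include <curl/curl.h>

static void sketch_multi_limits(CURLM *multi)
{
  /* queue at most 5 requests on one pipelined connection */
  curl_multi_setopt(multi, CURLMOPT_MAX_PIPELINE_LENGTH, 5L);

  /* cap concurrent connections per host and in total; handles beyond the
     caps are held back until a connection becomes available */
  curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 4L);
  curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 16L);
}
#endif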
/*
* Given one filled in connection struct (named needle), this function should
* detect if there already is one that has all the significant details
* exactly the same and thus should be used instead.
*
* If there is a match, this function returns TRUE - and has marked the
 * connection as 'in-use'. ConnectionDone() must later be called to return it
 * to the 'idle' (unused) state.
*
* The force_reuse flag is set if the connection must be used, even if
* the pipelining strategy wants to open a new connection instead of reusing.
*/
static bool
ConnectionExists(struct Curl_easy *data,
struct connectdata *needle,
struct connectdata **usethis,
bool *force_reuse,
bool *waitpipe)
1999-12-29 09:20:26 -05:00
{
struct connectdata *check;
struct connectdata *chosen = 0;
bool foundPendingCandidate = FALSE;
bool canPipeline = IsPipeliningPossible(data, needle);
struct connectbundle *bundle;
#ifdef USE_NTLM
bool wantNTLMhttp = ((data->state.authhost.want &
(CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
(needle->handler->protocol & PROTO_FAMILY_HTTP));
bool wantProxyNTLMhttp = (needle->bits.proxy_user_passwd &&
((data->state.authproxy.want &
(CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
(needle->handler->protocol & PROTO_FAMILY_HTTP)));
#endif
*force_reuse = FALSE;
*waitpipe = FALSE;
/* We can't pipe if the site is blacklisted */
if(canPipeline && Curl_pipeline_site_blacklisted(data, needle)) {
canPipeline = FALSE;
}
/* Look up the bundle with all the connections to this
particular host */
bundle = Curl_conncache_find_bundle(needle, data->state.conn_cache);
if(bundle) {
/* Max pipe length is zero (unlimited) for multiplexed connections */
size_t max_pipe_len = (bundle->multiuse != BUNDLE_MULTIPLEX)?
max_pipeline_length(data->multi):0;
size_t best_pipe_len = max_pipe_len;
struct curl_llist_element *curr;
infof(data, "Found bundle for host %s: %p [%s]\n",
(needle->bits.conn_to_host ? needle->conn_to_host.name :
needle->host.name), (void *)bundle,
(bundle->multiuse == BUNDLE_PIPELINING ?
"can pipeline" :
(bundle->multiuse == BUNDLE_MULTIPLEX ?
"can multiplex" : "serially")));
/* We can't pipe if we don't know anything about the server */
if(canPipeline) {
if(bundle->multiuse <= BUNDLE_UNKNOWN) {
if((bundle->multiuse == BUNDLE_UNKNOWN) && data->set.pipewait) {
infof(data, "Server doesn't support multi-use yet, wait\n");
*waitpipe = TRUE;
return FALSE; /* no re-use */
}
infof(data, "Server doesn't support multi-use (yet)\n");
canPipeline = FALSE;
}
if((bundle->multiuse == BUNDLE_PIPELINING) &&
!Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1)) {
/* not asked for, switch off */
infof(data, "Could pipeline, but not asked to!\n");
canPipeline = FALSE;
}
else if((bundle->multiuse == BUNDLE_MULTIPLEX) &&
!Curl_pipeline_wanted(data->multi, CURLPIPE_MULTIPLEX)) {
infof(data, "Could multiplex, but not asked to!\n");
canPipeline = FALSE;
}
}
curr = bundle->conn_list->head;
while(curr) {
bool match = FALSE;
size_t pipeLen;
/*
 * Note that if we use an HTTP proxy in normal mode (no tunneling), we
* check connections to that proxy and not to the actual remote server.
*/
check = curr->ptr;
curr = curr->next;
if(disconnect_if_dead(check, data))
continue;
pipeLen = check->send_pipe->size + check->recv_pipe->size;
if(canPipeline) {
if(check->bits.protoconnstart && check->bits.close)
continue;
if(!check->bits.multiplex) {
/* If not multiplexing, make sure the pipe has only GET requests */
struct Curl_easy* sh = gethandleathead(check->send_pipe);
struct Curl_easy* rh = gethandleathead(check->recv_pipe);
if(sh) {
if(!IsPipeliningPossible(sh, check))
continue;
}
else if(rh) {
if(!IsPipeliningPossible(rh, check))
continue;
}
}
}
else {
if(pipeLen > 0) {
/* can only happen within multi handles, and means that another easy
handle is using this connection */
continue;
}
if(Curl_resolver_asynch()) {
/* ip_addr_str[0] is NUL only if the resolving of the name hasn't
completed yet and until then we don't re-use this connection */
if(!check->ip_addr_str[0]) {
infof(data,
"Connection #%ld is still name resolving, can't reuse\n",
check->connection_id);
continue;
}
}
if((check->sock[FIRSTSOCKET] == CURL_SOCKET_BAD) ||
check->bits.close) {
if(!check->bits.close)
foundPendingCandidate = TRUE;
/* Don't pick a connection that hasn't connected yet or that is going
to get closed. */
infof(data, "Connection #%ld isn't open enough, can't reuse\n",
check->connection_id);
#ifdef DEBUGBUILD
if(check->recv_pipe->size > 0) {
infof(data,
"BAD! Unconnected #%ld has a non-empty recv pipeline!\n",
check->connection_id);
}
#endif
continue;
}
}
#ifdef USE_UNIX_SOCKETS
if(needle->unix_domain_socket) {
if(!check->unix_domain_socket)
continue;
if(strcmp(needle->unix_domain_socket, check->unix_domain_socket))
continue;
if(needle->abstract_unix_socket != check->abstract_unix_socket)
continue;
}
else if(check->unix_domain_socket)
continue;
#endif
if((needle->handler->flags&PROTOPT_SSL) !=
(check->handler->flags&PROTOPT_SSL))
/* don't do mixed SSL and non-SSL connections */
if(get_protocol_family(check->handler->protocol) !=
needle->handler->protocol || !check->tls_upgraded)
/* except protocols that have been upgraded via TLS */
continue;
if(needle->bits.httpproxy != check->bits.httpproxy ||
needle->bits.socksproxy != check->bits.socksproxy)
continue;
if(needle->bits.socksproxy && !proxy_info_matches(&needle->socks_proxy,
&check->socks_proxy))
continue;
if(needle->bits.conn_to_host != check->bits.conn_to_host)
/* don't mix connections that use the "connect to host" feature and
* connections that don't use this feature */
continue;
if(needle->bits.conn_to_port != check->bits.conn_to_port)
/* don't mix connections that use the "connect to port" feature and
* connections that don't use this feature */
continue;
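/* The conn_to_host/conn_to_port bits reflect the "connect to" feature
(CURLOPT_CONNECT_TO / --connect-to), which sends the connection to a
different host/port than the URL names, so such connections must not be
mixed with ordinary ones. */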
if(needle->bits.httpproxy) {
if(!proxy_info_matches(&needle->http_proxy, &check->http_proxy))
continue;
if(needle->bits.tunnel_proxy != check->bits.tunnel_proxy)
continue;
if(needle->http_proxy.proxytype == CURLPROXY_HTTPS) {
/* use https proxy */
if(needle->handler->flags&PROTOPT_SSL) {
/* use double layer ssl */
if(!Curl_ssl_config_matches(&needle->proxy_ssl_config,
&check->proxy_ssl_config))
continue;
if(check->proxy_ssl[FIRSTSOCKET].state != ssl_connection_complete)
continue;
}
else {
if(!Curl_ssl_config_matches(&needle->ssl_config,
&check->ssl_config))
continue;
if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete)
continue;
}
}
}
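/* With an HTTPS proxy (CURLPROXY_HTTPS) the TLS session towards the proxy
is part of the connection state: its configuration must match and its
handshake must be complete before the candidate can be reused. Illustrative
setup, with an example proxy host:
  curl_easy_setopt(easy, CURLOPT_PROXY, "https://proxy.example.com:3128");
*/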
if(!canPipeline && check->inuse)
/* this request can't be pipelined but the checked connection is
already in use so we skip it */
continue;
if(needle->localdev || needle->localport) {
/* If we are bound to a specific local end (IP+port), we must not
re-use a random other one, although if we didn't ask for a
particular one we can reuse one that was bound.
This comparison is a bit rough and too strict. Since the input
parameters can be specified in numerous ways and still end up the
same it would take a lot of processing to make it really accurate.
Instead, this matching will assume that re-uses of bound connections
will most likely also re-use the exact same binding parameters and
missing out a few edge cases shouldn't hurt anyone very much.
*/
if((check->localport != needle->localport) ||
(check->localportrange != needle->localportrange) ||
(needle->localdev &&
(!check->localdev || strcmp(check->localdev, needle->localdev))))
continue;
}
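/* localdev, localport and localportrange are set through CURLOPT_INTERFACE,
CURLOPT_LOCALPORT and CURLOPT_LOCALPORTRANGE. Illustrative use, with example
values:
  curl_easy_setopt(easy, CURLOPT_INTERFACE, "eth0");
  curl_easy_setopt(easy, CURLOPT_LOCALPORT, 49152L);
*/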
if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
/* This protocol requires credentials per connection,
so verify that we're using the same name and password as well */
if(strcmp(needle->user, check->user) ||
strcmp(needle->passwd, check->passwd)) {
/* one of them was different */
continue;
}
}
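/* Handlers with PROTOPT_CREDSPERREQUEST (HTTP, for instance) resend the
credentials with every request, so reuse does not depend on them. Protocols
without the flag (FTP, for instance) authenticate the connection itself,
hence the user/password comparison above. */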
if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) ||
needle->bits.tunnel_proxy) {
/* The requested connection does not use an HTTP proxy or it uses SSL or
it is a non-SSL protocol tunneled through the proxy or it is a non-SSL
protocol which is allowed to be upgraded via TLS */
if((strcasecompare(needle->handler->scheme, check->handler->scheme) ||
(get_protocol_family(check->handler->protocol) ==
needle->handler->protocol && check->tls_upgraded)) &&
(!needle->bits.conn_to_host || strcasecompare(
needle->conn_to_host.name, check->conn_to_host.name)) &&
(!needle->bits.conn_to_port ||
needle->conn_to_port == check->conn_to_port) &&
strcasecompare(needle->host.name, check->host.name) &&
needle->remote_port == check->remote_port) {
/* The schemes match or the protocol family is the same and the
previous connection was TLS upgraded, and the hostname and host
port match */
if(needle->handler->flags & PROTOPT_SSL) {
/* This is a SSL connection so verify that we're using the same
SSL options as well */
if(!Curl_ssl_config_matches(&needle->ssl_config,
&check->ssl_config)) {
DEBUGF(infof(data,
"Connection #%ld has different SSL parameters, "
"can't reuse\n",
check->connection_id));
continue;
}
else if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) {
foundPendingCandidate = TRUE;
DEBUGF(infof(data,
"Connection #%ld has not started SSL connect, "
"can't reuse\n",
check->connection_id));
continue;
}
}
match = TRUE;
}
}
else {
/* The requested connection is using the same HTTP proxy in normal
mode (no tunneling) */
match = TRUE;
}
if(match) {
#if defined(USE_NTLM)
/* If we are looking for an HTTP+NTLM connection, check if this is
already authenticating with the right credentials. If not, keep
looking so that we can reuse NTLM connections if
possible. (Especially we must not reuse the same connection if
partway through a handshake!) */
if(wantNTLMhttp) {
if(strcmp(needle->user, check->user) ||
strcmp(needle->passwd, check->passwd))
continue;
}
else if(check->ntlm.state != NTLMSTATE_NONE) {
/* Connection is using NTLM auth but we don't want NTLM */
continue;
}
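/* NTLM authenticates the connection rather than the individual request,
which is why reuse is tied to the exact credentials above. A minimal sketch
of how a transfer ends up asking for NTLM (example credentials):
  curl_easy_setopt(easy, CURLOPT_HTTPAUTH, (long)CURLAUTH_NTLM);
  curl_easy_setopt(easy, CURLOPT_USERPWD, "user:secret");
*/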
/* Same for Proxy NTLM authentication */
if(wantProxyNTLMhttp) {
/* Both check->http_proxy.user and check->http_proxy.passwd can be
* NULL */
if(!check->http_proxy.user || !check->http_proxy.passwd)
continue;
if(strcmp(needle->http_proxy.user, check->http_proxy.user) ||
strcmp(needle->http_proxy.passwd, check->http_proxy.passwd))
continue;
}
else if(check->proxyntlm.state != NTLMSTATE_NONE) {
/* Proxy connection is using NTLM auth but we don't want NTLM */
continue;
}
if(wantNTLMhttp || wantProxyNTLMhttp) {
/* Credentials are already checked, we can use this connection */
chosen = check;
if((wantNTLMhttp &&
(check->ntlm.state != NTLMSTATE_NONE)) ||
(wantProxyNTLMhttp &&
(check->proxyntlm.state != NTLMSTATE_NONE))) {
/* We must use this connection, no other */
*force_reuse = TRUE;
break;
}
/* Continue looking for a better connection */
continue;
}
#endif
if(canPipeline) {
/* We can pipeline if we want to. Let's continue looking for
the optimal connection to use, i.e. the shortest pipe that is not
blacklisted. */
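/* Selection strategy: an idle pipe (pipeLen == 0) is taken right away,
otherwise the shortest pipe seen so far is tracked in best_pipe_len. The
limits checked below correspond to the multi options
CURLMOPT_MAX_PIPELINE_LENGTH plus, via Curl_pipeline_penalized(),
CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE and CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE. */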
if(pipeLen == 0) {
/* We have the optimal connection. Let's stop looking. */
chosen = check;
break;
}
/* We can't use the connection if the pipe is full */
if(max_pipe_len && (pipeLen >= max_pipe_len)) {
infof(data, "Pipe is full, skip (%zu)\n", pipeLen);
continue;
}
#ifdef USE_NGHTTP2
/* If multiplexed, make sure we don't go over concurrency limit */
if(check->bits.multiplex) {
/* Multiplexed connections can only be HTTP/2 for now */
struct http_conn *httpc = &check->proto.httpc;
if(pipeLen >= httpc->settings.max_concurrent_streams) {
infof(data, "MAX_CONCURRENT_STREAMS reached, skip (%zu)\n",
pipeLen);
continue;
}
}
#endif
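/* Multiplexing is enabled with CURLMOPT_PIPELINING set to CURLPIPE_MULTIPLEX;
for HTTP/2 the peer-advertised SETTINGS_MAX_CONCURRENT_STREAMS value caps how
many transfers may share one connection, which is what the check above
enforces. */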
/* We can't use the connection if the pipe is penalized */
if(Curl_pipeline_penalized(data, check)) {
infof(data, "Penalized, skip\n");
continue;
}
if(max_pipe_len) {
if(pipeLen < best_pipe_len) {
/* This connection has a shorter pipe so far. We'll pick this
and continue searching */
chosen = check;
best_pipe_len = pipeLen;
continue;
}
}
else {
/* When not pipelining (i.e. when multiplexed), we have a match here! */
chosen = check;
infof(data, "Multiplexed connection found!\n");
break;
}
}
else {
/* We have found a connection. Let's stop searching. */
chosen = check;
break;
}
}
}
}
if(chosen) {
*usethis = chosen;
return TRUE; /* yes, we found one to use! */
}
if(foundPendingCandidate && data->set.pipewait) {
infof(data,
"Found pending candidate for reuse and CURLOPT_PIPEWAIT is set\n");
*waitpipe = TRUE;
}
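/* data->set.pipewait is controlled by CURLOPT_PIPEWAIT, for example:
  curl_easy_setopt(easy, CURLOPT_PIPEWAIT, 1L);
which makes a transfer prefer waiting for a pending pipelined/multiplexed
connection over opening a new one. */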
return FALSE; /* no matching connection exists */
}
/* After a TCP connection to the proxy has been verified, this function
performs the next step: for a SOCKS proxy it runs the SOCKS handshake.
Note: this function's sub-functions call failf()
*/
CURLcode Curl_connected_proxy(struct connectdata *conn, int sockindex)
{
CURLcode result = CURLE_OK;
if(conn->bits.socksproxy) {
#ifndef CURL_DISABLE_PROXY
/* for the secondary socket (FTP), use the "connect to host"
* but ignore the "connect to port" (use the secondary port)
*/
const char * const host = conn->bits.httpproxy ?
conn->http_proxy.host.name :
conn->bits.conn_to_host ?
conn->conn_to_host.name :
sockindex == SECONDARYSOCKET ?
conn->secondaryhostname : conn->host.name;
const int port = conn->bits.httpproxy ? (int)conn->http_proxy.port :
sockindex == SECONDARYSOCKET ? conn->secondary_port :
conn->bits.conn_to_port ? conn->conn_to_port :
conn->remote_port;
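/* Precedence in the ternaries above: a chained HTTP proxy is the SOCKS
target when present; otherwise "connect to host" applies (also for the
secondary socket), the secondary (FTP data) socket keeps its own port, and
everything else falls back to the remote host and port from the URL. */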
conn->bits.socksproxy_connecting = TRUE;
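/* Run the SOCKS handshake that matches the configured proxy type; the
socksproxy_connecting bit marks the connection as busy with this handshake
until it has finished. */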
switch(conn->socks_proxy.proxytype) {
case CURLPROXY_SOCKS5:
case CURLPROXY_SOCKS5_HOSTNAME:
result = Curl_SOCKS5(conn->socks_proxy.user, conn->socks_proxy.passwd,
host, port, sockindex, conn);
break;
case CURLPROXY_SOCKS4:
case CURLPROXY_SOCKS4A:
result = Curl_SOCKS4(conn->socks_proxy.user, host, port, sockindex,
conn);
break;
default:
failf(conn->data, "unknown proxytype option given");
result = CURLE_COULDNT_CONNECT;
} /* switch proxytype */
conn->bits.socksproxy_connecting = FALSE;
#else
(void)sockindex;
#endif /* CURL_DISABLE_PROXY */
}
return result;
}
/*
* verboseconnect() displays verbose information after a connect
*/
#ifndef CURL_DISABLE_VERBOSE_STRINGS
void Curl_verboseconnect(struct connectdata *conn)
{
if(conn->data->set.verbose)
infof(conn->data, "Connected to %s (%s) port %ld (#%ld)\n",
conn->bits.socksproxy ? conn->socks_proxy.host.dispname :
conn->bits.httpproxy ? conn->http_proxy.host.dispname :
conn->bits.conn_to_host ? conn->conn_to_host.dispname :
conn->host.dispname,
conn->ip_addr_str, conn->port, conn->connection_id);
}
#endif
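/*
 * Curl_protocol_getsock() returns the sockets the protocol handler wants
 * monitored during its connect phase; Curl_doing_getsock() does the same for
 * the DOING phase. Both return GETSOCK_BLANK when the handler provides no
 * callback.
 */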
int Curl_protocol_getsock(struct connectdata *conn,
curl_socket_t *socks,
int numsocks)
{
if(conn->handler->proto_getsock)
return conn->handler->proto_getsock(conn, socks, numsocks);
return GETSOCK_BLANK;
}
int Curl_doing_getsock(struct connectdata *conn,
curl_socket_t *socks,
int numsocks)
{
if(conn && conn->handler->doing_getsock)
return conn->handler->doing_getsock(conn, socks, numsocks);
return GETSOCK_BLANK;
}
/*
* We are doing protocol-specific connecting, and this is called over and
* over from the multi interface until the connection phase is done on
* the protocol layer.
*/
CURLcode Curl_protocol_connecting(struct connectdata *conn,
bool *done)
{
CURLcode result=CURLE_OK;
if(conn && conn->handler->connecting) {
*done = FALSE;
result = conn->handler->connecting(conn, done);
}
else
*done = TRUE;
return result;
}
/*
* We are in the DOING phase: this is called over and over from the multi
* interface until the DOING phase is done on the protocol layer.
*/
CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done)
{
CURLcode result=CURLE_OK;
if(conn && conn->handler->doing) {
*done = FALSE;
result = conn->handler->doing(conn, done);
}
else
*done = TRUE;
return result;
}
/*
* The TCP connection has been established successfully, so we can now
* proceed with the protocol-specific part of the connect.
*/
CURLcode Curl_protocol_connect(struct connectdata *conn,
bool *protocol_done)
{
CURLcode result=CURLE_OK;
*protocol_done = FALSE;
if(conn->bits.tcpconnect[FIRSTSOCKET] && conn->bits.protoconnstart) {
/* We are already connected, so just return. This may happen when the
connect worked fine in the first call, like when we connect to a local
server or proxy. Note that we don't know if the protocol is actually done,
unless this protocol has no protocol-connect callback, in which case we do
know it's done. */
if(!conn->handler->connecting)
*protocol_done = TRUE;
return CURLE_OK;
}
if(!conn->bits.protoconnstart) {
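/* Before the protocol-level connect can start, finish any proxy work: the
SOCKS handshake, the TLS handshake with an HTTPS proxy and/or the HTTP
CONNECT tunnel, all handled below. */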
result = Curl_proxy_connect(conn, FIRSTSOCKET);
if(result)
return result;
if(CONNECT_FIRSTSOCKET_PROXY_SSL())
/* wait for HTTPS proxy SSL initialization to complete */
return CURLE_OK;
if(conn->bits.tunnel_proxy && conn->bits.httpproxy &&
(conn->tunnel_state[FIRSTSOCKET] != TUNNEL_COMPLETE))
/* when using an HTTP tunnel proxy, await complete tunnel establishment
before proceeding further. Return CURLE_OK so we'll be called again */
return CURLE_OK;
if(conn->handler->connect_it) {
/* is there a protocol-specific connect() procedure? */
/* Call the protocol-specific connect function */
result = conn->handler->connect_it(conn, protocol_done);
}
else
*protocol_done = TRUE;
/* it has started, possibly even completed but that knowledge isn't stored
in this bit! */
if(!result)
conn->bits.protoconnstart = TRUE;
}
return result; /* pass back status */
}
/*
* Helpers for IDNA conversions.
*/
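/* is_ASCII_name() returns TRUE when the host name consists of plain 7-bit
ASCII bytes only, in which case no IDN conversion is necessary */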
static bool is_ASCII_name(const char *hostname)
{
const unsigned char *ch = (const unsigned char *)hostname;
while(*ch) {
if(*ch++ & 0x80)
return FALSE;
}
return TRUE;
}
/*
* Perform any necessary IDN conversion of hostname
*/
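/* For example, a non-ASCII name such as "bücher.example" (illustrative) is
converted to its ACE/punycode form "xn--bcher-kva.example", which is what
gets resolved and sent on the wire, while host->dispname keeps the original
spelling for display purposes. */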
static void fix_hostname(struct connectdata *conn, struct hostname *host)
{
size_t len;
struct Curl_easy *data = conn->data;
#ifndef USE_LIBIDN2
(void)data;
(void)conn;
#elif defined(CURL_DISABLE_VERBOSE_STRINGS)
(void)conn;
#endif
/* set the name we use to display the host name */
host->dispname = host->name;
len = strlen(host->name);
if(len && (host->name[len-1] == '.'))
/* strip off a single trailing dot if present; it serves no purpose and
would otherwise end up in places like the SNI field */
host->name[len-1]=0;
/* Check name for non-ASCII and convert hostname to ACE form if we can */
if(!is_ASCII_name(host->name)) {
#ifdef USE_LIBIDN2
if(idn2_check_version(IDN2_VERSION)) {
char *ace_hostname = NULL;
#if IDN2_VERSION_NUMBER >= 0x00140000
/* IDN2_NFC_INPUT: Normalize input string using normalization form C.
IDN2_NONTRANSITIONAL: Perform Unicode TR46 non-transitional
processing. */
int flags = IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL;
#else
int flags = IDN2_NFC_INPUT;
#endif
int rc = idn2_lookup_ul((const char *)host->name, &ace_hostname, flags);
if(rc == IDN2_OK) {
host->encalloc = (char *)ace_hostname;
/* change the name pointer to point to the encoded hostname */
host->name = host->encalloc;
}
else
infof(data, "Failed to convert %s to ACE; %s\n", host->name,
idn2_strerror(rc));
}
#elif defined(USE_WIN32_IDN)
char *ace_hostname = NULL;
if(curl_win32_idn_to_ascii(host->name, &ace_hostname)) {
host->encalloc = ace_hostname;
/* change the name pointer to point to the encoded hostname */
host->name = host->encalloc;
}
else
infof(data, "Failed to convert %s to ACE;\n", host->name);
#else
infof(data, "IDN support not present, can't parse Unicode domains\n");
#endif
}
}
/*
* Frees data allocated by fix_hostname()
*/
static void free_fixed_hostname(struct hostname *host)
{
#if defined(USE_LIBIDN2)
if(host->encalloc) {
idn2_free(host->encalloc); /* must be freed with idn2_free() since this
was allocated by libidn2 */
host->encalloc = NULL;
}
#elif defined(USE_WIN32_IDN)
free(host->encalloc); /* must be freed with free() since this was
allocated by curl_win32_idn_to_ascii */
host->encalloc = NULL;
#else
(void)host;
#endif
}
static void llist_dtor(void *user, void *element)
{
(void)user;
(void)element;
/* Do nothing */
}
/*
* Allocate and initialize a new connectdata object.
*/
static struct connectdata *allocate_conn(struct Curl_easy *data)
{
struct connectdata *conn = calloc(1, sizeof(struct connectdata));
if(!conn)
return NULL;
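/* calloc() above zero-filled the struct, so every field that isn't assigned
explicitly below starts out as 0/NULL/FALSE */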
conn->handler = &Curl_handler_dummy; /* Be sure we have a handler defined
already from the start to avoid NULL
situations and checks */
/* and we setup a few fields in case we end up actually using this struct */
conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
conn->tempsock[0] = CURL_SOCKET_BAD; /* no file descriptor */
conn->tempsock[1] = CURL_SOCKET_BAD; /* no file descriptor */
conn->connection_id = -1; /* no ID */
conn->port = -1; /* unknown at this point */
conn->remote_port = -1; /* unknown at this point */
#if defined(USE_RECV_BEFORE_SEND_WORKAROUND) && defined(DEBUGBUILD)
conn->postponed[0].bindsock = CURL_SOCKET_BAD; /* no file descriptor */
conn->postponed[1].bindsock = CURL_SOCKET_BAD; /* no file descriptor */
#endif /* USE_RECV_BEFORE_SEND_WORKAROUND && DEBUGBUILD */
/* Default protocol-independent behavior doesn't support persistent
connections, so we set this to force-close. Protocols that support
this need to set this to FALSE in their "curl_do" functions. */
connclose(conn, "Default to force-close");
/* Store creation time to help future close decision making */
conn->created = Curl_tvnow();
conn->data = data; /* Setup the association between this connection
and the Curl_easy */
conn->http_proxy.proxytype = data->set.proxytype;
conn->socks_proxy.proxytype = CURLPROXY_SOCKS4;
#ifdef CURL_DISABLE_PROXY
conn->bits.proxy = FALSE;
conn->bits.httpproxy = FALSE;
conn->bits.socksproxy = FALSE;
conn->bits.proxy_user_passwd = FALSE;
conn->bits.tunnel_proxy = FALSE;
#else /* CURL_DISABLE_PROXY */
/* note that these two proxy bits are for now set based only on what looks
to be requested; they may be altered further down the road */
conn->bits.proxy = (data->set.str[STRING_PROXY] &&
*data->set.str[STRING_PROXY]) ? TRUE : FALSE;
conn->bits.httpproxy = (conn->bits.proxy &&
(conn->http_proxy.proxytype == CURLPROXY_HTTP ||
conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0 ||
conn->http_proxy.proxytype == CURLPROXY_HTTPS)) ?
TRUE : FALSE;
conn->bits.socksproxy = (conn->bits.proxy &&
!conn->bits.httpproxy) ? TRUE : FALSE;
if(data->set.str[STRING_PRE_PROXY] && *data->set.str[STRING_PRE_PROXY]) {
conn->bits.proxy = TRUE;
conn->bits.socksproxy = TRUE;
}
conn->bits.proxy_user_passwd =
(data->set.str[STRING_PROXYUSERNAME]) ? TRUE : FALSE;
conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy;
#endif /* CURL_DISABLE_PROXY */
conn->bits.user_passwd = (data->set.str[STRING_USERNAME]) ? TRUE : FALSE;
conn->bits.ftp_use_epsv = data->set.ftp_use_epsv;
conn->bits.ftp_use_eprt = data->set.ftp_use_eprt;
conn->ssl_config.verifystatus = data->set.ssl.primary.verifystatus;
conn->ssl_config.verifypeer = data->set.ssl.primary.verifypeer;
conn->ssl_config.verifyhost = data->set.ssl.primary.verifyhost;
conn->proxy_ssl_config.verifystatus =
data->set.proxy_ssl.primary.verifystatus;
conn->proxy_ssl_config.verifypeer = data->set.proxy_ssl.primary.verifypeer;
conn->proxy_ssl_config.verifyhost = data->set.proxy_ssl.primary.verifyhost;
conn->ip_version = data->set.ipver;
#if !defined(CURL_DISABLE_HTTP) && defined(USE_NTLM) && \
defined(NTLM_WB_ENABLED)
conn->ntlm_auth_hlpr_socket = CURL_SOCKET_BAD;
conn->ntlm_auth_hlpr_pid = 0;
conn->challenge_header = NULL;
conn->response_header = NULL;
#endif
if(Curl_pipeline_wanted(data->multi, CURLPIPE_HTTP1) &&
!conn->master_buffer) {
/* Allocate master_buffer to be used for HTTP/1 pipelining */
conn->master_buffer = calloc(BUFSIZE, sizeof(char));
if(!conn->master_buffer)
goto error;
}
/* Initialize the pipeline lists */
conn->send_pipe = Curl_llist_alloc((curl_llist_dtor) llist_dtor);
conn->recv_pipe = Curl_llist_alloc((curl_llist_dtor) llist_dtor);
if(!conn->send_pipe || !conn->recv_pipe)
goto error;
#ifdef HAVE_GSSAPI
conn->data_prot = PROT_CLEAR;
#endif
/* Store the local bind parameters that will be used for this connection */
if(data->set.str[STRING_DEVICE]) {
conn->localdev = strdup(data->set.str[STRING_DEVICE]);
if(!conn->localdev)
goto error;
}
conn->localportrange = data->set.localportrange;
conn->localport = data->set.localport;
/* the close socket stuff needs to be copied to the connection struct as
it may live on without (this specific) Curl_easy */
conn->fclosesocket = data->set.fclosesocket;
conn->closesocket_client = data->set.closesocket_client;
return conn;
error:
Curl_llist_destroy(conn->send_pipe, NULL);
Curl_llist_destroy(conn->recv_pipe, NULL);
conn->send_pipe = NULL;
conn->recv_pipe = NULL;
free(conn->master_buffer);
free(conn->localdev);
free(conn);
return NULL;
}
static CURLcode findprotocol(struct Curl_easy *data,
struct connectdata *conn,
const char *protostr)
{
const struct Curl_handler * const *pp;
const struct Curl_handler *p;
/* Scan the protocol handler table and match against 'protostr' to set a few
variables based on the URL. Note that the handler may be changed later,
when the protocol-specific setup function is called. */
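/* A scheme must be enabled for "normal" requests (CURLOPT_PROTOCOLS) and,
when the request results from a redirect, also for redirects
(CURLOPT_REDIR_PROTOCOLS), as checked below. */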
for(pp = protocols; (p = *pp) != NULL; pp++) {
if(strcasecompare(p->scheme, protostr)) {
/* Protocol found in table. Check if allowed */
if(!(data->set.allowed_protocols & p->protocol))
/* nope, get out */
break;
/* it is allowed for "normal" request, now do an extra check if this is
the result of a redirect */
if(data->state.this_is_a_follow &&
!(data->set.redir_protocols & p->protocol))
/* nope, get out */
break;
/* Store the protocol handler; conn->given keeps the handler matched from
the URL even if conn->handler changes later */
conn->handler = conn->given = p;
/* 'port' and 'remote_port' are set in setup_connection_internals() */
return CURLE_OK;
}
}
/* The protocol was not found in the table, but we don't have to assign it
to anything since it is already assigned to a dummy-struct in the
create_conn() function when the connectdata struct is allocated. */
failf(data, "Protocol \"%s\" not supported or disabled in " LIBCURL_NAME,
protostr);
return CURLE_UNSUPPORTED_PROTOCOL;
}
/*
* Parse URL and fill in the relevant members of the connection struct.
*/
static CURLcode parseurlandfillconn(struct Curl_easy *data,
struct connectdata *conn,
bool *prot_missing,
char **userp, char **passwdp,
char **optionsp)
{
char *at;
char *fragment;
char *path = data->state.path;
char *query;
int i;
int rc;
const char *protop = "";
CURLcode result;
bool rebuild_url = FALSE;
bool url_has_scheme = FALSE;
char protobuf[16];
*prot_missing = FALSE;
/* We might pass the entire URL into the request so we need to make sure
* there are no bad characters in there. */
if(strpbrk(data->change.url, "\r\n")) {
failf(data, "Illegal characters found in URL");
return CURLE_URL_MALFORMAT;
}
/*************************************************************
* Parse the URL.
*
* We need to parse the url even when using the proxy, because we will need
* the hostname and port in case we are trying to SSL connect through the
* proxy -- and we don't know if we will need to use SSL until we parse the
* url ...
************************************************************/
if(data->change.url[0] == ':') {
failf(data, "Bad URL, colon is first character");
return CURLE_URL_MALFORMAT;
}
/* Make sure we don't mistake a drive letter for a scheme, for example:
curl --proto-default file c:/foo/bar.txt */
if((('a' <= data->change.url[0] && data->change.url[0] <= 'z') ||
('A' <= data->change.url[0] && data->change.url[0] <= 'Z')) &&
data->change.url[1] == ':' && data->set.str[STRING_DEFAULT_PROTOCOL] &&
strcasecompare(data->set.str[STRING_DEFAULT_PROTOCOL], "file")) {
; /* do nothing */
}
else { /* check for a scheme */
for(i = 0; i < 16 && data->change.url[i]; ++i) {
if(data->change.url[i] == '/')
break;
if(data->change.url[i] == ':') {
url_has_scheme = TRUE;
break;
}
}
}
/* handle the file: scheme */
if((url_has_scheme && strncasecompare(data->change.url, "file:", 5)) ||
(!url_has_scheme && data->set.str[STRING_DEFAULT_PROTOCOL] &&
strcasecompare(data->set.str[STRING_DEFAULT_PROTOCOL], "file"))) {
bool path_has_drive = FALSE;
if(url_has_scheme)
rc = sscanf(data->change.url, "%*15[^\n/:]:%[^\n]", path);
else
rc = sscanf(data->change.url, "%[^\n]", path);
if(rc != 1) {
failf(data, "Bad URL");
return CURLE_URL_MALFORMAT;
}
if(url_has_scheme && path[0] == '/' && path[1] == '/') {
/* Allow omitted hostname (e.g. file:/<path>). This is not strictly
* speaking a valid file: URL by RFC 1738, but treating file:/<path> as
* file://localhost/<path> is similar to how other schemes treat missing
* hostnames. See RFC 1808. */
/* This cannot be done with strcpy() in a portable manner, since the
memory areas overlap! */
memmove(path, path + 2, strlen(path + 2)+1);
}
/* the path may start with a drive letter. for backwards compatibility
we skip some processing on those paths. */
path_has_drive = (('a' <= path[0] && path[0] <= 'z') ||
('A' <= path[0] && path[0] <= 'Z')) && path[1] == ':';
/*
* we deal with file://<host>/<path> differently since it supports no
* hostname other than "localhost" and "127.0.0.1", which is unique among
* the URL protocols specified in RFC 1738
*/
if(path[0] != '/' && !path_has_drive) {
/* the URL includes a host name, it must match "localhost" or
"127.0.0.1" to be valid */
char *ptr;
if(!checkprefix("localhost/", path) &&
!checkprefix("127.0.0.1/", path)) {
failf(data, "Invalid file://hostname/, "
"expected localhost or 127.0.0.1 or none");
return CURLE_URL_MALFORMAT;
}
ptr = &path[9]; /* now points to the slash after the host */
/* there was a host name and slash present
RFC1738 (section 3.1, page 5) says:
The rest of the locator consists of data specific to the scheme,
and is known as the "url-path". It supplies the details of how the
specified resource can be accessed. Note that the "/" between the
host (or port) and the url-path is NOT part of the url-path.
As most agents use file://localhost/foo to get '/foo' although the
slash preceding foo is a separator and not a slash for the path,
a URL such as file://localhost//foo must be valid as well, to refer to
the same file with an absolute path.
*/
if('/' == ptr[1])
/* if there were two slashes, we skip the first one as that is then
used truly as a separator */
ptr++;
/* This cannot be made with strcpy, as the memory chunks overlap! */
memmove(path, ptr, strlen(ptr)+1);
path_has_drive = (('a' <= path[0] && path[0] <= 'z') ||
('A' <= path[0] && path[0] <= 'Z')) && path[1] == ':';
}
#if !defined(MSDOS) && !defined(WIN32) && !defined(__CYGWIN__)
if(path_has_drive) {
failf(data, "File drive letters are only accepted in MSDOS/Windows.");
return CURLE_URL_MALFORMAT;
}
#endif
protop = "file"; /* protocol string */
}
else {
/* clear path */
char slashbuf[4];
path[0]=0;
rc = sscanf(data->change.url,
"%15[^\n/:]:%3[/]%[^\n/?#]%[^\n]",
protobuf, slashbuf, conn->host.name, path);
if(2 == rc) {
failf(data, "Bad URL");
return CURLE_URL_MALFORMAT;
}
if(3 > rc) {
/*
* The URL was badly formatted, so let's try the browser style _without_ a
* protocol prefix such as 'http://'.
*/
rc = sscanf(data->change.url, "%[^\n/?#]%[^\n]", conn->host.name, path);
if(1 > rc) {
/*
* We couldn't even get this format.
* djgpp 2.04 has a sscanf() bug where 'conn->host.name' is
* assigned, but the return value is EOF!
*/
#if defined(__DJGPP__) && (DJGPP_MINOR == 4)
if(!(rc == -1 && *conn->host.name))
#endif
{
failf(data, "<url> malformed");
return CURLE_URL_MALFORMAT;
}
}
/*
* Since there was no protocol part specified in the URL, use the
* user-specified default protocol. If we weren't given a default, make
* a guess by matching some protocols against the host's outermost
* sub-domain name. Finally, if there was no match, use HTTP.
*/
protop = data->set.str[STRING_DEFAULT_PROTOCOL];
if(!protop) {
/* Note: if you add a new protocol, please update the list in
* lib/version.c too! */
if(checkprefix("FTP.", conn->host.name))
protop = "ftp";
else if(checkprefix("DICT.", conn->host.name))
protop = "DICT";
else if(checkprefix("LDAP.", conn->host.name))
protop = "LDAP";
else if(checkprefix("IMAP.", conn->host.name))
protop = "IMAP";
else if(checkprefix("SMTP.", conn->host.name))
protop = "smtp";
else if(checkprefix("POP3.", conn->host.name))
protop = "pop3";
else
protop = "http";
}
*prot_missing = TRUE; /* not given in URL */
2000-05-22 10:12:12 -04:00
}
else {
size_t s = strlen(slashbuf);
protop = protobuf;
if(s != 2) {
infof(data, "Unwillingly accepted illegal URL using %d slash%s!\n",
s, s>1?"es":"");
if(data->change.url_alloc)
free(data->change.url);
/* repair the URL to use two slashes */
data->change.url = aprintf("%s://%s%s",
protobuf, conn->host.name, path);
if(!data->change.url)
return CURLE_OUT_OF_MEMORY;
data->change.url_alloc = TRUE;
}
}
}
/* We search for '?' in the host name (but only on the right side of a
* @-letter to allow ?-letters in username and password) to handle things
* like http://example.com?param= (notice the missing '/').
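* When a '?' is found there, the '?string' part is cut off the host name
* and moved to the front of the path, prefixed with a slash.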
*/
at = strchr(conn->host.name, '@');
if(at)
query = strchr(at+1, '?');
else
query = strchr(conn->host.name, '?');
if(query) {
/* We must insert a slash before the '?'-letter in the URL. If the URL had
a slash after the '?', that is where the path currently begins and the
'?string' is still part of the host name.
We must move the trailing part from the host name and put it first in
the path. And have it all prefixed with a slash.
*/
size_t hostlen = strlen(query);
size_t pathlen = strlen(path);
/* move the existing path plus the zero byte forward, to make room for
the host-name part */
memmove(path+hostlen+1, path, pathlen+1);
/* now copy the trailing host part in front of the existing path */
memcpy(path+1, query, hostlen);
path[0]='/'; /* prepend the missing slash */
rebuild_url = TRUE;
*query=0; /* now cut off the hostname at the ? */
}
else if(!path[0]) {
/* if there's no path set, use a single slash */
strcpy(path, "/");
rebuild_url = TRUE;
}
/* If the URL is malformed (missing a '/' after the hostname, before the
* path), we insert a slash here. The only characters other than '/' that can
* start a path are '?' and '#' - as enforced by the two sscanf() patterns
* above.
*/
if(path[0] != '/') {
/* We use memmove() since the source and destination memory areas overlap.
We know that the buffer 'path' points to is 'urllen' bytes big and that is
bigger than the path itself. Use +1 to move the terminating zero byte
too. */
memmove(&path[1], path, strlen(path)+1);
path[0] = '/';
rebuild_url = TRUE;
}
else if(!data->set.path_as_is) {
/* sanitise paths and remove ../ and ./ sequences according to RFC3986 */
char *newp = Curl_dedotdotify(path);
if(!newp)
return CURLE_OUT_OF_MEMORY;
if(strcmp(newp, path)) {
rebuild_url = TRUE;
free(data->state.pathbuffer);
data->state.pathbuffer = newp;
data->state.path = newp;
path = newp;
}
else
free(newp);
}
/*
* "rebuild_url" means that one or more URL components have been modified so
* we need to generate an updated full version. We need the corrected URL
* when communicating over an HTTP proxy, and we don't know at this point if
* we're using a proxy or not.
*/
if(rebuild_url) {
char *reurl;
size_t plen = strlen(path); /* new path, should be 1 byte longer than
the original */
size_t urllen = strlen(data->change.url); /* original URL length */
size_t prefixlen = strlen(conn->host.name);
if(!*prot_missing)
prefixlen += strlen(protop) + strlen("://");
reurl = malloc(urllen + 2); /* 2 for zerobyte + slash */
if(!reurl)
return CURLE_OUT_OF_MEMORY;
/* copy the prefix */
memcpy(reurl, data->change.url, prefixlen);
/* append the trailing piece + zerobyte */
memcpy(&reurl[prefixlen], path, plen + 1);
/* possibly free the old one */
if(data->change.url_alloc) {
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
infof(data, "Rebuilt URL to: %s\n", reurl);
data->change.url = reurl;
data->change.url_alloc = TRUE; /* free this later */
}
result = findprotocol(data, conn, protop);
if(result)
return result;
/*
* Parse the login details from the URL and strip them out of
* the host name
*/
result = parse_url_login(data, conn, userp, passwdp, optionsp);
if(result)
return result;
if(conn->host.name[0] == '[') {
/* This looks like an IPv6 address literal. See if there is an address
scope if there is no location header */
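/* A zone id can follow the address, as in "[fe80::1%25eth0]" (example
address); "%25" is the URL-encoded '%' from RFC 6874. A bare '%' is
tolerated with a warning, and the identifier may be numeric or an
interface name. */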
char *percent = strchr(conn->host.name, '%');
if(percent) {
unsigned int identifier_offset = 3;
char *endp;
unsigned long scope;
if(strncmp("%25", percent, 3) != 0) {
infof(data,
"Please URL encode %% as %%25, see RFC 6874.\n");
identifier_offset = 1;
}
scope = strtoul(percent + identifier_offset, &endp, 10);
if(*endp == ']') {
/* The address scope was well formed. Knock it out of the
hostname. */
memmove(percent, endp, strlen(endp)+1);
conn->scope_id = (unsigned int)scope;
}
else {
/* Zone identifier is not numeric */
#if defined(HAVE_NET_IF_H) && defined(IFNAMSIZ) && defined(HAVE_IF_NAMETOINDEX)
char ifname[IFNAMSIZ + 2];
char *square_bracket;
unsigned int scopeidx = 0;
strncpy(ifname, percent + identifier_offset, IFNAMSIZ + 2);
/* Ensure nullbyte termination */
ifname[IFNAMSIZ + 1] = '\0';
square_bracket = strchr(ifname, ']');
if(square_bracket) {
/* Remove ']' */
*square_bracket = '\0';
scopeidx = if_nametoindex(ifname);
if(scopeidx == 0) {
infof(data, "Invalid network interface: %s; %s\n", ifname,
strerror(errno));
}
}
if(scopeidx > 0) {
char *p = percent + identifier_offset + strlen(ifname);
/* Remove zone identifier from hostname */
memmove(percent, p, strlen(p) + 1);
conn->scope_id = scopeidx;
}
else
#endif /* HAVE_NET_IF_H && IFNAMSIZ && HAVE_IF_NAMETOINDEX */
infof(data, "Invalid IPv6 address format\n");
}
}
}
if(data->set.scope_id)
/* Override any scope that was set above. */
conn->scope_id = data->set.scope_id;
/* Remove the fragment part of the path. Per RFC 2396, this is always the
last part of the URI. We are looking for the first '#' so that we deal
gracefully with non-conformant URIs such as http://example.com#foo#bar. */
fragment = strchr(path, '#');
if(fragment) {
*fragment = 0;
/* we know the path part ended with a fragment, so we know the full URL
string does too and we need to cut it off from there so it isn't used
over a proxy */
fragment = strchr(data->change.url, '#');
if(fragment)
*fragment = 0;
}
/*
* So if the URL was A://B/C#D,
* protop is A
* conn->host.name is B
* data->state.path is /C
*/
return CURLE_OK;
}
/*
* If we're doing a resumed transfer, we need to set up our range and
* resume state properly.
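*
* For example, a resume offset of 500 produces the range string "500-",
* while a CURLOPT_RANGE string such as "0-499" is used as-is.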
*/
static CURLcode setup_range(struct Curl_easy *data)
{
struct UrlState *s = &data->state;
s->resume_from = data->set.set_resume_from;
if(s->resume_from || data->set.str[STRING_SET_RANGE]) {
if(s->rangestringalloc)
free(s->range);
if(s->resume_from)
s->range = aprintf("%" CURL_FORMAT_CURL_OFF_TU "-", s->resume_from);
else
s->range = strdup(data->set.str[STRING_SET_RANGE]);
s->rangestringalloc = (s->range) ? TRUE : FALSE;
if(!s->range)
return CURLE_OUT_OF_MEMORY;
/* tell ourselves to fetch this range */
s->use_range = TRUE; /* enable range download */
}
else
s->use_range = FALSE; /* disable range download */
return CURLE_OK;
}
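/* Worked example (illustrative values): a resume offset of 500 yields the
   range string "500-", while a CURLOPT_RANGE string such as "0-199" is
   simply duplicated. Either way use_range is switched on and
   rangestringalloc records that s->range was allocated here and must be
   freed later. */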
/*
* setup_connection_internals() -
*
* Setup connection internals specific to the requested protocol in the
* Curl_easy. This is initialized and set up before the connection is made
* but concerns the particular protocol that is to be used.
*
* This MUST get called after proxy magic has been figured out.
*/
static CURLcode setup_connection_internals(struct connectdata *conn)
{
const struct Curl_handler * p;
CURLcode result;
struct Curl_easy *data = conn->data;
/* in some cases in the multi state-machine, we go back to the CONNECT state
and then a second (or third or...) call to this function will be made
without doing a DISCONNECT or DONE in between (since the connection is
still in place) and therefore this function needs to first make sure
there's no lingering previous data allocated. */
Curl_free_request_state(data);
memset(&data->req, 0, sizeof(struct SingleRequest));
data->req.maxdownload = -1;
conn->socktype = SOCK_STREAM; /* most of them are TCP streams */
/* Perform protocol-specific setup, if the handler provides any. */
p = conn->handler;
if(p->setup_connection) {
result = (*p->setup_connection)(conn);
if(result)
return result;
p = conn->handler; /* May have changed. */
}
if(conn->port < 0)
/* we check for -1 here since if proxy was detected already, this
was very likely already set to the proxy port */
conn->port = p->defport;
return CURLE_OK;
}
/*
* Curl_free_request_state() should free temp data that was allocated in the
* Curl_easy for this single request.
*/
void Curl_free_request_state(struct Curl_easy *data)
{
Curl_safefree(data->req.protop);
Curl_safefree(data->req.newurl);
}
#ifndef CURL_DISABLE_PROXY
/****************************************************************
* Checks if the host is in the noproxy list. Returns true if it matches
* and therefore the proxy should NOT be used.
****************************************************************/
static bool check_noproxy(const char *name, const char *no_proxy)
{
/* no_proxy=domain1.dom,host.domain2.dom
* (a comma-separated list of hosts which should
* not be proxied, or an asterisk to override
* all proxy variables)
*/
size_t tok_start;
size_t tok_end;
const char *separator = ", ";
size_t no_proxy_len;
size_t namelen;
char *endptr;
if(no_proxy && no_proxy[0]) {
if(strcasecompare("*", no_proxy)) {
return TRUE;
}
/* NO_PROXY was specified and it wasn't just an asterisk */
no_proxy_len = strlen(no_proxy);
endptr = strchr(name, ':');
if(endptr)
namelen = endptr - name;
else
namelen = strlen(name);
for(tok_start = 0; tok_start < no_proxy_len; tok_start = tok_end + 1) {
while(tok_start < no_proxy_len &&
strchr(separator, no_proxy[tok_start]) != NULL) {
/* Look for the beginning of the token. */
++tok_start;
}
if(tok_start == no_proxy_len)
break; /* It was all trailing separator chars, no more tokens. */
for(tok_end = tok_start; tok_end < no_proxy_len &&
strchr(separator, no_proxy[tok_end]) == NULL; ++tok_end)
/* Look for the end of the token. */
;
/* To match previous behaviour, where it was necessary to specify
* ".local.com" to prevent matching "notlocal.com", we will leave
* the '.' off.
*/
if(no_proxy[tok_start] == '.')
++tok_start;
if((tok_end - tok_start) <= namelen) {
/* Match the last part of the name to the domain we are checking. */
const char *checkn = name + namelen - (tok_end - tok_start);
if(strncasecompare(no_proxy + tok_start, checkn,
tok_end - tok_start)) {
if((tok_end - tok_start) == namelen || *(checkn - 1) == '.') {
/* We either have an exact match, or the previous character is a .
* so it is within the same domain, so no proxy for this host.
*/
return TRUE;
}
}
} /* if((tok_end - tok_start) <= namelen) */
} /* for(tok_start = 0; tok_start < no_proxy_len;
tok_start = tok_end + 1) */
} /* NO_PROXY was specified and it wasn't just an asterisk */
return FALSE;
}
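/* A minimal standalone sketch of the matching rule above (an illustrative
   assumption, not part of libcurl): a case-insensitive suffix compare where
   a leading '.' in the pattern is dropped and a match must fall on a label
   boundary. Kept disabled so it does not affect the build. */
#if 0
#include <string.h>
#include <strings.h> /* strncasecmp() is POSIX */

static int tail_matches(const char *host, const char *pattern)
{
  size_t hlen = strlen(host);
  size_t plen = strlen(pattern);
  if(*pattern == '.') { /* ".local.com" is treated like "local.com" */
    pattern++;
    plen--;
  }
  if(!plen || plen > hlen)
    return 0;
  if(strncasecmp(host + hlen - plen, pattern, plen))
    return 0;
  /* exact match, or the character just before the suffix is a dot */
  return (plen == hlen) || (host[hlen - plen - 1] == '.');
}
/* tail_matches("www.example.com", "example.com") == 1
   tail_matches("notexample.com", "example.com") == 0 */
#endif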
/****************************************************************
* Detect what (if any) proxy to use. Remember that this selects a host
* name and is not limited to HTTP proxies only.
* The returned pointer must be freed by the caller (unless NULL)
****************************************************************/
static char *detect_proxy(struct connectdata *conn)
{
char *proxy = NULL;
/* If proxy was not specified, we check for default proxy environment
* variables, to enable e.g. Lynx compliance:
*
* http_proxy=http://some.server.dom:port/
* https_proxy=http://some.server.dom:port/
* ftp_proxy=http://some.server.dom:port/
* no_proxy=domain1.dom,host.domain2.dom
* (a comma-separated list of hosts which should
* not be proxied, or an asterisk to override
* all proxy variables)
* all_proxy=http://some.server.dom:port/
* (seems to exist for the CERN www lib. Probably
* the first to check for.)
*
* For compatibility, the all-uppercase versions of these variables are
* checked if the lowercase versions don't exist.
*/
char proxy_env[128];
const char *protop = conn->handler->scheme;
char *envp = proxy_env;
char *prox;
/* Now, build <protocol>_proxy and check for such a one to use */
while(*protop)
*envp++ = (char)tolower((int)*protop++);
/* append _proxy */
strcpy(envp, "_proxy");
/* read the protocol proxy: */
prox=curl_getenv(proxy_env);
/*
* We don't try the uppercase version of HTTP_PROXY because of
* security reasons:
*
* When curl is used in a webserver application
* environment (cgi or php), this environment variable can
* be controlled by the web server user by setting the
* http header 'Proxy:' to some value.
*
* This can cause 'internal' http/ftp requests to be
* arbitrarily redirected by any external attacker.
*/
if(!prox && !strcasecompare("http_proxy", proxy_env)) {
/* There was no lowercase variable, try the uppercase version: */
Curl_strntoupper(proxy_env, proxy_env, sizeof(proxy_env));
prox=curl_getenv(proxy_env);
}
if(prox)
proxy = prox; /* use this */
else {
proxy = curl_getenv("all_proxy"); /* default proxy to use */
if(!proxy)
proxy=curl_getenv("ALL_PROXY");
}
return proxy;
}
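/* Illustrative lookup order for a connection using the "ftp" scheme
   (assuming no proxy was set explicitly): "ftp_proxy" is tried first, then
   "FTP_PROXY" (the uppercase form is skipped for http only, as explained
   above), and finally "all_proxy" and "ALL_PROXY" as catch-all fallbacks. */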
/*
* If this is supposed to use a proxy, we need to figure out the proxy host
* name, so that we can re-use an existing connection that may already be
* registered to the same proxy host.
*/
static CURLcode parse_proxy(struct Curl_easy *data,
struct connectdata *conn, char *proxy,
curl_proxytype proxytype)
{
char *prox_portno;
char *endofprot;
/* We use 'proxyptr' to point to the proxy name from now on... */
char *proxyptr;
char *portptr;
char *atsign;
long port = -1;
char *proxyuser = NULL;
char *proxypasswd = NULL;
bool sockstype;
/* We do the proxy host string parsing here. We want the host name and the
 * port number. Accept a protocol:// prefix
 */
/* Parse the protocol part if present */
endofprot = strstr(proxy, "://");
if(endofprot) {
proxyptr = endofprot+3;
if(checkprefix("https", proxy))
proxytype = CURLPROXY_HTTPS;
else if(checkprefix("socks5h", proxy))
proxytype = CURLPROXY_SOCKS5_HOSTNAME;
else if(checkprefix("socks5", proxy))
proxytype = CURLPROXY_SOCKS5;
else if(checkprefix("socks4a", proxy))
proxytype = CURLPROXY_SOCKS4A;
else if(checkprefix("socks4", proxy) || checkprefix("socks", proxy))
proxytype = CURLPROXY_SOCKS4;
else if(checkprefix("http:", proxy))
; /* leave it as HTTP or HTTP/1.0 */
else {
/* Any other xxx:// reject! */
failf(data, "Unsupported proxy scheme for \'%s\'", proxy);
return CURLE_COULDNT_CONNECT;
}
}
else
proxyptr = proxy; /* No xxx:// head: it's an HTTP proxy */
#ifndef HTTPS_PROXY_SUPPORT
if(proxytype == CURLPROXY_HTTPS) {
failf(data, "Unsupported proxy \'%s\'"
", libcurl is built without the HTTPS-proxy support.", proxy);
return CURLE_NOT_BUILT_IN;
}
#endif
sockstype = proxytype == CURLPROXY_SOCKS5_HOSTNAME ||
proxytype == CURLPROXY_SOCKS5 ||
proxytype == CURLPROXY_SOCKS4A ||
proxytype == CURLPROXY_SOCKS4;
/* Is there a username and password given in this proxy url? */
atsign = strchr(proxyptr, '@');
if(atsign) {
CURLcode result =
parse_login_details(proxyptr, atsign - proxyptr,
&proxyuser, &proxypasswd, NULL);
if(result)
return result;
proxyptr = atsign + 1;
}
/* start scanning for port number at this point */
portptr = proxyptr;
/* detect and extract RFC6874-style IPv6-addresses */
if(*proxyptr == '[') {
char *ptr = ++proxyptr; /* advance beyond the initial bracket */
while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.')))
ptr++;
if(*ptr == '%') {
/* There might be a zone identifier */
if(strncmp("%25", ptr, 3))
infof(data, "Please URL encode %% as %%25, see RFC 6874.\n");
ptr++;
/* Allow unreserved characters as defined in RFC 3986 */
while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') ||
(*ptr == '.') || (*ptr == '_') || (*ptr == '~')))
ptr++;
}
if(*ptr == ']')
/* yes, it ended nicely with a bracket as well */
*ptr++ = 0;
else
infof(data, "Invalid IPv6 address format\n");
portptr = ptr;
/* Note that if this didn't end with a bracket, we still advanced the
* proxyptr first, but I can't see anything wrong with that as neither a
* host name nor a numerical address can legally start with a bracket.
*/
}
/* Get port number off proxy.server.com:1080 */
prox_portno = strchr(portptr, ':');
if(prox_portno) {
char *endp = NULL;
*prox_portno = 0x0; /* cut off number from host name */
prox_portno++;
/* now set the local port number */
port = strtol(prox_portno, &endp, 10);
if((endp && *endp && (*endp != '/') && (*endp != ' ')) ||
(port < 0) || (port > 65535)) {
/* meant to detect for example invalid IPv6 numerical addresses without
brackets: "2a00:fac0:a000::7:13". Accept a trailing slash only
because we then allow "URL style" with the number followed by a
slash, used in curl test cases already. Space is also an acceptable
terminating symbol. */
infof(data, "No valid port number in proxy string (%s)\n",
prox_portno);
}
else
conn->port = port;
}
else {
if(proxyptr[0]=='/')
/* If the first character in the proxy string is a slash, fail
immediately. The following code will otherwise clear the string which
will lead to code running as if no proxy was set! */
return CURLE_COULDNT_RESOLVE_PROXY;
/* without a port number after the host name, some people seem to use
a slash so we strip everything from the first slash */
atsign = strchr(proxyptr, '/');
if(atsign)
*atsign = '\0'; /* cut off path part from host name */
if(data->set.proxyport)
/* None given in the proxy string, then get the default one if it is
given */
port = data->set.proxyport;
else {
if(proxytype == CURLPROXY_HTTPS)
port = CURL_DEFAULT_HTTPS_PROXY_PORT;
else
port = CURL_DEFAULT_PROXY_PORT;
}
}
if(*proxyptr) {
struct proxy_info *proxyinfo =
sockstype ? &conn->socks_proxy : &conn->http_proxy;
proxyinfo->proxytype = proxytype;
if(proxyuser) {
/* found user and password, rip them out. note that we are unescaping
them, as there is otherwise no way to have a username or password
with reserved characters like ':' in them. */
Curl_safefree(proxyinfo->user);
proxyinfo->user = curl_easy_unescape(data, proxyuser, 0, NULL);
if(!proxyinfo->user)
return CURLE_OUT_OF_MEMORY;
Curl_safefree(proxyinfo->passwd);
if(proxypasswd && strlen(proxypasswd) < MAX_CURL_PASSWORD_LENGTH)
proxyinfo->passwd = curl_easy_unescape(data, proxypasswd, 0, NULL);
else
proxyinfo->passwd = strdup("");
if(!proxyinfo->passwd)
return CURLE_OUT_OF_MEMORY;
conn->bits.proxy_user_passwd = TRUE; /* enable it */
}
if(port >= 0) {
proxyinfo->port = port;
if(conn->port < 0 || sockstype || !conn->socks_proxy.host.rawalloc)
conn->port = port;
}
/* now, clone the cleaned proxy host name */
Curl_safefree(proxyinfo->host.rawalloc);
proxyinfo->host.rawalloc = strdup(proxyptr);
proxyinfo->host.name = proxyinfo->host.rawalloc;
if(!proxyinfo->host.rawalloc)
return CURLE_OUT_OF_MEMORY;
}
Curl_safefree(proxyuser);
Curl_safefree(proxypasswd);
return CURLE_OK;
}
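/* Worked example (hypothetical proxy string): "socks5h://joe:s3cret@[::1]:1080/"
   selects CURLPROXY_SOCKS5_HOSTNAME, unescapes "joe" and "s3cret" into
   conn->socks_proxy.user/passwd, and stores "::1" as the proxy host with
   1080 as its port. A bare "proxy.example.com" keeps the passed-in proxytype
   and falls back to CURLOPT_PROXYPORT or the default proxy port. */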
/*
* Extract the user and password from the authentication string
*/
static CURLcode parse_proxy_auth(struct Curl_easy *data,
struct connectdata *conn)
{
char proxyuser[MAX_CURL_USER_LENGTH]="";
char proxypasswd[MAX_CURL_PASSWORD_LENGTH]="";
CURLcode result;
if(data->set.str[STRING_PROXYUSERNAME] != NULL) {
strncpy(proxyuser, data->set.str[STRING_PROXYUSERNAME],
MAX_CURL_USER_LENGTH);
proxyuser[MAX_CURL_USER_LENGTH-1] = '\0'; /*To be on safe side*/
}
if(data->set.str[STRING_PROXYPASSWORD] != NULL) {
strncpy(proxypasswd, data->set.str[STRING_PROXYPASSWORD],
MAX_CURL_PASSWORD_LENGTH);
proxypasswd[MAX_CURL_PASSWORD_LENGTH-1] = '\0'; /*To be on safe side*/
}
result = Curl_urldecode(data, proxyuser, 0, &conn->http_proxy.user, NULL,
FALSE);
if(!result)
result = Curl_urldecode(data, proxypasswd, 0, &conn->http_proxy.passwd,
NULL, FALSE);
return result;
}
#endif /* CURL_DISABLE_PROXY */
/*
* parse_url_login()
*
* Parse the login details (user name, password and options) from the URL and
* strip them out of the host name
*
* Inputs: data->set.use_netrc (CURLOPT_NETRC)
* conn->host.name
*
* Outputs: (almost :- all currently undefined)
* conn->bits.user_passwd - non-zero if non-default passwords exist
* user - non-zero length if defined
* passwd - non-zero length if defined
* options - non-zero length if defined
* conn->host.name - user name and password removed
*/
static CURLcode parse_url_login(struct Curl_easy *data,
struct connectdata *conn,
char **user, char **passwd, char **options)
{
CURLcode result = CURLE_OK;
char *userp = NULL;
char *passwdp = NULL;
char *optionsp = NULL;
/* At this point, we're hoping all the other special cases have
* been taken care of, so conn->host.name is at most
* [user[:password][;options]]@]hostname
*
* We need somewhere to put the embedded details, so do that first.
*/
char *ptr = strchr(conn->host.name, '@');
char *login = conn->host.name;
DEBUGASSERT(!**user);
DEBUGASSERT(!**passwd);
DEBUGASSERT(!**options);
DEBUGASSERT(conn->handler);
if(!ptr)
goto out;
/* We will now try to extract the
* possible login information in a string like:
* ftp://user:password@ftp.my.site:8021/README */
conn->host.name = ++ptr;
/* So the hostname is sane. Only bother interpreting the
* results if we could care. It could still be wasted
* work because it might be overtaken by the programmatically
* set user/passwd, but doing that first adds more cases here :-(
*/
if(data->set.use_netrc == CURL_NETRC_REQUIRED)
goto out;
/* We could use the login information in the URL so extract it. Only parse
options if the handler says we should. */
result = parse_login_details(login, ptr - login - 1,
&userp, &passwdp,
(conn->handler->flags & PROTOPT_URLOPTIONS)?
&optionsp:NULL);
if(result)
goto out;
if(userp) {
char *newname;
/* We have a user in the URL */
conn->bits.userpwd_in_url = TRUE;
conn->bits.user_passwd = TRUE; /* enable user+password */
/* Decode the user */
result = Curl_urldecode(data, userp, 0, &newname, NULL, FALSE);
if(result) {
goto out;
}
free(*user);
*user = newname;
}
if(passwdp) {
/* We have a password in the URL so decode it */
char *newpasswd;
result = Curl_urldecode(data, passwdp, 0, &newpasswd, NULL, FALSE);
if(result) {
goto out;
}
free(*passwd);
*passwd = newpasswd;
}
if(optionsp) {
/* We have an options list in the URL so decode it */
char *newoptions;
result = Curl_urldecode(data, optionsp, 0, &newoptions, NULL, FALSE);
if(result) {
goto out;
}
free(*options);
*options = newoptions;
}
out:
free(userp);
free(passwdp);
free(optionsp);
return result;
}
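/* Illustrative example (hypothetical URL): for
   "imap://user:secret;auth=NTLM@mail.example.com/" and a handler that sets
   PROTOPT_URLOPTIONS, *user becomes "user", *passwd "secret" and *options
   "auth=NTLM", while conn->host.name is advanced past the '@' to
   "mail.example.com". Without PROTOPT_URLOPTIONS the ";auth=NTLM" piece
   stays in the password. */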
/*
* parse_login_details()
*
* This is used to parse a login string for user name, password and options in
* the following formats:
*
* user
* user:password
* user:password;options
* user;options
* user;options:password
* :password
* :password;options
* ;options
* ;options:password
*
* Parameters:
*
* login [in] - The login string.
* len [in] - The length of the login string.
* userp [in/out] - The address where a pointer to newly allocated memory
* holding the user will be stored upon completion.
* passwdp [in/out] - The address where a pointer to newly allocated memory
* holding the password will be stored upon completion.
* optionsp [in/out] - The address where a pointer to newly allocated memory
* holding the options will be stored upon completion.
*
* Returns CURLE_OK on success.
*/
static CURLcode parse_login_details(const char *login, const size_t len,
char **userp, char **passwdp,
char **optionsp)
{
CURLcode result = CURLE_OK;
char *ubuf = NULL;
char *pbuf = NULL;
char *obuf = NULL;
const char *psep = NULL;
const char *osep = NULL;
size_t ulen;
size_t plen;
size_t olen;
/* Attempt to find the password separator */
if(passwdp) {
psep = strchr(login, ':');
/* Within the constraint of the login string */
if(psep >= login + len)
psep = NULL;
}
/* Attempt to find the options separator */
if(optionsp) {
osep = strchr(login, ';');
/* Within the constraint of the login string */
if(osep >= login + len)
osep = NULL;
}
/* Calculate the portion lengths */
ulen = (psep ?
(size_t)(osep && psep > osep ? osep - login : psep - login) :
(osep ? (size_t)(osep - login) : len));
plen = (psep ?
(osep && osep > psep ? (size_t)(osep - psep) :
(size_t)(login + len - psep)) - 1 : 0);
olen = (osep ?
(psep && psep > osep ? (size_t)(psep - osep) :
(size_t)(login + len - osep)) - 1 : 0);
/* Allocate the user portion buffer */
if(userp && ulen) {
ubuf = malloc(ulen + 1);
if(!ubuf)
result = CURLE_OUT_OF_MEMORY;
}
/* Allocate the password portion buffer */
if(!result && passwdp && plen) {
pbuf = malloc(plen + 1);
if(!pbuf) {
free(ubuf);
result = CURLE_OUT_OF_MEMORY;
}
}
/* Allocate the options portion buffer */
if(!result && optionsp && olen) {
obuf = malloc(olen + 1);
if(!obuf) {
free(pbuf);
free(ubuf);
result = CURLE_OUT_OF_MEMORY;
}
}
if(!result) {
/* Store the user portion if necessary */
if(ubuf) {
memcpy(ubuf, login, ulen);
ubuf[ulen] = '\0';
Curl_safefree(*userp);
*userp = ubuf;
}
/* Store the password portion if necessary */
if(pbuf) {
memcpy(pbuf, psep + 1, plen);
pbuf[plen] = '\0';
Curl_safefree(*passwdp);
*passwdp = pbuf;
}
/* Store the options portion if necessary */
if(obuf) {
memcpy(obuf, osep + 1, olen);
obuf[olen] = '\0';
Curl_safefree(*optionsp);
*optionsp = obuf;
}
}
return result;
}
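/* Worked example (illustrative input): for login = "alice:pa:ss;auth=PLAIN"
   with both passwdp and optionsp given, psep points at the first ':' and
   osep at the ';', so the split becomes user "alice", password "pa:ss" and
   options "auth=PLAIN". An input of just ";auth=PLAIN" produces no user and
   no password buffers, only the options buffer. */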
/*************************************************************
* Figure out the remote port number and fix it in the URL
*
* No matter if we use a proxy or not, we have to figure out the remote
* port number for various reasons.
*
* To be able to detect the port number flawlessly, we must not confuse it
* with IPv6-specified addresses in the [0::1] style. (RFC 2732)
*
* The conn->host.name is currently [user:passwd@]host[:port] where host
* could be a hostname, IPv4 address or IPv6 address.
*
* The port number embedded in the URL is replaced, if necessary.
*************************************************************/
static CURLcode parse_remote_port(struct Curl_easy *data,
struct connectdata *conn)
{
char *portptr;
char endbracket;
/* Note that at this point, the IPv6 address cannot contain any scope
suffix as that has already been removed in the parseurlandfillconn()
function */
if((1 == sscanf(conn->host.name, "[%*45[0123456789abcdefABCDEF:.]%c",
&endbracket)) &&
(']' == endbracket)) {
/* this is a RFC2732-style specified IP-address */
conn->bits.ipv6_ip = TRUE;
conn->host.name++; /* skip over the starting bracket */
portptr = strchr(conn->host.name, ']');
if(portptr) {
*portptr++ = '\0'; /* zero terminate, killing the bracket */
if(':' != *portptr)
portptr = NULL; /* no port number available */
}
}
else {
#ifdef ENABLE_IPV6
struct in6_addr in6;
if(Curl_inet_pton(AF_INET6, conn->host.name, &in6) > 0) {
/* This is a numerical IPv6 address, meaning this is a wrongly formatted
URL */
failf(data, "IPv6 numerical address used in URL without brackets");
return CURLE_URL_MALFORMAT;
}
#endif
portptr = strrchr(conn->host.name, ':');
}
if(data->set.use_port && data->state.allow_port) {
/* if set, we use this and ignore the port possibly given in the URL */
conn->remote_port = (unsigned short)data->set.use_port;
if(portptr)
*portptr = '\0'; /* cut off the name there anyway - if there was a port
number - since the port number is to be ignored! */
if(conn->bits.httpproxy) {
/* we need to create new URL with the new port number */
char *url;
char type[12]="";
if(conn->bits.type_set)
snprintf(type, sizeof(type), ";type=%c",
data->set.prefer_ascii?'A':
(data->set.ftp_list_only?'D':'I'));
/*
* This synthesized URL isn't always right--suffixes like ;type=A are
* stripped off. It would be better to work directly from the original
* URL and simply replace the port part of it.
*/
url = aprintf("%s://%s%s%s:%hu%s%s%s", conn->given->scheme,
conn->bits.ipv6_ip?"[":"", conn->host.name,
conn->bits.ipv6_ip?"]":"", conn->remote_port,
data->state.slash_removed?"/":"", data->state.path,
type);
if(!url)
return CURLE_OUT_OF_MEMORY;
if(data->change.url_alloc) {
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
data->change.url = url;
data->change.url_alloc = TRUE;
}
}
else if(portptr) {
/* no CURLOPT_PORT given, extract the one from the URL */
char *rest;
long port;
port=strtol(portptr+1, &rest, 10); /* Port number must be decimal */
if((port < 0) || (port > 0xffff)) {
/* Single unix standard says port numbers are 16 bits long */
failf(data, "Port number out of range");
return CURLE_URL_MALFORMAT;
}
else if(rest != &portptr[1]) {
*portptr = '\0'; /* cut off the name there */
conn->remote_port = curlx_ultous(port);
}
else {
if(rest[0]) {
failf(data, "Illegal port number");
return CURLE_URL_MALFORMAT;
}
/* Browser behavior adaptation. If there's a colon with no digits after,
just cut off the name there which makes us ignore the colon and just
use the default port. Firefox and Chrome both do that. */
*portptr = '\0';
}
}
/* only if remote_port was not already parsed off the URL we use the
default port number */
if(conn->remote_port < 0)
conn->remote_port = (unsigned short)conn->given->defport;
return CURLE_OK;
}
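
/* Illustrative sketch (kept inside #if 0, never compiled) of the bracket and
   port detection performed by parse_remote_port() above, applied to two
   example host strings. The inputs, helper name and printf() output are
   assumptions made for illustration only. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void remote_port_demo(void)
{
  char ipv6host[] = "[::1]:8080";        /* RFC 2732 style address */
  char plainhost[] = "example.com:8080";
  char endbracket;
  char *portptr;

  if((1 == sscanf(ipv6host, "[%*45[0123456789abcdefABCDEF:.]%c",
                  &endbracket)) && (']' == endbracket)) {
    /* bracketed IPv6: the port, if any, follows the closing bracket */
    portptr = strchr(ipv6host, ']');
    printf("IPv6 port: %ld\n", strtol(portptr + 2, NULL, 10)); /* 8080 */
  }

  /* plain host name: the port follows the last colon */
  portptr = strrchr(plainhost, ':');
  printf("plain port: %ld\n", strtol(portptr + 1, NULL, 10));  /* 8080 */
}
#endif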
/*
* Override the login details from the URL with that in the CURLOPT_USERPWD
* option or a .netrc file, if applicable.
*/
static CURLcode override_login(struct Curl_easy *data,
struct connectdata *conn,
char **userp, char **passwdp, char **optionsp)
{
if(data->set.str[STRING_USERNAME]) {
free(*userp);
*userp = strdup(data->set.str[STRING_USERNAME]);
if(!*userp)
return CURLE_OUT_OF_MEMORY;
}
if(data->set.str[STRING_PASSWORD]) {
free(*passwdp);
*passwdp = strdup(data->set.str[STRING_PASSWORD]);
if(!*passwdp)
return CURLE_OUT_OF_MEMORY;
}
if(data->set.str[STRING_OPTIONS]) {
free(*optionsp);
*optionsp = strdup(data->set.str[STRING_OPTIONS]);
if(!*optionsp)
return CURLE_OUT_OF_MEMORY;
}
conn->bits.netrc = FALSE;
if(data->set.use_netrc != CURL_NETRC_IGNORED) {
int ret = Curl_parsenetrc(conn->host.name,
userp, passwdp,
data->set.str[STRING_NETRC_FILE]);
if(ret > 0) {
infof(data, "Couldn't find host %s in the "
DOT_CHAR "netrc file; using defaults\n",
conn->host.name);
}
else if(ret < 0) {
return CURLE_OUT_OF_MEMORY;
}
else {
/* set bits.netrc TRUE to remember that we got the name from a .netrc
file, so that it is safe to use even if we followed a Location: to a
different host or similar. */
conn->bits.netrc = TRUE;
conn->bits.user_passwd = TRUE; /* enable user+password */
}
}
return CURLE_OK;
}
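
/* Illustrative sketch (kept inside #if 0, never compiled) of the
   application-side options that make override_login() consult a .netrc
   file. The URL, the .netrc path and the helper name are assumptions made
   for illustration only. */
#if 0
#include <curl/curl.h>

static void netrc_usage_demo(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "ftp://example.com/file.txt");
    /* look up credentials in .netrc, but prefer any given in the URL */
    curl_easy_setopt(curl, CURLOPT_NETRC, (long)CURL_NETRC_OPTIONAL);
    /* point at a non-default .netrc file (optional) */
    curl_easy_setopt(curl, CURLOPT_NETRC_FILE, "/home/user/.netrc-alt");
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
}
#endif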
/*
* Set the login details so they're available in the connection
*/
static CURLcode set_login(struct connectdata *conn,
const char *user, const char *passwd,
const char *options)
{
CURLcode result = CURLE_OK;
/* If our protocol needs a password and we have none, use the defaults */
if((conn->handler->flags & PROTOPT_NEEDSPWD) && !conn->bits.user_passwd) {
/* Store the default user */
conn->user = strdup(CURL_DEFAULT_USER);
/* Store the default password */
if(conn->user)
conn->passwd = strdup(CURL_DEFAULT_PASSWORD);
else
conn->passwd = NULL;
/* This is the default password, so DON'T set conn->bits.user_passwd */
}
else {
/* Store the user, zero-length if not set */
conn->user = strdup(user);
/* Store the password (only if user is present), zero-length if not set */
if(conn->user)
conn->passwd = strdup(passwd);
else
conn->passwd = NULL;
}
if(!conn->user || !conn->passwd)
result = CURLE_OUT_OF_MEMORY;
/* Store the options, null if not set */
if(!result && options[0]) {
conn->options = strdup(options);
if(!conn->options)
result = CURLE_OUT_OF_MEMORY;
}
return result;
}
/*
* Parses a "host:port" string to connect to.
* The hostname and the port may be empty; in this case, NULL is returned for
* the hostname and -1 for the port.
*/
static CURLcode parse_connect_to_host_port(struct Curl_easy *data,
const char *host,
char **hostname_result,
int *port_result)
{
char *host_dup;
char *hostptr;
char *host_portno;
char *portptr;
int port = -1;
#if defined(CURL_DISABLE_VERBOSE_STRINGS)
(void) data;
#endif
*hostname_result = NULL;
*port_result = -1;
if(!host || !*host)
return CURLE_OK;
host_dup = strdup(host);
if(!host_dup)
return CURLE_OUT_OF_MEMORY;
hostptr = host_dup;
/* start scanning for port number at this point */
portptr = hostptr;
/* detect and extract RFC6874-style IPv6-addresses */
if(*hostptr == '[') {
char *ptr = ++hostptr; /* advance beyond the initial bracket */
while(*ptr && (ISXDIGIT(*ptr) || (*ptr == ':') || (*ptr == '.')))
ptr++;
if(*ptr == '%') {
/* There might be a zone identifier */
if(strncmp("%25", ptr, 3))
infof(data, "Please URL encode %% as %%25, see RFC 6874.\n");
ptr++;
/* Allow unreserved characters as defined in RFC 3986 */
while(*ptr && (ISALPHA(*ptr) || ISXDIGIT(*ptr) || (*ptr == '-') ||
(*ptr == '.') || (*ptr == '_') || (*ptr == '~')))
ptr++;
}
if(*ptr == ']')
/* yeps, it ended nicely with a bracket as well */
*ptr++ = '\0';
else
infof(data, "Invalid IPv6 address format\n");
portptr = ptr;
/* Note that if this didn't end with a bracket, we still advanced the
     * hostptr first, but I can't see anything wrong with that as neither a
     * host name nor a numerical address can legally start with a bracket.
*/
}
/* Get port number off server.com:1080 */
host_portno = strchr(portptr, ':');
if(host_portno) {
char *endp = NULL;
*host_portno = '\0'; /* cut off number from host name */
host_portno++;
if(*host_portno) {
long portparse = strtol(host_portno, &endp, 10);
if((endp && *endp) || (portparse < 0) || (portparse > 65535)) {
infof(data, "No valid port number in connect to host string (%s)\n",
host_portno);
hostptr = NULL;
port = -1;
}
else
port = (int)portparse; /* we know it will fit */
}
}
/* now, clone the cleaned host name */
if(hostptr) {
*hostname_result = strdup(hostptr);
if(!*hostname_result) {
free(host_dup);
return CURLE_OUT_OF_MEMORY;
}
}
*port_result = port;
free(host_dup);
return CURLE_OK;
}
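
/* Illustrative sketch (kept inside #if 0, never compiled) of the host/port
   split performed above, applied to a bracketed IPv6 "connect to" string
   with a zone id. The input, helper name and printf() output are
   assumptions made for illustration only. */
#if 0
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void connect_to_host_port_demo(void)
{
  char input[] = "[fe80::1%25eth0]:8080";
  char *hostptr = input;
  char *portptr;

  if(*hostptr == '[') {
    char *ptr = ++hostptr;     /* advance beyond the initial bracket */
    while(*ptr && (isxdigit((unsigned char)*ptr) || *ptr == ':' ||
                   *ptr == '.'))
      ptr++;                   /* pass the numerical address */
    if(*ptr == '%') {
      ptr++;                   /* pass the "%25eth0" zone identifier */
      while(*ptr && (isalnum((unsigned char)*ptr) || *ptr == '-' ||
                     *ptr == '.' || *ptr == '_' || *ptr == '~'))
        ptr++;
    }
    if(*ptr == ']')
      *ptr++ = '\0';           /* cut off the closing bracket */
    portptr = ptr;
  }
  else
    portptr = hostptr;

  portptr = strchr(portptr, ':');
  if(portptr) {
    *portptr++ = '\0';         /* cut the port off the host name */
    printf("host: %s port: %ld\n", hostptr, strtol(portptr, NULL, 10));
    /* prints: host: fe80::1%25eth0 port: 8080 */
  }
}
#endif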
/*
* Parses one "connect to" string in the form:
* "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT".
*/
static CURLcode parse_connect_to_string(struct Curl_easy *data,
struct connectdata *conn,
const char *conn_to_host,
char **host_result,
int *port_result)
{
CURLcode result = CURLE_OK;
const char *ptr = conn_to_host;
int host_match = FALSE;
int port_match = FALSE;
*host_result = NULL;
*port_result = -1;
if(*ptr == ':') {
/* an empty hostname always matches */
host_match = TRUE;
ptr++;
}
else {
/* check whether the URL's hostname matches */
size_t hostname_to_match_len;
char *hostname_to_match = aprintf("%s%s%s",
conn->bits.ipv6_ip ? "[" : "",
conn->host.name,
conn->bits.ipv6_ip ? "]" : "");
if(!hostname_to_match)
return CURLE_OUT_OF_MEMORY;
hostname_to_match_len = strlen(hostname_to_match);
host_match = strncasecompare(ptr, hostname_to_match,
hostname_to_match_len);
free(hostname_to_match);
ptr += hostname_to_match_len;
host_match = host_match && *ptr == ':';
ptr++;
}
if(host_match) {
if(*ptr == ':') {
/* an empty port always matches */
port_match = TRUE;
ptr++;
}
else {
/* check whether the URL's port matches */
char *ptr_next = strchr(ptr, ':');
if(ptr_next) {
char *endp = NULL;
long port_to_match = strtol(ptr, &endp, 10);
if((endp == ptr_next) && (port_to_match == conn->remote_port)) {
port_match = TRUE;
ptr = ptr_next + 1;
}
}
}
}
if(host_match && port_match) {
/* parse the hostname and port to connect to */
result = parse_connect_to_host_port(data, ptr, host_result, port_result);
}
return result;
}
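
/* Illustrative sketch (kept inside #if 0, never compiled) of the
   "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT" syntax parsed above, as an
   application would pass it in via CURLOPT_CONNECT_TO. The host names,
   ports and helper name are assumptions made for illustration only. */
#if 0
#include <curl/curl.h>

static void connect_to_usage_demo(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    /* connect requests for example.com:443 to staging.example.com:8443
       instead, while the URL (and thus the Host: header and TLS SNI) stays
       unchanged */
    struct curl_slist *connect_to = NULL;
    connect_to = curl_slist_append(connect_to,
                                   "example.com:443:staging.example.com:8443");
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(curl, CURLOPT_CONNECT_TO, connect_to);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    curl_slist_free_all(connect_to);
  }
}
#endif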
/*
* Processes all strings in the "connect to" slist, and uses the "connect
* to host" and "connect to port" of the first string that matches.
*/
static CURLcode parse_connect_to_slist(struct Curl_easy *data,
struct connectdata *conn,
struct curl_slist *conn_to_host)
{
CURLcode result = CURLE_OK;
char *host = NULL;
int port = -1;
while(conn_to_host && !host && port == -1) {
result = parse_connect_to_string(data, conn, conn_to_host->data,
&host, &port);
if(result)
return result;
if(host && *host) {
conn->conn_to_host.rawalloc = host;
conn->conn_to_host.name = host;
conn->bits.conn_to_host = TRUE;
infof(data, "Connecting to hostname: %s\n", host);
}
else {
/* no "connect to host" */
conn->bits.conn_to_host = FALSE;
Curl_safefree(host);
}
if(port >= 0) {
conn->conn_to_port = port;
conn->bits.conn_to_port = TRUE;
infof(data, "Connecting to port: %d\n", port);
}
else {
/* no "connect to port" */
conn->bits.conn_to_port = FALSE;
port = -1;
}
conn_to_host = conn_to_host->next;
}
return result;
}
/*************************************************************
* Resolve the address of the server or proxy
*************************************************************/
static CURLcode resolve_server(struct Curl_easy *data,
struct connectdata *conn,
bool *async)
{
CURLcode result=CURLE_OK;
time_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
/*************************************************************
* Resolve the name of the server or proxy
*************************************************************/
if(conn->bits.reuse)
/* We're reusing the connection - no need to resolve anything, and
fix_hostname() was called already in create_conn() for the re-use
case. */
*async = FALSE;
else {
/* this is a fresh connect */
int rc;
struct Curl_dns_entry *hostaddr;
#ifdef USE_UNIX_SOCKETS
if(conn->unix_domain_socket) {
/* Unix domain sockets are local. The host gets ignored, just use the
* specified domain socket address. Do not cache "DNS entries". There is
* no DNS involved and we already have the filesystem path available */
const char *path = conn->unix_domain_socket;
hostaddr = calloc(1, sizeof(struct Curl_dns_entry));
if(!hostaddr)
result = CURLE_OUT_OF_MEMORY;
else {
bool longpath = FALSE;
hostaddr->addr = Curl_unix2addr(path, &longpath,
conn->abstract_unix_socket);
if(hostaddr->addr)
hostaddr->inuse++;
else {
/* Long paths are not supported for now */
if(longpath) {
failf(data, "Unix socket path too long: '%s'", path);
result = CURLE_COULDNT_RESOLVE_HOST;
}
else
result = CURLE_OUT_OF_MEMORY;
free(hostaddr);
hostaddr = NULL;
}
}
}
else
#endif
if(!conn->bits.proxy) {
struct hostname *connhost;
if(conn->bits.conn_to_host)
connhost = &conn->conn_to_host;
else
connhost = &conn->host;
/* If not connecting via a proxy, extract the port from the URL, if it is
* there, thus overriding any defaults that might have been set above. */
if(conn->bits.conn_to_port)
conn->port = conn->conn_to_port;
else
conn->port = conn->remote_port;
/* Resolve target host right on */
rc = Curl_resolv_timeout(conn, connhost->name, (int)conn->port,
&hostaddr, timeout_ms);
if(rc == CURLRESOLV_PENDING)
*async = TRUE;
else if(rc == CURLRESOLV_TIMEDOUT)
result = CURLE_OPERATION_TIMEDOUT;
else if(!hostaddr) {
failf(data, "Couldn't resolve host '%s'", connhost->dispname);
result = CURLE_COULDNT_RESOLVE_HOST;
/* don't return yet, we need to clean up the timeout first */
}
}
else {
/* This is a proxy that hasn't been resolved yet. */
struct hostname * const host = conn->bits.socksproxy ?
&conn->socks_proxy.host : &conn->http_proxy.host;
/* resolve proxy */
rc = Curl_resolv_timeout(conn, host->name, (int)conn->port,
&hostaddr, timeout_ms);
if(rc == CURLRESOLV_PENDING)
*async = TRUE;
else if(rc == CURLRESOLV_TIMEDOUT)
result = CURLE_OPERATION_TIMEDOUT;
else if(!hostaddr) {
failf(data, "Couldn't resolve proxy '%s'", host->dispname);
result = CURLE_COULDNT_RESOLVE_PROXY;
/* don't return yet, we need to clean up the timeout first */
}
}
DEBUGASSERT(conn->dns_entry == NULL);
conn->dns_entry = hostaddr;
}
return result;
}
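
/* Illustrative sketch (kept inside #if 0, never compiled) of the
   application-side option that makes resolve_server() take the Unix domain
   socket branch above instead of doing any DNS lookup. The socket path, URL
   and helper name are assumptions made for illustration only. */
#if 0
#include <curl/curl.h>

static void unix_socket_usage_demo(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    /* speak HTTP to a local server listening on a Unix domain socket; the
       host name in the URL is not resolved, it only ends up in the Host:
       header */
    curl_easy_setopt(curl, CURLOPT_UNIX_SOCKET_PATH, "/var/run/docker.sock");
    curl_easy_setopt(curl, CURLOPT_URL, "http://localhost/version");
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
}
#endif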
/*
 * Clean up the connection that was just allocated before we can move along
 * and use the previously existing one. All relevant data is copied over and
 * old_conn is ready for freeing once this function returns.
*/
static void reuse_conn(struct connectdata *old_conn,
struct connectdata *conn)
{
free_fixed_hostname(&old_conn->http_proxy.host);
free_fixed_hostname(&old_conn->socks_proxy.host);
free(old_conn->http_proxy.host.rawalloc);
free(old_conn->socks_proxy.host.rawalloc);
/* free the SSL config struct from this connection struct as this was
allocated in vain and is targeted for destruction */
Curl_free_primary_ssl_config(&old_conn->ssl_config);
Curl_free_primary_ssl_config(&old_conn->proxy_ssl_config);
conn->data = old_conn->data;
/* get the user+password information from the old_conn struct since it may
* be new for this request even when we re-use an existing connection */
conn->bits.user_passwd = old_conn->bits.user_passwd;
if(conn->bits.user_passwd) {
/* use the new user name and password though */
Curl_safefree(conn->user);
Curl_safefree(conn->passwd);
conn->user = old_conn->user;
conn->passwd = old_conn->passwd;
old_conn->user = NULL;
old_conn->passwd = NULL;
}
conn->bits.proxy_user_passwd = old_conn->bits.proxy_user_passwd;
if(conn->bits.proxy_user_passwd) {
/* use the new proxy user name and proxy password though */
Curl_safefree(conn->http_proxy.user);
Curl_safefree(conn->socks_proxy.user);
Curl_safefree(conn->http_proxy.passwd);
Curl_safefree(conn->socks_proxy.passwd);
conn->http_proxy.user = old_conn->http_proxy.user;
conn->socks_proxy.user = old_conn->socks_proxy.user;
conn->http_proxy.passwd = old_conn->http_proxy.passwd;
conn->socks_proxy.passwd = old_conn->socks_proxy.passwd;
old_conn->http_proxy.user = NULL;
old_conn->socks_proxy.user = NULL;
old_conn->http_proxy.passwd = NULL;
old_conn->socks_proxy.passwd = NULL;
}
  /* The host name can change when doing keep-alive with a proxy, or if the
     case is different this time, etc. */
free_fixed_hostname(&conn->host);
free_fixed_hostname(&conn->conn_to_host);
Curl_safefree(conn->host.rawalloc);
Curl_safefree(conn->conn_to_host.rawalloc);
conn->host=old_conn->host;
conn->bits.conn_to_host = old_conn->bits.conn_to_host;
conn->conn_to_host = old_conn->conn_to_host;
conn->bits.conn_to_port = old_conn->bits.conn_to_port;
conn->conn_to_port = old_conn->conn_to_port;
/* persist connection info in session handle */
Curl_persistconninfo(conn);
conn_reset_all_postponed_data(old_conn); /* free buffers */
conn_reset_all_postponed_data(conn); /* reset unprocessed data */
/* re-use init */
conn->bits.reuse = TRUE; /* yes, we're re-using here */
Curl_safefree(old_conn->user);
Curl_safefree(old_conn->passwd);
Curl_safefree(old_conn->http_proxy.user);
Curl_safefree(old_conn->socks_proxy.user);
Curl_safefree(old_conn->http_proxy.passwd);
Curl_safefree(old_conn->socks_proxy.passwd);
Curl_safefree(old_conn->localdev);
Curl_llist_destroy(old_conn->send_pipe, NULL);
Curl_llist_destroy(old_conn->recv_pipe, NULL);
old_conn->send_pipe = NULL;
old_conn->recv_pipe = NULL;
Curl_safefree(old_conn->master_buffer);
#ifdef USE_UNIX_SOCKETS
Curl_safefree(old_conn->unix_domain_socket);
#endif
}
/**
 * create_conn() sets up a new connectdata struct, or re-uses an already
 * existing one, and resolves the host name.
 *
 * If this function returns CURLE_OK and *async is set to TRUE, the resolve
* response will be coming asynchronously. If *async is FALSE, the name is
* already resolved.
*
* @param data The sessionhandle pointer
* @param in_connect is set to the next connection data pointer
* @param async is set TRUE when an async DNS resolution is pending
* @see Curl_setup_conn()
*
* *NOTE* this function assigns the conn->data pointer!
*/
static CURLcode create_conn(struct Curl_easy *data,
struct connectdata **in_connect,
bool *async)
{
CURLcode result = CURLE_OK;
struct connectdata *conn;
struct connectdata *conn_temp = NULL;
size_t urllen;
char *user = NULL;
char *passwd = NULL;
char *options = NULL;
bool reuse;
char *proxy = NULL;
char *socksproxy = NULL;
char *no_proxy = NULL;
bool prot_missing = FALSE;
bool connections_available = TRUE;
bool force_reuse = FALSE;
bool waitpipe = FALSE;
size_t max_host_connections = Curl_multi_max_host_connections(data->multi);
size_t max_total_connections = Curl_multi_max_total_connections(data->multi);
*async = FALSE;
/*************************************************************
* Check input data
*************************************************************/
if(!data->change.url) {
result = CURLE_URL_MALFORMAT;
goto out;
}
/* First, split up the current URL in parts so that we can use the
parts for checking against the already present connections. In order
to not have to modify everything at once, we allocate a temporary
connection data struct and fill in for comparison purposes. */
conn = allocate_conn(data);
if(!conn) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
/* We must set the return variable as soon as possible, so that our
parent can cleanup any possible allocs we may have done before
any failure */
*in_connect = conn;
/* This initing continues below, see the comment "Continue connectdata
* initialization here" */
/***********************************************************
* We need to allocate memory to store the path in. We get the size of the
* full URL to be sure, and we need to make it at least 256 bytes since
* other parts of the code will rely on this fact
***********************************************************/
#define LEAST_PATH_ALLOC 256
urllen=strlen(data->change.url);
if(urllen < LEAST_PATH_ALLOC)
urllen=LEAST_PATH_ALLOC;
/*
* We malloc() the buffers below urllen+2 to make room for 2 possibilities:
* 1 - an extra terminating zero
* 2 - an extra slash (in case a syntax like "www.host.com?moo" is used)
*/
Curl_safefree(data->state.pathbuffer);
data->state.path = NULL;
data->state.pathbuffer = malloc(urllen+2);
if(NULL == data->state.pathbuffer) {
result = CURLE_OUT_OF_MEMORY; /* really bad error */
goto out;
}
data->state.path = data->state.pathbuffer;
conn->host.rawalloc = malloc(urllen+2);
if(NULL == conn->host.rawalloc) {
Curl_safefree(data->state.pathbuffer);
data->state.path = NULL;
result = CURLE_OUT_OF_MEMORY;
goto out;
}
conn->host.name = conn->host.rawalloc;
conn->host.name[0] = 0;
user = strdup("");
passwd = strdup("");
options = strdup("");
if(!user || !passwd || !options) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
result = parseurlandfillconn(data, conn, &prot_missing, &user, &passwd,
&options);
if(result)
goto out;
/*************************************************************
* No protocol part in URL was used, add it!
*************************************************************/
if(prot_missing) {
    /* We're guessing protocol prefixes here. If we're told to use a proxy,
       or if we're going to follow a Location: header later, etc., then we
       need the protocol part added so that we have a valid URL. */
char *reurl;
char *ch_lower;
reurl = aprintf("%s://%s", conn->handler->scheme, data->change.url);
if(!reurl) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
/* Change protocol prefix to lower-case */
for(ch_lower = reurl; *ch_lower != ':'; ch_lower++)
*ch_lower = (char)TOLOWER(*ch_lower);
if(data->change.url_alloc) {
Curl_safefree(data->change.url);
data->change.url_alloc = FALSE;
}
data->change.url = reurl;
data->change.url_alloc = TRUE; /* free this later */
}
/*************************************************************
* If the protocol can't handle url query strings, then cut
* off the unhandable part
*************************************************************/
if((conn->given->flags&PROTOPT_NOURLQUERY)) {
char *path_q_sep = strchr(conn->data->state.path, '?');
if(path_q_sep) {
      /* according to RFC 3986, the query (?foo=bar) is allowed in the URL
         even for protocols that can't handle it;
         cut off the string part after '?'
*/
/* terminate the string */
path_q_sep[0] = 0;
}
}
if(data->set.str[STRING_BEARER]) {
conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]);
if(!conn->oauth_bearer) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
}
#ifndef CURL_DISABLE_PROXY
/*************************************************************
* Extract the user and password from the authentication string
*************************************************************/
if(conn->bits.proxy_user_passwd) {
result = parse_proxy_auth(data, conn);
if(result)
goto out;
}
/*************************************************************
* Detect what (if any) proxy to use
*************************************************************/
if(data->set.str[STRING_PROXY]) {
proxy = strdup(data->set.str[STRING_PROXY]);
/* if global proxy is set, this is it */
if(NULL == proxy) {
failf(data, "memory shortage");
result = CURLE_OUT_OF_MEMORY;
goto out;
}
}
if(data->set.str[STRING_PRE_PROXY]) {
socksproxy = strdup(data->set.str[STRING_PRE_PROXY]);
/* if global socks proxy is set, this is it */
if(NULL == socksproxy) {
failf(data, "memory shortage");
result = CURLE_OUT_OF_MEMORY;
goto out;
}
}
no_proxy = curl_getenv("no_proxy");
if(!no_proxy)
no_proxy = curl_getenv("NO_PROXY");
if(check_noproxy(conn->host.name, data->set.str[STRING_NOPROXY]) ||
(!data->set.str[STRING_NOPROXY] &&
check_noproxy(conn->host.name, no_proxy))) {
Curl_safefree(proxy);
Curl_safefree(socksproxy);
}
else if(!proxy && !socksproxy)
#ifndef CURL_DISABLE_HTTP
/* if the host is not in the noproxy list, detect proxy. */
proxy = detect_proxy(conn);
#else /* !CURL_DISABLE_HTTP */
proxy = NULL;
#endif /* CURL_DISABLE_HTTP */
Curl_safefree(no_proxy);
#ifdef USE_UNIX_SOCKETS
if(data->set.str[STRING_UNIX_SOCKET_PATH]) {
if(proxy) {
free(proxy); /* Unix domain sockets cannot be proxied, so disable it */
proxy = NULL;
}
conn->unix_domain_socket = strdup(data->set.str[STRING_UNIX_SOCKET_PATH]);
if(conn->unix_domain_socket == NULL) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
conn->abstract_unix_socket = data->set.abstract_unix_socket;
}
#endif
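  /* Example (illustrative only, not compiled as part of this function): an
     application enables the Unix domain socket path handled above with the
     CURLOPT_UNIX_SOCKET_PATH option, e.g.

       curl_easy_setopt(curl, CURLOPT_UNIX_SOCKET_PATH, "/var/run/docker.sock");
       curl_easy_setopt(curl, CURLOPT_URL, "http://localhost/version");

     The socket path then replaces the TCP connection and, as the code above
     shows, any configured proxy is dropped for such transfers. */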
if(proxy && (!*proxy || (conn->handler->flags & PROTOPT_NONETWORK))) {
free(proxy); /* Don't bother with an empty proxy string or if the
protocol doesn't work with network */
proxy = NULL;
}
if(socksproxy && (!*socksproxy ||
(conn->handler->flags & PROTOPT_NONETWORK))) {
free(socksproxy); /* Don't bother with an empty socks proxy string or if
the protocol doesn't work with network */
socksproxy = NULL;
}
/***********************************************************************
* If this is supposed to use a proxy, we need to figure out the proxy host
* name, proxy type and port number, so that we can re-use an existing
   * connection already registered to the same proxy host.
***********************************************************************/
if(proxy || socksproxy) {
if(proxy) {
result = parse_proxy(data, conn, proxy, conn->http_proxy.proxytype);
Curl_safefree(proxy); /* parse_proxy copies the proxy string */
if(result)
goto out;
}
if(socksproxy) {
result = parse_proxy(data, conn, socksproxy,
conn->socks_proxy.proxytype);
/* parse_proxy copies the socks proxy string */
Curl_safefree(socksproxy);
if(result)
goto out;
}
if(conn->http_proxy.host.rawalloc) {
#ifdef CURL_DISABLE_HTTP
      /* asking for an HTTP proxy is a bit funny when HTTP is disabled... */
result = CURLE_UNSUPPORTED_PROTOCOL;
goto out;
#else
/* force this connection's protocol to become HTTP if not already
compatible - if it isn't tunneling through */
if(!(conn->handler->protocol & PROTO_FAMILY_HTTP) &&
!conn->bits.tunnel_proxy)
conn->handler = &Curl_handler_http;
conn->bits.httpproxy = TRUE;
#endif
}
else {
      conn->bits.httpproxy = FALSE; /* not an HTTP proxy */
conn->bits.tunnel_proxy = FALSE; /* no tunneling if not HTTP */
}
if(conn->socks_proxy.host.rawalloc) {
if(!conn->http_proxy.host.rawalloc) {
        /* only a SOCKS proxy is in use, so any proxy credentials stored in
           the http_proxy slot actually belong to the SOCKS proxy; move them
           over */
if(!conn->socks_proxy.user) {
conn->socks_proxy.user = conn->http_proxy.user;
conn->http_proxy.user = NULL;
Curl_safefree(conn->socks_proxy.passwd);
conn->socks_proxy.passwd = conn->http_proxy.passwd;
conn->http_proxy.passwd = NULL;
}
}
conn->bits.socksproxy = TRUE;
}
else
conn->bits.socksproxy = FALSE; /* not a socks proxy */
}
else {
conn->bits.socksproxy = FALSE;
conn->bits.httpproxy = FALSE;
}
conn->bits.proxy = conn->bits.httpproxy || conn->bits.socksproxy;
if(!conn->bits.proxy) {
/* we aren't using the proxy after all... */
conn->bits.proxy = FALSE;
conn->bits.httpproxy = FALSE;
conn->bits.socksproxy = FALSE;
conn->bits.proxy_user_passwd = FALSE;
conn->bits.tunnel_proxy = FALSE;
}
#endif /* CURL_DISABLE_PROXY */
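  /* Example (illustrative only, not compiled here): the two proxy kinds
     sorted out above would typically be configured through the API as

       curl_easy_setopt(curl, CURLOPT_PROXY, "https://proxy.example.com:3128");
       curl_easy_setopt(curl, CURLOPT_PRE_PROXY, "socks5://localhost:1080");

     where CURLOPT_PROXY ends up in the http_proxy handling and
     CURLOPT_PRE_PROXY (a SOCKS-type proxy curl connects to first) in the
     socks_proxy handling. The host names and ports are made up for the
     example. */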
/*************************************************************
   * If the protocol is using SSL and an HTTP proxy is used, we set
* the tunnel_proxy bit.
*************************************************************/
if((conn->given->flags&PROTOPT_SSL) && conn->bits.httpproxy)
conn->bits.tunnel_proxy = TRUE;
/*************************************************************
* Figure out the remote port number and fix it in the URL
*************************************************************/
result = parse_remote_port(data, conn);
if(result)
goto out;
  /* Check for overridden login details and set them accordingly so that they
     are known when protocol->setup_connection is called! */
result = override_login(data, conn, &user, &passwd, &options);
if(result)
goto out;
result = set_login(conn, user, passwd, options);
if(result)
goto out;
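  /* Example (illustrative only, not compiled here): the override handled
     above is what makes credentials set through the API, e.g.

       curl_easy_setopt(curl, CURLOPT_USERPWD, "alice:secret");

     take precedence over any user:password embedded in the URL itself. */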
/*************************************************************
* Process the "connect to" linked list of hostname/port mappings.
* Do this after the remote port number has been fixed in the URL.
*************************************************************/
result = parse_connect_to_slist(data, conn, data->set.connect_to);
if(result)
goto out;
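  /* Example (illustrative): an entry in the "connect to" list parsed above
     uses the --connect-to / CURLOPT_CONNECT_TO syntax
     HOST1:PORT1:HOST2:PORT2, e.g.

       "example.com:443:backend.example.com:8443"

     which makes a request aimed at example.com:443 actually connect to
     backend.example.com:8443 while the request itself keeps using the
     original host name. */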
/*************************************************************
* IDN-fix the hostnames
*************************************************************/
fix_hostname(conn, &conn->host);
if(conn->bits.conn_to_host)
fix_hostname(conn, &conn->conn_to_host);
if(conn->bits.httpproxy)
fix_hostname(conn, &conn->http_proxy.host);
if(conn->bits.socksproxy)
fix_hostname(conn, &conn->socks_proxy.host);
/*************************************************************
* Check whether the host and the "connect to host" are equal.
* Do this after the hostnames have been IDN-fixed.
*************************************************************/
if(conn->bits.conn_to_host &&
strcasecompare(conn->conn_to_host.name, conn->host.name)) {
conn->bits.conn_to_host = FALSE;
}
/*************************************************************
* Check whether the port and the "connect to port" are equal.
* Do this after the remote port number has been fixed in the URL.
*************************************************************/
if(conn->bits.conn_to_port && conn->conn_to_port == conn->remote_port) {
conn->bits.conn_to_port = FALSE;
}
/*************************************************************
* If the "connect to" feature is used with an HTTP proxy,
* we set the tunnel_proxy bit.
*************************************************************/
if((conn->bits.conn_to_host || conn->bits.conn_to_port) &&
conn->bits.httpproxy)
conn->bits.tunnel_proxy = TRUE;
/*************************************************************
* Setup internals depending on protocol. Needs to be done after
* we figured out what/if proxy to use.
*************************************************************/
result = setup_connection_internals(conn);
if(result)
goto out;
conn->recv[FIRSTSOCKET] = Curl_recv_plain;
conn->send[FIRSTSOCKET] = Curl_send_plain;
conn->recv[SECONDARYSOCKET] = Curl_recv_plain;
conn->send[SECONDARYSOCKET] = Curl_send_plain;
conn->bits.tcp_fastopen = data->set.tcp_fastopen;
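  /* Example (illustrative only, not compiled here): the flag copied above is
     driven by the CURLOPT_TCP_FASTOPEN option, e.g.

       curl_easy_setopt(curl, CURLOPT_TCP_FASTOPEN, 1L);

     so that the later connect code may send the request data already in the
     TCP SYN on platforms that support it. */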
/***********************************************************************
* file: is a special case in that it doesn't need a network connection
***********************************************************************/
#ifndef CURL_DISABLE_FILE
if(conn->handler->flags & PROTOPT_NONETWORK) {
bool done;
/* this is supposed to be the connect function so we better at least check
that the file is present here! */
DEBUGASSERT(conn->handler->connect_it);
result = conn->handler->connect_it(conn, &done);
/* Setup a "faked" transfer that'll do nothing */
if(!result) {
conn->data = data;
      conn->bits.tcpconnect[FIRSTSOCKET] = TRUE; /* we are "connected" */
Curl_conncache_add_conn(data->state.conn_cache, conn);
/*
* Setup whatever necessary for a resumed transfer
*/
result = setup_range(data);
if(result) {
DEBUGASSERT(conn->handler->done);
/* we ignore the return code for the protocol-specific DONE */
(void)conn->handler->done(conn, result, FALSE);
goto out;
}
Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */
-1, NULL); /* no upload */
}
/* since we skip do_init() */
Curl_init_do(data, conn);
goto out;
}
#endif
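  /* Example (illustrative): the PROTOPT_NONETWORK shortcut above is what a
     plain file transfer takes, e.g.

       curl_easy_setopt(curl, CURLOPT_URL, "file:///etc/hosts");

     connect_it() just opens the file, the transfer is marked "connected" and
     no socket or proxy work is performed at all. */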
/* Get a cloned copy of the SSL config situation stored in the
connection struct. But to get this going nicely, we must first make
sure that the strings in the master copy are pointing to the correct
strings in the session handle strings array!
Keep in mind that the pointers in the master copy are pointing to strings
that will be freed as part of the Curl_easy struct, but all cloned
copies will be separately allocated.
*/
data->set.ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_ORIG];
data->set.proxy_ssl.primary.CApath = data->set.str[STRING_SSL_CAPATH_PROXY];
data->set.ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_ORIG];
data->set.proxy_ssl.primary.CAfile = data->set.str[STRING_SSL_CAFILE_PROXY];
data->set.ssl.primary.random_file = data->set.str[STRING_SSL_RANDOM_FILE];
data->set.proxy_ssl.primary.random_file =
data->set.str[STRING_SSL_RANDOM_FILE];
data->set.ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET];
data->set.proxy_ssl.primary.egdsocket = data->set.str[STRING_SSL_EGDSOCKET];
data->set.ssl.primary.cipher_list =
data->set.str[STRING_SSL_CIPHER_LIST_ORIG];
data->set.proxy_ssl.primary.cipher_list =
data->set.str[STRING_SSL_CIPHER_LIST_PROXY];
data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY];
data->set.ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_ORIG];
data->set.proxy_ssl.issuercert = data->set.str[STRING_SSL_ISSUERCERT_PROXY];
data->set.ssl.cert = data->set.str[STRING_CERT_ORIG];
data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY];
data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG];
data->set.proxy_ssl.cert_type = data->set.str[STRING_CERT_TYPE_PROXY];
data->set.ssl.key = data->set.str[STRING_KEY_ORIG];
data->set.proxy_ssl.key = data->set.str[STRING_KEY_PROXY];
data->set.ssl.key_type = data->set.str[STRING_KEY_TYPE_ORIG];
data->set.proxy_ssl.key_type = data->set.str[STRING_KEY_TYPE_PROXY];
data->set.ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_ORIG];
data->set.proxy_ssl.key_passwd = data->set.str[STRING_KEY_PASSWD_PROXY];
data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG];
data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY];
#ifdef USE_TLS_SRP
data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG];
data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY];
data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG];
data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY];
#endif
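  /* Example (illustrative sketch, assuming the proxy-side TLS-SRP options
     introduced alongside HTTPS proxy support): the proxy SRP credentials
     copied above would be set with

       curl_easy_setopt(curl, CURLOPT_PROXY_TLSAUTH_USERNAME, "user");
       curl_easy_setopt(curl, CURLOPT_PROXY_TLSAUTH_PASSWORD, "secret");
       curl_easy_setopt(curl, CURLOPT_PROXY_TLSAUTH_TYPE, "SRP");
  */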
if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary,
&conn->ssl_config)) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
if(!Curl_clone_primary_ssl_config(&data->set.proxy_ssl.primary,
&conn->proxy_ssl_config)) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
prune_dead_connections(data);
/*************************************************************
* Check the current list of connections to see if we can
* re-use an already existing one or if we have to create a
* new one.
*************************************************************/
/* reuse_fresh is TRUE if we are told to use a new connection by force, but
we only acknowledge this option if this is not a re-used connection
     already (which happens due to follow-location or during an HTTP
authentication phase). */
if(data->set.reuse_fresh && !data->state.this_is_a_follow)
reuse = FALSE;
else
reuse = ConnectionExists(data, conn, &conn_temp, &force_reuse, &waitpipe);
/* If we found a reusable connection, we may still want to
open a new connection if we are pipelining. */
if(reuse && !force_reuse && IsPipeliningPossible(data, conn_temp)) {
size_t pipelen = conn_temp->send_pipe->size + conn_temp->recv_pipe->size;
if(pipelen > 0) {
infof(data, "Found connection %ld, with requests in the pipe (%zu)\n",
conn_temp->connection_id, pipelen);
if(conn_temp->bundle->num_connections < max_host_connections &&
data->state.conn_cache->num_connections < max_total_connections) {
/* We want a new connection anyway */
reuse = FALSE;
infof(data, "We can reuse, but we want a new connection anyway\n");
}
}
}
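  /* Example (illustrative only, not compiled here): the limits and
     pipelining behaviour consulted in this area come from the multi
     interface, e.g.

       curl_multi_setopt(multi, CURLMOPT_PIPELINING, CURLPIPE_HTTP1);
       curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 2L);
       curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 8L);

     With pipelining enabled, a connection found above may be reusable yet
     busy, in which case a new one is preferred as long as these limits
     allow it. */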
if(reuse) {
/*
* We already have a connection for this, we got the former connection
* in the conn_temp variable and thus we need to cleanup the one we
* just allocated before we can move along and use the previously
* existing one.
*/
conn_temp->inuse = TRUE; /* mark this as being in use so that no other
handle in a multi stack may nick it */
reuse_conn(conn, conn_temp);
free(conn); /* we don't need this anymore */
conn = conn_temp;
*in_connect = conn;
infof(data, "Re-using existing connection! (#%ld) with %s %s\n",
conn->connection_id,
conn->bits.proxy?"proxy":"host",
proxy: Support HTTPS proxy and SOCKS+HTTP(s) * HTTPS proxies: An HTTPS proxy receives all transactions over an SSL/TLS connection. Once a secure connection with the proxy is established, the user agent uses the proxy as usual, including sending CONNECT requests to instruct the proxy to establish a [usually secure] TCP tunnel with an origin server. HTTPS proxies protect nearly all aspects of user-proxy communications as opposed to HTTP proxies that receive all requests (including CONNECT requests) in vulnerable clear text. With HTTPS proxies, it is possible to have two concurrent _nested_ SSL/TLS sessions: the "outer" one between the user agent and the proxy and the "inner" one between the user agent and the origin server (through the proxy). This change adds supports for such nested sessions as well. A secure connection with a proxy requires its own set of the usual SSL options (their actual descriptions differ and need polishing, see TODO): --proxy-cacert FILE CA certificate to verify peer against --proxy-capath DIR CA directory to verify peer against --proxy-cert CERT[:PASSWD] Client certificate file and password --proxy-cert-type TYPE Certificate file type (DER/PEM/ENG) --proxy-ciphers LIST SSL ciphers to use --proxy-crlfile FILE Get a CRL list in PEM format from the file --proxy-insecure Allow connections to proxies with bad certs --proxy-key KEY Private key file name --proxy-key-type TYPE Private key file type (DER/PEM/ENG) --proxy-pass PASS Pass phrase for the private key --proxy-ssl-allow-beast Allow security flaw to improve interop --proxy-sslv2 Use SSLv2 --proxy-sslv3 Use SSLv3 --proxy-tlsv1 Use TLSv1 --proxy-tlsuser USER TLS username --proxy-tlspassword STRING TLS password --proxy-tlsauthtype STRING TLS authentication type (default SRP) All --proxy-foo options are independent from their --foo counterparts, except --proxy-crlfile which defaults to --crlfile and --proxy-capath which defaults to --capath. Curl now also supports %{proxy_ssl_verify_result} --write-out variable, similar to the existing %{ssl_verify_result} variable. Supported backends: OpenSSL, GnuTLS, and NSS. * A SOCKS proxy + HTTP/HTTPS proxy combination: If both --socks* and --proxy options are given, Curl first connects to the SOCKS proxy and then connects (through SOCKS) to the HTTP or HTTPS proxy. TODO: Update documentation for the new APIs and --proxy-* options. Look for "Added in 7.XXX" marks.
2016-11-16 12:49:15 -05:00
conn->socks_proxy.host.name ? conn->socks_proxy.host.dispname :
conn->http_proxy.host.name ? conn->http_proxy.host.dispname :
conn->host.dispname);
}
else {
/* We have decided that we want a new connection. However, we may not
be able to do that if we have reached the limit of how many
connections we are allowed to open. */
struct connectbundle *bundle = NULL;
if(conn->handler->flags & PROTOPT_ALPN_NPN) {
/* The protocol wants it, so set the bits if enabled in the easy handle
(default) */
if(data->set.ssl_enable_alpn)
conn->bits.tls_enable_alpn = TRUE;
if(data->set.ssl_enable_npn)
conn->bits.tls_enable_npn = TRUE;
}
if(waitpipe)
/* There is a connection that *might* become usable for pipelining
"soon", and we wait for that */
connections_available = FALSE;
else
bundle = Curl_conncache_find_bundle(conn, data->state.conn_cache);
if(max_host_connections > 0 && bundle &&
(bundle->num_connections >= max_host_connections)) {
struct connectdata *conn_candidate;
/* The bundle is full. Let's see if we can kill a connection. */
conn_candidate = find_oldest_idle_connection_in_bundle(data, bundle);
if(conn_candidate) {
/* Set the connection's owner correctly, then kill it */
conn_candidate->data = data;
(void)Curl_disconnect(conn_candidate, /* dead_connection */ FALSE);
}
else {
infof(data, "No more connections allowed to host: %d\n",
max_host_connections);
connections_available = FALSE;
}
}
if(connections_available &&
(max_total_connections > 0) &&
(data->state.conn_cache->num_connections >= max_total_connections)) {
struct connectdata *conn_candidate;
/* The cache is full. Let's see if we can kill a connection. */
conn_candidate = Curl_oldest_idle_connection(data);
if(conn_candidate) {
/* Set the connection's owner correctly, then kill it */
conn_candidate->data = data;
(void)Curl_disconnect(conn_candidate, /* dead_connection */ FALSE);
}
else {
infof(data, "No connections available in cache\n");
connections_available = FALSE;
}
}
if(!connections_available) {
infof(data, "No connections available.\n");
conn_free(conn);
*in_connect = NULL;
result = CURLE_NO_CONNECTION_AVAILABLE;
goto out;
}
else {
/*
* This is a brand new connection, so let's store it in our
* connection cache!
*/
Curl_conncache_add_conn(data->state.conn_cache, conn);
}
#if defined(USE_NTLM)
/* If NTLM is requested in a part of this connection, make sure we don't
assume the state is fine, as this is a fresh connection and NTLM is
connection-based. */
if((data->state.authhost.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
data->state.authhost.done) {
infof(data, "NTLM picked AND auth done set, clear picked!\n");
data->state.authhost.picked = CURLAUTH_NONE;
data->state.authhost.done = FALSE;
}
if((data->state.authproxy.picked & (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
data->state.authproxy.done) {
infof(data, "NTLM-proxy picked AND auth done set, clear picked!\n");
data->state.authproxy.picked = CURLAUTH_NONE;
data->state.authproxy.done = FALSE;
}
#endif
}
/* Mark the connection as used */
conn->inuse = TRUE;
/* Set up and init stuff before DO starts, in preparation for the transfer. */
Curl_init_do(data, conn);
/*
* Set up whatever is necessary for a resumed transfer
*/
result = setup_range(data);
if(result)
goto out;
/* Continue connectdata initialization here. */
/*
* Inherit the proper values from the urldata struct AFTER we have arranged
* the persistent connection stuff
*/
conn->seek_func = data->set.seek_func;
conn->seek_client = data->set.seek_client;
/*************************************************************
* Resolve the address of the server or proxy
*************************************************************/
result = resolve_server(data, conn, async);
out:
free(options);
free(passwd);
free(user);
free(socksproxy);
free(proxy);
return result;
}
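/* Illustrative sketch, not part of the library build: how an application
 * opts in to the connection limits enforced in create_conn() above. The
 * handle name and the URL are placeholders and the limit values are only
 * examples. A handle that would exceed these limits is parked in a pending
 * state by the multi code until a connection becomes available. */
#if 0
#include <curl/curl.h>

static void connection_limits_sketch(void)
{
  CURLM *multi = curl_multi_init();
  CURL *easy = curl_easy_init();

  /* at most 2 connections per host, 8 in total */
  curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 2L);
  curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 8L);

  curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/");
  curl_multi_add_handle(multi, easy);

  /* ... drive the transfer with curl_multi_perform()/curl_multi_wait() ... */

  curl_multi_remove_handle(multi, easy);
  curl_easy_cleanup(easy);
  curl_multi_cleanup(multi);
}
#endif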
/* Curl_setup_conn() is called after the name resolve initiated in
 * create_conn() is all done.
 *
 * Curl_setup_conn() also handles reused connections
 *
 * conn->data MUST already have been set up fine (in create_conn)
 */
CURLcode Curl_setup_conn(struct connectdata *conn,
bool *protocol_done)
{
CURLcode result = CURLE_OK;
struct Curl_easy *data = conn->data;
Curl_pgrsTime(data, TIMER_NAMELOOKUP);
if(conn->handler->flags & PROTOPT_NONETWORK) {
/* nothing to set up when not using a network */
*protocol_done = TRUE;
return result;
}
*protocol_done = FALSE; /* default to not done */
/* set proxy_connect_closed to false unconditionally already here since it
is used strictly to provide extra information to a parent function in the
case of proxy CONNECT failures, and we must make sure it is not left
lingering from a previous invocation */
conn->bits.proxy_connect_closed = FALSE;
/*
* Set user-agent. Used for HTTP, but since we can attempt to tunnel
* basically anything through an HTTP proxy, we cannot limit this based on
* protocol.
*/
if(data->set.str[STRING_USERAGENT]) {
Curl_safefree(conn->allocptr.uagent);
conn->allocptr.uagent =
aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
if(!conn->allocptr.uagent)
return CURLE_OUT_OF_MEMORY;
}
data->req.headerbytecount = 0;
#ifdef CURL_DO_LINEEND_CONV
data->state.crlf_conversions = 0; /* reset CRLF conversion counter */
#endif /* CURL_DO_LINEEND_CONV */
/* set start time here for timeout purposes in the connect procedure; it
is set again later for the progress meter's purposes */
conn->now = Curl_tvnow();
if(CURL_SOCKET_BAD == conn->sock[FIRSTSOCKET]) {
conn->bits.tcpconnect[FIRSTSOCKET] = FALSE;
result = Curl_connecthost(conn, conn->dns_entry);
if(result)
return result;
}
else {
Curl_pgrsTime(data, TIMER_CONNECT); /* we're connected already */
Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
conn->bits.tcpconnect[FIRSTSOCKET] = TRUE;
*protocol_done = TRUE;
Curl_updateconninfo(conn, conn->sock[FIRSTSOCKET]);
Curl_verboseconnect(conn);
}
conn->now = Curl_tvnow(); /* time this *after* the connect is done, we
set this here perhaps a second time */
#ifdef __EMX__
/*
* This check is quite a hack. We're calling _fsetmode to fix the problem
* with fwrite converting newline characters (you get mangled text files,
* and corrupted binary files when you download to stdout and redirect it to
* a file).
*/
if((data->set.out)->_handle == NULL) {
_fsetmode(stdout, "b");
}
#endif
return result;
}
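/* Illustrative sketch, not compiled as part of libcurl: the timers stamped
 * in Curl_setup_conn() (TIMER_NAMELOOKUP, TIMER_CONNECT, TIMER_APPCONNECT)
 * are what applications read back with curl_easy_getinfo(). The URL is a
 * placeholder. */
#if 0
#include <stdio.h>
#include <curl/curl.h>

static void connect_timing_sketch(void)
{
  CURL *easy = curl_easy_init();
  double namelookup = 0.0, connect = 0.0, appconnect = 0.0;

  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
  if(curl_easy_perform(easy) == CURLE_OK) {
    curl_easy_getinfo(easy, CURLINFO_NAMELOOKUP_TIME, &namelookup);
    curl_easy_getinfo(easy, CURLINFO_CONNECT_TIME, &connect);
    curl_easy_getinfo(easy, CURLINFO_APPCONNECT_TIME, &appconnect);
    printf("lookup: %.3fs connect: %.3fs tls: %.3fs\n",
           namelookup, connect, appconnect);
  }
  curl_easy_cleanup(easy);
}
#endif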
CURLcode Curl_connect(struct Curl_easy *data,
struct connectdata **in_connect,
bool *asyncp,
bool *protocol_done)
{
CURLcode result;
*asyncp = FALSE; /* assume synchronous resolves by default */
/* call the stuff that needs to be called */
result = create_conn(data, in_connect, asyncp);
if(!result) {
/* no error */
if((*in_connect)->send_pipe->size || (*in_connect)->recv_pipe->size)
/* pipelining */
*protocol_done = TRUE;
else if(!*asyncp) {
/* DNS resolution is done: that's either because this is a reused
connection, in which case DNS was unnecessary, or because DNS
really did finish already (synch resolver/fast async resolve) */
result = Curl_setup_conn(*in_connect, protocol_done);
}
}
if(result == CURLE_NO_CONNECTION_AVAILABLE) {
*in_connect = NULL;
return result;
}
if(result && *in_connect) {
/* We're not allowed to return failure with memory left allocated in the
connectdata struct, so free it here */
Curl_disconnect(*in_connect, FALSE); /* close the connection */
*in_connect = NULL; /* return a NULL */
}
return result;
}
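/* Illustrative sketch, not compiled as part of libcurl: the pipelining
 * short-cut in Curl_connect() (a non-empty send_pipe/recv_pipe means the
 * connection is already set up) only comes into play when the application
 * has enabled pipelining on its multi handle, for example like this. The
 * limit value is just an example. */
#if 0
#include <curl/curl.h>

static void enable_pipelining_sketch(CURLM *multi)
{
  /* let transfers to the same host queue up on one connection */
  curl_multi_setopt(multi, CURLMOPT_PIPELINING, CURLPIPE_HTTP1);
  /* cap how many requests may be pipelined on a single connection */
  curl_multi_setopt(multi, CURLMOPT_MAX_PIPELINE_LENGTH, 5L);
}
#endif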
/*
* Curl_init_do() inits the readwrite session. It is called each time (in
* the DO function, before the protocol-specific DO functions are invoked)
* for a transfer, sometimes multiple times on the same Curl_easy. Make sure
* nothing in here depends on stuff that is set up dynamically for the
* transfer.
*
* Allow this function to get called with 'conn' set to NULL.
*/
CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn)
{
struct SingleRequest *k = &data->req;
if(conn)
conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to
* use */
data->state.done = FALSE; /* *_done() is not called yet */
data->state.expect100header = FALSE;
if(data->set.opt_no_body)
/* in HTTP lingo, no body means using the HEAD request... */
data->set.httpreq = HTTPREQ_HEAD;
else if(HTTPREQ_HEAD == data->set.httpreq)
/* ... but if unset, there is no perfect method that is the "opposite" of
HEAD; in practice most people probably expect GET. The important thing
is that we can't let it remain HEAD if opt_no_body is set FALSE, since
then we would behave incorrectly when talking HTTP. */
data->set.httpreq = HTTPREQ_GET;
k->start = Curl_tvnow(); /* start time */
k->now = k->start; /* current time is now */
k->header = TRUE; /* assume header */
k->bytecount = 0;
k->buf = data->state.buffer;
k->uploadbuf = data->state.uploadbuffer;
k->hbufp = data->state.headerbuff;
k->ignorebody = FALSE;
Curl_speedinit(data);
Curl_pgrsSetUploadCounter(data, 0);
Curl_pgrsSetDownloadCounter(data, 0);
return CURLE_OK;
}
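/* Illustrative sketch, not compiled as part of libcurl: the opt_no_body
 * handling in Curl_init_do() above is what maps CURLOPT_NOBODY onto a HEAD
 * request and, once the option is cleared again, falls back to GET on the
 * next transfer with the same handle. The URL is a placeholder. */
#if 0
#include <curl/curl.h>

static void head_then_get_sketch(void)
{
  CURL *easy = curl_easy_init();

  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");

  /* no body wanted: Curl_init_do() switches the request to HTTPREQ_HEAD */
  curl_easy_setopt(easy, CURLOPT_NOBODY, 1L);
  curl_easy_perform(easy);

  /* body wanted again: the else-branch above restores HTTPREQ_GET */
  curl_easy_setopt(easy, CURLOPT_NOBODY, 0L);
  curl_easy_perform(easy);

  curl_easy_cleanup(easy);
}
#endif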
/*
* get_protocol_family()
*
* This is used to return the protocol family for a given protocol.
*
* Parameters:
*
* protocol [in] - A single bit protocol identifier such as HTTP or HTTPS.
*
* Returns the family as a single bit protocol identifier.
*/
unsigned int get_protocol_family(unsigned int protocol)
{
unsigned int family;
switch(protocol) {
case CURLPROTO_HTTP:
case CURLPROTO_HTTPS:
family = CURLPROTO_HTTP;
break;
case CURLPROTO_FTP:
case CURLPROTO_FTPS:
family = CURLPROTO_FTP;
break;
case CURLPROTO_SCP:
family = CURLPROTO_SCP;
break;
case CURLPROTO_SFTP:
family = CURLPROTO_SFTP;
break;
case CURLPROTO_TELNET:
family = CURLPROTO_TELNET;
break;
case CURLPROTO_LDAP:
case CURLPROTO_LDAPS:
family = CURLPROTO_LDAP;
break;
case CURLPROTO_DICT:
family = CURLPROTO_DICT;
break;
case CURLPROTO_FILE:
family = CURLPROTO_FILE;
break;
case CURLPROTO_TFTP:
family = CURLPROTO_TFTP;
break;
case CURLPROTO_IMAP:
case CURLPROTO_IMAPS:
family = CURLPROTO_IMAP;
break;
case CURLPROTO_POP3:
case CURLPROTO_POP3S:
family = CURLPROTO_POP3;
break;
case CURLPROTO_SMTP:
case CURLPROTO_SMTPS:
family = CURLPROTO_SMTP;
break;
case CURLPROTO_RTSP:
family = CURLPROTO_RTSP;
break;
case CURLPROTO_RTMP:
case CURLPROTO_RTMPS:
family = CURLPROTO_RTMP;
break;
case CURLPROTO_RTMPT:
case CURLPROTO_RTMPTS:
family = CURLPROTO_RTMPT;
break;
case CURLPROTO_RTMPE:
family = CURLPROTO_RTMPE;
break;
case CURLPROTO_RTMPTE:
family = CURLPROTO_RTMPTE;
break;
case CURLPROTO_GOPHER:
family = CURLPROTO_GOPHER;
break;
case CURLPROTO_SMB:
case CURLPROTO_SMBS:
family = CURLPROTO_SMB;
break;
default:
family = 0;
break;
}
return family;
}
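/* Illustrative sketch, not compiled as part of libcurl: the mapping above
 * folds the "secure" variant of a protocol onto the same family bit as its
 * plain-text sibling, while single-member families map onto themselves and
 * unknown bits map to 0. */
#if 0
#include <assert.h>

static void protocol_family_sketch(void)
{
  assert(get_protocol_family(CURLPROTO_HTTPS) == CURLPROTO_HTTP);
  assert(get_protocol_family(CURLPROTO_HTTP) == CURLPROTO_HTTP);
  assert(get_protocol_family(CURLPROTO_IMAPS) == CURLPROTO_IMAP);
  assert(get_protocol_family(CURLPROTO_SCP) == CURLPROTO_SCP);
  assert(get_protocol_family(0) == 0);
}
#endif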