moved here from the newlib branch

darwin-pinnedpubkey
Daniel Stenberg 2000-05-22 14:12:12 +00:00
parent fb9d1ff00f
commit 96dde76b99
54 changed files with 2824 additions and 2580 deletions

33
CHANGES
View File

@ -8,6 +8,39 @@
Version XX
Daniel (21 May 2000)
- Updated lots of #defines, enums and variable type names in the library. No
more weird URG or URLTAG prefixes. All types and names should be curl-
prefixed to avoid name space clashes. The FLAGS-parameter to the former
curl_urlget() has been converted into a bunch of flags to use in separate
setopt calls. I'm still focusing on the easy-interface, as the curl tool is
now using that.
- Bjorn Reese has provided me with an asynchronous name resolver that I plan
to use in upcoming versions of curl to be able to gracefully timeout name
lookups.
Version 7.0beta released
Daniel (18 May 2000)
- Introduced LIBCURL_VERSION_NUM to the curl.h include file to better allow
source codes to be dependent on the lib version. This define is now set to
a hexadecimal number, with 8 bits each for major number, minor number and
patch number. In other words, version 1.2.3 would make it 0x010203. It also
makes a larger number a newer version.
Daniel (17 May 2000)
- Martin Kammerhofer correctly pointed out several flaws in the FTP range
option. I corrected them.
- Removed the win32 winsock init crap from the lib to the src/main.c file
in the application instead. They can't be in the lib, especially not for
multithreaded purposes.
Daniel (16 May 2000)
- Rewrote the src/main.c source to use the new easy-interface to libcurl 7.
There is still more work to do, but the first step is now taken.
<curl/easy.h> is the include file to use.
Daniel (14 May 2000)
- FTP URLs are now treated slightly different, more according to RFC 1738.
- FTP sessions are now performed differently, with CWD commands to change

11
FAQ
View File

@ -83,14 +83,3 @@ configure doesn't find OpenSSL even when it is installed
things work
Submitted by: Bob Allison <allisonb@users.sourceforge.net>
Will you write a script for me getting ZZZ from YYY?
====================================================
No.
I try to help out to solve issues with curl and related stuff, but I really
do have a lot of stuff on my daily schedule and I'd prefer if you did not
ask me to do your jobs. Writing scripts is very easy. Using curl might be
tricky, but once you're past the initial mistakes the road to success is
very short and straight-forward.

View File

@ -71,6 +71,9 @@
/* Define if you have the strcasecmp function. */
/*#define HAVE_STRCASECMP 1*/
/* Define if you have the stricmp function. */
#define HAVE_STRICMP 1
/* Define if you have the strdup function. */
#define HAVE_STRDUP 1

View File

@ -82,12 +82,18 @@
/* Define if you have the strcasecmp function. */
#undef HAVE_STRCASECMP
/* Define if you have the strcmpi function. */
#undef HAVE_STRCMPI
/* Define if you have the strdup function. */
#undef HAVE_STRDUP
/* Define if you have the strftime function. */
#undef HAVE_STRFTIME
/* Define if you have the stricmp function. */
#undef HAVE_STRICMP
/* Define if you have the strstr function. */
#undef HAVE_STRSTR

View File

@ -2,13 +2,7 @@ dnl $Id$
dnl Process this file with autoconf to produce a configure script.
AC_INIT(lib/urldata.h)
AM_CONFIG_HEADER(config.h src/config.h)
AM_INIT_AUTOMAKE(curl,"3-test")
dnl
dnl Detect the canonical host and target build environment
dnl
AC_CANONICAL_HOST
AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE(curl,"7.0beta")
dnl Checks for programs.
AC_PROG_CC
@ -26,27 +20,6 @@ dnl **********************************************************************
dnl nsl lib?
AC_CHECK_FUNC(gethostbyname, , AC_CHECK_LIB(nsl, gethostbyname))
dnl At least one system has been identified to require BOTH nsl and
dnl socket libs to link properly.
if test "$ac_cv_lib_nsl_gethostbyname" = "$ac_cv_func_gethostbyname"; then
AC_MSG_CHECKING([trying both nsl and socket libs])
my_ac_save_LIBS=$LIBS
LIBS="-lnsl -lsocket $LIBS"
AC_TRY_LINK( ,
[gethostbyname();],
my_ac_link_result=success,
my_ac_link_result=failure )
if test "$my_ac_link_result" = "failure"; then
AC_MSG_RESULT([no])
AC_MSG_ERROR([couldn't find libraries for gethostbyname()])
dnl restore LIBS
LIBS=$my_ac_save_LIBS
else
AC_MSG_RESULT([yes])
fi
fi
dnl resolve lib?
AC_CHECK_FUNC(strcasecmp, , AC_CHECK_LIB(resolve, strcasecmp))
@ -219,14 +192,13 @@ AC_CHECK_FUNCS( socket \
RAND_screen
)
AC_PATH_PROG( PERL, perl, ,
$PATH:/usr/local/bin/perl:/usr/bin/:/usr/local/bin )
AC_SUBST(PERL)
AC_PATH_PROGS( NROFF, gnroff nroff, ,
$PATH:/usr/bin/:/usr/local/bin )
AC_SUBST(NROFF)
AC_PROG_RANLIB
AC_PROG_YACC
@ -236,8 +208,6 @@ dnl $PATH:/usr/bin/:/usr/local/bin )
dnl AC_SUBST(RANLIB)
AC_OUTPUT( Makefile \
curl.spec \
curl-ssl.spec \
src/Makefile \
lib/Makefile )
dnl perl/checklinks.pl \

2
curl.1
View File

@ -204,7 +204,7 @@ A quick and very simple example of how to setup a
to allow curl to ftp to the machine host.domain.com with user name
'myself' and password 'secret' should look similar to:
.B "machine host.domain.com user myself password secret"
.B "machine host.domain.com login myself password secret"
.IP "-N/--no-buffer"
Disables the buffering of the output stream. In normal work situations, curl
will use a standard buffered output stream that will have the effect that it

View File

@ -1,5 +1,5 @@
#ifndef __CURL_H
#define __CURL_H
#ifndef __CURL_CURL_H
#define __CURL_CURL_H
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
@ -39,6 +39,25 @@
*
* ------------------------------------------------------------
****************************************************************************/
/* The include stuff here is mainly for time_t! */
#ifdef vms
# include <types.h>
# include <time.h>
#else
# include <sys/types.h>
# if TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
# else
# if HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
# endif
#endif /* defined (vms) */
#ifndef TRUE
#define TRUE 1
#endif
@ -46,71 +65,8 @@
#define FALSE 0
#endif
#include <curl/types.h>
#define CONF_DEFAULT 0
#define CONF_PROXY (1<<0) /* set if proxy is in use */
#define CONF_PORT (1<<1) /* set if different port than protcol-defines is
used */
#define CONF_HTTP (1<<2) /* http get */
#define CONF_GOPHER (1<<3) /* gopher get */
#define CONF_FTP (1<<4) /* ftp get (binary mode) */
#define CONF_VERBOSE (1<<5) /* talk a lot */
#define CONF_TELNET (1<<6)
#define CONF_HEADER (1<<8) /* throw the header out too */
#define CONF_USERPWD (1<<9) /* user+passwd has been specified */
#define CONF_NOPROGRESS (1<<10) /* shut off the progress meter (auto)
see also _MUTE */
#define CONF_NOBODY (1<<11) /* use HEAD to get http document */
#define CONF_FAILONERROR (1<<12) /* Makes urlget() fail with a return code
WITHOUT writing anything to the output if
a return code >=300 is returned from the
server. */
#define CONF_RANGE (1<<13) /* Byte-range request, specified parameter is set */
#define CONF_UPLOAD (1<<14) /* this is an upload, only supported for ftp
currently */
#define CONF_POST (1<<15) /* HTTP POST method */
/* When getting an FTP directory, this switch makes the listing only show file
names and nothing else. Makes machine parsing of the output possible. This
enforces the NLST command to the ftp server, compared to the otherwise
used: LIST. */
#define CONF_FTPLISTONLY (1<<16)
/* Set the referer string */
#define CONF_REFERER (1<<17)
#define CONF_PROXYUSERPWD (1<<18) /* Proxy user+passwd has been specified */
/* For FTP, use PORT instead of PASV! */
#define CONF_FTPPORT (1<<19)
/* FTP: Append instead of overwrite on upload! */
#define CONF_FTPAPPEND (1<<20)
#define CONF_HTTPS (1<<21) /* Use SSLeay for encrypted communication */
#define CONF_NETRC (1<<22) /* read user+password from .netrc */
#define CONF_FOLLOWLOCATION (1<<23) /* get the page that the Location: tells
us to get */
#define CONF_FTPASCII (1<<24) /* use TYPE A for transfer */
#define CONF_HTTPPOST (1<<25) /* this causes a multipart/form-data
HTTP POST */
#define CONF_NOPROT (1<<26) /* host name specified without protocol */
#define CONF_PUT (1<<27) /* PUT the input file */
#define CONF_MUTE (1<<28) /* force NOPROGRESS */
#define CONF_DICT (1<<29) /* DICT:// protocol */
#define CONF_FILE (1<<30) /* FILE:// protocol */
#define CONF_LDAP (1<<31) /* LDAP:// protocol */
struct HttpHeader {
struct HttpHeader *next; /* next entry in the list */
@ -132,68 +88,69 @@ struct HttpPost {
may return other values, stay prepared. */
typedef enum {
URG_OK = 0,
URG_UNSUPPORTED_PROTOCOL,
URG_FAILED_INIT,
URG_URL_MALFORMAT,
URG_URL_MALFORMAT_USER,
URG_COULDNT_RESOLVE_PROXY,
URG_COULDNT_RESOLVE_HOST,
URG_COULDNT_CONNECT,
URG_FTP_WEIRD_SERVER_REPLY,
URG_FTP_ACCESS_DENIED,
URG_FTP_USER_PASSWORD_INCORRECT,
URG_FTP_WEIRD_PASS_REPLY,
URG_FTP_WEIRD_USER_REPLY,
URG_FTP_WEIRD_PASV_REPLY,
URG_FTP_WEIRD_227_FORMAT,
URG_FTP_CANT_GET_HOST,
URG_FTP_CANT_RECONNECT,
URG_FTP_COULDNT_SET_BINARY,
URG_PARTIAL_FILE,
URG_FTP_COULDNT_RETR_FILE,
URG_FTP_WRITE_ERROR,
URG_FTP_QUOTE_ERROR,
URG_HTTP_NOT_FOUND,
URG_WRITE_ERROR,
CURLE_OK = 0,
CURLE_UNSUPPORTED_PROTOCOL,
CURLE_FAILED_INIT,
CURLE_URL_MALFORMAT,
CURLE_URL_MALFORMAT_USER,
CURLE_COULDNT_RESOLVE_PROXY,
CURLE_COULDNT_RESOLVE_HOST,
CURLE_COULDNT_CONNECT,
CURLE_FTP_WEIRD_SERVER_REPLY,
CURLE_FTP_ACCESS_DENIED,
CURLE_FTP_USER_PASSWORD_INCORRECT,
CURLE_FTP_WEIRD_PASS_REPLY,
CURLE_FTP_WEIRD_USER_REPLY,
CURLE_FTP_WEIRD_PASV_REPLY,
CURLE_FTP_WEIRD_227_FORMAT,
CURLE_FTP_CANT_GET_HOST,
CURLE_FTP_CANT_RECONNECT,
CURLE_FTP_COULDNT_SET_BINARY,
CURLE_PARTIAL_FILE,
CURLE_FTP_COULDNT_RETR_FILE,
CURLE_FTP_WRITE_ERROR,
CURLE_FTP_QUOTE_ERROR,
CURLE_HTTP_NOT_FOUND,
CURLE_WRITE_ERROR,
URG_MALFORMAT_USER, /* the user name is illegally specified */
URG_FTP_COULDNT_STOR_FILE, /* failed FTP upload */
URG_READ_ERROR, /* could open/read from file */
CURLE_MALFORMAT_USER, /* the user name is illegally specified */
CURLE_FTP_COULDNT_STOR_FILE, /* failed FTP upload */
CURLE_READ_ERROR, /* could open/read from file */
URG_OUT_OF_MEMORY,
URG_OPERATION_TIMEOUTED, /* the timeout time was reached */
URG_FTP_COULDNT_SET_ASCII, /* TYPE A failed */
CURLE_OUT_OF_MEMORY,
CURLE_OPERATION_TIMEOUTED, /* the timeout time was reached */
CURLE_FTP_COULDNT_SET_ASCII, /* TYPE A failed */
URG_FTP_PORT_FAILED, /* FTP PORT operation failed */
CURLE_FTP_PORT_FAILED, /* FTP PORT operation failed */
URG_FTP_COULDNT_USE_REST, /* the REST command failed */
URG_FTP_COULDNT_GET_SIZE, /* the SIZE command failed */
CURLE_FTP_COULDNT_USE_REST, /* the REST command failed */
CURLE_FTP_COULDNT_GET_SIZE, /* the SIZE command failed */
URG_HTTP_RANGE_ERROR, /* The RANGE "command" didn't seem to work */
CURLE_HTTP_RANGE_ERROR, /* The RANGE "command" didn't seem to work */
URG_HTTP_POST_ERROR,
CURLE_HTTP_POST_ERROR,
URG_SSL_CONNECT_ERROR, /* something was wrong when connecting with SSL */
CURLE_SSL_CONNECT_ERROR, /* something was wrong when connecting with SSL */
URG_FTP_BAD_DOWNLOAD_RESUME, /* couldn't resume download */
CURLE_FTP_BAD_DOWNLOAD_RESUME, /* couldn't resume download */
URG_FILE_COULDNT_READ_FILE,
CURLE_FILE_COULDNT_READ_FILE,
URG_LDAP_CANNOT_BIND,
URG_LDAP_SEARCH_FAILED,
URG_LIBRARY_NOT_FOUND,
URG_FUNCTION_NOT_FOUND,
CURLE_LDAP_CANNOT_BIND,
CURLE_LDAP_SEARCH_FAILED,
CURLE_LIBRARY_NOT_FOUND,
CURLE_FUNCTION_NOT_FOUND,
CURLE_ABORTED_BY_CALLBACK,
URL_LAST
} UrgError;
CURLE_BAD_FUNCTION_ARGUMENT,
CURLE_BAD_CALLING_ORDER,
CURL_LAST
} CURLcode;
/* This is just to make older programs not break: */
#define URG_FTP_PARTIAL_FILE URG_PARTIAL_FILE
#define URGTAG_DONE -1
#define URGTAG_LAST -1
#define URGTAG_END -1
#define CURLE_FTP_PARTIAL_FILE CURLE_PARTIAL_FILE
#define URLGET_ERROR_SIZE 256
@ -201,19 +158,19 @@ typedef enum {
#define URL_MAX_LENGTH 4096
#define URL_MAX_LENGTH_TXT "4095"
/* name is uppercase URGTAG_<name>,
type is one of the defined URGTYPE_<type>
/* name is uppercase CURLOPT_<name>,
type is one of the defined CURLOPTTYPE_<type>
number is unique identifier */
#define T(name,type,number) URGTAG_ ## name = URGTYPE_ ## type + number
#define T(name,type,number) CURLOPT_ ## name = CURLOPTTYPE_ ## type + number
/* long may be 32 or 64 bits, but we should never depend on anything else
but 32 */
#define URGTYPE_LONG 0
#define URGTYPE_OBJECTPOINT 10000
#define URGTYPE_FUNCTIONPOINT 20000
#define CURLOPTTYPE_LONG 0
#define CURLOPTTYPE_OBJECTPOINT 10000
#define CURLOPTTYPE_FUNCTIONPOINT 20000
typedef enum {
URGTAG_NOTHING, /* the first unused */
T(NOTHING, LONG, 0), /********* the first one is unused ************/
/* This is the FILE * the regular output should be written to. */
T(FILE, OBJECTPOINT, 1),
@ -222,28 +179,29 @@ typedef enum {
T(URL, OBJECTPOINT, 2),
/* Port number to connect to, if other than default. Specify the CONF_PORT
flag in the URGTAG_FLAGS to activate this */
flag in the CURLOPT_FLAGS to activate this */
T(PORT, LONG, 3),
/* Name of proxy to use. Specify the CONF_PROXY flag in the URGTAG_FLAGS to
/* Name of proxy to use. Specify the CONF_PROXY flag in the CURLOPT_FLAGS to
activate this */
T(PROXY, OBJECTPOINT, 4),
/* Name and password to use when fetching. Specify the CONF_USERPWD flag in
the URGTAG_FLAGS to activate this */
the CURLOPT_FLAGS to activate this */
T(USERPWD, OBJECTPOINT, 5),
/* Name and password to use with Proxy. Specify the CONF_PROXYUSERPWD
flag in the URGTAG_FLAGS to activate this */
flag in the CURLOPT_FLAGS to activate this */
T(PROXYUSERPWD, OBJECTPOINT, 6),
/* Range to get, specified as an ASCII string. Specify the CONF_RANGE flag
in the URGTAG_FLAGS to activate this */
in the CURLOPT_FLAGS to activate this */
T(RANGE, OBJECTPOINT, 7),
#if 0
/* Configuration flags */
T(FLAGS, LONG, 8),
#endif
/* Specified file stream to upload from (use as input): */
T(INFILE, OBJECTPOINT, 9),
@ -262,7 +220,7 @@ typedef enum {
/* Time-out the read operation after this amount of seconds */
T(TIMEOUT, LONG, 13),
/* If the URGTAG_INFILE is used, this can be used to inform urlget about how
/* If the CURLOPT_INFILE is used, this can be used to inform urlget about how
large the file being sent really is. That allows better error checking
and better verifies that the upload was successful. -1 means unknown
size. */
@ -364,8 +322,25 @@ typedef enum {
as described elsewhere. */
T(WRITEINFO, OBJECTPOINT, 40),
URGTAG_LASTENTRY /* the last unusued */
} UrgTag;
/* Previous FLAG bits */
T(VERBOSE, LONG, 41), /* talk a lot */
T(HEADER, LONG, 42), /* throw the header out too */
T(NOPROGRESS, LONG, 43), /* shut off the progress meter */
T(NOBODY, LONG, 44), /* use HEAD to get http document */
T(FAILONERROR, LONG, 45), /* no output on http error codes >= 300 */
T(UPLOAD, LONG, 46), /* this is an upload */
T(POST, LONG, 47), /* HTTP POST method */
T(FTPLISTONLY, LONG, 48), /* Use NLST when listing ftp dir */
T(FTPAPPEND, LONG, 50), /* Append instead of overwrite on upload! */
T(NETRC, LONG, 51), /* read user+password from .netrc */
T(FOLLOWLOCATION, LONG, 52), /* use Location: Luke! */
T(FTPASCII, LONG, 53), /* use TYPE A for transfer */
T(PUT, LONG, 54), /* PUT the input file */
T(MUTE, LONG, 55), /* force NOPROGRESS */
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
#define CURL_PROGRESS_STATS 0 /* default progress display */
#define CURL_PROGRESS_BAR 1
@ -388,23 +363,11 @@ typedef char bool;
#endif /* (rabe) */
#endif
/**********************************************************************
*
* >>> urlget() interface #defines changed in v5! <<<
*
* You enter parameters as tags. Tags are specified as a pair of parameters.
* The first parameter in a pair is the tag identifier, telling urlget what
* kind of tag it is, and the second is the data. The tags may come in any
* order but MUST ALWAYS BE TERMINATED with an ending URGTAG_DONE (which
* needs no data).
*
* _Very_ simple example:
*
* curl_urlget(URGTAG_URL, "http://www.fts.frontec.se/~dast/", URGTAG_DONE);
*
***********************************************************************/
#if 0
/* At last, I stand here in front of you today and can officially proclaim
this function prototype as history... 17th of May, 2000 */
UrgError curl_urlget(UrgTag, ...);
#endif
/* external form function */
int curl_FormParse(char *string,
@ -418,9 +381,10 @@ char *curl_GetEnv(char *variable);
char *curl_version(void);
/* This is the version number */
#define LIBCURL_VERSION "6.5.2"
#define LIBCURL_VERSION "7.0beta"
#define LIBCURL_VERSION_NUM 0x070000
/* linked-list structure for QUOTE */
/* linked-list structure for the CURLOPT_QUOTE option */
struct curl_slist {
char *data;
struct curl_slist *next;
@ -429,4 +393,192 @@ struct curl_slist {
struct curl_slist *curl_slist_append(struct curl_slist *list, char *data);
void curl_slist_free_all(struct curl_slist *list);
#endif /* __URLGET_H */
/*
* NAME curl_init()
*
* DESCRIPTION
*
* Inits libcurl globally. This must be used before any libcurl calls can
* be used. This may install global plug-ins or whatever. (This does not
* do winsock inits in Windows.)
*
* EXAMPLE
*
* curl_init();
*
*/
CURLcode curl_init(void);
/*
* NAME curl_free()
*
* DESCRIPTION
*
* Frees libcurl globally. This must be used after all libcurl calls have
* been used. This may remove global plug-ins or whatever. (This does not
* do winsock cleanups in Windows.)
*
* EXAMPLE
*
* curl_free(curl);
*
*/
void curl_free(void);
/*
* NAME curl_open()
*
* DESCRIPTION
*
* Opens a general curl session. It does not try to connect or do anything
* on the network because of this call. The specified URL is only required
* to enable curl to figure out what protocol to "activate".
*
* A session should be looked upon as a series of requests to a single host. A
* session interacts with one host only, using one single protocol.
*
* The URL is not required. If set to "" or NULL, it can still be set later
* using the curl_setopt() function. If the curl_connect() function is called
* without the URL being known, it will return error.
*
* EXAMPLE
*
* CURLcode result;
* CURL *curl;
* result = curl_open(&curl, "http://curl.haxx.nu/libcurl/");
* if(result != CURL_OK) {
* return result;
* }
* */
CURLcode curl_open(CURL **curl, char *url);
/*
* NAME curl_setopt()
*
* DESCRIPTION
*
* Sets a particular option to the specified value.
*
* EXAMPLE
*
* CURL curl;
* curl_setopt(curl, CURL_HTTP_FOLLOW_LOCATION, TRUE);
*/
CURLcode curl_setopt(CURL *handle, CURLoption option, ...);
/*
* NAME curl_close()
*
* DESCRIPTION
*
* Closes a session previously opened with curl_open()
*
* EXAMPLE
*
* CURL *curl;
* CURLcode result;
*
* result = curl_close(curl);
*/
CURLcode curl_close(CURL *curl); /* the opposite of curl_open() */
CURLcode curl_read(CURLconnect *c_conn, char *buf, size_t buffersize,
size_t *n);
CURLcode curl_write(CURLconnect *c_conn, char *buf, size_t amount,
size_t *n);
/*
* NAME curl_connect()
*
* DESCRIPTION
*
* Connects to the peer server and performs the initial setup. This function
* writes a connect handle to its second argument that is a unique handle for
* this connect. This allows multiple connects from the same handle returned
* by curl_open().
*
* EXAMPLE
*
* CURLCode result;
* CURL curl;
* CURLconnect connect;
* result = curl_connect(curl, &connect);
*/
CURLcode curl_connect(CURL *curl, CURLconnect **in_connect);
/*
* NAME curl_do()
*
* DESCRIPTION
*
* (Note: May 3rd 2000: this function does not currently allow you to
* specify a document, it will use the one set previously)
*
* This function asks for the particular document, file or resource that
* resides on the server we have connected to. You may specify a full URL,
* just an absolute path or even a relative path. That means, if you're just
* getting one file from the remote site, you can use the same URL as input
* for both curl_open() as well as for this function.
*
* In the event there are host name, port number, user name or password parts
* in the URL, you can use the 'flags' argument to ignore them completely, or
* at your choice, make the function fail if you're trying to get a URL from
* different host than you connected to with curl_connect().
*
* You can only get one document at a time using the same connection. When one
* document has been received you can request another.
*
* When the transfer is done, curl_done() MUST be called.
*
* EXAMPLE
*
* CURLCode result;
* char *url;
* CURLconnect *connect;
* result = curl_do(connect, url, CURL_DO_NONE); */
CURLcode curl_do(CURLconnect *in_conn);
/*
* NAME curl_done()
*
* DESCRIPTION
*
* When the transfer following a curl_do() call is done, this function should
* get called.
*
* EXAMPLE
*
* CURLCode result;
* char *url;
* CURLconnect *connect;
* result = curl_done(connect); */
CURLcode curl_done(CURLconnect *connect);
/*
* NAME curl_disconnect()
*
* DESCRIPTION
*
* Disconnects from the peer server and performs connection cleanup.
*
* EXAMPLE
*
* CURLcode result;
* CURLconnect *connect;
* result = curl_disconnect(connect); */
CURLcode curl_disconnect(CURLconnect *connect);
/*
* NAME curl_getdate()
*
* DESCRIPTION
*
* Returns the time, in seconds since 1 Jan 1970 of the time string given in
* the first argument. The time argument in the second parameter is for cases
* where the specified time is relative now, like 'two weeks' or 'tomorrow'
* etc.
*/
time_t curl_getdate(const char *p, const time_t *now);
#endif /* __CURL_CURL_H */

View File

@ -0,0 +1,46 @@
#ifndef __CURL_EASY_H
#define __CURL_EASY_H
/*****************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is Curl.
 *
 * The Initial Developer of the Original Code is Daniel Stenberg.
 *
 * Portions created by the Initial Developer are Copyright (C) 1998.
 * All Rights Reserved.
 *
 * ------------------------------------------------------------
 * Main author:
 * - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
 *
 * http://curl.haxx.nu
 *
 * $Source$
 * $Revision$
 * $Date$
 * $Author$
 * $State$
 * $Locker$
 *
 * ------------------------------------------------------------
 ****************************************************************************/
/* The "easy" interface to libcurl: a minimal set of calls for simple
   synchronous transfers.
   NOTE(review): this header uses CURL, CURLcode and CURLoption but does not
   include <curl/curl.h> or <curl/types.h> itself — it presumably assumes the
   caller includes <curl/curl.h> first; verify against users of this header. */

/* Create and return a new easy-session handle, or NULL on failure
   (assumed from the pointer return — TODO confirm failure semantics). */
CURL *curl_easy_init(void);
/* Set a CURLOPT_* option on the handle; the variadic argument carries the
   option value. Returns a CURLcode status. */
CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
/* Perform the transfer as configured with prior curl_easy_setopt() calls.
   Returns a CURLcode status. */
CURLcode curl_easy_perform(CURL *curl);
/* Release the handle obtained from curl_easy_init(). */
void curl_easy_cleanup(CURL *curl);
#endif

View File

@ -0,0 +1,45 @@
#ifndef __CURL_TYPES_H
#define __CURL_TYPES_H
/*****************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is Curl.
 *
 * The Initial Developer of the Original Code is Daniel Stenberg.
 *
 * Portions created by the Initial Developer are Copyright (C) 1998.
 * All Rights Reserved.
 *
 * ------------------------------------------------------------
 * Main author:
 * - Daniel Stenberg <Daniel.Stenberg@haxx.nu>
 *
 * http://curl.haxx.nu
 *
 * $Source$
 * $Revision$
 * $Date$
 * $Author$
 * $State$
 * $Locker$
 *
 * ------------------------------------------------------------
 ****************************************************************************/
/* Opaque public handle types. Both are void typedefs so that applications
   never see (or depend on) libcurl's internal struct layouts; the library
   casts them back to its internal types. */
/* A curl session handle, as returned by curl_open()/curl_easy_init(). */
typedef void CURL;
/* A single connection within a session, as produced by curl_connect(). */
typedef void CURLconnect;
#endif /* __CURL_TYPES_H */

View File

@ -7,7 +7,7 @@ AUTOMAKE_OPTIONS = foreign no-dependencies
noinst_LIBRARIES = libcurl.a
# Some flags needed when trying to cause warnings ;-)
#CFLAGS = -g -Wall -pedantic
CFLAGS = -g -Wall #-pedantic
INCLUDES = -I$(top_srcdir)/include
@ -23,7 +23,7 @@ download.c getdate.h ldap.c ssluse.c version.c \
download.h getenv.c ldap.h ssluse.h \
escape.c getenv.h mprintf.c telnet.c \
escape.h getpass.c netrc.c telnet.h \
writeout.c writeout.h
writeout.c writeout.h highlevel.c strequal.c strequal.h easy.c
# Say $(srcdir), so GNU make does not report an ambiguity with the .y.c rule.
$(srcdir)/getdate.c: getdate.y

View File

@ -77,11 +77,11 @@ AUTOMAKE_OPTIONS = foreign no-dependencies
noinst_LIBRARIES = libcurl.a
# Some flags needed when trying to cause warnings ;-)
#CFLAGS = -g -Wall -pedantic
CFLAGS = -g -Wall #-pedantic
INCLUDES = -I$(top_srcdir)/include
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h urldata.h download.c getdate.h ldap.c ssluse.c version.c download.h getenv.c ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c telnet.h writeout.c writeout.h
libcurl_a_SOURCES = arpa_telnet.h file.c getpass.h netrc.h timeval.c base64.c file.h hostip.c progress.c timeval.h base64.h formdata.c hostip.h progress.h cookie.c formdata.h http.c sendf.c cookie.h ftp.c http.h sendf.h url.c dict.c ftp.h if2ip.c speedcheck.c url.h dict.h getdate.c if2ip.h speedcheck.h urldata.h download.c getdate.h ldap.c ssluse.c version.c download.h getenv.c ldap.h ssluse.h escape.c getenv.h mprintf.c telnet.c escape.h getpass.c netrc.c telnet.h writeout.c writeout.h highlevel.c strequal.c strequal.h easy.c
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_HEADER = ../config.h ../src/config.h
@ -97,9 +97,9 @@ libcurl_a_LIBADD =
libcurl_a_OBJECTS = file.o timeval.o base64.o hostip.o progress.o \
formdata.o cookie.o http.o sendf.o ftp.o url.o dict.o if2ip.o \
speedcheck.o getdate.o download.o ldap.o ssluse.o version.o getenv.o \
escape.o mprintf.o telnet.o getpass.o netrc.o writeout.o
escape.o mprintf.o telnet.o getpass.o netrc.o writeout.o highlevel.o \
strequal.o easy.o
AR = ar
CFLAGS = @CFLAGS@
COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
CCLD = $(CC)
LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@

View File

@ -62,6 +62,7 @@ Example set of cookies:
#include "cookie.h"
#include "setup.h"
#include "getdate.h"
#include "strequal.h"
/****************************************************************************
*
@ -131,7 +132,7 @@ struct Cookie *cookie_add(struct CookieInfo *c,
}
else if(strequal("expires", name)) {
co->expirestr=strdup(what);
co->expires = get_date(what, &now);
co->expires = curl_getdate(what, &now);
}
else if(!co->name) {
co->name = strdup(name);
@ -173,9 +174,11 @@ struct Cookie *cookie_add(struct CookieInfo *c,
return NULL;
}
/* strip off the possible end-of-line characters */
if(ptr=strchr(lineptr, '\r'))
ptr=strchr(lineptr, '\r');
if(ptr)
*ptr=0; /* clear it */
if(ptr=strchr(lineptr, '\n'))
ptr=strchr(lineptr, '\n');
if(ptr)
*ptr=0; /* clear it */
firstptr=strtok(lineptr, "\t"); /* first tokenize it on the TAB */

View File

@ -92,12 +92,17 @@
#include "sendf.h"
#include "progress.h"
#include "strequal.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
CURLcode dict_done(struct connectdata *conn)
{
return CURLE_OK;
}
UrgError dict(struct UrlData *data, char *path, long *bytecount)
CURLcode dict(struct connectdata *conn)
{
int nth;
char *word;
@ -106,9 +111,13 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
char *strategy = NULL;
char *nthdef = NULL; /* This is not part of the protocol, but required
by RFC 2229 */
UrgError result=URG_OK;
if(data->conf & CONF_USERPWD) {
CURLcode result=CURLE_OK;
struct UrlData *data=conn->data;
char *path = conn->path;
long *bytecount = &conn->bytecount;
if(data->bits.user_passwd) {
/* AUTH is missing */
}
@ -162,7 +171,7 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
word
);
result = Transfer(data, data->firstsocket, -1, FALSE, bytecount,
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
if(result)
@ -210,7 +219,7 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
word
);
result = Transfer(data, data->firstsocket, -1, FALSE, bytecount,
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL); /* no upload */
if(result)
@ -234,7 +243,7 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
"QUIT\n",
ppath);
result = Transfer(data, data->firstsocket, -1, FALSE, bytecount,
result = Transfer(conn, data->firstsocket, -1, FALSE, bytecount,
-1, NULL);
if(result)
@ -243,10 +252,5 @@ UrgError dict(struct UrlData *data, char *path, long *bytecount)
}
}
#if 0
ProgressEnd(data);
#endif
pgrsDone(data);
return URG_OK;
return CURLE_OK;
}

View File

@ -40,6 +40,7 @@
*
* ------------------------------------------------------------
****************************************************************************/
UrgError dict(struct UrlData *data, char *path, long *bytecountp);
CURLcode dict(struct connectdata *conn);
CURLcode dict_done(struct connectdata *conn);
#endif

View File

@ -78,20 +78,16 @@
#include "speedcheck.h"
#include "sendf.h"
#ifdef USE_ZLIB
#include <zlib.h>
#endif
#define MAX(x,y) ((x)>(y)?(x):(y))
#include <curl/types.h>
/* --- download and upload a stream from/to a socket --- */
/* Parts of this function was brought to us by the friendly Mark Butler
<butlerm@xmission.com>. */
UrgError
Transfer (struct UrlData *data,
/* READ stuff */
CURLcode
Transfer(CURLconnect *c_conn,
/* READ stuff */
int sockfd, /* socket to read from or -1 */
int size, /* -1 if unknown at this point */
bool getheader, /* TRUE if header parsing is wanted */
@ -101,492 +97,21 @@ Transfer (struct UrlData *data,
int writesockfd, /* socket to write to, it may very well be
the same we read from. -1 disables */
long *writebytecountp /* return number of bytes written or NULL */
)
)
{
char *buf = data->buffer;
size_t nread;
int bytecount = 0; /* number of bytes read */
int writebytecount = 0; /* number of bytes written */
long contentlength=0; /* size of incoming data */
struct timeval start = tvnow();
struct timeval now = start;
bool header = TRUE; /* incoming data has HTTP header */
int headerline = 0; /* counts header lines to better track the
first one */
struct connectdata *conn = (struct connectdata *)c_conn;
if(!conn)
return CURLE_BAD_FUNCTION_ARGUMENT;
char *hbufp; /* points at *end* of header line */
int hbuflen = 0;
char *str; /* within buf */
char *str_start; /* within buf */
char *end_ptr; /* within buf */
char *p; /* within headerbuff */
bool content_range = FALSE; /* set TRUE if Content-Range: was found */
int offset = 0; /* possible resume offset read from the
Content-Range: header */
int code = 0; /* error code from the 'HTTP/1.? XXX' line */
/* now copy all input parameters */
conn->sockfd = sockfd;
conn->size = size;
conn->getheader = getheader;
conn->bytecountp = bytecountp;
conn->writesockfd = writesockfd;
conn->writebytecountp = writebytecountp;
/* for the low speed checks: */
UrgError urg;
time_t timeofdoc=0;
long bodywrites=0;
return CURLE_OK;
char newurl[URL_MAX_LENGTH]; /* buffer for Location: URL */
/* the highest fd we use + 1 */
int maxfd = (sockfd>writesockfd?sockfd:writesockfd)+1;
hbufp = data->headerbuff;
myalarm (0); /* switch off the alarm-style timeout */
now = tvnow();
start = now;
#define KEEP_READ 1
#define KEEP_WRITE 2
pgrsTime(data, TIMER_PRETRANSFER);
if (!getheader) {
header = FALSE;
if(size > 0)
pgrsSetDownloadSize(data, size);
}
{
fd_set readfd;
fd_set writefd;
fd_set rkeepfd;
fd_set wkeepfd;
struct timeval interval;
int keepon=0;
/* timeout every X second
- makes a better progressmeter (i.e even when no data is read, the
meter can be updated and reflect reality)
- allows removal of the alarm() crap
- variable timeout is easier
*/
FD_ZERO (&readfd); /* clear it */
if(sockfd != -1) {
FD_SET (sockfd, &readfd); /* read socket */
keepon |= KEEP_READ;
}
FD_ZERO (&writefd); /* clear it */
if(writesockfd != -1) {
FD_SET (writesockfd, &writefd); /* write socket */
keepon |= KEEP_WRITE;
}
/* get these in backup variables to be able to restore them on each lap in
the select() loop */
rkeepfd = readfd;
wkeepfd = writefd;
while (keepon) {
readfd = rkeepfd; /* set those every lap in the loop */
writefd = wkeepfd;
interval.tv_sec = 1;
interval.tv_usec = 0;
switch (select (maxfd, &readfd, &writefd, NULL, &interval)) {
case -1: /* select() error, stop reading */
#ifdef EINTR
/* The EINTR is not serious, and it seems you might get this more
ofen when using the lib in a multi-threaded environment! */
if(errno == EINTR)
;
else
#endif
keepon = 0; /* no more read or write */
continue;
case 0: /* timeout */
break;
default:
if((keepon & KEEP_READ) && FD_ISSET(sockfd, &readfd)) {
/* read! */
#ifdef USE_SSLEAY
if (data->use_ssl) {
nread = SSL_read (data->ssl, buf, BUFSIZE - 1);
}
else {
#endif
nread = sread (sockfd, buf, BUFSIZE - 1);
#ifdef USE_SSLEAY
}
#endif /* USE_SSLEAY */
/* NULL terminate, allowing string ops to be used */
if (0 < (signed int) nread)
buf[nread] = 0;
/* if we receive 0 or less here, the server closed the connection and
we bail out from this! */
else if (0 >= (signed int) nread) {
keepon &= ~KEEP_READ;
break;
}
str = buf; /* Default buffer to use when we write the
buffer, it may be changed in the flow below
before the actual storing is done. */
/* Since this is a two-state thing, we check if we are parsing
headers at the moment or not. */
if (header) {
/* we are in parse-the-header-mode */
/* header line within buffer loop */
do {
int hbufp_index;
str_start = str; /* str_start is start of line within buf */
end_ptr = strchr (str_start, '\n');
if (!end_ptr) {
/* no more complete header lines within buffer */
/* copy what is remaining into headerbuff */
int str_length = (int)strlen(str);
if (hbuflen + (int)str_length >= data->headersize) {
char *newbuff;
long newsize=MAX((hbuflen+str_length)*3/2,
data->headersize*2);
hbufp_index = hbufp - data->headerbuff;
newbuff = (char *)realloc(data->headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return URG_READ_ERROR;
}
data->headersize=newsize;
data->headerbuff = newbuff;
hbufp = data->headerbuff + hbufp_index;
}
strcpy (hbufp, str);
hbufp += strlen (str);
hbuflen += strlen (str);
break; /* read more and try again */
}
str = end_ptr + 1; /* move just past new line */
if (hbuflen + (str - str_start) >= data->headersize) {
char *newbuff;
long newsize=MAX((hbuflen+(str-str_start))*3/2,
data->headersize*2);
hbufp_index = hbufp - data->headerbuff;
newbuff = (char *)realloc(data->headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return URG_READ_ERROR;
}
data->headersize= newsize;
data->headerbuff = newbuff;
hbufp = data->headerbuff + hbufp_index;
}
/* copy to end of line */
strncpy (hbufp, str_start, str - str_start);
hbufp += str - str_start;
hbuflen += str - str_start;
*hbufp = 0;
p = data->headerbuff;
/* we now have a full line that p points to */
if (('\n' == *p) || ('\r' == *p)) {
/* Zero-length line means end of header! */
if (-1 != size) /* if known */
size += bytecount; /* we append the already read size */
if ('\r' == *p)
p++; /* pass the \r byte */
if ('\n' == *p)
p++; /* pass the \n byte */
pgrsSetDownloadSize(data, size);
header = FALSE; /* no more header to parse! */
/* now, only output this if the header AND body are requested:
*/
if ((data->conf & (CONF_HEADER | CONF_NOBODY)) ==
CONF_HEADER) {
if((p - data->headerbuff) !=
data->fwrite (data->headerbuff, 1,
p - data->headerbuff, data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
if(data->writeheader) {
/* obviously, the header is requested to be written to
this file: */
if((p - data->headerbuff) !=
data->fwrite (data->headerbuff, 1, p - data->headerbuff,
data->writeheader)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
break; /* exit header line loop */
}
if (!headerline++) {
/* This is the first header, it MUST be the error code line
or else we consiser this to be the body right away! */
if (sscanf (p, " HTTP/1.%*c %3d", &code)) {
/* 404 -> URL not found! */
if (
( ((data->conf & CONF_FOLLOWLOCATION) && (code >= 400))
||
!(data->conf & CONF_FOLLOWLOCATION) && (code >= 300))
&& (data->conf & CONF_FAILONERROR)) {
/* If we have been told to fail hard on HTTP-errors,
here is the check for that: */
/* serious error, go home! */
failf (data, "The requested file was not found");
return URG_HTTP_NOT_FOUND;
}
data->progress.httpcode = code;
}
else {
header = FALSE; /* this is not a header line */
break;
}
}
/* check for Content-Length: header lines to get size */
if (strnequal("Content-Length", p, 14) &&
sscanf (p+14, ": %ld", &contentlength))
size = contentlength;
else if (strnequal("Content-Range", p, 13) &&
sscanf (p+13, ": bytes %d-", &offset)) {
if (data->resume_from == offset) {
/* we asked for a resume and we got it */
content_range = TRUE;
}
}
else if(data->cookies &&
strnequal("Set-Cookie: ", p, 11)) {
cookie_add(data->cookies, TRUE, &p[12]);
}
else if(strnequal("Last-Modified:", p,
strlen("Last-Modified:")) &&
data->timecondition) {
time_t secs=time(NULL);
timeofdoc = get_date(p+strlen("Last-Modified:"), &secs);
}
else if ((code >= 300 && code < 400) &&
(data->conf & CONF_FOLLOWLOCATION) &&
strnequal("Location", p, 8) &&
sscanf (p+8, ": %" URL_MAX_LENGTH_TXT "s", newurl)) {
/* this is the URL that the server advices us to get
instead */
data->newurl = strdup (newurl);
}
if (data->conf & CONF_HEADER) {
if(hbuflen != data->fwrite (p, 1, hbuflen, data->out)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
if(data->writeheader) {
/* the header is requested to be written to this file */
if(hbuflen != data->fwrite (p, 1, hbuflen,
data->writeheader)) {
failf (data, "Failed writing output");
return URG_WRITE_ERROR;
}
}
/* reset hbufp pointer && hbuflen */
hbufp = data->headerbuff;
hbuflen = 0;
}
while (*str); /* header line within buffer */
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
if (!header) {
/* the next token and forward is not part of
the header! */
/* we subtract the remaining header size from the buffer */
nread -= (str - buf);
}
} /* end if header mode */
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if (str && !header && (nread > 0)) {
if(0 == bodywrites) {
/* These checks are only made the first time we are about to
write a chunk of the body */
if(data->conf&CONF_HTTP) {
/* HTTP-only checks */
if (data->resume_from && !content_range ) {
/* we wanted to resume a download, although the server
doesn't seem to support this */
failf (data, "HTTP server doesn't seem to support byte ranges. Cannot resume.");
return URG_HTTP_RANGE_ERROR;
}
else if (data->newurl) {
/* abort after the headers if "follow Location" is set */
infof (data, "Follow to new URL: %s\n", data->newurl);
return URG_OK;
}
else if(data->timecondition && !data->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((timeofdoc > 0) && (data->timevalue > 0)) {
switch(data->timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(timeofdoc < data->timevalue) {
infof(data,
"The requested document is not new enough");
return URG_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(timeofdoc > data->timevalue) {
infof(data,
"The requested document is not old enough");
return URG_OK;