/* URL handling.
   Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */

#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_STRING_H
# include <string.h>
#else
# include <strings.h>
#endif
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#include <errno.h>
#include <assert.h>

#include "wget.h"
#include "utils.h"
#include "url.h"
#include "host.h"

#ifndef errno
extern int errno;
#endif

/* Is X "."?  */
#define DOTP(x) ((*(x) == '.') && (!*(x + 1)))
/* Is X ".."?  */
#define DDOTP(x) ((*(x) == '.') && (*(x + 1) == '.') && (!*(x + 2)))

static int urlpath_length PARAMS ((const char *));

struct scheme_data
{
  enum url_scheme scheme;
  char *leading_string;
  int default_port;
};

/* Supported schemes: */
static struct scheme_data supported_schemes[] =
{
  { SCHEME_HTTP,  "http://",  DEFAULT_HTTP_PORT },
#ifdef HAVE_SSL
  { SCHEME_HTTPS, "https://", DEFAULT_HTTPS_PORT },
#endif
  { SCHEME_FTP,   "ftp://",   DEFAULT_FTP_PORT }
};

static void parse_dir PARAMS ((const char *, char **, char **));
static uerr_t parse_uname PARAMS ((const char *, char **, char **));
static char *construct_relative PARAMS ((const char *, const char *));
static char process_ftp_type PARAMS ((char *));

/* Support for encoding and decoding of URL strings.  We determine
   whether a character is unsafe through static table lookup.  This
   code assumes ASCII character set and 8-bit chars.  */

enum {
  urlchr_reserved = 1,
  urlchr_unsafe   = 2
};

#define R  urlchr_reserved
#define U  urlchr_unsafe
#define RU R|U

#define urlchr_test(c, mask) (urlchr_table[(unsigned char)(c)] & (mask))

/* rfc1738 reserved chars.  We don't use this yet; preservation of
   reserved chars will be implemented when I integrate the new
   `reencode_string' function.  */

#define RESERVED_CHAR(c) urlchr_test(c, urlchr_reserved)

/* Unsafe chars:
   - anything <= 32;
   - stuff from rfc1738 ("<>\"#%{}|\\^~[]`");
   - '@' and ':'; needed for encoding URL username and password.
   - anything >= 127. */

#define UNSAFE_CHAR(c) urlchr_test(c, urlchr_unsafe)

const static unsigned char urlchr_table[256] =
{
  U,  U,  U,  U,   U,  U,  U,  U,   /* NUL SOH STX ETX  EOT ENQ ACK BEL */
  U,  U,  U,  U,   U,  U,  U,  U,   /* BS  HT  LF  VT   FF  CR  SO  SI  */
  U,  U,  U,  U,   U,  U,  U,  U,   /* DLE DC1 DC2 DC3  DC4 NAK SYN ETB */
  U,  U,  U,  U,   U,  U,  U,  U,   /* CAN EM  SUB ESC  FS  GS  RS  US  */
  U,  0,  U,  U,   0,  U,  R,  0,   /* SP  !   "   #    $   %   &   '   */
  0,  0,  0,  R,   0,  0,  0,  R,   /* (   )   *   +    ,   -   .   /   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* 0   1   2   3    4   5   6   7   */
  0,  0,  U,  R,   U,  R,  U,  R,   /* 8   9   :   ;    <   =   >   ?   */
 RU,  0,  0,  0,   0,  0,  0,  0,   /* @   A   B   C    D   E   F   G   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* H   I   J   K    L   M   N   O   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* P   Q   R   S    T   U   V   W   */
  0,  0,  0,  U,   U,  U,  U,  0,   /* X   Y   Z   [    \   ]   ^   _   */
  U,  0,  0,  0,   0,  0,  0,  0,   /* `   a   b   c    d   e   f   g   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* h   i   j   k    l   m   n   o   */
  0,  0,  0,  0,   0,  0,  0,  0,   /* p   q   r   s    t   u   v   w   */
  0,  0,  0,  U,   U,  U,  U,  U,   /* x   y   z   {    |   }   ~   DEL */

  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,

  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
  U, U, U, U,  U, U, U, U,  U, U, U, U,  U, U, U, U,
};
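
/* A few spot checks of the lookup macros above, read straight off the
   table: UNSAFE_CHAR (' ') and UNSAFE_CHAR ('%') are non-zero,
   RESERVED_CHAR ('&') and RESERVED_CHAR ('?') are non-zero, while
   UNSAFE_CHAR ('a') and RESERVED_CHAR ('a') are both zero.  */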

/* Decodes the forms %xy in a URL to the character the hexadecimal
   code of which is xy.  xy are hexadecimal digits from
   [0123456789ABCDEF] (case-insensitive).  If x or y are not
   hex-digits or `%' precedes `\0', the sequence is inserted
   literally.  */

static void
decode_string (char *s)
{
  char *t = s;                  /* t - tortoise */
  char *h = s;                  /* h - hare     */

  for (; *h; h++, t++)
    {
      if (*h != '%')
        {
        copychar:
          *t = *h;
        }
      else
        {
          /* Do nothing if '%' is not followed by two hex digits. */
          if (!*(h + 1) || !*(h + 2)
              || !(ISXDIGIT (*(h + 1)) && ISXDIGIT (*(h + 2))))
            goto copychar;
          *t = (XCHAR_TO_XDIGIT (*(h + 1)) << 4) + XCHAR_TO_XDIGIT (*(h + 2));
          h += 2;
        }
    }
  *t = '\0';
}
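
/* For example, decode_string modifies its argument in place:
   "%7Ejsmith/index.html" becomes "~jsmith/index.html" and "foo%20bar"
   becomes "foo bar", while malformed sequences such as "100%" or
   "%zz" are copied through literally.  */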

/* Like encode_string, but return S if there are no unsafe chars.  */

static char *
encode_string_maybe (const char *s)
{
  const char *p1;
  char *p2, *newstr;
  int newlen;
  int addition = 0;

  for (p1 = s; *p1; p1++)
    if (UNSAFE_CHAR (*p1))
      addition += 2;            /* Two more characters (hex digits) */

  if (!addition)
    return (char *)s;

  newlen = (p1 - s) + addition;
  newstr = (char *)xmalloc (newlen + 1);

  p1 = s;
  p2 = newstr;
  while (*p1)
    {
      if (UNSAFE_CHAR (*p1))
        {
          const unsigned char c = *p1++;
          *p2++ = '%';
          *p2++ = XDIGIT_TO_XCHAR (c >> 4);
          *p2++ = XDIGIT_TO_XCHAR (c & 0xf);
        }
      else
        *p2++ = *p1++;
    }
  *p2 = '\0';
  assert (p2 - newstr == newlen);

  return newstr;
}

/* Encode the unsafe characters (as determined by UNSAFE_CHAR) in a
   given string, returning a malloc-ed %XX encoded string.  */

char *
encode_string (const char *s)
{
  char *encoded = encode_string_maybe (s);
  if (encoded != s)
    return encoded;
  else
    return xstrdup (s);
}
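
/* A short illustration of the difference between the two encoders
   above: encode_string ("foo bar") always returns freshly allocated
   memory ("foo%20bar" here), which the caller must xfree, whereas
   encode_string_maybe ("foobar") returns its argument unchanged
   because nothing needs quoting.  */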

/* Encode unsafe characters in PTR to %xx.  If such encoding is done,
   the old value of PTR is freed and PTR is made to point to the newly
   allocated storage.  */

#define ENCODE(ptr) do {                        \
  char *e_new = encode_string_maybe (ptr);      \
  if (e_new != ptr)                             \
    {                                           \
      xfree (ptr);                              \
      ptr = e_new;                              \
    }                                           \
} while (0)

/* Returns the scheme type if the scheme is supported, or
   SCHEME_INVALID if not.  */
enum url_scheme
url_scheme (const char *url)
{
  int i;

  for (i = 0; i < ARRAY_SIZE (supported_schemes); i++)
    if (!strncasecmp (url, supported_schemes[i].leading_string,
                      strlen (supported_schemes[i].leading_string)))
      return supported_schemes[i].scheme;
  return SCHEME_INVALID;
}

/* Return the number of characters needed to skip the scheme part of
   the URL, e.g. `http://'.  If no scheme is found, returns 0.  */
int
url_skip_scheme (const char *url)
{
  const char *p = url;

  /* Skip the scheme name.  We allow `-' and `+' because of `whois++',
     etc. */
  while (ISALNUM (*p) || *p == '-' || *p == '+')
    ++p;
  if (*p != ':')
    return 0;
  /* Skip ':'. */
  ++p;

  /* Skip "//" if found. */
  if (*p == '/' && *(p + 1) == '/')
    p += 2;

  return p - url;
}
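
/* To illustrate the two helpers above: url_skip_scheme
   ("http://www.gnu.org/") returns 7 (past "http://"), url_skip_scheme
   ("mailto:bug-wget@gnu.org") returns 7 (past "mailto:", there is no
   "//" to skip), and url_skip_scheme ("www.gnu.org") returns 0.  For
   the same three strings url_scheme returns SCHEME_HTTP,
   SCHEME_INVALID and SCHEME_INVALID respectively.  */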

/* Returns 1 if the URL begins with a scheme (supported or
   unsupported), 0 otherwise.  */
int
url_has_scheme (const char *url)
{
  const char *p = url;
  while (ISALNUM (*p) || *p == '-' || *p == '+')
    ++p;
  return *p == ':';
}

/* Skip the username and password, if present here.  The function
   should be called *not* with the complete URL, but with the part
   right after the scheme.

   If no username and password are found, return 0.  */
int
url_skip_uname (const char *url)
{
  const char *p;
  const char *q = NULL;
  for (p = url ; *p && *p != '/'; p++)
    if (*p == '@') q = p;
  /* If a `@' was found before the first occurrence of `/', skip
     it.  */
  if (q != NULL)
    return q - url + 1;
  else
    return 0;
}

/* Allocate a new urlinfo structure, fill it with default values and
   return a pointer to it.  */
struct urlinfo *
newurl (void)
{
  struct urlinfo *u;

  u = (struct urlinfo *)xmalloc (sizeof (struct urlinfo));
  memset (u, 0, sizeof (*u));
  u->scheme = SCHEME_INVALID;
  return u;
}

/* Perform a "deep" free of the urlinfo structure.  The structure
   should have been created with newurl, but need not have been used.
   If COMPLETE is non-0, free the pointer itself.  */
void
freeurl (struct urlinfo *u, int complete)
{
  assert (u != NULL);
  FREE_MAYBE (u->url);
  FREE_MAYBE (u->host);
  FREE_MAYBE (u->path);
  FREE_MAYBE (u->file);
  FREE_MAYBE (u->dir);
  FREE_MAYBE (u->user);
  FREE_MAYBE (u->passwd);
  FREE_MAYBE (u->local);
  FREE_MAYBE (u->referer);
  if (u->proxy)
    freeurl (u->proxy, 1);
  if (complete)
    xfree (u);
  return;
}

enum url_parse_error {
  PE_UNRECOGNIZED_SCHEME, PE_BAD_PORT
};

/* Extract the given URL of the form
   (http:|ftp:)// (user (:password)?@)?hostname (:port)? (/path)?
   1. hostname (terminated with `/' or `:')
   2. port number (terminated with `/'), or chosen for the scheme
   3. dirname (everything after hostname)
   Most errors are handled.  No allocation is done, you must supply
   pointers to allocated memory.
   ...and a host of other stuff :-)

   - Recognizes hostname:dir/file for FTP and
     hostname (:portnum)?/dir/file for HTTP.
   - Parses the path to yield directory and file
   - Parses the URL to yield the username and passwd (if present)
   - Decodes the strings, in case they contain "forbidden" characters
   - Writes the result to struct urlinfo

   If the argument STRICT is set, it recognizes only the canonical
   form.  */
uerr_t
parseurl (const char *url, struct urlinfo *u, int strict)
{
  int i, l, abs_ftp;
  int recognizable;            /* Recognizable URL is the one where
                                  the scheme was explicitly named,
                                  i.e. it wasn't deduced from the URL
                                  format.  */
  uerr_t type;

  DEBUGP (("parseurl (\"%s\") -> ", url));
  recognizable = url_has_scheme (url);
  if (strict && !recognizable)
    return URLUNKNOWN;
  for (i = 0, l = 0; i < ARRAY_SIZE (supported_schemes); i++)
    {
      l = strlen (supported_schemes[i].leading_string);
      if (!strncasecmp (supported_schemes[i].leading_string, url, l))
        break;
    }
  /* If scheme is recognizable, but unsupported, bail out, else
     suppose unknown.  */
  if (recognizable && i == ARRAY_SIZE (supported_schemes))
    return URLUNKNOWN;
  else if (i == ARRAY_SIZE (supported_schemes))
    type = URLUNKNOWN;
  else
    u->scheme = type = supported_schemes[i].scheme;

  if (type == URLUNKNOWN)
    l = 0;
  /* Allow a username and password to be specified (i.e. just skip
     them for now).  */
  if (recognizable)
    l += url_skip_uname (url + l);
  for (i = l; url[i] && url[i] != ':' && url[i] != '/'; i++);
  if (i == l)
    return URLBADHOST;
  /* Get the hostname.  */
  u->host = strdupdelim (url + l, url + i);
  DEBUGP (("host %s -> ", u->host));

  /* Assume no port has been given.  */
  u->port = 0;
  if (url[i] == ':')
    {
      /* We have a colon delimiting the hostname.  It could mean that
         a port number is following it, or a directory.  */
      if (ISDIGIT (url[++i]))    /* A port number */
        {
          if (type == URLUNKNOWN)
            {
              type = URLHTTP;
              u->scheme = SCHEME_HTTP;
            }
          for (; url[i] && url[i] != '/'; i++)
            if (ISDIGIT (url[i]))
              u->port = 10 * u->port + (url[i] - '0');
            else
              return URLBADPORT;
          if (!u->port)
            return URLBADPORT;
          DEBUGP (("port %hu -> ", u->port));
        }
      else if (type == URLUNKNOWN) /* or a directory */
        {
          type = URLFTP;
          u->scheme = SCHEME_FTP;
        }
      else                      /* or just a misformed port number */
        return URLBADPORT;
    }
  else if (type == URLUNKNOWN)
    {
      type = URLHTTP;
      u->scheme = SCHEME_HTTP;
    }
  if (!u->port)
    {
      int ind;
      for (ind = 0; ind < ARRAY_SIZE (supported_schemes); ind++)
        if (supported_schemes[ind].scheme == u->scheme)
          break;
      if (ind == ARRAY_SIZE (supported_schemes))
        return URLUNKNOWN;
      u->port = supported_schemes[ind].default_port;
    }
  /* Some delimiter troubles...  */
  if (url[i] == '/' && url[i - 1] != ':')
    ++i;
  if (type == URLHTTP)
    while (url[i] && url[i] == '/')
      ++i;
  u->path = (char *)xmalloc (strlen (url + i) + 8);
  strcpy (u->path, url + i);
  if (type == URLFTP)
    {
      u->ftp_type = process_ftp_type (u->path);
      /* #### We don't handle type `d' correctly yet.  */
      if (!u->ftp_type || TOUPPER (u->ftp_type) == 'D')
        u->ftp_type = 'I';
      DEBUGP (("ftp_type %c -> ", u->ftp_type));
    }
  DEBUGP (("opath %s -> ", u->path));
  /* Parse the username and password (if existing).  */
  parse_uname (url, &u->user, &u->passwd);
  /* Decode the strings, as per RFC 1738.  */
  decode_string (u->host);
  decode_string (u->path);
  if (u->user)
    decode_string (u->user);
  if (u->passwd)
    decode_string (u->passwd);
  /* Parse the directory.  */
  parse_dir (u->path, &u->dir, &u->file);
  DEBUGP (("dir %s -> file %s -> ", u->dir, u->file));
  /* Simplify the directory.  */
  path_simplify (u->dir);
  /* Remove the leading `/' in HTTP.  */
  if (type == URLHTTP && *u->dir == '/')
    strcpy (u->dir, u->dir + 1);
  DEBUGP (("ndir %s\n", u->dir));
  /* Strip trailing `/'.  */
  l = strlen (u->dir);
  if (l > 1 && u->dir[l - 1] == '/')
    u->dir[l - 1] = '\0';
  /* Re-create the path: */
  abs_ftp = (u->scheme == SCHEME_FTP && *u->dir == '/');
  /*  sprintf (u->path, "%s%s%s%s", abs_ftp ? "%2F": "/",
      abs_ftp ? (u->dir + 1) : u->dir, *u->dir ? "/" : "", u->file); */
  strcpy (u->path, abs_ftp ? "%2F" : "/");
  strcat (u->path, abs_ftp ? (u->dir + 1) : u->dir);
  strcat (u->path, *u->dir ? "/" : "");
  strcat (u->path, u->file);
  ENCODE (u->path);
  DEBUGP (("newpath: %s\n", u->path));
  /* Create the clean URL.  */
  u->url = str_url (u, 0);
  return URLOK;
}
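
/* A minimal usage sketch for the parser above (the URL is just an
   example):

     struct urlinfo *u = newurl ();
     if (parseurl ("http://user:pass@host.example.com:8000/dir/a.html",
                   u, 0) == URLOK)
       DEBUGP (("%s %d %s %s\n", u->host, u->port, u->dir, u->file));
     freeurl (u, 1);

   After a successful parse, u->host is "host.example.com", u->port is
   8000, u->dir is "dir", u->file is "a.html", u->user and u->passwd
   hold "user" and "pass", and u->url holds the canonical form built
   by str_url.  */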

/* Special versions of DOTP and DDOTP for parse_dir().  They work like
   DOTP and DDOTP, but they also recognize `?' as end-of-string
   delimiter.  This is needed for correct handling of query
   strings.  */

#define PD_DOTP(x)  ((*(x) == '.') && (!*((x) + 1) || *((x) + 1) == '?'))
#define PD_DDOTP(x) ((*(x) == '.') && (*((x) + 1) == '.') \
                     && (!*((x) + 2) || *((x) + 2) == '?'))

/* Build the directory and filename components of the path.  Both
   components are *separately* malloc-ed strings!  It does not change
   the contents of path.

   If the path ends with "." or "..", they are (correctly) counted as
   directories.  */
static void
parse_dir (const char *path, char **dir, char **file)
{
  int i, l;

  l = urlpath_length (path);
  for (i = l; i && path[i] != '/'; i--);

  if (!i && *path != '/')   /* Just filename */
    {
      if (PD_DOTP (path) || PD_DDOTP (path))
        {
          *dir = strdupdelim (path, path + l);
          *file = xstrdup (path + l); /* normally empty, but could
                                         contain ?... */
        }
      else
        {
          *dir = xstrdup ("");     /* This is required because of FTP */
          *file = xstrdup (path);
        }
    }
  else if (!i)                 /* /filename */
    {
      if (PD_DOTP (path + 1) || PD_DDOTP (path + 1))
        {
          *dir = strdupdelim (path, path + l);
          *file = xstrdup (path + l); /* normally empty, but could
                                         contain ?... */
        }
      else
        {
          *dir = xstrdup ("/");
          *file = xstrdup (path + 1);
        }
    }
  else /* Nonempty directory with or without a filename */
    {
      if (PD_DOTP (path + i + 1) || PD_DDOTP (path + i + 1))
        {
          *dir = strdupdelim (path, path + l);
          *file = xstrdup (path + l); /* normally empty, but could
                                         contain ?... */
        }
      else
        {
          *dir = strdupdelim (path, path + i);
          *file = xstrdup (path + i + 1);
        }
    }
}
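
/* Some example splits produced by parse_dir (each component is a
   separately allocated string):

     "index.html"          ->  dir ""             file "index.html"
     "/index.html"         ->  dir "/"            file "index.html"
     "sw/wget/index.html"  ->  dir "sw/wget"      file "index.html"
     "sw/wget/.."          ->  dir "sw/wget/.."   file ""            */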

/* Find the optional username and password within the URL, as per
   RFC1738.  The returned user and passwd char pointers are
   malloc-ed.  */
static uerr_t
parse_uname (const char *url, char **user, char **passwd)
{
  int l;
  const char *p, *q, *col;
  char **where;

  *user = NULL;
  *passwd = NULL;

  /* Look for the end of the scheme identifier.  */
  l = url_skip_scheme (url);
  if (!l)
    return URLUNKNOWN;
  url += l;
  /* Is there an `@' character?  */
  for (p = url; *p && *p != '/'; p++)
    if (*p == '@')
      break;
  /* If not, return.  */
  if (*p != '@')
    return URLOK;
  /* Else find the username and password.  */
  for (p = q = col = url; *p && *p != '/'; p++)
    {
      if (*p == ':' && !*user)
        {
          *user = (char *)xmalloc (p - url + 1);
          memcpy (*user, url, p - url);
          (*user)[p - url] = '\0';
          col = p + 1;
        }
      if (*p == '@') q = p;
    }
  /* Decide whether you have only the username or both.  */
  where = *user ? passwd : user;
  *where = (char *)xmalloc (q - col + 1);
  memcpy (*where, col, q - col);
  (*where)[q - col] = '\0';
  return URLOK;
}

/* If PATH ends with `;type=X', return the character X.  */
static char
process_ftp_type (char *path)
{
  int len = strlen (path);

  if (len >= 7
      && !memcmp (path + len - 7, ";type=", 6))
    {
      path[len - 7] = '\0';
      return path[len - 1];
    }
  else
    return '\0';
}
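
/* For instance, given a writable buffer containing
   "pub/gnu/README;type=A", process_ftp_type truncates it to
   "pub/gnu/README" and returns 'A'; for a path without the suffix it
   returns '\0' and leaves the buffer alone.  */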

/* Recreate the URL string from the data in urlinfo.  This can be used
   to create a "canonical" representation of the URL.  If `hide' is
   non-zero (as it is when we're calling this on a URL we plan to
   print, but not when calling it to canonicalize a URL for use within
   the program), password will be hidden.  The forbidden characters in
   the URL will be cleansed.  */
char *
str_url (const struct urlinfo *u, int hide)
{
  char *res, *host, *user, *passwd, *scheme_name, *dir, *file;
  int i, l, ln, lu, lh, lp, lf, ld;
  unsigned short default_port;

  /* Look for the scheme.  */
  for (i = 0; i < ARRAY_SIZE (supported_schemes); i++)
    if (supported_schemes[i].scheme == u->scheme)
      break;
  if (i == ARRAY_SIZE (supported_schemes))
    return NULL;
  scheme_name = supported_schemes[i].leading_string;
  default_port = supported_schemes[i].default_port;
  host = encode_string (u->host);
  dir = encode_string (u->dir);
  file = encode_string (u->file);
  user = passwd = NULL;
  if (u->user)
    user = encode_string (u->user);
  if (u->passwd)
    {
      if (hide)
        /* Don't output the password, or someone might see it over the user's
           shoulder (or in saved wget output).  Don't give away the number of
           characters in the password, either, as we did in past versions of
           this code, when we replaced the password characters with 'x's. */
        passwd = xstrdup("<password>");
      else
        passwd = encode_string (u->passwd);
    }
  if (u->scheme == SCHEME_FTP && *dir == '/')
    {
      char *tmp = (char *)xmalloc (strlen (dir) + 3);
      /*sprintf (tmp, "%%2F%s", dir + 1);*/
      tmp[0] = '%';
      tmp[1] = '2';
      tmp[2] = 'F';
      strcpy (tmp + 3, dir + 1);
      xfree (dir);
      dir = tmp;
    }

  ln = strlen (scheme_name);
  lu = user ? strlen (user) : 0;
  lp = passwd ? strlen (passwd) : 0;
  lh = strlen (host);
  ld = strlen (dir);
  lf = strlen (file);
  res = (char *)xmalloc (ln + lu + lp + lh + ld + lf + 20); /* safe sex */
  /* sprintf (res, "%s%s%s%s%s%s:%d/%s%s%s", scheme_name,
     (user ? user : ""), (passwd ? ":" : ""),
     (passwd ? passwd : ""), (user ? "@" : ""),
     host, u->port, dir, *dir ? "/" : "", file); */
  l = 0;
  memcpy (res, scheme_name, ln);
  l += ln;
  if (user)
    {
      memcpy (res + l, user, lu);
      l += lu;
      if (passwd)
        {
          res[l++] = ':';
          memcpy (res + l, passwd, lp);
          l += lp;
        }
      res[l++] = '@';
    }
  memcpy (res + l, host, lh);
  l += lh;
  if (u->port != default_port)
    {
      res[l++] = ':';
      long_to_string (res + l, (long)u->port);
      l += numdigit (u->port);
    }
  res[l++] = '/';
  memcpy (res + l, dir, ld);
  l += ld;
  if (*dir)
    res[l++] = '/';
  strcpy (res + l, file);
  xfree (host);
  xfree (dir);
  xfree (file);
  FREE_MAYBE (user);
  FREE_MAYBE (passwd);
  return res;
}

/* Check whether two URL-s are equivalent, i.e. pointing to the same
   location.  Uses parseurl to parse them, and compares the canonical
   forms.

   Returns 1 if the URL1 is equivalent to URL2, 0 otherwise.  Also
   return 0 on error.  */
/* Do not compile unused code. */
#if 0
int
url_equal (const char *url1, const char *url2)
{
  struct urlinfo *u1, *u2;
  uerr_t err;
  int res;

  u1 = newurl ();
  err = parseurl (url1, u1, 0);
  if (err != URLOK)
    {
      freeurl (u1, 1);
      return 0;
    }
  u2 = newurl ();
  err = parseurl (url2, u2, 0);
  if (err != URLOK)
    {
      freeurl (u1, 1);
      freeurl (u2, 1);
      return 0;
    }
  res = !strcmp (u1->url, u2->url);
  freeurl (u1, 1);
  freeurl (u2, 1);
  return res;
}
#endif /* 0 */

urlpos *
get_urls_file (const char *file)
{
  struct file_memory *fm;
  urlpos *head, *tail;
  const char *text, *text_end;

  /* Load the file.  */
  fm = read_file (file);
  if (!fm)
    {
      logprintf (LOG_NOTQUIET, "%s: %s\n", file, strerror (errno));
      return NULL;
    }
  DEBUGP (("Loaded %s (size %ld).\n", file, fm->length));
  head = tail = NULL;
  text = fm->content;
  text_end = fm->content + fm->length;
  while (text < text_end)
    {
      const char *line_beg = text;
      const char *line_end = memchr (text, '\n', text_end - text);
      if (!line_end)
        line_end = text_end;
      else
        ++line_end;
      text = line_end;
      while (line_beg < line_end
             && ISSPACE (*line_beg))
        ++line_beg;
      while (line_end > line_beg + 1
             && ISSPACE (*(line_end - 1)))
        --line_end;
      if (line_end > line_beg)
        {
          urlpos *entry = (urlpos *)xmalloc (sizeof (urlpos));
          memset (entry, 0, sizeof (*entry));
          entry->next = NULL;
          entry->url = strdupdelim (line_beg, line_end);
          if (!head)
            head = entry;
          else
            tail->next = entry;
          tail = entry;
        }
    }
  read_file_free (fm);
  return head;
}

/* Free the linked list of urlpos.  */
void
free_urlpos (urlpos *l)
{
  while (l)
    {
      urlpos *next = l->next;
      xfree (l->url);
      FREE_MAYBE (l->local_name);
      xfree (l);
      l = next;
    }
}

/* Rotate FNAME opt.backups times */
void
rotate_backups(const char *fname)
{
  int maxlen = strlen (fname) + 1 + numdigit (opt.backups) + 1;
  char *from = (char *)alloca (maxlen);
  char *to = (char *)alloca (maxlen);
  struct stat sb;
  int i;

  if (stat (fname, &sb) == 0)
    if (S_ISREG (sb.st_mode) == 0)
      return;

  for (i = opt.backups; i > 1; i--)
    {
      sprintf (from, "%s.%d", fname, i - 1);
      sprintf (to, "%s.%d", fname, i);
      /* #### This will fail on machines without the rename() system
         call.  */
      rename (from, to);
    }

  sprintf (to, "%s.%d", fname, 1);
  rename(fname, to);
}

/* Create all the necessary directories for PATH (a file).  Calls
   mkdirhier() internally.  */
int
mkalldirs (const char *path)
{
  const char *p;
  char *t;
  struct stat st;
  int res;

  p = path + strlen (path);
  for (; *p != '/' && p != path; p--);
  /* Don't create if it's just a file.  */
  if ((p == path) && (*p != '/'))
    return 0;
  t = strdupdelim (path, p);
  /* Check whether the directory exists.  */
  if ((stat (t, &st) == 0))
    {
      if (S_ISDIR (st.st_mode))
        {
          xfree (t);
          return 0;
        }
      else
        {
          /* If the dir exists as a file name, remove it first.  This
             is *only* for Wget to work with buggy old CERN http
             servers.  Here is the scenario: When Wget tries to
             retrieve a directory without a slash, e.g.
             http://foo/bar (bar being a directory), CERN server will
             not redirect it to http://foo/bar/ -- it will generate a
             directory listing containing links to bar/file1,
             bar/file2, etc.  Wget will lose because it saves this
             HTML listing to a file `bar', so it cannot create the
             directory.  To work around this, if the file of the same
             name exists, we just remove it and create the directory
             anyway.  */
          DEBUGP (("Removing %s because of directory danger!\n", t));
          unlink (t);
        }
    }
  res = make_directory (t);
  if (res != 0)
    logprintf (LOG_NOTQUIET, "%s: %s", t, strerror (errno));
  xfree (t);
  return res;
}

static int
count_slashes (const char *s)
{
  int i = 0;
  while (*s)
    if (*s++ == '/')
      ++i;
  return i;
}

/* Return the path name of the URL-equivalent file name, with a
   remote-like structure of directories.  */
static char *
mkstruct (const struct urlinfo *u)
{
  char *host, *dir, *file, *res, *dirpref;
  int l;

  assert (u->dir != NULL);
  assert (u->host != NULL);

  if (opt.cut_dirs)
    {
      char *ptr = u->dir + (*u->dir == '/');
      int slash_count = 1 + count_slashes (ptr);
      int cut = MINVAL (opt.cut_dirs, slash_count);
      for (; cut && *ptr; ptr++)
        if (*ptr == '/')
          --cut;
      STRDUP_ALLOCA (dir, ptr);
    }
  else
    dir = u->dir + (*u->dir == '/');

  host = xstrdup (u->host);
  /* Check for the true name (or at least a consistent name for saving
     to directory) of HOST, reusing the hlist if possible.  */
  if (opt.add_hostdir && !opt.simple_check)
    {
      char *nhost = realhost (host);
      xfree (host);
      host = nhost;
    }
  /* Add dir_prefix and hostname (if required) to the beginning of
     dir.  */
  if (opt.add_hostdir)
    {
      if (!DOTP (opt.dir_prefix))
        {
          dirpref = (char *)alloca (strlen (opt.dir_prefix) + 1
                                    + strlen (host) + 1);
          sprintf (dirpref, "%s/%s", opt.dir_prefix, host);
        }
      else
        STRDUP_ALLOCA (dirpref, host);
    }
  else         /* not add_hostdir */
    {
      if (!DOTP (opt.dir_prefix))
        dirpref = opt.dir_prefix;
      else
        dirpref = "";
    }
  xfree (host);

  /* If there is a prefix, prepend it.  */
  if (*dirpref)
    {
      char *newdir = (char *)alloca (strlen (dirpref) + 1 + strlen (dir) + 2);
      sprintf (newdir, "%s%s%s", dirpref, *dir == '/' ? "" : "/", dir);
      dir = newdir;
    }
  dir = encode_string (dir);
  l = strlen (dir);
  if (l && dir[l - 1] == '/')
    dir[l - 1] = '\0';

  if (!*u->file)
    file = "index.html";
  else
    file = u->file;

  /* Finally, construct the full name.  */
  res = (char *)xmalloc (strlen (dir) + 1 + strlen (file) + 1);
  sprintf (res, "%s%s%s", dir, *dir ? "/" : "", file);
  xfree (dir);
  return res;
}

/* Return a malloced copy of S, but protect any '/' characters. */

static char *
file_name_protect_query_string (const char *s)
{
  const char *from;
  char *to, *dest;
  int destlen = 0;
  for (from = s; *from; from++)
    {
      ++destlen;
      if (*from == '/')
        destlen += 2;   /* each / gets replaced with %2F, so
                           it adds two more chars.  */
    }
  dest = (char *)xmalloc (destlen + 1);
  for (from = s, to = dest; *from; from++)
    {
      if (*from != '/')
        *to++ = *from;
      else
        {
          *to++ = '%';
          *to++ = '2';
          *to++ = 'F';
        }
    }
  assert (to - dest == destlen);
  *to = '\0';
  return dest;
}

/* Create a unique filename, corresponding to a given URL.  Calls
   mkstruct if necessary.  Does *not* actually create any directories.  */
char *
url_filename (const struct urlinfo *u)
{
  char *file, *name;
  int have_prefix = 0;  /* whether we must prepend opt.dir_prefix */

  if (opt.dirstruct)
    {
      file = mkstruct (u);
      have_prefix = 1;
    }
  else
    {
      if (!*u->file)
        file = xstrdup ("index.html");
      else
        {
          /* If the URL came with a query string, u->file will contain
             a question mark followed by query string contents.  These
             contents can contain '/' which would make us create
             unwanted directories.  These slashes must be protected
             explicitly.  */
          if (!strchr (u->file, '/'))
            file = xstrdup (u->file);
          else
            {
              /*assert (strchr (u->file, '?') != NULL);*/
              file = file_name_protect_query_string (u->file);
            }
        }
    }

  if (!have_prefix)
    {
      /* Check whether the prefix directory is something other than "."
         before prepending it.  */
      if (!DOTP (opt.dir_prefix))
        {
          char *nfile = (char *)xmalloc (strlen (opt.dir_prefix)
                                         + 1 + strlen (file) + 1);
          sprintf (nfile, "%s/%s", opt.dir_prefix, file);
          xfree (file);
          file = nfile;
        }
    }
  /* DOS-ish file systems don't like `%' signs in them; we change it
     to `@'.  */
#ifdef WINDOWS
  {
    char *p = file;
    for (p = file; *p; p++)
      if (*p == '%')
        *p = '@';
  }
#endif /* WINDOWS */

  /* Check the cases in which the unique extensions are not used:
     1) Clobbering is turned off (-nc).
     2) Retrieval with regetting.
     3) Timestamping is used.
     4) Hierarchy is built.

     The exception is the case when file does exist and is a
     directory (actually support for bad httpd-s).  */
  if ((opt.noclobber || opt.always_rest || opt.timestamping || opt.dirstruct)
      && !(file_exists_p (file) && !file_non_directory_p (file)))
    return file;

  /* Find a unique name.  */
  name = unique_name (file);
  xfree (file);
  return name;
}

/* Like strlen(), but allow the URL to be ended with '?'.  */
static int
urlpath_length (const char *url)
{
  const char *q = strchr (url, '?');
  if (q)
    return q - url;
  return strlen (url);
}

/* Find the last occurrence of character C in the range [b, e), or
   NULL, if none are present.  This is almost completely equivalent to
   { *e = '\0'; return strrchr(b); }, except that it doesn't change
   the contents of the string.  */
static const char *
find_last_char (const char *b, const char *e, char c)
{
  for (; e > b; e--)
    if (*e == c)
      return e;
  return NULL;
}

/* Resolve the result of "linking" a base URI (BASE) to a
   link-specified URI (LINK).

   Either of the URIs may be absolute or relative, complete with the
   host name, or path only.  This tries to behave "reasonably" in all
   foreseeable cases.  It employs little specific knowledge about
   schemes or URL-specific stuff -- it just works on strings.

   The parameter LINKLENGTH is useful if LINK is not zero-terminated.
   See uri_merge for a gentler interface to this functionality.

   #### This function should handle `./' and `../' so that the evil
   path_simplify can go.  */
static char *
uri_merge_1 (const char *base, const char *link, int linklength, int no_scheme)
{
  char *constr;

  if (no_scheme)
    {
      const char *end = base + urlpath_length (base);

      if (*link != '/')
        {
          /* LINK is a relative URL: we need to replace everything
             after last slash (possibly empty) with LINK.

             So, if BASE is "whatever/foo/bar", and LINK is "qux/xyzzy",
             our result should be "whatever/foo/qux/xyzzy".  */
          int need_explicit_slash = 0;
          int span;
          const char *start_insert;
          const char *last_slash = find_last_char (base, end, '/');
          if (!last_slash)
            {
              /* No slash found at all.  Append LINK to what we have,
                 but we'll need a slash as a separator.

                 Example: if base == "foo" and link == "qux/xyzzy", then
                 we cannot just append link to base, because we'd get
                 "fooqux/xyzzy", whereas what we want is
                 "foo/qux/xyzzy".

                 To make sure the / gets inserted, we set
                 need_explicit_slash to 1.  We also set start_insert
                 to end + 1, so that the length calculations work out
                 correctly for one more (slash) character.  Accessing
                 that character is fine, since it will be the
                 delimiter, '\0' or '?'.  */
              /* example: "foo?..." */
              /*               ^    ('?' gets changed to '/') */
              start_insert = end + 1;
              need_explicit_slash = 1;
            }
          else if (last_slash && last_slash != base && *(last_slash - 1) == '/')
            {
              /* example: http://host"  */
              /*                      ^ */
              start_insert = end + 1;
              need_explicit_slash = 1;
            }
          else
            {
              /* example: "whatever/foo/bar" */
              /*                        ^    */
              start_insert = last_slash + 1;
            }

          span = start_insert - base;
          constr = (char *)xmalloc (span + linklength + 1);
          if (span)
            memcpy (constr, base, span);
          if (need_explicit_slash)
            constr[span - 1] = '/';
          if (linklength)
            memcpy (constr + span, link, linklength);
          constr[span + linklength] = '\0';
        }
      else /* *link == `/' */
        {
          /* LINK is an absolute path: we need to replace everything
             after (and including) the FIRST slash with LINK.

             So, if BASE is "http://host/whatever/foo/bar", and LINK is
             "/qux/xyzzy", our result should be
             "http://host/qux/xyzzy".  */
          int span;
          const char *slash;
          const char *start_insert = NULL; /* for gcc to shut up. */
          const char *pos = base;
          int seen_slash_slash = 0;
          /* We're looking for the first slash, but want to ignore
             double slash. */
        again:
          slash = memchr (pos, '/', end - pos);
          if (slash && !seen_slash_slash)
            if (*(slash + 1) == '/')
              {
                pos = slash + 2;
                seen_slash_slash = 1;
                goto again;
              }

          /* At this point, SLASH is the location of the first / after
             "//", or the first slash altogether.  START_INSERT is the
             pointer to the location where LINK will be inserted.  When
             examining the last two examples, keep in mind that LINK
             begins with '/'. */

          if (!slash && !seen_slash_slash)
            /* example: "foo" */
            /*           ^    */
            start_insert = base;
          else if (!slash && seen_slash_slash)
            /* example: "http://foo" */
            /*                     ^ */
            start_insert = end;
          else if (slash && !seen_slash_slash)
            /* example: "foo/bar" */
            /*           ^        */
            start_insert = base;
          else if (slash && seen_slash_slash)
            /* example: "http://something/" */
            /*                           ^  */
            start_insert = slash;

          span = start_insert - base;
          constr = (char *)xmalloc (span + linklength + 1);
          if (span)
            memcpy (constr, base, span);
          if (linklength)
            memcpy (constr + span, link, linklength);
          constr[span + linklength] = '\0';
        }
    }
  else /* !no_scheme */
    {
      constr = strdupdelim (link, link + linklength);
    }
  return constr;
}

/* Merge BASE with LINK and return the resulting URI.  This is an
   interface to uri_merge_1 that assumes that LINK is a
   zero-terminated string.  */
char *
uri_merge (const char *base, const char *link)
{
  return uri_merge_1 (base, link, strlen (link), !url_has_scheme (link));
}
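
/* A few example merges (the URLs are made up):

     uri_merge ("http://host/dir/page.html", "pic.gif")
         -> "http://host/dir/pic.gif"
     uri_merge ("http://host/dir/page.html", "/top.html")
         -> "http://host/top.html"
     uri_merge ("http://host/dir/page.html", "ftp://other/file")
         -> "ftp://other/file"                                      */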

/* Optimize URL by host, destructively replacing u->host with realhost
   (u->host).  Do this regardless of opt.simple_check.  */
void
opt_url (struct urlinfo *u)
{
  /* Find the "true" host.  */
  char *host = realhost (u->host);
  xfree (u->host);
  u->host = host;
  assert (u->dir != NULL);      /* the URL must have been parsed */
  /* Refresh the printed representation.  */
  xfree (u->url);
  u->url = str_url (u, 0);
}

/* Returns proxy host address, in accordance with SCHEME.  */
char *
getproxy (enum url_scheme scheme)
{
  char *proxy = NULL;

  switch (scheme)
    {
    case SCHEME_HTTP:
      proxy = opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
      break;
#ifdef HAVE_SSL
    case SCHEME_HTTPS:
      proxy = opt.https_proxy ? opt.https_proxy : getenv ("https_proxy");
      break;
#endif
    case SCHEME_FTP:
      proxy = opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
      break;
    case SCHEME_INVALID:
      break;
    }
  if (!proxy || !*proxy)
    return NULL;
  return proxy;
}

/* Should a host be accessed through proxy, concerning no_proxy?  */
int
no_proxy_match (const char *host, const char **no_proxy)
{
  if (!no_proxy)
    return 1;
  else
    return !sufmatch (no_proxy, host);
}
|
|
|
|
|
|
2000-11-19 15:50:10 -05:00
|
|
|
|
static void write_backup_file PARAMS ((const char *, downloaded_file_t));
|
2001-01-04 08:53:53 -05:00
|
|
|
|
static void replace_attr PARAMS ((const char **, int, FILE *, const char *));
|
2000-11-19 15:50:10 -05:00
|
|
|
|
|

/* Change the links in an HTML document.  Accepts a list that defines
   the positions of all the links in the document.  */
void
convert_links (const char *file, urlpos *l)
{
  struct file_memory *fm;
  FILE *fp;
  const char *p;
  downloaded_file_t downloaded_file_return;

  logprintf (LOG_VERBOSE, _("Converting %s... "), file);

  {
    /* First we do a "dry run": go through the list L and see whether
       any URL needs to be converted in the first place.  If not, just
       leave the file alone.  */
    int count = 0;
    urlpos *dry = l;
    for (dry = l; dry; dry = dry->next)
      if (dry->convert != CO_NOCONVERT)
        ++count;
    if (!count)
      {
        logputs (LOG_VERBOSE, _("nothing to do.\n"));
        return;
      }
  }

  fm = read_file (file);
  if (!fm)
    {
      logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
                 file, strerror (errno));
      return;
    }

  downloaded_file_return = downloaded_file (CHECK_FOR_FILE, file);
  if (opt.backup_converted && downloaded_file_return)
    write_backup_file (file, downloaded_file_return);

  /* Before opening the file for writing, unlink the file.  This is
     important if the data in FM is mmaped.  In such a case, nulling
     the file, which is what fopen() below does, would make us read
     all zeroes from the mmaped region.  */
  if (unlink (file) < 0 && errno != ENOENT)
    {
      logprintf (LOG_NOTQUIET, _("Unable to delete `%s': %s\n"),
                 file, strerror (errno));
      read_file_free (fm);
      return;
    }
  /* Now open the file for writing.  */
  fp = fopen (file, "wb");
  if (!fp)
    {
      logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
                 file, strerror (errno));
      read_file_free (fm);
      return;
    }
  /* Here we loop through all the URLs in the file, replacing the ones
     that have been downloaded with relative references.  */
  p = fm->content;
  for (; l; l = l->next)
    {
      char *url_start = fm->content + l->pos;

      if (l->pos >= fm->length)
        {
          DEBUGP (("Something strange is going on.  Please investigate.\n"));
          break;
        }
      /* If the URL is not to be converted, skip it.  */
      if (l->convert == CO_NOCONVERT)
        {
          DEBUGP (("Skipping %s at position %d.\n", l->url, l->pos));
          continue;
        }

      /* Echo the file contents, up to the offending URL's opening
         quote, to the outfile.  */
      fwrite (p, 1, url_start - p, fp);
      p = url_start;
      if (l->convert == CO_CONVERT_TO_RELATIVE)
        {
          /* Convert the absolute URL to a relative one.  */
          char *newname = construct_relative (file, l->local_name);
          char *quoted_newname = html_quote_string (newname);
          replace_attr (&p, l->size, fp, quoted_newname);
          DEBUGP (("TO_RELATIVE: %s to %s at position %d in %s.\n",
                   l->url, newname, l->pos, file));
          xfree (newname);
          xfree (quoted_newname);
        }
      else if (l->convert == CO_CONVERT_TO_COMPLETE)
        {
          /* Convert the link to an absolute URL.  */
          char *newlink = l->url;
          char *quoted_newlink = html_quote_string (newlink);
          replace_attr (&p, l->size, fp, quoted_newlink);
          DEBUGP (("TO_COMPLETE: <something> to %s at position %d in %s.\n",
                   newlink, l->pos, file));
          xfree (quoted_newlink);
        }
    }
  /* Output the rest of the file.  */
  if (p - fm->content < fm->length)
    fwrite (p, 1, fm->length - (p - fm->content), fp);
  fclose (fp);
  read_file_free (fm);
  logputs (LOG_VERBOSE, _("done.\n"));
}
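
/* A sketch of how convert_links() is driven (illustrative; the real
   driver is convert_all_links(), outside this file): each downloaded
   HTML file is re-scanned, every link is marked CO_CONVERT_TO_RELATIVE
   if its target was downloaded or CO_CONVERT_TO_COMPLETE if it was not,
   and the file is then rewritten in place.  collect_links_for() below
   is a hypothetical placeholder for that scanning step, not a real
   Wget function.  */
#if 0
  urlpos *links = collect_links_for (file);   /* hypothetical scanner */
  /* ... fill in links->convert, links->pos and links->size ... */
  convert_links (file, links);
#endif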

/* Construct and return a malloced copy of the relative link from two
   pieces of information: local name S1 of the referring file and
   local name S2 of the referred file.

   So, if S1 is "jagor.srce.hr/index.html" and S2 is
   "jagor.srce.hr/images/news.gif", the function will return
   "images/news.gif".

   Alternatively, if S1 is "fly.cc.fer.hr/ioccc/index.html", and S2 is
   "fly.cc.fer.hr/images/fly.gif", the function will return
   "../images/fly.gif".

   Caveats: S1 should not begin with `/', unless S2 also begins with
   `/'.  S1 should not contain things like ".." and such --
   construct_relative ("fly/ioccc/../index.html",
   "fly/images/fly.gif") will fail.  (A workaround is to call
   something like path_simplify() on S1).  */
static char *
construct_relative (const char *s1, const char *s2)
{
  int i, cnt, sepdirs1;
  char *res;

  if (*s2 == '/')
    return xstrdup (s2);
  /* S1 should *not* be absolute, if S2 wasn't.  */
  assert (*s1 != '/');
  i = cnt = 0;
  /* Skip the directories common to both strings.  */
  while (1)
    {
      while (s1[i] && s2[i]
             && (s1[i] == s2[i])
             && (s1[i] != '/')
             && (s2[i] != '/'))
        ++i;
      if (s1[i] == '/' && s2[i] == '/')
        cnt = ++i;
      else
        break;
    }
  for (sepdirs1 = 0; s1[i]; i++)
    if (s1[i] == '/')
      ++sepdirs1;
  /* Now construct the result as:
     - "../" repeated sepdirs1 times, followed by
     - all the non-shared directories and the file name of S2.  */
  res = (char *)xmalloc (3 * sepdirs1 + strlen (s2 + cnt) + 1);
  for (i = 0; i < sepdirs1; i++)
    memcpy (res + 3 * i, "../", 3);
  strcpy (res + 3 * i, s2 + cnt);
  return res;
}
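
/* Worked example for construct_relative() (a sketch; the function is
   static, so a demo like this would have to live in this file, and it
   is left uncompiled under #if 0).  */
#if 0
static void
construct_relative_demo (void)
{
  /* Shared prefix "jagor.srce.hr/" is dropped: yields "images/news.gif".  */
  char *a = construct_relative ("jagor.srce.hr/index.html",
                                "jagor.srce.hr/images/news.gif");
  /* One extra directory in the referrer gives one "../":
     yields "../images/fly.gif".  */
  char *b = construct_relative ("fly.cc.fer.hr/ioccc/index.html",
                                "fly.cc.fer.hr/images/fly.gif");
  printf ("%s\n%s\n", a, b);
  xfree (a);
  xfree (b);
}
#endif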

/* Add URL to the head of the list L.  */
urlpos *
add_url (urlpos *l, const char *url, const char *file)
{
  urlpos *t;

  t = (urlpos *)xmalloc (sizeof (urlpos));
  memset (t, 0, sizeof (*t));
  t->url = xstrdup (url);
  t->local_name = xstrdup (file);
  t->next = l;
  return t;
}
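
/* Sketch of building a conversion list with add_url() (illustrative
   only; in Wget the lists are produced by the recursion machinery, and
   the convert/pos/size fields are filled in from the HTML parse before
   convert_links() runs).  */
#if 0
static void
add_url_demo (void)
{
  urlpos *list = NULL;
  list = add_url (list, "http://example.com/pics/a.gif",
                  "example.com/pics/a.gif");
  list = add_url (list, "http://example.com/b.html",
                  "example.com/b.html");
  /* Entries are prepended, so the second URL is now at the head of LIST,
     with all other fields zeroed by the memset() above.  */
}
#endif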

static void
write_backup_file (const char *file, downloaded_file_t downloaded_file_return)
{
  /* Rather than just writing over the original .html file with the
     converted version, save the former to *.orig.  Note we only do
     this for files we've _successfully_ downloaded, so we don't
     clobber .orig files sitting around from previous invocations.  */

  /* Construct the backup filename as the original name plus ".orig".  */
  size_t filename_len = strlen(file);
  char*  filename_plus_orig_suffix;
  boolean already_wrote_backup_file = FALSE;
  slist* converted_file_ptr;
  static slist* converted_files = NULL;

  if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED)
    {
      /* Just write "orig" over "html".  We need to do it this way
         because when we're checking to see if we've downloaded the
         file before (to see if we can skip downloading it), we don't
         know if it's a text/html file.  Therefore we don't know yet
         at that stage that -E is going to cause us to tack on
         ".html", so we need to compare vs. the original URL plus
         ".orig", not the original URL plus ".html.orig".  */
      filename_plus_orig_suffix = alloca (filename_len + 1);
      strcpy(filename_plus_orig_suffix, file);
      strcpy((filename_plus_orig_suffix + filename_len) - 4, "orig");
    }
  else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */
    {
      /* Append ".orig" to the name.  */
      filename_plus_orig_suffix = alloca (filename_len + sizeof(".orig"));
      strcpy(filename_plus_orig_suffix, file);
      strcpy(filename_plus_orig_suffix + filename_len, ".orig");
    }

  /* We can get called twice on the same URL thanks to the
     convert_all_links() call in main().  If we write the .orig file
     each time in such a case, it'll end up containing the first-pass
     conversion, not the original file.  So, see if we've already been
     called on this file.  */
  converted_file_ptr = converted_files;
  while (converted_file_ptr != NULL)
    if (strcmp(converted_file_ptr->string, file) == 0)
      {
        already_wrote_backup_file = TRUE;
        break;
      }
    else
      converted_file_ptr = converted_file_ptr->next;

  if (!already_wrote_backup_file)
    {
      /* Rename <file> to <file>.orig before the former gets written over.  */
      if (rename(file, filename_plus_orig_suffix) != 0)
        logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"),
                   file, filename_plus_orig_suffix, strerror (errno));

      /* Remember that we've already written a .orig backup for this file.
         Note that we never free this memory since we need it till the
         convert_all_links() call, which is one of the last things the
         program does before terminating.  BTW, I'm not sure if it would be
         safe to just set 'converted_file_ptr->string' to 'file' below,
         rather than making a copy of the string...  Another note is that I
         thought I could just add a field to the urlpos structure saying
         that we'd written a .orig file for this URL, but that didn't work,
         so I had to make this separate list.
         -- Dan Harkless <wget@harkless.org>

         This [adding a field to the urlpos structure] didn't work
         because convert_file() is called twice: once after all its
         sublinks have been retrieved in recursive_retrieve(), and
         once at the end of the day in convert_all_links().  The
         original linked list collected in recursive_retrieve() is
         lost after the first invocation of convert_links(), and
         convert_all_links() makes a new one (it calls get_urls_html()
         for each file it covers).  That's why your first approach didn't
         work.  The way to make it work is perhaps to make this flag a
         field in the `urls_html' list.
         -- Hrvoje Niksic <hniksic@arsdigita.com>  */
      converted_file_ptr = xmalloc(sizeof(*converted_file_ptr));
      converted_file_ptr->string = xstrdup(file);  /* die on out-of-mem. */
      converted_file_ptr->next = converted_files;
      converted_files = converted_file_ptr;
    }
}
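
/* The two suffix rules used above, illustrated standalone (a sketch,
   not compiled into Wget; the file names are made up).  When -E has
   already appended ".html", the trailing "html" is overwritten with
   "orig"; otherwise ".orig" is simply appended.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char with_ext[] = "foo/index.html";   /* -E appended ".html" */
  char plain[64]  = "foo/data.txt";     /* downloaded normally */

  /* Overwrite the final "html": "foo/index.html" -> "foo/index.orig".  */
  strcpy (with_ext + strlen (with_ext) - 4, "orig");
  /* Append ".orig": "foo/data.txt" -> "foo/data.txt.orig".  */
  strcat (plain, ".orig");

  printf ("%s\n%s\n", with_ext, plain);
  return 0;
}
#endif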

static int find_fragment PARAMS ((const char *, int, const char **,
                                  const char **));

static void
replace_attr (const char **pp, int raw_size, FILE *fp, const char *new_str)
{
  const char *p = *pp;
  int quote_flag = 0;
  int size = raw_size;
  char quote_char = '\"';
  const char *frag_beg, *frag_end;

  /* Structure of our string is:
       "...old-contents..."
       <---    l->size   --->  (with quotes)
     OR:
       ...old-contents...
       <---   l->size  -->     (no quotes)  */

  if (*p == '\"' || *p == '\'')
    {
      quote_char = *p;
      quote_flag = 1;
      ++p;
      size -= 2;                /* disregard opening and closing quote */
    }
  putc (quote_char, fp);
  fputs (new_str, fp);

  /* Look for the fragment identifier, if any.  */
  if (find_fragment (p, size, &frag_beg, &frag_end))
    fwrite (frag_beg, 1, frag_end - frag_beg, fp);
  p += size;
  if (quote_flag)
    ++p;
  putc (quote_char, fp);
  *pp = p;
}
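
/* Worked example of replace_attr() (a sketch under #if 0; the URL and
   buffer are made up).  Given a quoted attribute value in the buffer
   and a replacement string, it writes the replacement re-quoted with
   the original quote character and keeps the original fragment.  */
#if 0
static void
replace_attr_demo (void)
{
  /* The raw attribute as it sits in the HTML buffer, quotes included.  */
  static const char raw[] =
    "\"http://www.example.com/dir/page.html#sec2\" more html...";
  const char *p = raw;
  int raw_size = 2 + strlen ("http://www.example.com/dir/page.html#sec2");

  replace_attr (&p, raw_size, stdout, "page.html");
  /* Writes: "page.html#sec2"  (quoting and fragment preserved);
     P now points just past the closing quote inside RAW.  */
}
#endif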

/* Find the first occurrence of '#' in [BEG, BEG+SIZE) that is not
   preceded by '&'.  If the character is not found, return zero.  If
   the character is found, return 1 and set BP and EP to point to the
   beginning and end of the region.

   This is used for finding the fragment identifiers in URLs.  */

static int
find_fragment (const char *beg, int size, const char **bp, const char **ep)
{
  const char *end = beg + size;
  int saw_amp = 0;
  for (; beg < end; beg++)
    {
      switch (*beg)
        {
        case '&':
          saw_amp = 1;
          break;
        case '#':
          if (!saw_amp)
            {
              *bp = beg;
              *ep = end;
              return 1;
            }
          /* fallthrough */
        default:
          saw_amp = 0;
        }
    }
  return 0;
}
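
/* Small demonstration of find_fragment() (a sketch under #if 0): a
   plain '#' introduces a fragment, while a '#' that directly follows
   '&' -- as in the SGML entity "&#38;" -- is ignored.  */
#if 0
static void
find_fragment_demo (void)
{
  const char *bp, *ep;
  const char *u1 = "chapter.html#intro";   /* real fragment */
  const char *u2 = "q?x=a&#38;b";          /* '#' is part of an entity */

  if (find_fragment (u1, (int) strlen (u1), &bp, &ep))
    fwrite (bp, 1, ep - bp, stdout);       /* prints "#intro" */
  if (!find_fragment (u2, (int) strlen (u2), &bp, &ep))
    puts ("\nno fragment in u2");
}
#endif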

typedef struct _downloaded_file_list {
  char*                          file;
  downloaded_file_t              download_type;
  struct _downloaded_file_list*  next;
} downloaded_file_list;

static downloaded_file_list *downloaded_files;

/* Remembers which files have been downloaded.  In the standard case,
   this should be called with mode == FILE_DOWNLOADED_NORMALLY for each
   file we actually download successfully (i.e. not for ones we have
   failures on or that we skip due to -N).

   When we've downloaded a file and tacked on a ".html" extension due
   to -E, call this function with
   FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED rather than
   FILE_DOWNLOADED_NORMALLY.

   If you just want to check whether a file has been previously added
   without adding it, call with mode == CHECK_FOR_FILE.  Please be sure
   to call this function with local filenames, not remote URLs.  */
downloaded_file_t
downloaded_file (downloaded_file_t mode, const char* file)
{
  boolean found_file = FALSE;
  downloaded_file_list* rover = downloaded_files;

  while (rover != NULL)
    if (strcmp(rover->file, file) == 0)
      {
        found_file = TRUE;
        break;
      }
    else
      rover = rover->next;

  if (found_file)
    return rover->download_type;  /* file had already been downloaded */
  else
    {
      if (mode != CHECK_FOR_FILE)
        {
          rover = xmalloc(sizeof(*rover));
          rover->file = xstrdup(file);  /* use xstrdup() so we die on out-of-mem. */
          rover->download_type = mode;
          rover->next = downloaded_files;
          downloaded_files = rover;
        }

      return FILE_NOT_ALREADY_DOWNLOADED;
    }
}
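
/* Usage sketch for downloaded_file() (under #if 0; the file name is
   made up): the first call records the download and reports that the
   file was not seen before; a later CHECK_FOR_FILE query returns the
   recorded mode without adding anything.  */
#if 0
static void
downloaded_file_demo (void)
{
  downloaded_file_t r1 = downloaded_file (FILE_DOWNLOADED_NORMALLY,
                                          "example.com/index.html");
  downloaded_file_t r2 = downloaded_file (CHECK_FOR_FILE,
                                          "example.com/index.html");
  /* r1 == FILE_NOT_ALREADY_DOWNLOADED, r2 == FILE_DOWNLOADED_NORMALLY.  */
}
#endif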

void
downloaded_files_free (void)
{
  downloaded_file_list* rover = downloaded_files;
  while (rover)
    {
      downloaded_file_list *next = rover->next;
      xfree (rover->file);
      xfree (rover);
      rover = next;
    }
}