/* Conversion of links to local files.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
   2014, 2015 Free Software Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget.  If not, see <http://www.gnu.org/licenses/>.

Additional permission under GNU GPL version 3 section 7

If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, the Free Software Foundation
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.  */

#include "wget.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "convert.h"
#include "url.h"
#include "recur.h"
#include "utils.h"
#include "hash.h"
#include "ptimer.h"
#include "res.h"
#include "html-url.h"
#include "css-url.h"
#include "iri.h"
#include "xstrndup.h"

static struct hash_table *dl_file_url_map;
struct hash_table *dl_url_file_map;

/* Set of HTML/CSS files downloaded in this Wget run, used for link
   conversion after Wget is done.  */
struct hash_table *downloaded_html_set;
struct hash_table *downloaded_css_set;

static void convert_links (const char *, struct urlpos *);

static void
convert_links_in_hashtable (struct hash_table *downloaded_set,
                            int is_css,
                            int *file_count)
{
  int i;
  int cnt;
  char **file_array;

  cnt = 0;
  if (downloaded_set)
    cnt = hash_table_count (downloaded_set);
  if (cnt == 0)
    return;
  file_array = alloca_array (char *, cnt);
  string_set_to_array (downloaded_set, file_array);

  for (i = 0; i < cnt; i++)
    {
      struct urlpos *urls, *cur_url;
      char *url;
      char *file = file_array[i];

      /* Determine the URL of the file.  get_urls_{html,css} will need
         it.  */
      url = hash_table_get (dl_file_url_map, file);
      if (!url)
        {
          DEBUGP (("Apparently %s has been removed.\n", file));
          continue;
        }

      DEBUGP (("Scanning %s (from %s)\n", file, url));

      /* Parse the file...  */
      urls = is_css ? get_urls_css_file (file, url) :
                      get_urls_html (file, url, NULL, NULL);

      /* We don't respect meta_disallow_follow here because, even if
         the file is not followed, we might still want to convert the
         links that have been followed from other files.  */

      for (cur_url = urls; cur_url; cur_url = cur_url->next)
        {
          char *local_name;
          struct url *u;
          struct iri *pi;

          if (cur_url->link_base_p)
            {
              /* Base references have been resolved by our parser, so
                 we turn the base URL into an empty string.  (Perhaps
                 we should remove the tag entirely?)  */
              cur_url->convert = CO_NULLIFY_BASE;
              continue;
            }

          /* We decide the direction of conversion according to whether
             a URL was downloaded.  Downloaded URLs will be converted
             ABS2REL, whereas non-downloaded will be converted REL2ABS.  */

          pi = iri_new ();
          set_uri_encoding (pi, opt.locale, true);

          u = url_parse (cur_url->url->url, NULL, pi, true);
          if (!u)
            continue;

          local_name = hash_table_get (dl_url_file_map, u->url);

          /* Decide on the conversion type.  */
          if (local_name)
            {
              /* We've downloaded this URL.  Convert it to relative
                 form.  We do this even if the URL already is in
                 relative form, because our directory structure may
                 not be identical to that on the server (think `-nd',
                 `--cut-dirs', etc.).  If --convert-file-only was passed,
                 we only convert the basename portion of the URL.  */
              cur_url->convert = (opt.convert_file_only
                                  ? CO_CONVERT_BASENAME_ONLY
                                  : CO_CONVERT_TO_RELATIVE);
              cur_url->local_name = xstrdup (local_name);
              DEBUGP (("will convert url %s to local %s\n", u->url, local_name));
            }
          else
            {
              /* We haven't downloaded this URL.  If it's not already
                 complete (including a full host name), convert it to
                 that form, so it can be reached while browsing this
                 HTML locally.  */
              if (!cur_url->link_complete_p)
                cur_url->convert = CO_CONVERT_TO_COMPLETE;
              cur_url->local_name = NULL;
              DEBUGP (("will convert url %s to complete\n", u->url));
            }

          url_free (u);
          iri_free (pi);
        }

      /* Convert the links in the file.  */
      convert_links (file, urls);
      ++*file_count;

      /* Free the data.  */
      free_urlpos (urls);
    }
}

/* This function is called when the retrieval is done to convert the
   links that have been downloaded.  It has to be called at the end of
   the retrieval, because only then does Wget know conclusively which
   URLs have been downloaded, and which not, so it can tell which
   direction to convert to.

   The "direction" means that the URLs to the files that have been
   downloaded get converted to the relative URL which will point to
   that file.  And the other URLs get converted to the remote URL on
   the server.

   All the downloaded HTML and CSS files are kept in
   downloaded_html_set and downloaded_css_set, and the downloaded URLs
   in dl_url_file_map.  All the information is extracted from those
   tables.  */
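
/* For instance (an illustrative sketch, not literal data from a run):
   if "http://host/dir/" was saved as "host/dir/index.html" and links
   to the downloaded "http://host/dir/a.css" and the skipped
   "/other.html", conversion rewrites the first reference to "a.css"
   (relative) and the second to "http://host/other.html" (complete).  */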

void
convert_all_links (void)
{
  double secs;
  int file_count = 0;

  struct ptimer *timer = ptimer_new ();

  convert_links_in_hashtable (downloaded_html_set, 0, &file_count);
  convert_links_in_hashtable (downloaded_css_set, 1, &file_count);

  secs = ptimer_measure (timer);
  logprintf (LOG_VERBOSE, _("Converted links in %d files in %s seconds.\n"),
             file_count, print_decimal (secs));

  ptimer_destroy (timer);
}

static void write_backup_file (const char *, downloaded_file_t);
static const char *replace_plain (const char *, int, FILE *, const char *);
static const char *replace_attr (const char *, int, FILE *, const char *);
static const char *replace_attr_refresh_hack (const char *, int, FILE *,
                                              const char *, int);
static char *local_quote_string (const char *, bool);
static char *construct_relative (const char *, const char *);
static char *convert_basename (const char *, const struct urlpos *);

/* Change the links in one file.  LINKS is a list of links in the
   document, along with their positions and the desired direction of
   the conversion.  */
static void
convert_links (const char *file, struct urlpos *links)
{
  struct file_memory *fm;
  FILE *fp;
  const char *p;
  downloaded_file_t downloaded_file_return;

  struct urlpos *link;
  int to_url_count = 0, to_file_count = 0;

  logprintf (LOG_VERBOSE, _("Converting links in %s... "), file);

  {
    /* First we do a "dry run": go through the list LINKS and see
       whether any URL needs to be converted in the first place.  If
       not, just leave the file alone.  */
    int dry_count = 0;
    struct urlpos *dry;
    for (dry = links; dry; dry = dry->next)
      if (dry->convert != CO_NOCONVERT)
        ++dry_count;
    if (!dry_count)
      {
        logputs (LOG_VERBOSE, _("nothing to do.\n"));
        return;
      }
  }

  fm = wget_read_file (file);
  if (!fm)
    {
      logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
                 file, strerror (errno));
      return;
    }

  downloaded_file_return = downloaded_file (CHECK_FOR_FILE, file);
  if (opt.backup_converted && downloaded_file_return)
    write_backup_file (file, downloaded_file_return);

  /* Before opening the file for writing, unlink the file.  This is
     important if the data in FM is mmaped.  In such case, nulling the
     file, which is what fopen() below does, would make us read all
     zeroes from the mmaped region.  */
  if (unlink (file) < 0 && errno != ENOENT)
    {
      logprintf (LOG_NOTQUIET, _("Unable to delete %s: %s\n"),
                 quote (file), strerror (errno));
      wget_read_file_free (fm);
      return;
    }
  /* Now open the file for writing.  */
  fp = fopen (file, "wb");
  if (!fp)
    {
      logprintf (LOG_NOTQUIET, _("Cannot convert links in %s: %s\n"),
                 file, strerror (errno));
      wget_read_file_free (fm);
      return;
    }

  /* Here we loop through all the URLs in file, replacing those of
     them that are downloaded with relative references.  */
  p = fm->content;
  for (link = links; link; link = link->next)
    {
      char *url_start = fm->content + link->pos;

      if (link->pos >= fm->length)
        {
          DEBUGP (("Something strange is going on. Please investigate."));
          break;
        }
      /* If the URL is not to be converted, skip it.  */
      if (link->convert == CO_NOCONVERT)
        {
          DEBUGP (("Skipping %s at position %d.\n", link->url->url, link->pos));
          continue;
        }

      /* Echo the file contents, up to the offending URL's opening
         quote, to the outfile.  */
      fwrite (p, 1, url_start - p, fp);
      p = url_start;

      switch (link->convert)
        {
        case CO_CONVERT_TO_RELATIVE:
          /* Convert absolute URL to relative.  */
          {
            char *newname = construct_relative (file, link->local_name);
            char *quoted_newname = local_quote_string (newname,
                                                       link->link_css_p);

            if (link->link_css_p)
              p = replace_plain (p, link->size, fp, quoted_newname);
            else if (!link->link_refresh_p)
              p = replace_attr (p, link->size, fp, quoted_newname);
            else
              p = replace_attr_refresh_hack (p, link->size, fp, quoted_newname,
                                             link->refresh_timeout);

            DEBUGP (("TO_RELATIVE: %s to %s at position %d in %s.\n",
                     link->url->url, newname, link->pos, file));

            xfree (newname);
            xfree (quoted_newname);
            ++to_file_count;
            break;
          }
        case CO_CONVERT_BASENAME_ONLY:
          {
            char *newname = convert_basename (p, link);
            char *quoted_newname = local_quote_string (newname, link->link_css_p);

            if (link->link_css_p)
              p = replace_plain (p, link->size, fp, quoted_newname);
            else if (!link->link_refresh_p)
              p = replace_attr (p, link->size, fp, quoted_newname);
            else
              p = replace_attr_refresh_hack (p, link->size, fp, quoted_newname,
                                             link->refresh_timeout);

            DEBUGP (("Converted file part only: %s to %s at position %d in %s.\n",
                     link->url->url, newname, link->pos, file));

            xfree (newname);
            xfree (quoted_newname);
            ++to_file_count;

            break;
          }
        case CO_CONVERT_TO_COMPLETE:
          /* Convert the link to absolute URL.  */
          {
            char *newlink = link->url->url;
            char *quoted_newlink = html_quote_string (newlink);

            if (link->link_css_p)
              p = replace_plain (p, link->size, fp, newlink);
            else if (!link->link_refresh_p)
              p = replace_attr (p, link->size, fp, quoted_newlink);
            else
              p = replace_attr_refresh_hack (p, link->size, fp, quoted_newlink,
                                             link->refresh_timeout);

            DEBUGP (("TO_COMPLETE: <something> to %s at position %d in %s.\n",
                     newlink, link->pos, file));

            xfree (quoted_newlink);
            ++to_url_count;
            break;
          }
        case CO_NULLIFY_BASE:
          /* Change the base href to "".  */
          p = replace_attr (p, link->size, fp, "");
          break;
        case CO_NOCONVERT:
          abort ();
          break;
        }
    }

  /* Output the rest of the file.  */
  if (p - fm->content < fm->length)
    fwrite (p, 1, fm->length - (p - fm->content), fp);
  fclose (fp);
  wget_read_file_free (fm);

  logprintf (LOG_VERBOSE, "%d-%d\n", to_file_count, to_url_count);
}

/* Construct and return a link that points from BASEFILE to LINKFILE.
   Both files should be local file names, BASEFILE of the referring
   file, and LINKFILE of the referred file.

   Examples:

   cr("foo", "bar")            -> "bar"
   cr("A/foo", "A/bar")        -> "bar"
   cr("A/foo", "A/B/bar")      -> "B/bar"
   cr("A/X/foo", "A/Y/bar")    -> "../Y/bar"
   cr("X/", "Y/bar")           -> "../Y/bar" (trailing slash does matter in BASE)

   Both files should be absolute or relative, otherwise strange
   results might ensue.  The function makes no special efforts to
   handle "." and ".." in links, so make sure they're not there
   (e.g. using path_simplify).  */

static char *
construct_relative (const char *basefile, const char *linkfile)
{
  char *link;
  int basedirs;
  const char *b, *l;
  int i, start;

  /* First, skip the initial directory components common to both
     files.  */
  start = 0;
  for (b = basefile, l = linkfile; *b == *l && *b != '\0'; ++b, ++l)
    {
      if (*b == '/')
        start = (b - basefile) + 1;
    }
  basefile += start;
  linkfile += start;

  /* With common directories out of the way, the situation we have is
     as follows:
         b - b1/b2/[...]/bfile
         l - l1/l2/[...]/lfile

     The link we're constructing needs to be:
       lnk - ../../l1/l2/[...]/lfile

     Where the number of ".."'s equals the number of bN directory
     components in B.  */

  /* Count the directory components in B.  */
  basedirs = 0;
  for (b = basefile; *b; b++)
    {
      if (*b == '/')
        ++basedirs;
    }

  /* Construct LINK as explained above.  */
  link = xmalloc (3 * basedirs + strlen (linkfile) + 1);
  for (i = 0; i < basedirs; i++)
    memcpy (link + 3 * i, "../", 3);
  strcpy (link + 3 * i, linkfile);
  return link;
}

/* Construct and return a "transparent proxy" URL
   reflecting changes made by --adjust-extension to the file component
   (i.e., "basename") of the original URL, but leaving the "dirname"
   of the URL (protocol://hostname... portion) untouched.

   Think: populating a squid cache via a recursive wget scrape, where
   changing URLs to work locally with "file://..." is NOT desirable.

   Example:

   if
                     p = "//foo.com/bar.cgi?xyz"
   and
      link->local_name = "docroot/foo.com/bar.cgi?xyz.css"
   then

      convert_basename(p, link);
   will return
      "//foo.com/bar.cgi?xyz.css"

   Essentially, we do s/$(basename orig_url)/$(basename link->local_name)/
*/
static char *
convert_basename (const char *p, const struct urlpos *link)
{
  int len = link->size;
  char *url = NULL;
  char *org_basename = NULL, *local_basename = NULL;
  char *result = NULL;

  if (*p == '"' || *p == '\'')
    {
      len -= 2;
      p++;
    }

  url = xstrndup (p, len);

  org_basename = strrchr (url, '/');
  if (org_basename)
    org_basename++;
  else
    org_basename = url;

  local_basename = strrchr (link->local_name, '/');
  if (local_basename)
    local_basename++;
  else
    local_basename = link->local_name;

  /*
   * If the basenames differ, graft the adjusted basename (local_basename)
   * onto the original URL.
   */
  if (strcmp (org_basename, local_basename) == 0)
    result = url;
  else
    {
      result = uri_merge (url, local_basename);
      xfree (url);
    }

  return result;
}

/* Used by write_backup_file to remember which files have been
   written.  */
static struct hash_table *converted_files;

static void
write_backup_file (const char *file, downloaded_file_t downloaded_file_return)
{
  /* Rather than just writing over the original .html file with the
     converted version, save the former to *.orig.  Note we only do
     this for files we've _successfully_ downloaded, so we don't
     clobber .orig files sitting around from previous invocations.
     On VMS, use "_orig" instead of ".orig".  See "wget.h".  */

  /* Construct the backup filename as the original name plus ".orig".  */
  size_t filename_len = strlen (file);
  char *filename_plus_orig_suffix;

  /* TODO: hack this to work with css files */
  if (downloaded_file_return == FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED)
    {
      /* Just write "orig" over "html".  We need to do it this way
         because when we're checking to see if we've downloaded the
         file before (to see if we can skip downloading it), we don't
         know if it's a text/html file.  Therefore we don't know yet
         at that stage that -E is going to cause us to tack on
         ".html", so we need to compare vs. the original URL plus
         ".orig", not the original URL plus ".html.orig".  */
      filename_plus_orig_suffix = alloca (filename_len + 1);
      strcpy (filename_plus_orig_suffix, file);
      strcpy ((filename_plus_orig_suffix + filename_len) - 4, "orig");
    }
  else /* downloaded_file_return == FILE_DOWNLOADED_NORMALLY */
    {
      /* Append ".orig" to the name.  */
      filename_plus_orig_suffix = alloca (filename_len + sizeof (ORIG_SFX));
      strcpy (filename_plus_orig_suffix, file);
      strcpy (filename_plus_orig_suffix + filename_len, ORIG_SFX);
    }

  if (!converted_files)
    converted_files = make_string_hash_table (0);

  /* We can get called twice on the same URL thanks to the
     convert_all_links() call in main.  If we write the .orig file
     each time in such a case, it'll end up containing the first-pass
     conversion, not the original file.  So, see if we've already been
     called on this file.  */
  if (!string_set_contains (converted_files, file))
    {
      /* Rename <file> to <file>.orig before former gets written over.  */
      if (rename (file, filename_plus_orig_suffix) != 0)
        logprintf (LOG_NOTQUIET, _("Cannot back up %s as %s: %s\n"),
                   file, filename_plus_orig_suffix, strerror (errno));

      /* Remember that we've already written a .orig backup for this file.
         Note that we never free this memory since we need it till the
         convert_all_links() call, which is one of the last things the
         program does before terminating.  BTW, I'm not sure if it would be
         safe to just set 'converted_file_ptr->string' to 'file' below,
         rather than making a copy of the string...  Another note is that I
         thought I could just add a field to the urlpos structure saying
         that we'd written a .orig file for this URL, but that didn't work,
         so I had to make this separate list.
         -- Dan Harkless <wget@harkless.org>

         This [adding a field to the urlpos structure] didn't work
         because convert_file() is called from convert_all_links at
         the end of the retrieval with a freshly built new urlpos
         list.
         -- Hrvoje Niksic <hniksic@xemacs.org>
      */
      string_set_add (converted_files, file);
    }
}

static bool find_fragment (const char *, int, const char **, const char **);

/* Replace a string with NEW_TEXT.  Ignore quoting.  */
static const char *
replace_plain (const char *p, int size, FILE *fp, const char *new_text)
{
  fputs (new_text, fp);
  p += size;
  return p;
}

/* Replace an attribute's original text with NEW_TEXT.  */
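
/* For example (illustrative, not from the sources): given the
   attribute value "old.html#top" in the buffer and NEW_TEXT
   "new.html", replace_attr writes "new.html#top", reusing the
   original quoting character and carrying over the fragment.  */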

static const char *
replace_attr (const char *p, int size, FILE *fp, const char *new_text)
{
  bool quote_flag = false;
  char quote_char = '\"';       /* use "..." for quoting, unless the
                                   original value is quoted, in which
                                   case reuse its quoting char.  */
  const char *frag_beg, *frag_end;

  /* Structure of our string is:
       "...old-contents..."
       <---    size    --->  (with quotes)
     OR:
       ...old-contents...
       <---    size   -->  (no quotes)   */

  if (*p == '\"' || *p == '\'')
    {
      quote_char = *p;
      quote_flag = true;
      ++p;
      size -= 2;                /* disregard opening and closing quote */
    }
  putc (quote_char, fp);
  fputs (new_text, fp);

  /* Look for fragment identifier, if any.  */
  if (find_fragment (p, size, &frag_beg, &frag_end))
    fwrite (frag_beg, 1, frag_end - frag_beg, fp);
  p += size;
  if (quote_flag)
    ++p;
  putc (quote_char, fp);

  return p;
}

/* The same as REPLACE_ATTR, but used when replacing
   <meta http-equiv=refresh content="new_text"> because we need to
   prepend "timeout_value; URL=" to NEW_TEXT.  */
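
/* For example (illustrative): with TIMEOUT 0 and NEW_TEXT
   "local.html", the attribute value written is "0; URL=local.html".  */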

static const char *
replace_attr_refresh_hack (const char *p, int size, FILE *fp,
                           const char *new_text, int timeout)
{
  /* "0; URL=..." */
  char *new_with_timeout = (char *)alloca (numdigit (timeout)
                                           + 6 /* "; URL=" */
                                           + strlen (new_text)
                                           + 1);
  sprintf (new_with_timeout, "%d; URL=%s", timeout, new_text);

  return replace_attr (p, size, fp, new_with_timeout);
}

/* Find the first occurrence of '#' in [BEG, BEG+SIZE) that is not
   preceded by '&'.  If the character is not found, return false.  If
   the character is found, return true and set BP and EP to point to
   the beginning and end of the region.

   This is used for finding the fragment identifiers in URLs.  */
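
/* For example (illustrative): for BEG = "foo.html#sec" and SIZE 12,
   BP is set to point at "#sec" and EP at BEG+SIZE, while
   "foo&#32;bar" yields false because its '#' belongs to an HTML
   entity.  */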

static bool
find_fragment (const char *beg, int size, const char **bp, const char **ep)
{
  const char *end = beg + size;
  bool saw_amp = false;
  for (; beg < end; beg++)
    {
      switch (*beg)
        {
        case '&':
          saw_amp = true;
          break;
        case '#':
          if (!saw_amp)
            {
              *bp = beg;
              *ep = end;
              return true;
            }
          /* fallthrough */
        default:
          saw_amp = false;
        }
    }
  return false;
}

/* Quote FILE for use as local reference to an HTML file.

   We quote ? as %3F to avoid passing part of the file name as the
   parameter when browsing the converted file through HTTP.  However,
   it is safe to do this only when `--adjust-extension' is turned on.
   This is because converting "index.html?foo=bar" to
   "index.html%3Ffoo=bar" would break local browsing, as the latter
   isn't even recognized as an HTML file!  However, converting
   "index.html?foo=bar.html" to "index.html%3Ffoo=bar.html" should be
   safe for both local and HTTP-served browsing.

   We always quote "#" as "%23", "%" as "%25" and ";" as "%3B"
   because those characters have special meanings in URLs.  */
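
/* For example (illustrative): local_quote_string ("a#b;c%d", true)
   yields "a%23b%3Bc%25d"; with --adjust-extension in effect,
   "page.html?q=1" becomes "page.html%3Fq=1".  */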

static char *
local_quote_string (const char *file, bool no_html_quote)
{
  const char *from;
  char *newname, *to;

  char *any = strpbrk (file, "?#%;");
  if (!any)
    return no_html_quote ? strdup (file) : html_quote_string (file);

  /* Allocate space assuming the worst-case scenario, each character
     having to be quoted.  */
  to = newname = (char *)alloca (3 * strlen (file) + 1);
  newname[0] = '\0';
  for (from = file; *from; from++)
    switch (*from)
      {
      case '%':
        *to++ = '%';
        *to++ = '2';
        *to++ = '5';
        break;
      case '#':
        *to++ = '%';
        *to++ = '2';
        *to++ = '3';
        break;
      case ';':
        *to++ = '%';
        *to++ = '3';
        *to++ = 'B';
        break;
      case '?':
        if (opt.adjust_extension)
          {
            *to++ = '%';
            *to++ = '3';
            *to++ = 'F';
            break;
          }
        /* fallthrough */
      default:
        *to++ = *from;
      }
  *to = '\0';

  return no_html_quote ? strdup (newname) : html_quote_string (newname);
}

/* Book-keeping code for dl_file_url_map, dl_url_file_map,
   downloaded_html_set, and downloaded_css_set.  Other code calls
   these functions to let us know that a file has been downloaded.  */

#define ENSURE_TABLES_EXIST do {                        \
  if (!dl_file_url_map)                                 \
    dl_file_url_map = make_string_hash_table (0);       \
  if (!dl_url_file_map)                                 \
    dl_url_file_map = make_string_hash_table (0);       \
} while (0)

/* Return true if S1 and S2 are the same, except for "/index.html".
   The cases in which it returns one are (substitute any substring
   for "foo"):

   m("foo/index.html", "foo/")  ==> 1
   m("foo/", "foo/index.html")  ==> 1
   m("foo", "foo/index.html")   ==> 1
   m("foo", "foo/")             ==> 1
   m("foo", "foo")              ==> 1  */

static bool
match_except_index (const char *s1, const char *s2)
{
  int i;
  const char *lng;

  /* Skip common substring. */
  for (i = 0; *s1 && *s2 && *s1 == *s2; s1++, s2++, i++)
    ;
  if (i == 0)
    /* Strings differ at the very beginning -- bail out.  We need to
       check this explicitly to avoid `lng - 1' reading outside the
       array.  */
    return false;

  if (!*s1 && !*s2)
    /* Both strings hit EOF -- strings are equal. */
    return true;
  else if (*s1 && *s2)
    /* Strings are randomly different, e.g. "/foo/bar" and "/foo/qux". */
    return false;
  else if (*s1)
    /* S1 is the longer one. */
    lng = s1;
  else
    /* S2 is the longer one. */
    lng = s2;

  /* foo            */            /* foo/           */
  /* foo/index.html */  /* or */  /* foo/index.html */
  /*    ^           */            /*     ^          */

  if (*lng != '/')
    /* The right-hand case. */
    --lng;

  if (*lng == '/' && *(lng + 1) == '\0')
    /* foo  */
    /* foo/ */
    return true;

  return 0 == strcmp (lng, "/index.html");
}

static int
dissociate_urls_from_file_mapper (void *key, void *value, void *arg)
{
  char *mapping_url = (char *)key;
  char *mapping_file = (char *)value;
  char *file = (char *)arg;

  if (0 == strcmp (mapping_file, file))
    {
      hash_table_remove (dl_url_file_map, mapping_url);
      xfree (mapping_url);
      xfree (mapping_file);
    }

  /* Continue mapping. */
  return 0;
}

/* Remove all associations from various URLs to FILE from dl_url_file_map.  */

static void
dissociate_urls_from_file (const char *file)
{
  /* Can't use hash_table_iter_* because the table mutates while mapping.  */
  hash_table_for_each (dl_url_file_map, dissociate_urls_from_file_mapper,
                       (char *) file);
}

/* Register that URL has been successfully downloaded to FILE.  This
   is used by the link conversion code to convert references to URLs
   to references to local files.  It is also being used to check if a
   URL has already been downloaded.  */

void
register_download (const char *url, const char *file)
{
  char *old_file, *old_url;

  ENSURE_TABLES_EXIST;

  /* With some forms of retrieval, it is possible -- although not
     likely or particularly desirable -- for two different URLs to be
     saved under the same file name.  If both are downloaded, the
     second download will override the first one.  When that happens,
     dissociate the old file name from the URL.  */

  if (hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
    {
      if (0 == strcmp (url, old_url))
        /* We have somehow managed to download the same URL twice.
           Nothing to do.  */
        return;

      if (match_except_index (url, old_url)
          && !hash_table_contains (dl_url_file_map, url))
        /* The two URLs differ only in the "index.html" ending.  For
           example, one is "http://www.server.com/", and the other is
           "http://www.server.com/index.html".  Don't remove the old
           one, just add the new one as a non-canonical entry.  */
        goto url_only;

      hash_table_remove (dl_file_url_map, file);
      xfree (old_file);
      xfree (old_url);

      /* Remove all the URLs that point to this file.  Yes, there can
         be more than one such URL, because we store redirections as
         multiple entries in dl_url_file_map.  For example, if URL1
         redirects to URL2 which gets downloaded to FILE, we map both
         URL1 and URL2 to FILE in dl_url_file_map.  (dl_file_url_map
         only points to URL2.)  When another URL gets loaded to FILE,
         we want both URL1 and URL2 dissociated from it.

         This is a relatively expensive operation because it performs
         a linear search of the whole hash table, but it should be
         called very rarely, only when two URLs resolve to the same
         file name, *and* the "<file>.1" extensions are turned off.
         In other words, almost never.  */
      dissociate_urls_from_file (file);
    }

  hash_table_put (dl_file_url_map, xstrdup (file), xstrdup (url));

 url_only:
  /* A URL->FILE mapping is not possible without a FILE->URL mapping.
     If the latter were present, it should have been removed by the
     above `if'.  So we could write:

         assert (!hash_table_contains (dl_url_file_map, url));

     The above is correct when running in recursive mode where the
     same URL always resolves to the same file.  But if you do
     something like:

         wget URL URL

     then the first URL will resolve to "FILE", and the other to
     "FILE.1".  In that case, FILE.1 will not be found in
     dl_file_url_map, but URL will still point to FILE in
     dl_url_file_map.  */
  if (hash_table_get_pair (dl_url_file_map, url, &old_url, &old_file))
    {
      hash_table_remove (dl_url_file_map, url);
      xfree (old_url);
      xfree (old_file);
    }

  hash_table_put (dl_url_file_map, xstrdup (url), xstrdup (file));
}

/* Register that FROM has been redirected to TO.  This assumes that TO
   is successfully downloaded and already registered using
   register_download() above.  */
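
/* For example (illustrative): after register_download
   ("http://host/new", "new.html"), calling register_redirection
   ("http://host/old", "http://host/new") also maps "http://host/old"
   to "new.html" in dl_url_file_map, so links through the redirect
   convert to the same local file.  */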

void
register_redirection (const char *from, const char *to)
{
  char *file;

  ENSURE_TABLES_EXIST;

  file = hash_table_get (dl_url_file_map, to);
  assert (file != NULL);
  if (!hash_table_contains (dl_url_file_map, from))
    hash_table_put (dl_url_file_map, xstrdup (from), xstrdup (file));
}

/* Register that the file has been deleted. */

void
register_delete_file (const char *file)
{
  char *old_url, *old_file;

  ENSURE_TABLES_EXIST;

  if (!hash_table_get_pair (dl_file_url_map, file, &old_file, &old_url))
    return;

  hash_table_remove (dl_file_url_map, file);
  xfree (old_file);
  xfree (old_url);
  dissociate_urls_from_file (file);
}

/* Register that FILE is an HTML file that has been downloaded. */

void
register_html (const char *file)
{
  if (!downloaded_html_set)
    downloaded_html_set = make_string_hash_table (0);
  string_set_add (downloaded_html_set, file);
}

/* Register that FILE is a CSS file that has been downloaded. */

void
register_css (const char *file)
{
  if (!downloaded_css_set)
    downloaded_css_set = make_string_hash_table (0);
  string_set_add (downloaded_css_set, file);
}

static void downloaded_files_free (void);

/* Clean up the data structures associated with this file.  */

void
convert_cleanup (void)
{
  if (dl_file_url_map)
    {
      free_keys_and_values (dl_file_url_map);
      hash_table_destroy (dl_file_url_map);
      dl_file_url_map = NULL;
    }
  if (dl_url_file_map)
    {
      free_keys_and_values (dl_url_file_map);
      hash_table_destroy (dl_url_file_map);
      dl_url_file_map = NULL;
    }
  if (downloaded_html_set)
    string_set_free (downloaded_html_set);
  downloaded_files_free ();
  if (converted_files)
    string_set_free (converted_files);
}

/* Book-keeping code for downloaded files that enables extension
   hacks.  */

/* This table should really be merged with dl_file_url_map and
   downloaded_html_set.  This was originally a list, but I changed
   it to a hash table because it was actually taking a lot of time to
   find things in it.  */

static struct hash_table *downloaded_files_hash;

/* We're storing "modes" of type downloaded_file_t in the hash table.
   However, our hash tables only accept pointers for keys and values.
   So when we need a pointer, we use the address of a
   downloaded_file_t variable of static storage.  */

static downloaded_file_t *
downloaded_mode_to_ptr (downloaded_file_t mode)
{
  static downloaded_file_t
    v1 = FILE_NOT_ALREADY_DOWNLOADED,
    v2 = FILE_DOWNLOADED_NORMALLY,
    v3 = FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED,
    v4 = CHECK_FOR_FILE;

  switch (mode)
    {
    case FILE_NOT_ALREADY_DOWNLOADED:
      return &v1;
    case FILE_DOWNLOADED_NORMALLY:
      return &v2;
    case FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED:
      return &v3;
    case CHECK_FOR_FILE:
      return &v4;
    }
  return NULL;
}

/* Remembers which files have been downloaded.  In the standard case,
   should be called with mode == FILE_DOWNLOADED_NORMALLY for each
   file we actually download successfully (i.e. not for ones we have
   failures on or that we skip due to -N).

   When we've downloaded a file and tacked on a ".html" extension due
   to -E, call this function with
   FILE_DOWNLOADED_AND_HTML_EXTENSION_ADDED rather than
   FILE_DOWNLOADED_NORMALLY.

   If you just want to check if a file has been previously added
   without adding it, call with mode == CHECK_FOR_FILE.  Please be
   sure to call this function with local filenames, not remote
   URLs.  */

downloaded_file_t
downloaded_file (downloaded_file_t mode, const char *file)
{
  downloaded_file_t *ptr;

  if (mode == CHECK_FOR_FILE)
    {
      if (!downloaded_files_hash)
        return FILE_NOT_ALREADY_DOWNLOADED;
      ptr = hash_table_get (downloaded_files_hash, file);
      if (!ptr)
        return FILE_NOT_ALREADY_DOWNLOADED;
      return *ptr;
    }

  if (!downloaded_files_hash)
    downloaded_files_hash = make_string_hash_table (0);

  ptr = hash_table_get (downloaded_files_hash, file);
  if (ptr)
    return *ptr;

  ptr = downloaded_mode_to_ptr (mode);
  hash_table_put (downloaded_files_hash, xstrdup (file), ptr);

  return FILE_NOT_ALREADY_DOWNLOADED;
}

static void
downloaded_files_free (void)
{
  if (downloaded_files_hash)
    {
      hash_table_iterator iter;
      for (hash_table_iterate (downloaded_files_hash, &iter);
           hash_table_iter_next (&iter);
           )
        xfree (iter.key);
      hash_table_destroy (downloaded_files_hash);
      downloaded_files_hash = NULL;
    }
}

/* The function returns the pointer to the malloc-ed quoted version of
   string s.  It will recognize and quote numeric and special graphic
   entities, as per RFC1866:

   `&' -> `&amp;'
   `<' -> `&lt;'
   `>' -> `&gt;'
   `"' -> `&quot;'
   SP  -> `&#32;'

   No other entities are recognized or replaced.  */
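
/* For example (illustrative): html_quote_string ("a <b> & \"c\"")
   returns "a&#32;&lt;b&gt;&#32;&amp;&#32;&quot;c&quot;".  */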
char *
html_quote_string (const char *s)
{
  const char *b = s;
  char *p, *res;
  int i;

  /* Pass through the string, and count the new size.  */
  for (i = 0; *s; s++, i++)
    {
      if (*s == '&')
        i += 4;                 /* `amp;' */
      else if (*s == '<' || *s == '>')
        i += 3;                 /* `lt;' and `gt;' */
      else if (*s == '\"')
        i += 5;                 /* `quot;' */
      else if (*s == ' ')
        i += 4;                 /* #32; */
    }
  res = xmalloc (i + 1);
  s = b;
  for (p = res; *s; s++)
    {
      switch (*s)
        {
        case '&':
          *p++ = '&';
          *p++ = 'a';
          *p++ = 'm';
          *p++ = 'p';
          *p++ = ';';
          break;
        case '<': case '>':
          *p++ = '&';
          *p++ = (*s == '<' ? 'l' : 'g');
          *p++ = 't';
          *p++ = ';';
          break;
        case '\"':
          *p++ = '&';
          *p++ = 'q';
          *p++ = 'u';
          *p++ = 'o';
          *p++ = 't';
          *p++ = ';';
          break;
        case ' ':
          *p++ = '&';
          *p++ = '#';
          *p++ = '3';
          *p++ = '2';
          *p++ = ';';
          break;
        default:
          *p++ = *s;
        }
    }
  *p = '\0';
  return res;
}

/*
 * vim: et ts=2 sw=2
 */