Added proxy support to Indexer APIs.
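
As a rough usage sketch (hypothetical: the proxy URL is a placeholder and the import path is abbreviated), both indexer constructors now accept a proxy keyword that is applied to the cache-backed request session:

    from tvdb_api import Tvdb

    # proxy is only consulted when the CacheControl session is in use (cache=True)
    t = Tvdb(cache=True, proxy='http://127.0.0.1:8118')
    episode = t['My Show'][1][3]  # lookups now route through the proxy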

Provider getURL and downloadResult functions have been removed and replaced with the ones from helpers.py, to slim the code down and allow better control over request sessions.
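
Providers now go through the shared helpers along these lines (a sketch; the URL and params are placeholders):

    import requests
    from sickbeard import helpers

    session = requests.Session()  # each provider passes its own session so cookies/auth persist
    data = helpers.getURL('http://example.com/rss', params={'cat': 'tv'},
                          session=session, json=False)
    if not data:
        pass  # the helper returns None and logs the failure itself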

Removed TVTumbler code.

Fixed HDBits provider.

Fixed config settings that were meant to be booleans but were instead stored as str or int; this should help resolve random HTML errors.
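
The fix wraps each integer read in bool(), as in this pair taken from the diff below:

    # before: the value ends up typed as str/int, which leaked into the web UI
    USE_ANIDB = check_setting_str(CFG, 'ANIDB', 'use_anidb', '')

    # after: a real boolean
    USE_ANIDB = bool(check_setting_int(CFG, 'ANIDB', 'use_anidb', 0))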

XEM Refresh check re-coded.

NameParser code for creating the show object has been changed to attempt this only at the very end, once it has found the bestMatch result; this helps with resources and performance.
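
In outline, the new parse flow looks like this (a sketch; build_parse_result stands in for the inline field extraction shown in the diff):

    matches = []
    for cur_pattern_num, cur_pattern_name, cur_regex in self.compiled_regexes:
        match = cur_regex.match(name)
        if match:
            matches.append(build_parse_result(match))  # no show lookup per match

    if matches:
        # pick the highest-scoring match, then resolve the show object only once
        bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex),
                         key=lambda x: x.score)
        bestResult.show = helpers.get_show(bestResult.series_name, self.tryIndexers)
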
echel0n 2014-07-27 03:59:21 -07:00
parent fcded3c3cf
commit 14c354b551
45 changed files with 781 additions and 1394 deletions

View File

@ -6,6 +6,7 @@
#license:unlicense (http://unlicense.org/)
from functools import wraps
import traceback
__author__ = "dbr/Ben"
__version__ = "1.9"
@ -21,7 +22,7 @@ import logging
import zipfile
import datetime as dt
import requests
import cachecontrol
import requests.exceptions
import xmltodict
try:
@ -35,7 +36,7 @@ except ImportError:
gzip = None
from lib.dateutil.parser import parse
from cachecontrol import caches
from lib.cachecontrol import CacheControl, caches
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
@ -366,7 +367,8 @@ class Tvdb:
apikey=None,
forceConnect=False,
useZip=False,
dvdorder=False):
dvdorder=False,
proxy=None):
"""interactive (True/False):
When True, the built-in console UI is used to select the correct show.
@ -464,16 +466,18 @@ class Tvdb:
self.config['dvdorder'] = dvdorder
self.config['proxy'] = proxy
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
self.sess = cachecontrol.CacheControl(cache=caches.FileCache(self.config['cache_location']))
self.sess = CacheControl(cache=caches.FileCache(self.config['cache_location']))
elif cache is False:
self.config['cache_enabled'] = False
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
self.sess = cachecontrol.CacheControl(cache=caches.FileCache(self.config['cache_location']))
self.sess = CacheControl(cache=caches.FileCache(self.config['cache_location']))
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
@ -561,18 +565,24 @@ class Tvdb:
# get response from TVDB
if self.config['cache_enabled']:
if self.config['proxy']:
log().debug("Using proxy for URL: %s" % url)
self.sess.proxies = {
"http": self.config['proxy'],
"https": self.config['proxy'],
}
resp = self.sess.get(url, cache_auto=True, params=params)
else:
resp = requests.get(url, params=params)
except requests.HTTPError, e:
except requests.exceptions.HTTPError, e:
raise tvdb_error("HTTP error " + str(e.errno) + " while loading URL " + str(url))
except requests.ConnectionError, e:
except requests.exceptions.ConnectionError, e:
raise tvdb_error("Connection error " + str(e.message) + " while loading URL " + str(url))
except requests.Timeout, e:
except requests.exceptions.Timeout, e:
raise tvdb_error("Connection timed out " + str(e.message) + " while loading URL " + str(url))
except Exception:
raise tvdb_error("Unknown exception while loading URL " + url + ": " + traceback.format_exc())
def process(path, key, value):
key = key.lower()
@ -703,7 +713,9 @@ class Tvdb:
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
ui = self.config['custom_ui'](config=self.config)
CustomUI = self.config['custom_ui']
ui = CustomUI(config=self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')

View File

@ -10,6 +10,7 @@ Modified from http://github.com/dbr/tvrage_api
Simple-to-use Python interface to The TVRage's API (tvrage.com)
"""
from functools import wraps
import traceback
__author__ = "echel0n"
__version__ = "1.0"
@ -23,7 +24,7 @@ import warnings
import logging
import datetime as dt
import requests
import cachecontrol
import requests.exceptions
import xmltodict
try:
@ -32,7 +33,7 @@ except ImportError:
import xml.etree.ElementTree as ElementTree
from lib.dateutil.parser import parse
from cachecontrol import caches
from cachecontrol import CacheControl, caches
from tvrage_ui import BaseUI
from tvrage_exceptions import (tvrage_error, tvrage_userabort, tvrage_shownotfound,
@ -283,7 +284,8 @@ class TVRage:
apikey=None,
forceConnect=False,
useZip=False,
dvdorder=False):
dvdorder=False,
proxy=None):
"""
cache (True/False/str/unicode/urllib2 opener):
@ -316,16 +318,18 @@ class TVRage:
self.config['custom_ui'] = custom_ui
self.config['proxy'] = proxy
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
self.sess = cachecontrol.CacheControl(cache=caches.FileCache(self.config['cache_location']))
self.sess = CacheControl(cache=caches.FileCache(self.config['cache_location']))
elif cache is False:
self.config['cache_enabled'] = False
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
self.sess = cachecontrol.CacheControl(cache=caches.FileCache(self.config['cache_location']))
self.sess = CacheControl(cache=caches.FileCache(self.config['cache_location']))
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
@ -401,18 +405,25 @@ class TVRage:
# get response from TVRage
if self.config['cache_enabled']:
if self.config['proxy']:
log().debug("Using proxy for URL: %s" % url)
self.sess.proxies = {
"http": self.config['proxy'],
"https": self.config['proxy'],
}
resp = self.sess.get(url.strip(), cache_auto=True, params=params)
else:
resp = requests.get(url.strip(), params=params)
except requests.HTTPError, e:
except requests.exceptions.HTTPError, e:
raise tvrage_error("HTTP error " + str(e.errno) + " while loading URL " + str(url))
except requests.ConnectionError, e:
except requests.exceptions.ConnectionError, e:
raise tvrage_error("Connection error " + str(e.message) + " while loading URL " + str(url))
except requests.Timeout, e:
except requests.exceptions.Timeout, e:
raise tvrage_error("Connection timed out " + str(e.message) + " while loading URL " + str(url))
except Exception:
raise tvrage_error("Unknown exception while loading URL " + url + ": " + traceback.format_exc())
def remap_keys(path, key, value):
name_map = {
@ -564,7 +575,8 @@ class TVRage:
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
ui = self.config['custom_ui'](config=self.config)
CustomUI = self.config['custom_ui']
ui = CustomUI(config=self.config)
else:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config=self.config)

View File

@ -32,7 +32,8 @@ import sys
from sickbeard import providers, metadata, config, webserveInit
from sickbeard.providers.generic import GenericProvider
from providers import ezrss, tvtorrents, btn, newznab, womble, thepiratebay, torrentleech, kat, iptorrents, \
omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, nextgen, speedcd, nyaatorrents, fanzub, torrentbytes, animezb, freshontv, bitsoup
omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, nextgen, speedcd, nyaatorrents, fanzub, torrentbytes, animezb, \
freshontv, bitsoup
from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
naming_ep_type
from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
@ -98,9 +99,9 @@ metadata_provider_dict = {}
NEWEST_VERSION = None
NEWEST_VERSION_STRING = None
VERSION_NOTIFY = None
AUTO_UPDATE = None
NOTIFY_ON_UPDATE = None
VERSION_NOTIFY = False
AUTO_UPDATE = False
NOTIFY_ON_UPDATE = False
CUR_COMMIT_HASH = None
INIT_LOCK = Lock()
@ -119,9 +120,9 @@ WEB_PASSWORD = None
WEB_HOST = None
WEB_IPV6 = None
PLAY_VIDEOS = None
PLAY_VIDEOS = False
HANDLE_REVERSE_PROXY = None
HANDLE_REVERSE_PROXY = False
PROXY_SETTING = None
LOCALHOST_IP = None
@ -137,16 +138,15 @@ ENABLE_HTTPS = False
HTTPS_CERT = None
HTTPS_KEY = None
LAUNCH_BROWSER = None
LAUNCH_BROWSER = False
CACHE_DIR = None
ACTUAL_CACHE_DIR = None
ROOT_DIRS = None
UPDATE_SHOWS_ON_START = None
SORT_ARTICLE = None
UPDATE_SHOWS_ON_START = False
SORT_ARTICLE = False
DEBUG = False
CLEAR_CACHE = None
USE_LISTVIEW = None
USE_LISTVIEW = False
METADATA_XBMC = None
METADATA_XBMC_12PLUS = None
METADATA_MEDIABROWSER = None
@ -157,42 +157,42 @@ METADATA_MEDE8ER = None
QUALITY_DEFAULT = None
STATUS_DEFAULT = None
FLATTEN_FOLDERS_DEFAULT = None
SUBTITLES_DEFAULT = None
FLATTEN_FOLDERS_DEFAULT = False
SUBTITLES_DEFAULT = False
INDEXER_DEFAULT = None
INDEXER_TIMEOUT = None
SCENE_DEFAULT = None
ANIME_DEFAULT = None
SCENE_DEFAULT = False
ANIME_DEFAULT = False
PROVIDER_ORDER = []
NAMING_MULTI_EP = None
NAMING_MULTI_EP = False
NAMING_PATTERN = None
NAMING_ABD_PATTERN = None
NAMING_CUSTOM_ABD = None
NAMING_CUSTOM_ABD = False
NAMING_SPORTS_PATTERN = None
NAMING_CUSTOM_SPORTS = None
NAMING_CUSTOM_SPORTS = False
NAMING_FORCE_FOLDERS = False
NAMING_STRIP_YEAR = None
NAMING_STRIP_YEAR = False
NAMING_ANIME = None
USE_NZBS = None
USE_TORRENTS = None
USE_NZBS = False
USE_TORRENTS = False
NZB_METHOD = None
NZB_DIR = None
USENET_RETENTION = None
TORRENT_METHOD = None
TORRENT_DIR = None
DOWNLOAD_PROPERS = None
DOWNLOAD_PROPERS = False
CHECK_PROPERS_INTERVAL = None
ALLOW_HIGH_PRIORITY = None
ALLOW_HIGH_PRIORITY = False
AUTOPOSTPROCESSER_FREQUENCY = None
DAILYSEARCH_FREQUENCY = None
UPDATE_FREQUENCY = None
BACKLOG_FREQUENCY = None
DAILYSEARCH_STARTUP = None
BACKLOG_STARTUP = None
DAILYSEARCH_STARTUP = False
BACKLOG_STARTUP = False
MIN_AUTOPOSTPROCESSER_FREQUENCY = 1
MIN_BACKLOG_FREQUENCY = 10
@ -203,8 +203,8 @@ DEFAULT_BACKLOG_FREQUENCY = 10080
DEFAULT_DAILYSEARCH_FREQUENCY = 60
DEFAULT_UPDATE_FREQUENCY = 1
ADD_SHOWS_WO_DIR = None
CREATE_MISSING_SHOW_DIRS = None
ADD_SHOWS_WO_DIR = False
CREATE_MISSING_SHOW_DIRS = False
RENAME_EPISODES = False
AIRDATE_EPISODES = False
PROCESS_AUTOMATICALLY = False
@ -250,7 +250,7 @@ TORRENT_SEED_TIME = None
TORRENT_PAUSED = False
TORRENT_HIGH_BANDWIDTH = False
TORRENT_LABEL = ''
TORRENT_VERIFY_CERT = True
TORRENT_VERIFY_CERT = False
USE_XBMC = False
XBMC_ALWAYS_ON = True
@ -331,7 +331,7 @@ ANIMESUPPORT = False
USE_ANIDB = False
ANIDB_USERNAME = None
ANIDB_PASSWORD = None
ANIDB_USE_MYLIST = 0
ANIDB_USE_MYLIST = False
ADBA_CONNECTION = None
ANIME_SPLIT_HOME = False
@ -403,9 +403,9 @@ EMAIL_LIST = None
GUI_NAME = None
HOME_LAYOUT = None
HISTORY_LAYOUT = None
DISPLAY_SHOW_SPECIALS = None
DISPLAY_SHOW_SPECIALS = False
COMING_EPS_LAYOUT = None
COMING_EPS_DISPLAY_PAUSED = None
COMING_EPS_DISPLAY_PAUSED = False
COMING_EPS_SORT = None
COMING_EPS_MISSED_RANGE = None
FUZZY_DATING = False
@ -438,6 +438,8 @@ TMDB_API_KEY = 'edc5f123313769de83a71e157758030b'
TRAKT_API_KEY = 'abd806c54516240c76e4ebc9c5ccf394'
__INITIALIZED__ = False
def initialize(consoleLogging=True):
with INIT_LOCK:
@ -474,7 +476,7 @@ def initialize(consoleLogging=True):
USE_SYNOLOGYNOTIFIER, SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH, SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD, SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD, \
USE_EMAIL, EMAIL_HOST, EMAIL_PORT, EMAIL_TLS, EMAIL_USER, EMAIL_PASSWORD, EMAIL_FROM, EMAIL_NOTIFY_ONSNATCH, EMAIL_NOTIFY_ONDOWNLOAD, EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD, EMAIL_LIST, \
USE_LISTVIEW, METADATA_XBMC, METADATA_XBMC_12PLUS, METADATA_MEDIABROWSER, METADATA_PS3, metadata_provider_dict, \
NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, CLEAR_CACHE, dailySearchScheduler, NFO_RENAME, \
NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, dailySearchScheduler, NFO_RENAME, \
GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, \
METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, CALENDAR_UNPROTECTED, CREATE_MISSING_SHOW_DIRS, \
ADD_SHOWS_WO_DIR, USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, subtitlesFinderScheduler, \
@ -583,18 +585,11 @@ def initialize(consoleLogging=True):
if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS):
ROOT_DIRS = ''
proxies = getproxies()
proxy_url = None
if 'http' in proxies:
proxy_url = proxies['http']
elif 'ftp' in proxies:
proxy_url = proxies['ftp']
QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD)
STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED)
VERSION_NOTIFY = check_setting_int(CFG, 'General', 'version_notify', 1)
AUTO_UPDATE = check_setting_int(CFG, 'General', 'auto_update', 0)
NOTIFY_ON_UPDATE = check_setting_int(CFG, 'General', 'notify_on_update', 1)
VERSION_NOTIFY = bool(check_setting_int(CFG, 'General', 'version_notify', 1))
AUTO_UPDATE = bool(check_setting_int(CFG, 'General', 'auto_update', 0))
NOTIFY_ON_UPDATE = bool(check_setting_int(CFG, 'General', 'notify_on_update', 1))
FLATTEN_FOLDERS_DEFAULT = bool(check_setting_int(CFG, 'General', 'flatten_folders_default', 0))
INDEXER_DEFAULT = check_setting_int(CFG, 'General', 'indexer_default', 0)
INDEXER_TIMEOUT = check_setting_int(CFG, 'General', 'indexer_timeout', 20)
@ -605,11 +600,11 @@ def initialize(consoleLogging=True):
NAMING_PATTERN = check_setting_str(CFG, 'General', 'naming_pattern', 'Season %0S/%SN - S%0SE%0E - %EN')
NAMING_ABD_PATTERN = check_setting_str(CFG, 'General', 'naming_abd_pattern', '%SN - %A.D - %EN')
NAMING_CUSTOM_ABD = check_setting_int(CFG, 'General', 'naming_custom_abd', 0)
NAMING_CUSTOM_ABD = bool(check_setting_int(CFG, 'General', 'naming_custom_abd', 0))
NAMING_SPORTS_PATTERN = check_setting_str(CFG, 'General', 'naming_sports_pattern', '%SN - %A-D - %EN')
NAMING_ANIME = check_setting_int(CFG, 'General', 'naming_anime', 3)
NAMING_CUSTOM_SPORTS = check_setting_int(CFG, 'General', 'naming_custom_sports', 0)
NAMING_MULTI_EP = check_setting_int(CFG, 'General', 'naming_multi_ep', 1)
NAMING_CUSTOM_SPORTS = bool(check_setting_int(CFG, 'General', 'naming_custom_sports', 0))
NAMING_MULTI_EP = bool(check_setting_int(CFG, 'General', 'naming_multi_ep', 1))
NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
NAMING_STRIP_YEAR = bool(check_setting_int(CFG, 'General', 'naming_strip_year', 0))
@ -659,16 +654,16 @@ def initialize(consoleLogging=True):
TORRENT_DIR = check_setting_str(CFG, 'Blackhole', 'torrent_dir', '')
TV_DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'tv_download_dir', '')
PROCESS_AUTOMATICALLY = check_setting_int(CFG, 'General', 'process_automatically', 0)
UNPACK = check_setting_int(CFG, 'General', 'unpack', 0)
RENAME_EPISODES = check_setting_int(CFG, 'General', 'rename_episodes', 1)
AIRDATE_EPISODES = check_setting_int(CFG, 'General', 'airdate_episodes', 0)
KEEP_PROCESSED_DIR = check_setting_int(CFG, 'General', 'keep_processed_dir', 1)
PROCESS_AUTOMATICALLY = bool(check_setting_int(CFG, 'General', 'process_automatically', 0))
UNPACK = bool(check_setting_int(CFG, 'General', 'unpack', 0))
RENAME_EPISODES = bool(check_setting_int(CFG, 'General', 'rename_episodes', 1))
AIRDATE_EPISODES = bool(check_setting_int(CFG, 'General', 'airdate_episodes', 0))
KEEP_PROCESSED_DIR = bool(check_setting_int(CFG, 'General', 'keep_processed_dir', 1))
PROCESS_METHOD = check_setting_str(CFG, 'General', 'process_method', 'copy' if KEEP_PROCESSED_DIR else 'move')
MOVE_ASSOCIATED_FILES = check_setting_int(CFG, 'General', 'move_associated_files', 0)
NFO_RENAME = check_setting_int(CFG, 'General', 'nfo_rename', 1)
CREATE_MISSING_SHOW_DIRS = check_setting_int(CFG, 'General', 'create_missing_show_dirs', 0)
ADD_SHOWS_WO_DIR = check_setting_int(CFG, 'General', 'add_shows_wo_dir', 0)
MOVE_ASSOCIATED_FILES = bool(check_setting_int(CFG, 'General', 'move_associated_files', 0))
NFO_RENAME = bool(check_setting_int(CFG, 'General', 'nfo_rename', 1))
CREATE_MISSING_SHOW_DIRS = bool(check_setting_int(CFG, 'General', 'create_missing_show_dirs', 0))
ADD_SHOWS_WO_DIR = bool(check_setting_int(CFG, 'General', 'add_shows_wo_dir', 0))
NZBS = bool(check_setting_int(CFG, 'NZBs', 'nzbs', 0))
NZBS_UID = check_setting_str(CFG, 'NZBs', 'nzbs_uid', '')
@ -761,7 +756,8 @@ def initialize(consoleLogging=True):
USE_PUSHOVER = bool(check_setting_int(CFG, 'Pushover', 'use_pushover', 0))
PUSHOVER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsnatch', 0))
PUSHOVER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_ondownload', 0))
PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_onsubtitledownload', 0))
PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = bool(
check_setting_int(CFG, 'Pushover', 'pushover_notify_onsubtitledownload', 0))
PUSHOVER_USERKEY = check_setting_str(CFG, 'Pushover', 'pushover_userkey', '')
PUSHOVER_APIKEY = check_setting_str(CFG, 'Pushover', 'pushover_apikey', '')
USE_LIBNOTIFY = bool(check_setting_int(CFG, 'Libnotify', 'use_libnotify', 0))
@ -796,7 +792,7 @@ def initialize(consoleLogging=True):
TRAKT_API = check_setting_str(CFG, 'Trakt', 'trakt_api', '')
TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
TRAKT_USE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_watchlist', 0))
TRAKT_METHOD_ADD = check_setting_str(CFG, 'Trakt', 'trakt_method_add', "0")
TRAKT_METHOD_ADD = check_setting_int(CFG, 'Trakt', 'trakt_method_add', 0)
TRAKT_START_PAUSED = bool(check_setting_int(CFG, 'Trakt', 'trakt_start_paused', 0))
TRAKT_USE_RECOMMENDED = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_recommended', 0))
TRAKT_SYNC = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync', 0))
@ -874,10 +870,11 @@ def initialize(consoleLogging=True):
USE_LISTVIEW = bool(check_setting_int(CFG, 'General', 'use_listview', 0))
ANIMESUPPORT = False
USE_ANIDB = check_setting_str(CFG, 'ANIDB', 'use_anidb', '')
USE_ANIDB = bool(check_setting_int(CFG, 'ANIDB', 'use_anidb', 0))
ANIDB_USERNAME = check_setting_str(CFG, 'ANIDB', 'anidb_username', '')
ANIDB_PASSWORD = check_setting_str(CFG, 'ANIDB', 'anidb_password', '')
ANIDB_USE_MYLIST = bool(check_setting_int(CFG, 'ANIDB', 'anidb_use_mylist', 0))
ANIME_SPLIT_HOME = bool(check_setting_int(CFG, 'ANIME', 'anime_split_home', 0))
METADATA_XBMC = check_setting_str(CFG, 'General', 'metadata_xbmc', '0|0|0|0|0|0|0|0|0|0')
@ -902,125 +899,15 @@ def initialize(consoleLogging=True):
TIME_PRESET = TIME_PRESET_W_SECONDS.replace(u":%S", u"")
TIMEZONE_DISPLAY = check_setting_str(CFG, 'GUI', 'timezone_display', 'network')
# initialize NZB and TORRENT providers
providerList = providers.makeProviderList()
NEWZNAB_DATA = check_setting_str(CFG, 'Newznab', 'newznab_data', '')
newznabProviderList = providers.getNewznabProviderList(NEWZNAB_DATA)
TORRENTRSS_DATA = check_setting_str(CFG, 'TorrentRss', 'torrentrss_data', '')
torrentRssProviderList = providers.getTorrentRssProviderList(TORRENTRSS_DATA)
if not os.path.isfile(CONFIG_FILE):
logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG)
save_config()
# start up all the threads
logger.sb_log_instance.initLogging(consoleLogging=consoleLogging)
# initialize the main SB database
myDB = db.DBConnection()
db.upgradeDatabase(myDB, mainDB.InitialSchema)
# initialize the cache database
myDB = db.DBConnection('cache.db')
db.upgradeDatabase(myDB, cache_db.InitialSchema)
# initialize the failed downloads database
myDB = db.DBConnection('failed.db')
db.upgradeDatabase(myDB, failed_db.InitialSchema)
# fix up any db problems
myDB = db.DBConnection()
db.sanityCheckDatabase(myDB, mainDB.MainSanityCheck)
# migrate the config if it needs it
migrator = ConfigMigrator(CFG)
migrator.migrate_config()
# initialize metadata_providers
metadata_provider_dict = metadata.get_metadata_generator_dict()
for cur_metadata_tuple in [(METADATA_XBMC, metadata.xbmc),
(METADATA_XBMC_12PLUS, metadata.xbmc_12plus),
(METADATA_MEDIABROWSER, metadata.mediabrowser),
(METADATA_PS3, metadata.ps3),
(METADATA_WDTV, metadata.wdtv),
(METADATA_TIVO, metadata.tivo),
(METADATA_MEDE8ER, metadata.mede8er),
]:
(cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
tmp_provider = cur_metadata_class.metadata_class()
tmp_provider.set_config(cur_metadata_config)
metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize newznab providers
newznabProviderList = providers.getNewznabProviderList(NEWZNAB_DATA)
providerList = providers.makeProviderList()
# initialize schedulers
# updaters
update_now = datetime.timedelta(minutes=0)
versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(),
cycleTime=datetime.timedelta(hours=UPDATE_FREQUENCY),
threadName="CHECKVERSION",
silent=False)
showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SHOWQUEUE")
showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(),
cycleTime=datetime.timedelta(hours=1),
threadName="SHOWUPDATER",
start_time=datetime.time(hour=3)) # 3 AM
# searchers
searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SEARCHQUEUE")
update_interval = datetime.timedelta(minutes=DAILYSEARCH_FREQUENCY)
dailySearchScheduler = scheduler.Scheduler(dailysearcher.DailySearcher(),
cycleTime=update_interval,
threadName="DAILYSEARCHER",
run_delay=update_now if DAILYSEARCH_STARTUP
else update_interval)
update_interval = datetime.timedelta(minutes=BACKLOG_FREQUENCY)
backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(),
cycleTime=update_interval,
threadName="BACKLOG",
run_delay=update_now if BACKLOG_STARTUP
else update_interval)
search_intervals = {'15m': 15, '45m': 45, '90m': 90, '4h': 4*60, 'daily': 24*60}
if CHECK_PROPERS_INTERVAL in search_intervals:
update_interval = datetime.timedelta(minutes=search_intervals[CHECK_PROPERS_INTERVAL])
run_at = None
else:
update_interval = datetime.timedelta(hours=1)
run_at = datetime.time(hour=1) # 1 AM
properFinderScheduler = scheduler.Scheduler(properFinder.ProperFinder(),
cycleTime=update_interval,
threadName="FINDPROPERS",
start_time=run_at,
run_delay=update_interval)
# processors
autoPostProcesserScheduler = scheduler.Scheduler(autoPostProcesser.PostProcesser(),
cycleTime=datetime.timedelta(
minutes=AUTOPOSTPROCESSER_FREQUENCY),
threadName="POSTPROCESSER",
silent=not PROCESS_AUTOMATICALLY)
traktCheckerScheduler = scheduler.Scheduler(traktChecker.TraktChecker(),
cycleTime=datetime.timedelta(hours=1),
threadName="TRAKTCHECKER",
silent=not USE_TRAKT)
subtitlesFinderScheduler = scheduler.Scheduler(subtitles.SubtitlesFinder(),
cycleTime=datetime.timedelta(hours=SUBTITLES_FINDER_FREQUENCY),
threadName="FINDSUBTITLES",
silent=not USE_SUBTITLES)
# dynamically load provider settings
for curTorrentProvider in [curProvider for curProvider in providers.sortedProviderList() if
curProvider.providerType == GenericProvider.TORRENT]:
@ -1104,17 +991,114 @@ def initialize(consoleLogging=True):
curNzbProvider.getID() + '_backlog_only',
0))
try:
url = 'http://raw.github.com/echel0n/sickrage-init/master/settings.ini'
clear_cache = ElementTree.XML(helpers.getURL(url)).find('cache/clear').text
CLEAR_CACHE = check_setting_str(CFG, 'General', 'clear_cache', '')
if CLEAR_CACHE != clear_cache:
for curProvider in [x for x in providers.sortedProviderList() if x.isActive()]:
curProvider.cache._clearCache()
CLEAR_CACHE = clear_cache
save_config()
except:
pass
if not os.path.isfile(CONFIG_FILE):
logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG)
save_config()
# start up all the threads
logger.sb_log_instance.initLogging(consoleLogging=consoleLogging)
# initialize the main SB database
myDB = db.DBConnection()
db.upgradeDatabase(myDB, mainDB.InitialSchema)
# initialize the cache database
myDB = db.DBConnection('cache.db')
db.upgradeDatabase(myDB, cache_db.InitialSchema)
# initialize the failed downloads database
myDB = db.DBConnection('failed.db')
db.upgradeDatabase(myDB, failed_db.InitialSchema)
# fix up any db problems
myDB = db.DBConnection()
db.sanityCheckDatabase(myDB, mainDB.MainSanityCheck)
# migrate the config if it needs it
migrator = ConfigMigrator(CFG)
migrator.migrate_config()
# initialize metadata_providers
metadata_provider_dict = metadata.get_metadata_generator_dict()
for cur_metadata_tuple in [(METADATA_XBMC, metadata.xbmc),
(METADATA_XBMC_12PLUS, metadata.xbmc_12plus),
(METADATA_MEDIABROWSER, metadata.mediabrowser),
(METADATA_PS3, metadata.ps3),
(METADATA_WDTV, metadata.wdtv),
(METADATA_TIVO, metadata.tivo),
(METADATA_MEDE8ER, metadata.mede8er),
]:
(cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
tmp_provider = cur_metadata_class.metadata_class()
tmp_provider.set_config(cur_metadata_config)
metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize schedulers
# updaters
update_now = datetime.timedelta(minutes=0)
versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(),
cycleTime=datetime.timedelta(hours=UPDATE_FREQUENCY),
threadName="CHECKVERSION",
silent=False)
showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SHOWQUEUE")
showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(),
cycleTime=datetime.timedelta(hours=1),
threadName="SHOWUPDATER",
start_time=datetime.time(hour=3)) # 3 AM
# searchers
searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SEARCHQUEUE")
update_interval = datetime.timedelta(minutes=DAILYSEARCH_FREQUENCY)
dailySearchScheduler = scheduler.Scheduler(dailysearcher.DailySearcher(),
cycleTime=update_interval,
threadName="DAILYSEARCHER",
run_delay=update_now if DAILYSEARCH_STARTUP
else update_interval)
update_interval = datetime.timedelta(minutes=BACKLOG_FREQUENCY)
backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(),
cycleTime=update_interval,
threadName="BACKLOG",
run_delay=update_now if BACKLOG_STARTUP
else update_interval)
search_intervals = {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}
if CHECK_PROPERS_INTERVAL in search_intervals:
update_interval = datetime.timedelta(minutes=search_intervals[CHECK_PROPERS_INTERVAL])
run_at = None
else:
update_interval = datetime.timedelta(hours=1)
run_at = datetime.time(hour=1) # 1 AM
properFinderScheduler = scheduler.Scheduler(properFinder.ProperFinder(),
cycleTime=update_interval,
threadName="FINDPROPERS",
start_time=run_at,
run_delay=update_interval)
# processors
autoPostProcesserScheduler = scheduler.Scheduler(autoPostProcesser.PostProcesser(),
cycleTime=datetime.timedelta(
minutes=AUTOPOSTPROCESSER_FREQUENCY),
threadName="POSTPROCESSER",
silent=not PROCESS_AUTOMATICALLY)
traktCheckerScheduler = scheduler.Scheduler(traktChecker.TraktChecker(),
cycleTime=datetime.timedelta(hours=1),
threadName="TRAKTCHECKER",
silent=not USE_TRAKT)
subtitlesFinderScheduler = scheduler.Scheduler(subtitles.SubtitlesFinder(),
cycleTime=datetime.timedelta(hours=SUBTITLES_FINDER_FREQUENCY),
threadName="FINDSUBTITLES",
silent=not USE_SUBTITLES)
showList = []
loadingShowList = {}
@ -1126,11 +1110,10 @@ def start():
global __INITIALIZED__, backlogSearchScheduler, \
showUpdateScheduler, versionCheckScheduler, showQueueScheduler, \
properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
subtitlesFinderScheduler, USE_SUBTITLES,traktCheckerScheduler, \
subtitlesFinderScheduler, USE_SUBTITLES, traktCheckerScheduler, \
dailySearchScheduler, events, started
with INIT_LOCK:
if __INITIALIZED__:
# start system events queue
events.start()
@ -1269,11 +1252,13 @@ def halt():
__INITIALIZED__ = False
started = False
def sig_handler(signum=None, frame=None):
if type(signum) != type(None):
logger.log(u"Signal %i caught, saving and exiting..." % int(signum))
events.put(events.SystemEvent.SHUTDOWN)
def saveAll():
global showList
@ -1286,6 +1271,7 @@ def saveAll():
logger.log(u"Saving config file to disk")
save_config()
def restart(soft=True):
if soft:
halt()
@ -1391,8 +1377,6 @@ def save_config():
new_config['General']['ignore_words'] = IGNORE_WORDS
new_config['General']['calendar_unprotected'] = int(CALENDAR_UNPROTECTED)
new_config['General']['clear_cache'] = CLEAR_CACHE
new_config['Blackhole'] = {}
new_config['Blackhole']['nzb_dir'] = NZB_DIR
new_config['Blackhole']['torrent_dir'] = TORRENT_DIR
@ -1617,7 +1601,7 @@ def save_config():
new_config['Trakt']['trakt_api'] = TRAKT_API
new_config['Trakt']['trakt_remove_watchlist'] = int(TRAKT_REMOVE_WATCHLIST)
new_config['Trakt']['trakt_use_watchlist'] = int(TRAKT_USE_WATCHLIST)
new_config['Trakt']['trakt_method_add'] = TRAKT_METHOD_ADD
new_config['Trakt']['trakt_method_add'] = int(TRAKT_METHOD_ADD)
new_config['Trakt']['trakt_start_paused'] = int(TRAKT_START_PAUSED)
new_config['Trakt']['trakt_use_recommended'] = int(TRAKT_USE_RECOMMENDED)
new_config['Trakt']['trakt_sync'] = int(TRAKT_SYNC)
@ -1705,10 +1689,10 @@ def save_config():
new_config['FailedDownloads']['delete_failed'] = int(DELETE_FAILED)
new_config['ANIDB'] = {}
new_config['ANIDB']['use_anidb'] = USE_ANIDB
new_config['ANIDB']['use_anidb'] = int(USE_ANIDB)
new_config['ANIDB']['anidb_username'] = ANIDB_USERNAME
new_config['ANIDB']['anidb_password'] = helpers.encrypt(ANIDB_PASSWORD, ENCRYPTION_VERSION)
new_config['ANIDB']['anidb_use_mylist'] = ANIDB_USE_MYLIST
new_config['ANIDB']['anidb_use_mylist'] = int(ANIDB_USE_MYLIST)
new_config['ANIME'] = {}
new_config['ANIME']['anime_split_home'] = int(ANIME_SPLIT_HOME)

View File

@ -265,8 +265,8 @@ class Quality:
return (status, Quality.NONE)
@staticmethod
def statusFromName(name, assume=True):
quality = Quality.nameQuality(name)
def statusFromName(name, assume=True, anime=False):
quality = Quality.nameQuality(name, anime)
if assume and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(name)
return Quality.compositeStatus(DOWNLOADED, quality)

View File

@ -51,14 +51,12 @@ class GitHub(object):
if params and type(params) is dict:
url += '?' + '&'.join([str(x) + '=' + str(params[x]) for x in params.keys()])
data = helpers.getURL(url)
if data:
json_data = json.loads(data)
return json_data
else:
parsedJSON = helpers.getURL(url, json=True)
if not parsedJSON:
return []
return parsedJSON
def commits(self):
"""
Uses the API to get a list of the 100 most recent commits from the specified user/repo/branch, starting from HEAD.

View File

@ -33,9 +33,11 @@ import uuid
import base64
import zipfile
from lib import requests
from lib.requests import exceptions
from itertools import izip, cycle
import sickbeard
import subliminal
import adba
import requests
import requests.exceptions
try:
import json
@ -49,20 +51,18 @@ except ImportError:
from xml.dom.minidom import Node
import sickbeard
from sickbeard.exceptions import MultipleShowObjectsException, EpisodeNotFoundByAbsoluteNumberException, ex
from sickbeard.exceptions import MultipleShowObjectsException, ex
from sickbeard import logger, classes
from sickbeard.common import USER_AGENT, mediaExtensions, subtitleExtensions, XML_NSMAP
from sickbeard.common import USER_AGENT, mediaExtensions, subtitleExtensions
from sickbeard import db
from sickbeard import encodingKludge as ek
from sickbeard import notifiers
from lib import subliminal
from lib import adba
from lib import trakt
from sickbeard import clients
from cachecontrol import CacheControl, caches
from itertools import izip, cycle
urllib._urlopener = classes.SickBeardURLopener()
session = requests.Session()
def indentXML(elem, level=0):
'''
@ -191,101 +191,12 @@ def sanitizeFileName(name):
return name
def getURL(url, post_data=None, headers=None, params=None, timeout=30, json=False, use_proxy=False):
"""
Returns a byte-string retrieved from the url provider.
"""
global session
if not session:
session = requests.Session()
req_headers = ['User-Agent', USER_AGENT, 'Accept-Encoding', 'gzip,deflate']
if headers:
for cur_header in headers:
req_headers.append(cur_header)
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
it = iter(req_headers)
if use_proxy and sickbeard.PROXY_SETTING:
logger.log("Using proxy for url: " + url, logger.DEBUG)
proxies = {
"http": sickbeard.PROXY_SETTING,
"https": sickbeard.PROXY_SETTING,
}
r = session.get(url, params=params, data=post_data, headers=dict(zip(it, it)), proxies=proxies,
timeout=timeout, verify=False)
else:
r = session.get(url, params=params, data=post_data, headers=dict(zip(it, it)), timeout=timeout,
verify=False)
except requests.HTTPError, e:
logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
return None
except requests.ConnectionError, e:
logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
return None
except requests.Timeout, e:
logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
return None
if r.ok:
if json:
return r.json()
return r.content
def _remove_file_failed(file):
try:
ek.ek(os.remove, file)
except:
pass
def download_file(url, filename):
global session
if not session:
session = requests.Session()
try:
r = session.get(url, stream=True, verify=False)
with open(filename, 'wb') as fp:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
fp.flush()
except requests.HTTPError, e:
_remove_file_failed(filename)
logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
return False
except requests.ConnectionError, e:
logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
return False
except requests.Timeout, e:
logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
return False
except Exception:
_remove_file_failed(filename)
logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
return False
return True
def findCertainShow(showList, indexerid):
if not showList:
return None
@ -610,6 +521,12 @@ def delete_empty_folders(check_empty_dir, keep_dir=None):
else:
break
def fileBitFilter(mode):
for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]:
if mode & bit:
mode -= bit
return mode
def chmodAsParent(childPath):
if os.name == 'nt' or os.name == 'ce':
@ -649,15 +566,6 @@ def chmodAsParent(childPath):
except OSError:
logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.ERROR)
def fileBitFilter(mode):
for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]:
if mode & bit:
mode -= bit
return mode
def fixSetGroupID(childPath):
if os.name == 'nt' or os.name == 'ce':
return
@ -1272,4 +1180,130 @@ def touchFile(fname, atime=None):
logger.log(u"File air date stamping not available on your OS", logger.DEBUG)
pass
return False
return False
def getURL(url, post_data=None, params=None, headers=None, timeout=30, session=None, json=False):
"""
Returns a byte-string retrieved from the url provider.
"""
# request session
session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(sickbeard.CACHE_DIR, 'sessions')))
# request session headers
req_headers = {'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'}
if headers:
req_headers.update(headers)
session.headers.update(req_headers)
# request session ssl verify
session.verify = False
# request session parameters
session.params = params
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
# request session proxies
if sickbeard.PROXY_SETTING:
logger.log("Using proxy for url: " + url, logger.DEBUG)
session.proxies = {
"http": sickbeard.PROXY_SETTING,
"https": sickbeard.PROXY_SETTING,
}
resp = session.get(url, data=post_data, timeout=timeout)
except requests.exceptions.HTTPError, e:
logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
return
except requests.exceptions.ConnectionError, e:
logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
return
except requests.exceptions.Timeout, e:
logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
return
except Exception:
logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
return
if not resp:
logger.log(u"No data returned from " + url, logger.DEBUG)
return
elif not resp.ok:
logger.log(u"Requested url " + url + " returned status code is " + str(
resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.WARNING)
return
if json:
return resp.json()
return resp.content
def download_file(url, filename, session=None):
# create session
session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(sickbeard.CACHE_DIR, 'sessions')))
# request session headers
session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
# request session ssl verify
session.verify = False
# request session streaming
session.stream = True
# request session proxies
if sickbeard.PROXY_SETTING:
logger.log("Using proxy for url: " + url, logger.DEBUG)
session.proxies = {
"http": sickbeard.PROXY_SETTING,
"https": sickbeard.PROXY_SETTING,
}
try:
resp = session.get(url)
if not resp.ok:
return False
with open(filename, 'wb') as fp:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
fp.flush()
chmodAsParent(filename)
except requests.exceptions.HTTPError, e:
_remove_file_failed(filename)
logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
return False
except requests.exceptions.ConnectionError, e:
_remove_file_failed(filename)
logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
return False
except requests.exceptions.Timeout, e:
_remove_file_failed(filename)
logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
return False
except EnvironmentError, e:
_remove_file_failed(filename)
logger.log(u"Unable to save the file: " + ex(e), logger.ERROR)
return False
except Exception:
_remove_file_failed(filename)
logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
return False
if not resp:
logger.log(u"No data returned from " + url, logger.DEBUG)
return False
elif not resp.ok:
logger.log(u"Requested url " + url + " returned status code is " + str(
resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.WARNING)
return False
return True

View File

@ -48,6 +48,9 @@ class indexerApi(object):
if self.indexerID:
if sickbeard.CACHE_DIR:
indexerConfig[self.indexerID]['api_params']['cache'] = os.path.join(sickbeard.CACHE_DIR, self.name)
if sickbeard.PROXY_SETTING:
indexerConfig[self.indexerID]['api_params']['proxy'] = sickbeard.PROXY_SETTING
return indexerConfig[self.indexerID]['api_params']
@property

View File

@ -23,7 +23,7 @@ indexerConfig[INDEXER_TVDB] = {
'module': Tvdb,
'api_params': {'apikey': 'F9C450E78D99172E',
'language': 'en',
'useZip': True
'useZip': True,
},
}
@ -32,7 +32,7 @@ indexerConfig[INDEXER_TVRAGE] = {
'name': 'TVRage',
'module': TVRage,
'api_params': {'apikey': 'Uhewg1Rr0o62fvZvUIZt',
'language': 'en'
'language': 'en',
},
}

View File

@ -35,9 +35,8 @@ def getShowImage(url, imgNum=None):
logger.log(u"Fetching image from " + tempURL, logger.DEBUG)
image_data = helpers.getURL(tempURL)
if image_data is None:
logger.log(u"There was an error trying to retrieve the image, aborting", logger.ERROR)
return None
return
return image_data

View File

@ -31,9 +31,10 @@ from dateutil import parser
class NameParser(object):
NORMAL_REGEX = 0
SPORTS_REGEX = 1
ANIME_REGEX = 2
ALL_REGEX = 0
NORMAL_REGEX = 1
SPORTS_REGEX = 2
ANIME_REGEX = 3
def __init__(self, file_name=True, showObj=None, tryIndexers=False, convert=False,
naming_pattern=False):
@ -44,13 +45,14 @@ class NameParser(object):
self.convert = convert
self.naming_pattern = naming_pattern
self.regexModes = [self.NORMAL_REGEX, self.SPORTS_REGEX, self.ANIME_REGEX]
if self.showObj and not self.showObj.is_anime and not self.showObj.is_sports:
self.regexModes = [self.NORMAL_REGEX]
self._compile_regexes(self.NORMAL_REGEX)
elif self.showObj and self.showObj.is_anime:
self.regexModes = [self.ANIME_REGEX]
self._compile_regexes(self.ANIME_REGEX)
elif self.showObj and self.showObj.is_sports:
self.regexModes = [self.SPORTS_REGEX]
self._compile_regexes(self.SPORTS_REGEX)
else:
self._compile_regexes(self.ALL_REGEX)
def clean_series_name(self, series_name):
"""Cleans up series name by removing any . and _
@ -83,9 +85,12 @@ class NameParser(object):
elif regexMode == self.ANIME_REGEX:
logger.log(u"Using ANIME regexs", logger.DEBUG)
uncompiled_regex = [regexes.anime_regexes, regexes.normal_regexes]
else:
logger.log(u"Using NORMAL reqgexs", logger.DEBUG)
elif regexMode == self.NORMAL_REGEX:
logger.log(u"Using NORMAL regexs", logger.DEBUG)
uncompiled_regex = [regexes.normal_regexes]
else:
logger.log(u"Using ALL regexes", logger.DEBUG)
uncompiled_regex = [regexes.normal_regexes, regexes.sports_regexs, regexes.anime_regexes]
self.compiled_regexes = []
for regexItem in uncompiled_regex:
@ -95,7 +100,7 @@ class NameParser(object):
except re.error, errormsg:
logger.log(u"WARNING: Invalid episode_pattern, %s. %s" % (errormsg, cur_pattern))
else:
self.compiled_regexes.append((regexMode, cur_pattern_num, cur_pattern_name, cur_regex))
self.compiled_regexes.append((cur_pattern_num, cur_pattern_name, cur_regex))
def _parse_string(self, name):
if not name:
@ -103,144 +108,126 @@ class NameParser(object):
matches = []
bestResult = None
doneSearch = False
for regexMode in self.regexModes:
if doneSearch:
break
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
self._compile_regexes(regexMode)
for (cur_regexMode, cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
if not match:
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 1
named_groups = match.groupdict().keys()
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
if 'sports_event_id' in named_groups:
sports_event_id = match.group('sports_event_id')
if sports_event_id:
result.sports_event_id = int(match.group('sports_event_id'))
result.score += 1
if 'sports_event_name' in named_groups:
result.sports_event_name = match.group('sports_event_name')
if result.sports_event_name:
result.sports_event_name = self.clean_series_name(result.sports_event_name)
result.score += 1
if 'sports_air_date' in named_groups:
sports_air_date = match.group('sports_air_date')
if result.show and result.show.is_sports:
try:
result.sports_air_date = parser.parse(sports_air_date, fuzzy=True).date()
result.score += 1
# get show object
if not result.show and not self.naming_pattern:
result.show = helpers.get_show(result.series_name, self.tryIndexers)
# confirm result show object variables
if result.show:
# confirm passed in show object indexer id matches result show object indexer id
if self.showObj and self.showObj.indexerid != result.show.indexerid:
doneSearch = True
break
# confirm we are using correct regex mode
if regexMode == self.NORMAL_REGEX and not (result.show.is_anime or result.show.is_sports):
result.score += 1
elif regexMode == self.SPORTS_REGEX and result.show.is_sports:
result.score += 1
elif regexMode == self.ANIME_REGEX and result.show.is_anime:
result.score += 1
elif not result.show.is_anime:
break
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
except:
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
if 'air_year' in named_groups and 'air_month' in named_groups and 'air_day' in named_groups:
if result.show and result.show.air_by_date:
year = int(match.group('air_year'))
month = int(match.group('air_month'))
day = int(match.group('air_day'))
try:
dtStr = '%s-%s-%s' % (year, month, day)
result.air_date = datetime.datetime.strptime(dtStr, "%Y-%m-%d").date()
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 1
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'sports_event_id' in named_groups:
sports_event_id = match.group('sports_event_id')
if sports_event_id:
result.sports_event_id = int(match.group('sports_event_id'))
result.score += 1
if 'sports_event_name' in named_groups:
result.sports_event_name = match.group('sports_event_name')
if result.sports_event_name:
result.sports_event_name = self.clean_series_name(result.sports_event_name)
result.score += 1
if 'sports_air_date' in named_groups:
sports_air_date = match.group('sports_air_date')
if result.show and result.show.is_sports:
try:
result.sports_air_date = parser.parse(sports_air_date, fuzzy=True).date()
result.score += 1
except:
continue
if 'air_year' in named_groups and 'air_month' in named_groups and 'air_day' in named_groups:
if result.show and result.show.air_by_date:
year = int(match.group('air_year'))
month = int(match.group('air_month'))
day = int(match.group('air_day'))
try:
dtStr = '%s-%s-%s' % (year, month, day)
result.air_date = datetime.datetime.strptime(dtStr, "%Y-%m-%d").date()
result.score += 1
except:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
except:
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
matches.append(result)
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
matches.append(result)
if len(matches):
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
# if no show object was created check and see if we passed one in and use that instead
if not bestResult.show and self.showObj:
bestResult.show = self.showObj
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(bestResult.series_name, self.tryIndexers)
# get quality
bestResult.quality = common.Quality.nameQuality(name,
bestResult.show.is_anime if bestResult.show else False)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = common.Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []

View File

@ -77,7 +77,6 @@ def _update_zoneinfo():
url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'
url_data = helpers.getURL(url_zv)
if url_data is None:
# When urlData is None, trouble connecting to github
logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.ERROR)
@ -148,7 +147,6 @@ def update_network_dict():
url = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/network_timezones.txt'
url_data = helpers.getURL(url)
if url_data is None:
# When urlData is None, trouble connecting to github
logger.log(u"Loading Network Timezones update failed. Unable to get URL: " + url, logger.ERROR)

View File

@ -106,7 +106,6 @@ def stripNS(element, ns):
def splitResult(result):
urlData = helpers.getURL(result.url)
if urlData is None:
logger.log(u"Unable to load url " + result.url + ", can't download season NZB", logger.ERROR)
return False

View File

@ -111,7 +111,7 @@ def sendNZB(nzb, proper=False):
if (data == None):
return False
nzbcontent64 = standard_b64encode(data)
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, addToTop, nzbcontent64)
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, addToTop, nzbcontent64)
elif nzbget_version == 12:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, nzbgetprio, False,

View File

@ -19,9 +19,11 @@
import re
import traceback
import datetime
import urlparse
import sickbeard
import generic
import requests
import requests.exceptions
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
@ -30,12 +32,9 @@ from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from sickbeard.bs4_parser import BS4Parser
from unidecode import unidecode
class BitSoupProvider(generic.TorrentProvider):
@ -83,7 +82,8 @@ class BitSoupProvider(generic.TorrentProvider):
'ssl': 'yes'
}
self.session = requests.Session()
if not self.session:
self.session = requests.session()
try:
response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
@ -227,32 +227,6 @@ class BitSoupProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -89,7 +89,6 @@ class BTNProvider(generic.TorrentProvider):
params.update(search_params)
parsedJSON = self._api_call(apikey, params)
if not parsedJSON:
logger.log(u"No data returned from " + self.name, logger.ERROR)
return []

View File

@ -56,7 +56,7 @@ class EZRSSProvider(generic.TorrentProvider):
def getQuality(self, item, anime=False):
filename = item.filename
quality = Quality.nameQuality(filename)
quality = Quality.sceneQuality(filename, anime)
return quality
@ -81,10 +81,8 @@ class EZRSSProvider(generic.TorrentProvider):
params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
if ep_obj.show.air_by_date:
params['date'] = str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.sports:
params['date'] = str(ep_obj.airdate).split('-')[0]
if ep_obj.show.air_by_date or ep_obj.show.sports:
params['season'] = str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
params['season'] = "%d" % ep_obj.scene_absolute_number
else:
@ -101,9 +99,7 @@ class EZRSSProvider(generic.TorrentProvider):
params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
if self.show.air_by_date:
params['date'] = str(ep_obj.airdate)
elif self.show.sports:
if self.show.air_by_date or self.show.sports:
params['date'] = str(ep_obj.airdate)
elif self.show.anime:
params['episode'] = "%i" % int(ep_obj.scene_absolute_number)

View File

@ -258,32 +258,6 @@ class FreshOnTVProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -34,9 +34,11 @@ from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.common import Quality
from sickbeard import clients
from lib.hachoir_parser import createParser
class GenericProvider:
NZB = "nzb"
TORRENT = "torrent"
@ -61,10 +63,10 @@ class GenericProvider:
self.cache = tvcache.TVCache(self)
self.cookies = None
self.session = requests.session()
self.session.verify = False
self.session.headers.update({
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'})
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'}
def getID(self):
return GenericProvider.makeID(self.name)
@ -79,6 +81,9 @@ class GenericProvider:
def _checkAuth(self):
return
def _doLogin(self):
return True
def isActive(self):
if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
return self.isEnabled()
@ -109,60 +114,61 @@ class GenericProvider:
return result
def getURL(self, url, post_data=None, headers=None, json=False):
def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
if not headers:
headers = []
# check for auth
if not self._doLogin():
return
data = helpers.getURL(url, post_data, headers, json=json)
if not data:
logger.log(u"Error loading " + self.name + " URL: " + url, logger.ERROR)
return None
return data
return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
session=self.session, json=json)
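Every provider now funnels its requests through one session-aware helper instead of a per-provider urlopen variant. A rough sketch of the contract such a helper satisfies; the real implementation lives in sickbeard/helpers.py, and this approximation only assumes the requests library (verify=False mirrors the session setup above):

    import requests

    def getURL(url, post_data=None, params=None, headers=None, timeout=30,
               session=None, json=False):
        # reuse the caller's session (cookies, login state) when one is supplied
        session = session or requests.session()
        try:
            if post_data:
                resp = session.post(url, data=post_data, params=params,
                                    headers=headers, timeout=timeout, verify=False)
            else:
                resp = session.get(url, params=params, headers=headers,
                                   timeout=timeout, verify=False)
        except requests.exceptions.RequestException:
            return None
        if resp.status_code != 200:
            return None
        # json=True hands back the decoded object, otherwise the raw body
        return resp.json() if json else resp.content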
def downloadResult(self, result):
"""
Save the result to disk.
"""
logger.log(u"Downloading a result from " + self.name + " at " + result.url)
# check for auth
if not self._doLogin():
return
data = self.getURL(result.url)
if self.providerType == GenericProvider.TORRENT:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
return False
if data is None:
return False
urls = [
'http://torcache.net/torrent/' + torrent_hash + '.torrent',
'http://torrage.com/torrent/' + torrent_hash + '.torrent',
'http://zoink.it/torrent/' + torrent_hash + '.torrent',
]
filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
elif self.providerType == GenericProvider.NZB:
urls = [result.url]
filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
# use the appropriate watch folder
if self.providerType == GenericProvider.NZB:
saveDir = sickbeard.NZB_DIR
writeMode = 'w'
elif self.providerType == GenericProvider.TORRENT:
saveDir = sickbeard.TORRENT_DIR
writeMode = 'wb'
else:
return False
return
# use the result name as the filename
file_name = ek.ek(os.path.join, saveDir, helpers.sanitizeFileName(result.name) + '.' + self.providerType)
for url in urls:
if helpers.download_file(url, filename, session=self.session):
logger.log(u"Downloading a result from " + self.name + " at " + url)
logger.log(u"Saving to " + file_name, logger.DEBUG)
if self.providerType == GenericProvider.TORRENT:
logger.log(u"Saved magnet link to " + filename, logger.MESSAGE)
else:
logger.log(u"Saved result to " + filename, logger.MESSAGE)
try:
with open(file_name, writeMode) as fileOut:
fileOut.write(data)
helpers.chmodAsParent(file_name)
except EnvironmentError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
# as long as it's a valid download then consider it a successful snatch
return self._verify_download(file_name)
return self._verify_download(filename)
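downloadResult no longer fetches torrent bytes itself: it derives cache URLs from the magnet link's info-hash and hands each one to helpers.download_file until one succeeds. The hash extraction and mirror fan-out, as a standalone sketch (note the diff's findall(...)[0] would raise IndexError before its guard runs; the sketch uses re.search so the guard actually fires):

    import re

    MIRRORS = ['http://torcache.net/torrent/%s.torrent',
               'http://torrage.com/torrent/%s.torrent',
               'http://zoink.it/torrent/%s.torrent']

    def torrent_urls(magnet_url):
        # btih info-hashes are 40 hex (or 32 base32) chars after 'urn:btih:'
        match = re.search(r'urn:btih:([\w]{32,40})', magnet_url)
        if not match:
            return []
        return [mirror % match.group(1).upper() for mirror in MIRRORS]

    print(torrent_urls('magnet:?xt=urn:btih:' + 'a' * 40))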
def _verify_download(self, file_name=None):
"""
@ -312,14 +318,16 @@ class GenericProvider:
if not len(parse_result.episode_numbers) and (
parse_result.season_number and parse_result.season_number != season) or (
not parse_result.season_number and season != 1):
logger.log(u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
logger.log(
u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
elif len(parse_result.episode_numbers) and (
parse_result.season_number != season or not [ep for ep in episodes if
ep.scene_episode in parse_result.episode_numbers]):
logger.log(u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
if not addCacheEntry:

View File

@ -80,7 +80,7 @@ class HDBitsProvider(generic.TorrentProvider):
return True
def _get_season_search_strings(self, ep_obj):
season_search_string = [self._make_post_data_JSON(show=ep_obj.show, season=ep_obj.scene_season)]
season_search_string = [self._make_post_data_JSON(show=ep_obj.show, season=ep_obj)]
return season_search_string
def _get_episode_search_strings(self, ep_obj, add_string=''):
@ -105,16 +105,8 @@ class HDBitsProvider(generic.TorrentProvider):
logger.log(u"Search url: " + self.search_url + " search_params: " + search_params, logger.DEBUG)
data = self.getURL(self.search_url, post_data=search_params)
if not data:
logger.log(u"No data returned from " + self.search_url, logger.ERROR)
return []
parsedJSON = helpers.parse_json(data)
if parsedJSON is None:
logger.log(u"Error trying to load " + self.name + " JSON data", logger.ERROR)
parsedJSON = self.getURL(self.search_url, post_data=search_params, json=True)
if not parsedJSON:
return []
if self._checkAuthFromData(parsedJSON):
@ -195,7 +187,7 @@ class HDBitsProvider(generic.TorrentProvider):
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': season,
'season': episode.scene_season,
}
if search_term:
@ -225,20 +217,14 @@ class HDBitsCache(tvcache.TVCache):
if self._checkAuth(None):
data = self._getRSSData()
# As long as we got something from the provider we count it as an update
if data:
self.setLastUpdate()
else:
return []
parsedJSON = helpers.parse_json(data)
if parsedJSON is None:
parsedJSON = self._getRSSData()
if not parsedJSON:
logger.log(u"Error trying to load " + self.provider.name + " JSON feed", logger.ERROR)
return []
# mark updated
self.setLastUpdate()
if self._checkAuth(parsedJSON):
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
@ -249,27 +235,21 @@ class HDBitsCache(tvcache.TVCache):
cl = []
for item in items:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
else:
raise exceptions.AuthException(
"Your authentication info for " + self.provider.name + " is incorrect, check your config")
else:
return []
def _getRSSData(self):
return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON())
return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON(), json=True)
def _parseItem(self, item):
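With getURL returning decoded JSON directly, the cache update path shrinks to: fetch, bail on an empty payload, and only then mark the cache as refreshed. A hypothetical condensed version of that flow (callables stand in for the cache's own methods):

    def update_cache(get_rss_data, set_last_update):
        # only a non-empty decoded payload counts as a successful update
        parsedJSON = get_rss_data()
        if not parsedJSON:
            return []
        set_last_update()
        return parsedJSON.get('data', [])

    items = update_cache(lambda: {'data': [{'name': 'x'}]}, lambda: None)
    print(items)   # -> [{'name': 'x'}]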

View File

@ -288,29 +288,6 @@ class HDTorrentsProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -230,30 +230,6 @@ class IPTorrentsProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -112,7 +112,6 @@ class KATProvider(generic.TorrentProvider):
fileName = None
data = self.getURL(torrent_link)
if not data:
return None
@ -316,83 +315,6 @@ class KATProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self.session = requests.Session()
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
if sickbeard.PROXY_SETTING:
proxies = {
"http": sickbeard.PROXY_SETTING,
"https": sickbeard.PROXY_SETTING,
}
r = self.session.get(url, proxies=proxies, verify=False)
else:
r = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return None
if r.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
r.status_code) + ': ' + clients.http_error_code[r.status_code], logger.WARNING)
return None
return r.content
def downloadResult(self, result):
"""
Save the result to disk.
"""
if not self.session:
self.session = requests.Session()
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
return False
try:
r = self.session.get('http://torcache.net/torrent/' + torrent_hash + '.torrent', verify=False)
except Exception, e:
logger.log("Unable to connect to TORCACHE: " + ex(e), logger.ERROR)
try:
logger.log("Trying TORRAGE cache instead")
r = self.session.get('http://torrage.com/torrent/' + torrent_hash + '.torrent', verify=False)
except Exception, e:
logger.log("Unable to connect to TORRAGE: " + ex(e), logger.ERROR)
return False
if not r.status_code == 200:
return False
magnetFileName = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
magnetFileContent = r.content
try:
with open(magnetFileName, 'wb') as fileOut:
fileOut.write(magnetFileContent)
helpers.chmodAsParent(magnetFileName)
except EnvironmentError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
logger.log(u"Saved magnet link to " + magnetFileName + " ", logger.MESSAGE)
return True
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -227,25 +227,6 @@ class NewzbinProvider(generic.NZBProvider):
return True
def getURL(self, url, post_data=None, headers=None, json=False):
myOpener = classes.AuthURLOpener(sickbeard.NEWZBIN_USERNAME, sickbeard.NEWZBIN_PASSWORD)
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
f = myOpener.openit(url)
except (urllib.ContentTooShortError, IOError), e:
logger.log("Error loading search results: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return None
data = f.read()
f.close()
return data
def _get_season_search_strings(self, ep_obj):
return ['^' + x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]

View File

@ -200,66 +200,66 @@ class NextGenProvider(generic.TorrentProvider):
logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
if data:
try:
with BS4Parser(data.decode('iso-8859-1'), features=["html5lib", "permissive"]) as html:
resultsTable = html.find('div', attrs={'id': 'torrent-table-wrapper'})
try:
with BS4Parser(data.decode('iso-8859-1'), features=["html5lib", "permissive"]) as html:
resultsTable = html.find('div', attrs={'id': 'torrent-table-wrapper'})
if not resultsTable:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.DEBUG)
continue
if not resultsTable:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
# Collecting entries
entries_std = html.find_all('div', attrs={'id': 'torrent-std'})
entries_sticky = html.find_all('div', attrs={'id': 'torrent-sticky'})
entries = entries_std + entries_sticky
#Xirg STANDARD TORRENTS
#Continue only if one Release is found
if len(entries) > 0:
for result in entries:
try:
torrentName = \
((result.find('div', attrs={'id': 'torrent-udgivelse2-users'})).find('a'))['title']
torrentId = (
((result.find('div', attrs={'id': 'torrent-download'})).find('a'))['href']).replace(
'download.php?id=', '')
torrent_name = str(torrentName)
torrent_download_url = (self.urls['download'] % torrentId).encode('utf8')
torrent_details_url = (self.urls['detail'] % torrentId).encode('utf8')
#torrent_seeders = int(result.find('div', attrs = {'id' : 'torrent-seeders'}).find('a')['class'][0])
## Not used, perhaps in the future ##
#torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
#torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
except (AttributeError, TypeError):
continue
# Filter unseeded torrent and torrents with no name/url
#if mode != 'RSS' and torrent_seeders == 0:
# continue
if not torrent_name or not torrent_download_url:
continue
item = torrent_name, torrent_download_url
logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")",
logger.DEBUG)
continue
items[mode].append(item)
# Collecting entries
entries_std = html.find_all('div', attrs={'id': 'torrent-std'})
entries_sticky = html.find_all('div', attrs={'id': 'torrent-sticky'})
else:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.WARNING)
continue
entries = entries_std + entries_sticky
#Xirg STANDARD TORRENTS
#Continue only if one Release is found
if len(entries) > 0:
for result in entries:
try:
torrentName = \
((result.find('div', attrs={'id': 'torrent-udgivelse2-users'})).find('a'))['title']
torrentId = (
((result.find('div', attrs={'id': 'torrent-download'})).find('a'))['href']).replace(
'download.php?id=', '')
torrent_name = str(torrentName)
torrent_download_url = (self.urls['download'] % torrentId).encode('utf8')
torrent_details_url = (self.urls['detail'] % torrentId).encode('utf8')
#torrent_seeders = int(result.find('div', attrs = {'id' : 'torrent-seeders'}).find('a')['class'][0])
## Not used, perhaps in the future ##
#torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
#torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
except (AttributeError, TypeError):
continue
# Filter unseeded torrent and torrents with no name/url
#if mode != 'RSS' and torrent_seeders == 0:
# continue
if not torrent_name or not torrent_download_url:
continue
item = torrent_name, torrent_download_url
logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")",
logger.DEBUG)
items[mode].append(item)
else:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.WARNING)
continue
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.ERROR)
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.ERROR)
results += items[mode]
@ -278,32 +278,6 @@ class NextGenProvider(generic.TorrentProvider):
return title, url
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -114,17 +114,14 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
search_url = 'https://api.omgwtfnzbs.org/json/?' + urllib.urlencode(params)
logger.log(u"Search url: " + search_url, logger.DEBUG)
data = self.getURL(search_url, json=True)
if not data:
logger.log(u"No data returned from " + search_url, logger.ERROR)
parsedJSON = self.getURL(search_url, json=True)
if not parsedJSON:
return []
if self._checkAuthFromData(data, is_XML=False):
if self._checkAuthFromData(parsedJSON, is_XML=False):
results = []
for item in data:
for item in parsedJSON:
if 'release' in item and 'getnzb' in item:
results.append(item)

View File

@ -141,7 +141,6 @@ class PublicHDProvider(generic.TorrentProvider):
logger.log(u"Search string: " + searchURL, logger.DEBUG)
html = self.getURL(searchURL)
if not html:
continue
@ -205,74 +204,6 @@ class PublicHDProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self.session = requests.Session()
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
r = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return None
if r.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
r.status_code) + ': ' + clients.http_error_code[r.status_code], logger.WARNING)
return None
return r.content
def downloadResult(self, result):
"""
Save the result to disk.
"""
if not self.session:
self.session = requests.Session()
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
return False
try:
r = self.session.get('http://torcache.net/torrent/' + torrent_hash + '.torrent', verify=False)
except Exception, e:
logger.log("Unable to connect to TORCACHE: " + ex(e), logger.ERROR)
try:
logger.log("Trying TORRAGE cache instead")
r = self.session.get('http://torrage.com/torrent/' + torrent_hash + '.torrent', verify=False)
except Exception, e:
logger.log("Unable to connect to TORRAGE: " + ex(e), logger.ERROR)
return False
if not r.status_code == 200:
return False
magnetFileName = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
magnetFileContent = r.content
try:
with open(magnetFileName, 'wb') as fileOut:
fileOut.write(magnetFileContent)
helpers.chmodAsParent(magnetFileName)
except EnvironmentError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
logger.log(u"Saved magnet link to " + magnetFileName + " ", logger.MESSAGE)
return True
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -35,7 +35,7 @@ from lib.requests import exceptions
from lib.bencode import bdecode
class TorrentRssProvider(generic.TorrentProvider):
def __init__(self, name, url, cookies, search_mode='eponly', search_fallback=False, backlog_only=False):
def __init__(self, name, url, cookies='', search_mode='eponly', search_fallback=False, backlog_only=False):
generic.TorrentProvider.__init__(self, name)
self.cache = TorrentRssCache(self)
self.url = re.sub('\/$', '', url)
@ -47,11 +47,7 @@ class TorrentRssProvider(generic.TorrentProvider):
self.search_mode = search_mode
self.search_fallback = search_fallback
self.backlog_only = backlog_only
if cookies:
self.cookies = cookies
else:
self.cookies = ''
self.cookies = cookies
def configStr(self):
return self.name + '|' + self.url + '|' + self.cookies + '|' + str(int(self.enabled)) + '|' + self.search_mode + '|' + str(int(self.search_fallback)) + '|' + str(int(self.backlog_only))
@ -118,6 +114,9 @@ class TorrentRssProvider(generic.TorrentProvider):
if url.startswith('magnet:') and re.search('urn:btih:([\w]{32,40})', url):
return (True, 'RSS feed Parsed correctly')
else:
if self.cookies:
requests.utils.add_dict_to_cookiejar(self.session.cookies,
dict(x.rsplit('=', 1) for x in (self.cookies.split(';'))))
torrent_file = self.getURL(url)
try:
bdecode(torrent_file)
@ -130,30 +129,6 @@ class TorrentRssProvider(generic.TorrentProvider):
except Exception, e:
return (False, 'Error when trying to load RSS: ' + ex(e))
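validateRSS now loads the raw cookie string into the shared session before test-downloading a torrent. The 'k1=v1;k2=v2' to cookiejar conversion, as a standalone sketch with placeholder values:

    import requests

    def apply_cookie_string(session, cookie_string):
        # rsplit('=', 1) keeps '=' characters inside a cookie value intact
        if cookie_string:
            jar = dict(x.rsplit('=', 1) for x in cookie_string.split(';'))
            requests.utils.add_dict_to_cookiejar(session.cookies, jar)

    session = requests.session()
    apply_cookie_string(session, 'uid=1234;pass=secret')   # placeholder values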
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self.session = requests.Session()
if self.cookies:
requests.utils.add_dict_to_cookiejar(self.session.cookies,
dict(x.rsplit('=', 1) for x in (self.cookies.split(';'))))
try:
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
r = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if r.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
r.status_code) + ': ' + clients.http_error_code[r.status_code], logger.WARNING)
return None
return r.content
def dumpHTML(self, data):
dumpName = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'custom_torrent.html')
@ -179,10 +154,11 @@ class TorrentRssCache(tvcache.TVCache):
def _getRSSData(self):
logger.log(u"TorrentRssCache cache update URL: " + self.provider.url, logger.DEBUG)
request_headers = None
if self.provider.cookies:
request_headers = { 'Cookie': self.provider.cookies }
else:
request_headers = None
return self.getRSSFeed(self.provider.url, request_headers=request_headers)
def _parseItem(self, item):

View File

@ -69,8 +69,6 @@ class SCCProvider(generic.TorrentProvider):
self.categories = "c27=27&c17=17&c11=11"
self.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'}
def isEnabled(self):
return self.enabled
@ -178,14 +176,14 @@ class SCCProvider(generic.TorrentProvider):
foreignSearchURL = None
if mode == 'Season':
searchURL = self.urls['archive'] % (search_string)
data = [self.getURL(searchURL, headers=self.headers)]
data = [self.getURL(searchURL)]
else:
searchURL = self.urls['search'] % (search_string, self.categories)
nonsceneSearchURL = self.urls['nonscene'] % (search_string)
foreignSearchURL = self.urls['foreign'] % (search_string)
data = [self.getURL(searchURL, headers=self.headers),
self.getURL(nonsceneSearchURL, headers=self.headers),
self.getURL(foreignSearchURL, headers=self.headers)]
data = [self.getURL(searchURL),
self.getURL(nonsceneSearchURL),
self.getURL(foreignSearchURL)]
logger.log(u"Search string: " + nonsceneSearchURL, logger.DEBUG)
logger.log(u"Search string: " + foreignSearchURL, logger.DEBUG)
@ -222,9 +220,10 @@ class SCCProvider(generic.TorrentProvider):
title = link.string
if re.search('\.\.\.', title):
with BS4Parser(self.getURL(self.url + "/" + link['href'])) as details_html:
title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
data = self.getURL(self.url + "/" + link['href'])
if data:
with BS4Parser(data) as details_html:
title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
download_url = self.urls['download'] % url['href']
id = int(link['href'].replace('details?id=', ''))
seeders = int(result.find('td', attrs={'class': 'ttr_seeders'}).string)
@ -272,32 +271,6 @@ class SCCProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = {}
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, headers=headers, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -163,15 +163,12 @@ class SpeedCDProvider(generic.TorrentProvider):
post_data = dict({'/browse.php?': None, 'cata': 'yes', 'jxt': 4, 'jxw': 'b', 'search': search_string},
**self.categories[mode])
data = self.session.post(self.urls['search'], data=post_data, verify=False)
if not data:
parsedJSON = self.getURL(self.urls['search'], post_data=post_data, json=True)
if not parsedJSON:
continue
try:
# convert to json
data = data.json()
torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except:
continue
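The search endpoint answers with JSON nested as Fs -> Cn -> torrents, which is why the extraction sits inside a try/except: an empty 'Fs' list raises IndexError rather than falling back to a default. The shape below is inferred from the code, not from the site's documentation:

    parsedJSON = {'Fs': [{'Cn': {'torrents': [{'name': 'Show.S01E01'}]}}]}

    try:
        torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
    except (IndexError, AttributeError):
        torrents = []

    print(torrents)   # -> [{'name': 'Show.S01E01'}]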
@ -214,36 +211,6 @@ class SpeedCDProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
if sickbeard.PROXY_SETTING:
proxies = {
"http": sickbeard.PROXY_SETTING,
"https": sickbeard.PROXY_SETTING,
}
r = self.session.get(url, proxies=proxies, verify=False)
else:
r = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if r.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
r.status_code) + ': ' + clients.http_error_code[r.status_code], logger.WARNING)
return None
return r.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -116,6 +116,9 @@ class ThePirateBayProvider(generic.TorrentProvider):
fileURL = self.proxy._buildURL(self.url + 'ajax_details_filelist.php?id=' + str(torrent_id))
if self.proxy and self.proxy.isEnabled():
self.headers.update({'referer': self.proxy.getProxyURL()})
data = self.getURL(fileURL)
if not data:
return None
@ -222,6 +225,9 @@ class ThePirateBayProvider(generic.TorrentProvider):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if self.proxy and self.proxy.isEnabled():
self.headers.update({'referer': self.proxy.getProxyURL()})
for mode in search_params.keys():
for search_string in search_params[mode]:
@ -290,84 +296,6 @@ class ThePirateBayProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not headers:
headers = {}
if not self.session:
self.session = requests.Session()
# Glype Proxies does not support Direct Linking.
# We have to fake a search on the proxy site to get data
if self.proxy.isEnabled():
headers.update({'referer': self.proxy.getProxyURL()})
try:
if sickbeard.PROXY_SETTING:
proxies = {
"http": sickbeard.PROXY_SETTING,
"https": sickbeard.PROXY_SETTING,
}
r = self.session.get(url, headers=headers, proxies=proxies, verify=False)
else:
r = self.session.get(url, headers=headers, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return None
if r.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
r.status_code) + ': ' + clients.http_error_code[r.status_code], logger.WARNING)
return None
return r.content
def downloadResult(self, result):
"""
Save the result to disk.
"""
if not self.session:
self.session = requests.Session()
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
return False
try:
r = self.session.get('http://torcache.net/torrent/' + torrent_hash + '.torrent', verify=False)
except Exception, e:
logger.log("Unable to connect to TORCACHE: " + ex(e), logger.ERROR)
try:
logger.log("Trying TORRAGE cache instead")
r = self.session.get('http://torrage.com/torrent/' + torrent_hash + '.torrent', verify=False)
except Exception, e:
logger.log("Unable to connect to TORRAGE: " + ex(e), logger.ERROR)
return False
if not r.status_code == 200:
return False
magnetFileName = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
magnetFileContent = r.content
try:
with open(magnetFileName, 'wb') as fileOut:
fileOut.write(magnetFileContent)
helpers.chmodAsParent(magnetFileName)
except EnvironmentError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
logger.log(u"Saved magnet link to " + magnetFileName + " ", logger.MESSAGE)
return True
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -233,32 +233,6 @@ class TorrentBytesProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -194,13 +194,12 @@ class TorrentDayProvider(generic.TorrentProvider):
if self.freeleech:
post_data.update({'free': 'on'})
data = self.session.post(self.urls['search'], data=post_data, verify=False)
if not data:
parsedJSON = self.getURL(self.urls['search'], post_data=post_data, json=True)
if not parsedJSON:
continue
try:
data = data.json()
torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except:
continue
@ -237,29 +236,6 @@ class TorrentDayProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -230,32 +230,6 @@ class TorrentLeechProvider(generic.TorrentProvider):
return (title, url)
def getURL(self, url, post_data=None, headers=None, json=False):
if not self.session:
self._doLogin()
if not headers:
headers = []
try:
# Remove double-slashes from url
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
url = urlparse.urlunparse(parsed)
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
return None
if response.status_code != 200:
logger.log(self.name + u" page requested with url " + url + " returned status code is " + str(
response.status_code) + ': ' + clients.http_error_code[response.status_code], logger.WARNING)
return None
return response.content
def findPropers(self, search_date=datetime.datetime.today()):
results = []

View File

@ -23,14 +23,8 @@ class RSSFeeds:
with closing(Shove('sqlite:///' + self.db_name, compress=True)) as fs:
fc = cache.Cache(fs)
fc.purge(age)
except:
os.remove(self.db_name)
try:
with closing(Shove('sqlite:///' + self.db_name, compress=True)) as fs:
fc = cache.Cache(fs)
fc.purge(age)
except Exception as e:
logger.log(u"RSS cache error: " + ex(e), logger.DEBUG)
def getFeed(self, url, post_data=None, request_headers=None):
parsed = list(urlparse.urlparse(url))
@ -43,25 +37,18 @@ class RSSFeeds:
with closing(Shove('sqlite:///' + self.db_name, compress=True)) as fs:
fc = cache.Cache(fs)
feed = fc.fetch(url, False, False, request_headers)
except:
os.remove(self.db_name)
try:
with closing(Shove('sqlite:///' + self.db_name, compress=True)) as fs:
fc = cache.Cache(fs)
feed = fc.fetch(url, False, False, request_headers)
except Exception as e:
logger.log(u"RSS cache error: " + ex(e), logger.DEBUG)
feed = None
if not feed:
logger.log(u"RSS Error loading URL: " + url, logger.ERROR)
return
elif 'error' in feed.feed:
logger.log(u"RSS ERROR:[%s] CODE:[%s]" % (feed.feed['error']['description'], feed.feed['error']['code']),
logger.DEBUG)
return
elif not feed.entries:
logger.log(u"No RSS items found using URL: " + url, logger.WARNING)
return
if not feed or not feed.entries:
logger.log(u"RSS cache error loading url: " + url, logger.ERROR)
return
elif 'error' in feed.feed:
err_code = feed.feed['error']['code']
err_desc = feed.feed['error']['description']
logger.log(
u"RSS ERROR:[%s] CODE:[%s]" % (err_desc, err_code), logger.DEBUG)
return
return feed
except Exception as e:
logger.log(u"RSS cache error: " + ex(e), logger.DEBUG)

View File

@ -173,7 +173,6 @@ def retrieve_exceptions():
url = sickbeard.indexerApi(indexer).config['scene_url']
url_data = helpers.getURL(url)
if url_data is None:
# When urlData is None, trouble connecting to github
logger.log(u"Check scene exceptions update failed. Unable to get URL: " + url, logger.ERROR)
@ -299,16 +298,16 @@ def _xem_exceptions_fetcher():
url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickbeard.indexerApi(indexer).config[
'xem_origin']
url_data = helpers.getURL(url, json=True)
if url_data is None:
parsedJSON = helpers.getURL(url, json=True)
if not parsedJSON:
logger.log(u"Check scene exceptions update failed for " + sickbeard.indexerApi(
indexer).name + ", Unable to get URL: " + url, logger.ERROR)
continue
if url_data['result'] == 'failure':
if parsedJSON['result'] == 'failure':
continue
for indexerid, names in url_data['data'].items():
for indexerid, names in parsedJSON['data'].items():
xem_exception_dict[int(indexerid)] = names
setLastRefresh('xem')
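With getURL returning decoded JSON, the XEM exceptions fetcher reduces to a guard on the payload and its 'result' flag. A standalone approximation that talks to the endpoint directly via requests (the real code goes through helpers.getURL):

    import requests

    def fetch_xem_names(origin='tvdb'):
        url = 'http://thexem.de/map/allNames?origin=%s&seasonNumbers=1' % origin
        try:
            parsedJSON = requests.get(url, timeout=30).json()
        except (requests.exceptions.RequestException, ValueError):
            return {}
        if not parsedJSON or parsedJSON.get('result') == 'failure':
            return {}
        # keys arrive as strings; the exception dict is keyed on int indexer ids
        return dict((int(k), v) for k, v in parsedJSON['data'].items())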

View File

@ -21,25 +21,20 @@
# @copyright: Dermot Buckley
#
import time
import datetime
import traceback
import sickbeard
from lib.tmdb_api import TMDB
try:
import json
except ImportError:
from lib import simplejson as json
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard.exceptions import ex
from lib import requests
MAX_XEM_AGE_SECS = 86400 # 1 day
def get_scene_numbering(indexer_id, indexer, season, episode, fallback_to_xem=True):
"""
@ -196,7 +191,8 @@ def get_indexer_absolute_numbering(indexer_id, indexer, sceneAbsoluteNumber, fal
return sceneAbsoluteNumber
def set_scene_numbering(indexer_id, indexer, season=None, episode=None, absolute_number=None, sceneSeason=None, sceneEpisode=None, sceneAbsolute=None):
def set_scene_numbering(indexer_id, indexer, season=None, episode=None, absolute_number=None, sceneSeason=None,
sceneEpisode=None, sceneAbsolute=None):
"""
Set scene numbering for a season/episode.
To clear the scene numbering, leave both sceneSeason and sceneEpisode as None.
@ -332,7 +328,7 @@ def get_indexer_absolute_numbering_for_xem(indexer_id, indexer, sceneAbsoluteNum
else:
rows = myDB.select(
"SELECT absolute_number FROM tv_episodes WHERE indexer = ? and showid = ? and scene_absolute_number = ? and scene_season = ?",
[indexer, indexer_id, sceneAbsoluteNumber, scene_season])
[indexer, indexer_id, sceneAbsoluteNumber, scene_season])
if rows:
return int(rows[0]["absolute_number"])
@ -455,6 +451,7 @@ def get_xem_absolute_numbering_for_show(indexer_id, indexer):
return result
def xem_refresh(indexer_id, indexer, force=False):
"""
Refresh data from xem for a tv show
@ -467,77 +464,73 @@ def xem_refresh(indexer_id, indexer, force=False):
indexer_id = int(indexer_id)
indexer = int(indexer)
# XEM API URL
url = "http://thexem.de/map/all?id=%s&origin=%s&destination=scene" % (
indexer_id, sickbeard.indexerApi(indexer).config['xem_origin'])
MAX_REFRESH_AGE_SECS = 86400 # 1 day
myDB = db.DBConnection()
rows = myDB.select("SELECT last_refreshed FROM xem_refresh WHERE indexer = ? and indexer_id = ?",
[indexer, indexer_id])
if rows:
refresh = time.time() > (int(rows[0]['last_refreshed']) + MAX_XEM_AGE_SECS)
lastRefresh = int(rows[0]['last_refreshed'])
refresh = int(time.mktime(datetime.datetime.today().timetuple())) > lastRefresh + MAX_REFRESH_AGE_SECS
else:
refresh = True
if refresh or force:
logger.log(
u'Looking up XEM scene mapping using for show %s on %s' % (indexer_id, sickbeard.indexerApi(indexer).name,),
logger.DEBUG)
# mark refreshed
myDB.upsert("xem_refresh",
{'indexer': indexer,
'last_refreshed': int(time.mktime(datetime.datetime.today().timetuple()))},
{'indexer_id': indexer_id})
try:
logger.log(
u'Looking up XEM scene mapping for show %s on %s' % (indexer_id, sickbeard.indexerApi(indexer).name,),
logger.DEBUG)
data = requests.get("http://thexem.de/map/all?id=%s&origin=%s&destination=scene" % (
indexer_id, sickbeard.indexerApi(indexer).config['xem_origin'],), verify=False).json()
parsedJSON = sickbeard.helpers.getURL(url, json=True)
if not parsedJSON or parsedJSON == '':
logger.log(u'No XEM data for show "%s on %s"' % (indexer_id, sickbeard.indexerApi(indexer).name,), logger.MESSAGE)
return
if data is None or data == '':
logger.log(u'No XEM data for show "%s on %s", trying TVTumbler' % (
indexer_id, sickbeard.indexerApi(indexer).name,), logger.MESSAGE)
data = requests.get("http://show-api.tvtumbler.com/api/thexem/all?id=%s&origin=%s&destination=scene" % (
indexer_id, sickbeard.indexerApi(indexer).config['xem_origin'],), verify=False).json()
if data is None or data == '':
logger.log(u'TVTumbler also failed for show "%s on %s". giving up.' % (indexer_id, indexer,),
logger.MESSAGE)
return None
if 'success' in parsedJSON['result']:
cl = []
for entry in parsedJSON['data']:
if 'scene' in entry:
cl.append([
"UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
[entry['scene']['season'],
entry['scene']['episode'],
entry['scene']['absolute'],
indexer_id,
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['season'],
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['episode']
]])
if 'scene_2' in entry: # for doubles
cl.append([
"UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
[entry['scene_2']['season'],
entry['scene_2']['episode'],
entry['scene_2']['absolute'],
indexer_id,
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['season'],
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['episode']
]])
result = data
cl = []
if result:
cl.append(["INSERT OR REPLACE INTO xem_refresh (indexer, indexer_id, last_refreshed) VALUES (?,?,?)",
[indexer, indexer_id, time.time()]])
if 'success' in result['result']:
for entry in result['data']:
if 'scene' in entry:
cl.append([
"UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
[entry['scene']['season'],
entry['scene']['episode'],
entry['scene']['absolute'],
indexer_id,
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['season'],
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['episode']
]])
if 'scene_2' in entry: # for doubles
cl.append([
"UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
[entry['scene_2']['season'],
entry['scene_2']['episode'],
entry['scene_2']['absolute'],
indexer_id,
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['season'],
entry[sickbeard.indexerApi(indexer).config['xem_origin']]['episode']
]])
else:
logger.log(u'Failed to get XEM scene data for show %s from %s because "%s"' % (
indexer_id, sickbeard.indexerApi(indexer).name, result['message']), logger.DEBUG)
if len(cl) > 0:
myDB = db.DBConnection()
myDB.mass_action(cl)
else:
logger.log(u"Empty lookup result - no XEM data for show %s on %s" % (
indexer_id, sickbeard.indexerApi(indexer).name,), logger.DEBUG)
except Exception, e:
logger.log(u"Exception while refreshing XEM data for show " + str(indexer_id) + " on " + sickbeard.indexerApi(
indexer).name + ": " + ex(e), logger.WARNING)
logger.log(
u"Exception while refreshing XEM data for show " + str(indexer_id) + " on " + sickbeard.indexerApi(
indexer).name + ": " + ex(e), logger.WARNING)
logger.log(traceback.format_exc(), logger.DEBUG)
return None
if len(cl) > 0:
myDB = db.DBConnection()
myDB.mass_action(cl)
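The re-coded staleness test compares the stored epoch timestamp against now, with a missing row (or force=True) always triggering a refresh. Equivalent standalone logic:

    import time
    import datetime

    MAX_REFRESH_AGE_SECS = 86400  # 1 day

    def needs_xem_refresh(last_refreshed, force=False):
        # int(time.mktime(datetime.datetime.today().timetuple())) is the same
        # "now" expression the refresh code stores back into xem_refresh
        if force or last_refreshed is None:
            return True
        now = int(time.mktime(datetime.datetime.today().timetuple()))
        return now > last_refreshed + MAX_REFRESH_AGE_SECS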
def fix_xem_numbering(indexer_id, indexer):
"""
@ -553,12 +546,12 @@ def fix_xem_numbering(indexer_id, indexer):
# query = [{
# "name": self.show.name,
# "seasons": [{
# "episodes": [{
# "episode_number": None,
# "name": None
# }],
# "season_number": None,
# "seasons": [{
# "episodes": [{
# "episode_number": None,
# "name": None
# }],
# "season_number": None,
# }],
# "/tv/tv_program/number_of_seasons": [],
# "/tv/tv_program/number_of_episodes": [],

View File

@ -59,7 +59,6 @@ def _downloadResult(result):
# nzbs with an URL can just be downloaded from the provider
if result.resultType == "nzb":
newResult = resProvider.downloadResult(result)
# if it's an nzb data result
elif result.resultType == "nzbdata":
@ -83,18 +82,12 @@ def _downloadResult(result):
elif resProvider.providerType == "torrent":
newResult = resProvider.downloadResult(result)
else:
logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR)
return False
if newResult and sickbeard.USE_FAILED_DOWNLOADS:
ui.notifications.message('Episode snatched',
'<b>%s</b> snatched from <b>%s</b>' % (result.name, resProvider.name))
newResult = False
return newResult
def snatchEpisode(result, endStatus=SNATCHED):
"""
Contains the internal logic necessary to actually "snatch" a result that

View File

@ -35,7 +35,7 @@ search_queue_lock = threading.Lock()
BACKLOG_SEARCH = 10
DAILY_SEARCH = 20
FAILED_SEARCH = 30
MANUAL_SEARCH = 30
MANUAL_SEARCH = 40
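FAILED_SEARCH and MANUAL_SEARCH previously shared the value 30, so any queue code dispatching on the action id could not tell a manual search from a failed-download search; bumping MANUAL_SEARCH to 40 makes all four ids distinct:

    BACKLOG_SEARCH = 10
    DAILY_SEARCH = 20
    FAILED_SEARCH = 30
    MANUAL_SEARCH = 40   # was 30, colliding with FAILED_SEARCH

    assert len({BACKLOG_SEARCH, DAILY_SEARCH, FAILED_SEARCH, MANUAL_SEARCH}) == 4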
class SearchQueue(generic_queue.GenericQueue):

View File

@ -428,11 +428,10 @@ class QueueItemRefresh(ShowQueueItem):
self.show.populateCache()
# Load XEM data to DB for show
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer, force=self.force)
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.inProgress = False
class QueueItemRename(ShowQueueItem):
def __init__(self, show=None):
ShowQueueItem.__init__(self, ShowQueueActions.RENAME, show)

View File

@ -721,7 +721,7 @@ class TVShow(object):
if newStatus != None:
with curEp.lock:
logger.log(u"STATUS: we have an associated file, so setting the status from " + str(
curEp.status) + u" to DOWNLOADED/" + str(Quality.statusFromName(file)), logger.DEBUG)
curEp.status) + u" to DOWNLOADED/" + str(Quality.statusFromName(file, anime=self.is_anime)), logger.DEBUG)
curEp.status = Quality.compositeStatus(newStatus, newQuality)
with curEp.lock:
@ -1676,7 +1676,7 @@ class TVEpisode(object):
logger.log(
u"5 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)),
logger.DEBUG)
self.status = Quality.statusFromName(self.location)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
# shouldn't get here probably
else:
@ -1701,8 +1701,8 @@ class TVEpisode(object):
if self.status == UNKNOWN:
if sickbeard.helpers.isMediaFile(self.location):
logger.log(u"7 Status changes from " + str(self.status) + " to " + str(
Quality.statusFromName(self.location)), logger.DEBUG)
self.status = Quality.statusFromName(self.location)
Quality.statusFromName(self.location, anime=self.show.is_anime)), logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
nfoFile = sickbeard.helpers.replaceExtension(self.location, "nfo")
logger.log(str(self.show.indexerid) + u": Using NFO name " + nfoFile, logger.DEBUG)

View File

@ -106,16 +106,18 @@ class TVCache():
def updateCache(self):
if self.shouldUpdate() and self._checkAuth(None):
self._clearCache()
data = self._getRSSData()
# as long as the http request worked we count this as an update
if data:
self.setLastUpdate()
else:
data = self._getRSSData()
if not data:
return []
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
# parse data
if self._checkAuth(data):
cl = []
for item in data.entries:

View File

@ -1,47 +0,0 @@
'''
Created on Aug 26, 2013
Wrappers around tvtumbler access.
@author: dermot@buckley.ie
'''
import time
from sickbeard import helpers
from sickbeard import logger
try:
import json
except ImportError:
from lib import simplejson as json
UPDATE_INTERVAL = 432000 # 5 days
SHOW_LOOKUP_URL = 'http://show-api.tvtumbler.com/api/show'
_tvtumber_cache = {}
def show_info(indexer_id):
try:
cachedResult = _tvtumber_cache[str(indexer_id)]
if time.time() < (cachedResult['mtime'] + UPDATE_INTERVAL):
# cached result is still considered current, use it
return cachedResult['response']
# otherwise we just fall through to lookup
except KeyError:
pass # no cached value, just fall through to lookup
url = SHOW_LOOKUP_URL + '?indexer_id=' + str(indexer_id)
data = helpers.getURL(url, timeout=60) # give this a longer timeout b/c it may take a while
result = json.loads(data)
if not result:
logger.log(u"Empty lookup result -> failed to find show id", logger.DEBUG)
return None
if result['error']:
logger.log(u"Lookup failed: " + result['errorMessage'], logger.DEBUG)
return None
# result is good, store it for later
_tvtumber_cache[str(indexer_id)] = {'mtime': time.time(),
'response': result['show']}
return result['show']

View File

@ -163,21 +163,18 @@ class WindowsUpdateManager(UpdateManager):
regex = ".*SickRage\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip"
version_url_data = helpers.getURL(self.version_url)
if not version_url_data:
return
if version_url_data is None:
return None
else:
for curLine in version_url_data.splitlines():
logger.log(u"checking line " + curLine, logger.DEBUG)
match = re.match(regex, curLine)
if match:
logger.log(u"found a match", logger.DEBUG)
if whole_link:
return curLine.strip()
else:
return int(match.group(1))
return None
for curLine in version_url_data.splitlines():
logger.log(u"checking line " + curLine, logger.DEBUG)
match = re.match(regex, curLine)
if match:
logger.log(u"found a match", logger.DEBUG)
if whole_link:
return curLine.strip()
else:
return int(match.group(1))
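The flattened loop now returns as soon as a line matches the build-archive pattern. How the build number comes out of a matching line, with a placeholder URL:

    import re

    regex = ".*SickRage\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip"
    line = 'http://example.invalid/SickRage-win32-alpha-build502.zip'  # placeholder

    match = re.match(regex, line)
    if match:
        print(int(match.group(1)))   # -> 502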
def need_update(self):
self._cur_version = self._find_installed_version()

View File

@ -23,27 +23,25 @@ import os
import time
import urllib
import datetime
import threading
import re
import traceback
import sickbeard
import webserve
from sickbeard import db, logger, exceptions, history, ui, helpers
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import classes
from sickbeard.exceptions import ex
from sickbeard.common import SNATCHED, SNATCHED_PROPER, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN
from common import Quality, qualityPresetStrings, statusStrings
from sickbeard import image_cache
try:
import json
except ImportError:
from lib import simplejson as json
import xml.etree.cElementTree as etree
from lib import subliminal
dateFormat = "%Y-%m-%d"
@ -1530,7 +1528,7 @@ class CMD_SickBeardRestart(ApiCall):
class CMD_SickBeardSearchIndexers(ApiCall):
_help = {"desc": "search for show on the indexers with a given string and language",
"optionalParameters": {"name": {"desc": "name of the show you want to search for"},
"indexerid": {"desc": "thetvdb.com unique id of a show"},
"indexerid": {"desc": "thetvdb.com or tvrage.com unique id of a show"},
"lang": {"desc": "the 2 letter abbreviation lang id"}
}
}
@ -1555,31 +1553,30 @@ class CMD_SickBeardSearchIndexers(ApiCall):
def run(self):
""" search for show at tvdb with a given string and language """
if self.name and not self.indexerid: # only name was given
baseURL = "http://thetvdb.com/api/GetSeries.php?"
params = {"seriesname": str(self.name).encode('utf-8'), 'language': self.lang}
finalURL = baseURL + urllib.urlencode(params)
urlData = sickbeard.helpers.getURL(finalURL)
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
lINDEXER_API_PARMS['language'] = self.lang
lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsListUI
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
if urlData is None:
apiData = None
try:
apiData = t[str(self.name).encode()]
except Exception, e:
pass
if not apiData:
return _responds(RESULT_FAILURE, msg="Did not get result from tvdb")
else:
try:
seriesXML = etree.ElementTree(etree.XML(urlData))
except Exception, e:
logger.log(u"API :: Unable to parse XML for some reason: " + ex(e) + " from XML: " + urlData,
logger.ERROR)
return _responds(RESULT_FAILURE, msg="Unable to read result from tvdb")
series = seriesXML.getiterator('Series')
results = []
for curSeries in series:
results.append({"indexerid": int(curSeries.findtext('seriesid')),
"tvdbid": int(curSeries.findtext('seriesid')),
"name": curSeries.findtext('SeriesName'),
"first_aired": curSeries.findtext('FirstAired')})
results = []
for curSeries in apiData:
results.append({"indexerid": int(curSeries.findtext('seriesid')),
"tvdbid": int(curSeries.findtext('seriesid')),
"name": curSeries.findtext('SeriesName'),
"first_aired": curSeries.findtext('FirstAired')})
lang_id = self.valid_languages[self.lang]
return _responds(RESULT_SUCCESS, {"results": results, "langid": lang_id})
elif self.indexerid:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()

View File

@ -17,6 +17,7 @@
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import base64
import inspect
import traceback
@ -1429,8 +1430,7 @@ class ConfigGeneral(MainHandler):
use_api=None, api_key=None, indexer_default=None, timezone_display=None, cpu_preset=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None,
handle_reverse_proxy=None, sort_article=None, auto_update=None, notify_on_update=None,
proxy_setting=None,
anon_redirect=None, git_path=None, calendar_unprotected=None,
proxy_setting=None, anon_redirect=None, git_path=None, calendar_unprotected=None,
fuzzy_dating=None, trim_zero=None, date_preset=None, date_preset_na=None, time_preset=None,
indexer_timeout=None, play_videos=None):
@ -1539,7 +1539,6 @@ class ConfigBackupRestore(MainHandler):
def restore(self, backupFile=None):
finalResult = ''
if backupFile:
@ -2460,26 +2459,11 @@ class ConfigAnime(MainHandler):
results = []
if use_anidb == "on":
use_anidb = 1
else:
use_anidb = 0
if anidb_use_mylist == "on":
anidb_use_mylist = 1
else:
anidb_use_mylist = 0
if split_home == "on":
split_home = 1
else:
split_home = 0
sickbeard.USE_ANIDB = use_anidb
sickbeard.USE_ANIDB = config.checkbox_to_value(use_anidb)
sickbeard.ANIDB_USERNAME = anidb_username
sickbeard.ANIDB_PASSWORD = anidb_password
sickbeard.ANIDB_USE_MYLIST = anidb_use_mylist
sickbeard.ANIME_SPLIT_HOME = split_home
sickbeard.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist)
sickbeard.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home)
sickbeard.save_config()
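All three repeated if/else blocks collapse into config.checkbox_to_value, which normalizes what an HTML form posts for a checkbox ('on' when ticked, nothing at all otherwise) into the 1/0 the settings expect. A sketch of that helper's behaviour; the exact signature in sickbeard/config.py is assumed:

    def checkbox_to_value(option, value_on=1, value_off=0):
        # HTML checkboxes post 'on' when ticked and are absent (None) when not
        if option == 'on' or option is True:
            return value_on
        return value_off

    assert checkbox_to_value('on') == 1
    assert checkbox_to_value(None) == 0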