1
0
mirror of https://github.com/moparisthebest/SickRage synced 2024-12-12 11:02:21 -05:00

Fixed issues with newznab/torrent provider searches.

Fixed issues with browser launches at startup.
Fixed issues with feedparser and feedcache relating to provider searches.
This commit is contained in:
echel0n 2014-12-11 10:06:50 -08:00
parent 78bfc40757
commit 3d7e460079
15 changed files with 56 additions and 59 deletions

View File

@@ -343,7 +343,7 @@ class SickRage(object):
logger.ERROR)
if sickbeard.LAUNCH_BROWSER and not self.runAsDaemon:
logger.log(u"Launching browser and exiting", logger.ERROR)
sickbeard.launchBrowser(self.startPort)
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.startPort, sickbeard.WEB_ROOT)
os._exit(1)
if self.consoleLogging:
@@ -368,7 +368,7 @@ class SickRage(object):
# Launch browser
if sickbeard.LAUNCH_BROWSER and not (self.noLaunch or self.runAsDaemon):
sickbeard.launchBrowser(self.startPort)
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.startPort, sickbeard.WEB_ROOT)
# main loop
while (True):

View File

@@ -1840,13 +1840,12 @@ def save_config():
new_config.write()
def launchBrowser(startPort=None):
def launchBrowser(protocol='http', startPort=None, web_root='/'):
if not startPort:
startPort = WEB_PORT
if ENABLE_HTTPS:
browserURL = 'https://localhost:%d/' % (startPort)
else:
browserURL = 'http://localhost:%d/' % (startPort)
browserURL = '%s://localhost:%d%s' % (protocol, startPort, web_root)
try:
webbrowser.open(browserURL, 2, 1)
except:

View File

@@ -176,6 +176,8 @@ class Quality:
"""
Return The quality from the scene episode File
"""
if not name:
return Quality.UNKNOWN
name = os.path.basename(name)

View File

@@ -79,7 +79,7 @@ class Animezb(generic.NZBProvider):
logger.log(u"Search url: " + search_url, logger.DEBUG)
results = []
for curItem in self.cache.getRSSFeed(search_url, items=['entries']) or []:
for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
(title, url) = self._get_title_and_url(curItem)
if title and url:

View File

@@ -123,7 +123,7 @@ class EZRSSProvider(generic.TorrentProvider):
logger.log(u"Search string: " + search_url, logger.DEBUG)
results = []
for curItem in self.cache.getRSSFeed(search_url, items=['entries']) or []:
for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
(title, url) = self._get_title_and_url(curItem)

View File

@@ -74,7 +74,7 @@ class Fanzub(generic.NZBProvider):
logger.log(u"Search url: " + search_url, logger.DEBUG)
results = []
for curItem in self.cache.getRSSFeed(search_url, items=['entries']) or []:
for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
(title, url) = self._get_title_and_url(curItem)
if title and url:

View File

@@ -214,7 +214,7 @@ class GenericProvider:
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item) # @UnusedVariable
(title, url) = self._get_title_and_url(item)
quality = Quality.sceneQuality(title, anime)
return quality
@@ -236,18 +236,11 @@ class GenericProvider:
Returns: A tuple containing two strings representing title and URL respectively
"""
title = None
url = None
if 'title' in item:
title = item.title
title = item.get('title')
if title:
title = title.replace(' ', '.')
if 'link' in item:
url = item.link
url = item.get('link')
if url:
url = url.replace('&amp;', '&')

View File

@@ -297,7 +297,7 @@ class NewznabProvider(generic.NZBProvider):
if not self._checkAuthFromData(data):
break
for item in data.entries or []:
for item in data.get('entries', []):
(title, url) = self._get_title_and_url(item)
@@ -311,8 +311,8 @@ class NewznabProvider(generic.NZBProvider):
# get total and offset attribs
try:
if total == 0:
total = int(data.feed.newznab_response['total'] or 0)
offset = int(data.feed.newznab_response['offset'] or 0)
total = int(data['feed']['newznab_response']['total'] or 0)
offset = int(data['feed']['newznab_response']['offset'] or 0)
except AttributeError:
break
@@ -377,11 +377,18 @@ class NewznabProvider(generic.NZBProvider):
(title, url) = self._get_title_and_url(item)
if item.has_key('published_parsed') and item['published_parsed']:
result_date = item.published_parsed
if result_date:
result_date = datetime.datetime(*result_date[0:6])
else:
try:
result_date = datetime.datetime(*item['published_parsed'][0:6])
except AttributeError:
try:
result_date = datetime.datetime(*item['updated_parsed'][0:6])
except AttributeError:
try:
result_date = datetime.datetime(*item['created_parsed'][0:6])
except AttributeError:
try:
result_date = datetime.datetime(*item['date'][0:6])
except AttributeError:
logger.log(u"Unable to figure out the date for entry " + title + ", skipping it")
continue
@@ -425,12 +432,8 @@ class NewznabCache(tvcache.TVCache):
def _parseItem(self, item):
(title, url) = self._get_title_and_url(item)
attrs = item.newznab_attr
if not isinstance(attrs, list):
attrs = [item.newznab_attr]
tvrageid = 0
for attr in attrs:
for attr in item.get('newznab_attr', []):
if attr['name'] == 'tvrageid':
tvrageid = int(attr['value'] or 0)
break

View File

@@ -51,7 +51,7 @@ class NyaaProvider(generic.TorrentProvider):
return 'nyaatorrents.png'
def getQuality(self, item, anime=False):
title = item.title
title = item.get('title')
quality = Quality.sceneQuality(title, anime)
return quality
@@ -80,7 +80,7 @@ class NyaaProvider(generic.TorrentProvider):
logger.log(u"Search string: " + searchURL, logger.DEBUG)
results = []
for curItem in self.cache.getRSSFeed(searchURL, items=['entries']) or []:
for curItem in self.cache.getRSSFeed(searchURL, items=['entries'])['entries'] or []:
(title, url) = self._get_title_and_url(curItem)
if title and url:

View File

@@ -163,12 +163,12 @@ class OmgwtfnzbsCache(tvcache.TVCache):
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.title if item.title else None
title = item.get('title')
if title:
title = u'' + title
title = title.replace(' ', '.')
url = item.link if item.link else None
url = item.get('link')
if url:
url = url.replace('&amp;', '&')

View File

@@ -74,20 +74,18 @@ class TorrentRssProvider(generic.TorrentProvider):
def _get_title_and_url(self, item):
title, url = None, None
title = item.title
title = item.get('title')
if title:
title = u'' + title
title = title.replace(' ', '.')
attempt_list = [lambda: item.torrent_magneturi,
attempt_list = [lambda: item.get('torrent_magneturi'),
lambda: item.enclosures[0].href,
lambda: item.link]
lambda: item.get('link')]
url = None
for cur_attempt in attempt_list:
try:
url = cur_attempt()
@@ -95,9 +93,9 @@ class TorrentRssProvider(generic.TorrentProvider):
continue
if title and url:
return (title, url)
break
return (title, url)
return title, url
def validateRSS(self):

View File

@@ -60,13 +60,15 @@ class TvTorrentsProvider(generic.TorrentProvider):
return True
def _checkAuthFromData(self, data):
if not (data.entries and data.feed):
if not (data.get('entries') and data.get('feed')):
return self._checkAuth()
try:title = data['feed']['title']
except:return False
try:
title = data['feed']['title']
except:
return False
if "User can't be found" in title or "Invalid Hash" in title:
if title and ("User can't be found" in title or "Invalid Hash" in title):
logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(title),
logger.DEBUG)
raise AuthException(

View File

@@ -67,7 +67,7 @@ class WombleCache(tvcache.TVCache):
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data.feed.title != 'Invalid Link' else None
return data if data.get('feed') and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider()

View File

@@ -15,6 +15,7 @@ from sickbeard.exceptions import ex
from feedcache.cache import Cache
from sqliteshelf import SQLiteShelf
class RSSFeeds:
def __init__(self, db_name):
try:
@@ -47,8 +48,10 @@ class RSSFeeds:
resp = fc.fetch(url, False, False, request_headers)
for item in items:
try:data[item] = resp[item]
except:continue
try:
data[item] = resp[item]
except:
continue
finally:
self.rssDB.close()

View File

@@ -122,7 +122,7 @@ class TVCache():
self.setLastUpdate()
cl = []
for item in data['entries']:
for item in data.get('entries', []):
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
@@ -281,10 +281,7 @@ class TVCache():
def searchCache(self, episode, manualSearch=False):
neededEps = self.findNeededEpisodes(episode, manualSearch)
if len(neededEps) > 0:
return neededEps[episode]
else:
return []
return neededEps[episode] if len(neededEps) > 0 else []
def listPropers(self, date=None, delimiter="."):
myDB = self._getDB()