mirror of https://github.com/moparisthebest/SickRage synced 2024-12-12 11:02:21 -05:00

More updates for RSS feed code

echel0n 2014-04-25 18:39:43 -07:00
parent a455bcb7c6
commit 3ee6d45fcd
8 changed files with 41 additions and 63 deletions


@@ -85,14 +85,13 @@ class DTTProvider(generic.TorrentProvider):
         logger.log(u"Search string: " + searchURL, logger.DEBUG)
 
-        data = self.getURL(searchURL)
+        data = self.getRSSFeed(searchURL)
 
         if not data:
             return []
 
         try:
-            parsedXML = parseString(data)
-            items = parsedXML.getElementsByTagName('item')
+            items = data.entries
         except Exception, e:
             logger.log(u"Error trying to load DTT RSS feed: " + ex(e), logger.ERROR)
             logger.log(u"RSS data: " + data, logger.DEBUG)
@@ -107,10 +106,8 @@ class DTTProvider(generic.TorrentProvider):
         return results
 
     def _get_title_and_url(self, item):
-        description_node = item.getElementsByTagName('description')[0]
-        title = get_xml_text(description_node).replace('_', '.').split(' (')[0]
-        url = item.getElementsByTagName('enclosure')[0].getAttribute('url')
+        title = item.title
+        url = item.enclosures[0].href
         return (title, url)
@@ -134,7 +131,7 @@ class DTTCache(tvcache.TVCache):
         url = self.provider.url + 'rss/allshows?' + urllib.urlencode(params)
         logger.log(u"DTT cache update URL: " + url, logger.DEBUG)
 
-        data = self.provider.getURL(url)
+        data = self.provider.getRSSFeed(url)
 
         return data
 
     def _parseItem(self, item):
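For reference, the fields the new DTT code reads map directly onto feedparser's entry model: the RSS <title> becomes entry.title, and <enclosure> tags (where torrent links arrive) become a list of enclosure objects with an href attribute. A minimal sketch, with an illustrative URL:

    import feedparser

    # illustrative feed URL; the provider builds its own search and cache URLs
    feed = feedparser.parse('http://example.com/rss/allshows')
    for entry in feed.entries:
        title = entry.get('title')
        # torrent links arrive as RSS enclosures; not every entry has one
        enclosures = entry.get('enclosures', [])
        url = enclosures[0].href if enclosures else None
        print('%s -> %s' % (title, url))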


@@ -112,19 +112,13 @@ class EZRSSProvider(generic.TorrentProvider):
         logger.log(u"Search string: " + search_url, logger.DEBUG)
 
-        data = self.getURL(search_url)
+        data = self.getRSSFeed(search_url)
 
         if not data:
             logger.log(u"No data returned from " + search_url, logger.ERROR)
             return []
 
-        parsedXML = helpers.parse_xml(data)
-
-        if parsedXML is None:
-            logger.log(u"Error trying to load " + self.name + " RSS feed", logger.ERROR)
-            return []
-
-        items = parsedXML.findall('.//item')
+        items = data.entries
 
         results = []
@@ -178,7 +172,7 @@ class EZRSSCache(tvcache.TVCache):
         rss_url = self.provider.url + 'feed/'
         logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
 
-        data = self.provider.getURL(rss_url)
+        data = self.provider.getRSSFeed(rss_url)
 
         if not data:
             logger.log(u"No data returned from " + rss_url, logger.ERROR)


@@ -23,6 +23,7 @@ import datetime
 import os
 import sys
 import re
+import urllib
 import urllib2
 import copy
 import itertools
@@ -119,18 +120,20 @@ class GenericProvider:
         return data
 
-    def getRSSFeed(self, url):
+    def getRSSFeed(self, url, post_data=None):
         parsed = list(urlparse.urlparse(url))
         parsed[2] = re.sub("/{2,}", "/", parsed[2])  # replace two or more / with one
 
-        f = feedparser.parse(url)
-        data = f.entries
-        if not data:
-            logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
+        if post_data:
+            url = url + 'api?' + urllib.urlencode(post_data)
+
+        f = feedparser.parse(url)
+
+        if not f:
+            logger.log(u"Error loading " + self.name + " URL: " + url, logger.ERROR)
             return None
 
-        return data
+        return f
 
     def downloadResult(self, result):
         """
@@ -226,11 +229,11 @@ class GenericProvider:
         Returns: A tuple containing two strings representing title and URL respectively
         """
 
-        title = helpers.get_xml_text(item.find('title'))
+        title = item.title
         if title:
             title = title.replace(' ', '.')
 
-        url = helpers.get_xml_text(item.find('link'))
+        url = item.link
         if url:
             url = url.replace('&amp;', '&')


@@ -231,7 +231,7 @@ class HDBitsCache(tvcache.TVCache):
             return []
 
     def _getRSSData(self):
-        return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON())
+        return self.provider.getRSSFeed(self.provider.rss_url, post_data=self.provider._make_post_data_JSON())
 
     def _parseItem(self, item):


@@ -285,14 +285,13 @@ class NewzbinProvider(generic.NZBProvider):
         item_list = []
 
         try:
-            parsedXML = parseString(data)
-            items = parsedXML.getElementsByTagName('item')
+            items = data.entries
         except Exception, e:
             logger.log("Error trying to load Newzbin RSS feed: " + ex(e), logger.ERROR)
             return []
 
         for cur_item in items:
-            title = helpers.get_xml_text(cur_item.getElementsByTagName('title')[0])
+            title = cur_item.title
             if title == 'Feeds Error':
                 raise exceptions.AuthException("The feed wouldn't load, probably because of invalid auth info")
             if sickbeard.USENET_RETENTION is not None:
@@ -345,7 +344,7 @@ class NewzbinProvider(generic.NZBProvider):
         url = self.url + "search/?%s" % urllib.urlencode(params)
 
         logger.log("Newzbin search URL: " + url, logger.DEBUG)
 
-        data = self.getURL(url)
+        data = self.getRSSFeed(url)
 
         return data


@@ -166,14 +166,15 @@ class NewznabProvider(generic.NZBProvider):
         if parsedXML is None:
             return self._checkAuth()
 
-        if parsedXML.tag == 'error':
-            code = parsedXML.attrib['code']
-
-            if code == '100':
+        status = parsedXML.status
+        if status:
+            if status == 200:
+                return True
+            if status == 100:
                 raise AuthException("Your API key for " + self.name + " is incorrect, check your config.")
-            elif code == '101':
+            elif status == 101:
                 raise AuthException("Your account on " + self.name + " has been suspended, contact the administrator.")
-            elif code == '102':
+            elif status == 102:
                 raise AuthException(
                     "Your account isn't allowed to use the API on " + self.name + ", contact the administrator")
             else:
@@ -181,8 +182,6 @@ class NewznabProvider(generic.NZBProvider):
                     logger.ERROR)
                 return False
 
-        return True
-
     def _doSearch(self, search_params, show=None, max_age=0):
 
         self._checkAuth()
@@ -206,31 +205,15 @@ class NewznabProvider(generic.NZBProvider):
         logger.log(u"Search url: " + search_url, logger.DEBUG)
 
-        data = self.getURL(search_url)
+        data = self.getRSSFeed(search_url)
 
         if not data:
             logger.log(u"No data returned from " + search_url, logger.ERROR)
             return []
 
-        # hack this in until it's fixed server side
-        if not data.startswith('<?xml'):
-            data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
-
-        parsedXML = helpers.parse_xml(data)
-
-        if parsedXML is None:
-            logger.log(u"Error trying to load " + self.name + " XML data", logger.ERROR)
-            return []
-
-        if self._checkAuthFromData(parsedXML):
-
-            if parsedXML.tag == 'rss':
-                items = parsedXML.findall('.//item')
-            else:
-                logger.log(u"Resulting XML from " + self.name + " isn't RSS, not parsing it", logger.ERROR)
-                return []
+        if self._checkAuthFromData(data):
+
+            items = data.entries
 
             results = []
 
             for curItem in items:
@@ -307,15 +290,15 @@ class NewznabCache(tvcache.TVCache):
         logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
 
-        data = self.provider.getURL(rss_url)
+        data = self.provider.getRSSFeed(rss_url)
 
         if not data:
             logger.log(u"No data returned from " + rss_url, logger.ERROR)
             return None
 
         # hack this in until it's fixed server side
-        if data and not data.startswith('<?xml'):
-            data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
+        #if data and not data.startswith('<?xml'):
+        #    data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
 
         return data
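A brief aside on why the XML-declaration hack can be retired: feedparser accepts a raw string as readily as a URL and tolerates a missing <?xml ...?> declaration, detecting the encoding itself. A quick illustration with a made-up feed body:

    import feedparser

    # a feed body with no XML declaration, as some indexers returned it
    raw = ('<rss version="2.0"><channel><item>'
           '<title>Some.Show.S01E01</title>'
           '<link>http://example.com/nzb/1</link>'
           '</item></channel></rss>')

    feed = feedparser.parse(raw)
    print(feed.entries[0].title)  # Some.Show.S01E01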


@@ -86,10 +86,11 @@ class TorrentRssProvider(generic.TorrentProvider):
         if not data:
             return (False, 'No data returned from url: ' + self.url)
 
-        if not len(data) > 0:
+        items = data.entries
+        if not len(items) > 0:
             return (False, 'No items found in the RSS feed ' + self.url)
 
-        (title, url) = self._get_title_and_url(data[0])
+        (title, url) = self._get_title_and_url(items[0])
 
         if not title:
             return (False, 'Unable to get title from first item')
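The same validation flow, sketched as a standalone function; the name validate_rss and the final success message are illustrative, while the (bool, message) return convention mirrors the provider's:

    import feedparser

    def validate_rss(url):
        data = feedparser.parse(url)
        if not data:
            return (False, 'No data returned from url: ' + url)

        items = data.entries
        if not len(items) > 0:
            return (False, 'No items found in the RSS feed ' + url)

        title = items[0].get('title')
        if not title:
            return (False, 'Unable to get title from first item')

        return (True, 'Feed parsed and first item has a title')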


@@ -105,8 +105,9 @@ class TVCache():
         self._clearCache()
 
         if self._checkAuth(data):
+            items = data.entries
             cl = []
-            for item in data:
+            for item in items:
                 ci = self._parseItem(item)
                 if ci is not None:
                     cl.append(ci)
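Putting the pieces together, the cache update loop now works entirely on feedparser entries. A compact sketch in which parse_item stands in for a provider-specific _parseItem (both names here are illustrative):

    import feedparser

    def parse_item(entry):
        # stand-in for a provider's _parseItem: return the fields the cache
        # would store, or None to skip the entry
        title, link = entry.get('title'), entry.get('link')
        if not (title and link):
            return None
        return (title.replace(' ', '.'), link)

    def update_cache(url):
        data = feedparser.parse(url)
        cl = []
        for item in data.entries:
            ci = parse_item(item)
            if ci is not None:
                cl.append(ci)
        return cl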