More updates for RSS feed code

This commit is contained in:
echel0n 2014-04-25 18:39:43 -07:00
parent a455bcb7c6
commit 3ee6d45fcd
8 changed files with 41 additions and 63 deletions

View File

@@ -85,14 +85,13 @@ class DTTProvider(generic.TorrentProvider):
         logger.log(u"Search string: " + searchURL, logger.DEBUG)
-        data = self.getURL(searchURL)
+        data = self.getRSSFeed(searchURL)
         if not data:
             return []
         try:
-            parsedXML = parseString(data)
-            items = parsedXML.getElementsByTagName('item')
+            items = data.entries
         except Exception, e:
             logger.log(u"Error trying to load DTT RSS feed: " + ex(e), logger.ERROR)
             logger.log(u"RSS data: " + data, logger.DEBUG)
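For context, a minimal sketch of the feedparser flow that getRSSFeed now wraps; the URL here is illustrative, not from this commit:

    import feedparser

    feed = feedparser.parse('http://example.com/rss')  # illustrative URL
    # feed.entries replaces parseString(data).getElementsByTagName('item')
    for entry in feed.entries:
        print(entry.title)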
@@ -107,10 +106,8 @@ class DTTProvider(generic.TorrentProvider):
         return results

     def _get_title_and_url(self, item):
-        description_node = item.getElementsByTagName('description')[0]
-        title = get_xml_text(description_node).replace('_', '.').split(' (')[0]
-        url = item.getElementsByTagName('enclosure')[0].getAttribute('url')
+        title = item.title
+        url = item.enclosures[0].href
         return (title, url)
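The two-line replacement relies on feedparser mapping RSS item fields onto entry attributes; a sketch of the correspondence:

    entry = feed.entries[0]
    entry.title               # text of the item's <title> element
    entry.link                # the item's <link> URL
    entry.enclosures[0].href  # the url attribute of the item's first <enclosure>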
@@ -134,7 +131,7 @@ class DTTCache(tvcache.TVCache):
         url = self.provider.url + 'rss/allshows?' + urllib.urlencode(params)
         logger.log(u"DTT cache update URL: " + url, logger.DEBUG)
-        data = self.provider.getURL(url)
+        data = self.provider.getRSSFeed(url)
         return data

     def _parseItem(self, item):

View File

@@ -112,19 +112,13 @@ class EZRSSProvider(generic.TorrentProvider):
         logger.log(u"Search string: " + search_url, logger.DEBUG)

-        data = self.getURL(search_url)
+        data = self.getRSSFeed(search_url)

         if not data:
             logger.log(u"No data returned from " + search_url, logger.ERROR)
             return []

-        parsedXML = helpers.parse_xml(data)
-
-        if parsedXML is None:
-            logger.log(u"Error trying to load " + self.name + " RSS feed", logger.ERROR)
-            return []
-
-        items = parsedXML.findall('.//item')
+        items = data.entries

         results = []
@@ -178,7 +172,7 @@ class EZRSSCache(tvcache.TVCache):
         rss_url = self.provider.url + 'feed/'
         logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
-        data = self.provider.getURL(rss_url)
+        data = self.provider.getRSSFeed(rss_url)
         if not data:
             logger.log(u"No data returned from " + rss_url, logger.ERROR)

View File

@@ -23,6 +23,7 @@ import datetime
 import os
 import sys
 import re
+import urllib
 import urllib2
 import copy
 import itertools
@@ -119,18 +120,20 @@ class GenericProvider:
         return data

-    def getRSSFeed(self, url):
+    def getRSSFeed(self, url, post_data=None):
         parsed = list(urlparse.urlparse(url))
         parsed[2] = re.sub("/{2,}", "/", parsed[2])  # replace two or more / with one

-        f = feedparser.parse(url)
-        data = f.entries
+        if post_data:
+            url = url + 'api?' + urllib.urlencode(post_data)

-        if not data:
-            logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
+        f = feedparser.parse(url)
+
+        if not f:
+            logger.log(u"Error loading " + self.name + " URL: " + url, logger.ERROR)
             return None

-        return data
+        return f

     def downloadResult(self, result):
         """
@@ -226,11 +229,11 @@ class GenericProvider:
         Returns: A tuple containing two strings representing title and URL respectively
         """

-        title = helpers.get_xml_text(item.find('title'))
+        title = item.title

         if title:
             title = title.replace(' ', '.')

-        url = helpers.get_xml_text(item.find('link'))
+        url = item.link

         if url:
             url = url.replace('&amp;', '&')
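Taken together with the reworked getRSSFeed above, the generic flow looks like this hedged sketch; 'provider', the URL, and the search parameters are illustrative assumptions. Note feedparser fetches with a plain GET, so post_data is folded into the query string after 'api?':

    # Sketch only; names and parameters below are assumptions for illustration.
    feed = provider.getRSSFeed('http://example.com/', post_data={'t': 'tvsearch', 'q': 'Show.Name'})
    if feed:
        for entry in feed.entries:
            (title, url) = provider._get_title_and_url(entry)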

View File

@@ -231,7 +231,7 @@ class HDBitsCache(tvcache.TVCache):
             return []

     def _getRSSData(self):
-        return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON())
+        return self.provider.getRSSFeed(self.provider.rss_url, post_data=self.provider._make_post_data_JSON())

     def _parseItem(self, item):
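For reference, urllib.urlencode turns a dict of parameters into the query string that getRSSFeed now appends after 'api?'; keys and values here are illustrative:

    import urllib

    params = {'username': 'someuser', 'passkey': 'abc123'}  # illustrative values
    print(urllib.urlencode(params))  # e.g. username=someuser&passkey=abc123 (key order may vary)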

View File

@@ -285,14 +285,13 @@ class NewzbinProvider(generic.NZBProvider):
         item_list = []

         try:
-            parsedXML = parseString(data)
-            items = parsedXML.getElementsByTagName('item')
+            items = data.entries
         except Exception, e:
             logger.log("Error trying to load Newzbin RSS feed: " + ex(e), logger.ERROR)
             return []

         for cur_item in items:
-            title = helpers.get_xml_text(cur_item.getElementsByTagName('title')[0])
+            title = cur_item.title
             if title == 'Feeds Error':
                 raise exceptions.AuthException("The feed wouldn't load, probably because of invalid auth info")
             if sickbeard.USENET_RETENTION is not None:
@@ -345,7 +344,7 @@ class NewzbinProvider(generic.NZBProvider):
         url = self.url + "search/?%s" % urllib.urlencode(params)
         logger.log("Newzbin search URL: " + url, logger.DEBUG)

-        data = self.getURL(url)
+        data = self.getRSSFeed(url)

         return data

View File

@@ -166,14 +166,15 @@ class NewznabProvider(generic.NZBProvider):
         if parsedXML is None:
             return self._checkAuth()

-        if parsedXML.tag == 'error':
-            code = parsedXML.attrib['code']
-            if code == '100':
+        status = parsedXML.status
+        if status:
+            if status == 200:
+                return True
+            if status == 100:
                 raise AuthException("Your API key for " + self.name + " is incorrect, check your config.")
-            elif code == '101':
+            elif status == 101:
                 raise AuthException("Your account on " + self.name + " has been suspended, contact the administrator.")
-            elif code == '102':
+            elif status == 102:
                 raise AuthException(
                     "Your account isn't allowed to use the API on " + self.name + ", contact the administrator")
             else:
@@ -181,8 +182,6 @@ class NewznabProvider(generic.NZBProvider):
                     logger.ERROR)
                 return False
-
-        return True

     def _doSearch(self, search_params, show=None, max_age=0):
         self._checkAuth()
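The status checks above read feedparser's status attribute, which carries the HTTP status of the fetch when the feed was retrieved over the network; a hedged sketch (the getattr guard is an assumption, since the key is absent for parses that never hit HTTP):

    feed = feedparser.parse(search_url)
    status = getattr(feed, 'status', None)  # assumption: guard against a missing key
    if status == 200:
        pass  # fetch succeeded
    elif status in (100, 101, 102):
        pass  # handled as auth errors above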
@@ -206,31 +205,15 @@ class NewznabProvider(generic.NZBProvider):
         logger.log(u"Search url: " + search_url, logger.DEBUG)

-        data = self.getURL(search_url)
+        data = self.getRSSFeed(search_url)

         if not data:
             logger.log(u"No data returned from " + search_url, logger.ERROR)
             return []

-        # hack this in until it's fixed server side
-        if not data.startswith('<?xml'):
-            data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
-
-        parsedXML = helpers.parse_xml(data)
-
-        if parsedXML is None:
-            logger.log(u"Error trying to load " + self.name + " XML data", logger.ERROR)
-            return []
-
-        if self._checkAuthFromData(parsedXML):
-            if parsedXML.tag == 'rss':
-                items = parsedXML.findall('.//item')
-            else:
-                logger.log(u"Resulting XML from " + self.name + " isn't RSS, not parsing it", logger.ERROR)
-                return []
+        if self._checkAuthFromData(data):
+            items = data.entries

             results = []

             for curItem in items:
@@ -307,15 +290,15 @@ class NewznabCache(tvcache.TVCache):
         logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)

-        data = self.provider.getURL(rss_url)
+        data = self.provider.getRSSFeed(rss_url)

         if not data:
             logger.log(u"No data returned from " + rss_url, logger.ERROR)
             return None

         # hack this in until it's fixed server side
-        if data and not data.startswith('<?xml'):
-            data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
+        #if data and not data.startswith('<?xml'):
+        #    data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data

         return data

View File

@@ -86,10 +86,11 @@ class TorrentRssProvider(generic.TorrentProvider):
         if not data:
             return (False, 'No data returned from url: ' + self.url)

-        if not len(data) > 0:
+        items = data.entries
+        if not len(items) > 0:
             return (False, 'No items found in the RSS feed ' + self.url)

-        (title, url) = self._get_title_and_url(data[0])
+        (title, url) = self._get_title_and_url(items[0])

         if not title:
             return (False, 'Unable to get title from first item')

View File

@@ -105,8 +105,9 @@ class TVCache():
             self._clearCache()

             if self._checkAuth(data):
+                items = data.entries
                 cl = []
-                for item in data:
+                for item in items:
                     ci = self._parseItem(item)
                     if ci is not None:
                         cl.append(ci)
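With this change each cached item handed to _parseItem is a feedparser entry rather than an XML node; a hedged sketch of what a provider override can now assume (the body is illustrative, real implementations vary per provider):

    def _parseItem(self, item):
        # item is a feedparser entry; attribute access as in _get_title_and_url
        title = item.title
        url = item.link
        return self._addCacheEntry(title, url)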