mirror of https://github.com/moparisthebest/SickRage synced 2024-12-13 11:32:20 -05:00

Merge pull request #1395 from flightlevel/patch-5

Handles multi-page results and improves login handling
Alexandre Beloin 2015-02-23 17:09:35 -05:00
commit 32e39e0890
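
In outline, the multi-page handling works like this: fetch the first results page, read the pager links to learn how many pages exist (capped at 15 as a safety limit), then request each additional page via an &page=N parameter with a one-second pause between requests. Below is a minimal standalone sketch of that flow, assuming a made-up search URL and the same '<div class="pager">' markup; it uses plain requests/BeautifulSoup instead of the provider's own getURL/BS4Parser helpers.

# Sketch only: search_url is a placeholder, the real provider wraps this
# logic in self.getURL() and BS4Parser inside _doSearch().
import time

import requests
from bs4 import BeautifulSoup

def fetch_all_pages(session, search_url, max_pages=15):
    pages = []
    first = session.get(search_url).text
    pages.append(first)

    # Work out how many result pages exist from the pager links.
    soup = BeautifulSoup(first, 'html.parser')
    pager = soup.find('div', {'class': 'pager'})
    numbers = [int(a.text.strip())
               for a in (pager.find_all('a', href=True) if pager else [])
               if a.text.strip().isdigit()]
    last_page = min(max(numbers) if numbers else 0, max_pages)

    # The site counts pages from zero, so page "2" in the UI is &page=1;
    # page 0 is the response we already have.
    for i in range(1, last_page):
        time.sleep(1)  # be polite between requests
        pages.append(session.get(search_url + '&page=' + str(i)).text)
    return pages

The provider keeps the raw HTML of every fetched page in a list and runs the same row-extraction code over each of them.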


@@ -19,6 +19,7 @@
 import re
 import traceback
 import datetime
+import time
 import urlparse
 import sickbeard
 import generic
@@ -104,32 +105,36 @@ class FreshOnTVProvider(generic.TorrentProvider):
         try:
             response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
-        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
+        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
             logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
             return False
 
-        if re.search('Username does not exist in the userbase or the account is not confirmed yet.', response.text):
-            logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
-            return False
-
-        if re.search('DDoS protection by CloudFlare', response.text):
-            logger.log(u'Unable to login to ' + self.name + ' due to CloudFlare DDoS javascript check.', logger.ERROR)
-            return False
-
-        try:
-            if requests.utils.dict_from_cookiejar(self.session.cookies)['uid'] and requests.utils.dict_from_cookiejar(self.session.cookies)['pass']:
-                self._uid = requests.utils.dict_from_cookiejar(self.session.cookies)['uid']
-                self._hash = requests.utils.dict_from_cookiejar(self.session.cookies)['pass']
-
-                self.cookies = {'uid': self._uid,
-                                'pass': self._hash
-                                }
-                return True
-        except:
-            pass
-
-        logger.log(u'Unable to obtain cookie for FreshOnTV', logger.ERROR)
-        return False
+        if re.search('/logout.php', response.text):
+            logger.log(u'Login to ' + self.name + ' was successful.', logger.DEBUG)
+
+            try:
+                if requests.utils.dict_from_cookiejar(self.session.cookies)['uid'] and requests.utils.dict_from_cookiejar(self.session.cookies)['pass']:
+                    self._uid = requests.utils.dict_from_cookiejar(self.session.cookies)['uid']
+                    self._hash = requests.utils.dict_from_cookiejar(self.session.cookies)['pass']
+
+                    self.cookies = {'uid': self._uid,
+                                    'pass': self._hash
+                                    }
+                    return True
+            except:
+                logger.log(u'Unable to obtain cookie for FreshOnTV', logger.ERROR)
+                return False
+        else:
+            logger.log(u'Login to ' + self.name + ' was unsuccessful.', logger.DEBUG)
+
+            if re.search('Username does not exist in the userbase or the account is not confirmed yet.', response.text):
+                logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
+
+            if re.search('DDoS protection by CloudFlare', response.text):
+                logger.log(u'Unable to login to ' + self.name + ' due to CloudFlare DDoS javascript check.', logger.ERROR)
+
+            return False
 
     def _get_season_search_strings(self, ep_obj):
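
The reworked login above treats the presence of a '/logout.php' link in the response body as the sign of a successful login, and only then lifts the 'uid' and 'pass' cookies from the session for later torrent downloads. A rough sketch of that check in isolation, with a placeholder login URL and assumed form field names:

# Sketch only: base_url and the form field names are assumptions, not the
# provider's actual configuration.
import re

import requests

def login(base_url, username, password):
    session = requests.Session()
    params = {'username': username, 'password': password}  # field names assumed
    response = session.post(base_url + '/login.php', data=params, timeout=30, verify=False)

    if not re.search('/logout.php', response.text):
        return None  # the login page came back instead of the member area

    cookies = requests.utils.dict_from_cookiejar(session.cookies)
    if 'uid' in cookies and 'pass' in cookies:
        # Same pair the provider stores as self.cookies for download requests.
        return {'uid': cookies['uid'], 'pass': cookies['pass']}
    return None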
@@ -195,66 +200,116 @@ class FreshOnTVProvider(generic.TorrentProvider):
                 if isinstance(search_string, unicode):
                     search_string = unidecode(search_string)
 
                 searchURL = self.urls['search'] % (freeleech, search_string)
                 logger.log(u"Search string: " + searchURL, logger.DEBUG)
-
-                # returns top 15 results by default, expandable in user profile to 100
-                data = self.getURL(searchURL)
-                if not data:
-                    continue
-
-                try:
-                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
-                        torrent_table = html.find('table', attrs={'class': 'frame'})
-                        torrent_rows = torrent_table.findChildren('tr') if torrent_table else []
-
-                        #Continue only if one Release is found
-                        if len(torrent_rows) < 2:
-                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
-                                       logger.DEBUG)
-                            continue
-
-                        # skip colheader
-                        for result in torrent_rows[1:]:
-                            cells = result.findChildren('td')
-
-                            link = cells[1].find('a', attrs = {'class': 'torrent_name_link'})
-                            #skip if torrent has been nuked due to poor quality
-                            if cells[1].find('img', alt='Nuked') != None:
-                                continue
-                            torrent_id = link['href'].replace('/details.php?id=', '')
-
-                            try:
-                                if link.has_key('title'):
-                                    title = cells[1].find('a', {'class': 'torrent_name_link'})['title']
-                                else:
-                                    title = link.contents[0]
-
-                                download_url = self.urls['download'] % (torrent_id)
-                                id = int(torrent_id)
-
-                                seeders = int(cells[8].find('a', {'class': 'link'}).span.contents[0].strip())
-                                leechers = int(cells[9].find('a', {'class': 'link'}).contents[0].strip())
-                            except (AttributeError, TypeError):
-                                continue
-
-                            #Filter unseeded torrent
-                            if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
-                                continue
-
-                            if not title or not download_url:
-                                continue
-
-                            item = title, download_url, id, seeders, leechers
-                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
-
-                            items[mode].append(item)
-
-                except Exception, e:
-                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+                init_html = self.getURL(searchURL)
+                max_page_number = 0
+
+                if not init_html:
+                    logger.log(u"The opening search response from " + self.name + " is empty.",logger.DEBUG)
+                    continue
+
+                try:
+                    with BS4Parser(init_html, features=["html5lib", "permissive"]) as init_soup:
+
+                        #Check to see if there is more than 1 page of results
+                        pager = init_soup.find('div', {'class': 'pager'})
+                        if pager:
+                            page_links = pager.find_all('a', href=True)
+                        else:
+                            page_links = []
+
+                        if len(page_links) > 0:
+                            for lnk in page_links:
+                                link_text = lnk.text.strip()
+                                if link_text.isdigit():
+                                    page_int = int(link_text)
+                                    if page_int > max_page_number:
+                                        max_page_number = page_int
+
+                        #limit page number to 15 just in case something goes wrong
+                        if max_page_number > 15:
+                            max_page_number = 15
+                except:
+                    logger.log(u"BS4 parser unable to process response " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+                    continue
+
+                data_response_list = []
+                data_response_list.append(init_html)
+
+                #Freshon starts counting pages from zero, even though it displays numbers from 1
+                if max_page_number > 1:
+                    for i in range(1, max_page_number):
+                        time.sleep(1)
+                        page_searchURL = searchURL + '&page=' + str(i)
+                        logger.log(u"Search string: " + page_searchURL, logger.DEBUG)
+                        page_html = self.getURL(page_searchURL)
+
+                        if not page_html:
+                            logger.log(u"The search response for page number " + str(i) + " is empty." + self.name,logger.DEBUG)
+                            continue
+
+                        data_response_list.append(page_html)
+
+                try:
+
+                    for data_response in data_response_list:
+
+                        with BS4Parser(data_response, features=["html5lib", "permissive"]) as html:
+
+                            torrent_rows = html.findAll("tr", {"class": re.compile('torrent_[0-9]*')})
+
+                            #Continue only if a Release is found
+                            if len(torrent_rows) == 0:
+                                logger.log(u"The Data returned from " + self.name + " does not contain any torrent", logger.DEBUG)
+                                continue
+
+                            for individual_torrent in torrent_rows:
+
+                                #skip if torrent has been nuked due to poor quality
+                                if individual_torrent.find('img', alt='Nuked') != None:
+                                    continue
+
+                                try:
+                                    title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
+                                except:
+                                    logger.log(u"Unable to parse torrent title " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    continue
+
+                                try:
+                                    details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
+                                    id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
+                                    download_url = self.urls['download'] % (str(id))
+                                except:
+                                    logger.log(u"Unable to parse torrent id & download url " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    continue
+
+                                try:
+                                    seeders = int(individual_torrent.find('td', {'class': 'table_seeders'}).find('span').text.strip())
+                                except:
+                                    logger.log(u"Unable to parse torrent seeders content " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    seeders = 1
+
+                                try:
+                                    leechers = int(individual_torrent.find('td', {'class': 'table_leechers'}).find('a').text.strip())
+                                except:
+                                    logger.log(u"Unable to parse torrent leechers content " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    leechers = 0
+
+                                #Filter unseeded torrent
+                                if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
+                                    continue
+
+                                if not title or not download_url:
+                                    continue
+
+                                item = title, download_url, id, seeders, leechers
+                                logger.log(u"Found result: " + title + " (" + searchURL + ")", logger.DEBUG)
+
+                                items[mode].append(item)
+
+                except Exception as e:
+                    logger.log(u"Failed parsing " + " Traceback: " + traceback.format_exc(), logger.DEBUG)
 
             #For each search mode sort all the items by seeders
             items[mode].sort(key=lambda tup: tup[3], reverse=True)
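
Two details of the new parsing loop are worth calling out: the torrent id is taken from the tail of the details link with re.match('.*?([0-9]+)$', ...), and each mode's results are sorted by the seeder count stored at index 3 of the item tuple. A quick illustration with invented URLs and tuples:

# Sketch only: the sample details URL and item tuples below are made up.
import re

def torrent_id_from_details_url(details_url):
    # Non-greedy prefix, then the trailing run of digits anchored at the end.
    match = re.match('.*?([0-9]+)$', details_url)
    return int(match.group(1)) if match else None

print(torrent_id_from_details_url('/details.php?id=123456'))  # -> 123456

# items are (title, download_url, id, seeders, leechers) tuples
items = [('Show.A', 'url_a', 1, 5, 2), ('Show.B', 'url_b', 2, 40, 3)]
items.sort(key=lambda tup: tup[3], reverse=True)  # most-seeded first
print(items[0][0])  # -> 'Show.B'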