mirror of https://github.com/moparisthebest/SickRage synced 2024-12-12 11:02:21 -05:00

Revamped the failed handler code to fix a few bugs and send everything that failed directly to backlog

This commit is contained in:
echel0n 2014-03-19 07:59:34 -07:00
parent 31a63d41aa
commit 748ba6be71
5 changed files with 60 additions and 166 deletions
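The change that drives everything below is the FailedQueueItem constructor: it used to take a season (or air-by-date) segment plus a list of episode numbers, and now takes a single season-to-episode mapping, so each failed episode is queued as its own retry item. A minimal before/after sketch, paraphrased from the call sites in this diff (show_obj, segment, season and episode are placeholders):

    # before this commit: a segment plus a list of episode numbers
    search_queue.FailedQueueItem(show_obj, segment, [episode])

    # after this commit: a {season: episode} mapping per failed episode
    search_queue.FailedQueueItem(show_obj, {season: episode})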

View File

@@ -79,14 +79,9 @@ class FailedProcessor(object):
             self._log(u"Could not create show object. Either the show hasn't been added to SickBeard, or it's still loading (if SB was restarted recently)", logger.WARNING)
             raise exceptions.FailedProcessingFailed()
 
-        # figure out what segment the episode is in and remember it so we can backlog it
-        if self._show_obj.air_by_date:
-            segment = str(parsed.air_date)[:7]
-        else:
-            segment = parsed.season_number
-
-        cur_failed_queue_item = search_queue.FailedQueueItem(self._show_obj, segment, parsed.episode_numbers)
-        sickbeard.searchQueueScheduler.action.add_item(cur_failed_queue_item)
+        for episode in parsed.episode_numbers:
+            cur_failed_queue_item = search_queue.FailedQueueItem(self._show_obj, {parsed.season_number: episode})
+            sickbeard.searchQueueScheduler.action.add_item(cur_failed_queue_item)
 
         return True

View File

@@ -110,7 +110,7 @@ def hasFailed(release, size, provider="%"):
     return (len(sql_results) > 0)
 
-def revertEpisodes(show_obj, season, episodes):
+def revertEpisode(show_obj, season, episode=None):
     """Restore the episodes of a failed download to their original state"""
     myDB = db.DBConnection("failed.db")
     log_str = u""
@@ -119,24 +119,22 @@ def revertEpisodes(show_obj, season, episodes):
     # {episode: result, ...}
     history_eps = dict([(res["episode"], res) for res in sql_results])
 
-    if len(episodes) > 0:
-        for cur_episode in episodes:
-            try:
-                ep_obj = show_obj.getEpisode(season, cur_episode)
-            except exceptions.EpisodeNotFoundException, e:
-                log_str += _log_helper(u"Unable to create episode, please set its status manually: " + exceptions.ex(e), logger.WARNING)
-                continue
-
-            log_str += _log_helper(u"Reverting episode (%s, %s): %s" % (season, cur_episode, ep_obj.name))
+    if episode:
+        try:
+            ep_obj = show_obj.getEpisode(season, episode)
+            log_str += _log_helper(u"Reverting episode (%s, %s): %s" % (season, episode, ep_obj.name))
             with ep_obj.lock:
-                if cur_episode in history_eps:
+                if episode in history_eps:
                     log_str += _log_helper(u"Found in history")
-                    ep_obj.status = history_eps[cur_episode]['old_status']
+                    ep_obj.status = history_eps[episode]['old_status']
                 else:
                     log_str += _log_helper(u"WARNING: Episode not found in history. Setting it back to WANTED", logger.WARNING)
                     ep_obj.status = WANTED
 
                 ep_obj.saveToDB()
+        except exceptions.EpisodeNotFoundException, e:
+            log_str += _log_helper(u"Unable to create episode, please set its status manually: " + exceptions.ex(e), logger.WARNING)
 
     else:
         # Whole season
         log_str += _log_helper(u"Setting season to wanted: " + str(season))
@@ -152,21 +150,22 @@ def revertEpisodes(show_obj, season, episodes):
 
             ep_obj.saveToDB()
 
-def markFailed(show_obj, season, episodes):
+    return log_str
+
+def markFailed(show_obj, season, episode=None):
     log_str = u""
 
-    if len(episodes) > 0:
-        for cur_episode in episodes:
-            try:
-                ep_obj = show_obj.getEpisode(season, cur_episode)
-            except exceptions.EpisodeNotFoundException, e:
-                log_str += _log_helper(u"Unable to get episode, please set its status manually: " + exceptions.ex(e), logger.WARNING)
-                continue
-
+    if episode:
+        try:
+            ep_obj = show_obj.getEpisode(season, episode)
             with ep_obj.lock:
                 quality = Quality.splitCompositeStatus(ep_obj.status)[1]
                 ep_obj.status = Quality.compositeStatus(FAILED, quality)
                 ep_obj.saveToDB()
+        except exceptions.EpisodeNotFoundException, e:
+            log_str += _log_helper(u"Unable to get episode, please set its status manually: " + exceptions.ex(e), logger.WARNING)
 
     else:
         # Whole season
         for ep_obj in show_obj.getAllEpisodes(season):
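
The failed_history helpers move in the same direction: revertEpisode and markFailed now take an optional single episode and fall back to acting on the whole season when it is omitted, and revertEpisode now returns its accumulated log string. A hedged usage sketch (show_obj is assumed to be a show SickBeard has already loaded; the season/episode numbers are placeholders):

    from sickbeard import failed_history

    # mark one failed episode (season 3, episode 7) and revert it to its pre-snatch status
    failed_history.markFailed(show_obj, 3, 7)
    log_output = failed_history.revertEpisode(show_obj, 3, 7)

    # omitting the episode operates on the whole season
    failed_history.revertEpisode(show_obj, 3)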

View File

@@ -953,51 +953,4 @@ def suffix(d):
     return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
 
 def custom_strftime(format, t):
     return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))
-
-def retry(ExceptionToCheck, default=None, tries=4, delay=3, backoff=2, logger=None):
-    """Retry calling the decorated function using an exponential backoff.
-
-    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
-    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
-
-    :param ExceptionToCheck: the exception to check. may be a tuple of
-        excpetions to check
-    :type ExceptionToCheck: Exception or tuple
-    :param tries: number of times to try (not retry) before giving up
-    :type tries: int
-    :param delay: initial delay between retries in seconds
-    :type delay: int
-    :param backoff: backoff multiplier e.g. value of 2 will double the delay
-        each retry
-    :type backoff: int
-    :param logger: logger to use. If None, print
-    :type logger: logging.Logger instance
-    """
-    def deco_retry(f):
-        def f_retry(*args, **kwargs):
-            mtries, mdelay = tries, delay
-            try_one_last_time = True
-            while mtries > 1:
-                try:
-                    print args,kwargs
-                    return f(*args, **kwargs)
-                    try_one_last_time = False
-                    break
-                except ExceptionToCheck, e:
-                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
-                    if logger:
-                        logger.warning(msg)
-                    else:
-                        print msg
-                    time.sleep(mdelay)
-                    mtries -= 1
-                    mdelay *= backoff
-            if try_one_last_time:
-                try:
-                    return f(*args, **kwargs)
-                except ExceptionToCheck, e:
-                    return default
-            return
-
-        return f_retry  # true decorator
-
-    return deco_retry

View File

@@ -26,6 +26,7 @@ from sickbeard import db, logger, common, exceptions, helpers
 from sickbeard import generic_queue
 from sickbeard import search, failed_history, history
 from sickbeard import ui
+from sickbeard.common import Quality
 
 BACKLOG_SEARCH = 10
 RSS_SEARCH = 20
@@ -238,73 +239,42 @@ class BacklogQueueItem(generic_queue.QueueItem):
 
 class FailedQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment, episodes):
+    def __init__(self, show, segment):
         generic_queue.QueueItem.__init__(self, 'Retry', MANUAL_SEARCH)
         self.priority = generic_queue.QueuePriorities.HIGH
         self.thread_name = 'RETRY-' + str(show.indexerid)
         self.show = show
         self.segment = segment
-        self.episodes = episodes
         self.success = None
 
     def execute(self):
         generic_queue.QueueItem.execute(self)
 
-        season = self.segment
-
-        if self.show.air_by_date:
-            myDB = db.DBConnection()
-            season_year, season_month = map(int, season.split('-'))
-            min_date = datetime.date(season_year, season_month, 1)
-
-            # it's easier to just hard code this than to worry about rolling the year over or making a month length map
-            if season_month == 12:
-                max_date = datetime.date(season_year, 12, 31)
-            else:
-                max_date = datetime.date(season_year, season_month + 1, 1) - datetime.timedelta(days=1)
-
-            for episode in self.episodes:
-                season = myDB.fetch("SELECT season FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= ? AND episode = ?",
-                                    [self.show.indexerid, min_date.toordinal(), max_date.toordinal(), episode])
-
-        if self.episodes > 1:
-            for episode in self.episodes:
-                (release, provider) = failed_history.findRelease(self.show, season, episode)
-                if release:
-                    logger.log(u"Marking release as bad: " + release)
-                    failed_history.markFailed(self.show, season, [episode])
-                    failed_history.logFailed(release)
-                    history.logFailed(self.show.indexerid, season, episode, common.Quality.NONE, release, provider)
-                    failed_history.revertEpisodes(self.show, season, [episode])
-
-                # Single failed episode search
-                epObj = self.show.getEpisode(int(season), int(episode))
-                foundEpisode = search.findEpisode(epObj, manualSearch=True)
-
-                if not foundEpisode:
-                    ui.notifications.message('No downloads were found', "Couldn't find a download for <i>%s</i>" % epObj.prettyName())
-                    logger.log(u"Unable to find a download for " + epObj.prettyName())
-                else:
-                    # just use the first result for now
-                    logger.log(u"Downloading episode from " + foundEpisode.url)
-                    result = search.snatchEpisode(foundEpisode)
-                    providerModule = foundEpisode.provider
-                    if not result:
-                        ui.notifications.error('Error while attempting to snatch ' + foundEpisode.name + ', check your logs')
-                    elif providerModule == None:
-                        ui.notifications.error('Provider is configured incorrectly, unable to download')
-
-                    self.success = result
-            return
-
-        # Multiple failed episode search
-        results = search.findSeason(self.show, self.segment)
-
-        # download whatever we find
-        for curResult in results:
-            search.snatchEpisode(curResult)
-            time.sleep(5)
+        for season, episode in self.segment.iteritems():
+            epObj = self.show.getEpisode(season, episode)
+
+            (release, provider) = failed_history.findRelease(self.show, season, episode)
+            if release:
+                logger.log(u"Marking release as bad: " + release)
+                failed_history.markFailed(self.show, season, episode)
+                failed_history.logFailed(release)
+                history.logFailed(self.show.indexerid, season, episode, epObj.status, release, provider)
+
+            failed_history.revertEpisode(self.show, season, episode)
+
+        for season, episode in self.segment.iteritems():
+            epObj = self.show.getEpisode(season, episode)
+
+            if self.show.air_by_date:
+                results = search.findSeason(self.show, str(epObj.airdate)[:7])
+            else:
+                results = search.findSeason(self.show, season)
+
+            # download whatever we find
+            for curResult in results:
+                self.success = search.snatchEpisode(curResult)
+                time.sleep(5)
 
         self.finish()
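
With the queue item reworked, retrying a failed episode from anywhere in the code base comes down to building the {season: episode} segment and handing a FailedQueueItem to the search queue, which is what webserve.py does further down; execute() then marks the release bad, reverts the episode, and runs a season/backlog search, snatching whatever it finds. A small sketch, assuming show is an already-loaded show object and the usual sickbeard globals are initialised:

    import sickbeard
    from sickbeard import search_queue

    # season 3, episode 7 failed and should be retried via a backlog search
    retry_item = search_queue.FailedQueueItem(show, {3: 7})
    sickbeard.searchQueueScheduler.action.add_item(retry_item)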

View File

@@ -3075,7 +3075,8 @@ class Home:
         else:
             return _genericMessage("Error", errMsg)
 
-        segment_list = {}
+        wanted_segments = []
+        failed_segments = {}
 
         if eps != None:
@@ -3091,11 +3092,14 @@
                 return _genericMessage("Error", "Episode couldn't be retrieved")
 
             if int(status) in (WANTED, FAILED):
-                # figure out what segment the episode is in and remember it so we can backlog it
+                # figure out what episodes are wanted so we can backlog them
                 if epObj.show.air_by_date:
-                    segment_list.setdefault(str(epObj.airdate)[:7],[]).append(epObj.episode)
+                    wanted_segments.append(str(epObj.airdate)[:7])
                 else:
-                    segment_list.setdefault(epObj.season,[]).append(epObj.episode)
+                    wanted_segments.append(epObj.season)
+
+                # figure out what episodes failed so we can retry them
+                failed_segments.setdefault(epObj.season,[]).append(epObj.episode)
 
             with epObj.lock:
                 # don't let them mess up UNAIRED episodes
@@ -3116,26 +3120,26 @@
         if int(status) == WANTED:
             msg = "Backlog was automatically started for the following seasons of <b>" + showObj.name + "</b>:<br /><ul>"
-            for cur_segment, cur_episodes in segment_list.iteritems():
+            for cur_segment in wanted_segments:
                 msg += "<li>Season " + str(cur_segment) + "</li>"
 
                 logger.log(u"Sending backlog for " + showObj.name + " season " + str(cur_segment) + " because some eps were set to wanted")
                 cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment)
                 sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item)  # @UndefinedVariable
             msg += "</ul>"
 
-            if segment_list:
+            if wanted_segments:
                 ui.notifications.message("Backlog started", msg)
 
         if int(status) == FAILED:
             msg = "Retrying Search was automatically started for the following season of <b>" + showObj.name + "</b>:<br />"
 
-            for cur_segment, cur_episodes in segment_list.iteritems():
+            for cur_segment in failed_segments:
                 msg += "<li>Season " + str(cur_segment) + "</li>"
 
                 logger.log(u"Retrying Search for " + showObj.name + " season " + str(cur_segment) + " because some eps were set to failed")
-                cur_failed_queue_item = search_queue.FailedQueueItem(showObj, cur_segment, cur_episodes)
+                cur_failed_queue_item = search_queue.FailedQueueItem(showObj, cur_segment)
                 sickbeard.searchQueueScheduler.action.add_item(cur_failed_queue_item)  # @UndefinedVariable
             msg += "</ul>"
 
-            if segment_list:
+            if failed_segments:
                 ui.notifications.message("Retry Search started", msg)
 
         if direct:
@@ -3336,14 +3340,8 @@
         if isinstance(ep_obj, str):
             return json.dumps({'result': 'failure'})
 
-        # figure out what segment the episode is in and remember it so we can backlog it
-        if ep_obj.show.air_by_date:
-            segment = str(ep_obj.airdate)[:7]
-        else:
-            segment = ep_obj.season
-
         # make a queue item for it and put it on the queue
-        ep_queue_item = search_queue.FailedQueueItem(ep_obj.show, segment, [ep_obj.episode])
+        ep_queue_item = search_queue.FailedQueueItem(ep_obj.show, {ep_obj.season: ep_obj.episode})
         sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)  # @UndefinedVariable
 
         # wait until the queue item tells us whether it worked or not
@@ -3365,27 +3363,6 @@
         })
 
         return json.dumps({'result': 'failure'})
-
-        # try:
-        #
-        #
-        #     ui.notifications.message('Info', pp.log)
-        # except exceptions.FailedHistoryNotFoundException:
-        #     ui.notifications.error('Not Found Error', 'Couldn\'t find release in history. (Has it been over 30 days?)\n'
-        #                            'Can\'t mark it as bad.')
-        #     return json.dumps({'result': 'failure'})
-        # except exceptions.FailedHistoryMultiSnatchException:
-        #     ui.notifications.error('Multi-Snatch Error', 'The same episode was snatched again before the first one was done.\n'
-        #                            'Please cancel any downloads of this episode and then set it back to wanted.\n Can\'t continue.')
-        #     return json.dumps({'result': 'failure'})
-        # except exceptions.FailedProcessingFailed:
-        #     ui.notifications.error('Processing Failed', pp.log)
-        #     return json.dumps({'result': 'failure'})
-        # except Exception as e:
-        #     ui.notifications.error('Unknown Error', 'Unknown exception: ' + str(e))
-        #     return json.dumps({'result': 'failure'})
-        #
-        # return json.dumps({'result': 'success'})
 
 class UI: