Fix for manual and backlog download/search issues.

Removed multi-threading for now, as it caused more problems than it was worth.

Added match & snatch: a result matching any of the initial qualities is downloaded first and searching stops; if archive qualities exist, searching also stops once it hits the max quality from that list.
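
A minimal sketch of that rule as it lands in the new SnatchQueue.process_results (sickbeard/snatch_queue.py below); the standalone wrapper and the result/show arguments are illustrative stand-ins, while Quality.splitQuality is the existing helper:

from sickbeard.common import Quality

def should_snatch_now(result, show_obj):
    # initial ("any") and archive ("best") qualities from the show's settings
    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
    # archive qualities configured: stop searching once we hit their max
    if best_qualities and result.quality == max(best_qualities):
        return True
    # any initial-quality match is downloaded first and searching stops
    if any_qualities and result.quality in any_qualities:
        return True
    # no decisive match yet -- queue the result and keep looking
    return False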
echel0n 2014-05-11 05:49:07 -07:00
parent 82abad6f19
commit 1398c38275
27 changed files with 362 additions and 401 deletions


@ -13,9 +13,7 @@
#set $numGoodShows = len([x for x in $sickbeard.showList if x.paused == 0 and x.status != "Ended"])
#set $numDLEpisodes = $myDB.select("SELECT COUNT(*) FROM tv_episodes WHERE status IN ("+",".join([str(x) for x in $Quality.DOWNLOADED + [$ARCHIVED]])+") AND season != 0 and episode != 0 AND airdate <= "+$today+"")[0][0]
#set $numEpisodes = $myDB.select("SELECT COUNT(*) FROM tv_episodes WHERE season != 0 and episode != 0 AND (airdate != 1 OR status IN ("+",".join([str(x) for x in ($Quality.DOWNLOADED + $Quality.SNATCHED + $Quality.SNATCHED_PROPER) + [$ARCHIVED]])+")) AND airdate <= "+$today+" AND status != "+str($IGNORED)+"")[0][0]
<b>$numShows shows</b> ($numGoodShows active) | <b>$numDLEpisodes/$numEpisodes</b> episodes downloaded |
<b>Search</b>: <%=str(sickbeard.currentSearchScheduler.timeLeft()).split('.')[0]%> |
<!--<b>Update</b>: <%=str(sickbeard.updateScheduler.timeLeft()).split('.')[0]%> | //-->
<b>$numShows shows</b> ($numGoodShows active) | <b>$numDLEpisodes/$numEpisodes</b> episodes downloaded |
<b>Backlog</b>: $sbdatetime.sbdatetime.sbfdate($sickbeard.backlogSearchScheduler.nextRun())
</div>
<ul style="float:right;">


@ -86,7 +86,7 @@ def _worker(executor_reference, work_queue):
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers, name=None):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
@ -98,7 +98,6 @@ class ThreadPoolExecutor(_base.Executor):
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._name = name
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
@ -109,11 +108,16 @@ class ThreadPoolExecutor(_base.Executor):
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
name = None
if kwargs.has_key('name'):
name = kwargs.pop('name')
self._adjust_thread_count(name)
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
def _adjust_thread_count(self, name=None):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
@ -124,8 +128,8 @@ class ThreadPoolExecutor(_base.Executor):
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue),)
if self._name:
t.name = self._name
if name:
t.name = name
t.daemon = True
t.start()
self._threads.add(t)
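
With this patch the worker-thread name travels through submit() as a keyword argument instead of the constructor. A hedged usage sketch (the thread count and task are illustrative; compare the generic_queue.py call further down):

from lib.concurrent.futures.thread import ThreadPoolExecutor

def run_task():
    pass  # stand-in for queueItem.execute

executor = ThreadPoolExecutor(2)
# the patched submit() pops 'name' from kwargs and applies it to the worker
# thread, so it is never forwarded to run_task itself
future = executor.submit(run_task, name='SEARCHQUEUE-MANUAL-12345')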


@ -33,7 +33,7 @@ from sickbeard import providers, metadata, config
from providers import ezrss, tvtorrents, btn, newznab, womble, thepiratebay, torrentleech, kat, publichd, iptorrents, \
omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, nextgen, speedcd
from sickbeard.config import CheckSection, check_setting_int, check_setting_str, ConfigMigrator, naming_ep_type
from sickbeard import searchCurrent, searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
subtitles, traktWatchListChecker
from sickbeard import helpers, db, exceptions, show_queue, search_queue, scheduler, show_name_helpers
from sickbeard import logger
@ -74,7 +74,6 @@ PIDFILE = ''
DAEMON = None
backlogSearchScheduler = None
currentSearchScheduler = None
showUpdateScheduler = None
versionCheckScheduler = None
showQueueScheduler = None
@ -474,7 +473,7 @@ def initialize(consoleLogging=True):
global ACTUAL_LOG_DIR, LOG_DIR, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, USE_API, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, PREFER_EPISODE_RELEASES, ALLOW_HIGH_PRIORITY, TORRENT_METHOD, \
SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_HOST, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, NZBGET_USE_HTTPS, currentSearchScheduler, backlogSearchScheduler, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_RATIO, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, \
USE_XBMC, XBMC_ALWAYS_ON, XBMC_NOTIFY_ONSNATCH, XBMC_NOTIFY_ONDOWNLOAD, XBMC_NOTIFY_ONSUBTITLEDOWNLOAD, XBMC_UPDATE_FULL, XBMC_UPDATE_ONLYFIRST, \
XBMC_UPDATE_LIBRARY, XBMC_HOST, XBMC_USERNAME, XBMC_PASSWORD, \
@ -1018,12 +1017,6 @@ def initialize(consoleLogging=True):
newznabProviderList = providers.getNewznabProviderList(NEWZNAB_DATA)
providerList = providers.makeProviderList()
# initialize newznab providers
currentSearchScheduler = scheduler.Scheduler(searchCurrent.CurrentSearcher(),
cycleTime=datetime.timedelta(minutes=SEARCH_FREQUENCY),
threadName="SEARCH",
runImmediately=True)
# the interval for this is stored inside the ShowUpdater class
showUpdaterInstance = showUpdater.ShowUpdater()
showUpdateScheduler = scheduler.Scheduler(showUpdaterInstance,
@ -1039,7 +1032,8 @@ def initialize(consoleLogging=True):
showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(),
cycleTime=datetime.timedelta(seconds=3),
threadName="SHOWQUEUE",
silent=True)
silent=True,
runImmediately=True)
searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(),
cycleTime=datetime.timedelta(seconds=3),
@ -1094,7 +1088,7 @@ def initialize(consoleLogging=True):
def start():
global __INITIALIZED__, currentSearchScheduler, backlogSearchScheduler, \
global __INITIALIZED__, backlogSearchScheduler, \
showUpdateScheduler, versionCheckScheduler, showQueueScheduler, \
properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
subtitlesFinderScheduler, started, USE_SUBTITLES, \
@ -1104,8 +1098,11 @@ def start():
if __INITIALIZED__:
# start the search scheduler
currentSearchScheduler.thread.start()
# start the queue checker
showQueueScheduler.thread.start()
# start the version checker
versionCheckScheduler.thread.start()
# start the backlog scheduler
backlogSearchScheduler.thread.start()
@ -1113,12 +1110,6 @@ def start():
# start the show updater
showUpdateScheduler.thread.start()
# start the version checker
versionCheckScheduler.thread.start()
# start the queue checker
showQueueScheduler.thread.start()
# start the search queue checker
searchQueueScheduler.thread.start()
@ -1139,7 +1130,7 @@ def start():
def halt():
global __INITIALIZED__, currentSearchScheduler, backlogSearchScheduler, showUpdateScheduler, \
global __INITIALIZED__, backlogSearchScheduler, showUpdateScheduler, \
showQueueScheduler, properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
subtitlesFinderScheduler, started, \
traktWatchListCheckerSchedular
@ -1152,13 +1143,6 @@ def halt():
# abort all the threads
currentSearchScheduler.abort = True
logger.log(u"Waiting for the SEARCH thread to exit")
try:
currentSearchScheduler.thread.join(10)
except:
pass
backlogSearchScheduler.abort = True
logger.log(u"Waiting for the BACKLOG thread to exit")
try:


@ -163,7 +163,6 @@ def change_SEARCH_FREQUENCY(freq):
if sickbeard.SEARCH_FREQUENCY < sickbeard.MIN_SEARCH_FREQUENCY:
sickbeard.SEARCH_FREQUENCY = sickbeard.MIN_SEARCH_FREQUENCY
sickbeard.currentSearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.SEARCH_FREQUENCY)
sickbeard.backlogSearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.get_backlog_cycle_time())
def change_UPDATE_FREQUENCY(freq):


@ -57,6 +57,8 @@ class DBConnection:
self.connection.row_factory = sqlite3.Row
def checkDBVersion(self):
result = None
try:
result = self.select("SELECT db_version FROM db_version")
except sqlite3.OperationalError, e:


@ -20,8 +20,12 @@ import datetime
import threading
import Queue
import sickbeard
from lib.concurrent.futures.thread import ThreadPoolExecutor
from sickbeard import logger
class QueuePriorities:
LOW = 10
NORMAL = 20
@ -29,11 +33,12 @@ class QueuePriorities:
class GenericQueue:
def __init__(self):
#self.executor = ThreadPoolExecutor(sickbeard.NUM_OF_THREADS)
self.currentItem = None
self.thread = None
self.queue_name = "QUEUE"
self.min_priority = 0
self.queue = Queue.PriorityQueue()
self.queue = Queue.Queue()
def pause(self):
logger.log(u"Pausing queue")
@ -45,7 +50,7 @@ class GenericQueue:
def add_item(self, item):
item.added = datetime.datetime.now()
self.queue.put(item, item.priority)
self.queue.put(item)
return item
def run(self, queue=None):
@ -67,12 +72,10 @@ class GenericQueue:
return
threadName = self.queue_name + '-' + queueItem.get_thread_name()
self.thread = threading.Thread(None, queueItem.execute, threadName)
self.thread.start()
executor = ThreadPoolExecutor(sickbeard.NUM_OF_THREADS)
self.thread = executor.submit(queueItem.execute, name=threadName)
self.currentItem = queueItem
class QueueItem:
def __init__(self, name, action_id=0):
self.name = name


@ -213,7 +213,6 @@ class NameParser(object):
i = result = 0
for integer, numeral in numeral_map:
while n[i:i + len(numeral)] == numeral:
time.sleep(1)
result += integer
i += len(numeral)


@ -326,9 +326,8 @@ class BTNCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
else:
raise AuthException(


@ -236,12 +236,10 @@ class GenericProvider:
searchItems = {}
itemList = []
if manualSearch:
if not manualSearch:
self.cache.updateCache()
for epObj in episodes:
cacheResult = self.cache.searchCache(epObj, manualSearch)
if len(cacheResult):
results.update(cacheResult)


@ -355,9 +355,8 @@ class HDTorrentsCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -366,7 +365,7 @@ class HDTorrentsCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -301,9 +301,8 @@ class IPTorrentsCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -312,7 +311,7 @@ class IPTorrentsCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -424,15 +424,13 @@ class KATCache(tvcache.TVCache):
cl = []
for result in rss_results:
item = (result[0], result[1])
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -441,7 +439,7 @@ class KATCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -272,3 +272,59 @@ class NewznabCache(tvcache.TVCache):
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
def updateCache(self):
if not self.shouldUpdate():
return
if self._checkAuth(None):
data = self._getRSSData()
# as long as the http request worked we count this as an update
if data:
self.setLastUpdate()
else:
return []
# now that we've loaded the current RSS feed let's delete the old cache
logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
self._clearCache()
if self._checkAuth(data):
items = data.entries
ql = []
for item in items:
ci = self._parseItem(item)
if ci is not None:
ql.append(ci)
myDB = self._getDB()
myDB.mass_action(ql)
else:
raise AuthException(
u"Your authentication credentials for " + self.provider.name + " are incorrect, check your config")
return []
# overridden method that parses the rageid from the newznab feed
def _parseItem(self, item):
title = item.title
url = item.link
self._checkItemAuth(title, url)
if not title or not url:
logger.log(
u"The data returned from the " + self.provider.name + " feed is incomplete, this result is unusable",
logger.DEBUG)
return None
url = self._translateLinkURL(url)
logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)
return self._addCacheEntry(title, url)


@ -350,9 +350,8 @@ class NextGenCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -361,7 +360,7 @@ class NextGenCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -332,7 +332,7 @@ class PublicHDCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -340,9 +340,8 @@ class SCCCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -351,7 +350,7 @@ class SCCCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -290,7 +290,7 @@ class SpeedCDCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -421,9 +421,8 @@ class ThePirateBayCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -432,7 +431,7 @@ class ThePirateBayCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -302,9 +302,8 @@ class TorrentDayCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -313,7 +312,7 @@ class TorrentDayCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -301,9 +301,8 @@ class TorrentLeechCache(tvcache.TVCache):
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB = self._getDB()
myDB.mass_action(cl)
def _parseItem(self, item):
@ -312,7 +311,7 @@ class TorrentLeechCache(tvcache.TVCache):
if not title or not url:
return None
logger.log(u"Attempting to cache item:" + title, logger.DEBUG)
logger.log(u"Attempting to cache item:[" + title +"]", logger.DEBUG)
return self._addCacheEntry(title, url)


@ -367,9 +367,6 @@ def searchProviders(queueItem, show, season, episodes, curProvider, seasonSearch
foundResults = {}
finalResults = []
if manualSearch:
curProvider.cache.updateCache()
# convert indexer numbering to scene numbering for searches
map(lambda x: x.convertToSceneNumbering, episodes)
@ -404,7 +401,6 @@ def searchProviders(queueItem, show, season, episodes, curProvider, seasonSearch
highest_quality_overall = 0
for cur_episode in foundResults:
for cur_result in foundResults[cur_episode]:
cur_result.queue_item = queueItem
if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
highest_quality_overall = cur_result.quality
logger.log(u"The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall], logger.DEBUG)
@ -572,4 +568,5 @@ def searchProviders(queueItem, show, season, episodes, curProvider, seasonSearch
finalResults.append(pickBestResult(foundResults[curEp], show))
return finalResults
queueItem.results = finalResults
return queueItem


@ -22,113 +22,43 @@ import datetime
import Queue
import time
import traceback
import threading
import sickbeard
from sickbeard import db, logger, common, exceptions, helpers
from sickbeard import generic_queue, scheduler
from sickbeard import search, failed_history, history
from sickbeard import ui
from lib.concurrent import futures
from sickbeard.snatch_queue import SnatchQueue
from lib.concurrent.futures import as_completed
from lib.concurrent.futures.thread import ThreadPoolExecutor
search_queue_lock = threading.Lock()
BACKLOG_SEARCH = 10
RSS_SEARCH = 20
FAILED_SEARCH = 30
MANUAL_SEARCH = 30
SNATCH = 40
# snatch queues
ManualSnatchQueue = Queue.PriorityQueue()
RSSSnatchQueue = Queue.PriorityQueue()
BacklogSnatchQueue = Queue.PriorityQueue()
FailedSnatchQueue = Queue.PriorityQueue()
SearchItemQueue = Queue.PriorityQueue()
class SnatchQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SNATCHQUEUE"
def is_in_queue(self, show, episodes, quality):
for cur_item in self.queue.queue:
if cur_item.results.extraInfo[0] == show \
and cur_item.results.episodes.sort() == episodes.sort() \
and cur_item.results.quality >= quality:
return True
return False
def add_item(self, item):
# dynamically select our snatch queue
if item.type == 'RSSSearchQueueItem':
self.queue = RSSSnatchQueue
elif item.type == 'ManualSearchQueueItem':
self.queue = ManualSnatchQueue
elif item.type == 'BacklogQueueItem':
self.queue = BacklogSnatchQueue
elif item.type == 'FailedQueueItem':
self.queue = FailedSnatchQueue
else:
return
# check if we already have an item ready to snatch with same or better quality score
if not self.is_in_queue(item.results.extraInfo[0], item.results.episodes, item.results.quality):
generic_queue.GenericQueue.add_item(self, item)
else:
logger.log(
u"Not adding item [" + item.results.name + "] it's already in the queue with same or higher quality",
logger.DEBUG)
class SnatchQueueItem(generic_queue.QueueItem):
def __init__(self, results, queue_item):
generic_queue.QueueItem.__init__(self, 'Snatch', SNATCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.thread_name = 'SNATCH-' + str(results.extraInfo[0].indexerid)
self.results = results
self.success = None
self.queue_item = queue_item
self.type = queue_item.type
def execute(self):
generic_queue.QueueItem.execute(self)
# just use the first result for now
logger.log(u"Downloading " + self.results.name + " from " + self.results.provider.name)
result = search.snatchEpisode(self.results)
if self.type == "ManualSearchQueueItem":
providerModule = self.results.provider
if not result:
ui.notifications.error(
'Error while attempting to snatch ' + self.results.name + ', check your logs')
elif providerModule == None:
ui.notifications.error('Provider is configured incorrectly, unable to download')
self.success = result
self.queue_item.success = result
generic_queue.QueueItem.finish(self.queue_item)
generic_queue.QueueItem.finish(self)
class SearchQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SEARCHQUEUE"
self.queue = SearchItemQueue
def is_in_queue(self, show, segment):
for cur_item in self.queue.queue:
if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
return True
with search_queue_lock:
if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
return True
return False
def is_ep_in_queue(self, ep_obj):
for cur_item in self.queue.queue:
if isinstance(cur_item, ManualSearchQueueItem) and cur_item.ep_obj == ep_obj:
return True
with search_queue_lock:
if isinstance(cur_item, ManualSearchQueueItem) and cur_item.ep_obj == ep_obj:
return True
return False
def pause_backlog(self):
@ -143,15 +73,14 @@ class SearchQueue(generic_queue.GenericQueue):
def is_backlog_in_progress(self):
for cur_item in self.queue.queue + [self.currentItem]:
if isinstance(cur_item, BacklogQueueItem):
return True
with search_queue_lock:
if isinstance(cur_item, BacklogQueueItem):
return True
return False
def add_item(self, item):
if isinstance(item, RSSSearchQueueItem):
generic_queue.GenericQueue.add_item(self, item)
elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
if isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
generic_queue.GenericQueue.add_item(self, item)
elif isinstance(item, ManualSearchQueueItem) and not self.is_ep_in_queue(item.ep_obj):
generic_queue.GenericQueue.add_item(self, item)
@ -165,7 +94,6 @@ class ManualSearchQueueItem(generic_queue.QueueItem):
def __init__(self, ep_obj):
generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.type = self.__class__.__name__
self.thread_name = 'MANUAL-' + str(ep_obj.show.indexerid)
self.success = None
self.show = ep_obj.show
@ -174,113 +102,49 @@ class ManualSearchQueueItem(generic_queue.QueueItem):
def execute(self):
generic_queue.QueueItem.execute(self)
fs = []
didSearch = False
providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]
try:
with ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
for provider in providers:
didSearch = True
logger.log("Beginning manual search for [" + self.ep_obj.prettyName() + "] on " + provider.name)
executor.submit(
search.searchProviders, self, self.show, self.ep_obj.season, [self.ep_obj], provider, False,
True).add_done_callback(snatch_results)
executor.shutdown(wait=True)
except Exception, e:
for provider in providers:
logger.log("Beginning manual search for [" + self.ep_obj.prettyName() + "] on " + provider.name)
searchResult = search.searchProviders(self, self.show, self.ep_obj.season, [self.ep_obj], provider,
False,
True)
didSearch = True
if searchResult:
self.success = SnatchQueue().process_results(searchResult)
if self.success:
break
except Exception:
logger.log(traceback.format_exc(), logger.DEBUG)
stop = True
if not didSearch:
logger.log(
u"No NZB/Torrent providers found or enabled in your SickRage config. Please check your settings.",
logger.ERROR)
if ManualSnatchQueue.empty():
if not self.success:
ui.notifications.message('No downloads were found',
"Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyName())
logger.log(u"Unable to find a download for " + self.ep_obj.prettyName())
else:
# snatch all items in queue
scheduler.Scheduler(SnatchQueue(), silent=True, runOnce=True, queue=ManualSnatchQueue).thread.start()
self.finish()
def finish(self):
# don't let this linger if something goes wrong
if self.success == None:
self.success = False
generic_queue.QueueItem.finish(self)
class RSSSearchQueueItem(generic_queue.QueueItem):
def __init__(self):
generic_queue.QueueItem.__init__(self, 'RSS Search', RSS_SEARCH)
self.thread_name = 'RSSFEED'
self.type = self.__class__.__name__
def execute(self):
generic_queue.QueueItem.execute(self)
results = False
didSearch = False
self._changeMissingEpisodes()
providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]
try:
with ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
for provider in providers:
didSearch = True
logger.log("Beginning RSS Feed search on " + provider.name)
executor.submit(search.searchForNeededEpisodes, provider).add_done_callback(snatch_results)
executor.shutdown(wait=True)
except:
logger.log(traceback.format_exc(), logger.DEBUG)
if not didSearch:
logger.log(
u"No NZB/Torrent providers found or enabled in your SickRage config. Please check your settings.",
logger.ERROR)
if RSSSnatchQueue.empty():
logger.log(u"No needed episodes found on the RSS feeds")
else:
# snatch all items in queue
scheduler.Scheduler(SnatchQueue(), silent=True, runOnce=True, queue=RSSSnatchQueue).thread.start()
generic_queue.QueueItem.finish(self)
def _changeMissingEpisodes(self):
logger.log(u"Changing all old missing episodes to status WANTED")
curDate = datetime.date.today().toordinal()
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND airdate < ?",
[common.UNAIRED, curDate])
for sqlEp in sqlResults:
try:
show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
except exceptions.MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp["showid"]))
return None
if show == None:
logger.log(u"Unable to find the show with ID " + str(
sqlEp["showid"]) + " in your show list! DB value was " + str(sqlEp), logger.ERROR)
return None
ep = show.getEpisode(sqlEp["season"], sqlEp["episode"])
with ep.lock:
if ep.show.paused:
ep.status = common.SKIPPED
else:
ep.status = common.WANTED
ep.saveToDB()
class BacklogQueueItem(generic_queue.QueueItem):
def __init__(self, show, segment):
generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
self.priority = generic_queue.QueuePriorities.LOW
self.type = self.__class__.__name__
self.thread_name = 'BACKLOG-' + str(show.indexerid)
self.show = show
@ -315,7 +179,7 @@ class BacklogQueueItem(generic_queue.QueueItem):
def execute(self):
generic_queue.QueueItem.execute(self)
results = False
fs = []
didSearch = False
# check if we want to search for season packs instead of just season/episode
@ -327,15 +191,18 @@ class BacklogQueueItem(generic_queue.QueueItem):
providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]
try:
with ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
for provider in providers:
didSearch = True
logger.log("Beginning backlog search for [" + str(self.segment) + "] on " + provider.name)
executor.submit(
search.searchProviders, self, self.show, self.segment, self.wantedEpisodes, provider,
seasonSearch, False).add_done_callback(snatch_results)
executor.shutdown(wait=True)
except Exception, e:
for provider in providers:
logger.log("Beginning backlog search for [" + str(self.segment) + "] on " + provider.name)
searchResult = search.searchProviders(self, self.show, self.segment, self.wantedEpisodes, provider,
seasonSearch, False)
didSearch = True
if searchResult:
self.success = SnatchQueue().process_results(searchResult)
if self.success:
break
except Exception:
logger.log(traceback.format_exc(), logger.DEBUG)
if not didSearch:
@ -343,11 +210,8 @@ class BacklogQueueItem(generic_queue.QueueItem):
u"No NZB/Torrent providers found or enabled in your SickRage config. Please check your settings.",
logger.ERROR)
if BacklogSnatchQueue.empty():
if not self.success:
logger.log(u"No needed episodes found during backlog search")
else:
# snatch all items in queue
scheduler.Scheduler(SnatchQueue(), silent=True, runOnce=True, queue=BacklogSnatchQueue).thread.start()
self.finish()
@ -356,8 +220,6 @@ class BacklogQueueItem(generic_queue.QueueItem):
# check through the list of statuses to see if we want any
for curStatusResult in statusResults:
time.sleep(1)
curCompositeStatus = int(curStatusResult["status"])
curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
episode = int(curStatusResult["episode"])
@ -380,7 +242,6 @@ class FailedQueueItem(generic_queue.QueueItem):
def __init__(self, show, episodes):
generic_queue.QueueItem.__init__(self, 'Retry', FAILED_SEARCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.type = self.__class__.__name__
self.thread_name = 'RETRY-' + str(show.indexerid)
self.show = show
self.episodes = episodes
@ -389,13 +250,11 @@ class FailedQueueItem(generic_queue.QueueItem):
def execute(self):
generic_queue.QueueItem.execute(self)
results = False
fs = []
didSearch = False
episodes = []
for i, epObj in enumerate(episodes):
time.sleep(1)
logger.log(
"Beginning failed download search for " + epObj.prettyName())
@ -412,14 +271,16 @@ class FailedQueueItem(generic_queue.QueueItem):
providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]
try:
with ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
for provider in providers:
didSearch = True
executor.submit(
search.searchProviders, self, self.show, self.episodes[0].season, self.episodes, provider,
False,
True).add_done_callback(snatch_results)
executor.shutdown(wait=True)
for provider in providers:
searchResult = search.searchProviders(self.show, self.episodes[0].season, self.episodes, provider,
False, True)
didSearch = True
if searchResult:
self.success = SnatchQueue().process_results(searchResult)
if self.success:
break
except Exception, e:
logger.log(traceback.format_exc(), logger.DEBUG)
@ -428,17 +289,7 @@ class FailedQueueItem(generic_queue.QueueItem):
u"No NZB/Torrent providers found or enabled in your SickRage config. Please check your settings.",
logger.ERROR)
if FailedSnatchQueue.empty():
if not self.success:
logger.log(u"No needed episodes found on the RSS feeds")
else:
# snatch all items in queue
scheduler.Scheduler(SnatchQueue(), silent=True, runOnce=True, queue=FailedSnatchQueue).thread.start()
self.finish()
# send to snatch queue
def snatch_results(f):
for result in f.result():
snatch_queue_item = SnatchQueueItem(result, result.queue_item)
SnatchQueue().add_item(snatch_queue_item)
self.finish()


@ -19,6 +19,7 @@
from __future__ import with_statement
import traceback
import threading
import Queue
import sickbeard
@ -31,18 +32,18 @@ from sickbeard import generic_queue
from sickbeard import name_cache
from sickbeard.exceptions import ex
ShowItemQueue = Queue.PriorityQueue()
show_queue_lock = threading.Lock()
show_queue = Queue.Queue()
class ShowQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SHOWQUEUE"
self.queue = ShowItemQueue
self.queue = show_queue
def _isInQueue(self, show, actions):
return show in [x.show for x in self.queue.queue if x.action_id in actions] if self.queue.qsize() > 0 else []
return show in [x.show for x in self.queue.get() if x.action_id in actions] if not self.queue.empty() else []
def _isBeingSomethinged(self, show, actions):
return self.currentItem != None and show == self.currentItem.show and \
@ -76,7 +77,8 @@ class ShowQueue(generic_queue.GenericQueue):
return self._isBeingSomethinged(show, (ShowQueueActions.SUBTITLE,))
def _getLoadingShowList(self):
return [x for x in self.queue.queue + [self.currentItem] if x != None and x.isLoading] if self.queue.qsize() > 0 else []
return [x for x in self.queue.get() if x != None and x.isLoading] + [self.currentItem] if not self.queue.empty() else []
loadingShowList = property(_getLoadingShowList)
@ -180,8 +182,7 @@ class ShowQueueItem(generic_queue.QueueItem):
self.show = show
def isInQueue(self):
return self in sickbeard.showQueueScheduler.action.queue + [
sickbeard.showQueueScheduler.action.currentItem] #@UndefinedVariable
return self in sickbeard.showQueueScheduler.action.queue.queue + [sickbeard.showQueueScheduler.action.currentItem]
def _getName(self):
return str(self.show.indexerid)

sickbeard/snatch_queue.py (new file, 83 additions)

@ -0,0 +1,83 @@
import Queue
import threading
import sickbeard
from sickbeard import logger, search, generic_queue, ui
from sickbeard.common import Quality
snatch_queue_lock = threading.Lock()
class SnatchQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SNATCHQUEUE"
# snatch queues
self.ManualQueue = Queue.Queue()
self.BacklogQueue = Queue.Queue()
self.FailedQueue = Queue.Queue()
def is_in_queue(self, queue, show, episodes, quality):
for i, cur_item in enumerate(queue.queue):
if cur_item.results.show == show and cur_item.results.episodes.sort() == episodes.sort():
if cur_item.results.quality < quality:
queue.queue.pop(i)
return False
return True
return False
def add_item(self, item):
resultsKeep = []
for result in item.results:
show = result.extraInfo[0]
episodes = result.episodes
quality = result.quality
# check if we already have an item ready to snatch with same or better quality score
if not self.is_in_queue(self.queue, show, episodes, quality):
generic_queue.GenericQueue.add_item(self, item)
resultsKeep.append(result)
logger.log(
u"Adding item [" + result.name + "] to snatch queue",
logger.DEBUG)
else:
logger.log(
u"Not adding item [" + result.name + "] it's already in the queue with same or higher quality",
logger.DEBUG)
# update item with new results we want to snatch and discard the rest
item.results = resultsKeep
def snatch_item(self, item):
for result in item.results:
# just use the first result for now
logger.log(u"Downloading " + result.name + " from " + result.provider.name)
status = search.snatchEpisode(result)
item.success = status
generic_queue.QueueItem.finish(item)
return status
def process_results(self, item):
# dynamically select our snatch queue
if isinstance(item, sickbeard.search_queue.ManualSearchQueueItem):
self.queue = self.ManualQueue
elif isinstance(item, sickbeard.search_queue.BacklogQueueItem):
self.queue = self.BacklogQueue
elif isinstance(item, sickbeard.search_queue.FailedQueueItem):
self.queue = self.FailedQueue
for result in item.results:
logger.log(u"Checking if we should snatch " + result.name, logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
# if there is a redownload that's higher than this then we definitely need to keep looking
if best_qualities and result.quality == max(best_qualities):
return self.snatch_item(item)
# if there's no redownload that's higher (above) and this is the highest initial download then we're good
elif any_qualities and result.quality in any_qualities:
return self.snatch_item(item)
# Add item to queue if we couldn't find a match to snatch
self.add_item(item)
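
For reference, the search queue items now feed this class roughly as follows (condensed from the ManualSearchQueueItem changes above; names as in search_queue.py):

searchResult = search.searchProviders(self, self.show, self.ep_obj.season,
                                      [self.ep_obj], provider, False, True)
if searchResult:
    # snatches immediately on a max-archive or initial-quality match,
    # otherwise queues the results and lets the search continue
    self.success = SnatchQueue().process_results(searchResult)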


@ -15,6 +15,9 @@
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import time
@ -23,23 +26,24 @@ import sqlite3
import urllib
import urlparse
import re
import threading
import sickbeard
from shove import Shove
from feedcache import cache
from lib.shove import Shove
from lib.feedcache import cache
from sickbeard import db
from sickbeard import logger
from sickbeard.common import Quality
from sickbeard import helpers, show_name_helpers
from sickbeard.exceptions import MultipleShowObjectsException, ex
from sickbeard.exceptions import MultipleShowObjectsException
from sickbeard.exceptions import AuthException
from sickbeard import encodingKludge as ek
from name_parser.parser import NameParser, InvalidNameException
cache_lock = threading.Lock()
class CacheDBConnection(db.DBConnection):
def __init__(self, providerName):
@ -93,6 +97,40 @@ class TVCache():
def _checkItemAuth(self, title, url):
return True
def updateCache(self):
if not self.shouldUpdate():
return
if self._checkAuth(None):
data = self._getRSSData()
# as long as the http request worked we count this as an update
if data:
self.setLastUpdate()
else:
return []
# now that we've loaded the current RSS feed let's delete the old cache
logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
self._clearCache()
if self._checkAuth(data):
items = data.entries
cl = []
for item in items:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
myDB = self._getDB()
myDB.mass_action(cl)
else:
raise AuthException(
u"Your authentication credentials for " + self.provider.name + " are incorrect, check your config")
return []
def getRSSFeed(self, url, post_data=None):
# create provider storage cache
storage = Shove('sqlite:///' + ek.ek(os.path.join, sickbeard.CACHE_DIR, self.provider.name) + '.db')
@ -121,50 +159,16 @@ class TVCache():
return f
def updateCache(self):
if not self.shouldUpdate():
return
if self._checkAuth(None):
data = self._getRSSData()
# as long as the http request worked we count this as an update
if data:
self.setLastUpdate()
else:
return []
# now that we've loaded the current RSS feed let's delete the old cache
logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
self._clearCache()
if self._checkAuth(data):
items = data.entries
ql = []
for item in items:
qi = self._parseItem(item)
if qi is not None:
ql.append(qi)
if len(ql):
myDB = self._getDB()
myDB.mass_action(ql)
else:
raise AuthException(
u"Your authentication credentials for " + self.provider.name + " are incorrect, check your config")
return []
def _translateTitle(self, title):
return title.replace(' ', '.')
def _translateLinkURL(self, url):
return url.replace('&amp;', '&')
def _parseItem(self, item):
def _parseItem(self, item):
title = item.title
url = item.link
@ -197,8 +201,8 @@ class TVCache():
return datetime.datetime.fromtimestamp(lastTime)
def setLastUpdate(self, toDate=None):
def setLastUpdate(self, toDate=None):
if not toDate:
toDate = datetime.datetime.today()
@ -207,8 +211,10 @@ class TVCache():
{'time': int(time.mktime(toDate.timetuple()))},
{'provider': self.providerID})
lastUpdate = property(_getLastUpdate)
def shouldUpdate(self):
# if we've updated recently then skip the update
if datetime.datetime.today() - self.lastUpdate < datetime.timedelta(minutes=self.minTime):
@ -218,13 +224,10 @@ class TVCache():
return True
def _addCacheEntry(self, name, url, quality=None):
cacheResult = sickbeard.name_cache.retrieveNameFromCache(name)
if cacheResult:
logger.log(u"Found Indexer ID:[" + repr(cacheResult) + "], using that for [" + str(name) + "}",
logger.DEBUG)
return None
def _addCacheEntry(self, name, url, quality=None):
indexerid = None
in_cache = False
# if we don't have complete info then parse the filename to get it
try:
@ -242,9 +245,34 @@ class TVCache():
logger.log(u"No series name retrieved from " + name + ", unable to cache it", logger.DEBUG)
return None
showObj = sickbeard.name_cache.retrieveShowFromCache(parse_result.series_name)
cacheResult = sickbeard.name_cache.retrieveNameFromCache(name)
if cacheResult:
in_cache = True
indexerid = int(cacheResult)
if not indexerid:
name_list = show_name_helpers.sceneToNormalShowNames(parse_result.series_name)
for cur_name in name_list:
if not indexerid:
for curShow in sickbeard.showList:
if show_name_helpers.isGoodResult(cur_name, curShow, False):
indexerid = int(curShow.indexerid)
break
if not indexerid:
# do a scene reverse-lookup to get a list of all possible names
scene_id = sickbeard.scene_exceptions.get_scene_exception_by_name(cur_name)
if scene_id:
indexerid = int(scene_id)
break
showObj = None
if indexerid:
logger.log(u"Found Indexer ID: [" + str(indexerid) + "], for [" + str(cur_name) + "}", logger.DEBUG)
showObj = helpers.findCertainShow(sickbeard.showList, indexerid)
if not showObj:
logger.log(u"Show is not in our list of watched shows [" + parse_result.series_name + "], not caching ...", logger.DEBUG)
logger.log(u"No match for show: [" + parse_result.series_name + "], not caching ...", logger.DEBUG)
return None
season = episodes = None
@ -254,7 +282,7 @@ class TVCache():
airdate = parse_result.air_date.toordinal() or parse_result.sports_event_date.toordinal()
sql_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? AND indexer = ? AND airdate = ?",
[showObj.indexerid, showObj.indexer, airdate])
[indexerid, showObj.indexer, airdate])
if sql_results > 0:
season = int(sql_results[0]["season"])
episodes = [int(sql_results[0]["episode"])]
@ -277,18 +305,21 @@ class TVCache():
name = unicode(name, 'utf-8')
logger.log(u"Added RSS item: [" + name + "] to cache: [" + self.providerID + "]", logger.DEBUG)
sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
if not in_cache:
sickbeard.name_cache.addNameToCache(name, indexerid)
return [
"INSERT INTO [" + self.providerID + "] (name, season, episodes, indexerid, url, time, quality) VALUES (?,?,?,?,?,?,?)",
[name, season, episodeText, showObj.indexerid, url, curTimestamp, quality]]
[name, season, episodeText, indexerid, url, curTimestamp, quality]]
def searchCache(self, episode, manualSearch=False):
neededEps = self.findNeededEpisodes(episode, manualSearch)
return neededEps
def listPropers(self, date=None, delimiter="."):
def listPropers(self, date=None, delimiter="."):
myDB = self._getDB()
sql = "SELECT * FROM [" + self.providerID + "] WHERE name LIKE '%.PROPER.%' OR name LIKE '%.REPACK.%'"
@ -298,6 +329,7 @@ class TVCache():
return filter(lambda x: x['indexerid'] != 0, myDB.select(sql))
def findNeededEpisodes(self, epObj=None, manualSearch=False):
neededEps = {}
@ -319,7 +351,7 @@ class TVCache():
# get the show object, or if it's not one of our shows then ignore it
try:
showObj = helpers.findCertainShow(sickbeard.showList, int(curResult["indexerid"]))
except (MultipleShowObjectsException):
except MultipleShowObjectsException:
showObj = None
if not showObj:
@ -365,3 +397,4 @@ class TVCache():
neededEps[epObj].append(result)
return neededEps


@ -1368,14 +1368,12 @@ class CMD_SickBeardCheckScheduler(ApiCall):
backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
nextSearch = str(sickbeard.currentSearchScheduler.timeLeft()).split('.')[0]
nextBacklog = sickbeard.backlogSearchScheduler.nextRun().strftime(dateFormat).decode(sickbeard.SYS_ENCODING)
myDB.connection.close()
data = {"backlog_is_paused": int(backlogPaused), "backlog_is_running": int(backlogRunning),
"last_backlog": _ordinal_to_dateForm(sqlResults[0]["last_backlog"]),
"search_is_running": int(searchStatus), "next_search": nextSearch, "next_backlog": nextBacklog}
"next_backlog": nextBacklog}
return _responds(RESULT_SUCCESS, data)
@ -1424,27 +1422,6 @@ class CMD_SickBeardDeleteRootDir(ApiCall):
return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directory deleted")
class CMD_SickBeardForceSearch(ApiCall):
_help = {"desc": "force the episode search early"
}
def __init__(self, args, kwargs):
# required
# optional
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" force the episode search early """
# Changing all old missing episodes to status WANTED
# Beginning search for new episodes on RSS
# Searching all providers for any needed episodes
result = sickbeard.currentSearchScheduler.forceRun()
if result:
return _responds(RESULT_SUCCESS, msg="Episode search forced")
return _responds(RESULT_FAILURE, msg="Can not search for episode")
class CMD_SickBeardGetDefaults(ApiCall):
_help = {"desc": "get sickbeard user defaults"}
@ -2604,7 +2581,6 @@ _functionMaper = {"help": CMD_Help,
"sb.addrootdir": CMD_SickBeardAddRootDir,
"sb.checkscheduler": CMD_SickBeardCheckScheduler,
"sb.deleterootdir": CMD_SickBeardDeleteRootDir,
"sb.forcesearch": CMD_SickBeardForceSearch,
"sb.getdefaults": CMD_SickBeardGetDefaults,
"sb.getmessages": CMD_SickBeardGetMessages,
"sb.getrootdirs": CMD_SickBeardGetRootDirs,


@ -204,24 +204,11 @@ class ManageSearches:
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() # @UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() # @UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive # @UndefinedVariable
t.submenu = ManageMenu()
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches/")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":