From e353cd13c7b2a0a416c51af3dbdb4a7db30d2b60 Mon Sep 17 00:00:00 2001 From: echel0n Date: Mon, 12 May 2014 22:48:00 -0700 Subject: [PATCH 1/5] Added ability to turn RSS Cache updates on startup and let it start when its interval is reached and continue from there. This can be set from same place you set backlog startups. --- .../interfaces/default/config_search.tmpl | 9 ++++++++- sickbeard/__init__.py | 19 ++++++++++++------- sickbeard/search_queue.py | 15 +++++++-------- sickbeard/webserve.py | 3 ++- 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/gui/slick/interfaces/default/config_search.tmpl b/gui/slick/interfaces/default/config_search.tmpl index 1bc155c2..89a6d662 100644 --- a/gui/slick/interfaces/default/config_search.tmpl +++ b/gui/slick/interfaces/default/config_search.tmpl @@ -107,11 +107,18 @@ Prefer to download seperate episodes, not complete seasons? +
+ + +
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py index bc94ba74..44d6bcf3 100644 --- a/sickbeard/__init__.py +++ b/sickbeard/__init__.py @@ -176,6 +176,7 @@ ALLOW_HIGH_PRIORITY = None RSSUPDATE_FREQUENCY = None UPDATE_FREQUENCY = None BACKLOG_FREQUENCY = 21 +RSSUPDATE_STARTUP = None BACKLOG_STARTUP = None MIN_SEARCH_FREQUENCY = 10 @@ -496,8 +497,8 @@ def initialize(consoleLogging=True): NEWZNAB_DATA, NZBS, NZBS_UID, NZBS_HASH, EZRSS, TVTORRENTS, TVTORRENTS_DIGEST, TVTORRENTS_HASH, TVTORRENTS_OPTIONS, BTN, BTN_API_KEY, BTN_OPTIONS, \ THEPIRATEBAY, THEPIRATEBAY_TRUSTED, THEPIRATEBAY_PROXY, THEPIRATEBAY_PROXY_URL, THEPIRATEBAY_BLACKLIST, THEPIRATEBAY_OPTIONS, TORRENTLEECH, TORRENTLEECH_USERNAME, TORRENTLEECH_PASSWORD, TORRENTLEECH_OPTIONS, \ IPTORRENTS, IPTORRENTS_USERNAME, IPTORRENTS_PASSWORD, IPTORRENTS_FREELEECH, IPTORRENTS_OPTIONS, KAT, KAT_VERIFIED, KAT_OPTIONS, PUBLICHD, PUBLICHD_OPTIONS, SCC, SCC_USERNAME, SCC_PASSWORD, SCC_OPTIONS, HDTORRENTS, HDTORRENTS_USERNAME, HDTORRENTS_PASSWORD, HDTORRENTS_UID, HDTORRENTS_HASH, HDTORRENTS_OPTIONS, TORRENTDAY, TORRENTDAY_USERNAME, TORRENTDAY_PASSWORD, TORRENTDAY_UID, TORRENTDAY_HASH, TORRENTDAY_FREELEECH, TORRENTDAY_OPTIONS, \ - HDBITS, HDBITS_USERNAME, HDBITS_PASSKEY, HDBITS_OPTIONS, TORRENT_DIR, USENET_RETENTION, SOCKET_TIMEOUT, RSSUPDATE_FREQUENCY, DEFAULT_SEARCH_FREQUENCY, BACKLOG_FREQUENCY, BACKLOG_STARTUP, INDEXER_DEFAULT, \ - NEXTGEN, NEXTGEN_USERNAME, NEXTGEN_PASSWORD, NEXTGEN_FREELEECH, NEXTGEN_OPTIONS, SPEEDCD, SPEEDCD_USERNAME, SPEEDCD_PASSWORD, SPEEDCD_FREELEECH,\ + HDBITS, HDBITS_USERNAME, HDBITS_PASSKEY, HDBITS_OPTIONS, TORRENT_DIR, USENET_RETENTION, SOCKET_TIMEOUT, RSSUPDATE_FREQUENCY, DEFAULT_SEARCH_FREQUENCY, BACKLOG_FREQUENCY, BACKLOG_STARTUP, INDEXER_DEFAULT, RSSUPDATE_STARTUP, \ + NEXTGEN, NEXTGEN_USERNAME, NEXTGEN_PASSWORD, NEXTGEN_FREELEECH, NEXTGEN_OPTIONS, SPEEDCD, SPEEDCD_USERNAME, SPEEDCD_PASSWORD, SPEEDCD_FREELEECH, \ EZRSS_RATIO, TVTORRENTS_RATIO, BTN_RATIO, THEPIRATEBAY_RATIO, TORRENTLEECH_RATIO, IPTORRENTS_RATIO, KAT_RATIO, PUBLICHD_RATIO, TORRENTDAY_RATIO, SCC_RATIO, HDTORRENTS_RATIO, HDBITS_RATIO, NEXTGEN_RATIO, SPEEDCD_RATIO, \ QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, SUBTITLES_DEFAULT, STATUS_DEFAULT, \ GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, GROWL_NOTIFY_ONSUBTITLEDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD, \ @@ -507,7 +508,7 @@ def initialize(consoleLogging=True): USE_PUSHALOT, PUSHALOT_NOTIFY_ONSNATCH, PUSHALOT_NOTIFY_ONDOWNLOAD, PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHALOT_AUTHORIZATIONTOKEN, \ USE_PUSHBULLET, PUSHBULLET_NOTIFY_ONSNATCH, PUSHBULLET_NOTIFY_ONDOWNLOAD, PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHBULLET_API, PUSHBULLET_DEVICE, \ versionCheckScheduler, VERSION_NOTIFY, AUTO_UPDATE, PROCESS_AUTOMATICALLY, UNPACK, \ - KEEP_PROCESSED_DIR, PROCESS_METHOD, TV_DOWNLOAD_DIR, MIN_SEARCH_FREQUENCY, DEFAULT_UPDATE_FREQUENCY,MIN_UPDATE_FREQUENCY,UPDATE_FREQUENCY,\ + KEEP_PROCESSED_DIR, PROCESS_METHOD, TV_DOWNLOAD_DIR, MIN_SEARCH_FREQUENCY, DEFAULT_UPDATE_FREQUENCY, MIN_UPDATE_FREQUENCY, UPDATE_FREQUENCY, \ showQueueScheduler, searchQueueScheduler, ROOT_DIRS, CACHE_DIR, ACTUAL_CACHE_DIR, \ NAMING_PATTERN, NAMING_MULTI_EP, NAMING_FORCE_FOLDERS, NAMING_ABD_PATTERN, NAMING_CUSTOM_ABD, NAMING_SPORTS_PATTERN, NAMING_CUSTOM_SPORTS, NAMING_STRIP_YEAR, \ RENAME_EPISODES, properFinderScheduler, PROVIDER_ORDER, autoPostProcesserScheduler, \ @@ -638,7 +639,8 @@ def initialize(consoleLogging=True): NAMING_PATTERN = check_setting_str(CFG, 'General', 
'naming_pattern', 'Season %0S/%SN - S%0SE%0E - %EN') NAMING_ABD_PATTERN = check_setting_str(CFG, 'General', 'naming_abd_pattern', '%Y/%0M/%SN - %A.D - %EN') NAMING_CUSTOM_ABD = check_setting_int(CFG, 'General', 'naming_custom_abd', 0) - NAMING_SPORTS_PATTERN = check_setting_str(CFG, 'General', 'naming_sports_pattern', 'Season %0S/%SN - S%0SE%0E - %EN') + NAMING_SPORTS_PATTERN = check_setting_str(CFG, 'General', 'naming_sports_pattern', + 'Season %0S/%SN - S%0SE%0E - %EN') NAMING_CUSTOM_SPORTS = check_setting_int(CFG, 'General', 'naming_custom_sports', 0) NAMING_MULTI_EP = check_setting_int(CFG, 'General', 'naming_multi_ep', 1) NAMING_FORCE_FOLDERS = naming.check_force_season_folders() @@ -661,6 +663,7 @@ def initialize(consoleLogging=True): ALLOW_HIGH_PRIORITY = bool(check_setting_int(CFG, 'General', 'allow_high_priority', 1)) + RSSUPDATE_STARTUP = bool(check_setting_int(CFG, 'General', 'rssupdate_startup', 1)) BACKLOG_STARTUP = bool(check_setting_int(CFG, 'General', 'backlog_startup', 1)) USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500) @@ -864,7 +867,8 @@ def initialize(consoleLogging=True): USE_BOXCAR2 = bool(check_setting_int(CFG, 'Boxcar2', 'use_boxcar2', 0)) BOXCAR2_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsnatch', 0)) BOXCAR2_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_ondownload', 0)) - BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsubtitledownload', 0)) + BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = bool( + check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsubtitledownload', 0)) BOXCAR2_ACCESSTOKEN = check_setting_str(CFG, 'Boxcar2', 'boxcar2_accesstoken', '') USE_PUSHOVER = bool(check_setting_int(CFG, 'Pushover', 'use_pushover', 0)) @@ -1065,7 +1069,7 @@ def initialize(consoleLogging=True): cycleTime=datetime.timedelta(minutes=RSSUPDATE_FREQUENCY), threadName="RSSUPDATER", silent=True, - runImmediately=True) + runImmediately=RSSUPDATE_STARTUP) showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(), cycleTime=datetime.timedelta(seconds=3), @@ -1103,7 +1107,7 @@ def initialize(consoleLogging=True): backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(), cycleTime=datetime.timedelta( - minutes=BACKLOG_FREQUENCY), + minutes=BACKLOG_FREQUENCY), threadName="BACKLOG", silent=True, runImmediately=BACKLOG_STARTUP) @@ -1382,6 +1386,7 @@ def save_config(): new_config['General']['download_propers'] = int(DOWNLOAD_PROPERS) new_config['General']['prefer_episode_releases'] = int(PREFER_EPISODE_RELEASES) new_config['General']['allow_high_priority'] = int(ALLOW_HIGH_PRIORITY) + new_config['General']['rssupdate_startup'] = int(RSSUPDATE_STARTUP) new_config['General']['backlog_startup'] = int(BACKLOG_STARTUP) new_config['General']['quality_default'] = int(QUALITY_DEFAULT) new_config['General']['status_default'] = int(STATUS_DEFAULT) diff --git a/sickbeard/search_queue.py b/sickbeard/search_queue.py index c7dbc870..f7431f57 100644 --- a/sickbeard/search_queue.py +++ b/sickbeard/search_queue.py @@ -258,15 +258,14 @@ class FailedQueueItem(generic_queue.QueueItem): generic_queue.QueueItem.execute(self) for i, epObj in enumerate(self.episodes): - with epObj.lock: - (release, provider) = failed_history.findRelease(self.show, epObj.season, epObj.episode) - if release: - logger.log(u"Marking release as bad: " + release) - failed_history.markFailed(self.show, epObj.season, epObj.episode) - failed_history.logFailed(release) - 
history.logFailed(self.show.indexerid, epObj.season, epObj.episode, epObj.status, release, provider) + (release, provider) = failed_history.findRelease(self.show, epObj.season, epObj.episode) + if release: + logger.log(u"Marking release as bad: " + release) + failed_history.markFailed(self.show, epObj.season, epObj.episode) + failed_history.logFailed(release) + history.logFailed(self.show.indexerid, epObj.season, epObj.episode, epObj.status, release, provider) - failed_history.revertEpisode(self.show, epObj.season, epObj.episode) + failed_history.revertEpisode(self.show, epObj.season, epObj.episode) try: logger.log( diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py index a24a2035..4ca85c3a 100644 --- a/sickbeard/webserve.py +++ b/sickbeard/webserve.py @@ -1053,7 +1053,7 @@ class ConfigSearch: nzbget_category=None, nzbget_host=None, nzbget_use_https=None, nzb_method=None, torrent_method=None, usenet_retention=None, rssupdate_frequency=None, backlog_frequency=None, download_propers=None, prefer_episode_releases=None, allow_high_priority=None, backlog_startup=None, - torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None, + torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None, rssupdate_startup=None, torrent_label=None, torrent_path=None, torrent_verify_cert=None, torrent_ratio=None, torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None, ignore_words=None): @@ -1086,6 +1086,7 @@ class ConfigSearch: sickbeard.ALLOW_HIGH_PRIORITY = config.checkbox_to_value(allow_high_priority) config.change_BACKLOG_FREQUENCY(backlog_frequency) + sickbeard.RSSUPDATE_STARTUP = config.checkbox_to_value(rssupdate_startup) sickbeard.BACKLOG_STARTUP = config.checkbox_to_value(backlog_startup) sickbeard.SAB_USERNAME = sab_username From 9301d294339b4f83ee7e3dbef6de9d45de796266 Mon Sep 17 00:00:00 2001 From: echel0n Date: Tue, 13 May 2014 00:03:10 -0700 Subject: [PATCH 2/5] Improved performance of RSS Cache updater --- sickbeard/search.py | 1 - sickbeard/tvcache.py | 17 +++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/sickbeard/search.py b/sickbeard/search.py index 1d3bec5a..38ea6403 100644 --- a/sickbeard/search.py +++ b/sickbeard/search.py @@ -233,7 +233,6 @@ def pickBestResult(results, show, quality_list=None): return bestResult - def isFinalResult(result): """ Checks if the given result is good enough quality that we can stop searching for other ones. 
diff --git a/sickbeard/tvcache.py b/sickbeard/tvcache.py index 4e18be35..f59d03ff 100644 --- a/sickbeard/tvcache.py +++ b/sickbeard/tvcache.py @@ -245,7 +245,7 @@ class TVCache(): logger.log(u"No series name retrieved from " + name + ", unable to cache it", logger.DEBUG) return None - cacheResult = sickbeard.name_cache.retrieveNameFromCache(name) + cacheResult = sickbeard.name_cache.retrieveNameFromCache(parse_result.series_name) if cacheResult: in_cache = True indexerid = int(cacheResult) @@ -255,11 +255,12 @@ class TVCache(): if showResult: indexerid = int(showResult[0]) - if not indexerid: - for curShow in sickbeard.showList: - if show_name_helpers.isGoodResult(name, curShow, False): - indexerid = int(curShow.indexerid) - break + # if not indexerid: + # for curShow in sickbeard.showList: + # if curShow.name == parse_result.series_name: + # if show_name_helpers.isGoodResult(name, curShow, False): + # indexerid = int(curShow.indexerid) + # break showObj = None if indexerid: @@ -281,7 +282,7 @@ class TVCache(): season = int(sql_results[0]["season"]) episodes = [int(sql_results[0]["episode"])] else: - season = parse_result.season_number + season = parse_result.season_number if parse_result.season_number != None else 1 episodes = parse_result.episode_numbers if season and episodes: @@ -301,7 +302,7 @@ class TVCache(): logger.log(u"Added RSS item: [" + name + "] to cache: [" + self.providerID + "]", logger.DEBUG) if not in_cache: - sickbeard.name_cache.addNameToCache(name, indexerid) + sickbeard.name_cache.addNameToCache(parse_result.series_name, indexerid) return [ "INSERT INTO [" + self.providerID + "] (name, season, episodes, indexerid, url, time, quality) VALUES (?,?,?,?,?,?,?)", From 2318e43e899c0f5b26ae6359e35a55b67f54a87e Mon Sep 17 00:00:00 2001 From: echel0n Date: Tue, 13 May 2014 03:03:11 -0700 Subject: [PATCH 3/5] Fix for scene numbering manually when show has incomplete xem mapping. Fix for displaying shows scene numbering when show does not exist on xem. 
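In rough terms, the default scene numbering shown on displayShow now falls back in three steps. A minimal Python sketch of that decision (illustrative only; the real logic is the Cheetah template code below, and xem_numbering here stands for the template's dict of (season, episode) mappings):

    def default_scene_numbering(season, episode, xem_numbering):
        # Episode has an explicit XEM mapping: use it.
        if (season, episode) in xem_numbering:
            return xem_numbering[(season, episode)]
        # Show is on XEM but this episode is unmapped (incomplete mapping): display 0x0.
        if xem_numbering:
            return (0, 0)
        # Show does not exist on XEM at all: fall back to the indexer numbering.
        return (season, episode)
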
--- gui/slick/interfaces/default/displayShow.tmpl | 4 +++- sickbeard/scheduler.py | 5 ++--- sickbeard/tv.py | 6 +++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/gui/slick/interfaces/default/displayShow.tmpl b/gui/slick/interfaces/default/displayShow.tmpl index a510a67d..a7ad448f 100644 --- a/gui/slick/interfaces/default/displayShow.tmpl +++ b/gui/slick/interfaces/default/displayShow.tmpl @@ -248,8 +248,10 @@ #if int($show.air_by_date) != 1 and int($show.sports) != 1 #if (epResult["season"], epResult["episode"]) in $xem_numbering: #set ($dfltSeas, $dfltEpis) = $xem_numbering[(epResult["season"], epResult["episode"])] - #else + #elif $xem_numbering and (epResult["season"], epResult["episode"]) not in $xem_numbering: #set ($dfltSeas, $dfltEpis) = (0,0) + #else: + #set ($dfltSeas, $dfltEpis) = (epResult["season"], epResult["episode"]) #end if #if (epResult["season"], epResult["episode"]) in $scene_numbering: #set ($scSeas, $scEpis) = $scene_numbering[(epResult["season"], epResult["episode"])] diff --git a/sickbeard/scheduler.py b/sickbeard/scheduler.py index 7f8781e0..56b980fc 100644 --- a/sickbeard/scheduler.py +++ b/sickbeard/scheduler.py @@ -27,7 +27,7 @@ from sickbeard.exceptions import ex class Scheduler: def __init__(self, action, cycleTime=datetime.timedelta(minutes=10), runImmediately=True, - threadName="ScheduledThread", silent=False, runOnce=False): + threadName="ScheduledThread", silent=False): if runImmediately: self.lastRun = datetime.datetime.fromordinal(1) @@ -44,7 +44,6 @@ class Scheduler: self.initThread() self.abort = False - self.runOnce = runOnce def initThread(self): if self.thread == None or not self.thread.isAlive(): @@ -76,7 +75,7 @@ class Scheduler: logger.log(u"Exception generated in thread " + self.threadName + ": " + ex(e), logger.ERROR) logger.log(repr(traceback.format_exc()), logger.DEBUG) - if self.abort or self.runOnce: + if self.abort: self.abort = False self.thread = None return diff --git a/sickbeard/tv.py b/sickbeard/tv.py index faae6e43..03c73eff 100644 --- a/sickbeard/tv.py +++ b/sickbeard/tv.py @@ -205,7 +205,9 @@ class TVShow(object): if ep != None: self.episodes[season][episode] = ep - return self.episodes[season][episode] + epObj = self.episodes[season][episode] + epObj.convertToSceneNumbering() + return epObj def should_update(self, update_date=datetime.date.today()): @@ -1158,8 +1160,6 @@ class TVEpisode(object): self.specifyEpisode(self.season, self.episode) - self.convertToSceneNumbering() - self.relatedEps = [] self.checkForMetaFiles() From b9310444e5b5f08e4d62347130433b8b4d4b64c0 Mon Sep 17 00:00:00 2001 From: echel0n Date: Tue, 13 May 2014 04:16:32 -0700 Subject: [PATCH 4/5] Fix for failed download issues. Fix for auto-update improperly restarting. 
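The failed-download helpers in failed_history and history now take episode objects instead of (show, season, episode) arguments. A minimal sketch of the resulting per-episode call sequence (the actual implementation lives in FailedQueueItem.execute in sickbeard/search_queue.py):

    from sickbeard import failed_history, history

    def process_failed_episode(epObj):
        # Look up the previously snatched release for this episode, if any.
        (release, provider) = failed_history.findRelease(epObj)
        if release:
            # Mark the episode FAILED at its snatched quality and record the bad release.
            failed_history.markFailed(epObj)
            failed_history.logFailed(release)
            history.logFailed(epObj, release, provider)
        # Restore the episode to the status it had before the snatch (or WANTED if unknown).
        failed_history.revertEpisode(epObj)
        return epObj
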
--- SickBeard.py | 107 ++++++++++++++++++------------ sickbeard/__init__.py | 15 ++++- sickbeard/failed_history.py | 127 +++++++++++++----------------------- sickbeard/history.py | 10 +-- sickbeard/postProcessor.py | 4 +- sickbeard/search_queue.py | 13 ++-- sickbeard/versionChecker.py | 21 +++--- 7 files changed, 153 insertions(+), 144 deletions(-) diff --git a/SickBeard.py b/SickBeard.py index c892458f..058bd744 100755 --- a/SickBeard.py +++ b/SickBeard.py @@ -96,9 +96,10 @@ def daemonize(): try: pid = os.fork() # @UndefinedVariable - only available in UNIX if pid != 0: - sys.exit(0) + os._exit(0) except OSError, e: - raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno)) + sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) os.setsid() # @UndefinedVariable - only available in UNIX @@ -110,18 +111,33 @@ def daemonize(): try: pid = os.fork() # @UndefinedVariable - only available in UNIX if pid != 0: - sys.exit(0) + os._exit(0) except OSError, e: - raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno)) - - dev_null = file('/dev/null', 'r') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) + sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + # Write pid if sickbeard.CREATEPID: pid = str(os.getpid()) - logger.log(u"Writing PID " + pid + " to " + str(sickbeard.PIDFILE)) - file(sickbeard.PIDFILE, 'w').write("%s\n" % pid) + logger.log(u"Writing PID: " + pid + " to " + str(sickbeard.PIDFILE)) + try: + file(sickbeard.PIDFILE, 'w').write("%s\n" % pid) + except IOError, e: + logger.log_error_and_exit( + u"Unable to write PID file: " + sickbeard.PIDFILE + " Error: " + str(e.strerror) + " [" + str( + e.errno) + "]") + # Redirect all output + sys.stdout.flush() + sys.stderr.flush() + + devnull = getattr(os, 'devnull', '/dev/null') + stdin = file(devnull, 'r') + stdout = file(devnull, 'a+') + stderr = file(devnull, 'a+') + os.dup2(stdin.fileno(), sys.stdin.fileno()) + os.dup2(stdout.fileno(), sys.stdout.fileno()) + os.dup2(stderr.fileno(), sys.stderr.fileno()) def main(): """ @@ -134,8 +150,8 @@ def main(): sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME) sickbeard.DATA_DIR = sickbeard.PROG_DIR sickbeard.MY_ARGS = sys.argv[1:] - sickbeard.CREATEPID = False sickbeard.DAEMON = False + sickbeard.CREATEPID = False sickbeard.SYS_ENCODING = None @@ -196,13 +212,15 @@ def main(): if o in ('-p', '--port'): forcedPort = int(a) - # Run as a daemon + # Run as a double forked daemon if o in ('-d', '--daemon'): + sickbeard.DAEMON = True + # When running as daemon disable consoleLogging and don't start browser + consoleLogging = False + noLaunch = True + if sys.platform == 'win32': - print "Daemonize not supported under Windows, starting normally" - else: - consoleLogging = False - sickbeard.DAEMON = True + sickbeard.DAEMON = False # Specify folder to load the config file from if o in ('--config',): @@ -214,21 +232,27 @@ def main(): # Write a pidfile if requested if o in ('--pidfile',): + sickbeard.CREATEPID = True sickbeard.PIDFILE = str(a) # If the pidfile already exists, sickbeard may still be running, so exit if os.path.exists(sickbeard.PIDFILE): - sys.exit("PID file '" + sickbeard.PIDFILE + "' already exists. Exiting.") + sys.exit("PID file: " + sickbeard.PIDFILE + " already exists. 
Exiting.") - # The pidfile is only useful in daemon mode, make sure we can write the file properly - if sickbeard.DAEMON: - sickbeard.CREATEPID = True - try: - file(sickbeard.PIDFILE, 'w').write("pid\n") - except IOError, e: - raise SystemExit("Unable to write PID file: %s [%d]" % (e.strerror, e.errno)) - else: - logger.log(u"Not running in daemon mode. PID file creation disabled.") + # The pidfile is only useful in daemon mode, make sure we can write the file properly + if sickbeard.CREATEPID: + if sickbeard.DAEMON: + pid_dir = os.path.dirname(sickbeard.PIDFILE) + if not os.access(pid_dir, os.F_OK): + sys.exit("PID dir: " + pid_dir + " doesn't exist. Exiting.") + if not os.access(pid_dir, os.W_OK): + sys.exit("PID dir: " + pid_dir + " must be writable (write permissions). Exiting.") + + else: + if consoleLogging: + sys.stdout.write("Not running in daemon mode. PID file creation disabled.\n") + + sickbeard.CREATEPID = False # If they don't specify a config file then put it in the data dir if not sickbeard.CONFIG_FILE: @@ -263,16 +287,16 @@ def main(): sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE) - CUR_DB_VERSION = db.DBConnection().checkDBVersion() + CUR_DB_VERSION = db.DBConnection().checkDBVersion() if CUR_DB_VERSION > 0: if CUR_DB_VERSION < MIN_DB_VERSION: raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") is too old to migrate from with this version of Sick Beard (" + str(MIN_DB_VERSION) + ").\n" + \ "Upgrade using a previous version of SB first, or start with no database file to begin fresh.") if CUR_DB_VERSION > MAX_DB_VERSION: raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") has been incremented past what this version of Sick Beard supports (" + str(MAX_DB_VERSION) + ").\n" + \ - "If you have used other forks of SB, your database may be unusable due to their modifications.") - - # Initialize the config and our threads + "If you have used other forks of SB, your database may be unusable due to their modifications.") + + # Initialize the config and our threads sickbeard.initialize(consoleLogging=consoleLogging) sickbeard.showList = [] @@ -306,17 +330,17 @@ def main(): try: initWebServer({ - 'port': startPort, - 'host': webhost, - 'data_root': os.path.join(sickbeard.PROG_DIR, 'gui/'+sickbeard.GUI_NAME), - 'web_root': sickbeard.WEB_ROOT, - 'log_dir': log_dir, - 'username': sickbeard.WEB_USERNAME, - 'password': sickbeard.WEB_PASSWORD, - 'enable_https': sickbeard.ENABLE_HTTPS, - 'https_cert': sickbeard.HTTPS_CERT, - 'https_key': sickbeard.HTTPS_KEY, - }) + 'port': startPort, + 'host': webhost, + 'data_root': os.path.join(sickbeard.PROG_DIR, 'gui/'+sickbeard.GUI_NAME), + 'web_root': sickbeard.WEB_ROOT, + 'log_dir': log_dir, + 'username': sickbeard.WEB_USERNAME, + 'password': sickbeard.WEB_PASSWORD, + 'enable_https': sickbeard.ENABLE_HTTPS, + 'https_cert': sickbeard.HTTPS_CERT, + 'https_key': sickbeard.HTTPS_KEY, + }) except IOError: logger.log(u"Unable to start web server, is something else running on port %d?" 
% startPort, logger.ERROR) if sickbeard.LAUNCH_BROWSER and not sickbeard.DAEMON: @@ -341,12 +365,13 @@ def main(): # Stay alive while my threads do the work while (True): - time.sleep(1) if sickbeard.invoked_command: sickbeard.invoked_command() sickbeard.invoked_command = None + time.sleep(1) + return if __name__ == "__main__": diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py index 44d6bcf3..d981bed6 100644 --- a/sickbeard/__init__.py +++ b/sickbeard/__init__.py @@ -1259,12 +1259,21 @@ def halt(): __INITIALIZED__ = False +def remove_pid_file(PIDFILE): + try: + if os.path.exists(PIDFILE): + os.remove(PIDFILE) + + except (IOError, OSError): + return False + + return True + def sig_handler(signum=None, frame=None): if type(signum) != type(None): logger.log(u"Signal %i caught, saving and exiting..." % int(signum)) saveAndShutdown() - def saveAll(): global showList @@ -1288,7 +1297,7 @@ def saveAndShutdown(restart=False): if CREATEPID: logger.log(u"Removing pidfile " + str(PIDFILE)) - os.remove(PIDFILE) + remove_pid_file(PIDFILE) if restart: install_type = versionCheckScheduler.action.install_type @@ -1310,7 +1319,7 @@ def saveAndShutdown(restart=False): popen_list += MY_ARGS if '--nolaunch' not in popen_list: popen_list += ['--nolaunch'] - logger.log(u"Restarting Sick Beard with " + str(popen_list)) + logger.log(u"Restarting SickRage with " + str(popen_list)) logger.close() subprocess.Popen(popen_list, cwd=os.getcwd()) diff --git a/sickbeard/failed_history.py b/sickbeard/failed_history.py index 42a3d45c..13923b4f 100644 --- a/sickbeard/failed_history.py +++ b/sickbeard/failed_history.py @@ -22,17 +22,12 @@ import datetime from sickbeard import db from sickbeard import logger -from sickbeard import exceptions +from sickbeard.exceptions import ex, EpisodeNotFoundException from sickbeard.history import dateFormat from sickbeard.common import Quality from sickbeard.common import WANTED, FAILED -def _log_helper(message, level=logger.MESSAGE): - logger.log(message, level) - return message + u"\n" - - def prepareFailedName(release): """Standardizes release name for failed DB""" @@ -41,6 +36,10 @@ def prepareFailedName(release): fixed = fixed.rpartition(".")[0] fixed = re.sub("[\.\-\+\ ]", "_", fixed) + + if not isinstance(fixed, unicode): + fixed = unicode(fixed, 'utf-8') + return fixed @@ -55,26 +54,26 @@ def logFailed(release): sql_results = myDB.select("SELECT * FROM history WHERE release=?", [release]) if len(sql_results) == 0: - log_str += _log_helper( + logger.log( u"Release not found in snatch history. Recording it as bad with no size and no proivder.", logger.WARNING) - log_str += _log_helper( + logger.log( u"Future releases of the same name from providers that don't return size will be skipped.", logger.WARNING) elif len(sql_results) > 1: - log_str += _log_helper(u"Multiple logged snatches found for release", logger.WARNING) + logger.log(u"Multiple logged snatches found for release", logger.WARNING) sizes = len(set(x["size"] for x in sql_results)) providers = len(set(x["provider"] for x in sql_results)) if sizes == 1: - log_str += _log_helper(u"However, they're all the same size. Continuing with found size.", logger.WARNING) + logger.log(u"However, they're all the same size. Continuing with found size.", logger.WARNING) size = sql_results[0]["size"] else: - log_str += _log_helper( + logger.log( u"They also vary in size. 
Deleting the logged snatches and recording this release with no size/provider", logger.WARNING) for result in sql_results: deleteLoggedSnatch(result["release"], result["size"], result["provider"]) if providers == 1: - log_str += _log_helper(u"They're also from the same provider. Using it as well.") + logger.log(u"They're also from the same provider. Using it as well.") provider = sql_results[0]["provider"] else: size = sql_results[0]["size"] @@ -113,74 +112,45 @@ def hasFailed(release, size, provider="%"): return (len(sql_results) > 0) -def revertEpisode(show_obj, season, episode=None): +def revertEpisode(epObj): """Restore the episodes of a failed download to their original state""" myDB = db.DBConnection("failed.db") - log_str = u"" + - sql_results = myDB.select("SELECT * FROM history WHERE showid=? AND season=?", [show_obj.indexerid, season]) - # {episode: result, ...} + sql_results = myDB.select("SELECT * FROM history WHERE showid=? AND season=?", [epObj.show.indexerid, epObj.season]) history_eps = dict([(res["episode"], res) for res in sql_results]) - if episode: - try: - ep_obj = show_obj.getEpisode(season, episode) - log_str += _log_helper(u"Reverting episode (%s, %s): %s" % (season, episode, ep_obj.name)) - with ep_obj.lock: - if episode in history_eps: - log_str += _log_helper(u"Found in history") - ep_obj.status = history_eps[episode]['old_status'] - else: - log_str += _log_helper(u"WARNING: Episode not found in history. Setting it back to WANTED", - logger.WARNING) - ep_obj.status = WANTED + try: + logger.log(u"Reverting episode (%s, %s): %s" % (epObj.season, epObj.episode, epObj.name)) + with epObj.lock: + if epObj.episode in history_eps: + logger.log(u"Found in history") + epObj.status = history_eps[epObj.episode]['old_status'] + else: + logger.log(u"WARNING: Episode not found in history. Setting it back to WANTED", + logger.WARNING) + epObj.status = WANTED - ep_obj.saveToDB() + epObj.saveToDB() - except exceptions.EpisodeNotFoundException, e: - log_str += _log_helper(u"Unable to create episode, please set its status manually: " + exceptions.ex(e), - logger.WARNING) - else: - # Whole season - log_str += _log_helper(u"Setting season to wanted: " + str(season)) - for ep_obj in show_obj.getAllEpisodes(season): - log_str += _log_helper(u"Reverting episode (%d, %d): %s" % (season, ep_obj.episode, ep_obj.name)) - with ep_obj.lock: - if ep_obj in history_eps: - log_str += _log_helper(u"Found in history") - ep_obj.status = history_eps[ep_obj]['old_status'] - else: - log_str += _log_helper(u"WARNING: Episode not found in history. 
Setting it back to WANTED", - logger.WARNING) - ep_obj.status = WANTED - - ep_obj.saveToDB() - - return log_str + except EpisodeNotFoundException, e: + logger.log(u"Unable to create episode, please set its status manually: " + ex(e), + logger.WARNING) + + return -def markFailed(show_obj, season, episode=None): +def markFailed(epObj): log_str = u"" - if episode: - try: - ep_obj = show_obj.getEpisode(season, episode) + try: + with epObj.lock: + quality = Quality.splitCompositeStatus(epObj.status)[1] + epObj.status = Quality.compositeStatus(FAILED, quality) + epObj.saveToDB() - with ep_obj.lock: - quality = Quality.splitCompositeStatus(ep_obj.status)[1] - ep_obj.status = Quality.compositeStatus(FAILED, quality) - ep_obj.saveToDB() - - except exceptions.EpisodeNotFoundException, e: - log_str += _log_helper(u"Unable to get episode, please set its status manually: " + exceptions.ex(e), - logger.WARNING) - else: - # Whole season - for ep_obj in show_obj.getAllEpisodes(season): - with ep_obj.lock: - quality = Quality.splitCompositeStatus(ep_obj.status)[1] - ep_obj.status = Quality.compositeStatus(FAILED, quality) - ep_obj.saveToDB() + except EpisodeNotFoundException, e: + logger.log(u"Unable to get episode, please set its status manually: " + ex(e), logger.WARNING) return log_str @@ -191,6 +161,7 @@ def logSnatch(searchResult): logDate = datetime.datetime.today().strftime(dateFormat) release = prepareFailedName(searchResult.name) + providerClass = searchResult.provider if providerClass is not None: provider = providerClass.name @@ -200,13 +171,11 @@ def logSnatch(searchResult): show_obj = searchResult.episodes[0].show for episode in searchResult.episodes: - old_status = show_obj.getEpisode(episode.season, episode.episode).status - myDB.action( "INSERT INTO history (date, size, release, provider, showid, season, episode, old_status)" "VALUES (?, ?, ?, ?, ?, ?, ?, ?)", [logDate, searchResult.size, release, provider, show_obj.indexerid, episode.season, episode.episode, - old_status]) + episode.status]) def deleteLoggedSnatch(release, size, provider): @@ -224,13 +193,11 @@ def trimHistory(): (datetime.datetime.today() - datetime.timedelta(days=30)).strftime(dateFormat))) -def findRelease(show, season, episode): +def findRelease(epObj): """ Find releases in history by show ID and season. Return None for release if multiple found or no release found. """ - if not show: return (None, None, None) - if not season: return (None, None, None) release = None provider = None @@ -238,13 +205,13 @@ def findRelease(show, season, episode): myDB = db.DBConnection("failed.db") # Clear old snatches for this release if any exist - myDB.action("DELETE FROM history WHERE showid=" + str(show.indexerid) + " AND season=" + str( - season) + " AND episode=" + str(episode) + " AND date < (SELECT max(date) FROM history WHERE showid=" + str( - show.indexerid) + " AND season=" + str(season) + " AND episode=" + str(episode) + ")") + myDB.action("DELETE FROM history WHERE showid=" + str(epObj.show.indexerid) + " AND season=" + str( + epObj.season) + " AND episode=" + str(epObj.episode) + " AND date < (SELECT max(date) FROM history WHERE showid=" + str( + epObj.show.indexerid) + " AND season=" + str(epObj.season) + " AND episode=" + str(epObj.episode) + ")") # Search for release in snatch history results = myDB.select("SELECT release, provider, date FROM history WHERE showid=? AND season=? 
AND episode=?", - [show.indexerid, season, episode]) + [epObj.show.indexerid, epObj.season, epObj.episode]) for result in results: release = str(result["release"]) @@ -255,9 +222,9 @@ def findRelease(show, season, episode): myDB.action("DELETE FROM history WHERE release=? AND date!=?", [release, date]) # Found a previously failed release - logger.log(u"Failed release found for season (%s): (%s)" % (season, result["release"]), logger.DEBUG) + logger.log(u"Failed release found for season (%s): (%s)" % (epObj.season, result["release"]), logger.DEBUG) return (release, provider) # Release was not found - logger.log(u"No releases found for season (%s) of (%s)" % (season, show.indexerid), logger.DEBUG) + logger.log(u"No releases found for season (%s) of (%s)" % (epObj.season, epObj.show.indexerid), logger.DEBUG) return (release, provider) \ No newline at end of file diff --git a/sickbeard/history.py b/sickbeard/history.py index c58b3782..a220e5f1 100644 --- a/sickbeard/history.py +++ b/sickbeard/history.py @@ -85,11 +85,11 @@ def logSubtitle(showid, season, episode, status, subtitleResult): _logHistoryItem(action, showid, season, episode, quality, resource, provider) -def logFailed(indexerid, season, episode, status, release, provider=None): - showid = int(indexerid) - season = int(season) - epNum = int(episode) - status, quality = Quality.splitCompositeStatus(status) +def logFailed(epObj, release, provider=None): + showid = int(epObj.show.indexerid) + season = int(epObj.season) + epNum = int(epObj.episode) + status, quality = Quality.splitCompositeStatus(epObj.status) action = Quality.compositeStatus(FAILED, quality) _logHistoryItem(action, showid, season, epNum, quality, release, provider) diff --git a/sickbeard/postProcessor.py b/sickbeard/postProcessor.py index ea6cacb8..d7a32c0f 100644 --- a/sickbeard/postProcessor.py +++ b/sickbeard/postProcessor.py @@ -625,7 +625,6 @@ class PostProcessor(object): # detect and convert scene numbered releases season, cur_episode = sickbeard.scene_numbering.get_indexer_numbering(indexer_id,indexer,season,cur_episode) - self._log(u"Episode object has been scene converted to " + str(season) + "x" + str(cur_episode), logger.DEBUG) # now that we've figured out which episode this file is just load it manually try: @@ -634,6 +633,9 @@ class PostProcessor(object): self._log(u"Unable to create episode: " + ex(e), logger.DEBUG) raise exceptions.PostProcessingFailed() + self._log(u"Episode object has been converted from Scene numbering " + str(curEp.scene_season) + "x" + str( + curEp.scene_episode) + " to Indexer numbering" + str(curEp.season) + "x" + str(curEp.episode)) + # associate all the episodes together under a single root episode if root_ep == None: root_ep = curEp diff --git a/sickbeard/search_queue.py b/sickbeard/search_queue.py index f7431f57..4b6b83d9 100644 --- a/sickbeard/search_queue.py +++ b/sickbeard/search_queue.py @@ -257,21 +257,22 @@ class FailedQueueItem(generic_queue.QueueItem): def execute(self): generic_queue.QueueItem.execute(self) + failed_episodes = [] for i, epObj in enumerate(self.episodes): - (release, provider) = failed_history.findRelease(self.show, epObj.season, epObj.episode) + (release, provider) = failed_history.findRelease(epObj) if release: logger.log(u"Marking release as bad: " + release) - failed_history.markFailed(self.show, epObj.season, epObj.episode) + failed_history.markFailed(epObj) failed_history.logFailed(release) - history.logFailed(self.show.indexerid, epObj.season, epObj.episode, epObj.status, release, provider) - - 
failed_history.revertEpisode(self.show, epObj.season, epObj.episode) + history.logFailed(epObj, release, provider) + failed_history.revertEpisode(epObj) + failed_episodes.append(epObj) try: logger.log( "Beginning failed download search for episodes from Season [" + str(self.episodes[0].season) + "]") - searchResult = search.searchProviders(self, self.show, self.episodes[0].season, self.episodes, False, True) + searchResult = search.searchProviders(self, self.show, failed_episodes[0].season, failed_episodes, False, True) if searchResult: self.success = SearchQueue().snatch_item(searchResult) diff --git a/sickbeard/versionChecker.py b/sickbeard/versionChecker.py index 59ce3397..8dda1a50 100644 --- a/sickbeard/versionChecker.py +++ b/sickbeard/versionChecker.py @@ -27,6 +27,7 @@ import tarfile import stat import traceback import gh_api as github +import threading import sickbeard from sickbeard import helpers @@ -57,23 +58,27 @@ class CheckVersion(): self.updater = None def run(self): + updated = None if self.check_for_new_version(): if sickbeard.AUTO_UPDATE: logger.log(u"New update found for SickRage, starting auto-updater ...") updated = sickbeard.versionCheckScheduler.action.update() if updated: logger.log(u"Update was successfull, restarting SickRage ...") - sickbeard.restart(False) - # refresh scene exceptions too - scene_exceptions.retrieve_exceptions() + # do a soft restart + threading.Timer(2, sickbeard.invoke_restart, [False]).start() - # refresh network timezones - network_timezones.update_network_dict() + if not updated: + # refresh scene exceptions too + scene_exceptions.retrieve_exceptions() - # sure, why not? - if sickbeard.USE_FAILED_DOWNLOADS: - failed_history.trimHistory() + # refresh network timezones + network_timezones.update_network_dict() + + # sure, why not? + if sickbeard.USE_FAILED_DOWNLOADS: + failed_history.trimHistory() def find_install_type(self): """ From 52fca3e29abbc37d138581ade9a0a3fd1d65b682 Mon Sep 17 00:00:00 2001 From: echel0n Date: Tue, 13 May 2014 04:19:37 -0700 Subject: [PATCH 5/5] auto update test #1 --- SickBeard.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/SickBeard.py b/SickBeard.py index 058bd744..75663bc8 100755 --- a/SickBeard.py +++ b/SickBeard.py @@ -2,20 +2,20 @@ # Author: Nic Wolfe # URL: http://code.google.com/p/sickbeard/ # -# This file is part of Sick Beard. +# This file is part of SickRage. # -# Sick Beard is free software: you can redistribute it and/or modify +# SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# Sick Beard is distributed in the hope that it will be useful, +# SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with Sick Beard. If not, see . +# along with SickRage. If not, see . 
# Check needed software dependencies to nudge users to fix their setup import sys @@ -173,7 +173,7 @@ def main(): # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError sys.setdefaultencoding(sickbeard.SYS_ENCODING) except: - print 'Sorry, you MUST add the Sick Beard folder to the PYTHONPATH environment variable' + print 'Sorry, you MUST add the SickRage folder to the PYTHONPATH environment variable' print 'or find another way to force Python to use ' + sickbeard.SYS_ENCODING + ' for string encoding.' sys.exit(1) @@ -279,7 +279,7 @@ def main(): os.chdir(sickbeard.DATA_DIR) if consoleLogging: - print "Starting up Sick Beard " + SICKBEARD_VERSION + " from " + sickbeard.CONFIG_FILE + print "Starting up SickRage " + SICKBEARD_VERSION + " from " + sickbeard.CONFIG_FILE # Load the config and publish it to the sickbeard package if not os.path.isfile(sickbeard.CONFIG_FILE): @@ -290,10 +290,10 @@ def main(): CUR_DB_VERSION = db.DBConnection().checkDBVersion() if CUR_DB_VERSION > 0: if CUR_DB_VERSION < MIN_DB_VERSION: - raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") is too old to migrate from with this version of Sick Beard (" + str(MIN_DB_VERSION) + ").\n" + \ + raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") is too old to migrate from with this version of SickRage (" + str(MIN_DB_VERSION) + ").\n" + \ "Upgrade using a previous version of SB first, or start with no database file to begin fresh.") if CUR_DB_VERSION > MAX_DB_VERSION: - raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") has been incremented past what this version of Sick Beard supports (" + str(MAX_DB_VERSION) + ").\n" + \ + raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") has been incremented past what this version of SickRage supports (" + str(MAX_DB_VERSION) + ").\n" + \ "If you have used other forks of SB, your database may be unusable due to their modifications.") # Initialize the config and our threads