Mirror of https://github.com/moparisthebest/SickRage (synced 2024-12-04 07:02:26 -05:00)
Re-coded the logger facility for better performance and cleaner code, with improved log rotation.
This commit is contained in:
parent
683093bc29
commit
3eb366ac05
SickBeard.py: 36 changed lines
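
Note: the heart of this change is a new, much smaller sickbeard/logger.py built on module-level functions instead of the old SBRotatingLogHandler class. A condensed sketch of the new public surface (see the full logger.py hunk below; the calls shown here are illustrative, not copied from the diff):

    # assumes sickbeard.LOG_DIR is already set; mirrors the new module-level API
    from sickbeard import logger

    logger.initLogging(consoleLogging=True, fileLogging=True, debug=False)
    logger.log(u"Checking database structure...")   # defaults to logger.INFO
    logger.log(u"Restore FAILED!", logger.ERROR)    # explicit level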
@@ -33,7 +33,6 @@ if sys.version_info < (2, 6):
 
 try:
     import Cheetah
 
     if Cheetah.Version[0] != '2':
         raise ValueError
 except ValueError:
@@ -256,36 +255,22 @@ class SickRage(object):
                 raise SystemExit(
                     "Config file root dir '" + os.path.dirname(sickbeard.CONFIG_FILE) + "' must be writeable.")
 
-        # Check if we need to perform a restore first
-        restoreDir = os.path.join(sickbeard.DATA_DIR, 'restore')
-        if os.path.exists(restoreDir):
-            if self.restore(restoreDir, sickbeard.DATA_DIR):
-                logger.log(u"Restore successful...")
-            else:
-                logger.log(u"Restore FAILED!", logger.ERROR)
-
         os.chdir(sickbeard.DATA_DIR)
 
+        # Check if we need to perform a restore first
+        restoreDir = os.path.join(sickbeard.DATA_DIR, 'restore')
+        if self.consoleLogging and os.path.exists(restoreDir):
+            if self.restore(restoreDir, sickbeard.DATA_DIR):
+                sys.stdout.write("Restore successful...\n")
+            else:
+                sys.stdout.write("Restore FAILED!\n")
+
         # Load the config and publish it to the sickbeard package
-        if not os.path.isfile(sickbeard.CONFIG_FILE):
-            logger.log(u"Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!", logger.ERROR)
+        if self.consoleLogging and not os.path.isfile(sickbeard.CONFIG_FILE):
+            sys.stdout.write("Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!" + "\n")
 
         sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
 
-        CUR_DB_VERSION = db.DBConnection().checkDBVersion()
-
-        if CUR_DB_VERSION > 0:
-            if CUR_DB_VERSION < MIN_DB_VERSION:
-                raise SystemExit("Your database version (" + str(
-                    CUR_DB_VERSION) + ") is too old to migrate from with this version of SickRage (" + str(
-                    MIN_DB_VERSION) + ").\n" + \
-                    "Upgrade using a previous version of SB first, or start with no database file to begin fresh.")
-            if CUR_DB_VERSION > MAX_DB_VERSION:
-                raise SystemExit("Your database version (" + str(
-                    CUR_DB_VERSION) + ") has been incremented past what this version of SickRage supports (" + str(
-                    MAX_DB_VERSION) + ").\n" + \
-                    "If you have used other forks of SB, your database may be unusable due to their modifications.")
-
         # Initialize the config and our threads
         sickbeard.initialize(consoleLogging=self.consoleLogging)
 
@@ -517,7 +502,6 @@ class SickRage(object):
             if '--nolaunch' not in popen_list:
                 popen_list += ['--nolaunch']
             logger.log(u"Restarting SickRage with " + str(popen_list))
-            logger.close()
             subprocess.Popen(popen_list, cwd=os.getcwd())
 
         # system exit
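
Note: the SickBeard.py hunks above move the restore/config messages from logger.log() to guarded sys.stdout writes because they run before sickbeard.initialize() has configured logging. A minimal, self-contained sketch of that ordering constraint (the names are illustrative, not SickRage's):

    import sys
    import logging

    def initialize(console_logging=True):
        # stand-in for sickbeard.initialize(), which now calls logger.initLogging()
        if console_logging:
            logging.basicConfig(level=logging.INFO)

    def start(console_logging=True):
        # before initialize() the logger has no handlers, so early status
        # messages must be written straight to stdout
        if console_logging:
            sys.stdout.write("Restore successful...\n")
        initialize(console_logging)
        logging.info("the logger is safe to use from here on")

    start()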
@@ -23,10 +23,10 @@ import datetime
 import socket
 import os
 import re
+import sys
 import os.path
 
 from threading import Lock
-import sys
 
 from github import Github
 
@@ -549,11 +549,23 @@ def initialize(consoleLogging=True):
         CheckSection(CFG, 'Pushbullet')
         CheckSection(CFG, 'Subtitles')
 
+        # debugging
+        DEBUG = bool(check_setting_int(CFG, 'General', 'debug', 0))
+
+        ACTUAL_LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs')
+        LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR))
+
+        fileLogging = True
+        if not helpers.makeDir(LOG_DIR):
+            sys.stderr.write("!!! No log folder, logging to screen only!\n")
+            fileLogging=False
+
+        # init logging
+        logger.initLogging(consoleLogging=consoleLogging, fileLogging=fileLogging, debug=DEBUG)
+
         # github api
-        try:
-            gh = Github().get_organization(GIT_ORG).get_repo(GIT_REPO)
-        except:
-            gh = None
+        try:gh = Github().get_organization(GIT_ORG).get_repo(GIT_REPO)
+        except:gh = None
 
         # git reset on update
         GIT_RESET = bool(check_setting_int(CFG, 'General', 'git_reset', 0))
@@ -596,13 +608,6 @@ def initialize(consoleLogging=True):
 
         THEME_NAME = check_setting_str(CFG, 'GUI', 'theme_name', 'dark')
 
-        ACTUAL_LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs')
-        # put the log dir inside the data dir, unless an absolute path
-        LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR))
-
-        if not helpers.makeDir(LOG_DIR):
-            logger.log(u"!!! No log folder, logging to screen only!", logger.ERROR)
-
         SOCKET_TIMEOUT = check_setting_int(CFG, 'General', 'socket_timeout', 30)
         socket.setdefaulttimeout(SOCKET_TIMEOUT)
 
@@ -646,8 +651,6 @@ def initialize(consoleLogging=True):
         USE_API = bool(check_setting_int(CFG, 'General', 'use_api', 0))
         API_KEY = check_setting_str(CFG, 'General', 'api_key', '', censor_log=True)
 
-        DEBUG = bool(check_setting_int(CFG, 'General', 'debug', 0))
-
         ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0))
 
         HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', 'server.crt')
@@ -1099,9 +1102,6 @@ def initialize(consoleLogging=True):
         logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG)
         save_config()
 
-    # start up all the threads
-    logger.sb_log_instance.initLogging(consoleLogging=consoleLogging)
-
     # initialize the main SB database
     myDB = db.DBConnection()
     db.upgradeDatabase(myDB, mainDB.InitialSchema)
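
Note: initialize() now resolves the log directory and decides up front whether file logging is possible, then hands both flags to the new logger. A standalone sketch of that bootstrap (make_dir() stands in for helpers.makeDir()):

    import os
    import sys

    def make_dir(path):
        # stand-in for helpers.makeDir(): True if the directory exists or was created
        try:
            os.makedirs(path)
        except OSError:
            pass
        return os.path.isdir(path)

    DATA_DIR = os.getcwd()
    ACTUAL_LOG_DIR = 'Logs'
    LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR))

    fileLogging = True
    if not make_dir(LOG_DIR):
        sys.stderr.write("!!! No log folder, logging to screen only!\n")
        fileLogging = False

    # logger.initLogging(consoleLogging=True, fileLogging=fileLogging, debug=False)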
@@ -369,7 +369,7 @@ def minimax(val, default, low, high):
 ################################################################################
 # Check_setting_int                                                            #
 ################################################################################
-def check_setting_int(config, cfg_name, item_name, def_val):
+def check_setting_int(config, cfg_name, item_name, def_val, silent=True):
     try:
         my_val = int(config[cfg_name][item_name])
         if str(my_val) == str(None):
@@ -381,14 +381,17 @@ def check_setting_int(config, cfg_name, item_name, def_val):
     except:
         config[cfg_name] = {}
         config[cfg_name][item_name] = my_val
-    logger.log(item_name + " -> " + str(my_val), logger.DEBUG)
+
+    if not silent:
+        logger.log(item_name + " -> " + str(my_val), logger.DEBUG)
+
     return my_val
 
 
 ################################################################################
 # Check_setting_float                                                          #
 ################################################################################
-def check_setting_float(config, cfg_name, item_name, def_val):
+def check_setting_float(config, cfg_name, item_name, def_val, silent=True):
     try:
         my_val = float(config[cfg_name][item_name])
         if str(my_val) == str(None):
@@ -401,14 +404,16 @@ def check_setting_float(config, cfg_name, item_name, def_val):
         config[cfg_name] = {}
         config[cfg_name][item_name] = my_val
 
-    logger.log(item_name + " -> " + str(my_val), logger.DEBUG)
+    if not silent:
+        logger.log(item_name + " -> " + str(my_val), logger.DEBUG)
 
     return my_val
 
 
 ################################################################################
 # Check_setting_str                                                            #
 ################################################################################
-def check_setting_str(config, cfg_name, item_name, def_val, log=True, censor_log=False):
+def check_setting_str(config, cfg_name, item_name, def_val, silent=True, censor_log=False):
     # For passwords you must include the word `password` in the item_name and add `helpers.encrypt(ITEM_NAME, ENCRYPTION_VERSION)` in save_config()
     if bool(item_name.find('password') + 1):
         log = False
@@ -431,10 +436,8 @@ def check_setting_str(config, cfg_name, item_name, def_val, log=True, censor_log
     if censor_log or (cfg_name, item_name) in logger.censoredItems.items():
         logger.censoredItems[cfg_name, item_name] = my_val
 
-    if log:
+    if not silent:
         logger.log(item_name + " -> " + str(my_val), logger.DEBUG)
-    else:
-        logger.log(item_name + " -> ******", logger.DEBUG)
 
     return my_val
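
Note: the config helpers swap the old log-everything behaviour for a silent=True default, so reading hundreds of settings no longer emits one DEBUG line each. A minimal reimplementation to show the intended call pattern (not the actual sickbeard.config code):

    def check_setting_int(config, cfg_name, item_name, def_val, silent=True):
        try:
            my_val = int(config[cfg_name][item_name])
        except (KeyError, TypeError, ValueError):
            my_val = def_val
            config.setdefault(cfg_name, {})[item_name] = my_val
        if not silent:
            print(item_name + " -> " + str(my_val))  # stands in for logger.log(..., DEBUG)
        return my_val

    cfg = {'General': {'socket_timeout': '30'}}
    check_setting_int(cfg, 'General', 'socket_timeout', 30)                # quiet by default
    check_setting_int(cfg, 'General', 'socket_timeout', 30, silent=False)  # opt in to the DEBUG line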
@@ -437,7 +437,7 @@ class Add1080pAndRawHDQualities(RenameSeasonFolders):
                                           common.Quality.UNKNOWN], [])
 
         # update qualities (including templates)
-        logger.log(u"[1/4] Updating pre-defined templates and the quality for each show...", logger.MESSAGE)
+        logger.log(u"[1/4] Updating pre-defined templates and the quality for each show...", logger.INFO)
         cl = []
         shows = self.connection.select("SELECT * FROM tv_shows")
         for cur_show in shows:
@@ -451,7 +451,7 @@ class Add1080pAndRawHDQualities(RenameSeasonFolders):
         self.connection.mass_action(cl)
 
         # update status that are are within the old hdwebdl (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768)
-        logger.log(u"[2/4] Updating the status for the episodes within each show...", logger.MESSAGE)
+        logger.log(u"[2/4] Updating the status for the episodes within each show...", logger.INFO)
         cl = []
         episodes = self.connection.select("SELECT * FROM tv_episodes WHERE status < 3276800 AND status >= 800")
         for cur_episode in episodes:
@@ -462,7 +462,7 @@ class Add1080pAndRawHDQualities(RenameSeasonFolders):
         # make two seperate passes through the history since snatched and downloaded (action & quality) may not always coordinate together
 
         # update previous history so it shows the correct action
-        logger.log(u"[3/4] Updating history to reflect the correct action...", logger.MESSAGE)
+        logger.log(u"[3/4] Updating history to reflect the correct action...", logger.INFO)
         cl = []
         historyAction = self.connection.select("SELECT * FROM history WHERE action < 3276800 AND action >= 800")
         for cur_entry in historyAction:
@@ -471,7 +471,7 @@ class Add1080pAndRawHDQualities(RenameSeasonFolders):
         self.connection.mass_action(cl)
 
         # update previous history so it shows the correct quality
-        logger.log(u"[4/4] Updating history to reflect the correct quality...", logger.MESSAGE)
+        logger.log(u"[4/4] Updating history to reflect the correct quality...", logger.INFO)
         cl = []
         historyQuality = self.connection.select("SELECT * FROM history WHERE quality < 32768 AND quality >= 8")
         for cur_entry in historyQuality:
@@ -745,7 +745,7 @@ class ConvertIndexerToInteger(AddSceneNumbering):
         backupDatabase(28)
 
         cl = []
-        logger.log(u"Converting Indexer to Integer ...", logger.MESSAGE)
+        logger.log(u"Converting Indexer to Integer ...", logger.INFO)
         cl.append(["UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?", ["1", "tvdb"]])
         cl.append(["UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?", ["2", "tvrage"]])
         cl.append(["UPDATE tv_episodes SET indexer = ? WHERE LOWER(indexer) = ?", ["1", "tvdb"]])
@@ -791,7 +791,7 @@ class AddSportsOption(AddRequireAndIgnoreWords):
 
         if self.hasColumn("tv_shows", "air_by_date") and self.hasColumn("tv_shows", "sports"):
             # update sports column
-            logger.log(u"[4/4] Updating tv_shows to reflect the correct sports value...", logger.MESSAGE)
+            logger.log(u"[4/4] Updating tv_shows to reflect the correct sports value...", logger.INFO)
             cl = []
            historyQuality = self.connection.select(
                "SELECT * FROM tv_shows WHERE LOWER(classification) = 'sports' AND air_by_date = 1 AND sports = 0")
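
Note: the bulk of the remaining hunks mechanically rename logger.MESSAGE to logger.INFO. Since the new logger.py further below keeps both names bound to logging.INFO, the rename is purely cosmetic and any straggler still passing logger.MESSAGE logs at the same level:

    import logging

    MESSAGE = logging.INFO  # kept for backwards compatibility
    INFO = logging.INFO     # new preferred name

    assert MESSAGE == INFO == logging.INFO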
@@ -272,7 +272,7 @@ class DBSanityCheck(object):
 # ===============
 
 def upgradeDatabase(connection, schema):
-    logger.log(u"Checking database structure...", logger.MESSAGE)
+    logger.log(u"Checking database structure...", logger.INFO)
     _processUpgrade(connection, schema)
 
 
@@ -293,7 +293,7 @@ def _processUpgrade(connection, upgradeClass):
     instance = upgradeClass(connection)
    logger.log(u"Checking " + prettyName(upgradeClass.__name__) + " database upgrade", logger.DEBUG)
    if not instance.test():
-        logger.log(u"Database upgrade required: " + prettyName(upgradeClass.__name__), logger.MESSAGE)
+        logger.log(u"Database upgrade required: " + prettyName(upgradeClass.__name__), logger.INFO)
         try:
             instance.execute()
         except sqlite3.DatabaseError, e:
@@ -72,7 +72,7 @@ class FailedProcessor(object):
 
         return True
 
-    def _log(self, message, level=logger.MESSAGE):
+    def _log(self, message, level=logger.INFO):
         """Log to regular logfile and save for return for PP script log"""
         logger.log(message, level)
         self.log += message + "\n"
@@ -18,336 +18,77 @@
 
 from __future__ import with_statement
 
 import time
 import os
 import sys
+import logging
+import logging.handlers
 import threading
-
-import logging
 
 import sickbeard
 import encodingKludge as ek
 
 from sickbeard import classes
 
-try:
-    from lib.send2trash import send2trash
-except ImportError:
-    pass
-
-# number of log files to keep
-NUM_LOGS = 3
-
-# log size in bytes
-LOG_SIZE = 10000000 # 10 megs
-
 # log levels
 ERROR = logging.ERROR
 WARNING = logging.WARNING
 MESSAGE = logging.INFO
+INFO = logging.INFO
 DEBUG = logging.DEBUG
 DB = 5
 
 reverseNames = {u'ERROR': ERROR,
                 u'WARNING': WARNING,
                 u'INFO': MESSAGE,
                 u'DEBUG': DEBUG,
                 u'DB': DB}
 
 censoredItems = {}
 
 # send logging to null
-class NullHandler(logging.Handler):
-    def emit(self, record):
+class NullFilter(logging.Filter):
+    def filter(self, record):
         pass
 
 class CensorFilter(logging.Filter):
     def filter(self, record):
-        for k,v in censoredItems.items():
+        for k, v in censoredItems.items():
             if v and len(v) > 0 and v in record.msg:
-                record.msg = record.msg.replace(v, len(v)*'*')
+                record.msg = record.msg.replace(v, len(v) * '*')
         return True
 
-class SBRotatingLogHandler(object):
-    def __init__(self, log_file, num_files, num_bytes):
-        self.blacklistFilter = CensorFilter()
+def initLogging(consoleLogging=False, fileLogging=False, debug=False):
+    logFile = os.path.join(sickbeard.LOG_DIR, 'sickrage.log')
 
-        self.num_files = num_files
-        self.num_bytes = num_bytes
+    # Add a new logging level DB
+    logging.addLevelName(DB, 'DB')
 
-        self.log_file = log_file
-        self.log_file_path = log_file
-        self.cur_handler = None
+    # sickrage logger
+    sr_log = logging.getLogger()
+    sr_log.setLevel(DB)
 
-        self.writes_since_check = 0
+    # tornado loggers
+    logging.getLogger("tornado.access").addFilter(NullFilter())
 
-        self.console_logging = False
-        self.log_lock = threading.Lock()
+    # console log handler
+    if consoleLogging:
+        console = logging.StreamHandler()
+        console.addFilter(CensorFilter())
+        console.setLevel(INFO if not debug else DEBUG)
+        console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
+        sr_log.addHandler(console)
 
-    def __del__(self):
-        pass
+    # rotating log file handler
+    if fileLogging:
+        rfh = logging.handlers.RotatingFileHandler(logFile, maxBytes=1024 * 1024, backupCount=5, encoding='utf-8')
+        rfh.addFilter(CensorFilter())
+        rfh.setLevel(DEBUG)
+        rfh.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S'))
+        sr_log.addHandler(rfh)
 
-    def close_log(self, handler=None):
-        if not handler:
-            handler = self.cur_handler
-
-        if handler:
-            sb_logger = logging.getLogger('sickbeard')
-            sub_logger = logging.getLogger('subliminal')
-            imdb_logger = logging.getLogger('imdbpy')
-            tornado_logger = logging.getLogger('tornado')
-            feedcache_logger = logging.getLogger('feedcache')
-
-            sb_logger.removeHandler(handler)
-            sub_logger.removeHandler(handler)
-            imdb_logger.removeHandler(handler)
-            tornado_logger.removeHandler(handler)
-            feedcache_logger.removeHandler(handler)
-
-            handler.flush()
-            handler.close()
-
-    def initLogging(self, consoleLogging=False):
-
-        if consoleLogging:
-            self.console_logging = consoleLogging
-
-        old_handler = None
-
-        # get old handler in case we want to close it
-        if self.cur_handler:
-            old_handler = self.cur_handler
-        else:
-
-            #Add a new logging level DB
-            logging.addLevelName(5, 'DB')
-
-            # only start consoleLogging on first initialize
-            if self.console_logging:
-                # define a Handler which writes INFO messages or higher to the sys.stderr
-                console = logging.StreamHandler()
-
-                # filter blacklisted words and replace them with asterisks
-                console.addFilter(self.blacklistFilter)
-
-                console.setLevel(logging.INFO)
-                if sickbeard.DEBUG:
-                    console.setLevel(logging.DEBUG)
-
-                # set a format which is simpler for console use
-                console.setFormatter(DispatchingFormatter(
-                    {'sickbeard': logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'),
-                     'subliminal': logging.Formatter('%(asctime)s %(levelname)s::SUBLIMINAL :: %(message)s',
-                                                     '%H:%M:%S'),
-                     'imdbpy': logging.Formatter('%(asctime)s %(levelname)s::IMDBPY :: %(message)s', '%H:%M:%S'),
-                     'tornado.general': logging.Formatter('%(asctime)s %(levelname)s::TORNADO :: %(message)s', '%H:%M:%S'),
-                     'tornado.application': logging.Formatter('%(asctime)s %(levelname)s::TORNADO :: %(message)s', '%H:%M:%S'),
-                     'feedcache.cache': logging.Formatter('%(asctime)s %(levelname)s::FEEDCACHE :: %(message)s',
-                                                          '%H:%M:%S')
-                     },
-                    logging.Formatter('%(message)s'), ))
-
-                # add the handler to the root logger
-                logging.getLogger('sickbeard').addHandler(console)
-                logging.getLogger('tornado.general').addHandler(console)
-                logging.getLogger('tornado.application').addHandler(console)
-                logging.getLogger('subliminal').addHandler(console)
-                logging.getLogger('imdbpy').addHandler(console)
-                logging.getLogger('feedcache').addHandler(console)
-
-        self.log_file_path = os.path.join(sickbeard.LOG_DIR, self.log_file)
-        self.cur_handler = self._config_handler()
-
-        logging.getLogger('sickbeard').addHandler(self.cur_handler)
-        logging.getLogger('tornado.access').addHandler(NullHandler())
-        logging.getLogger('tornado.general').addHandler(self.cur_handler)
-        logging.getLogger('tornado.application').addHandler(self.cur_handler)
-        logging.getLogger('subliminal').addHandler(self.cur_handler)
-        logging.getLogger('imdbpy').addHandler(self.cur_handler)
-        logging.getLogger('feedcache').addHandler(self.cur_handler)
-
-        logging.getLogger('sickbeard').setLevel(DB)
-
-        log_level = logging.WARNING
-        if sickbeard.DEBUG:
-            log_level = logging.DEBUG
-
-        logging.getLogger('tornado.general').setLevel(log_level)
-        logging.getLogger('tornado.application').setLevel(log_level)
-        logging.getLogger('subliminal').setLevel(log_level)
-        logging.getLogger('imdbpy').setLevel(log_level)
-        logging.getLogger('feedcache').setLevel(log_level)
-
-        # already logging in new log folder, close the old handler
-        if old_handler:
-            self.close_log(old_handler)
-
-    def _config_handler(self):
-        """
-        Configure a file handler to log at file_name and return it.
-        """
-
-        file_handler = logging.FileHandler(self.log_file_path, encoding='utf-8')
-
-        # filter blacklisted words and replace them with asterisks
-        file_handler.addFilter(self.blacklistFilter)
-
-        file_handler.setLevel(DB)
-        file_handler.setFormatter(DispatchingFormatter(
-            {'sickbeard': logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S'),
-             'subliminal': logging.Formatter('%(asctime)s %(levelname)-8s SUBLIMINAL :: %(message)s',
-                                             '%Y-%m-%d %H:%M:%S'),
-             'imdbpy': logging.Formatter('%(asctime)s %(levelname)-8s IMDBPY :: %(message)s', '%Y-%m-%d %H:%M:%S'),
-             'tornado.general': logging.Formatter('%(asctime)s %(levelname)-8s TORNADO :: %(message)s', '%Y-%m-%d %H:%M:%S'),
-             'tornado.application': logging.Formatter('%(asctime)s %(levelname)-8s TORNADO :: %(message)s', '%Y-%m-%d %H:%M:%S'),
-             'feedcache.cache': logging.Formatter('%(asctime)s %(levelname)-8s FEEDCACHE :: %(message)s',
-                                                  '%Y-%m-%d %H:%M:%S')
-             },
-            logging.Formatter('%(message)s'), ))
-
-        return file_handler
-
-    def _log_file_name(self, i):
-        """
-        Returns a numbered log file name depending on i. If i==0 it just uses logName, if not it appends
-        it to the extension (blah.log.3 for i == 3)
-
-        i: Log number to ues
-        """
-
-        return self.log_file_path + ('.' + str(i) if i else '')
-
-    def _num_logs(self):
-        """
-        Scans the log folder and figures out how many log files there are already on disk
-
-        Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1
-        """
-
-        cur_log = 0
-        while os.path.isfile(self._log_file_name(cur_log)):
-            cur_log += 1
-        return cur_log - 1
-
-    def _rotate_logs(self):
-
-        sb_logger = logging.getLogger('sickbeard')
-        sub_logger = logging.getLogger('subliminal')
-        imdb_logger = logging.getLogger('imdbpy')
-        tornado_logger = logging.getLogger('tornado')
-        feedcache_logger = logging.getLogger('feedcache')
-
-        # delete the old handler
-        if self.cur_handler:
-            self.close_log()
-
-        # rename or delete all the old log files
-        for i in range(self._num_logs(), -1, -1):
-            cur_file_name = self._log_file_name(i)
-            try:
-                if i >= NUM_LOGS:
-                    if sickbeard.TRASH_ROTATE_LOGS:
-                        new_name = '%s.%s' % (cur_file_name, int(time.time()))
-                        os.rename(cur_file_name, new_name)
-                        send2trash(new_name)
-                    else:
-                        os.remove(cur_file_name)
-                else:
-                    os.rename(cur_file_name, self._log_file_name(i + 1))
-            except OSError:
-                pass
-
-        # the new log handler will always be on the un-numbered .log file
-        new_file_handler = self._config_handler()
-
-        self.cur_handler = new_file_handler
-
-        sb_logger.addHandler(new_file_handler)
-        sub_logger.addHandler(new_file_handler)
-        imdb_logger.addHandler(new_file_handler)
-        tornado_logger.addHandler(new_file_handler)
-        feedcache_logger.addHandler(new_file_handler)
-
-    def log(self, toLog, logLevel=MESSAGE):
-
-        with self.log_lock:
-
-            # check the size and see if we need to rotate
-            if self.writes_since_check >= 10:
-                if os.path.isfile(self.log_file_path) and os.path.getsize(self.log_file_path) >= LOG_SIZE:
-                    self._rotate_logs()
-                self.writes_since_check = 0
-            else:
-                self.writes_since_check += 1
-
-            meThread = threading.currentThread().getName()
-            message = meThread + u" :: " + toLog
-
-            out_line = message
-
-            sb_logger = logging.getLogger('sickbeard')
-            sub_logger = logging.getLogger('subliminal')
-            imdb_logger = logging.getLogger('imdbpy')
-            tornado_logger = logging.getLogger('tornado')
-            feedcache_logger = logging.getLogger('feedcache')
-
-            setattr(sb_logger, 'db', lambda *args: sb_logger.log(DB, *args))
-
-            # filtering
-            sb_logger.addFilter(self.blacklistFilter)
-            sub_logger.addFilter(self.blacklistFilter)
-            imdb_logger.addFilter(self.blacklistFilter)
-            tornado_logger.addFilter(self.blacklistFilter)
-            feedcache_logger.addFilter(self.blacklistFilter)
-
-            try:
-                if logLevel == DEBUG:
-                    sb_logger.debug(out_line)
-                elif logLevel == MESSAGE:
-                    sb_logger.info(out_line)
-                elif logLevel == WARNING:
-                    sb_logger.warning(out_line)
-                elif logLevel == ERROR:
-                    sb_logger.error(out_line)
-
-                    # add errors to the UI logger
-                    classes.ErrorViewer.add(classes.UIError(message))
-                elif logLevel == DB:
-                    sb_logger.db(out_line)
-                else:
-                    sb_logger.log(logLevel, out_line)
-            except ValueError:
-                pass
-
-    def log_error_and_exit(self, error_msg):
-        log(error_msg, ERROR)
-
-        if not self.console_logging:
-            sys.exit(error_msg.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
-        else:
-            sys.exit(1)
-
-
-class DispatchingFormatter:
-    def __init__(self, formatters, default_formatter):
-        self._formatters = formatters
-        self._default_formatter = default_formatter
-
-    def __del__(self):
-        pass
-
-    def format(self, record):
-        formatter = self._formatters.get(record.name, self._default_formatter)
-        return formatter.format(record)
-
-
-sb_log_instance = SBRotatingLogHandler('sickbeard.log', NUM_LOGS, LOG_SIZE)
-
-def log(toLog, logLevel=MESSAGE):
-    sb_log_instance.log(toLog, logLevel)
-
-def log_error_and_exit(error_msg):
-    sb_log_instance.log_error_and_exit(error_msg)
-
-def close():
-    sb_log_instance.close_log()
+def log(msg, level=INFO, *args, **kwargs):
+    meThread = threading.currentThread().getName()
+    message = meThread + u" :: " + msg
+
+    logging.log(level, message, *args, **kwargs)
+    classes.ErrorViewer.add(classes.UIError(message)) and level == ERROR
+
+def log_error_and_exit(self, error_msg, *args, **kwargs):
+    log(error_msg, ERROR, *args, **kwargs)
+
+    if not self.consoleLogging:
+        sys.exit(error_msg.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
+    else:
+        sys.exit(1)
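
Note: rotation is now delegated to the standard library's RotatingFileHandler (maxBytes=1024 * 1024, backupCount=5) instead of the hand-rolled _rotate_logs()/send2trash machinery, and censoring is applied per handler via CensorFilter. A runnable sketch of the same wiring (the path and censored value are made up for the demo):

    import logging
    import logging.handlers

    DB = 5
    censoredItems = {}

    class CensorFilter(logging.Filter):
        # same idea as the CensorFilter above: mask sensitive values in every record
        def filter(self, record):
            for k, v in censoredItems.items():
                if v and v in record.msg:
                    record.msg = record.msg.replace(v, len(v) * '*')
            return True

    logging.addLevelName(DB, 'DB')
    root = logging.getLogger()
    root.setLevel(DB)

    console = logging.StreamHandler()
    console.addFilter(CensorFilter())
    console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
    root.addHandler(console)

    rfh = logging.handlers.RotatingFileHandler('sickrage.log', maxBytes=1024 * 1024,
                                               backupCount=5, encoding='utf-8')
    rfh.setLevel(logging.DEBUG)
    root.addHandler(rfh)

    censoredItems['General', 'api_key'] = 'hunter2'
    logging.info("MAIN :: api_key is hunter2")  # console shows: api_key is *******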
@@ -91,7 +91,7 @@ def buildNameCache(show=None):
     sickbeard.scene_exceptions.retrieve_exceptions()
 
     if not show:
-        logger.log(u"Building internal name cache for all shows", logger.MESSAGE)
+        logger.log(u"Building internal name cache for all shows", logger.INFO)
 
         cacheDB = db.DBConnection('cache.db')
         cache_results = cacheDB.select("SELECT * FROM scene_names")
@@ -114,7 +114,7 @@ def buildNameCache(show=None):
 
             nameCache[name] = int(show.indexerid)
     else:
-        logger.log(u"Building internal name cache for " + show.name, logger.MESSAGE)
+        logger.log(u"Building internal name cache for " + show.name, logger.INFO)
 
         for curSeason in [-1] + sickbeard.scene_exceptions.get_scene_seasons(show.indexerid):
             for name in list(set(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=curSeason) + [
@@ -118,7 +118,7 @@ def _update_zoneinfo():
     new_hash = str(helpers.md5_for_file(zonefile_tmp))
 
     if zoneinfo_md5.upper() == new_hash.upper():
-        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.MESSAGE)
+        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.INFO)
         try:
             # remove the old zoneinfo file
             if cur_zoneinfo is not None:
@@ -107,7 +107,7 @@ class BoxcarNotifier:
             logger.log("Wrong data sent to boxcar", logger.ERROR)
             return False
 
-        logger.log("Boxcar notification successful.", logger.MESSAGE)
+        logger.log("Boxcar notification successful.", logger.INFO)
         return True
 
     def notify_snatch(self, ep_name, title=notifyStrings[NOTIFY_SNATCH]):
@@ -126,7 +126,7 @@ class KODINotifier:
 
         result = ''
         for curHost in [x.strip() for x in host.split(",")]:
-            logger.log(u"Sending KODI notification to '" + curHost + "' - " + message, logger.MESSAGE)
+            logger.log(u"Sending KODI notification to '" + curHost + "' - " + message, logger.INFO)
 
             kodiapi = self._get_kodi_version(curHost, username, password)
             if kodiapi:
@@ -168,7 +168,7 @@ class KODINotifier:
 
         """
 
-        logger.log(u"Sending request to update library for KODI host: '" + host + "'", logger.MESSAGE)
+        logger.log(u"Sending request to update library for KODI host: '" + host + "'", logger.INFO)
 
         kodiapi = self._get_kodi_version(host, sickbeard.KODI_USERNAME, sickbeard.KODI_PASSWORD)
         if kodiapi:
@@ -329,7 +329,7 @@ class KODINotifier:
                 time.sleep(5)
             # do a full update if requested
             else:
-                logger.log(u"Doing Full Library KODI update on host: " + host, logger.MESSAGE)
+                logger.log(u"Doing Full Library KODI update on host: " + host, logger.INFO)
                 updateCommand = {'command': 'ExecBuiltIn', 'parameter': 'KODI.updatelibrary(video)'}
                 request = self._send_to_kodi(updateCommand, host)
 
@@ -424,7 +424,7 @@ class KODINotifier:
             logger.log(u'No KODI host passed, aborting update', logger.DEBUG)
             return False
 
-        logger.log(u"Updating XMBC library via JSON method for host: " + host, logger.MESSAGE)
+        logger.log(u"Updating XMBC library via JSON method for host: " + host, logger.INFO)
 
         # if we're doing per-show
         if showName:
@@ -487,7 +487,7 @@ class KODINotifier:
 
         # do a full update if requested
         else:
-            logger.log(u"Doing Full Library KODI update on host: " + host, logger.MESSAGE)
+            logger.log(u"Doing Full Library KODI update on host: " + host, logger.INFO)
             updateCommand = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","id":1}'
             request = self._send_to_kodi_json(updateCommand, host, sickbeard.KODI_USERNAME, sickbeard.KODI_PASSWORD)
@@ -58,7 +58,7 @@ class NMA_Notifier:
             logger.log(u'Could not send notification to NotifyMyAndroid', logger.ERROR)
             return False
         else:
-            logger.log(u"NMA: Notification sent to NotifyMyAndroid", logger.MESSAGE)
+            logger.log(u"NMA: Notification sent to NotifyMyAndroid", logger.INFO)
             return True
@@ -171,7 +171,7 @@ class NMJNotifier:
             logger.log(u"Popcorn Hour returned an errorcode: %s" % (result), logger.ERROR)
             return False
         else:
-            logger.log(u"NMJ started background scan", logger.MESSAGE)
+            logger.log(u"NMJ started background scan", logger.INFO)
             return True
 
     def _notifyNMJ(self, host=None, database=None, mount=None, force=False):
@@ -154,7 +154,7 @@ class NMJv2Notifier:
             logger.log(u"Popcorn Hour returned an error: %s" % (error_messages[index]), logger.ERROR)
             return False
         else:
-            logger.log(u"NMJv2 started background scan", logger.MESSAGE)
+            logger.log(u"NMJv2 started background scan", logger.INFO)
             return True
 
     def _notifyNMJ(self, host=None, force=False):
@@ -93,7 +93,7 @@ class PLEXNotifier(KODINotifier):
             return False
 
         logger.log(u"Updating library for the Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST,
-                   logger.MESSAGE)
+                   logger.INFO)
 
         url = "http://%s/library/sections" % sickbeard.PLEX_SERVER_HOST
         try:
@@ -104,7 +104,7 @@ class PLEXNotifier(KODINotifier):
 
         sections = xml_sections.getElementsByTagName('Directory')
         if not sections:
-            logger.log(u"Plex Media Server not running on: " + sickbeard.PLEX_SERVER_HOST, logger.MESSAGE)
+            logger.log(u"Plex Media Server not running on: " + sickbeard.PLEX_SERVER_HOST, logger.INFO)
             return False
 
         for s in sections:
@@ -95,7 +95,7 @@ class ProwlNotifier:
         request_status = response.status
 
         if request_status == 200:
-            logger.log(u"Prowl notifications sent.", logger.MESSAGE)
+            logger.log(u"Prowl notifications sent.", logger.INFO)
             return True
         elif request_status == 401:
             logger.log(u"Prowl auth failed: %s" % response.reason, logger.ERROR)
@@ -104,7 +104,7 @@ class PushoverNotifier:
             logger.log("Pushover API message limit reached - try a different API key", logger.ERROR)
             return False
 
-        logger.log("Pushover notification successful.", logger.MESSAGE)
+        logger.log("Pushover notification successful.", logger.INFO)
         return True
 
     def notify_snatch(self, ep_name, title=notifyStrings[NOTIFY_SNATCH]):
@@ -95,7 +95,7 @@ class PostProcessor(object):
 
         self.version = None
 
-    def _log(self, message, level=logger.MESSAGE):
+    def _log(self, message, level=logger.INFO):
         """
         A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
 
@@ -95,7 +95,7 @@ def delete_files(processPath, notwantedFiles):
         except OSError, e:
             returnStr += logHelper(u"Unable to delete file " + cur_file + ': ' + str(e.strerror), logger.DEBUG)
 
-def logHelper(logMessage, logLevel=logger.MESSAGE):
+def logHelper(logMessage, logLevel=logger.INFO):
     logger.log(logMessage, logLevel)
     return logMessage + u"\n"
@@ -159,14 +159,14 @@ class ProperFinder():
                                                                      parse_result.show.rls_ignore_words):
                 logger.log(
                     u"Ignoring " + curProper.name + " based on ignored words filter: " + parse_result.show.rls_ignore_words,
-                    logger.MESSAGE)
+                    logger.INFO)
                 continue
 
             if parse_result.show.rls_require_words and not search.filter_release_name(curProper.name,
                                                                                       parse_result.show.rls_require_words):
                 logger.log(
                     u"Ignoring " + curProper.name + " based on required words filter: " + parse_result.show.rls_require_words,
-                    logger.MESSAGE)
+                    logger.INFO)
                 continue
 
             # check if we actually want this proper (if it's the right quality)
@@ -171,9 +171,9 @@ class GenericProvider:
         logger.log(u"Downloading a result from " + self.name + " at " + url)
 
         if self.providerType == GenericProvider.TORRENT:
-            logger.log(u"Saved magnet link to " + filename, logger.MESSAGE)
+            logger.log(u"Saved magnet link to " + filename, logger.INFO)
         else:
-            logger.log(u"Saved result to " + filename, logger.MESSAGE)
+            logger.log(u"Saved result to " + filename, logger.INFO)
 
         if self._verify_download(filename):
             return True
@@ -147,7 +147,7 @@ class TorrentRssProvider(generic.TorrentProvider):
         except IOError, e:
             logger.log("Unable to save the file: " + ex(e), logger.ERROR)
             return False
-        logger.log(u"Saved custom_torrent html dump " + dumpName + " ", logger.MESSAGE)
+        logger.log(u"Saved custom_torrent html dump " + dumpName + " ", logger.INFO)
         return True
 
     def seedRatio(self):
@@ -256,7 +256,7 @@ def update_scene_exceptions(indexer_id, scene_exceptions, season=-1):
     myDB = db.DBConnection('cache.db')
     myDB.action('DELETE FROM scene_exceptions WHERE indexer_id=? and season=?', [indexer_id, season])
 
-    logger.log(u"Updating scene exceptions", logger.MESSAGE)
+    logger.log(u"Updating scene exceptions", logger.INFO)
 
     # A change has been made to the scene exception list. Let's clear the cache, to make this visible
     if indexer_id in exceptionsCache:
@@ -493,7 +493,7 @@ def xem_refresh(indexer_id, indexer, force=False):
         try:
             parsedJSON = sickbeard.helpers.getURL(url, json=True)
             if not parsedJSON or parsedJSON == '':
-                logger.log(u'No XEN data for show "%s on %s"' % (indexer_id, sickbeard.indexerApi(indexer).name,), logger.MESSAGE)
+                logger.log(u'No XEN data for show "%s on %s"' % (indexer_id, sickbeard.indexerApi(indexer).name,), logger.INFO)
                 return
 
             if 'success' in parsedJSON['result']:
@@ -214,7 +214,7 @@ def pickBestResult(results, show, quality_list=None):
 
         if bwl:
             if not bwl.is_valid(cur_result):
-                logger.log(cur_result.name+" does not match the blacklist or the whitelist, rejecting it. Result: " + bwl.get_last_result_msg(), logger.MESSAGE)
+                logger.log(cur_result.name+" does not match the blacklist or the whitelist, rejecting it. Result: " + bwl.get_last_result_msg(), logger.INFO)
                 continue
 
         if quality_list and cur_result.quality not in quality_list:
@@ -223,12 +223,12 @@ def pickBestResult(results, show, quality_list=None):
 
         if show.rls_ignore_words and filter_release_name(cur_result.name, show.rls_ignore_words):
             logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
-                       logger.MESSAGE)
+                       logger.INFO)
             continue
 
         if show.rls_require_words and not filter_release_name(cur_result.name, show.rls_require_words):
             logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
-                       logger.MESSAGE)
+                       logger.INFO)
             continue
 
         if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
@@ -556,7 +556,7 @@ class QueueItemUpdate(ShowQueueItem):
             for curSeason in DBEpList:
                 for curEpisode in DBEpList[curSeason]:
                     logger.log(u"Permanently deleting episode " + str(curSeason) + "x" + str(
-                        curEpisode) + " from the database", logger.MESSAGE)
+                        curEpisode) + " from the database", logger.INFO)
                     curEp = self.show.getEpisode(curSeason, curEpisode)
                     try:
                         curEp.deleteEpisode()
@@ -91,7 +91,7 @@ class SubtitlesFinder():
             logger.log(u'Not enough services selected. At least 1 service is required to search subtitles in the background', logger.ERROR)
             return
 
-        logger.log(u'Checking for subtitles', logger.MESSAGE)
+        logger.log(u'Checking for subtitles', logger.INFO)
 
         # get episodes on which we want subtitles
         # criteria is:
@@ -106,7 +106,7 @@ class SubtitlesFinder():
         myDB = db.DBConnection()
         sqlResults = myDB.select('SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.subtitles, e.subtitles_searchcount AS searchcount, e.subtitles_lastsearch AS lastsearch, e.location, (? - e.airdate) AS airdate_daydiff FROM tv_episodes AS e INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id) WHERE s.subtitles = 1 AND e.subtitles NOT LIKE (?) AND ((e.subtitles_searchcount <= 2 AND (? - e.airdate) > 7) OR (e.subtitles_searchcount <= 7 AND (? - e.airdate) <= 7)) AND (e.status IN ('+','.join([str(x) for x in Quality.DOWNLOADED])+') OR (e.status IN ('+','.join([str(x) for x in Quality.SNATCHED + Quality.SNATCHED_PROPER])+') AND e.location != ""))', [today, wantedLanguages(True), today, today])
         if len(sqlResults) == 0:
-            logger.log('No subtitles to download', logger.MESSAGE)
+            logger.log('No subtitles to download', logger.INFO)
             return
 
         rules = self._getRules()
@@ -4655,7 +4655,7 @@ class ErrorLogs(WebRoot):
         return self.redirect("/errorlogs/")
 
 
-    def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
+    def viewlog(self, minLevel=logger.INFO, maxLines=500):
 
         t = PageTemplate(rh=self, file="viewlogs.tmpl")
         t.submenu = self.ErrorLogsMenu()
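
Note: the viewlog() default moves with the rename. The web log viewer filters lines by comparing named levels against minLevel, which is presumably why the reverseNames mapping survives in the new logger.py; a sketch of that lookup:

    import logging

    reverseNames = {u'ERROR': logging.ERROR,
                    u'WARNING': logging.WARNING,
                    u'INFO': logging.INFO,
                    u'DEBUG': logging.DEBUG,
                    u'DB': 5}

    def keep(line_level_name, minLevel=logging.INFO):
        # keep a log line when its named level is at least the viewer's minLevel
        return reverseNames[line_level_name] >= minLevel

    assert keep(u'ERROR') and not keep(u'DEBUG')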