2014-03-10 01:18:05 -04:00
|
|
|
# Author: Nic Wolfe <nic@wolfeden.ca>
|
|
|
|
# URL: http://code.google.com/p/sickbeard/
|
|
|
|
#
|
|
|
|
# This file is part of Sick Beard.
|
|
|
|
#
|
|
|
|
# Sick Beard is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# Sick Beard is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
from __future__ import with_statement
|
|
|
|
|
|
|
|
import glob
|
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import shlex
|
|
|
|
import subprocess
|
|
|
|
import stat
|
|
|
|
import copy
|
|
|
|
|
|
|
|
import sickbeard
|
|
|
|
|
|
|
|
from sickbeard import db
|
|
|
|
from sickbeard import classes
|
|
|
|
from sickbeard import common
|
|
|
|
from sickbeard import exceptions
|
|
|
|
from sickbeard import helpers
|
|
|
|
from sickbeard import history
|
|
|
|
from sickbeard import logger
|
|
|
|
from sickbeard import notifiers
|
|
|
|
from sickbeard import show_name_helpers
|
|
|
|
from sickbeard import scene_exceptions
|
|
|
|
from sickbeard import failed_history
|
|
|
|
from sickbeard import scene_numbering
|
|
|
|
|
|
|
|
from sickbeard import encodingKludge as ek
|
|
|
|
from sickbeard.exceptions import ex
|
|
|
|
|
|
|
|
from sickbeard.name_parser.parser import NameParser, InvalidNameException
|
|
|
|
|
|
|
|
|
|
|
|
class PostProcessor(object):
    """
    A class which will process a media file according to the post processing settings in the config.
    """

    # Result codes returned by _checkForExistingFile when comparing the file
    # being processed against a file already at the destination.
    EXISTS_LARGER = 1
    EXISTS_SAME = 2
    EXISTS_SMALLER = 3
    DOESNT_EXIST = 4

    # Path fragments identifying files that should be ignored during
    # post-processing (macOS metadata artifacts).
    IGNORED_FILESTRINGS = ["/.AppleDouble/", ".DS_Store"]

    # Keys for self.good_results: records which of the candidate names
    # (NZB name, folder name, file name) yielded a complete, trustworthy parse.
    NZB_NAME = 1
    FOLDER_NAME = 2
    FILE_NAME = 3
|
|
|
|
|
2014-03-25 21:42:36 -04:00
|
|
|
    def __init__(self, file_path, nzb_name=None, process_method=None, is_priority=None):
        """
        Creates a new post processor with the given file path and optionally an NZB name.

        file_path: The path to the file to be processed
        nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
        process_method: How to handle the file (e.g. copy/move/hardlink/symlink); when not
                        given, falls back to the configured sickbeard.PROCESS_METHOD (optional)
        is_priority: Flag carried through from the snatch; stored as-is (optional)
        """

        # absolute path to the folder that is being processed
        self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))

        # full path to file
        self.file_path = file_path

        # file name only
        self.file_name = ek.ek(os.path.basename, file_path)

        # the name of the folder only
        self.folder_name = ek.ek(os.path.basename, self.folder_path)

        # name of the NZB that resulted in this folder
        self.nzb_name = nzb_name

        # how the file should be processed; falls back to the global config value
        self.process_method = process_method if process_method else sickbeard.PROCESS_METHOD

        # set by _history_lookup: whether this download was found in history
        self.in_history = False
        # set by _analyze_name/_finalize from the parsed release name
        self.release_group = None
        # set by _analyze_name/_finalize: whether the release is a proper/repack
        self.is_proper = False

        self.is_priority = is_priority

        # indexer to use for lookups; None until determined
        self.indexer = None

        # tracks which candidate names produced a complete parse (see _finalize)
        self.good_results = {self.NZB_NAME: False,
                             self.FOLDER_NAME: False,
                             self.FILE_NAME: False}

        # accumulated log messages for this processing run (see _log)
        self.log = ''
|
|
|
|
|
2014-03-10 07:20:29 -04:00
|
|
|
|
2014-03-10 01:18:05 -04:00
|
|
|
def _log(self, message, level=logger.MESSAGE):
|
|
|
|
"""
|
|
|
|
A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
|
|
|
|
|
|
|
|
message: The string to log (unicode)
|
|
|
|
level: The log level to use (optional)
|
|
|
|
"""
|
|
|
|
logger.log(message, level)
|
|
|
|
self.log += message + '\n'
|
|
|
|
|
|
|
|
def _checkForExistingFile(self, existing_file):
|
|
|
|
"""
|
|
|
|
Checks if a file exists already and if it does whether it's bigger or smaller than
|
|
|
|
the file we are post processing
|
|
|
|
|
|
|
|
existing_file: The file to compare to
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
DOESNT_EXIST if the file doesn't exist
|
|
|
|
EXISTS_LARGER if the file exists and is larger than the file we are post processing
|
|
|
|
EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
|
|
|
|
EXISTS_SAME if the file exists and is the same size as the file we are post processing
|
|
|
|
"""
|
|
|
|
|
|
|
|
if not existing_file:
|
|
|
|
self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
|
|
|
|
return PostProcessor.DOESNT_EXIST
|
|
|
|
|
|
|
|
# if the new file exists, return the appropriate code depending on the size
|
|
|
|
if ek.ek(os.path.isfile, existing_file):
|
|
|
|
|
|
|
|
# see if it's bigger than our old file
|
|
|
|
if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path):
|
|
|
|
self._log(u"File " + existing_file + " is larger than " + self.file_path, logger.DEBUG)
|
|
|
|
return PostProcessor.EXISTS_LARGER
|
|
|
|
|
|
|
|
elif ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path):
|
|
|
|
self._log(u"File " + existing_file + " is the same size as " + self.file_path, logger.DEBUG)
|
|
|
|
return PostProcessor.EXISTS_SAME
|
|
|
|
|
|
|
|
else:
|
|
|
|
self._log(u"File " + existing_file + " is smaller than " + self.file_path, logger.DEBUG)
|
|
|
|
return PostProcessor.EXISTS_SMALLER
|
|
|
|
|
|
|
|
else:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(u"File " + existing_file + " doesn't exist so there's no worries about replacing it",
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
return PostProcessor.DOESNT_EXIST
|
|
|
|
|
|
|
|
    def list_associated_files(self, file_path, base_name_only=False, subtitles_only=False):
        """
        For a given file path searches for files with the same name but different extension and returns their absolute paths

        file_path: The file to check for associated files

        base_name_only: False add extra '.' (conservative search) to file_path minus extension

        subtitles_only: True to return only files whose last three characters match a
                        known subtitle extension (see common.subtitleExtensions)

        Returns: A list containing all files which are associated to the given file
        """

        if not file_path:
            return []

        file_path_list = []

        # everything before the final '.' (empty string if there is no dot)
        base_name = file_path.rpartition('.')[0]

        if not base_name_only:
            # require the match to continue with a '.' so "show.s01e01" doesn't
            # match "show.s01e010.*"
            base_name = base_name + '.'

        # don't strip it all and use cwd by accident
        if not base_name:
            return []

        # don't confuse glob with chars we didn't mean to use
        base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)

        for associated_file_path in ek.ek(glob.glob, base_name + '*'):
            # only add associated to list
            if associated_file_path == file_path:
                continue
            # only list it if the only non-shared part is the extension or if it is a subtitle
            # NOTE(review): this compares the last three characters against the
            # (dotless, 3-char) subtitle extensions — paths shorter than 3 chars
            # would behave oddly, but real media paths are always longer
            if subtitles_only and not associated_file_path[len(associated_file_path) - 3:] in common.subtitleExtensions:
                continue

            #Exclude .rar files from associated list
            if re.search('(^.+\.(rar|r\d+)$)', associated_file_path):
                continue

            # skip directories and anything else that isn't a regular file
            if ek.ek(os.path.isfile, associated_file_path):
                file_path_list.append(associated_file_path)

        return file_path_list
|
|
|
|
|
|
|
|
def _delete(self, file_path, associated_files=False):
|
|
|
|
"""
|
|
|
|
Deletes the file and optionally all associated files.
|
|
|
|
|
|
|
|
file_path: The file to delete
|
|
|
|
associated_files: True to delete all files which differ only by extension, False to leave them
|
|
|
|
"""
|
|
|
|
|
|
|
|
if not file_path:
|
|
|
|
return
|
|
|
|
|
|
|
|
# figure out which files we want to delete
|
|
|
|
file_list = [file_path]
|
|
|
|
if associated_files:
|
|
|
|
file_list = file_list + self.list_associated_files(file_path)
|
|
|
|
|
|
|
|
if not file_list:
|
|
|
|
self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
|
|
|
|
return
|
|
|
|
|
|
|
|
# delete the file and any other files which we want to delete
|
|
|
|
for cur_file in file_list:
|
|
|
|
self._log(u"Deleting file " + cur_file, logger.DEBUG)
|
|
|
|
if ek.ek(os.path.isfile, cur_file):
|
|
|
|
#check first the read-only attribute
|
|
|
|
file_attribute = ek.ek(os.stat, cur_file)[0]
|
|
|
|
if (not file_attribute & stat.S_IWRITE):
|
|
|
|
# File is read-only, so make it writeable
|
|
|
|
self._log('Read only mode on file ' + cur_file + ' Will try to make it writeable', logger.DEBUG)
|
|
|
|
try:
|
2014-03-25 01:57:24 -04:00
|
|
|
ek.ek(os.chmod, cur_file, stat.S_IWRITE)
|
2014-03-10 01:18:05 -04:00
|
|
|
except:
|
|
|
|
self._log(u'Cannot change permissions of ' + cur_file, logger.WARNING)
|
|
|
|
|
|
|
|
ek.ek(os.remove, cur_file)
|
|
|
|
# do the library update for synoindex
|
|
|
|
notifiers.synoindex_notifier.deleteFile(cur_file)
|
|
|
|
|
2014-03-25 01:57:24 -04:00
|
|
|
    def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False, action=None,
                                 subtitles=False):
        """
        Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location,
        and optionally move associated files too.

        file_path: The full path of the media file to act on
        new_path: Destination path where we want to move/copy the file to
        new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
        associated_files: Boolean, whether we should copy similarly-named files too
        action: function that takes an old path and new path and does an operation with them (move/copy)
        subtitles: Boolean, whether to include associated subtitle files (ignored when
                   associated_files is True, which already includes them)
        """

        if not action:
            self._log(u"Must provide an action for the combined file operation", logger.ERROR)
            return

        # build the list of files the action will be applied to
        file_list = [file_path]
        if associated_files:
            file_list = file_list + self.list_associated_files(file_path)
        elif subtitles:
            file_list = file_list + self.list_associated_files(file_path, subtitles_only=True)

        if not file_list:
            self._log(u"There were no files associated with " + file_path + ", not moving anything", logger.DEBUG)
            return

        # create base name with file_path (media_file without .extension)
        old_base_name = file_path.rpartition('.')[0]
        old_base_name_length = len(old_base_name)

        # deal with all files
        for cur_file_path in file_list:

            cur_file_name = ek.ek(os.path.basename, cur_file_path)

            # get the extension without .
            # (everything after the media file's base name, e.g. "avi" or "en.srt")
            cur_extension = cur_file_path[old_base_name_length + 1:]

            # check if file have subtitles language
            # e.g. "en.srt" -> keep the language code as part of the extension
            if os.path.splitext(cur_extension)[1][1:] in common.subtitleExtensions:
                cur_lang = os.path.splitext(cur_extension)[0]
                if cur_lang in sickbeard.SUBTITLES_LANGUAGES:
                    cur_extension = cur_lang + os.path.splitext(cur_extension)[1]

            # replace .nfo with .nfo-orig to avoid conflicts
            if cur_extension == 'nfo':
                cur_extension = 'nfo-orig'

            # If new base name then convert name
            if new_base_name:
                new_file_name = new_base_name + '.' + cur_extension
            # if we're not renaming we still want to change extensions sometimes
            else:
                new_file_name = helpers.replaceExtension(cur_file_name, cur_extension)

            # subtitles optionally go into their own configured subfolder
            if sickbeard.SUBTITLES_DIR and cur_extension in common.subtitleExtensions:
                subs_new_path = ek.ek(os.path.join, new_path, sickbeard.SUBTITLES_DIR)
                dir_exists = helpers.makeDir(subs_new_path)
                if not dir_exists:
                    logger.log(u"Unable to create subtitles folder " + subs_new_path, logger.ERROR)
                else:
                    helpers.chmodAsParent(subs_new_path)
                new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name)
            else:
                new_file_path = ek.ek(os.path.join, new_path, new_file_name)

            # perform the actual move/copy/link
            action(cur_file_path, new_file_path)
|
|
|
|
|
|
|
|
def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
|
|
|
|
"""
|
|
|
|
file_path: The full path of the media file to move
|
|
|
|
new_path: Destination path where we want to move the file to
|
|
|
|
new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name.
|
|
|
|
associated_files: Boolean, whether we should move similarly-named files too
|
|
|
|
"""
|
|
|
|
|
|
|
|
def _int_move(cur_file_path, new_file_path):
|
|
|
|
|
|
|
|
self._log(u"Moving file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
|
|
|
|
try:
|
|
|
|
helpers.moveFile(cur_file_path, new_file_path)
|
|
|
|
helpers.chmodAsParent(new_file_path)
|
|
|
|
except (IOError, OSError), e:
|
|
|
|
self._log("Unable to move file " + cur_file_path + " to " + new_file_path + ": " + str(e), logger.ERROR)
|
|
|
|
raise e
|
|
|
|
|
2014-03-25 01:57:24 -04:00
|
|
|
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move,
|
|
|
|
subtitles=subtitles)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
|
|
|
|
"""
|
|
|
|
file_path: The full path of the media file to copy
|
|
|
|
new_path: Destination path where we want to copy the file to
|
|
|
|
new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
|
|
|
|
associated_files: Boolean, whether we should copy similarly-named files too
|
|
|
|
"""
|
|
|
|
|
2014-03-25 01:57:24 -04:00
|
|
|
def _int_copy(cur_file_path, new_file_path):
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
self._log(u"Copying file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
|
|
|
|
try:
|
|
|
|
helpers.copyFile(cur_file_path, new_file_path)
|
|
|
|
helpers.chmodAsParent(new_file_path)
|
|
|
|
except (IOError, OSError), e:
|
|
|
|
logger.log("Unable to copy file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
|
|
|
|
raise e
|
|
|
|
|
2014-03-25 01:57:24 -04:00
|
|
|
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy,
|
|
|
|
subtitles=subtitles)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
|
|
|
|
def _hardlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
|
|
|
|
"""
|
|
|
|
file_path: The full path of the media file to move
|
|
|
|
new_path: Destination path where we want to create a hard linked file
|
|
|
|
new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
|
|
|
|
associated_files: Boolean, whether we should move similarly-named files too
|
|
|
|
"""
|
|
|
|
|
|
|
|
def _int_hard_link(cur_file_path, new_file_path):
|
|
|
|
|
|
|
|
self._log(u"Hard linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
|
|
|
|
try:
|
|
|
|
helpers.hardlinkFile(cur_file_path, new_file_path)
|
|
|
|
helpers.chmodAsParent(new_file_path)
|
|
|
|
except (IOError, OSError), e:
|
|
|
|
self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
|
|
|
|
raise e
|
2014-03-25 01:57:24 -04:00
|
|
|
|
2014-03-10 01:18:05 -04:00
|
|
|
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_hard_link)
|
|
|
|
|
|
|
|
def _moveAndSymlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
|
|
|
|
"""
|
|
|
|
file_path: The full path of the media file to move
|
|
|
|
new_path: Destination path where we want to move the file to create a symbolic link to
|
|
|
|
new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
|
|
|
|
associated_files: Boolean, whether we should move similarly-named files too
|
|
|
|
"""
|
|
|
|
|
|
|
|
def _int_move_and_sym_link(cur_file_path, new_file_path):
|
|
|
|
|
|
|
|
self._log(u"Moving then symbolic linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
|
|
|
|
try:
|
|
|
|
helpers.moveAndSymlinkFile(cur_file_path, new_file_path)
|
|
|
|
helpers.chmodAsParent(new_file_path)
|
|
|
|
except (IOError, OSError), e:
|
|
|
|
self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
|
|
|
|
raise e
|
2014-03-25 01:57:24 -04:00
|
|
|
|
|
|
|
self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
|
|
|
|
action=_int_move_and_sym_link)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
    def _history_lookup(self):
        """
        Look up the NZB name in the history and see if it contains a record for self.nzb_name

        Side effects: sets self.in_history, and on a match marks the matching
        name as trusted in self.good_results.

        Returns a (indexer_id, season, []) tuple. The first two may be None if none were found.
        """

        to_return = (None, None, [])

        # if we don't have either of these then there's nothing to use to search the history for anyway
        if not self.nzb_name and not self.folder_name:
            self.in_history = False
            return to_return

        # make a list of possible names to use in the search
        names = []
        if self.nzb_name:
            names.append(self.nzb_name)
            # also try the NZB name without its extension
            if '.' in self.nzb_name:
                names.append(self.nzb_name.rpartition(".")[0])
        if self.folder_name:
            names.append(self.folder_name)

        myDB = db.DBConnection()

        # search the database for a possible match and return immediately if we find one
        for curName in names:
            # history stores resources with dots/dashes/spaces normalized to '_',
            # so normalize the candidate the same way for the LIKE comparison
            sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [re.sub("[\.\-\ ]", "_", curName)])

            if len(sql_results) == 0:
                continue

            indexer_id = int(sql_results[0]["showid"])
            season = int(sql_results[0]["season"])

            self.in_history = True
            to_return = (indexer_id, season, [])
            self._log("Found result in history: " + str(to_return), logger.DEBUG)

            # remember which of our candidate names produced the hit
            if curName == self.nzb_name:
                self.good_results[self.NZB_NAME] = True
            elif curName == self.folder_name:
                self.good_results[self.FOLDER_NAME] = True
            elif curName == self.file_name:
                self.good_results[self.FILE_NAME] = True

            return to_return

        # nothing matched
        self.in_history = False
        return to_return
|
|
|
|
|
|
|
|
    def _analyze_name(self, name, file=True):
        """
        Takes a name and tries to figure out a show, season, and episode from it.

        name: A string which we want to analyze to determine show info from (unicode)
        file: Passed through to NameParser; True when *name* is a file name

        Side effects (via _finalize): sets self.release_group, self.is_proper and,
        when the parse is complete, marks the matching name in self.good_results.

        Returns a (indexer_id, season, [episodes]) tuple. The first two may be None and episodes may be []
        if none were found.
        """

        logger.log(u"Analyzing name " + repr(name))

        to_return = (None, None, [])

        if not name:
            return to_return

        # parse the name to break it into show name, season, and episode
        np = NameParser(file)
        parse_result = np.parse(name)

        self._log("Parsed " + name + " into " + str(parse_result).decode('utf-8'), logger.DEBUG)

        # air-by-date and sports shows have no real season yet; season -1 signals
        # the caller (_find_info) to convert the date to season/episode later
        if parse_result.air_by_date:
            season = -1
            episodes = [parse_result.air_date]
        elif parse_result.sports:
            season = -1
            episodes = [parse_result.sports_date]
        else:
            season = parse_result.season_number
            episodes = parse_result.episode_numbers

        to_return = (None, season, episodes)

        # do a scene reverse-lookup to get a list of all possible names
        name_list = show_name_helpers.sceneToNormalShowNames(parse_result.series_name)

        if not name_list:
            return (None, season, episodes)

        def _finalize(parse_result):
            # record the release metadata gleaned from the parse
            self.release_group = parse_result.release_group

            # remember whether it's a proper
            if parse_result.extra_info:
                self.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info,
                                           re.I) != None

            # if the result is complete then remember that for later
            if parse_result.series_name and parse_result.season_number != None and parse_result.episode_numbers and parse_result.release_group:
                test_name = os.path.basename(name)
                if test_name == self.nzb_name:
                    self.good_results[self.NZB_NAME] = True
                elif test_name == self.folder_name:
                    self.good_results[self.FOLDER_NAME] = True
                elif test_name == self.file_name:
                    self.good_results[self.FILE_NAME] = True
                else:
                    logger.log(u"Nothing was good, found " + repr(test_name) + " and wanted either " + repr(
                        self.nzb_name) + ", " + repr(self.folder_name) + ", or " + repr(self.file_name))
            else:
                logger.log(u"Parse result not sufficient(all following have to be set). Will not save release name",
                           logger.DEBUG)
                logger.log("Parse result(series_name): " + str(parse_result.series_name), logger.DEBUG)
                logger.log("Parse result(season_number): " + str(parse_result.season_number), logger.DEBUG)
                logger.log("Parse result(episode_numbers): " + str(parse_result.episode_numbers), logger.DEBUG)
                logger.log("Parse result(release_group): " + str(parse_result.release_group), logger.DEBUG)

        # for each possible interpretation of that scene name
        # (scene exceptions first — they are the most specific match)
        for cur_name in name_list:
            self._log(u"Checking scene exceptions for a match on " + cur_name, logger.DEBUG)
            scene_id = scene_exceptions.get_scene_exception_by_name(cur_name)
            if scene_id:
                self._log(u"Scene exception lookup got a Indexer ID " + str(scene_id) + ", using that", logger.DEBUG)
                _finalize(parse_result)
                return (scene_id, season, episodes)

        # see if we can find the name directly in the DB, if so use it
        for cur_name in name_list:
            self._log(u"Looking up " + cur_name + u" in the DB", logger.DEBUG)
            db_result = helpers.searchDBForShow(cur_name)
            if db_result:
                self._log(u"Lookup successful, using " + sickbeard.indexerApi(db_result[0]).name + " id " + str(
                    db_result[1]),
                          logger.DEBUG)
                _finalize(parse_result)
                return (int(db_result[1]), season, episodes)

        # see if we can find the name on the Indexer
        for cur_name in name_list:
            foundInfo = helpers.searchIndexerForShowID(cur_name, self.indexer)
            if foundInfo:
                indexer_id = foundInfo[1]
                self._log(
                    u"Lookup successful, using " + sickbeard.indexerApi(self.indexer).name + " id " + str(indexer_id),
                    logger.DEBUG)
                _finalize(parse_result)
                return (indexer_id, season, episodes)

        # no id found anywhere — still record the parse metadata before returning
        _finalize(parse_result)
        return to_return
|
|
|
|
|
|
|
|
    def _find_info(self):
        """
        For a given file try to find the showid, season, and episode.

        Tries each lookup strategy in order (history, then name analysis of the
        NZB/file/folder names), accumulating whatever pieces each attempt yields,
        and returns as soon as all three of (indexer_id, season, episodes) are known.

        Returns a (indexer_id, season, episodes) tuple; any element may be
        None/[] when it could not be determined.
        """

        indexer_id = season = None
        episodes = []

        # try to look up the nzb in history
        attempt_list = [self._history_lookup,

                        # try to analyze the nzb name
                        lambda: self._analyze_name(self.nzb_name),

                        # try to analyze the file name
                        lambda: self._analyze_name(self.file_name),

                        # try to analyze the dir name
                        lambda: self._analyze_name(self.folder_name),

                        # try to analyze the file + dir names together
                        lambda: self._analyze_name(self.file_path),

                        # try to analyze the dir + file name together as one name
                        lambda: self._analyze_name(self.folder_name + u' ' + self.file_name)

        ]

        # attempt every possible method to get our info
        for cur_attempt in attempt_list:

            try:
                (cur_indexer_id, cur_season, cur_episodes) = cur_attempt()
            except InvalidNameException, e:
                logger.log(u"Unable to parse, skipping: " + ex(e), logger.DEBUG)
                continue

            # if we already did a successful history lookup then keep that indexer_id value
            if cur_indexer_id and not (self.in_history and indexer_id):
                indexer_id = cur_indexer_id
            if cur_season != None:
                season = cur_season
            if cur_episodes:
                episodes = cur_episodes

            # for air-by-date shows we need to look up the season/episode from tvdb
            # (season == -1 is the sentinel set by _analyze_name; episodes holds a date)
            if season == -1 and indexer_id and episodes:
                self._log(u"Looks like this is an air-by-date or sports show, attempting to convert the date to season/episode",
                          logger.DEBUG)

                # try to get language set for this show
                indexer_lang = None
                try:
                    showObj = helpers.findCertainShow(sickbeard.showList, indexer_id)
                    if (showObj != None):
                        # set the language of the show
                        indexer_lang = showObj.lang
                except exceptions.MultipleShowObjectsException:
                    raise #TODO: later I'll just log this, for now I want to know about it ASAP

                try:
                    lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
                    if indexer_lang and not indexer_lang == 'en':
                        lINDEXER_API_PARMS = {'language': indexer_lang}

                    t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)

                    # ask the indexer which episode aired on that date
                    epObj = t[indexer_id].airedOn(episodes[0])[0]

                    season = int(epObj["seasonnumber"])
                    episodes = [int(epObj["episodenumber"])]

                    self._log(u"Got season " + str(season) + " episodes " + str(episodes), logger.DEBUG)
                except (KeyError, sickbeard.indexer_episodenotfound), e:
                    self._log(u"Unable to find episode with date " + str(episodes[0]) + u" for show " + str(
                        indexer_id) + u", skipping", logger.DEBUG)
                    # we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers
                    episodes = []
                    continue
                except sickbeard.indexer_error, e:
                    logger.log(u"Unable to contact " + sickbeard.indexerApi(self.indexer).name + ": " + ex(e),
                               logger.WARNING)
                    episodes = []
                    continue

            # if there's no season then we can hopefully just use 1 automatically
            elif season == None and indexer_id:
                myDB = db.DBConnection()
                numseasonsSQlResult = myDB.select(
                    "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
                    [indexer_id])
                if int(numseasonsSQlResult[0][0]) == 1 and season == None:
                    self._log(
                        u"Don't have a season number, but this show appears to only have 1 season, setting seasonnumber to 1...",
                        logger.DEBUG)
                    season = 1

            # stop as soon as we have everything we need
            if indexer_id and season != None and episodes:
                return (indexer_id, season, episodes)

        # return whatever partial info we managed to collect
        return (indexer_id, season, episodes)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
def _get_ep_obj(self, indexer_id, season, episodes):
|
|
|
|
"""
|
|
|
|
Retrieve the TVEpisode object requested.
|
|
|
|
|
|
|
|
indexer_id: The indexerid of the show (int)
|
|
|
|
season: The season of the episode (int)
|
|
|
|
episodes: A list of episodes to find (list of ints)
|
|
|
|
|
|
|
|
If the episode(s) can be found then a TVEpisode object with the correct related eps will
|
|
|
|
be instantiated and returned. If the episode can't be found then None will be returned.
|
|
|
|
"""
|
|
|
|
|
|
|
|
show_obj = None
|
|
|
|
|
|
|
|
self._log(u"Loading show object for indexer_id " + str(indexer_id), logger.DEBUG)
|
|
|
|
# find the show in the showlist
|
|
|
|
try:
|
|
|
|
show_obj = helpers.findCertainShow(sickbeard.showList, indexer_id)
|
|
|
|
except exceptions.MultipleShowObjectsException:
|
2014-03-25 01:57:24 -04:00
|
|
|
raise #TODO: later I'll just log this, for now I want to know about it ASAP
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
# if we can't find the show then there's nothing we can really do
|
|
|
|
if not show_obj:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(u"This show isn't in your list, you need to add it to SB before post-processing an episode",
|
|
|
|
logger.ERROR)
|
2014-03-10 01:18:05 -04:00
|
|
|
raise exceptions.PostProcessingFailed()
|
|
|
|
|
|
|
|
root_ep = None
|
|
|
|
for cur_episode in episodes:
|
2014-04-28 18:24:37 -04:00
|
|
|
self._log(u"Retrieving episode object for " + str(season) + "x" + str(cur_episode), logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
# now that we've figured out which episode this file is just load it manually
|
|
|
|
try:
|
2014-04-28 18:24:37 -04:00
|
|
|
# convert scene numbered release and load episode from database
|
|
|
|
curEp = show_obj.getEpisode(season, cur_episode, sceneConvert=True)
|
2014-03-10 01:18:05 -04:00
|
|
|
except exceptions.EpisodeNotFoundException, e:
|
|
|
|
self._log(u"Unable to create episode: " + ex(e), logger.DEBUG)
|
|
|
|
raise exceptions.PostProcessingFailed()
|
2014-03-25 01:57:24 -04:00
|
|
|
|
2014-03-10 01:18:05 -04:00
|
|
|
# associate all the episodes together under a single root episode
|
2014-03-20 14:03:22 -04:00
|
|
|
if root_ep == None:
|
2014-03-10 01:18:05 -04:00
|
|
|
root_ep = curEp
|
|
|
|
root_ep.relatedEps = []
|
|
|
|
elif curEp not in root_ep.relatedEps:
|
|
|
|
root_ep.relatedEps.append(curEp)
|
|
|
|
|
|
|
|
return root_ep
|
|
|
|
|
|
|
|
def _get_quality(self, ep_obj):
|
|
|
|
"""
|
|
|
|
Determines the quality of the file that is being post processed, first by checking if it is directly
|
|
|
|
available in the TVEpisode's status or otherwise by parsing through the data available.
|
|
|
|
|
|
|
|
ep_obj: The TVEpisode object related to the file we are post processing
|
|
|
|
|
|
|
|
Returns: A quality value found in common.Quality
|
|
|
|
"""
|
|
|
|
|
|
|
|
ep_quality = common.Quality.UNKNOWN
|
|
|
|
|
|
|
|
# if there is a quality available in the status then we don't need to bother guessing from the filename
|
2014-03-19 19:33:49 -04:00
|
|
|
if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
|
2014-03-25 01:57:24 -04:00
|
|
|
oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) #@UnusedVariable
|
2014-03-10 01:18:05 -04:00
|
|
|
if ep_quality != common.Quality.UNKNOWN:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(
|
|
|
|
u"The old status had a quality in it, using that: " + common.Quality.qualityStrings[ep_quality],
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
return ep_quality
|
|
|
|
|
|
|
|
# nzb name is the most reliable if it exists, followed by folder name and lastly file name
|
|
|
|
name_list = [self.nzb_name, self.folder_name, self.file_name]
|
|
|
|
|
|
|
|
# search all possible names for our new quality, in case the file or dir doesn't have it
|
|
|
|
for cur_name in name_list:
|
|
|
|
|
|
|
|
# some stuff might be None at this point still
|
|
|
|
if not cur_name:
|
|
|
|
continue
|
|
|
|
|
|
|
|
ep_quality = common.Quality.nameQuality(cur_name)
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(
|
|
|
|
u"Looking up quality for name " + cur_name + u", got " + common.Quality.qualityStrings[ep_quality],
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
# if we find a good one then use it
|
|
|
|
if ep_quality != common.Quality.UNKNOWN:
|
2014-03-25 01:57:24 -04:00
|
|
|
logger.log(cur_name + u" looks like it has quality " + common.Quality.qualityStrings[
|
|
|
|
ep_quality] + ", using that", logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
return ep_quality
|
|
|
|
|
|
|
|
# if we didn't get a quality from one of the names above, try assuming from each of the names
|
|
|
|
ep_quality = common.Quality.assumeQuality(self.file_name)
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(
|
|
|
|
u"Guessing quality for name " + self.file_name + u", got " + common.Quality.qualityStrings[ep_quality],
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
if ep_quality != common.Quality.UNKNOWN:
|
2014-03-25 01:57:24 -04:00
|
|
|
logger.log(self.file_name + u" looks like it has quality " + common.Quality.qualityStrings[
|
|
|
|
ep_quality] + ", using that", logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
return ep_quality
|
|
|
|
|
2014-03-18 09:50:13 -04:00
|
|
|
test = str(ep_quality)
|
2014-03-10 01:18:05 -04:00
|
|
|
return ep_quality
|
|
|
|
|
|
|
|
def _run_extra_scripts(self, ep_obj):
|
|
|
|
"""
|
|
|
|
Executes any extra scripts defined in the config.
|
|
|
|
|
|
|
|
ep_obj: The object to use when calling the extra script
|
|
|
|
"""
|
|
|
|
for curScriptName in sickbeard.EXTRA_SCRIPTS:
|
|
|
|
|
|
|
|
# generate a safe command line string to execute the script and provide all the parameters
|
|
|
|
script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", curScriptName) if piece.strip()]
|
|
|
|
script_cmd[0] = ek.ek(os.path.abspath, script_cmd[0])
|
|
|
|
self._log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)
|
|
|
|
|
2014-03-25 01:57:24 -04:00
|
|
|
script_cmd = script_cmd + [ep_obj.location, self.file_path, str(ep_obj.show.indexerid), str(ep_obj.season),
|
|
|
|
str(ep_obj.episode), str(ep_obj.airdate)]
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
# use subprocess to run the command and capture output
|
|
|
|
self._log(u"Executing command " + str(script_cmd))
|
|
|
|
try:
|
2014-03-25 01:57:24 -04:00
|
|
|
p = subprocess.Popen(script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
|
|
|
|
out, err = p.communicate() # @UnusedVariable
|
2014-03-10 01:18:05 -04:00
|
|
|
self._log(u"Script result: " + str(out), logger.DEBUG)
|
|
|
|
|
|
|
|
except OSError, e:
|
|
|
|
self._log(u"Unable to run extra_script: " + ex(e))
|
|
|
|
|
|
|
|
except Exception, e:
|
|
|
|
self._log(u"Unable to run extra_script: " + ex(e))
|
|
|
|
|
|
|
|
def _is_priority(self, ep_obj, new_ep_quality):
|
|
|
|
"""
|
|
|
|
Determines if the episode is a priority download or not (if it is expected). Episodes which are expected
|
|
|
|
(snatched) or larger than the existing episode are priority, others are not.
|
|
|
|
|
|
|
|
ep_obj: The TVEpisode object in question
|
|
|
|
new_ep_quality: The quality of the episode that is being processed
|
|
|
|
|
|
|
|
Returns: True if the episode is priority, False otherwise.
|
|
|
|
"""
|
|
|
|
|
|
|
|
if self.is_priority:
|
|
|
|
return True
|
|
|
|
|
|
|
|
# if SB downloaded this on purpose then this is a priority download
|
2014-03-19 19:33:49 -04:00
|
|
|
if self.in_history or ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
|
2014-03-10 01:18:05 -04:00
|
|
|
self._log(u"SB snatched this episode so I'm marking it as priority", logger.DEBUG)
|
|
|
|
return True
|
|
|
|
|
|
|
|
old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
|
|
|
|
|
|
|
|
# if the user downloaded it manually and it's higher quality than the existing episode then it's priority
|
|
|
|
if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(
|
|
|
|
u"This was manually downloaded but it appears to be better quality than what we have so I'm marking it as priority",
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
return True
|
|
|
|
|
|
|
|
# if the user downloaded it manually and it appears to be a PROPER/REPACK then it's priority
|
|
|
|
if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(u"This was manually downloaded but it appears to be a proper so I'm marking it as priority",
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
return True
|
|
|
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
def process(self):
|
|
|
|
"""
|
|
|
|
Post-process a given file
|
|
|
|
"""
|
|
|
|
|
|
|
|
self._log(u"Processing " + self.file_path + " (" + str(self.nzb_name) + ")")
|
|
|
|
|
|
|
|
if ek.ek(os.path.isdir, self.file_path):
|
|
|
|
self._log(u"File " + self.file_path + " seems to be a directory")
|
|
|
|
return False
|
|
|
|
for ignore_file in self.IGNORED_FILESTRINGS:
|
|
|
|
if ignore_file in self.file_path:
|
|
|
|
self._log(u"File " + self.file_path + " is ignored type, skipping")
|
|
|
|
return False
|
|
|
|
# reset per-file stuff
|
|
|
|
self.in_history = False
|
|
|
|
|
2014-03-12 01:28:30 -04:00
|
|
|
# try to find the file info
|
2014-03-10 16:31:41 -04:00
|
|
|
indexer_id = season = episodes = None
|
2014-03-26 22:01:53 -04:00
|
|
|
for indexer in sickbeard.indexerApi().indexers:
|
2014-03-25 21:42:36 -04:00
|
|
|
self.indexer = int(indexer)
|
2014-03-10 08:18:04 -04:00
|
|
|
|
2014-03-25 22:53:55 -04:00
|
|
|
self._log(u"Searching " + sickbeard.indexerApi(self.indexer).name + ", trying to auto-detect Indexer for "
|
|
|
|
"show")
|
|
|
|
|
2014-03-25 21:42:36 -04:00
|
|
|
# try to find the file info
|
2014-03-10 08:18:04 -04:00
|
|
|
(indexer_id, season, episodes) = self._find_info()
|
2014-03-25 21:42:36 -04:00
|
|
|
if indexer_id and season != None and episodes:
|
|
|
|
break
|
2014-03-10 08:18:04 -04:00
|
|
|
|
2014-03-25 22:53:55 -04:00
|
|
|
self._log(u"Can't find thhe show on " + sickbeard.indexerApi(self.indexer).name + ", trying next "
|
|
|
|
"indexer", logger.WARNING)
|
|
|
|
|
2014-03-20 14:03:22 -04:00
|
|
|
if not indexer_id or season == None or not episodes:
|
2014-03-25 21:42:36 -04:00
|
|
|
self._log(u"Can't find thhe show on any of the Indexers, skipping",
|
|
|
|
logger.WARNING)
|
2014-03-10 16:31:41 -04:00
|
|
|
return False
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
# retrieve/create the corresponding TVEpisode objects
|
|
|
|
ep_obj = self._get_ep_obj(indexer_id, season, episodes)
|
|
|
|
|
|
|
|
# get the quality of the episode we're processing
|
|
|
|
new_ep_quality = self._get_quality(ep_obj)
|
2014-03-19 19:33:49 -04:00
|
|
|
|
2014-03-10 01:18:05 -04:00
|
|
|
logger.log(u"Quality of the episode we're processing: " + str(new_ep_quality), logger.DEBUG)
|
|
|
|
|
2014-03-19 19:33:49 -04:00
|
|
|
# see if this is a priority download (is it snatched, in history, PROPER, or BEST)
|
2014-03-10 01:18:05 -04:00
|
|
|
priority_download = self._is_priority(ep_obj, new_ep_quality)
|
|
|
|
self._log(u"Is ep a priority download: " + str(priority_download), logger.DEBUG)
|
|
|
|
|
|
|
|
# set the status of the episodes
|
|
|
|
for curEp in [ep_obj] + ep_obj.relatedEps:
|
|
|
|
curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)
|
|
|
|
|
|
|
|
# check for an existing file
|
|
|
|
existing_file_status = self._checkForExistingFile(ep_obj.location)
|
|
|
|
|
|
|
|
# if it's not priority then we don't want to replace smaller files in case it was a mistake
|
|
|
|
if not priority_download:
|
|
|
|
|
|
|
|
# if there's an existing file that we don't want to replace stop here
|
|
|
|
if existing_file_status in (PostProcessor.EXISTS_LARGER, PostProcessor.EXISTS_SAME):
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(
|
|
|
|
u"File exists and we are not going to replace it because it's not smaller, quitting post-processing",
|
|
|
|
logger.ERROR)
|
2014-03-10 01:18:05 -04:00
|
|
|
return False
|
|
|
|
elif existing_file_status == PostProcessor.EXISTS_SMALLER:
|
|
|
|
self._log(u"File exists and is smaller than the new file so I'm going to replace it", logger.DEBUG)
|
|
|
|
elif existing_file_status != PostProcessor.DOESNT_EXIST:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(u"Unknown existing file status. This should never happen, please log this as a bug.",
|
|
|
|
logger.ERROR)
|
2014-03-10 01:18:05 -04:00
|
|
|
return False
|
|
|
|
|
|
|
|
# if the file is priority then we're going to replace it even if it exists
|
|
|
|
else:
|
2014-03-25 01:57:24 -04:00
|
|
|
self._log(
|
|
|
|
u"This download is marked a priority download so I'm going to replace an existing file if I find one",
|
|
|
|
logger.DEBUG)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
# delete the existing file (and company)
|
|
|
|
for cur_ep in [ep_obj] + ep_obj.relatedEps:
|
|
|
|
try:
|
|
|
|
self._delete(cur_ep.location, associated_files=True)
|
|
|
|
# clean up any left over folders
|
|
|
|
if cur_ep.location:
|
2014-03-25 01:57:24 -04:00
|
|
|
helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep.location),
|
|
|
|
keep_dir=ep_obj.show._location)
|
2014-03-10 01:18:05 -04:00
|
|
|
except (OSError, IOError):
|
|
|
|
raise exceptions.PostProcessingFailed("Unable to delete the existing files")
|
|
|
|
|
|
|
|
# if the show directory doesn't exist then make it if allowed
|
|
|
|
if not ek.ek(os.path.isdir, ep_obj.show._location) and sickbeard.CREATE_MISSING_SHOW_DIRS:
|
|
|
|
self._log(u"Show directory doesn't exist, creating it", logger.DEBUG)
|
|
|
|
try:
|
|
|
|
ek.ek(os.mkdir, ep_obj.show._location)
|
|
|
|
# do the library update for synoindex
|
|
|
|
notifiers.synoindex_notifier.addFolder(ep_obj.show._location)
|
|
|
|
except (OSError, IOError):
|
|
|
|
raise exceptions.PostProcessingFailed("Unable to create the show directory: " + ep_obj.show._location)
|
|
|
|
|
|
|
|
# get metadata for the show (but not episode because it hasn't been fully processed)
|
|
|
|
ep_obj.show.writeMetadata(True)
|
|
|
|
|
|
|
|
# update the ep info before we rename so the quality & release name go into the name properly
|
|
|
|
for cur_ep in [ep_obj] + ep_obj.relatedEps:
|
|
|
|
with cur_ep.lock:
|
|
|
|
cur_release_name = None
|
|
|
|
|
|
|
|
# use the best possible representation of the release name
|
|
|
|
if self.good_results[self.NZB_NAME]:
|
|
|
|
cur_release_name = self.nzb_name
|
|
|
|
if cur_release_name.lower().endswith('.nzb'):
|
|
|
|
cur_release_name = cur_release_name.rpartition('.')[0]
|
|
|
|
elif self.good_results[self.FOLDER_NAME]:
|
|
|
|
cur_release_name = self.folder_name
|
|
|
|
elif self.good_results[self.FILE_NAME]:
|
|
|
|
cur_release_name = self.file_name
|
|
|
|
# take the extension off the filename, it's not needed
|
|
|
|
if '.' in self.file_name:
|
|
|
|
cur_release_name = self.file_name.rpartition('.')[0]
|
|
|
|
|
|
|
|
if cur_release_name:
|
|
|
|
self._log("Found release name " + cur_release_name, logger.DEBUG)
|
|
|
|
cur_ep.release_name = cur_release_name
|
|
|
|
else:
|
|
|
|
logger.log("good results: " + repr(self.good_results), logger.DEBUG)
|
|
|
|
|
2014-03-19 19:33:49 -04:00
|
|
|
if ep_obj.status in common.Quality.SNATCHED_BEST:
|
|
|
|
cur_ep.status = common.Quality.compositeStatus(common.ARCHIVED, new_ep_quality)
|
|
|
|
else:
|
|
|
|
cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
cur_ep.subtitles = []
|
|
|
|
|
|
|
|
cur_ep.subtitles_searchcount = 0
|
|
|
|
|
|
|
|
cur_ep.subtitles_lastsearch = '0001-01-01 00:00:00'
|
|
|
|
|
|
|
|
cur_ep.is_proper = self.is_proper
|
|
|
|
|
|
|
|
cur_ep.saveToDB()
|
|
|
|
|
|
|
|
# Just want to keep this consistent for failed handling right now
|
|
|
|
releaseName = show_name_helpers.determineReleaseName(self.folder_path, self.nzb_name)
|
|
|
|
if releaseName is not None:
|
|
|
|
failed_history.logSuccess(releaseName)
|
|
|
|
else:
|
|
|
|
self._log(u"Couldn't find release in snatch history", logger.WARNING)
|
|
|
|
|
|
|
|
# find the destination folder
|
|
|
|
try:
|
|
|
|
proper_path = ep_obj.proper_path()
|
|
|
|
proper_absolute_path = ek.ek(os.path.join, ep_obj.show.location, proper_path)
|
|
|
|
|
|
|
|
dest_path = ek.ek(os.path.dirname, proper_absolute_path)
|
|
|
|
except exceptions.ShowDirNotFoundException:
|
2014-03-25 01:57:24 -04:00
|
|
|
raise exceptions.PostProcessingFailed(
|
|
|
|
u"Unable to post-process an episode if the show dir doesn't exist, quitting")
|
2014-03-10 01:18:05 -04:00
|
|
|
|
|
|
|
self._log(u"Destination folder for this episode: " + dest_path, logger.DEBUG)
|
|
|
|
|
|
|
|
# create any folders we need
|
|
|
|
helpers.make_dirs(dest_path)
|
|
|
|
|
|
|
|
# figure out the base name of the resulting episode file
|
|
|
|
if sickbeard.RENAME_EPISODES:
|
|
|
|
orig_extension = self.file_name.rpartition('.')[-1]
|
|
|
|
new_base_name = ek.ek(os.path.basename, proper_path)
|
|
|
|
new_file_name = new_base_name + '.' + orig_extension
|
|
|
|
|
|
|
|
else:
|
|
|
|
# if we're not renaming then there's no new base name, we'll just use the existing name
|
|
|
|
new_base_name = None
|
|
|
|
new_file_name = self.file_name
|
|
|
|
|
|
|
|
try:
|
|
|
|
# move the episode and associated files to the show dir
|
2014-04-02 08:09:53 -04:00
|
|
|
if self.process_method == "copy":
|
2014-03-25 01:57:24 -04:00
|
|
|
self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
|
|
|
|
sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
|
2014-04-02 08:09:53 -04:00
|
|
|
elif self.process_method == "move":
|
2014-03-25 01:57:24 -04:00
|
|
|
self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
|
|
|
|
sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
|
2014-04-02 08:09:53 -04:00
|
|
|
elif self.process_method == "hardlink":
|
2014-03-25 01:57:24 -04:00
|
|
|
self._hardlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
|
|
|
|
sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
|
2014-04-02 08:09:53 -04:00
|
|
|
elif self.process_method == "symlink":
|
2014-03-25 01:57:24 -04:00
|
|
|
self._moveAndSymlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
|
|
|
|
sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
|
2014-03-10 01:18:05 -04:00
|
|
|
else:
|
2014-04-02 02:29:46 -04:00
|
|
|
logger.log(u"Unknown process method: " + str(self.process_method), logger.ERROR)
|
2014-03-25 01:57:24 -04:00
|
|
|
raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
|
2014-03-10 01:18:05 -04:00
|
|
|
except (OSError, IOError):
|
|
|
|
raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
|
|
|
|
|
|
|
|
# download subtitles
|
|
|
|
if sickbeard.USE_SUBTITLES and ep_obj.show.subtitles:
|
|
|
|
for curEp in [ep_obj]:
|
|
|
|
with cur_ep.lock:
|
|
|
|
cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
|
|
|
|
cur_ep.downloadSubtitles(force=True)
|
|
|
|
|
|
|
|
# put the new location in the database
|
|
|
|
for cur_ep in [ep_obj] + ep_obj.relatedEps:
|
|
|
|
with cur_ep.lock:
|
|
|
|
cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
|
|
|
|
cur_ep.saveToDB()
|
|
|
|
|
|
|
|
# log it to history
|
|
|
|
history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group)
|
|
|
|
|
|
|
|
# send notifications
|
|
|
|
notifiers.notify_download(ep_obj._format_pattern('%SN - %Sx%0E - %EN - %QN'))
|
|
|
|
|
|
|
|
# generate nfo/tbn
|
|
|
|
ep_obj.createMetaFiles()
|
|
|
|
ep_obj.saveToDB()
|
|
|
|
|
|
|
|
# do the library update for XBMC
|
|
|
|
notifiers.xbmc_notifier.update_library(ep_obj.show.name)
|
|
|
|
|
|
|
|
# do the library update for Plex
|
|
|
|
notifiers.plex_notifier.update_library()
|
|
|
|
|
|
|
|
# do the library update for NMJ
|
|
|
|
# nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)
|
|
|
|
|
|
|
|
# do the library update for Synology Indexer
|
|
|
|
notifiers.synoindex_notifier.addFile(ep_obj.location)
|
|
|
|
|
|
|
|
# do the library update for pyTivo
|
|
|
|
notifiers.pytivo_notifier.update_library(ep_obj)
|
|
|
|
|
|
|
|
# do the library update for Trakt
|
|
|
|
notifiers.trakt_notifier.update_library(ep_obj)
|
|
|
|
|
|
|
|
self._run_extra_scripts(ep_obj)
|
|
|
|
|
|
|
|
return True
|