
Merge branch 'origin/dev'

This commit is contained in:
echel0n 2014-05-06 17:06:21 -07:00
commit 3e34365c1a
30 changed files with 1649 additions and 305 deletions

View File

@ -341,13 +341,12 @@ def main():
# Stay alive while my threads do the work
while True:
time.sleep(1)
if sickbeard.invoked_command:
sickbeard.invoked_command()
sickbeard.invoked_command = None
time.sleep(1)
return
if __name__ == "__main__":
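The hunk above adds a simple cross-thread command hand-off: worker threads set sickbeard.invoked_command to a callable, and the main loop polls it once a second and runs it on the main thread. A minimal sketch of the same polling pattern (request_restart and the worker are hypothetical stand-ins, not SickRage code):

import threading
import time

invoked_command = None  # set by worker threads, run by the main loop

def request_restart():  # hypothetical command
    print("restarting...")

def worker():
    global invoked_command
    time.sleep(2)
    invoked_command = request_restart  # hand the callable to the main thread

threading.Thread(target=worker).start()

while True:
    time.sleep(1)
    if invoked_command:
        invoked_command()  # runs on the main thread, not the worker
        invoked_command = None
        break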

View File

@ -324,6 +324,16 @@
</label>
</div>
<div class="field-pair">
<label class="nocheck clearfix">
<span class="component-title">Multi-Threading</span>
<input type="text" name="num_of_threads" id="num_of_threads" value="$sickbeard.NUM_OF_THREADS" size="5" />
</label>
<label class="nocheck clearfix">
<span class="component-title">&nbsp;</span>
<span class="component-desc">Number of threads to assign, the more threads the faster SickRage goes if your hardware can handle it! (eg. 1)</span>
</label>
</div>
<input type="submit" class="btn config_submitter" value="Save Changes" />
</fieldset>
</div><!-- /component-group4 //-->
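The new num_of_threads value presumably sizes a worker pool built on the futures backport bundled later in this commit. A hedged sketch of how such a setting could drive the bundled ThreadPoolExecutor (the wiring shown here is an assumption; the hunk above only covers the template):

from concurrent.futures import ThreadPoolExecutor

NUM_OF_THREADS = 4  # would be read via check_setting_int(CFG, 'General', 'num_of_threads', 1)

def search(provider):
    # stand-in for a provider search task
    return "results from %s" % provider

with ThreadPoolExecutor(max_workers=NUM_OF_THREADS) as pool:
    for future in [pool.submit(search, p) for p in ("nzbs", "torrents")]:
        print(future.result())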

View File

@ -96,6 +96,13 @@
<span class="component-desc">Prefer to download seperate episodes, not complete seasons?</span>
</label>
</div>
<div class="field-pair">
<input type="checkbox" name="backlog_startup" id="backlog_startup" #if $sickbeard.BACKLOG_STARTUP == True then "checked=\"checked\"" else ""# />
<label class="clearfix" for="backlog_startup">
<span class="component title">Process backlog on startup</span>
<span class="component-desc">Start processing backlogged episodes upon startup of Sick Beard?</span>
</label>
</div>
<div class="clearfix"></div>
<input type="submit" class="btn config_submitter" value="Save Changes" /><br/>
@ -215,6 +222,18 @@
</div>
<div id="nzbget_settings">
<div class="field-pair">
<input id="nzbget_use_https" type="checkbox" class="enabler" name="nzbget_use_https" #if $sickbeard.NZBGET_USE_HTTPS == True then "checked=\"checked\"" else ""# />
<label class="clearfix" for="use_failed_downloads">
<span class="component-title">Use HTTPS (SSL)</span>
<span class="component-desc">Use HTTPS to connect to NZBGet?</span>
</label>
<label class="nocheck clearfix" for="nzbget_use_https">
<span class="component-title">&nbsp;</span>
<span class="component-desc"><b>NOTE:</b> You have to enable Secure control in NZBGet and set the correct Secure Port here.</span>
</label>
</div>
<div class="field-pair">
<label class="nocheck clearfix">
<span class="component-title">NZBget HOST:PORT</span>

View File

@ -0,0 +1,3 @@
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)

View File

@ -0,0 +1,23 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from concurrent.futures._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from concurrent.futures.thread import ThreadPoolExecutor
# Jython doesn't have multiprocessing
try:
from concurrent.futures.process import ProcessPoolExecutor
except ImportError:
pass
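These re-exports are the package's whole public surface. A small usage sketch against this backport (standard concurrent.futures semantics):

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(x):
    return x * x

pool = ThreadPoolExecutor(max_workers=2)
futures = [pool.submit(square, n) for n in range(5)]
for future in as_completed(futures):
    print(future.result())  # results arrive in completion order, not submit order
pool.shutdown()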

View File

@ -0,0 +1,577 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from __future__ import with_statement
import logging
import threading
import time
try:
from collections import namedtuple
except ImportError:
from concurrent.futures._compat import namedtuple
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def isAlive(self):
return self.running()
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
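wait() returns as soon as the return_when condition holds, handing back done/not_done sets, while as_completed() yields futures one by one. A short sketch of wait() with FIRST_COMPLETED against this backport:

import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def task(delay):
    time.sleep(delay)
    return delay

pool = ThreadPoolExecutor(max_workers=2)
futures = [pool.submit(task, d) for d in (0.1, 5.0)]
done, not_done = wait(futures, return_when=FIRST_COMPLETED)
print(len(done), len(not_done))  # 1 1 -- the fast task finished first
for f in not_done:
    f.cancel()  # returns False here: the slow task is already running
pool.shutdown(wait=True)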

View File

@ -0,0 +1,101 @@
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template, namespace)
except SyntaxError:
e = _sys.exc_info()[1]
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
return result
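This fallback builds the class source as a string and exec()s it, which is why field names are validated so strictly first. A quick use of the generated class (Python 2 era code, since the module relies on basestring; the prints work in both 2 and 3):

Task = namedtuple('Task', 'work_id fn')  # same call shape as collections.namedtuple
t = Task(5, 'call()')
print(t.work_id + t[0])       # 10 -- attribute and index access agree
print(t._asdict()['fn'])      # call()
print(t._replace(work_id=6))  # Task(work_id=6, fn='call()')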

View File

@ -0,0 +1,363 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +------------+ | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Request Q"
"""
from __future__ import with_statement
import atexit
import multiprocessing
import threading
import weakref
import sys
from concurrent.futures import _base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will be
written to by the worker.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(None)
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
processes: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
nb_shutdown_processes = [0]
def shutdown_one_process():
"""Tell a worker to terminate, which will in turn wake us again"""
call_queue.put(None)
nb_shutdown_processes[0] += 1
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
result_item = result_queue.get(block=True)
if result_item is not None:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
while nb_shutdown_processes[0] < len(processes):
shutdown_one_process()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
call_queue.close()
return
del executor
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
import os
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indeterminate limit; assume that the limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
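ProcessPoolExecutor ships work to real processes through the call queue, so submitted callables must be picklable (defined at module top level), and on platforms that spawn workers the entry point needs an __main__ guard. A minimal sketch:

from concurrent.futures import ProcessPoolExecutor

def cube(x):  # must be picklable: defined at module top level
    return x ** 3

if __name__ == '__main__':  # required where worker processes are spawned
    with ProcessPoolExecutor(max_workers=2) as pool:
        print(list(pool.map(cube, range(4))))  # [0, 1, 8, 27]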

View File

@ -0,0 +1,141 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
from __future__ import with_statement
import atexit
import threading
import weakref
import sys
from concurrent.futures import _base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notify other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers, name=None):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._name = name
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue),)
if self._name:
t.name = self._name
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
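The name parameter is a SickRage addition over the stock backport: every worker thread the pool spawns is given that label, which makes thread dumps easier to read. A sketch (the "SEARCHQUEUE" label is illustrative):

import threading
from concurrent.futures.thread import ThreadPoolExecutor

def whoami():
    return threading.current_thread().name

pool = ThreadPoolExecutor(2, name="SEARCHQUEUE")  # name is this backport's extension
print(pool.submit(whoami).result())               # SEARCHQUEUE
pool.shutdown()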

lib/futures/__init__.py
View File

@ -0,0 +1,24 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
import warnings
from concurrent.futures import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed,
ProcessPoolExecutor,
ThreadPoolExecutor)
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
warnings.warn('The futures package has been deprecated. '
'Use the concurrent.futures package instead.',
DeprecationWarning)
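Importing the legacy top-level futures package still works but now emits a DeprecationWarning at import time. A sketch that makes the warning visible (it only fires in a fresh interpreter, since imports are cached):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import futures  # the legacy alias package
    print(caught[0].category.__name__)  # DeprecationWarning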

lib/futures/process.py
View File

@ -0,0 +1 @@
from concurrent.futures import ProcessPoolExecutor

lib/futures/thread.py
View File

@ -0,0 +1 @@
from concurrent.futures import ThreadPoolExecutor

View File

@ -134,6 +134,7 @@ ROOT_DIRS = None
UPDATE_SHOWS_ON_START = None
SORT_ARTICLE = None
DEBUG = False
NUM_OF_THREADS = None
USE_LISTVIEW = None
METADATA_XBMC = None
@ -174,6 +175,7 @@ ALLOW_HIGH_PRIORITY = None
SEARCH_FREQUENCY = None
UPDATE_FREQUENCY = None
BACKLOG_SEARCH_FREQUENCY = 21
BACKLOG_STARTUP = None
MIN_SEARCH_FREQUENCY = 10
MIN_UPDATE_FREQUENCY = 1
@ -277,6 +279,7 @@ NZBGET_USERNAME = None
NZBGET_PASSWORD = None
NZBGET_CATEGORY = None
NZBGET_HOST = None
NZBGET_USE_HTTPS = False
TORRENT_USERNAME = None
TORRENT_PASSWORD = None
@ -464,7 +467,7 @@ def initialize(consoleLogging=True):
global ACTUAL_LOG_DIR, LOG_DIR, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, USE_API, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, PREFER_EPISODE_RELEASES, ALLOW_HIGH_PRIORITY, TORRENT_METHOD, \
SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_HOST, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, currentSearchScheduler, backlogSearchScheduler, \
NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, NZBGET_USE_HTTPS, currentSearchScheduler, backlogSearchScheduler, \
TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_RATIO, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, \
USE_XBMC, XBMC_NOTIFY_ONSNATCH, XBMC_NOTIFY_ONDOWNLOAD, XBMC_NOTIFY_ONSUBTITLEDOWNLOAD, XBMC_UPDATE_FULL, XBMC_UPDATE_ONLYFIRST, \
XBMC_UPDATE_LIBRARY, XBMC_HOST, XBMC_USERNAME, XBMC_PASSWORD, \
@ -475,7 +478,7 @@ def initialize(consoleLogging=True):
NEWZNAB_DATA, NZBS, NZBS_UID, NZBS_HASH, EZRSS, TVTORRENTS, TVTORRENTS_DIGEST, TVTORRENTS_HASH, TVTORRENTS_OPTIONS, BTN, BTN_API_KEY, BTN_OPTIONS, \
THEPIRATEBAY, THEPIRATEBAY_TRUSTED, THEPIRATEBAY_PROXY, THEPIRATEBAY_PROXY_URL, THEPIRATEBAY_BLACKLIST, THEPIRATEBAY_OPTIONS, TORRENTLEECH, TORRENTLEECH_USERNAME, TORRENTLEECH_PASSWORD, TORRENTLEECH_OPTIONS, \
IPTORRENTS, IPTORRENTS_USERNAME, IPTORRENTS_PASSWORD, IPTORRENTS_FREELEECH, IPTORRENTS_OPTIONS, KAT, KAT_VERIFIED, KAT_OPTIONS, PUBLICHD, PUBLICHD_OPTIONS, SCC, SCC_USERNAME, SCC_PASSWORD, SCC_OPTIONS, HDTORRENTS, HDTORRENTS_USERNAME, HDTORRENTS_PASSWORD, HDTORRENTS_UID, HDTORRENTS_HASH, HDTORRENTS_OPTIONS, TORRENTDAY, TORRENTDAY_USERNAME, TORRENTDAY_PASSWORD, TORRENTDAY_UID, TORRENTDAY_HASH, TORRENTDAY_FREELEECH, TORRENTDAY_OPTIONS, \
HDBITS, HDBITS_USERNAME, HDBITS_PASSKEY, HDBITS_OPTIONS, TORRENT_DIR, USENET_RETENTION, SOCKET_TIMEOUT, SEARCH_FREQUENCY, DEFAULT_SEARCH_FREQUENCY, BACKLOG_SEARCH_FREQUENCY, INDEXER_DEFAULT, \
HDBITS, HDBITS_USERNAME, HDBITS_PASSKEY, HDBITS_OPTIONS, TORRENT_DIR, USENET_RETENTION, SOCKET_TIMEOUT, SEARCH_FREQUENCY, DEFAULT_SEARCH_FREQUENCY, BACKLOG_SEARCH_FREQUENCY, BACKLOG_STARTUP, INDEXER_DEFAULT, \
NEXTGEN, NEXTGEN_USERNAME, NEXTGEN_PASSWORD, NEXTGEN_FREELEECH, NEXTGEN_OPTIONS, SPEEDCD, SPEEDCD_USERNAME, SPEEDCD_PASSWORD, SPEEDCD_FREELEECH,\
QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, SUBTITLES_DEFAULT, STATUS_DEFAULT, \
GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, GROWL_NOTIFY_ONSUBTITLEDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD, \
@ -501,7 +504,7 @@ def initialize(consoleLogging=True):
GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, \
METADATA_WDTV, METADATA_TIVO, IGNORE_WORDS, CALENDAR_UNPROTECTED, CREATE_MISSING_SHOW_DIRS, \
ADD_SHOWS_WO_DIR, USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, subtitlesFinderScheduler, \
USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, PROXY_SETTING
USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, PROXY_SETTING, NUM_OF_THREADS
if __INITIALIZED__:
return False
@ -568,6 +571,8 @@ def initialize(consoleLogging=True):
DEBUG = bool(check_setting_int(CFG, 'General', 'debug', 0))
NUM_OF_THREADS = check_setting_int(CFG, 'General', 'num_of_threads', 1)
ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0))
HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', 'server.crt')
@ -636,6 +641,8 @@ def initialize(consoleLogging=True):
ALLOW_HIGH_PRIORITY = bool(check_setting_int(CFG, 'General', 'allow_high_priority', 1))
BACKLOG_STARTUP = bool(check_setting_int(CFG, 'General', 'backlog_startup', 1))
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500)
SEARCH_FREQUENCY = check_setting_int(CFG, 'General', 'search_frequency', 40)
@ -752,6 +759,7 @@ def initialize(consoleLogging=True):
NZBGET_PASSWORD = check_setting_str(CFG, 'NZBget', 'nzbget_password', 'tegbzn6789')
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBget', 'nzbget_category', 'tv')
NZBGET_HOST = check_setting_str(CFG, 'NZBget', 'nzbget_host', '')
NZBGET_USE_HTTPS = bool(check_setting_int(CFG, 'NZBget', 'nzbget_use_https', 0))
TORRENT_USERNAME = check_setting_str(CFG, 'TORRENT', 'torrent_username', '')
TORRENT_PASSWORD = check_setting_str(CFG, 'TORRENT', 'torrent_password', '')
@ -1050,6 +1058,8 @@ def initialize(consoleLogging=True):
threadName="BACKLOG",
runImmediately=True)
backlogSearchScheduler.action.cycleTime = BACKLOG_SEARCH_FREQUENCY
if not BACKLOG_STARTUP:
backlogSearchScheduler.silent = True
subtitlesFinderScheduler = scheduler.Scheduler(subtitles.SubtitlesFinder(),
cycleTime=datetime.timedelta(hours=SUBTITLES_FINDER_FREQUENCY),
@ -1310,6 +1320,7 @@ def save_config():
new_config['General']['use_api'] = int(USE_API)
new_config['General']['api_key'] = API_KEY
new_config['General']['debug'] = int(DEBUG)
new_config['General']['num_of_threads'] = int(NUM_OF_THREADS)
new_config['General']['enable_https'] = int(ENABLE_HTTPS)
new_config['General']['https_cert'] = HTTPS_CERT
new_config['General']['https_key'] = HTTPS_KEY
@ -1484,6 +1495,7 @@ def save_config():
new_config['NZBget']['nzbget_password'] = helpers.encrypt(NZBGET_PASSWORD, ENCRYPTION_VERSION)
new_config['NZBget']['nzbget_category'] = NZBGET_CATEGORY
new_config['NZBget']['nzbget_host'] = NZBGET_HOST
new_config['NZBget']['nzbget_use_https'] = int(NZBGET_USE_HTTPS)
new_config['TORRENT'] = {}
new_config['TORRENT']['torrent_username'] = TORRENT_USERNAME
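Each new setting follows the same three-step pattern visible in the hunks above: a module-level global, a check_setting_int() read with a default on load, and an int() cast on save. A simplified sketch of that round-trip (this check_setting_int is a stand-in for SickRage's helper, not its exact code):

# Simplified stand-in for SickRage's check_setting_int helper.
def check_setting_int(config, section, key, default):
    try:
        value = int(config[section][key])
    except (KeyError, ValueError):
        value = default
        config.setdefault(section, {})[key] = value
    return value

CFG = {'General': {'num_of_threads': '4'}}  # parsed config (SickRage uses ConfigObj)
NUM_OF_THREADS = check_setting_int(CFG, 'General', 'num_of_threads', 1)          # 4
BACKLOG_STARTUP = bool(check_setting_int(CFG, 'General', 'backlog_startup', 1))  # True

new_config = {'General': {}}  # the save path mirrors the load path
new_config['General']['num_of_threads'] = int(NUM_OF_THREADS)
new_config['General']['backlog_startup'] = int(BACKLOG_STARTUP)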

View File

@ -19,6 +19,7 @@
import os
import string
import cherrypy
import time
from sickbeard import encodingKludge as ek
@ -55,6 +56,7 @@ def foldersAtPath(path, includeParent=False):
# walk up the tree until we find a valid path
while path and not os.path.isdir(path):
time.sleep(0.01)
if path == os.path.dirname(path):
path = ''
break

View File

@ -20,7 +20,7 @@ import cherrypy
import os.path
import datetime
import re
import time
from sickbeard import helpers
from sickbeard import logger
from sickbeard import naming
@ -399,7 +399,7 @@ class ConfigMigrator():
sickbeard.CONFIG_VERSION = self.config_version
while self.config_version < self.expected_config_version:
time.sleep(0.01)
next_version = self.config_version + 1
if next_version in self.migration_names:

View File

@ -36,24 +36,24 @@ class MainSanityCheck(db.DBSanityCheck):
self.fix_duplicate_episodes()
self.fix_orphan_episodes()
def fix_duplicate_shows(self):
def fix_duplicate_shows(self, column='indexer_id'):
sqlResults = self.connection.select(
"SELECT show_id, indexer_id, COUNT(indexer_id) as count FROM tv_shows GROUP BY indexer_id HAVING count > 1")
"SELECT show_id, " + column + ", COUNT(" + column + ") as count FROM tv_shows GROUP BY " + column + " HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate show detected! indexer_id: " + str(cur_duplicate["indexer_id"]) + u" count: " + str(
logger.log(u"Duplicate show detected! " + column + ": " + str(cur_duplicate[column]) + u" count: " + str(
cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select(
"SELECT show_id, indexer_id FROM tv_shows WHERE indexer_id = ? LIMIT ?",
[cur_duplicate["indexer_id"], int(cur_duplicate["count"]) - 1]
"SELECT show_id, " + column + " FROM tv_shows WHERE " + column + " = ? LIMIT ?",
[cur_duplicate[column], int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(
u"Deleting duplicate show with indexer_id: " + str(cur_dupe_id["indexer_id"]) + u" show_id: " + str(
u"Deleting duplicate show with " + column + ": " + str(cur_dupe_id[column]) + u" show_id: " + str(
cur_dupe_id["show_id"]))
self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id["show_id"]])
@ -144,15 +144,19 @@ class InitialSchema(db.SchemaUpgrade):
if not self.hasTable("tv_shows") and not self.hasTable("db_version"):
queries = [
"CREATE TABLE db_version (db_version INTEGER);",
"CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT);",
"CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)",
"CREATE TABLE info (last_backlog NUMERIC, last_tvdb NUMERIC, last_proper_search NUMERIC);",
"CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, tvdbid NUMERIC, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT, file_size NUMERIC, release_name TEXT, subtitles TEXT, subtitles_searchcount NUMERIC, subtitles_lastsearch TIMESTAMP, is_proper NUMERIC)",
"CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, tvr_id NUMERIC, tvr_name TEXT, air_by_date NUMERIC, lang TEXT, subtitles NUMERIC, notify_list TEXT, imdb_id TEXT, last_update_tvdb NUMERIC)",
"CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid,airdate);",
"CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT)",
"CREATE TABLE imdb_info (indexer_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)",
"CREATE TABLE info (last_backlog NUMERIC, last_indexer NUMERIC, last_proper_search NUMERIC)",
"CREATE TABLE scene_numbering(indexer TEXT, indexer_id INTEGER, season INTEGER, episode INTEGER,scene_season INTEGER, scene_episode INTEGER, PRIMARY KEY(indexer_id, season, episode))",
"CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, indexer_id NUMERIC, indexer TEXT, show_name TEXT, location TEXT, network TEXT, genre TEXT, classification TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, air_by_date NUMERIC, lang TEXT, subtitles NUMERIC, notify_list TEXT, imdb_id TEXT, last_update_indexer NUMERIC, dvdorder NUMERIC, archive_firstmatch NUMERIC, rls_require_words TEXT, rls_ignore_words TEXT, sports NUMERIC);",
"CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, indexerid NUMERIC, indexer TEXT, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT, file_size NUMERIC, release_name TEXT, subtitles TEXT, subtitles_searchcount NUMERIC, subtitles_lastsearch TIMESTAMP, is_proper NUMERIC, scene_season NUMERIC, scene_episode NUMERIC);",
"CREATE UNIQUE INDEX idx_indexer_id ON tv_shows (indexer_id)",
"CREATE INDEX idx_showid ON tv_episodes (showid);",
"CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);",
"INSERT INTO db_version (db_version) VALUES (18);"
"CREATE INDEX idx_sta_epi_air ON tv_episodes (status,episode, airdate);",
"CREATE INDEX idx_sta_epi_sta_air ON tv_episodes (season,episode, status, airdate);",
"CREATE INDEX idx_status ON tv_episodes (status,season,episode,airdate);",
"CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid,airdate)",
"INSERT INTO db_version (db_version) VALUES (28);"
]
for query in queries:
self.connection.action(query)
@ -181,7 +185,7 @@ class AddSizeAndSceneNameFields(InitialSchema):
def execute(self):
backupDatabase(11)
backupDatabase(10)
if not self.hasColumn("tv_episodes", "file_size"):
self.addColumn("tv_episodes", "file_size")
@ -307,30 +311,7 @@ class RenameSeasonFolders(AddSizeAndSceneNameFields):
self.incDBVersion()
class AddSubtitlesSupport(RenameSeasonFolders):
def test(self):
return self.checkDBVersion() >= 12
def execute(self):
self.addColumn("tv_shows", "subtitles")
self.addColumn("tv_episodes", "subtitles", "TEXT", "")
self.addColumn("tv_episodes", "subtitles_searchcount")
self.addColumn("tv_episodes", "subtitles_lastsearch", "TIMESTAMP", str(datetime.datetime.min))
self.incDBVersion()
class AddIMDbInfo(RenameSeasonFolders):
def test(self):
return self.checkDBVersion() >= 13
def execute(self):
self.connection.action(
"CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)")
self.incDBVersion()
class Add1080pAndRawHDQualities(AddIMDbInfo):
class Add1080pAndRawHDQualities(RenameSeasonFolders):
"""Add support for 1080p related qualities along with RawHD
Quick overview of what the upgrade needs to do:
@ -347,7 +328,7 @@ class Add1080pAndRawHDQualities(AddIMDbInfo):
"""
def test(self):
return self.checkDBVersion() >= 14
return self.checkDBVersion() >= 12
def _update_status(self, old_status):
(status, quality) = common.Quality.splitCompositeStatus(old_status)
@ -464,11 +445,73 @@ class Add1080pAndRawHDQualities(AddIMDbInfo):
logger.log(u"Performing a vacuum on the database.", logger.DEBUG)
self.connection.action("VACUUM")
class AddShowidTvdbidIndex(Add1080pAndRawHDQualities):
""" Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries """
def test(self):
return self.checkDBVersion() >= 13
def execute(self):
backupDatabase(13)
logger.log(u"Check for duplicate shows before adding unique index.")
MainSanityCheck(self.connection).fix_duplicate_shows('tvdb_id')
logger.log(u"Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.")
if not self.hasTable("idx_showid"):
self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);")
if not self.hasTable("idx_tvdb_id"):
self.connection.action("CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);")
self.incDBVersion()
class AddLastUpdateTVDB(AddShowidTvdbidIndex):
""" Adding column last_update_tvdb to tv_shows for controlling nightly updates """
def test(self):
return self.checkDBVersion() >= 14
def execute(self):
backupDatabase(14)
logger.log(u"Adding column last_update_tvdb to tvshows")
if not self.hasColumn("tv_shows", "last_update_tvdb"):
self.addColumn("tv_shows", "last_update_tvdb", default=1)
self.incDBVersion()
class AddRequireAndIgnoreWords(AddLastUpdateTVDB):
""" Adding column rls_require_words and rls_ignore_words to tv_shows """
class AddProperNamingSupport(Add1080pAndRawHDQualities):
def test(self):
return self.checkDBVersion() >= 15
def execute(self):
backupDatabase(15)
logger.log(u"Adding column rls_require_words to tvshows")
if not self.hasColumn("tv_shows", "rls_require_words"):
self.addColumn("tv_shows", "rls_require_words", "TEXT", "")
logger.log(u"Adding column rls_ignore_words to tvshows")
if not self.hasColumn("tv_shows", "rls_ignore_words"):
self.addColumn("tv_shows", "rls_ignore_words", "TEXT", "")
self.incDBVersion()
class AddIMDbInfo(AddRequireAndIgnoreWords):
def test(self):
return self.checkDBVersion() >= 13
def execute(self):
self.connection.action(
"CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)")
self.incDBVersion()
class AddProperNamingSupport(AddIMDbInfo):
def test(self):
return self.checkDBVersion() >= 14
def execute(self):
self.addColumn("tv_episodes", "is_proper")
self.incDBVersion()
@ -482,50 +525,12 @@ class AddEmailSubscriptionTable(AddProperNamingSupport):
self.addColumn('tv_shows', 'notify_list', 'TEXT', None)
self.incDBVersion()
class AddShowidTvdbidIndex(AddEmailSubscriptionTable):
""" Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries """
class AddProperSearch(AddEmailSubscriptionTable):
def test(self):
return self.checkDBVersion() >= 17
return self.checkDBVersion() >= 16
def execute(self):
backupDatabase(17)
logger.log(u"Check for duplicate shows before adding unique index.")
MainSanityCheck(self.connection).fix_duplicate_shows()
logger.log(u"Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.")
if not self.hasTable("idx_showid"):
self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);")
if not self.hasTable("idx_tvdb_id"):
self.connection.action("CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);")
self.incDBVersion()
class AddUpdateTVDB(AddShowidTvdbidIndex):
""" Adding column last_update_tvdb to tv_shows for controlling nightly updates """
def test(self):
return self.checkDBVersion() >= 18
def execute(self):
backupDatabase(18)
logger.log(u"Adding column last_update_tvdb to tvshows")
if not self.hasColumn("tv_shows", "last_update_tvdb"):
self.addColumn("tv_shows", "last_update_tvdb", default=1)
self.incDBVersion()
class AddProperSearch(AddUpdateTVDB):
def test(self):
return self.checkDBVersion() >= 19
def execute(self):
backupDatabase(19)
backupDatabase(16)
logger.log(u"Adding column last_proper_search to info")
if not self.hasColumn("info", "last_proper_search"):
@ -536,27 +541,32 @@ class AddProperSearch(AddUpdateTVDB):
class AddDvdOrderOption(AddProperSearch):
def test(self):
return self.checkDBVersion() >= 20
return self.hasColumn("tv_shows", "dvdorder")
def execute(self):
backupDatabase(20)
logger.log(u"Adding column dvdorder to tvshows")
if not self.hasColumn("tv_shows", "dvdorder"):
self.addColumn("tv_shows", "dvdorder", "NUMERIC", "0")
self.incDBVersion()
class ConvertTVShowsToIndexerScheme(AddDvdOrderOption):
class AddSubtitlesSupport(AddDvdOrderOption):
def test(self):
return self.checkDBVersion() >= 22
return self.hasColumn("tv_shows", "subtitles")
def execute(self):
backupDatabase(22)
self.addColumn("tv_shows", "subtitles")
self.addColumn("tv_episodes", "subtitles", "TEXT", "")
self.addColumn("tv_episodes", "subtitles_searchcount")
self.addColumn("tv_episodes", "subtitles_lastsearch", "TIMESTAMP", str(datetime.datetime.min))
self.incDBVersion()
logger.log(u"Adding column dvdorder to tvshows")
if not self.hasColumn("tv_shows", "dvdorder"):
self.addColumn("tv_shows", "dvdorder", "NUMERIC", "0")
class ConvertTVShowsToIndexerScheme(AddSubtitlesSupport):
def test(self):
return self.checkDBVersion() >= 19
def execute(self):
backupDatabase(19)
logger.log(u"Converting TV Shows table to Indexer Scheme...")
@ -581,10 +591,10 @@ class ConvertTVShowsToIndexerScheme(AddDvdOrderOption):
class ConvertTVEpisodesToIndexerScheme(ConvertTVShowsToIndexerScheme):
def test(self):
return self.checkDBVersion() >= 23
return self.checkDBVersion() >= 20
def execute(self):
backupDatabase(23)
backupDatabase(20)
logger.log(u"Converting TV Episodes table to Indexer Scheme...")
@ -612,10 +622,10 @@ class ConvertTVEpisodesToIndexerScheme(ConvertTVShowsToIndexerScheme):
class ConvertIMDBInfoToIndexerScheme(ConvertTVEpisodesToIndexerScheme):
def test(self):
return self.checkDBVersion() >= 24
return self.checkDBVersion() >= 21
def execute(self):
backupDatabase(24)
backupDatabase(21)
logger.log(u"Converting IMDB Info table to Indexer Scheme...")
@ -635,10 +645,10 @@ class ConvertIMDBInfoToIndexerScheme(ConvertTVEpisodesToIndexerScheme):
class ConvertInfoToIndexerScheme(ConvertIMDBInfoToIndexerScheme):
def test(self):
return self.checkDBVersion() >= 25
return self.checkDBVersion() >= 22
def execute(self):
backupDatabase(25)
backupDatabase(22)
logger.log(u"Converting Info table to Indexer Scheme...")
@ -657,10 +667,10 @@ class ConvertInfoToIndexerScheme(ConvertIMDBInfoToIndexerScheme):
class AddArchiveFirstMatchOption(ConvertInfoToIndexerScheme):
def test(self):
return self.checkDBVersion() >= 26
return self.checkDBVersion() >= 23
def execute(self):
backupDatabase(26)
backupDatabase(23)
logger.log(u"Adding column archive_firstmatch to tvshows")
if not self.hasColumn("tv_shows", "archive_firstmatch"):
@ -670,10 +680,10 @@ class AddArchiveFirstMatchOption(ConvertInfoToIndexerScheme):
class AddSceneNumbering(AddArchiveFirstMatchOption):
def test(self):
return self.checkDBVersion() >= 27
return self.checkDBVersion() >= 24
def execute(self):
backupDatabase(27)
backupDatabase(24)
if self.hasTable("scene_numbering"):
self.connection.action("DROP TABLE scene_numbering")
@ -686,10 +696,10 @@ class AddSceneNumbering(AddArchiveFirstMatchOption):
class ConvertIndexerToInteger(AddSceneNumbering):
def test(self):
return self.checkDBVersion() >= 28
return self.checkDBVersion() >= 25
def execute(self):
backupDatabase(28)
backupDatabase(25)
ql = []
logger.log(u"Converting Indexer to Integer ...", logger.MESSAGE)
@ -708,10 +718,10 @@ class AddRequireAndIgnoreWords(ConvertIndexerToInteger):
""" Adding column rls_require_words and rls_ignore_words to tv_shows """
def test(self):
return self.checkDBVersion() >= 29
return self.checkDBVersion() >= 26
def execute(self):
backupDatabase(29)
backupDatabase(26)
logger.log(u"Adding column rls_require_words to tvshows")
if not self.hasColumn("tv_shows", "rls_require_words"):
@ -725,10 +735,10 @@ class AddRequireAndIgnoreWords(ConvertIndexerToInteger):
class AddSportsOption(AddRequireAndIgnoreWords):
def test(self):
return self.checkDBVersion() >= 30
return self.checkDBVersion() >= 27
def execute(self):
backupDatabase(30)
backupDatabase(27)
logger.log(u"Adding column sports to tvshows")
if not self.hasColumn("tv_shows", "sports"):
@ -748,10 +758,10 @@ class AddSportsOption(AddRequireAndIgnoreWords):
class AddSceneNumberingToTvEpisodes(AddSportsOption):
def test(self):
return self.checkDBVersion() >= 31
return self.checkDBVersion() >= 28
def execute(self):
backupDatabase(31)
backupDatabase(28)
logger.log(u"Adding column scene_season and scene_episode to tvepisodes")
if not self.hasColumn("tv_episodes", "scene_season"):

View File

@ -79,6 +79,7 @@ class DBConnection:
attempt = 0
while attempt < 5:
time.sleep(0.01)
try:
if args == None:
logger.log(self.filename + ": " + query, logger.DB)
@ -118,6 +119,7 @@ class DBConnection:
attempt = 0
while attempt < 5:
time.sleep(0.01)
try:
for qu in querylist:
if len(qu) == 1:
@ -162,6 +164,7 @@ class DBConnection:
attempt = 0
while attempt < 5:
time.sleep(0.01)
try:
if args == None:
logger.log(self.filename + ": " + query, logger.DB)
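The attempt < 5 loops above retry transient sqlite failures, and the newly added time.sleep(0.01) yields the GIL between attempts now that several worker threads share the connection. A hedged standalone sketch of the same retry shape (the function name and the "locked" check are assumptions, not SickRage code):

import sqlite3
import time

def execute_with_retry(connection, query, args=None, retries=5, delay=0.01):
    # Retry transient "database is locked" errors; re-raise anything else.
    for attempt in range(retries):
        time.sleep(delay)  # brief pause also lets other threads run
        try:
            if args is None:
                return connection.execute(query)
            return connection.execute(query, args)
        except sqlite3.OperationalError, e:
            if "locked" not in str(e):
                raise
    raise sqlite3.OperationalError("still locked after %d attempts" % retries)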

View File

@ -549,6 +549,7 @@ def delete_empty_folders(check_empty_dir, keep_dir=None):
# as long as the folder exists and doesn't contain any files, delete it
while ek.ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
time.sleep(0.01)
check_files = ek.ek(os.listdir, check_empty_dir)
@ -658,6 +659,8 @@ def sanitizeSceneName(name, ezrss=False):
Returns: A string containing the scene version of the show name given.
"""
if name:
if not ezrss:
bad_chars = u",:()'!?\u2019"
# ezrss leaves : and ! in their show names as far as I can tell
@ -789,6 +792,7 @@ def backupVersionedFile(old_file, version):
new_file = old_file + '.' + 'v' + str(version)
while not ek.ek(os.path.isfile, new_file):
time.sleep(0.01)
if not ek.ek(os.path.isfile, old_file):
logger.log(u"Not creating backup, " + old_file + " doesn't exist", logger.DEBUG)
break
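backupVersionedFile keeps looping (now with a short sleep per pass) until the versioned copy exists, bailing out if the source disappears. A simplified sketch with plain os/shutil in place of the ek encoding wrapper:

import os
import shutil
import time

def backup_versioned_file(old_file, version):
    # Simplified sketch of the helper above; no ek.ek() wrapper.
    new_file = old_file + '.v' + str(version)
    while not os.path.isfile(new_file):
        time.sleep(0.01)
        if not os.path.isfile(old_file):
            return False  # nothing left to back up
        shutil.copy(old_file, new_file)
    return True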

View File

@ -20,6 +20,7 @@ import datetime
import os.path
import re
import regexes
import time
import sickbeard
from sickbeard import logger, helpers, scene_numbering
@ -207,6 +208,7 @@ class NameParser(object):
i = result = 0
for integer, numeral in numeral_map:
time.sleep(0.01)
while n[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
@ -424,6 +426,7 @@ class NameParserCache(object):
self._previous_parsed[name] = parse_result
self._previous_parsed_list.append(name)
while len(self._previous_parsed_list) > self._cache_size:
time.sleep(0.01)
del_me = self._previous_parsed_list.pop(0)
self._previous_parsed.pop(del_me)
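NameParserCache above is a plain FIFO: results go into a dict plus an insertion-order list, and the oldest entries are dropped once the size cap is exceeded. The same idea as a self-contained sketch:

from collections import OrderedDict

class ParseCache(object):
    """ FIFO-bounded cache, equivalent in spirit to NameParserCache above """

    def __init__(self, max_size=100):
        self._items = OrderedDict()
        self._max_size = max_size

    def add(self, name, parse_result):
        self._items[name] = parse_result
        while len(self._items) > self._max_size:
            self._items.popitem(last=False)  # evict the oldest entry

    def get(self, name):
        return self._items.get(name)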

View File

@ -37,6 +37,10 @@ from common import Quality
def sendNZB(nzb, proper=False):
addToTop = False
nzbgetprio = 0
if sickbeard.NZBGET_USE_HTTPS:
nzbgetXMLrpc = "https://%(username)s:%(password)s@%(host)s/xmlrpc"
else:
nzbgetXMLrpc = "http://%(username)s:%(password)s@%(host)s/xmlrpc"
if sickbeard.NZBGET_HOST == None:
@ -71,7 +75,10 @@ def sendNZB(nzb, proper=False):
# if it aired recently make it high priority and generate DupeKey/Score
for curEp in nzb.episodes:
if dupekey == "":
if curEp.show.indexer == 1:
dupekey = "Sickbeard-" + str(curEp.show.indexerid)
elif curEp.show.indexer == 2:
dupekey = "Sickbeard-tvr" + str(curEp.show.indexerid)
dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
addToTop = True
@ -82,17 +89,9 @@ def sendNZB(nzb, proper=False):
if proper:
dupescore += 10
# if it's a normal result need to download the NZB content
if nzb.resultType == "nzb":
genProvider = GenericProvider("")
data = genProvider.getURL(nzb.url)
if (data == None):
return False
# if we get a raw data result that's even better
elif nzb.resultType == "nzbdata":
nzbcontent64 = None
if nzb.resultType == "nzbdata":
data = nzb.extraInfo[0]
nzbcontent64 = standard_b64encode(data)
logger.log(u"Sending NZB to NZBget")
@ -103,13 +102,30 @@ def sendNZB(nzb, proper=False):
nzbget_version_str = nzbGetRPC.version()
nzbget_version = helpers.tryInt(nzbget_version_str[:nzbget_version_str.find(".")])
if nzbget_version == 0:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, addToTop, nzbcontent64)
else:
if nzb.resultType == "nzb":
genProvider = GenericProvider("")
data = genProvider.getURL(nzb.url)
if (data == None):
return False
nzbcontent64 = standard_b64encode(data)
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, addToTop, nzbcontent64)
elif nzbget_version >= 12:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, nzbgetprio, False,
nzbcontent64, False, dupekey, dupescore, "score")
else:
nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, nzbgetprio, False,
nzb.url, False, dupekey, dupescore, "score")
else:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, nzbgetprio, False,
nzbcontent64)
else:
nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb", sickbeard.NZBGET_CATEGORY, nzbgetprio, False,
nzb.url)
if nzbget_result:
logger.log(u"NZB sent to NZBget successfully", logger.DEBUG)

View File

@ -24,6 +24,7 @@ import os
import re
import urllib
import urlparse
import time
import sickbeard
@ -234,7 +235,12 @@ class GenericProvider:
results = {}
searchItems = {}
if manualSearch:
self.cache.updateCache()
for epObj in episodes:
time.sleep(0.01)
itemList = []
cacheResult = self.cache.searchCache(epObj, manualSearch)
@ -268,6 +274,8 @@ class GenericProvider:
for episode, items in searchItems.items():
for item in items:
time.sleep(0.01)
(title, url) = self._get_title_and_url(item)
quality = self.getQuality(item)
@ -328,6 +336,7 @@ class GenericProvider:
logger.log(
u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[
quality], logger.DEBUG)
time.sleep(0.01)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)

View File

@ -147,7 +147,7 @@ class HDBitsProvider(generic.TorrentProvider):
if episode:
post_data['tvdb'] = {
'id': show.tvdbid,
'id': show.indexerid,
'season': episode.scene_season,
'episode': episode.scene_episode
}

View File

@ -137,8 +137,8 @@ class PublicHDProvider(generic.TorrentProvider):
urllib.quote(unidecode(search_string)), ';'.join(self.categories[mode]))
logger.log(u"Search string: " + searchURL, logger.DEBUG)
html = self.getURL(searchURL)
if not html:
continue
@ -313,16 +313,15 @@ class PublicHDCache(tvcache.TVCache):
logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
self._clearCache()
cl = []
ql = []
for result in rss_results:
item = (result[0], result[1])
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
ql.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
myDB.mass_action(ql)
def _parseItem(self, item):

View File

@ -103,15 +103,20 @@ class TorrentDayProvider(generic.TorrentProvider):
logger.log(u'Invalid username or password for ' + self.name + ', check your settings!', logger.ERROR)
return False
if requests.utils.dict_from_cookiejar(self.session.cookies)['uid'] and requests.utils.dict_from_cookiejar(self.session.cookies)['pass']:
sickbeard.TORRENTDAY_UID = requests.utils.dict_from_cookiejar(self.session.cookies)['uid']
sickbeard.TORRENTDAY_HASH = requests.utils.dict_from_cookiejar(self.session.cookies)['pass']
self.cookies = {'uid': sickbeard.TORRENTDAY_UID,
'pass': sickbeard.TORRENTDAY_HASH
}
return True
else:
logger.log(u'Unable to obtain cookie for TorrentDay', logger.ERROR)
return False
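The login above stores TorrentDay's uid and pass cookies so later requests can re-authenticate without another login round-trip. The same pattern in isolation, assuming a hypothetical login endpoint and form field names:

import requests

def login_and_keep_cookies(login_url, username, password):
    # login_url and the form field names below are placeholders
    session = requests.Session()
    session.post(login_url, data={'username': username, 'password': password})

    cookies = requests.utils.dict_from_cookiejar(session.cookies)
    if cookies.get('uid') and cookies.get('pass'):
        # replaying these two cookies authenticates future requests
        return {'uid': cookies['uid'], 'pass': cookies['pass']}
    return None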
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': [], 'Episode': []}

View File

@ -61,6 +61,7 @@ class Scheduler:
def runAction(self):
while True:
time.sleep(0.01)
currentTime = datetime.datetime.now()
@ -78,5 +79,3 @@ class Scheduler:
self.abort = False
self.thread = None
return
time.sleep(1)
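Moving the sleep to the top of the loop (and dropping the trailing time.sleep(1)) makes the scheduler yield to the new worker threads on every pass instead of spinning between checks. The loop's shape as a sketch; lastRun, cycleTime, action, and abort are attribute names assumed from the surrounding Scheduler class, which is only partially shown here:

import datetime
import time

def run_action(self):
    while True:
        time.sleep(0.01)  # yield the GIL to other threads every iteration
        currentTime = datetime.datetime.now()
        if currentTime - self.lastRun > self.cycleTime:
            self.lastRun = currentTime
            self.action.run()
        if self.abort:
            self.abort = False
            self.thread = None
            return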

View File

@ -20,6 +20,7 @@ from __future__ import with_statement
import os
import re
import threading
import traceback
import datetime
@ -116,6 +117,8 @@ def snatchEpisode(result, endStatus=SNATCHED):
for curEp in result.episodes:
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
result.priority = 1
if re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) != None:
endStatus = SNATCHED_PROPER
# NZBs can be sent straight to SAB or saved to disk
if result.resultType in ("nzb", "nzbdata"):
@ -124,11 +127,8 @@ def snatchEpisode(result, endStatus=SNATCHED):
elif sickbeard.NZB_METHOD == "sabnzbd":
dlResult = sab.sendNZB(result)
elif sickbeard.NZB_METHOD == "nzbget":
if endStatus == SNATCHED_PROPER:
s_prop = True
else:
s_prop = False
dlResult = nzbget.sendNZB(result, s_prop)
is_proper = (endStatus == SNATCHED_PROPER)
dlResult = nzbget.sendNZB(result, is_proper)
else:
logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
dlResult = False
@ -171,28 +171,22 @@ def snatchEpisode(result, endStatus=SNATCHED):
return True
def searchForNeededEpisodes():
logger.log(u"Searching all providers for any needed episodes")
def searchForNeededEpisodes(curProvider):
threading.currentThread().name = curProvider.name
logger.log(u"Searching all providers for any needed episodes")
foundResults = {}
didSearch = False
# ask all providers for any episodes it finds
for curProvider in providers.sortedProviderList():
if not curProvider.isActive():
continue
try:
curFoundResults = curProvider.searchRSS()
except exceptions.AuthException, e:
logger.log(u"Authentication error: " + ex(e), logger.ERROR)
continue
return
except Exception, e:
logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
return
didSearch = True
@ -363,28 +357,27 @@ def filterSearchResults(show, results):
return foundResults
def searchProviders(show, season, episodes, seasonSearch=False, manualSearch=False):
def searchProviders(show, season, episodes, curProvider, seasonSearch=False, manualSearch=False):
threading.currentThread().name = curProvider.name
logger.log(u"Searching for stuff we need from " + show.name + " season " + str(season))
foundResults = {}
didSearch = False
for curProvider in providers.sortedProviderList():
if not curProvider.isActive():
continue
if manualSearch:
curProvider.cache.updateCache()
# convert indexer numbering to scene numbering for searches
for epObj in episodes:
    epObj.convertToSceneNumbering()
try:
curResults = curProvider.findSearchResults(show, season, episodes, seasonSearch, manualSearch)
except exceptions.AuthException, e:
logger.log(u"Authentication error: " + ex(e), logger.ERROR)
continue
return
except Exception, e:
logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
return
# finished searching this provider successfully
didSearch = True

View File

@ -33,4 +33,4 @@ class CurrentSearcher():
def run(self):
search_queue_item = search_queue.RSSSearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(search_queue_item) #@UndefinedVariable
sickbeard.searchQueueScheduler.action.add_item(search_queue_item)

View File

@ -19,6 +19,8 @@
from __future__ import with_statement
import datetime
from threading import Thread
import threading
import time
import sickbeard
@ -27,6 +29,8 @@ from sickbeard import generic_queue
from sickbeard import search, failed_history, history
from sickbeard import ui
from lib.concurrent import futures
BACKLOG_SEARCH = 10
RSS_SEARCH = 20
FAILED_SEARCH = 30
@ -73,7 +77,7 @@ class SearchQueue(generic_queue.GenericQueue):
generic_queue.GenericQueue.add_item(self, item)
elif isinstance(item, ManualSearchQueueItem) and not self.is_ep_in_queue(item.ep_obj):
generic_queue.GenericQueue.add_item(self, item)
elif isinstance(item, FailedQueueItem) and not self.is_in_queue(item.show, item.segment):
elif isinstance(item, FailedQueueItem) and not self.is_in_queue(item.show, item.episodes):
generic_queue.GenericQueue.add_item(self, item)
else:
logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
@ -90,24 +94,25 @@ class ManualSearchQueueItem(generic_queue.QueueItem):
def execute(self):
generic_queue.QueueItem.execute(self)
with futures.ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
foundResults = list(executor.map(self.process, [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]))
# convert indexer numbering to scene numbering for searches
(self.ep_obj.scene_season, self.ep_obj.scene_episode) = sickbeard.scene_numbering.get_scene_numbering(
self.ep_obj.show.indexerid, self.ep_obj.show.indexer, self.ep_obj.season, self.ep_obj.episode)
logger.log("Beginning manual search for " + self.ep_obj.prettyName() + ' as ' + self.ep_obj.prettySceneName())
foundResults = search.searchProviders(self.ep_obj.show, self.ep_obj.season, [self.ep_obj], manualSearch=True)
result = False
if not foundResults:
ui.notifications.message('No downloads were found',
if self.ep_obj.show.air_by_date:
ui.notifications.message('No downloads were found ...',
"Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyABName())
logger.log(u"Unable to find a download for " + self.ep_obj.prettyABDName())
else:
ui.notifications.message('No downloads were found ...',
"Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyName())
logger.log(u"Unable to find a download for " + self.ep_obj.prettyName())
self.success = result
else:
for foundResult in foundResults:
time.sleep(0.01)
# just use the first result for now
logger.log(u"Downloading " + foundResult.name + " from " + foundResult.provider.name)
@ -115,12 +120,23 @@ class ManualSearchQueueItem(generic_queue.QueueItem):
providerModule = foundResult.provider
if not result:
ui.notifications.error('Error while attempting to snatch ' + foundResult.name + ', check your logs')
ui.notifications.error(
'Error while attempting to snatch ' + foundResult.name + ', check your logs')
elif providerModule == None:
ui.notifications.error('Provider is configured incorrectly, unable to download')
self.success = result
self.finish()
def process(self, curProvider):
if self.ep_obj.show.air_by_date:
logger.log("Beginning manual search for " + self.ep_obj.prettyABDName())
else:
logger.log("Beginning manual search for " + self.ep_obj.prettyName())
return search.searchProviders(self.ep_obj.show, self.ep_obj.season, [self.ep_obj], curProvider, True, False)
def finish(self):
# don't let this linger if something goes wrong
if self.success == None:
@ -134,21 +150,22 @@ class RSSSearchQueueItem(generic_queue.QueueItem):
def execute(self):
generic_queue.QueueItem.execute(self)
with futures.ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
foundResults = list(executor.map(self.process, [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]))
for curResult in foundResults:
time.sleep(0.01)
if curResult:
search.snatchEpisode(curResult)
generic_queue.QueueItem.finish(self)
def process(self, curProvider):
self._changeMissingEpisodes()
logger.log(u"Beginning search for new episodes on RSS feeds and in cache")
foundResults = search.searchForNeededEpisodes()
if not len(foundResults):
logger.log(u"No needed episodes found on the RSS feeds")
else:
for curResult in foundResults:
search.snatchEpisode(curResult)
time.sleep(2)
generic_queue.QueueItem.finish(self)
return search.searchForNeededEpisodes(curProvider)
def _changeMissingEpisodes(self):
@ -218,41 +235,34 @@ class BacklogQueueItem(generic_queue.QueueItem):
self.wantedEpisodes = self._need_any_episodes(statusResults, bestQualities)
def execute(self):
generic_queue.QueueItem.execute(self)
with futures.ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
foundResults = sum(executor.map(self.process, [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]), [])

if not foundResults:
    logger.log(u"Backlog search found nothing to snatch ...")

for curResult in foundResults:
    time.sleep(0.01)
    search.snatchEpisode(curResult)
self.finish()
def process(self, curProvider):
# check if we want to search for season packs instead of just season/episode
seasonSearch = False
seasonEps = self.show.getAllEpisodes(self.segment)
if len(seasonEps) == len(self.wantedEpisodes):
seasonSearch = True
# convert indexer numbering to scene numbering for searches
for i, epObj in enumerate(self.wantedEpisodes):
(self.wantedEpisodes[i].scene_season,
self.wantedEpisodes[i].scene_episode) = sickbeard.scene_numbering.get_scene_numbering(self.show.indexerid,
self.show.indexer,
epObj.season,
epObj.episode)
logger.log(
"Beginning backlog search for " + self.wantedEpisodes[i].prettyName() + ' as ' + self.wantedEpisodes[
i].prettySceneName())
# search for our wanted items and return the results
results = search.searchProviders(self.show, self.segment, self.wantedEpisodes, seasonSearch=seasonSearch)
# download whatever we find
for curResult in results:
search.snatchEpisode(curResult)
time.sleep(5)
self.finish()
return search.searchProviders(self.show, self.segment, self.wantedEpisodes, curProvider, False, seasonSearch)
def _need_any_episodes(self, statusResults, bestQualities):
wantedEpisodes = []
# check through the list of statuses to see if we want any
for curStatusResult in statusResults:
time.sleep(0.01)
curCompositeStatus = int(curStatusResult["status"])
curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
episode = int(curStatusResult["episode"])
@ -284,16 +294,28 @@ class FailedQueueItem(generic_queue.QueueItem):
def execute(self):
generic_queue.QueueItem.execute(self)
with futures.ThreadPoolExecutor(sickbeard.NUM_OF_THREADS) as executor:
foundResults = list(executor.map(self.process, [x for x in sickbeard.providers.sortedProviderList() if x.isActive()]))
# download whatever we find
for curResult in foundResults:
time.sleep(0.01)
self.success = search.snatchEpisode(curResult)
self.finish()
def process(self, curProvider):
episodes = []
for i, epObj in enumerate(self.episodes):
# convert indexer numbering to scene numbering for searches
(self.episodes[i].scene_season, self.episodes[i].scene_episode) = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid, self.show.indexer, epObj.season, epObj.episode)
time.sleep(0.01)
if epObj.show.air_by_date:
logger.log("Beginning manual search for " + epObj.prettyABDName())
else:
logger.log(
"Beginning failed download search for " + epObj.prettyName() + ' as ' + epObj.prettySceneName())
"Beginning failed download search for " + epObj.prettyName())
(release, provider) = failed_history.findRelease(self.show, epObj.season, epObj.episode)
if release:
@ -305,12 +327,4 @@ class FailedQueueItem(generic_queue.QueueItem):
failed_history.revertEpisode(self.show, epObj.season, epObj.episode)
episodes.append(epObj)
# get search results
results = search.searchProviders(self.show, episodes[0].season, episodes)
# download whatever we find
for curResult in results:
self.success = search.snatchEpisode(curResult)
time.sleep(5)
self.finish()
return search.searchProviders(self.show, self.episodes[0].season, self.episodes, curProvider, False, False)
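All four queue items now share one execution pattern: fan the per-provider process() method out across a thread pool sized by NUM_OF_THREADS, collect each provider's results, and snatch what comes back. Stripped to its essentials, assuming process() returns a list of results per provider:

from concurrent import futures

def search_all_providers(process, providers, num_threads):
    # One worker thread per provider, capped at num_threads; map keeps order.
    with futures.ThreadPoolExecutor(num_threads) as executor:
        per_provider = list(executor.map(process, [p for p in providers if p.isActive()]))

    # Flatten the per-provider lists into one snatchable result list.
    return [result for results in per_provider if results for result in results]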

View File

@ -1849,9 +1849,9 @@ class TVEpisode(object):
'%Q.N': dot(Quality.qualityStrings[epQual]),
'%Q_N': us(Quality.qualityStrings[epQual]),
'%S': str(self.season),
'%0S': '%02d' % int(self.season),
'%0S': '%02d' % int(self.season) if not self.show.air_by_date else self.season,
'%E': str(self.episode),
'%0E': '%02d' % int(self.episode),
'%0E': '%02d' % int(self.episode) if not self.show.air_by_date else self.episode,
'%XS': str(self.scene_season),
'%0XS': '%02d' % int(self.scene_season),
'%XE': str(self.scene_episode),
@ -2136,3 +2136,15 @@ class TVEpisode(object):
self.saveToDB()
for relEp in self.relatedEps:
relEp.saveToDB()
def convertToSceneNumbering(self):
(self.scene_season, self.scene_episode) = sickbeard.scene_numbering.get_scene_numbering(self.show.indexerid,
self.show.indexer,
self.season,
self.episode)
def convertToIndexerNumbering(self):
(self.season, self.episode) = sickbeard.scene_numbering.get_indexer_numbering(self.show.indexerid,
self.show.indexer,
self.scene_season,
self.scene_episode)
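These two helpers make an episode's numbering reversible: convert to scene numbering before querying providers, then back to indexer numbering for the database and post-processing. A small usage sketch, assuming ep is a TVEpisode as defined above:

def scene_search_string(ep):
    ep.convertToSceneNumbering()
    try:
        return "%s S%02dE%02d" % (ep.show.name, ep.scene_season, ep.scene_episode)
    finally:
        ep.convertToIndexerNumbering()  # restore indexer numbering afterwards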

View File

@ -984,7 +984,8 @@ class ConfigGeneral:
update_shows_on_start=None, update_frequency=None, launch_browser=None, web_username=None, use_api=None, api_key=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None,
handle_reverse_proxy=None, sort_article=None, auto_update=None, proxy_setting=None,
anon_redirect=None, git_path=None, calendar_unprotected=None, date_preset=None, time_preset=None, indexer_default=None):
anon_redirect=None, git_path=None, calendar_unprotected=None, date_preset=None, time_preset=None, indexer_default=None,
num_of_threads=None):
results = []
@ -1039,6 +1040,8 @@ class ConfigGeneral:
sickbeard.HANDLE_REVERSE_PROXY = config.checkbox_to_value(handle_reverse_proxy)
sickbeard.NUM_OF_THREADS = config.to_int(num_of_threads)
sickbeard.save_config()
if len(results) > 0:
@ -1063,9 +1066,9 @@ class ConfigSearch:
@cherrypy.expose
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_username=None, nzbget_password=None,
nzbget_category=None, nzbget_host=None,
nzbget_category=None, nzbget_host=None, nzbget_use_https=None,
nzb_method=None, torrent_method=None, usenet_retention=None, search_frequency=None,
download_propers=None, prefer_episode_releases=None, allow_high_priority=None,
download_propers=None, prefer_episode_releases=None, allow_high_priority=None, backlog_startup=None,
torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_path=None,
torrent_ratio=None, torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None, ignore_words=None):
@ -1097,6 +1100,11 @@ class ConfigSearch:
sickbeard.PREFER_EPISODE_RELEASES = config.checkbox_to_value(prefer_episode_releases)
sickbeard.ALLOW_HIGH_PRIORITY = config.checkbox_to_value(allow_high_priority)
sickbeard.BACKLOG_STARTUP = config.checkbox_to_value(backlog_startup)
sickbeard.backlogSearchScheduler.silent = not sickbeard.BACKLOG_STARTUP
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
@ -1108,6 +1116,7 @@ class ConfigSearch:
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = config.clean_host(nzbget_host)
sickbeard.NZBGET_USE_HTTPS = config.checkbox_to_value(nzbget_use_https)
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
@ -3624,14 +3633,6 @@ class WebInterface:
sql_results[index]['localtime'] = network_timezones.parse_date_time(item['airdate'], item['airs'],
item['network'])
#Normalize/Format the Airing Time
try:
locale.setlocale(locale.LC_TIME, 'us_US')
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
locale.setlocale(locale.LC_ALL, '') #Resetting to default locale
except:
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(file="comingEpisodes.tmpl")