diff --git a/lib/concurrent/__init__.py b/lib/concurrent/__init__.py
deleted file mode 100644
index b36383a6..00000000
--- a/lib/concurrent/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from pkgutil import extend_path
-
-__path__ = extend_path(__path__, __name__)
diff --git a/lib/concurrent/futures/__init__.py b/lib/concurrent/futures/__init__.py
deleted file mode 100644
index fef52819..00000000
--- a/lib/concurrent/futures/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2009 Brian Quinlan. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Execute computations asynchronously using threads or processes."""
-
-__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-
-from concurrent.futures._base import (FIRST_COMPLETED,
- FIRST_EXCEPTION,
- ALL_COMPLETED,
- CancelledError,
- TimeoutError,
- Future,
- Executor,
- wait,
- as_completed)
-from concurrent.futures.thread import ThreadPoolExecutor
-
-# Jython doesn't have multiprocessing
-try:
- from concurrent.futures.process import ProcessPoolExecutor
-except ImportError:
- pass
diff --git a/lib/concurrent/futures/_base.py b/lib/concurrent/futures/_base.py
deleted file mode 100644
index a481284b..00000000
--- a/lib/concurrent/futures/_base.py
+++ /dev/null
@@ -1,577 +0,0 @@
-# Copyright 2009 Brian Quinlan. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-from __future__ import with_statement
-import logging
-import threading
-import time
-
-try:
- from collections import namedtuple
-except ImportError:
- from concurrent.futures._compat import namedtuple
-
-__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-
-FIRST_COMPLETED = 'FIRST_COMPLETED'
-FIRST_EXCEPTION = 'FIRST_EXCEPTION'
-ALL_COMPLETED = 'ALL_COMPLETED'
-_AS_COMPLETED = '_AS_COMPLETED'
-
-# Possible future states (for internal use by the futures package).
-PENDING = 'PENDING'
-RUNNING = 'RUNNING'
-# The future was cancelled by the user...
-CANCELLED = 'CANCELLED'
-# ...and _Waiter.add_cancelled() was called by a worker.
-CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
-FINISHED = 'FINISHED'
-
-_FUTURE_STATES = [
- PENDING,
- RUNNING,
- CANCELLED,
- CANCELLED_AND_NOTIFIED,
- FINISHED
-]
-
-_STATE_TO_DESCRIPTION_MAP = {
- PENDING: "pending",
- RUNNING: "running",
- CANCELLED: "cancelled",
- CANCELLED_AND_NOTIFIED: "cancelled",
- FINISHED: "finished"
-}
-
-# Logger for internal use by the futures package.
-LOGGER = logging.getLogger("concurrent.futures")
-
-class Error(Exception):
- """Base class for all future-related exceptions."""
- pass
-
-class CancelledError(Error):
- """The Future was cancelled."""
- pass
-
-class TimeoutError(Error):
- """The operation exceeded the given deadline."""
- pass
-
-class _Waiter(object):
- """Provides the event that wait() and as_completed() block on."""
- def __init__(self):
- self.event = threading.Event()
- self.finished_futures = []
-
- def add_result(self, future):
- self.finished_futures.append(future)
-
- def add_exception(self, future):
- self.finished_futures.append(future)
-
- def add_cancelled(self, future):
- self.finished_futures.append(future)
-
-class _AsCompletedWaiter(_Waiter):
- """Used by as_completed()."""
-
- def __init__(self):
- super(_AsCompletedWaiter, self).__init__()
- self.lock = threading.Lock()
-
- def add_result(self, future):
- with self.lock:
- super(_AsCompletedWaiter, self).add_result(future)
- self.event.set()
-
- def add_exception(self, future):
- with self.lock:
- super(_AsCompletedWaiter, self).add_exception(future)
- self.event.set()
-
- def add_cancelled(self, future):
- with self.lock:
- super(_AsCompletedWaiter, self).add_cancelled(future)
- self.event.set()
-
-class _FirstCompletedWaiter(_Waiter):
- """Used by wait(return_when=FIRST_COMPLETED)."""
-
- def add_result(self, future):
- super(_FirstCompletedWaiter, self).add_result(future)
- self.event.set()
-
- def add_exception(self, future):
- super(_FirstCompletedWaiter, self).add_exception(future)
- self.event.set()
-
- def add_cancelled(self, future):
- super(_FirstCompletedWaiter, self).add_cancelled(future)
- self.event.set()
-
-class _AllCompletedWaiter(_Waiter):
- """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
-
- def __init__(self, num_pending_calls, stop_on_exception):
- self.num_pending_calls = num_pending_calls
- self.stop_on_exception = stop_on_exception
- self.lock = threading.Lock()
- super(_AllCompletedWaiter, self).__init__()
-
- def _decrement_pending_calls(self):
- with self.lock:
- self.num_pending_calls -= 1
- if not self.num_pending_calls:
- self.event.set()
-
- def add_result(self, future):
- super(_AllCompletedWaiter, self).add_result(future)
- self._decrement_pending_calls()
-
- def add_exception(self, future):
- super(_AllCompletedWaiter, self).add_exception(future)
- if self.stop_on_exception:
- self.event.set()
- else:
- self._decrement_pending_calls()
-
- def add_cancelled(self, future):
- super(_AllCompletedWaiter, self).add_cancelled(future)
- self._decrement_pending_calls()
-
-class _AcquireFutures(object):
- """A context manager that does an ordered acquire of Future conditions."""
-
- def __init__(self, futures):
- self.futures = sorted(futures, key=id)
-
- def __enter__(self):
- for future in self.futures:
- future._condition.acquire()
-
- def __exit__(self, *args):
- for future in self.futures:
- future._condition.release()
-
-def _create_and_install_waiters(fs, return_when):
- if return_when == _AS_COMPLETED:
- waiter = _AsCompletedWaiter()
- elif return_when == FIRST_COMPLETED:
- waiter = _FirstCompletedWaiter()
- else:
- pending_count = sum(
- f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
-
- if return_when == FIRST_EXCEPTION:
- waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
- elif return_when == ALL_COMPLETED:
- waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
- else:
- raise ValueError("Invalid return condition: %r" % return_when)
-
- for f in fs:
- f._waiters.append(waiter)
-
- return waiter
-
-def as_completed(fs, timeout=None):
- """An iterator over the given futures that yields each as it completes.
-
- Args:
- fs: The sequence of Futures (possibly created by different Executors) to
- iterate over.
- timeout: The maximum number of seconds to wait. If None, then there
- is no limit on the wait time.
-
- Returns:
- An iterator that yields the given Futures as they complete (finished or
- cancelled).
-
- Raises:
- TimeoutError: If the entire result iterator could not be generated
- before the given timeout.
- """
- if timeout is not None:
- end_time = timeout + time.time()
-
- with _AcquireFutures(fs):
- finished = set(
- f for f in fs
- if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
- pending = set(fs) - finished
- waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
-
- try:
- for future in finished:
- yield future
-
- while pending:
- if timeout is None:
- wait_timeout = None
- else:
- wait_timeout = end_time - time.time()
- if wait_timeout < 0:
- raise TimeoutError(
- '%d (of %d) futures unfinished' % (
- len(pending), len(fs)))
-
- waiter.event.wait(wait_timeout)
-
- with waiter.lock:
- finished = waiter.finished_futures
- waiter.finished_futures = []
- waiter.event.clear()
-
- for future in finished:
- yield future
- pending.remove(future)
-
- finally:
- for f in fs:
- f._waiters.remove(waiter)
-
-DoneAndNotDoneFutures = namedtuple(
- 'DoneAndNotDoneFutures', 'done not_done')
-def wait(fs, timeout=None, return_when=ALL_COMPLETED):
- """Wait for the futures in the given sequence to complete.
-
- Args:
- fs: The sequence of Futures (possibly created by different Executors) to
- wait upon.
- timeout: The maximum number of seconds to wait. If None, then there
- is no limit on the wait time.
- return_when: Indicates when this function should return. The options
- are:
-
- FIRST_COMPLETED - Return when any future finishes or is
- cancelled.
- FIRST_EXCEPTION - Return when any future finishes by raising an
- exception. If no future raises an exception
- then it is equivalent to ALL_COMPLETED.
- ALL_COMPLETED - Return when all futures finish or are cancelled.
-
- Returns:
- A named 2-tuple of sets. The first set, named 'done', contains the
- futures that completed (finished or cancelled) before the wait
- completed. The second set, named 'not_done', contains uncompleted
- futures.
- """
- with _AcquireFutures(fs):
- done = set(f for f in fs
- if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
- not_done = set(fs) - done
-
- if (return_when == FIRST_COMPLETED) and done:
- return DoneAndNotDoneFutures(done, not_done)
- elif (return_when == FIRST_EXCEPTION) and done:
- if any(f for f in done
- if not f.cancelled() and f.exception() is not None):
- return DoneAndNotDoneFutures(done, not_done)
-
- if len(done) == len(fs):
- return DoneAndNotDoneFutures(done, not_done)
-
- waiter = _create_and_install_waiters(fs, return_when)
-
- waiter.event.wait(timeout)
- for f in fs:
- f._waiters.remove(waiter)
-
- done.update(waiter.finished_futures)
- return DoneAndNotDoneFutures(done, set(fs) - done)
-
-class Future(object):
- """Represents the result of an asynchronous computation."""
-
- def __init__(self):
- """Initializes the future. Should not be called by clients."""
- self._condition = threading.Condition()
- self._state = PENDING
- self._result = None
- self._exception = None
- self._waiters = []
- self._done_callbacks = []
-
- def _invoke_callbacks(self):
- for callback in self._done_callbacks:
- try:
- callback(self)
- except Exception:
- LOGGER.exception('exception calling callback for %r', self)
-
- def __repr__(self):
- with self._condition:
- if self._state == FINISHED:
- if self._exception:
- return '<Future at %s state=%s raised %s>' % (
- hex(id(self)),
- _STATE_TO_DESCRIPTION_MAP[self._state],
- self._exception.__class__.__name__)
- else:
- return '<Future at %s state=%s returned %s>' % (
- hex(id(self)),
- _STATE_TO_DESCRIPTION_MAP[self._state],
- self._result.__class__.__name__)
- return '<Future at %s state=%s>' % (
- hex(id(self)),
- _STATE_TO_DESCRIPTION_MAP[self._state])
-
- def cancel(self):
- """Cancel the future if possible.
-
- Returns True if the future was cancelled, False otherwise. A future
- cannot be cancelled if it is running or has already completed.
- """
- with self._condition:
- if self._state in [RUNNING, FINISHED]:
- return False
-
- if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
- return True
-
- self._state = CANCELLED
- self._condition.notify_all()
-
- self._invoke_callbacks()
- return True
-
- def cancelled(self):
- """Return True if the future has cancelled."""
- with self._condition:
- return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
-
- def isAlive(self):
- return self.running()
-
- def running(self):
- """Return True if the future is currently executing."""
- with self._condition:
- return self._state == RUNNING
-
- def done(self):
- """Return True of the future was cancelled or finished executing."""
- with self._condition:
- return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
-
- def __get_result(self):
- if self._exception:
- raise self._exception
- else:
- return self._result
-
- def add_done_callback(self, fn):
- """Attaches a callable that will be called when the future finishes.
-
- Args:
- fn: A callable that will be called with this future as its only
- argument when the future completes or is cancelled. The callable
- will always be called by a thread in the same process in which
- it was added. If the future has already completed or been
- cancelled then the callable will be called immediately. These
- callables are called in the order that they were added.
- """
- with self._condition:
- if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
- self._done_callbacks.append(fn)
- return
- fn(self)
-
- def result(self, timeout=None):
- """Return the result of the call that the future represents.
-
- Args:
- timeout: The number of seconds to wait for the result if the future
- isn't done. If None, then there is no limit on the wait time.
-
- Returns:
- The result of the call that the future represents.
-
- Raises:
- CancelledError: If the future was cancelled.
- TimeoutError: If the future didn't finish executing before the given
- timeout.
- Exception: If the call raised then that exception will be raised.
- """
- with self._condition:
- if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
- raise CancelledError()
- elif self._state == FINISHED:
- return self.__get_result()
-
- self._condition.wait(timeout)
-
- if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
- raise CancelledError()
- elif self._state == FINISHED:
- return self.__get_result()
- else:
- raise TimeoutError()
-
- def exception(self, timeout=None):
- """Return the exception raised by the call that the future represents.
-
- Args:
- timeout: The number of seconds to wait for the exception if the
- future isn't done. If None, then there is no limit on the wait
- time.
-
- Returns:
- The exception raised by the call that the future represents or None
- if the call completed without raising.
-
- Raises:
- CancelledError: If the future was cancelled.
- TimeoutError: If the future didn't finish executing before the given
- timeout.
- """
-
- with self._condition:
- if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
- raise CancelledError()
- elif self._state == FINISHED:
- return self._exception
-
- self._condition.wait(timeout)
-
- if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
- raise CancelledError()
- elif self._state == FINISHED:
- return self._exception
- else:
- raise TimeoutError()
-
- # The following methods should only be used by Executors and in tests.
- def set_running_or_notify_cancel(self):
- """Mark the future as running or process any cancel notifications.
-
- Should only be used by Executor implementations and unit tests.
-
- If the future has been cancelled (cancel() was called and returned
- True) then any threads waiting on the future completing (through calls
- to as_completed() or wait()) are notified and False is returned.
-
- If the future was not cancelled then it is put in the running state
- (future calls to running() will return True) and True is returned.
-
- This method should be called by Executor implementations before
- executing the work associated with this future. If this method returns
- False then the work should not be executed.
-
- Returns:
- False if the Future was cancelled, True otherwise.
-
- Raises:
- RuntimeError: if this method was already called or if set_result()
- or set_exception() was called.
- """
- with self._condition:
- if self._state == CANCELLED:
- self._state = CANCELLED_AND_NOTIFIED
- for waiter in self._waiters:
- waiter.add_cancelled(self)
- # self._condition.notify_all() is not necessary because
- # self.cancel() triggers a notification.
- return False
- elif self._state == PENDING:
- self._state = RUNNING
- return True
- else:
- LOGGER.critical('Future %s in unexpected state: %s',
- id(self),
- self._state)
- raise RuntimeError('Future in unexpected state')
-
- def set_result(self, result):
- """Sets the return value of work associated with the future.
-
- Should only be used by Executor implementations and unit tests.
- """
- with self._condition:
- self._result = result
- self._state = FINISHED
- for waiter in self._waiters:
- waiter.add_result(self)
- self._condition.notify_all()
- self._invoke_callbacks()
-
- def set_exception(self, exception):
- """Sets the result of the future as being the given exception.
-
- Should only be used by Executor implementations and unit tests.
- """
- with self._condition:
- self._exception = exception
- self._state = FINISHED
- for waiter in self._waiters:
- waiter.add_exception(self)
- self._condition.notify_all()
- self._invoke_callbacks()
-
-class Executor(object):
- """This is an abstract base class for concrete asynchronous executors."""
-
- def submit(self, fn, *args, **kwargs):
- """Submits a callable to be executed with the given arguments.
-
- Schedules the callable to be executed as fn(*args, **kwargs) and returns
- a Future instance representing the execution of the callable.
-
- Returns:
- A Future representing the given call.
- """
- raise NotImplementedError()
-
- def map(self, fn, *iterables, **kwargs):
- """Returns a iterator equivalent to map(fn, iter).
-
- Args:
- fn: A callable that will take as many arguments as there are
- passed iterables.
- timeout: The maximum number of seconds to wait. If None, then there
- is no limit on the wait time.
-
- Returns:
- An iterator equivalent to: map(func, *iterables) but the calls may
- be evaluated out-of-order.
-
- Raises:
- TimeoutError: If the entire result iterator could not be generated
- before the given timeout.
- Exception: If fn(*args) raises for any values.
- """
- timeout = kwargs.get('timeout')
- if timeout is not None:
- end_time = timeout + time.time()
-
- fs = [self.submit(fn, *args) for args in zip(*iterables)]
-
- try:
- for future in fs:
- if timeout is None:
- yield future.result()
- else:
- yield future.result(end_time - time.time())
- finally:
- for future in fs:
- future.cancel()
-
- def shutdown(self, wait=True):
- """Clean-up the resources associated with the Executor.
-
- It is safe to call this method several times. Otherwise, no other
- methods can be called after this one.
-
- Args:
- wait: If True then shutdown will not return until all running
- futures have finished executing and the resources used by the
- executor have been reclaimed.
- """
- pass
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.shutdown(wait=True)
- return False
diff --git a/lib/concurrent/futures/_compat.py b/lib/concurrent/futures/_compat.py
deleted file mode 100644
index 11462326..00000000
--- a/lib/concurrent/futures/_compat.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from keyword import iskeyword as _iskeyword
-from operator import itemgetter as _itemgetter
-import sys as _sys
-
-
-def namedtuple(typename, field_names):
- """Returns a new subclass of tuple with named fields.
-
- >>> Point = namedtuple('Point', 'x y')
- >>> Point.__doc__ # docstring for the new class
- 'Point(x, y)'
- >>> p = Point(11, y=22) # instantiate with positional args or keywords
- >>> p[0] + p[1] # indexable like a plain tuple
- 33
- >>> x, y = p # unpack like a regular tuple
- >>> x, y
- (11, 22)
- >>> p.x + p.y # fields also accessible by name
- 33
- >>> d = p._asdict() # convert to a dictionary
- >>> d['x']
- 11
- >>> Point(**d) # convert from a dictionary
- Point(x=11, y=22)
- >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
- Point(x=100, y=22)
-
- """
-
- # Parse and validate the field names. Validation serves two purposes,
- # generating informative error messages and preventing template injection attacks.
- if isinstance(field_names, basestring):
- field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
- field_names = tuple(map(str, field_names))
- for name in (typename,) + field_names:
- if not all(c.isalnum() or c=='_' for c in name):
- raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
- if _iskeyword(name):
- raise ValueError('Type names and field names cannot be a keyword: %r' % name)
- if name[0].isdigit():
- raise ValueError('Type names and field names cannot start with a number: %r' % name)
- seen_names = set()
- for name in field_names:
- if name.startswith('_'):
- raise ValueError('Field names cannot start with an underscore: %r' % name)
- if name in seen_names:
- raise ValueError('Encountered duplicate field name: %r' % name)
- seen_names.add(name)
-
- # Create and fill-in the class template
- numfields = len(field_names)
- argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
- reprtxt = ', '.join('%s=%%r' % name for name in field_names)
- dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
- template = '''class %(typename)s(tuple):
- '%(typename)s(%(argtxt)s)' \n
- __slots__ = () \n
- _fields = %(field_names)r \n
- def __new__(_cls, %(argtxt)s):
- return _tuple.__new__(_cls, (%(argtxt)s)) \n
- @classmethod
- def _make(cls, iterable, new=tuple.__new__, len=len):
- 'Make a new %(typename)s object from a sequence or iterable'
- result = new(cls, iterable)
- if len(result) != %(numfields)d:
- raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
- return result \n
- def __repr__(self):
- return '%(typename)s(%(reprtxt)s)' %% self \n
- def _asdict(t):
- 'Return a new dict which maps field names to their values'
- return {%(dicttxt)s} \n
- def _replace(_self, **kwds):
- 'Return a new %(typename)s object replacing specified fields with new values'
- result = _self._make(map(kwds.pop, %(field_names)r, _self))
- if kwds:
- raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
- return result \n
- def __getnewargs__(self):
- return tuple(self) \n\n''' % locals()
- for i, name in enumerate(field_names):
- template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
-
- # Execute the template string in a temporary namespace and
- # support tracing utilities by setting a value for frame.f_globals['__name__']
- namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
- _property=property, _tuple=tuple)
- try:
- exec(template, namespace)
- except SyntaxError:
- e = _sys.exc_info()[1]
- raise SyntaxError(e.message + ':\n' + template)
- result = namespace[typename]
-
- # For pickling to work, the __module__ variable needs to be set to the frame
-# where the named tuple is created. Bypass this step in environments where
- # sys._getframe is not defined (Jython for example).
- if hasattr(_sys, '_getframe'):
- result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
-
- return result
diff --git a/lib/concurrent/futures/process.py b/lib/concurrent/futures/process.py
deleted file mode 100644
index 98684f8e..00000000
--- a/lib/concurrent/futures/process.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright 2009 Brian Quinlan. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Implements ProcessPoolExecutor.
-
-The following diagram and text describe the data-flow through the system:
-
-|======================= In-process =====================|== Out-of-process ==|
-
-+----------+     +----------+       +--------+     +-----------+       +---------+
-|          |  => | Work Ids |    => |        |  => | Call Q    |    => |         |
-|          |     +----------+       |        |     +-----------+       |         |
-|          |     | ...      |       |        |     | ...       |       |         |
-|          |     | 6        |       |        |     | 5, call() |       |         |
-|          |     | 7        |       |        |     | ...       |       |         |
-| Process  |     | ...      |       | Local  |     +-----------+       | Process |
-|  Pool    |     +----------+       | Worker |                         |  #1..n  |
-| Executor |                        | Thread |                         |         |
-|          |     +----------- +     |        |     +-----------+       |         |
-|          | <=> | Work Items | <=> |        | <=  | Result Q  | <=    |         |
-|          |     +------------+     |        |     +-----------+       |         |
-|          |     | 6: call() |      |        |     | ...       |       |         |
-|          |     |    future |      |        |     | 4, result |       |         |
-|          |     | ...       |      |        |     | 3, except |       |         |
-+----------+     +------------+     +--------+     +-----------+       +---------+
-
-Executor.submit() called:
-- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
-- adds the id of the _WorkItem to the "Work Ids" queue
-
-Local worker thread:
-- reads work ids from the "Work Ids" queue and looks up the corresponding
- WorkItem from the "Work Items" dict: if the work item has been cancelled then
- it is simply removed from the dict, otherwise it is repackaged as a
- _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
- until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
- calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
-- reads _ResultItems from "Result Q", updates the future stored in the
- "Work Items" dict and deletes the dict entry
-
-Process #1..n:
-- reads _CallItems from "Call Q", executes the calls, and puts the resulting
- _ResultItems in "Request Q"
-"""
-
-from __future__ import with_statement
-import atexit
-import multiprocessing
-import threading
-import weakref
-import sys
-
-from concurrent.futures import _base
-
-try:
- import queue
-except ImportError:
- import Queue as queue
-
-__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-
-# Workers are created as daemon threads and processes. This is done to allow the
-# interpreter to exit when there are still idle processes in a
-# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
-# allowing workers to die with the interpreter has two undesirable properties:
-# - The workers would still be running during interpreter shutdown,
-# meaning that they would fail in unpredictable ways.
-# - The workers could be killed while evaluating a work item, which could
-# be bad if the callable being evaluated has external side-effects e.g.
-# writing to a file.
-#
-# To work around this problem, an exit handler is installed which tells the
-# workers to exit when their work queues are empty and then waits until the
-# threads/processes finish.
-
-_threads_queues = weakref.WeakKeyDictionary()
-_shutdown = False
-
-def _python_exit():
- global _shutdown
- _shutdown = True
- items = list(_threads_queues.items())
- for t, q in items:
- q.put(None)
- for t, q in items:
- t.join()
-
-# Controls how many more calls than processes will be queued in the call queue.
-# A smaller number will mean that processes spend more time idle waiting for
-# work while a larger number will make Future.cancel() succeed less frequently
-# (Futures in the call queue cannot be cancelled).
-EXTRA_QUEUED_CALLS = 1
-
-class _WorkItem(object):
- def __init__(self, future, fn, args, kwargs):
- self.future = future
- self.fn = fn
- self.args = args
- self.kwargs = kwargs
-
-class _ResultItem(object):
- def __init__(self, work_id, exception=None, result=None):
- self.work_id = work_id
- self.exception = exception
- self.result = result
-
-class _CallItem(object):
- def __init__(self, work_id, fn, args, kwargs):
- self.work_id = work_id
- self.fn = fn
- self.args = args
- self.kwargs = kwargs
-
-def _process_worker(call_queue, result_queue):
- """Evaluates calls from call_queue and places the results in result_queue.
-
- This worker is run in a separate process.
-
- Args:
- call_queue: A multiprocessing.Queue of _CallItems that will be read and
- evaluated by the worker.
- result_queue: A multiprocessing.Queue of _ResultItems that will be written
- to by the worker.
- shutdown: A multiprocessing.Event that will be set as a signal to the
- worker that it should exit when call_queue is empty.
- """
- while True:
- call_item = call_queue.get(block=True)
- if call_item is None:
- # Wake up queue management thread
- result_queue.put(None)
- return
- try:
- r = call_item.fn(*call_item.args, **call_item.kwargs)
- except BaseException:
- e = sys.exc_info()[1]
- result_queue.put(_ResultItem(call_item.work_id,
- exception=e))
- else:
- result_queue.put(_ResultItem(call_item.work_id,
- result=r))
-
-def _add_call_item_to_queue(pending_work_items,
- work_ids,
- call_queue):
- """Fills call_queue with _WorkItems from pending_work_items.
-
- This function never blocks.
-
- Args:
- pending_work_items: A dict mapping work ids to _WorkItems e.g.
- {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
- work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
- are consumed and the corresponding _WorkItems from
- pending_work_items are transformed into _CallItems and put in
- call_queue.
- call_queue: A multiprocessing.Queue that will be filled with _CallItems
- derived from _WorkItems.
- """
- while True:
- if call_queue.full():
- return
- try:
- work_id = work_ids.get(block=False)
- except queue.Empty:
- return
- else:
- work_item = pending_work_items[work_id]
-
- if work_item.future.set_running_or_notify_cancel():
- call_queue.put(_CallItem(work_id,
- work_item.fn,
- work_item.args,
- work_item.kwargs),
- block=True)
- else:
- del pending_work_items[work_id]
- continue
-
-def _queue_management_worker(executor_reference,
- processes,
- pending_work_items,
- work_ids_queue,
- call_queue,
- result_queue):
- """Manages the communication between this process and the worker processes.
-
- This function is run in a local thread.
-
- Args:
- executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
- this thread. Used to determine if the ProcessPoolExecutor has been
- garbage collected and that this function can exit.
- processes: A list of the multiprocessing.Process instances used as
- workers.
- pending_work_items: A dict mapping work ids to _WorkItems e.g.
- {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
- work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
- call_queue: A multiprocessing.Queue that will be filled with _CallItems
- derived from _WorkItems for processing by the process workers.
- result_queue: A multiprocessing.Queue of _ResultItems generated by the
- process workers.
- """
- nb_shutdown_processes = [0]
- def shutdown_one_process():
- """Tell a worker to terminate, which will in turn wake us again"""
- call_queue.put(None)
- nb_shutdown_processes[0] += 1
- while True:
- _add_call_item_to_queue(pending_work_items,
- work_ids_queue,
- call_queue)
-
- result_item = result_queue.get(block=True)
- if result_item is not None:
- work_item = pending_work_items[result_item.work_id]
- del pending_work_items[result_item.work_id]
-
- if result_item.exception:
- work_item.future.set_exception(result_item.exception)
- else:
- work_item.future.set_result(result_item.result)
- # Check whether we should start shutting down.
- executor = executor_reference()
- # No more work items can be added if:
- # - The interpreter is shutting down OR
- # - The executor that owns this worker has been collected OR
- # - The executor that owns this worker has been shutdown.
- if _shutdown or executor is None or executor._shutdown_thread:
- # Since no new work items can be added, it is safe to shutdown
- # this thread if there are no pending work items.
- if not pending_work_items:
- while nb_shutdown_processes[0] < len(processes):
- shutdown_one_process()
- # If .join() is not called on the created processes then
- # some multiprocessing.Queue methods may deadlock on Mac OS
- # X.
- for p in processes:
- p.join()
- call_queue.close()
- return
- del executor
-
-_system_limits_checked = False
-_system_limited = None
-def _check_system_limits():
- global _system_limits_checked, _system_limited
- if _system_limits_checked:
- if _system_limited:
- raise NotImplementedError(_system_limited)
- _system_limits_checked = True
- try:
- import os
- nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
- except (AttributeError, ValueError):
- # sysconf not available or setting not available
- return
- if nsems_max == -1:
- # indeterminate limit, assume that limit is determined
- # by available memory only
- return
- if nsems_max >= 256:
- # minimum number of semaphores available
- # according to POSIX
- return
- _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
- raise NotImplementedError(_system_limited)
-
-class ProcessPoolExecutor(_base.Executor):
- def __init__(self, max_workers=None):
- """Initializes a new ProcessPoolExecutor instance.
-
- Args:
- max_workers: The maximum number of processes that can be used to
- execute the given calls. If None or not given then as many
- worker processes will be created as the machine has processors.
- """
- _check_system_limits()
-
- if max_workers is None:
- self._max_workers = multiprocessing.cpu_count()
- else:
- self._max_workers = max_workers
-
- # Make the call queue slightly larger than the number of processes to
- # prevent the worker processes from idling. But don't make it too big
- # because futures in the call queue cannot be cancelled.
- self._call_queue = multiprocessing.Queue(self._max_workers +
- EXTRA_QUEUED_CALLS)
- self._result_queue = multiprocessing.Queue()
- self._work_ids = queue.Queue()
- self._queue_management_thread = None
- self._processes = set()
-
- # Shutdown is a two-step process.
- self._shutdown_thread = False
- self._shutdown_lock = threading.Lock()
- self._queue_count = 0
- self._pending_work_items = {}
-
- def _start_queue_management_thread(self):
- # When the executor gets lost, the weakref callback will wake up
- # the queue management thread.
- def weakref_cb(_, q=self._result_queue):
- q.put(None)
- if self._queue_management_thread is None:
- self._queue_management_thread = threading.Thread(
- target=_queue_management_worker,
- args=(weakref.ref(self, weakref_cb),
- self._processes,
- self._pending_work_items,
- self._work_ids,
- self._call_queue,
- self._result_queue))
- self._queue_management_thread.daemon = True
- self._queue_management_thread.start()
- _threads_queues[self._queue_management_thread] = self._result_queue
-
- def _adjust_process_count(self):
- for _ in range(len(self._processes), self._max_workers):
- p = multiprocessing.Process(
- target=_process_worker,
- args=(self._call_queue,
- self._result_queue))
- p.start()
- self._processes.add(p)
-
- def submit(self, fn, *args, **kwargs):
- with self._shutdown_lock:
- if self._shutdown_thread:
- raise RuntimeError('cannot schedule new futures after shutdown')
-
- f = _base.Future()
- w = _WorkItem(f, fn, args, kwargs)
-
- self._pending_work_items[self._queue_count] = w
- self._work_ids.put(self._queue_count)
- self._queue_count += 1
- # Wake up queue management thread
- self._result_queue.put(None)
-
- self._start_queue_management_thread()
- self._adjust_process_count()
- return f
- submit.__doc__ = _base.Executor.submit.__doc__
-
- def shutdown(self, wait=True):
- with self._shutdown_lock:
- self._shutdown_thread = True
- if self._queue_management_thread:
- # Wake up queue management thread
- self._result_queue.put(None)
- if wait:
- self._queue_management_thread.join()
- # To reduce the risk of opening too many files, remove references to
- # objects that use file descriptors.
- self._queue_management_thread = None
- self._call_queue = None
- self._result_queue = None
- self._processes = None
- shutdown.__doc__ = _base.Executor.shutdown.__doc__
-
-atexit.register(_python_exit)
diff --git a/lib/concurrent/futures/thread.py b/lib/concurrent/futures/thread.py
deleted file mode 100644
index 9d435cd0..00000000
--- a/lib/concurrent/futures/thread.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright 2009 Brian Quinlan. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Implements ThreadPoolExecutor."""
-
-from __future__ import with_statement
-import atexit
-import threading
-import weakref
-import sys
-
-from concurrent.futures import _base
-
-try:
- import queue
-except ImportError:
- import Queue as queue
-
-__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-
-# Workers are created as daemon threads. This is done to allow the interpreter
-# to exit when there are still idle threads in a ThreadPoolExecutor's thread
-# pool (i.e. shutdown() was not called). However, allowing workers to die with
-# the interpreter has two undesirable properties:
-# - The workers would still be running during interpreter shutdown,
-# meaning that they would fail in unpredictable ways.
-# - The workers could be killed while evaluating a work item, which could
-# be bad if the callable being evaluated has external side-effects e.g.
-# writing to a file.
-#
-# To work around this problem, an exit handler is installed which tells the
-# workers to exit when their work queues are empty and then waits until the
-# threads finish.
-
-_threads_queues = weakref.WeakKeyDictionary()
-_shutdown = False
-
-def _python_exit():
- global _shutdown
- _shutdown = True
- items = list(_threads_queues.items())
- for t, q in items:
- q.put(None)
- for t, q in items:
- t.join()
-
-atexit.register(_python_exit)
-
-class _WorkItem(object):
- def __init__(self, future, fn, args, kwargs):
- self.future = future
- self.fn = fn
- self.args = args
- self.kwargs = kwargs
-
- def run(self):
- if not self.future.set_running_or_notify_cancel():
- return
-
- try:
- result = self.fn(*self.args, **self.kwargs)
- except BaseException:
- e = sys.exc_info()[1]
- self.future.set_exception(e)
- else:
- self.future.set_result(result)
-
-def _worker(executor_reference, work_queue):
- try:
- while True:
- work_item = work_queue.get(block=True)
- if work_item is not None:
- work_item.run()
- continue
- executor = executor_reference()
- # Exit if:
- # - The interpreter is shutting down OR
- # - The executor that owns the worker has been collected OR
- # - The executor that owns the worker has been shutdown.
- if _shutdown or executor is None or executor._shutdown:
- # Notify other workers
- work_queue.put(None)
- return
- del executor
- except BaseException:
- _base.LOGGER.critical('Exception in worker', exc_info=True)
-
-class ThreadPoolExecutor(_base.Executor):
- def __init__(self, max_workers):
- """Initializes a new ThreadPoolExecutor instance.
-
- Args:
- max_workers: The maximum number of threads that can be used to
- execute the given calls.
- """
- self._max_workers = max_workers
- self._work_queue = queue.Queue()
- self._threads = set()
- self._shutdown = False
- self._shutdown_lock = threading.Lock()
-
- def submit(self, fn, *args, **kwargs):
- with self._shutdown_lock:
- if self._shutdown:
- raise RuntimeError('cannot schedule new futures after shutdown')
-
- name = None
- if 'name' in kwargs:
- name = kwargs.pop('name')
-
- f = _base.Future()
- w = _WorkItem(f, fn, args, kwargs)
-
- self._work_queue.put(w)
-
- self._adjust_thread_count(name)
- return f
- submit.__doc__ = _base.Executor.submit.__doc__
-
- def _adjust_thread_count(self, name=None):
- # When the executor gets lost, the weakref callback will wake up
- # the worker threads.
- def weakref_cb(_, q=self._work_queue):
- q.put(None)
- # TODO(bquinlan): Should avoid creating new threads if there are more
- # idle threads than items in the work queue.
- if len(self._threads) < self._max_workers:
- t = threading.Thread(target=_worker,
- args=(weakref.ref(self, weakref_cb),
- self._work_queue),)
- if name:
- t.name = name
- t.daemon = True
- t.start()
- self._threads.add(t)
- _threads_queues[t] = self._work_queue
-
- def shutdown(self, wait=True):
- with self._shutdown_lock:
- self._shutdown = True
- self._work_queue.put(None)
- if wait:
- for t in self._threads:
- t.join()
- shutdown.__doc__ = _base.Executor.shutdown.__doc__
diff --git a/lib/futures/__init__.py b/lib/futures/__init__.py
deleted file mode 100644
index 8f8b2348..00000000
--- a/lib/futures/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2009 Brian Quinlan. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Execute computations asynchronously using threads or processes."""
-
-import warnings
-
-from concurrent.futures import (FIRST_COMPLETED,
- FIRST_EXCEPTION,
- ALL_COMPLETED,
- CancelledError,
- TimeoutError,
- Future,
- Executor,
- wait,
- as_completed,
- ProcessPoolExecutor,
- ThreadPoolExecutor)
-
-__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-
-warnings.warn('The futures package has been deprecated. '
- 'Use the concurrent.futures package instead.',
- DeprecationWarning)
diff --git a/lib/futures/process.py b/lib/futures/process.py
deleted file mode 100644
index e9d37b16..00000000
--- a/lib/futures/process.py
+++ /dev/null
@@ -1 +0,0 @@
-from concurrent.futures import ProcessPoolExecutor
diff --git a/lib/futures/thread.py b/lib/futures/thread.py
deleted file mode 100644
index f6bd05de..00000000
--- a/lib/futures/thread.py
+++ /dev/null
@@ -1 +0,0 @@
-from concurrent.futures import ThreadPoolExecutor
diff --git a/lib/shove/__init__.py b/lib/shove/__init__.py
deleted file mode 100644
index 3be119b4..00000000
--- a/lib/shove/__init__.py
+++ /dev/null
@@ -1,519 +0,0 @@
-# -*- coding: utf-8 -*-
-'''Common object storage frontend.'''
-
-import os
-import zlib
-import urllib
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-from collections import deque
-
-try:
- # Import store and cache entry points if setuptools installed
- import pkg_resources
- stores = dict((_store.name, _store) for _store in
- pkg_resources.iter_entry_points('shove.stores'))
- caches = dict((_cache.name, _cache) for _cache in
- pkg_resources.iter_entry_points('shove.caches'))
- # Pass if nothing loaded
- if not stores and not caches:
- raise ImportError()
-except ImportError:
- # Static store backend registry
- stores = dict(
- bsddb='shove.store.bsdb:BsdStore',
- cassandra='shove.store.cassandra:CassandraStore',
- dbm='shove.store.dbm:DbmStore',
- durus='shove.store.durusdb:DurusStore',
- file='shove.store.file:FileStore',
- firebird='shove.store.db:DbStore',
- ftp='shove.store.ftp:FtpStore',
- hdf5='shove.store.hdf5:HDF5Store',
- leveldb='shove.store.leveldbstore:LevelDBStore',
- memory='shove.store.memory:MemoryStore',
- mssql='shove.store.db:DbStore',
- mysql='shove.store.db:DbStore',
- oracle='shove.store.db:DbStore',
- postgres='shove.store.db:DbStore',
- redis='shove.store.redisdb:RedisStore',
- s3='shove.store.s3:S3Store',
- simple='shove.store.simple:SimpleStore',
- sqlite='shove.store.db:DbStore',
- svn='shove.store.svn:SvnStore',
- zodb='shove.store.zodb:ZodbStore',
- )
- # Static cache backend registry
- caches = dict(
- bsddb='shove.cache.bsdb:BsdCache',
- file='shove.cache.file:FileCache',
- filelru='shove.cache.filelru:FileLRUCache',
- firebird='shove.cache.db:DbCache',
- memcache='shove.cache.memcached:MemCached',
- memlru='shove.cache.memlru:MemoryLRUCache',
- memory='shove.cache.memory:MemoryCache',
- mssql='shove.cache.db:DbCache',
- mysql='shove.cache.db:DbCache',
- oracle='shove.cache.db:DbCache',
- postgres='shove.cache.db:DbCache',
- redis='shove.cache.redisdb:RedisCache',
- simple='shove.cache.simple:SimpleCache',
- simplelru='shove.cache.simplelru:SimpleLRUCache',
- sqlite='shove.cache.db:DbCache',
- )
-
-
-def getbackend(uri, engines, **kw):
- '''
- Loads the right backend based on a URI.
-
- @param uri Instance or name string
- @param engines A dictionary of scheme/class pairs
- '''
- if isinstance(uri, basestring):
- mod = engines[uri.split('://', 1)[0]]
- # Load module if setuptools not present
- if isinstance(mod, basestring):
- # Isolate classname from dot path
- module, klass = mod.split(':')
- # Load module
- mod = getattr(__import__(module, '', '', ['']), klass)
- # Load appropriate class from setuptools entry point
- else:
- mod = mod.load()
- # Return instance
- return mod(uri, **kw)
- # No-op for existing instances
- return uri
-
-
-def synchronized(func):
- '''
- Decorator to lock and unlock a method (Phillip J. Eby).
-
- @param func Method to decorate
- '''
- def wrapper(self, *__args, **__kw):
- self._lock.acquire()
- try:
- return func(self, *__args, **__kw)
- finally:
- self._lock.release()
- wrapper.__name__ = func.__name__
- wrapper.__dict__ = func.__dict__
- wrapper.__doc__ = func.__doc__
- return wrapper
-
-
-class Base(object):
-
- '''Base Mapping class.'''
-
- def __init__(self, engine, **kw):
- '''
- @keyword compress True, False, or an integer compression level (1-9).
- '''
- self._compress = kw.get('compress', False)
- self._protocol = kw.get('protocol', pickle.HIGHEST_PROTOCOL)
-
- def __getitem__(self, key):
- raise NotImplementedError()
-
- def __setitem__(self, key, value):
- raise NotImplementedError()
-
- def __delitem__(self, key):
- raise NotImplementedError()
-
- def __contains__(self, key):
- try:
- value = self[key]
- except KeyError:
- return False
- return True
-
- def get(self, key, default=None):
- '''
- Fetch a given key from the mapping. If the key does not exist,
- return the default.
-
- @param key Keyword of item in mapping.
- @param default Default value (default: None)
- '''
- try:
- return self[key]
- except KeyError:
- return default
-
- def dumps(self, value):
- '''Optionally serializes and compresses an object.'''
- # Serialize everything but ASCII strings
- value = pickle.dumps(value, protocol=self._protocol)
- if self._compress:
- level = 9 if self._compress is True else self._compress
- value = zlib.compress(value, level)
- return value
-
- def loads(self, value):
- '''Deserializes and optionally decompresses an object.'''
- if self._compress:
- try:
- value = zlib.decompress(value)
- except zlib.error:
- pass
- value = pickle.loads(value)
- return value
-
-
-class BaseStore(Base):
-
- '''Base Store class (based on UserDict.DictMixin).'''
-
- def __init__(self, engine, **kw):
- super(BaseStore, self).__init__(engine, **kw)
- self._store = None
-
- def __cmp__(self, other):
- if other is None:
- return False
- if isinstance(other, BaseStore):
- return cmp(dict(self.iteritems()), dict(other.iteritems()))
-
- def __del__(self):
- # __init__ didn't succeed, so don't bother closing
- if not hasattr(self, '_store'):
- return
- self.close()
-
- def __iter__(self):
- for k in self.keys():
- yield k
-
- def __len__(self):
- return len(self.keys())
-
- def __repr__(self):
- return repr(dict(self.iteritems()))
-
- def close(self):
- '''Closes internal store and clears object references.'''
- try:
- self._store.close()
- except AttributeError:
- pass
- self._store = None
-
- def clear(self):
- '''Removes all keys and values from a store.'''
- for key in self.keys():
- del self[key]
-
- def items(self):
- '''Returns a list with all key/value pairs in the store.'''
- return list(self.iteritems())
-
- def iteritems(self):
- '''Lazily returns all key/value pairs in a store.'''
- for k in self:
- yield (k, self[k])
-
- def iterkeys(self):
- '''Lazily returns all keys in a store.'''
- return self.__iter__()
-
- def itervalues(self):
- '''Lazily returns all values in a store.'''
- for _, v in self.iteritems():
- yield v
-
- def keys(self):
- '''Returns a list with all keys in a store.'''
- raise NotImplementedError()
-
- def pop(self, key, *args):
- '''
- Removes and returns a value from a store.
-
- @param args Default to return if key not present.
- '''
- if len(args) > 1:
- raise TypeError('pop expected at most 2 arguments, got ' + repr(
- 1 + len(args))
- )
- try:
- value = self[key]
- # Return default if key not in store
- except KeyError:
- if args:
- return args[0]
- del self[key]
- return value
-
- def popitem(self):
- '''Removes and returns a key, value pair from a store.'''
- try:
- k, v = self.iteritems().next()
- except StopIteration:
- raise KeyError('Store is empty.')
- del self[k]
- return (k, v)
-
- def setdefault(self, key, default=None):
- '''
- Returns the value corresponding to an existing key or sets the
- key to the default and returns the default.
-
- @param default Default value (default: None)
- '''
- try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
-
- def update(self, other=None, **kw):
- '''
- Adds to or overwrites the values in this store with values from
- another store.
-
- other Another store
- kw Additional keys and values to store
- '''
- if other is None:
- pass
- elif hasattr(other, 'iteritems'):
- for k, v in other.iteritems():
- self[k] = v
- elif hasattr(other, 'keys'):
- for k in other.keys():
- self[k] = other[k]
- else:
- for k, v in other:
- self[k] = v
- if kw:
- self.update(kw)
-
- def values(self):
- '''Returns a list with all values in a store.'''
- return list(v for _, v in self.iteritems())
-
-
-class Shove(BaseStore):
-
- '''Common object frontend class.'''
-
- def __init__(self, store='simple://', cache='simple://', **kw):
- super(Shove, self).__init__(store, **kw)
- # Load store
- self._store = getbackend(store, stores, **kw)
- # Load cache
- self._cache = getbackend(cache, caches, **kw)
- # Buffer for lazy writing and setting for syncing frequency
- self._buffer, self._sync = dict(), kw.get('sync', 2)
-
- def __getitem__(self, key):
- '''Gets an item from shove.'''
- try:
- return self._cache[key]
- except KeyError:
- # Synchronize cache and store
- self.sync()
- value = self._store[key]
- self._cache[key] = value
- return value
-
- def __setitem__(self, key, value):
- '''Sets an item in shove.'''
- self._cache[key] = self._buffer[key] = value
- # When the buffer reaches self._sync items, write the buffer to the store
- if len(self._buffer) >= self._sync:
- self.sync()
-
- def __delitem__(self, key):
- '''Deletes an item from shove.'''
- try:
- del self._cache[key]
- except KeyError:
- pass
- self.sync()
- del self._store[key]
-
- def keys(self):
- '''Returns a list of keys in shove.'''
- self.sync()
- return self._store.keys()
-
- def sync(self):
- '''Writes buffer to store.'''
- for k, v in self._buffer.iteritems():
- self._store[k] = v
- self._buffer.clear()
-
- def close(self):
- '''Finalizes and closes shove.'''
- # If close has been called, pass
- if self._store is not None:
- try:
- self.sync()
- except AttributeError:
- pass
- self._store.close()
- self._store = self._cache = self._buffer = None
-
-
-class FileBase(Base):
-
- '''Base class for file based storage.'''
-
- def __init__(self, engine, **kw):
- super(FileBase, self).__init__(engine, **kw)
- if engine.startswith('file://'):
- engine = urllib.url2pathname(engine.split('://')[1])
- self._dir = engine
- # Create directory
- if not os.path.exists(self._dir):
- self._createdir()
-
- def __getitem__(self, key):
- # (per Larry Meyn)
- try:
- item = open(self._key_to_file(key), 'rb')
- data = item.read()
- item.close()
- return self.loads(data)
- except:
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- # (per Larry Meyn)
- try:
- item = open(self._key_to_file(key), 'wb')
- item.write(self.dumps(value))
- item.close()
- except (IOError, OSError):
- raise KeyError(key)
-
- def __delitem__(self, key):
- try:
- os.remove(self._key_to_file(key))
- except (IOError, OSError):
- raise KeyError(key)
-
- def __contains__(self, key):
- return os.path.exists(self._key_to_file(key))
-
- def __len__(self):
- return len(os.listdir(self._dir))
-
- def _createdir(self):
- '''Creates the store directory.'''
- try:
- os.makedirs(self._dir)
- except OSError:
- raise EnvironmentError(
- 'Cache directory "%s" does not exist and ' \
- 'could not be created' % self._dir
- )
-
- def _key_to_file(self, key):
- '''Gives the filesystem path for a key.'''
- return os.path.join(self._dir, urllib.quote_plus(key))
-
- def keys(self):
- '''Returns a list of keys in the store.'''
- return [urllib.unquote_plus(name) for name in os.listdir(self._dir)]
-
-
-class SimpleBase(Base):
-
- '''Single-process in-memory store base class.'''
-
- def __init__(self, engine, **kw):
- super(SimpleBase, self).__init__(engine, **kw)
- self._store = dict()
-
- def __getitem__(self, key):
- try:
- return self._store[key]
- except:
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- self._store[key] = value
-
- def __delitem__(self, key):
- try:
- del self._store[key]
- except:
- raise KeyError(key)
-
- def __len__(self):
- return len(self._store)
-
- def keys(self):
- '''Returns a list of keys in the store.'''
- return self._store.keys()
-
-
-class LRUBase(SimpleBase):
-
- def __init__(self, engine, **kw):
- super(LRUBase, self).__init__(engine, **kw)
- self._max_entries = kw.get('max_entries', 300)
- self._hits = 0
- self._misses = 0
- self._queue = deque()
- self._refcount = dict()
-
- def __getitem__(self, key):
- try:
- value = super(LRUBase, self).__getitem__(key)
- self._hits += 1
- except KeyError:
- self._misses += 1
- raise
- self._housekeep(key)
- return value
-
- def __setitem__(self, key, value):
- super(LRUBase, self).__setitem__(key, value)
- self._housekeep(key)
- if len(self._store) > self._max_entries:
- while len(self._store) > self._max_entries:
- k = self._queue.popleft()
- self._refcount[k] -= 1
- if not self._refcount[k]:
- super(LRUBase, self).__delitem__(k)
- del self._refcount[k]
-
- def _housekeep(self, key):
- self._queue.append(key)
- self._refcount[key] = self._refcount.get(key, 0) + 1
- if len(self._queue) > self._max_entries * 4:
- self._purge_queue()
-
- def _purge_queue(self):
- for i in [None] * len(self._queue):
- k = self._queue.popleft()
- if self._refcount[k] == 1:
- self._queue.append(k)
- else:
- self._refcount[k] -= 1
-
-
-class DbBase(Base):
-
- '''Database common base class.'''
-
- def __init__(self, engine, **kw):
- super(DbBase, self).__init__(engine, **kw)
-
- def __delitem__(self, key):
- self._store.delete(self._store.c.key == key).execute()
-
- def __len__(self):
- return self._store.count().execute().fetchone()[0]
-
-
-__all__ = ['Shove']
diff --git a/lib/shove/cache/__init__.py b/lib/shove/cache/__init__.py
deleted file mode 100644
index 40a96afc..00000000
--- a/lib/shove/cache/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/lib/shove/cache/db.py b/lib/shove/cache/db.py
deleted file mode 100644
index 21fea01f..00000000
--- a/lib/shove/cache/db.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Database object cache.
-
-The shove pseudo-URL used for database object caches is the format used by
-SQLAlchemy:
-
-<driver>://<username>:<password>@<host>:<port>/<database>
-
-<driver> is the database engine. The engines currently supported by SQLAlchemy
-are sqlite, mysql, postgres, oracle, mssql, and firebird.
-<username> is the database account user name
-<password> is the database account password
-<host> is the database location
-<port> is the database port
-<database> is the name of the specific database
-
-For more information on specific databases see:
-
-http://www.sqlalchemy.org/docs/dbengine.myt#dbengine_supported
-'''
-
-import time
-import random
-from datetime import datetime
-try:
- from sqlalchemy import (
- MetaData, Table, Column, String, Binary, DateTime, select, update,
- insert, delete,
- )
- from shove import DbBase
-except ImportError:
- raise ImportError('Requires SQLAlchemy >= 0.4')
-
-__all__ = ['DbCache']
-
-
-class DbCache(DbBase):
-
- '''database cache backend'''
-
- def __init__(self, engine, **kw):
- super(DbCache, self).__init__(engine, **kw)
- # Get table name
- tablename = kw.get('tablename', 'cache')
- # Bind metadata
- self._metadata = MetaData(engine)
- # Make cache table
- self._store = Table(tablename, self._metadata,
- Column('key', String(60), primary_key=True, nullable=False),
- Column('value', Binary, nullable=False),
- Column('expires', DateTime, nullable=False),
- )
- # Create cache table if it does not exist
- if not self._store.exists():
- self._store.create()
- # Set maximum entries
- self._max_entries = kw.get('max_entries', 300)
- # Maximum number of entries to cull per call if cache is full
- self._maxcull = kw.get('maxcull', 10)
- # Set timeout
- self.timeout = kw.get('timeout', 300)
-
- def __getitem__(self, key):
- row = select(
- [self._store.c.value, self._store.c.expires],
- self._store.c.key == key
- ).execute().fetchone()
- if row is not None:
- # Remove if item expired
- if row.expires < datetime.now().replace(microsecond=0):
- del self[key]
- raise KeyError(key)
- return self.loads(str(row.value))
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- timeout, value, cache = self.timeout, self.dumps(value), self._store
- # Cull if too many items
- if len(self) >= self._max_entries:
- self._cull()
- # Generate expiration time
- expires = datetime.fromtimestamp(
- time.time() + timeout
- ).replace(microsecond=0)
- # Update database if key already present
- if key in self:
- update(
- cache,
- cache.c.key == key,
- dict(value=value, expires=expires),
- ).execute()
- # Insert new key if key not present
- else:
- insert(
- cache, dict(key=key, value=value, expires=expires)
- ).execute()
-
- def _cull(self):
- '''Remove items in cache to make more room.'''
- cache, maxcull = self._store, self._maxcull
- # Remove items that have timed out
- now = datetime.now().replace(microsecond=0)
- delete(cache, cache.c.expires < now).execute()
- # Remove any items over the maximum allowed number in the cache
- if len(self) >= self._max_entries:
- # Upper limit for key query
- ul = maxcull * 2
- # Get list of keys
- keys = [
- i[0] for i in select(
- [cache.c.key], limit=ul
- ).execute().fetchall()
- ]
- # Get some keys at random
- delkeys = list(random.choice(keys) for i in xrange(maxcull))
- delete(cache, cache.c.key.in_(delkeys)).execute()
diff --git a/lib/shove/cache/file.py b/lib/shove/cache/file.py
deleted file mode 100644
index 7b9a4ae7..00000000
--- a/lib/shove/cache/file.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-File-based cache
-
-shove's pseudo-URL for file caches follows the form:
-
-file://<path>
-
-Where the path is a URL path to a directory on a local filesystem.
-Alternatively, a native pathname to the directory can be passed as the 'engine'
-argument.
-'''
-
-import time
-
-from shove import FileBase
-from shove.cache.simple import SimpleCache
-
-
-class FileCache(FileBase, SimpleCache):
-
- '''File-based cache backend'''
-
- def __init__(self, engine, **kw):
- super(FileCache, self).__init__(engine, **kw)
-
- def __getitem__(self, key):
- try:
- exp, value = super(FileCache, self).__getitem__(key)
- # Remove item if time has expired.
- if exp < time.time():
- del self[key]
- raise KeyError(key)
- return value
- except:
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- if len(self) >= self._max_entries:
- self._cull()
- super(FileCache, self).__setitem__(
- key, (time.time() + self.timeout, value)
- )
-
-
-__all__ = ['FileCache']
diff --git a/lib/shove/cache/filelru.py b/lib/shove/cache/filelru.py
deleted file mode 100644
index de076613..00000000
--- a/lib/shove/cache/filelru.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-File-based LRU cache
-
-shove's pseudo-URL for file caches follows the form:
-
-file://<path>
-
-Where the path is a URL path to a directory on a local filesystem.
-Alternatively, a native pathname to the directory can be passed as the 'engine'
-argument.
-'''
-
-from shove import FileBase
-from shove.cache.simplelru import SimpleLRUCache
-
-
-class FileCache(FileBase, SimpleLRUCache):
-
- '''File-based LRU cache backend'''
-
-
-__all__ = ['FileCache']
diff --git a/lib/shove/cache/memcached.py b/lib/shove/cache/memcached.py
deleted file mode 100644
index aedfe282..00000000
--- a/lib/shove/cache/memcached.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-"memcached" cache.
-
-The shove pseudo-URL for a memcache cache is:
-
-memcache://<memcache_server>
-'''
-
-try:
- import memcache
-except ImportError:
- raise ImportError("Memcache cache requires the 'memcache' library")
-
-from shove import Base
-
-
-class MemCached(Base):
-
- '''Memcached cache backend'''
-
- def __init__(self, engine, **kw):
- super(MemCached, self).__init__(engine, **kw)
- if engine.startswith('memcache://'):
- engine = engine.split('://')[1]
- self._store = memcache.Client(engine.split(';'))
- # Set timeout
- self.timeout = kw.get('timeout', 300)
-
- def __getitem__(self, key):
- value = self._store.get(key)
- if value is None:
- raise KeyError(key)
- return self.loads(value)
-
- def __setitem__(self, key, value):
- self._store.set(key, self.dumps(value), self.timeout)
-
- def __delitem__(self, key):
- self._store.delete(key)
-
-
-__all__ = ['MemCached']
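# A minimal usage sketch for the MemCached backend above, assuming the
# 'memcache' client library is installed and a memcached server is listening
# on localhost:11211 (an illustrative address).
from shove.cache.memcached import MemCached

cache = MemCached('memcache://localhost:11211', timeout=300)
cache['session'] = {'user': 'alice'}    # pickled before being sent to memcached
assert cache['session'] == {'user': 'alice'}
del cache['session']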
diff --git a/lib/shove/cache/memlru.py b/lib/shove/cache/memlru.py
deleted file mode 100644
index 7db61ec5..00000000
--- a/lib/shove/cache/memlru.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Thread-safe in-memory cache using LRU.
-
-The shove pseudo-URL for a memory cache is:
-
-memlru://
-'''
-
-import copy
-import threading
-
-from shove import synchronized
-from shove.cache.simplelru import SimpleLRUCache
-
-
-class MemoryLRUCache(SimpleLRUCache):
-
- '''Thread-safe in-memory cache backend using LRU.'''
-
- def __init__(self, engine, **kw):
- super(MemoryLRUCache, self).__init__(engine, **kw)
- self._lock = threading.Condition()
-
- @synchronized
- def __setitem__(self, key, value):
- super(MemoryLRUCache, self).__setitem__(key, value)
-
- @synchronized
- def __getitem__(self, key):
- return copy.deepcopy(super(MemoryLRUCache, self).__getitem__(key))
-
- @synchronized
- def __delitem__(self, key):
- super(MemoryLRUCache, self).__delitem__(key)
-
-
-__all__ = ['MemoryLRUCache']
diff --git a/lib/shove/cache/memory.py b/lib/shove/cache/memory.py
deleted file mode 100644
index e70f9bbb..00000000
--- a/lib/shove/cache/memory.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Thread-safe in-memory cache.
-
-The shove pseudo-URL for a memory cache is:
-
-memory://
-'''
-
-import copy
-import threading
-
-from shove import synchronized
-from shove.cache.simple import SimpleCache
-
-
-class MemoryCache(SimpleCache):
-
- '''Thread-safe in-memory cache backend.'''
-
- def __init__(self, engine, **kw):
- super(MemoryCache, self).__init__(engine, **kw)
- self._lock = threading.Condition()
-
- @synchronized
- def __setitem__(self, key, value):
- super(MemoryCache, self).__setitem__(key, value)
-
- @synchronized
- def __getitem__(self, key):
- return copy.deepcopy(super(MemoryCache, self).__getitem__(key))
-
- @synchronized
- def __delitem__(self, key):
- super(MemoryCache, self).__delitem__(key)
-
-
-__all__ = ['MemoryCache']
diff --git a/lib/shove/cache/redisdb.py b/lib/shove/cache/redisdb.py
deleted file mode 100644
index c53536c1..00000000
--- a/lib/shove/cache/redisdb.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Redis-based object cache
-
-The shove pseudo-URL for a redis cache is:
-
-redis://<host>:<port>/<db>
-'''
-
-import urlparse
-
-try:
- import redis
-except ImportError:
- raise ImportError('This store requires the redis library')
-
-from shove import Base
-
-
-class RedisCache(Base):
-
- '''Redis cache backend'''
-
- init = 'redis://'
-
- def __init__(self, engine, **kw):
- super(RedisCache, self).__init__(engine, **kw)
- spliturl = urlparse.urlsplit(engine)
- host, port = spliturl[1].split(':')
- db = spliturl[2].replace('/', '')
- self._store = redis.Redis(host, int(port), db)
- # Set timeout
- self.timeout = kw.get('timeout', 300)
-
- def __getitem__(self, key):
- return self.loads(self._store[key])
-
- def __setitem__(self, key, value):
- self._store.setex(key, self.dumps(value), self.timeout)
-
- def __delitem__(self, key):
- self._store.delete(key)
-
-
-__all__ = ['RedisCache']
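# A minimal usage sketch for the RedisCache backend above, assuming the
# 'redis' client library is installed and a redis server is listening on
# localhost:6379 (an illustrative address); database 0 comes from the URL path.
from shove.cache.redisdb import RedisCache

cache = RedisCache('redis://localhost:6379/0', timeout=120)
cache['greeting'] = 'hello'    # stored with SETEX so redis expires it itself
assert cache['greeting'] == 'hello'
del cache['greeting']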
diff --git a/lib/shove/cache/simple.py b/lib/shove/cache/simple.py
deleted file mode 100644
index 6855603e..00000000
--- a/lib/shove/cache/simple.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Single-process in-memory cache.
-
-The shove pseudo-URL for a simple cache is:
-
-simple://
-'''
-
-import time
-import random
-
-from shove import SimpleBase
-
-
-class SimpleCache(SimpleBase):
-
- '''Single-process in-memory cache.'''
-
- def __init__(self, engine, **kw):
- super(SimpleCache, self).__init__(engine, **kw)
- # Get random seed
- random.seed()
- # Set maximum number of items to cull if over max
- self._maxcull = kw.get('maxcull', 10)
- # Set max entries
- self._max_entries = kw.get('max_entries', 300)
- # Set timeout
- self.timeout = kw.get('timeout', 300)
-
- def __getitem__(self, key):
- exp, value = super(SimpleCache, self).__getitem__(key)
- # Delete if item timed out.
- if exp < time.time():
- super(SimpleCache, self).__delitem__(key)
- raise KeyError(key)
- return value
-
- def __setitem__(self, key, value):
- # Cull values if over max # of entries
- if len(self) >= self._max_entries:
- self._cull()
- # Set expiration time and value
- exp = time.time() + self.timeout
- super(SimpleCache, self).__setitem__(key, (exp, value))
-
- def _cull(self):
- '''Remove items in cache to make room.'''
- num, maxcull = 0, self._maxcull
- # Cull number of items allowed (set by self._maxcull)
- for key in self.keys():
- # Remove only maximum # of items allowed by maxcull
- if num <= maxcull:
- # Remove items if expired
- try:
- self[key]
- except KeyError:
- num += 1
- else:
- break
- # Remove any additional items up to max # of items allowed by maxcull
- while len(self) >= self._max_entries and num <= maxcull:
- # Cull remainder of allowed quota at random
- del self[random.choice(self.keys())]
- num += 1
-
-
-__all__ = ['SimpleCache']
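# A behavior sketch for SimpleCache above: entries expire 'timeout' seconds
# after they are written, and a write that finds the cache at 'max_entries'
# triggers _cull(). The values below are illustrative.
import time
from shove.cache.simple import SimpleCache

cache = SimpleCache('simple://', timeout=1, max_entries=3, maxcull=2)
cache['a'] = 1
time.sleep(2)
try:
    cache['a']                 # expired entries raise KeyError on access
except KeyError:
    pass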
diff --git a/lib/shove/cache/simplelru.py b/lib/shove/cache/simplelru.py
deleted file mode 100644
index fbb6e446..00000000
--- a/lib/shove/cache/simplelru.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Single-process in-memory LRU cache.
-
-The shove pseudo-URL for a simple LRU cache is:
-
-simplelru://
-'''
-
-from shove import LRUBase
-
-
-class SimpleLRUCache(LRUBase):
-
- '''In-memory cache that purges based on least recently used item.'''
-
-
-__all__ = ['SimpleLRUCache']
diff --git a/lib/shove/store/__init__.py b/lib/shove/store/__init__.py
deleted file mode 100644
index 5d639a07..00000000
--- a/lib/shove/store/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from urllib import url2pathname
-from shove.store.simple import SimpleStore
-
-
-class ClientStore(SimpleStore):
-
- '''Base class for stores where updates have to be committed.'''
-
- def __init__(self, engine, **kw):
- super(ClientStore, self).__init__(engine, **kw)
- if engine.startswith(self.init):
- self._engine = url2pathname(engine.split('://')[1])
-
- def __getitem__(self, key):
- return self.loads(super(ClientStore, self).__getitem__(key))
-
- def __setitem__(self, key, value):
- super(ClientStore, self).__setitem__(key, self.dumps(value))
-
-
-class SyncStore(ClientStore):
-
- '''Base class for stores where updates have to be committed.'''
-
- def __getitem__(self, key):
- return self.loads(super(SyncStore, self).__getitem__(key))
-
- def __setitem__(self, key, value):
- super(SyncStore, self).__setitem__(key, value)
- try:
- self.sync()
- except AttributeError:
- pass
-
- def __delitem__(self, key):
- super(SyncStore, self).__delitem__(key)
- try:
- self.sync()
- except AttributeError:
- pass
-
-
-__all__ = [
- 'bsdb', 'db', 'dbm', 'durusdb', 'file', 'ftp', 'memory', 's3', 'simple',
- 'svn', 'zodb', 'redisdb', 'hdf5db', 'leveldbstore', 'cassandra',
-]
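# A sketch of how SyncStore above is meant to be extended (the concrete
# stores that follow use this pattern): a subclass names its URL scheme in
# 'init' and, when the backend buffers writes, exposes a sync() callable
# that SyncStore invokes after every mutation. 'demo://' is an invented
# scheme used only for illustration, and the in-memory backing dict is
# assumed to come from SimpleStore.
from shove.store import SyncStore


class DemoStore(SyncStore):

    '''In-memory store that only counts how often sync() is invoked.'''

    init = 'demo://'

    def __init__(self, engine, **kw):
        super(DemoStore, self).__init__(engine, **kw)
        self.commits = 0

    def sync(self):
        # Called by SyncStore.__setitem__ and SyncStore.__delitem__
        self.commits += 1


store = DemoStore('demo://unused')
store['max'] = 3
del store['max']
assert store.commits == 2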
diff --git a/lib/shove/store/bsdb.py b/lib/shove/store/bsdb.py
deleted file mode 100644
index d1f9c6dc..00000000
--- a/lib/shove/store/bsdb.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Berkeley Source Database Store.
-
-shove's pseudo-URL for BSDDB stores follows the form:
-
-bsddb://<path>
-
-Where the path is a URL path to a Berkeley database. Alternatively, the native
-pathname to a Berkeley database can be passed as the 'engine' parameter.
-'''
-try:
- import bsddb
-except ImportError:
- raise ImportError('requires bsddb library')
-
-import threading
-
-from shove import synchronized
-from shove.store import SyncStore
-
-
-class BsdStore(SyncStore):
-
- '''Class for Berkeley Source Database Store.'''
-
- init = 'bsddb://'
-
- def __init__(self, engine, **kw):
- super(BsdStore, self).__init__(engine, **kw)
- self._store = bsddb.hashopen(self._engine)
- self._lock = threading.Condition()
- self.sync = self._store.sync
-
- @synchronized
- def __getitem__(self, key):
- return super(BsdStore, self).__getitem__(key)
-
- @synchronized
- def __setitem__(self, key, value):
- super(BsdStore, self).__setitem__(key, value)
-
- @synchronized
- def __delitem__(self, key):
- super(BsdStore, self).__delitem__(key)
-
-
-__all__ = ['BsdStore']
diff --git a/lib/shove/store/cassandra.py b/lib/shove/store/cassandra.py
deleted file mode 100644
index 1f6532ee..00000000
--- a/lib/shove/store/cassandra.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Cassandra-based object store
-
-The shove pseudo-URL for a cassandra-based store is:
-
-cassandra://<host>:<port>/<keyspace>/<column_family>
-'''
-
-import urlparse
-
-try:
- import pycassa
-except ImportError:
- raise ImportError('This store requires the pycassa library')
-
-from shove import BaseStore
-
-
-class CassandraStore(BaseStore):
-
- '''Cassandra based store'''
-
- init = 'cassandra://'
-
- def __init__(self, engine, **kw):
- super(CassandraStore, self).__init__(engine, **kw)
- spliturl = urlparse.urlsplit(engine)
- _, keyspace, column_family = spliturl[2].split('/')
- try:
- self._pool = pycassa.connect(keyspace, [spliturl[1]])
- self._store = pycassa.ColumnFamily(self._pool, column_family)
- except pycassa.InvalidRequestException:
- from pycassa.system_manager import SystemManager
- system_manager = SystemManager(spliturl[1])
- system_manager.create_keyspace(
- keyspace,
- pycassa.system_manager.SIMPLE_STRATEGY,
- {'replication_factor': str(kw.get('replication', 1))}
- )
- system_manager.create_column_family(keyspace, column_family)
- self._pool = pycassa.connect(keyspace, [spliturl[1]])
- self._store = pycassa.ColumnFamily(self._pool, column_family)
-
- def __getitem__(self, key):
- try:
- item = self._store.get(key).get(key)
- if item is not None:
- return self.loads(item)
- raise KeyError(key)
- except pycassa.NotFoundException:
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- self._store.insert(key, dict(key=self.dumps(value)))
-
- def __delitem__(self, key):
- # beware eventual consistency
- try:
- self._store.remove(key)
- except pycassa.NotFoundException:
- raise KeyError(key)
-
- def clear(self):
- # beware eventual consistency
- self._store.truncate()
-
- def keys(self):
- return list(i[0] for i in self._store.get_range())
-
-
-__all__ = ['CassandraStore']
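# A usage sketch for CassandraStore above via the Shove front end, mirroring
# the pseudo-URL that the cassandra test case below exercises; the host,
# keyspace 'Foo' and column family 'shove' are illustrative, and pycassa must
# be installed with a Cassandra node reachable on localhost:9160.
from shove import Shove

store = Shove('cassandra://localhost:9160/Foo/shove')
store['max'] = 3
assert store['max'] == 3
store.clear()
store.close()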
diff --git a/lib/shove/store/db.py b/lib/shove/store/db.py
deleted file mode 100644
index 0004e6f8..00000000
--- a/lib/shove/store/db.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Database object store.
-
-The shove pseudo-URL used for database object stores is the format used by
-SQLAlchemy:
-
-<engine>://<username>:<password>@<host>:<port>/<database>
-
-<engine> is the database engine. The engines currently supported by SQLAlchemy
-are sqlite, mysql, postgres, oracle, mssql, and firebird.
-<username> is the database account user name
-<password> is the database account password
-<host> is the database location
-<port> is the database port
-<database> is the name of the specific database
-
-For more information on specific databases see:
-
-http://www.sqlalchemy.org/docs/dbengine.myt#dbengine_supported
-'''
-
-try:
- from sqlalchemy import MetaData, Table, Column, String, Binary, select
- from shove import BaseStore, DbBase
-except ImportError, e:
-    raise ImportError('Error: %s. Requires SQLAlchemy >= 0.4' % e)
-
-
-class DbStore(BaseStore, DbBase):
-
- '''Database cache backend.'''
-
- def __init__(self, engine, **kw):
- super(DbStore, self).__init__(engine, **kw)
- # Get tablename
- tablename = kw.get('tablename', 'store')
- # Bind metadata
- self._metadata = MetaData(engine)
- # Make store table
- self._store = Table(tablename, self._metadata,
- Column('key', String(255), primary_key=True, nullable=False),
- Column('value', Binary, nullable=False),
- )
- # Create store table if it does not exist
- if not self._store.exists():
- self._store.create()
-
- def __getitem__(self, key):
- row = select(
- [self._store.c.value], self._store.c.key == key,
- ).execute().fetchone()
- if row is not None:
- return self.loads(str(row.value))
- raise KeyError(key)
-
- def __setitem__(self, k, v):
- v, store = self.dumps(v), self._store
- # Update database if key already present
- if k in self:
- store.update(store.c.key == k).execute(value=v)
- # Insert new key if key not present
- else:
- store.insert().execute(key=k, value=v)
-
- def keys(self):
- '''Returns a list of keys in the store.'''
- return list(i[0] for i in select(
- [self._store.c.key]
- ).execute().fetchall())
-
-
-__all__ = ['DbStore']
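# A usage sketch for DbStore above via the Shove front end, using the same
# in-memory SQLite pseudo-URL as the database store tests below; requires
# SQLAlchemy >= 0.4.
from shove import Shove

store = Shove('sqlite://', compress=True)
store['max'] = 3
assert store['max'] == 3
store.close()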
diff --git a/lib/shove/store/dbm.py b/lib/shove/store/dbm.py
deleted file mode 100644
index 323d2484..00000000
--- a/lib/shove/store/dbm.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-DBM Database Store.
-
-shove's pseudo-URL for DBM stores follows the form:
-
-dbm://<path>
-
-Where <path> is a URL path to a DBM database. Alternatively, the native
-pathname to a DBM database can be passed as the 'engine' parameter.
-'''
-
-import anydbm
-
-from shove.store import SyncStore
-
-
-class DbmStore(SyncStore):
-
- '''Class for variants of the DBM database.'''
-
- init = 'dbm://'
-
- def __init__(self, engine, **kw):
- super(DbmStore, self).__init__(engine, **kw)
- self._store = anydbm.open(self._engine, 'c')
- try:
- self.sync = self._store.sync
- except AttributeError:
- pass
-
-
-__all__ = ['DbmStore']
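# A usage sketch for DbmStore above via the Shove front end, matching the
# DBM store tests below; 'test.dbm' is a throwaway file created in the
# working directory.
from shove import Shove

store = Shove('dbm://test.dbm', compress=True)
store['max'] = 3
assert store['max'] == 3
store.close()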
diff --git a/lib/shove/store/durusdb.py b/lib/shove/store/durusdb.py
deleted file mode 100644
index 8e27670e..00000000
--- a/lib/shove/store/durusdb.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Durus object database frontend.
-
-shove's pseudo-URL for Durus stores follows the form:
-
-durus://<path>
-
-
-Where the path is a URL path to a durus FileStorage database. Alternatively, a
-native pathname to a durus database can be passed as the 'engine' parameter.
-'''
-
-try:
- from durus.connection import Connection
- from durus.file_storage import FileStorage
-except ImportError:
- raise ImportError('Requires Durus library')
-
-from shove.store import SyncStore
-
-
-class DurusStore(SyncStore):
-
- '''Class for Durus object database frontend.'''
-
- init = 'durus://'
-
- def __init__(self, engine, **kw):
- super(DurusStore, self).__init__(engine, **kw)
- self._db = FileStorage(self._engine)
- self._connection = Connection(self._db)
- self.sync = self._connection.commit
- self._store = self._connection.get_root()
-
- def close(self):
- '''Closes all open storage and connections.'''
- self.sync()
- self._db.close()
- super(DurusStore, self).close()
-
-
-__all__ = ['DurusStore']
diff --git a/lib/shove/store/file.py b/lib/shove/store/file.py
deleted file mode 100644
index e66e9c4f..00000000
--- a/lib/shove/store/file.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Filesystem-based object store
-
-shove's pseudo-URL for filesystem-based stores follows the form:
-
-file://<path>
-
-Where the path is a URL path to a directory on a local filesystem.
-Alternatively, a native pathname to the directory can be passed as the 'engine'
-argument.
-'''
-
-from shove import BaseStore, FileBase
-
-
-class FileStore(FileBase, BaseStore):
-
- '''File-based store.'''
-
- def __init__(self, engine, **kw):
- super(FileStore, self).__init__(engine, **kw)
-
-
-__all__ = ['FileStore']
diff --git a/lib/shove/store/ftp.py b/lib/shove/store/ftp.py
deleted file mode 100644
index c2d4aec6..00000000
--- a/lib/shove/store/ftp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-FTP-accessed stores
-
-shove's URL for FTP accessed stores follows the standard form for FTP URLs
-defined in RFC-1738:
-
-ftp://<user>:<password>@<host>:<port>/<path>
-'''
-
-import urlparse
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-from ftplib import FTP, error_perm
-
-from shove import BaseStore
-
-
-class FtpStore(BaseStore):
-
- def __init__(self, engine, **kw):
- super(FtpStore, self).__init__(engine, **kw)
- user = kw.get('user', 'anonymous')
- password = kw.get('password', '')
- spliturl = urlparse.urlsplit(engine)
- # Set URL, path, and strip 'ftp://' off
- base, path = spliturl[1], spliturl[2] + '/'
- if '@' in base:
- auth, base = base.split('@')
- user, password = auth.split(':')
- self._store = FTP(base, user, password)
-        # Change to remote path if it exists
- try:
- self._store.cwd(path)
- except error_perm:
- self._makedir(path)
- self._base, self._user, self._password = base, user, password
-        self._updated, self._keys = True, None
-
- def __getitem__(self, key):
- try:
- local = StringIO()
- # Download item
- self._store.retrbinary('RETR %s' % key, local.write)
- self._updated = False
- return self.loads(local.getvalue())
- except:
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- local = StringIO(self.dumps(value))
- self._store.storbinary('STOR %s' % key, local)
- self._updated = True
-
- def __delitem__(self, key):
- try:
- self._store.delete(key)
- self._updated = True
- except:
- raise KeyError(key)
-
- def _makedir(self, path):
- '''Makes remote paths on an FTP server.'''
- paths = list(reversed([i for i in path.split('/') if i != '']))
- while paths:
- tpath = paths.pop()
- self._store.mkd(tpath)
- self._store.cwd(tpath)
-
- def keys(self):
- '''Returns a list of keys in a store.'''
- if self._updated or self._keys is None:
- rlist, nlist = list(), list()
- # Remote directory listing
- self._store.retrlines('LIST -a', rlist.append)
- for rlisting in rlist:
- # Split remote file based on whitespace
- rfile = rlisting.split()
- # Append tuple of remote item type & name
- if rfile[-1] not in ('.', '..') and rfile[0].startswith('-'):
- nlist.append(rfile[-1])
- self._keys = nlist
- return self._keys
-
-
-__all__ = ['FtpStore']
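# A usage sketch for FtpStore above via the Shove front end; the host and
# credentials are placeholders. As in the FTP store tests below, sync() is
# called so pending writes are pushed to the server before reading.
from shove import Shove

store = Shove('ftp://user:secret@ftp.example.com/shove/', compress=True)
store['max'] = 3
store.sync()
assert store['max'] == 3
store.close()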
diff --git a/lib/shove/store/hdf5.py b/lib/shove/store/hdf5.py
deleted file mode 100644
index a9b618e5..00000000
--- a/lib/shove/store/hdf5.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-HDF5 Database Store.
-
-shove's pseudo-URL for HDF5 stores follows the form:
-
-hdf5://<path>/<group>
-
-Where <path> is a URL path to a HDF5 database. Alternatively, the native
-pathname to a HDF5 database can be passed as the 'engine' parameter.
-<group> is the name of the group within the HDF5 file.
-'''
-
-try:
- import h5py
-except ImportError:
- raise ImportError('This store requires h5py library')
-
-from shove.store import ClientStore
-
-
-class HDF5Store(ClientStore):
-
-    '''HDF5 based store'''
-
- init = 'hdf5://'
-
- def __init__(self, engine, **kw):
- super(HDF5Store, self).__init__(engine, **kw)
-        engine, group = self._engine.rsplit('/', 1)
- self._store = h5py.File(engine).require_group(group).attrs
-
-
-__all__ = ['HDF5Store']
diff --git a/lib/shove/store/leveldbstore.py b/lib/shove/store/leveldbstore.py
deleted file mode 100644
index ca73a494..00000000
--- a/lib/shove/store/leveldbstore.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-LevelDB Database Store.
-
-shove's pseudo-URL for LevelDB stores follows the form:
-
-leveldb://<path>
-
-Where <path> is a URL path to a LevelDB database. Alternatively, the native
-pathname to a LevelDB database can be passed as the 'engine' parameter.
-'''
-
-try:
- import leveldb
-except ImportError:
- raise ImportError('This store requires py-leveldb library')
-
-from shove.store import ClientStore
-
-
-class LevelDBStore(ClientStore):
-
- '''LevelDB based store'''
-
- init = 'leveldb://'
-
- def __init__(self, engine, **kw):
- super(LevelDBStore, self).__init__(engine, **kw)
- self._store = leveldb.LevelDB(self._engine)
-
- def __getitem__(self, key):
- item = self.loads(self._store.Get(key))
- if item is not None:
- return item
- raise KeyError(key)
-
- def __setitem__(self, key, value):
- self._store.Put(key, self.dumps(value))
-
- def __delitem__(self, key):
- self._store.Delete(key)
-
- def keys(self):
- return list(k for k in self._store.RangeIter(include_value=False))
-
-
-__all__ = ['LevelDBStore']
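# A usage sketch for LevelDBStore above, instantiated directly; the py-leveldb
# bindings must be installed and '/tmp/shove-level' is an illustrative
# directory that LevelDB creates on first use.
from shove.store.leveldbstore import LevelDBStore

store = LevelDBStore('leveldb:///tmp/shove-level')
store['max'] = 3               # value is pickled by dumps() before Put()
assert store['max'] == 3
del store['max']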
diff --git a/lib/shove/store/memory.py b/lib/shove/store/memory.py
deleted file mode 100644
index 525ae69e..00000000
--- a/lib/shove/store/memory.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Thread-safe in-memory store.
-
-The shove pseudo-URL for a memory store is:
-
-memory://
-'''
-
-import copy
-import threading
-
-from shove import synchronized
-from shove.store.simple import SimpleStore
-
-
-class MemoryStore(SimpleStore):
-
- '''Thread-safe in-memory store.'''
-
- def __init__(self, engine, **kw):
- super(MemoryStore, self).__init__(engine, **kw)
- self._lock = threading.Condition()
-
- @synchronized
- def __getitem__(self, key):
- return copy.deepcopy(super(MemoryStore, self).__getitem__(key))
-
- @synchronized
- def __setitem__(self, key, value):
- super(MemoryStore, self).__setitem__(key, value)
-
- @synchronized
- def __delitem__(self, key):
- super(MemoryStore, self).__delitem__(key)
-
-
-__all__ = ['MemoryStore']
diff --git a/lib/shove/store/redisdb.py b/lib/shove/store/redisdb.py
deleted file mode 100644
index 67fa2ebd..00000000
--- a/lib/shove/store/redisdb.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Redis-based object store
-
-The shove pseudo-URL for a redis-based store is:
-
-redis://<host>:<port>/<db>
-'''
-
-import urlparse
-
-try:
- import redis
-except ImportError:
- raise ImportError('This store requires the redis library')
-
-from shove.store import ClientStore
-
-
-class RedisStore(ClientStore):
-
- '''Redis based store'''
-
- init = 'redis://'
-
- def __init__(self, engine, **kw):
- super(RedisStore, self).__init__(engine, **kw)
- spliturl = urlparse.urlsplit(engine)
- host, port = spliturl[1].split(':')
- db = spliturl[2].replace('/', '')
- self._store = redis.Redis(host, int(port), db)
-
- def __contains__(self, key):
- return self._store.exists(key)
-
- def clear(self):
- self._store.flushdb()
-
- def keys(self):
- return self._store.keys()
-
- def setdefault(self, key, default=None):
- return self._store.getset(key, default)
-
- def update(self, other=None, **kw):
-        args = other if other is not None else kw
- self._store.mset(args)
-
-
-__all__ = ['RedisStore']
diff --git a/lib/shove/store/s3.py b/lib/shove/store/s3.py
deleted file mode 100644
index dbf12f21..00000000
--- a/lib/shove/store/s3.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-S3-accessed stores
-
-shove's pseudo-URL for stores found on Amazon.com's S3 web service follows this
-form:
-
-s3://<key>:<secret>@<bucket>
-
-<key> is the Access Key issued by Amazon
-<secret> is the Secret Access Key issued by Amazon
-<bucket> is the name of the bucket accessed through the S3 service
-'''
-
-try:
- from boto.s3.connection import S3Connection
- from boto.s3.key import Key
-except ImportError:
- raise ImportError('Requires boto library')
-
-from shove import BaseStore
-
-
-class S3Store(BaseStore):
-
- def __init__(self, engine=None, **kw):
- super(S3Store, self).__init__(engine, **kw)
- # key = Access Key, secret=Secret Access Key, bucket=bucket name
- key, secret, bucket = kw.get('key'), kw.get('secret'), kw.get('bucket')
- if engine is not None:
- auth, bucket = engine.split('://')[1].split('@')
- key, secret = auth.split(':')
- # kw 'secure' = (True or False, use HTTPS)
- self._conn = S3Connection(key, secret, kw.get('secure', False))
- buckets = self._conn.get_all_buckets()
- # Use bucket if it exists
- for b in buckets:
- if b.name == bucket:
- self._store = b
- break
- # Create bucket if it doesn't exist
- else:
- self._store = self._conn.create_bucket(bucket)
- # Set bucket permission ('private', 'public-read',
-        # 'public-read-write', 'authenticated-read')
- self._store.set_acl(kw.get('acl', 'private'))
- # Updated flag used for avoiding network calls
- self._updated, self._keys = True, None
-
- def __getitem__(self, key):
- rkey = self._store.lookup(key)
- if rkey is None:
- raise KeyError(key)
- # Fetch string
- value = self.loads(rkey.get_contents_as_string())
- # Flag that the store has not been updated
- self._updated = False
- return value
-
- def __setitem__(self, key, value):
- rkey = Key(self._store)
- rkey.key = key
- rkey.set_contents_from_string(self.dumps(value))
- # Flag that the store has been updated
- self._updated = True
-
- def __delitem__(self, key):
- try:
- self._store.delete_key(key)
- # Flag that the store has been updated
- self._updated = True
- except:
- raise KeyError(key)
-
- def keys(self):
- '''Returns a list of keys in the store.'''
- return list(i[0] for i in self.items())
-
- def items(self):
- '''Returns a list of items from the store.'''
- if self._updated or self._keys is None:
- self._keys = self._store.get_all_keys()
- return list((str(k.key), k) for k in self._keys)
-
-    def iteritems(self):
-        '''Lazily returns items from the store.'''
-        for key, rkey in self.items():
-            yield (key, rkey)
-
-
-__all__ = ['S3Store']
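# A usage sketch for S3Store above, passing the credentials as keywords
# instead of embedding them in the pseudo-URL; the access key, secret key and
# bucket name are placeholders and the boto library must be installed.
from shove.store.s3 import S3Store

store = S3Store(key='AKIAXXXXXXXX', secret='xxxxxxxx',
                bucket='my-shove-bucket', acl='private', secure=True)
store['max'] = 3
assert store['max'] == 3
del store['max']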
diff --git a/lib/shove/store/simple.py b/lib/shove/store/simple.py
deleted file mode 100644
index 8f7ebb33..00000000
--- a/lib/shove/store/simple.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Single-process in-memory store.
-
-The shove pseudo-URL for a simple store is:
-
-simple://
-'''
-
-from shove import BaseStore, SimpleBase
-
-
-class SimpleStore(SimpleBase, BaseStore):
-
- '''Single-process in-memory store.'''
-
- def __init__(self, engine, **kw):
- super(SimpleStore, self).__init__(engine, **kw)
-
-
-__all__ = ['SimpleStore']
diff --git a/lib/shove/store/svn.py b/lib/shove/store/svn.py
deleted file mode 100644
index 5bb8c33e..00000000
--- a/lib/shove/store/svn.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-subversion managed store.
-
-The shove pseudo-URL used for a subversion store that is password protected is:
-
-svn://<username>:<password>@<path>?url=<url>
-
-or for non-password protected repositories:
-
-svn://<path>?url=<url>
-
-<path> is the local repository copy
-<url> is the URL of the subversion repository
-'''
-
-import os
-import urllib
-import threading
-
-try:
- import pysvn
-except ImportError:
- raise ImportError('Requires Python Subversion library')
-
-from shove import BaseStore, synchronized
-
-
-class SvnStore(BaseStore):
-
- '''Class for subversion store.'''
-
- def __init__(self, engine=None, **kw):
- super(SvnStore, self).__init__(engine, **kw)
- # Get path, url from keywords if used
- path, url = kw.get('path'), kw.get('url')
-        # Get username, password from keywords if used
-        user, password = kw.get('user'), kw.get('password')
-        # Process pseudo-URL if used
- if engine is not None:
- path, query = engine.split('n://')[1].split('?')
- url = query.split('=')[1]
- # Check for username, password
- if '@' in path:
- auth, path = path.split('@')
- user, password = auth.split(':')
- path = urllib.url2pathname(path)
- # Create subversion client
- self._client = pysvn.Client()
- # Assign username, password
- if user is not None:
- self._client.set_username(user)
- if password is not None:
- self._client.set_password(password)
- # Verify that store exists in repository
- try:
- self._client.info2(url)
- # Create store in repository if it doesn't exist
- except pysvn.ClientError:
- self._client.mkdir(url, 'Adding directory')
- # Verify that local copy exists
- try:
- if self._client.info(path) is None:
- self._client.checkout(url, path)
- # Check it out if it doesn't exist
- except pysvn.ClientError:
- self._client.checkout(url, path)
- self._path, self._url = path, url
- # Lock
- self._lock = threading.Condition()
-
- @synchronized
- def __getitem__(self, key):
- try:
- return self.loads(self._client.cat(self._key_to_file(key)))
- except:
- raise KeyError(key)
-
- @synchronized
- def __setitem__(self, key, value):
- fname = self._key_to_file(key)
- # Write value to file
- open(fname, 'wb').write(self.dumps(value))
- # Add to repository
- if key not in self:
- self._client.add(fname)
- self._client.checkin([fname], 'Adding %s' % fname)
-
- @synchronized
- def __delitem__(self, key):
- try:
- fname = self._key_to_file(key)
- self._client.remove(fname)
- # Remove deleted value from repository
- self._client.checkin([fname], 'Removing %s' % fname)
- except:
- raise KeyError(key)
-
- def _key_to_file(self, key):
- '''Gives the filesystem path for a key.'''
- return os.path.join(self._path, urllib.quote_plus(key))
-
- @synchronized
- def keys(self):
- '''Returns a list of keys in the subversion repository.'''
- return list(str(i.name.split('/')[-1]) for i
- in self._client.ls(self._path))
-
-
-__all__ = ['SvnStore']
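# A usage sketch for SvnStore above, passing the working copy path and
# repository URL as keywords; both values, along with the credentials, are
# placeholders, and pysvn must be installed with the repository reachable.
from shove.store.svn import SvnStore

store = SvnStore(path='/tmp/shove-wc', url='svn://svn.example.com/shove',
                 user='alice', password='secret')
store['max'] = 3               # written to the working copy, then checked in
assert store['max'] == 3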
diff --git a/lib/shove/store/zodb.py b/lib/shove/store/zodb.py
deleted file mode 100644
index 43768dde..00000000
--- a/lib/shove/store/zodb.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Zope Object Database store frontend.
-
-shove's pseudo-URL for ZODB stores follows the form:
-
-zodb://<path>
-
-
-Where the path is a URL path to a ZODB FileStorage database. Alternatively, a
-native pathname to a ZODB database can be passed as the 'engine' argument.
-'''
-
-try:
- import transaction
- from ZODB import FileStorage, DB
-except ImportError:
- raise ImportError('Requires ZODB library')
-
-from shove.store import SyncStore
-
-
-class ZodbStore(SyncStore):
-
- '''ZODB store front end.'''
-
- init = 'zodb://'
-
- def __init__(self, engine, **kw):
- super(ZodbStore, self).__init__(engine, **kw)
-        # Handle pseudo-URL
- self._storage = FileStorage.FileStorage(self._engine)
- self._db = DB(self._storage)
- self._connection = self._db.open()
- self._store = self._connection.root()
-        # Keeps DB in sync through commits of transactions
- self.sync = transaction.commit
-
- def close(self):
- '''Closes all open storage and connections.'''
- self.sync()
- super(ZodbStore, self).close()
- self._connection.close()
- self._db.close()
- self._storage.close()
-
-
-__all__ = ['ZodbStore']
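# A usage sketch for ZodbStore above via the Shove front end; 'test.fs' is a
# throwaway FileStorage path and the ZODB package must be installed.
from shove import Shove

store = Shove('zodb://test.fs', compress=True)
store['max'] = 3
assert store['max'] == 3
store.close()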
diff --git a/lib/shove/tests/__init__.py b/lib/shove/tests/__init__.py
deleted file mode 100644
index 40a96afc..00000000
--- a/lib/shove/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/lib/shove/tests/test_bsddb_store.py b/lib/shove/tests/test_bsddb_store.py
deleted file mode 100644
index 3de7896e..00000000
--- a/lib/shove/tests/test_bsddb_store.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestBsdbStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('bsddb://test.db', compress=True)
-
- def tearDown(self):
- import os
- self.store.close()
- os.remove('test.db')
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_cassandra_store.py b/lib/shove/tests/test_cassandra_store.py
deleted file mode 100644
index a5c60f6a..00000000
--- a/lib/shove/tests/test_cassandra_store.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestCassandraStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- from pycassa.system_manager import SystemManager
- system_manager = SystemManager('localhost:9160')
- try:
- system_manager.create_column_family('Foo', 'shove')
- except:
- pass
- self.store = Shove('cassandra://localhost:9160/Foo/shove')
-
- def tearDown(self):
- self.store.clear()
- self.store.close()
- from pycassa.system_manager import SystemManager
- system_manager = SystemManager('localhost:9160')
- system_manager.drop_column_family('Foo', 'shove')
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
-# def test_clear(self):
-# self.store['max'] = 3
-# self.store['min'] = 6
-# self.store['pow'] = 7
-# self.store.clear()
-# self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
-# def test_popitem(self):
-# self.store['max'] = 3
-# self.store['min'] = 6
-# self.store['pow'] = 7
-# item = self.store.popitem()
-# self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
-# self.store['pow'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store.setdefault('pow', 8), 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_db_cache.py b/lib/shove/tests/test_db_cache.py
deleted file mode 100644
index 9dd27a06..00000000
--- a/lib/shove/tests/test_db_cache.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestDbCache(unittest.TestCase):
-
- initstring = 'sqlite:///'
-
- def setUp(self):
- from shove.cache.db import DbCache
- self.cache = DbCache(self.initstring)
-
- def tearDown(self):
- self.cache = None
-
- def test_getitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_setitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_delitem(self):
- self.cache['test'] = 'test'
- del self.cache['test']
- self.assertEqual('test' in self.cache, False)
-
- def test_get(self):
- self.assertEqual(self.cache.get('min'), None)
-
- def test_timeout(self):
- import time
- from shove.cache.db import DbCache
- cache = DbCache(self.initstring, timeout=1)
- cache['test'] = 'test'
- time.sleep(2)
-
- def tmp():
- cache['test']
- self.assertRaises(KeyError, tmp)
-
- def test_cull(self):
- from shove.cache.db import DbCache
- cache = DbCache(self.initstring, max_entries=1)
- cache['test'] = 'test'
- cache['test2'] = 'test'
- cache['test2'] = 'test'
- self.assertEquals(len(cache), 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_db_store.py b/lib/shove/tests/test_db_store.py
deleted file mode 100644
index 1d9ad616..00000000
--- a/lib/shove/tests/test_db_store.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestDbStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('sqlite://', compress=True)
-
- def tearDown(self):
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_dbm_store.py b/lib/shove/tests/test_dbm_store.py
deleted file mode 100644
index e64ac9e7..00000000
--- a/lib/shove/tests/test_dbm_store.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestDbmStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('dbm://test.dbm', compress=True)
-
- def tearDown(self):
- import os
- self.store.close()
- try:
- os.remove('test.dbm.db')
- except OSError:
- pass
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.setdefault('how', 8)
- self.assertEqual(self.store['how'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_durus_store.py b/lib/shove/tests/test_durus_store.py
deleted file mode 100644
index 006fcc41..00000000
--- a/lib/shove/tests/test_durus_store.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestDurusStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('durus://test.durus', compress=True)
-
- def tearDown(self):
- import os
- self.store.close()
- os.remove('test.durus')
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_file_cache.py b/lib/shove/tests/test_file_cache.py
deleted file mode 100644
index b288ce82..00000000
--- a/lib/shove/tests/test_file_cache.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestFileCache(unittest.TestCase):
-
- initstring = 'file://test'
-
- def setUp(self):
- from shove.cache.file import FileCache
- self.cache = FileCache(self.initstring)
-
- def tearDown(self):
- import os
- self.cache = None
- for x in os.listdir('test'):
- os.remove(os.path.join('test', x))
- os.rmdir('test')
-
- def test_getitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_setitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_delitem(self):
- self.cache['test'] = 'test'
- del self.cache['test']
- self.assertEqual('test' in self.cache, False)
-
- def test_get(self):
- self.assertEqual(self.cache.get('min'), None)
-
- def test_timeout(self):
- import time
- from shove.cache.file import FileCache
- cache = FileCache(self.initstring, timeout=1)
- cache['test'] = 'test'
- time.sleep(2)
-
- def tmp():
- cache['test']
- self.assertRaises(KeyError, tmp)
-
- def test_cull(self):
- from shove.cache.file import FileCache
- cache = FileCache(self.initstring, max_entries=1)
- cache['test'] = 'test'
- cache['test2'] = 'test'
- num = len(cache)
- self.assertEquals(num, 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_file_store.py b/lib/shove/tests/test_file_store.py
deleted file mode 100644
index 35643ced..00000000
--- a/lib/shove/tests/test_file_store.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestFileStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('file://test', compress=True)
-
- def tearDown(self):
- import os
- self.store.close()
- for x in os.listdir('test'):
- os.remove(os.path.join('test', x))
- os.rmdir('test')
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.store.sync()
- tstore.sync()
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_ftp_store.py b/lib/shove/tests/test_ftp_store.py
deleted file mode 100644
index 17679a2c..00000000
--- a/lib/shove/tests/test_ftp_store.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestFtpStore(unittest.TestCase):
-
- ftpstring = 'put ftp string here'
-
- def setUp(self):
- from shove import Shove
- self.store = Shove(self.ftpstring, compress=True)
-
- def tearDown(self):
- self.store.clear()
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.store.sync()
- tstore.sync()
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store.sync()
- self.assertEqual(len(self.store), 2)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store.sync()
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- item = self.store.popitem()
- self.store.sync()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.store.sync()
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.sync()
- self.store.update(tstore)
- self.store.sync()
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
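The FTP test above (like the S3 and SVN variants later in the patch) interleaves sync() calls with reads because Shove appears to buffer writes in its front-end cache before pushing them to the backend. A minimal sketch of that write–sync–read pattern; the URI is a placeholder, standing in for the 'put ftp string here' stub in the removed file:

```python
from shove import Shove

# Placeholder URI; the deleted test left it as 'put ftp string here'.
store = Shove('ftp://user:password@ftp.example.com/shove', compress=True)
store['max'] = 3      # held in the front-end buffer
store.sync()          # flush buffered writes to the FTP backend
assert store['max'] == 3
store.clear()
store.close()
```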
diff --git a/lib/shove/tests/test_hdf5_store.py b/lib/shove/tests/test_hdf5_store.py
deleted file mode 100644
index b1342ecf..00000000
--- a/lib/shove/tests/test_hdf5_store.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest2
-
-
-class TestHDF5Store(unittest2.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('hdf5://test.hdf5/test')
-
- def tearDown(self):
- import os
- self.store.close()
- try:
- os.remove('test.hdf5')
- except OSError:
- pass
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.setdefault('bow', 8)
- self.assertEqual(self.store['bow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-if __name__ == '__main__':
- unittest2.main()
diff --git a/lib/shove/tests/test_leveldb_store.py b/lib/shove/tests/test_leveldb_store.py
deleted file mode 100644
index b3a3d177..00000000
--- a/lib/shove/tests/test_leveldb_store.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest2
-
-
-class TestLevelDBStore(unittest2.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('leveldb://test', compress=True)
-
- def tearDown(self):
- import shutil
- shutil.rmtree('test')
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.setdefault('bow', 8)
- self.assertEqual(self.store['bow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest2.main()
diff --git a/lib/shove/tests/test_memcached_cache.py b/lib/shove/tests/test_memcached_cache.py
deleted file mode 100644
index 98f0b96d..00000000
--- a/lib/shove/tests/test_memcached_cache.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestMemcached(unittest.TestCase):
-
- initstring = 'memcache://localhost:11211'
-
- def setUp(self):
- from shove.cache.memcached import MemCached
- self.cache = MemCached(self.initstring)
-
- def tearDown(self):
- self.cache = None
-
- def test_getitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_setitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_delitem(self):
- self.cache['test'] = 'test'
- del self.cache['test']
- self.assertEqual('test' in self.cache, False)
-
- def test_get(self):
- self.assertEqual(self.cache.get('min'), None)
-
- def test_timeout(self):
- import time
- from shove.cache.memcached import MemCached
- cache = MemCached(self.initstring, timeout=1)
- cache['test'] = 'test'
- time.sleep(1)
-
- def tmp():
- cache['test']
- self.assertRaises(KeyError, tmp)
-
-
-if __name__ == '__main__':
- unittest.main()
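The timeout test above sleeps for exactly the configured timeout, which can be flaky if the memcached round-trip is slow. A hedged variant (Python 2.7+ for the context-manager form of assertRaises) that sleeps past the deadline instead:

```python
import time
import unittest


class MemcachedTimeoutExample(unittest.TestCase):
    def test_timeout(self):
        from shove.cache.memcached import MemCached   # needs a reachable memcached
        cache = MemCached('memcache://localhost:11211', timeout=1)
        cache['test'] = 'test'
        time.sleep(2)                 # sleep beyond the 1-second timeout
        with self.assertRaises(KeyError):
            cache['test']


if __name__ == '__main__':
    unittest.main()
```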
diff --git a/lib/shove/tests/test_memory_cache.py b/lib/shove/tests/test_memory_cache.py
deleted file mode 100644
index 87749cdb..00000000
--- a/lib/shove/tests/test_memory_cache.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestMemoryCache(unittest.TestCase):
-
- initstring = 'memory://'
-
- def setUp(self):
- from shove.cache.memory import MemoryCache
- self.cache = MemoryCache(self.initstring)
-
- def tearDown(self):
- self.cache = None
-
- def test_getitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_setitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_delitem(self):
- self.cache['test'] = 'test'
- del self.cache['test']
- self.assertEqual('test' in self.cache, False)
-
- def test_get(self):
- self.assertEqual(self.cache.get('min'), None)
-
- def test_timeout(self):
- import time
- from shove.cache.memory import MemoryCache
- cache = MemoryCache(self.initstring, timeout=1)
- cache['test'] = 'test'
- time.sleep(1)
-
- def tmp():
- cache['test']
- self.assertRaises(KeyError, tmp)
-
- def test_cull(self):
- from shove.cache.memory import MemoryCache
- cache = MemoryCache(self.initstring, max_entries=1)
- cache['test'] = 'test'
- cache['test2'] = 'test'
- cache['test2'] = 'test'
- self.assertEquals(len(cache), 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_memory_store.py b/lib/shove/tests/test_memory_store.py
deleted file mode 100644
index 12e505dd..00000000
--- a/lib/shove/tests/test_memory_store.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestMemoryStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('memory://', compress=True)
-
- def tearDown(self):
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.store.sync()
- tstore.sync()
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_redis_cache.py b/lib/shove/tests/test_redis_cache.py
deleted file mode 100644
index c8e9b8db..00000000
--- a/lib/shove/tests/test_redis_cache.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestRedisCache(unittest.TestCase):
-
- initstring = 'redis://localhost:6379/0'
-
- def setUp(self):
- from shove.cache.redisdb import RedisCache
- self.cache = RedisCache(self.initstring)
-
- def tearDown(self):
- self.cache = None
-
- def test_getitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_setitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_delitem(self):
- self.cache['test'] = 'test'
- del self.cache['test']
- self.assertEqual('test' in self.cache, False)
-
- def test_get(self):
- self.assertEqual(self.cache.get('min'), None)
-
- def test_timeout(self):
- import time
- from shove.cache.redisdb import RedisCache
- cache = RedisCache(self.initstring, timeout=1)
- cache['test'] = 'test'
- time.sleep(3)
- def tmp(): #@IgnorePep8
- return cache['test']
- self.assertRaises(KeyError, tmp)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_redis_store.py b/lib/shove/tests/test_redis_store.py
deleted file mode 100644
index 06b1e0e9..00000000
--- a/lib/shove/tests/test_redis_store.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestRedisStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('redis://localhost:6379/0')
-
- def tearDown(self):
- self.store.clear()
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store.setdefault('pow', 8), 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_s3_store.py b/lib/shove/tests/test_s3_store.py
deleted file mode 100644
index 8a0f08d7..00000000
--- a/lib/shove/tests/test_s3_store.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestS3Store(unittest.TestCase):
-
- s3string = 's3 test string here'
-
- def setUp(self):
- from shove import Shove
- self.store = Shove(self.s3string, compress=True)
-
- def tearDown(self):
- self.store.clear()
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.store.sync()
- tstore.sync()
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store.sync()
- self.assertEqual(len(self.store), 2)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store.sync()
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- item = self.store.popitem()
- self.store.sync()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.store.sync()
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.sync()
- self.store.update(tstore)
- self.store.sync()
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_simple_cache.py b/lib/shove/tests/test_simple_cache.py
deleted file mode 100644
index 8cd1830c..00000000
--- a/lib/shove/tests/test_simple_cache.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestSimpleCache(unittest.TestCase):
-
- initstring = 'simple://'
-
- def setUp(self):
- from shove.cache.simple import SimpleCache
- self.cache = SimpleCache(self.initstring)
-
- def tearDown(self):
- self.cache = None
-
- def test_getitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_setitem(self):
- self.cache['test'] = 'test'
- self.assertEqual(self.cache['test'], 'test')
-
- def test_delitem(self):
- self.cache['test'] = 'test'
- del self.cache['test']
- self.assertEqual('test' in self.cache, False)
-
- def test_get(self):
- self.assertEqual(self.cache.get('min'), None)
-
- def test_timeout(self):
- import time
- from shove.cache.simple import SimpleCache
- cache = SimpleCache(self.initstring, timeout=1)
- cache['test'] = 'test'
- time.sleep(1)
-
- def tmp():
- cache['test']
- self.assertRaises(KeyError, tmp)
-
- def test_cull(self):
- from shove.cache.simple import SimpleCache
- cache = SimpleCache(self.initstring, max_entries=1)
- cache['test'] = 'test'
- cache['test2'] = 'test'
- cache['test2'] = 'test'
- self.assertEquals(len(cache), 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_simple_store.py b/lib/shove/tests/test_simple_store.py
deleted file mode 100644
index d2431ec5..00000000
--- a/lib/shove/tests/test_simple_store.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestSimpleStore(unittest.TestCase):
-
- def setUp(self):
- from shove import Shove
- self.store = Shove('simple://', compress=True)
-
- def tearDown(self):
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.store.sync()
- tstore.sync()
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_svn_store.py b/lib/shove/tests/test_svn_store.py
deleted file mode 100644
index b3103816..00000000
--- a/lib/shove/tests/test_svn_store.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestSvnStore(unittest.TestCase):
-
- svnstring = 'SVN test string here'
-
- def setUp(self):
- from shove import Shove
- self.store = Shove(self.svnstring, compress=True)
-
- def tearDown(self):
- self.store.clear()
- self.store.close()
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.store.sync()
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.store.sync()
- tstore.sync()
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store.sync()
- self.assertEqual(len(self.store), 2)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store.sync()
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- item = self.store.popitem()
- self.store.sync()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.store.sync()
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.sync()
- self.store.update(tstore)
- self.store.sync()
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.sync()
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/lib/shove/tests/test_zodb_store.py b/lib/shove/tests/test_zodb_store.py
deleted file mode 100644
index 9d979fea..00000000
--- a/lib/shove/tests/test_zodb_store.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-
-class TestZodbStore(unittest.TestCase):
-
- init = 'zodb://test.db'
-
- def setUp(self):
- from shove import Shove
- self.store = Shove(self.init, compress=True)
-
- def tearDown(self):
- self.store.close()
- import os
- os.remove('test.db')
- os.remove('test.db.index')
- os.remove('test.db.tmp')
- os.remove('test.db.lock')
-
- def test__getitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__setitem__(self):
- self.store['max'] = 3
- self.assertEqual(self.store['max'], 3)
-
- def test__delitem__(self):
- self.store['max'] = 3
- del self.store['max']
- self.assertEqual('max' in self.store, False)
-
- def test_get(self):
- self.store['max'] = 3
- self.assertEqual(self.store.get('min'), None)
-
- def test__cmp__(self):
- from shove import Shove
- tstore = Shove()
- self.store['max'] = 3
- tstore['max'] = 3
- self.assertEqual(self.store, tstore)
-
- def test__len__(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.assertEqual(len(self.store), 2)
-
- def test_close(self):
- self.store.close()
- self.assertEqual(self.store, None)
-
- def test_clear(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- self.store.clear()
- self.assertEqual(len(self.store), 0)
-
- def test_items(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.items())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iteritems(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iteritems())
- self.assertEqual(('min', 6) in slist, True)
-
- def test_iterkeys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.iterkeys())
- self.assertEqual('min' in slist, True)
-
- def test_itervalues(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = list(self.store.itervalues())
- self.assertEqual(6 in slist, True)
-
- def test_pop(self):
- self.store['max'] = 3
- self.store['min'] = 6
- item = self.store.pop('min')
- self.assertEqual(item, 6)
-
- def test_popitem(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- item = self.store.popitem()
- self.assertEqual(len(item) + len(self.store), 4)
-
- def test_setdefault(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['powl'] = 7
- self.store.setdefault('pow', 8)
- self.assertEqual(self.store['pow'], 8)
-
- def test_update(self):
- from shove import Shove
- tstore = Shove()
- tstore['max'] = 3
- tstore['min'] = 6
- tstore['pow'] = 7
- self.store['max'] = 2
- self.store['min'] = 3
- self.store['pow'] = 7
- self.store.update(tstore)
- self.assertEqual(self.store['min'], 6)
-
- def test_values(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.values()
- self.assertEqual(6 in slist, True)
-
- def test_keys(self):
- self.store['max'] = 3
- self.store['min'] = 6
- self.store['pow'] = 7
- slist = self.store.keys()
- self.assertEqual('min' in slist, True)
-
-
-if __name__ == '__main__':
- unittest.main()
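Taken together, the shove test modules removed here exercised one contract: Shove is a dict-like front end that combines a persistent store URI with an optional cache URI. A short usage sketch of that contract; both URIs below are illustrative, not taken from the deleted files:

```python
from shove import Shove

# First URI selects the persistent backend, second the front-end cache.
store = Shove('file:///tmp/shove_store', 'simple://', compress=True)
store['config'] = {'retries': 3, 'hosts': ['a', 'b']}   # any picklable value
store.sync()                                             # push buffered writes
assert store['config']['retries'] == 3
store.close()
```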
diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py
deleted file mode 100644
index 67155e0f..00000000
--- a/lib/sqlalchemy/__init__.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# sqlalchemy/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-from .sql import (
- alias,
- and_,
- asc,
- between,
- bindparam,
- case,
- cast,
- collate,
- delete,
- desc,
- distinct,
- except_,
- except_all,
- exists,
- extract,
- false,
- func,
- insert,
- intersect,
- intersect_all,
- join,
- literal,
- literal_column,
- modifier,
- not_,
- null,
- or_,
- outerjoin,
- outparam,
- over,
- select,
- subquery,
- text,
- true,
- tuple_,
- type_coerce,
- union,
- union_all,
- update,
- )
-
-from .types import (
- BIGINT,
- BINARY,
- BLOB,
- BOOLEAN,
- BigInteger,
- Binary,
- Boolean,
- CHAR,
- CLOB,
- DATE,
- DATETIME,
- DECIMAL,
- Date,
- DateTime,
- Enum,
- FLOAT,
- Float,
- INT,
- INTEGER,
- Integer,
- Interval,
- LargeBinary,
- NCHAR,
- NVARCHAR,
- NUMERIC,
- Numeric,
- PickleType,
- REAL,
- SMALLINT,
- SmallInteger,
- String,
- TEXT,
- TIME,
- TIMESTAMP,
- Text,
- Time,
- TypeDecorator,
- Unicode,
- UnicodeText,
- VARBINARY,
- VARCHAR,
- )
-
-
-from .schema import (
- CheckConstraint,
- Column,
- ColumnDefault,
- Constraint,
- DefaultClause,
- FetchedValue,
- ForeignKey,
- ForeignKeyConstraint,
- Index,
- MetaData,
- PassiveDefault,
- PrimaryKeyConstraint,
- Sequence,
- Table,
- ThreadLocalMetaData,
- UniqueConstraint,
- DDL,
-)
-
-
-from .inspection import inspect
-from .engine import create_engine, engine_from_config
-
-__version__ = '0.9.4'
-
-def __go(lcls):
- global __all__
-
- from . import events
- from . import util as _sa_util
-
- import inspect as _inspect
-
- __all__ = sorted(name for name, obj in lcls.items()
- if not (name.startswith('_') or _inspect.ismodule(obj)))
-
- _sa_util.dependencies.resolve_all("sqlalchemy")
-__go(locals())
\ No newline at end of file
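The `__go(locals())` hook removed above computes the package's `__all__` from its namespace, keeping public names and dropping modules, before resolving deferred imports. A standalone sketch of just the `__all__` computation (the `_sa_util.dependencies.resolve_all` step is omitted):

```python
import inspect


def build_all(namespace):
    """Sorted public, non-module names, as the removed __go() computed __all__."""
    return sorted(
        name for name, obj in namespace.items()
        if not (name.startswith('_') or inspect.ismodule(obj))
    )


# Applied to an arbitrary namespace dict:
import math
sample = {'select': object(), 'Column': object(), '_private': 1, 'math': math}
print(build_all(sample))   # ['Column', 'select']
```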
diff --git a/lib/sqlalchemy/cextension/processors.c b/lib/sqlalchemy/cextension/processors.c
deleted file mode 100644
index d5681776..00000000
--- a/lib/sqlalchemy/cextension/processors.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
-processors.c
-Copyright (C) 2010-2014 the SQLAlchemy authors and contributors
-Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com
-
-This module is part of SQLAlchemy and is released under
-the MIT License: http://www.opensource.org/licenses/mit-license.php
-*/
-
-#include <Python.h>
-#include <datetime.h>
-
-#define MODULE_NAME "cprocessors"
-#define MODULE_DOC "Module containing C versions of data processing functions."
-
-#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
-typedef int Py_ssize_t;
-#define PY_SSIZE_T_MAX INT_MAX
-#define PY_SSIZE_T_MIN INT_MIN
-#endif
-
-static PyObject *
-int_to_boolean(PyObject *self, PyObject *arg)
-{
- long l = 0;
- PyObject *res;
-
- if (arg == Py_None)
- Py_RETURN_NONE;
-
-
-#if PY_MAJOR_VERSION >= 3
- l = PyLong_AsLong(arg);
-#else
- l = PyInt_AsLong(arg);
-#endif
- if (l == 0) {
- res = Py_False;
- } else if (l == 1) {
- res = Py_True;
- } else if ((l == -1) && PyErr_Occurred()) {
- /* -1 can be either the actual value, or an error flag. */
- return NULL;
- } else {
- PyErr_SetString(PyExc_ValueError,
- "int_to_boolean only accepts None, 0 or 1");
- return NULL;
- }
-
- Py_INCREF(res);
- return res;
-}
-
-static PyObject *
-to_str(PyObject *self, PyObject *arg)
-{
- if (arg == Py_None)
- Py_RETURN_NONE;
-
- return PyObject_Str(arg);
-}
-
-static PyObject *
-to_float(PyObject *self, PyObject *arg)
-{
- if (arg == Py_None)
- Py_RETURN_NONE;
-
- return PyNumber_Float(arg);
-}
-
-static PyObject *
-str_to_datetime(PyObject *self, PyObject *arg)
-{
-#if PY_MAJOR_VERSION >= 3
- PyObject *bytes;
- PyObject *err_bytes;
-#endif
- const char *str;
- int numparsed;
- unsigned int year, month, day, hour, minute, second, microsecond = 0;
- PyObject *err_repr;
-
- if (arg == Py_None)
- Py_RETURN_NONE;
-
-#if PY_MAJOR_VERSION >= 3
- bytes = PyUnicode_AsASCIIString(arg);
- if (bytes == NULL)
- str = NULL;
- else
- str = PyBytes_AS_STRING(bytes);
-#else
- str = PyString_AsString(arg);
-#endif
- if (str == NULL) {
- err_repr = PyObject_Repr(arg);
- if (err_repr == NULL)
- return NULL;
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(err_repr);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse datetime string '%.200s' "
- "- value is not a string.",
- PyBytes_AS_STRING(err_bytes));
- Py_DECREF(err_bytes);
-#else
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse datetime string '%.200s' "
- "- value is not a string.",
- PyString_AsString(err_repr));
-#endif
- Py_DECREF(err_repr);
- return NULL;
- }
-
- /* microseconds are optional */
- /*
- TODO: this is slightly less picky than the Python version which would
- not accept "2000-01-01 00:00:00.". I don't know which is better, but they
- should be coherent.
- */
- numparsed = sscanf(str, "%4u-%2u-%2u %2u:%2u:%2u.%6u", &year, &month, &day,
-                       &hour, &minute, &second, &microsecond);
-#if PY_MAJOR_VERSION >= 3
- Py_DECREF(bytes);
-#endif
- if (numparsed < 6) {
- err_repr = PyObject_Repr(arg);
- if (err_repr == NULL)
- return NULL;
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(err_repr);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse datetime string: %.200s",
- PyBytes_AS_STRING(err_bytes));
- Py_DECREF(err_bytes);
-#else
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse datetime string: %.200s",
- PyString_AsString(err_repr));
-#endif
- Py_DECREF(err_repr);
- return NULL;
- }
- return PyDateTime_FromDateAndTime(year, month, day,
- hour, minute, second, microsecond);
-}
-
-static PyObject *
-str_to_time(PyObject *self, PyObject *arg)
-{
-#if PY_MAJOR_VERSION >= 3
- PyObject *bytes;
- PyObject *err_bytes;
-#endif
- const char *str;
- int numparsed;
- unsigned int hour, minute, second, microsecond = 0;
- PyObject *err_repr;
-
- if (arg == Py_None)
- Py_RETURN_NONE;
-
-#if PY_MAJOR_VERSION >= 3
- bytes = PyUnicode_AsASCIIString(arg);
- if (bytes == NULL)
- str = NULL;
- else
- str = PyBytes_AS_STRING(bytes);
-#else
- str = PyString_AsString(arg);
-#endif
- if (str == NULL) {
- err_repr = PyObject_Repr(arg);
- if (err_repr == NULL)
- return NULL;
-
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(err_repr);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse time string '%.200s' - value is not a string.",
- PyBytes_AS_STRING(err_bytes));
- Py_DECREF(err_bytes);
-#else
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse time string '%.200s' - value is not a string.",
- PyString_AsString(err_repr));
-#endif
- Py_DECREF(err_repr);
- return NULL;
- }
-
- /* microseconds are optional */
- /*
- TODO: this is slightly less picky than the Python version which would
- not accept "00:00:00.". I don't know which is better, but they should be
- coherent.
- */
- numparsed = sscanf(str, "%2u:%2u:%2u.%6u", &hour, &minute, &second,
-                       &microsecond);
-#if PY_MAJOR_VERSION >= 3
- Py_DECREF(bytes);
-#endif
- if (numparsed < 3) {
- err_repr = PyObject_Repr(arg);
- if (err_repr == NULL)
- return NULL;
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(err_repr);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse time string: %.200s",
- PyBytes_AS_STRING(err_bytes));
- Py_DECREF(err_bytes);
-#else
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse time string: %.200s",
- PyString_AsString(err_repr));
-#endif
- Py_DECREF(err_repr);
- return NULL;
- }
- return PyTime_FromTime(hour, minute, second, microsecond);
-}
-
-static PyObject *
-str_to_date(PyObject *self, PyObject *arg)
-{
-#if PY_MAJOR_VERSION >= 3
- PyObject *bytes;
- PyObject *err_bytes;
-#endif
- const char *str;
- int numparsed;
- unsigned int year, month, day;
- PyObject *err_repr;
-
- if (arg == Py_None)
- Py_RETURN_NONE;
-
-#if PY_MAJOR_VERSION >= 3
- bytes = PyUnicode_AsASCIIString(arg);
- if (bytes == NULL)
- str = NULL;
- else
- str = PyBytes_AS_STRING(bytes);
-#else
- str = PyString_AsString(arg);
-#endif
- if (str == NULL) {
- err_repr = PyObject_Repr(arg);
- if (err_repr == NULL)
- return NULL;
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(err_repr);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse date string '%.200s' - value is not a string.",
- PyBytes_AS_STRING(err_bytes));
- Py_DECREF(err_bytes);
-#else
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse date string '%.200s' - value is not a string.",
- PyString_AsString(err_repr));
-#endif
- Py_DECREF(err_repr);
- return NULL;
- }
-
- numparsed = sscanf(str, "%4u-%2u-%2u", &year, &month, &day);
-#if PY_MAJOR_VERSION >= 3
- Py_DECREF(bytes);
-#endif
- if (numparsed != 3) {
- err_repr = PyObject_Repr(arg);
- if (err_repr == NULL)
- return NULL;
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(err_repr);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse date string: %.200s",
- PyBytes_AS_STRING(err_bytes));
- Py_DECREF(err_bytes);
-#else
- PyErr_Format(
- PyExc_ValueError,
- "Couldn't parse date string: %.200s",
- PyString_AsString(err_repr));
-#endif
- Py_DECREF(err_repr);
- return NULL;
- }
- return PyDate_FromDate(year, month, day);
-}
-
-
-/***********
- * Structs *
- ***********/
-
-typedef struct {
- PyObject_HEAD
- PyObject *encoding;
- PyObject *errors;
-} UnicodeResultProcessor;
-
-typedef struct {
- PyObject_HEAD
- PyObject *type;
- PyObject *format;
-} DecimalResultProcessor;
-
-
-
-/**************************
- * UnicodeResultProcessor *
- **************************/
-
-static int
-UnicodeResultProcessor_init(UnicodeResultProcessor *self, PyObject *args,
- PyObject *kwds)
-{
- PyObject *encoding, *errors = NULL;
- static char *kwlist[] = {"encoding", "errors", NULL};
-
-#if PY_MAJOR_VERSION >= 3
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "U|U:__init__", kwlist,
- &encoding, &errors))
- return -1;
-#else
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "S|S:__init__", kwlist,
- &encoding, &errors))
- return -1;
-#endif
-
-#if PY_MAJOR_VERSION >= 3
- encoding = PyUnicode_AsASCIIString(encoding);
-#else
- Py_INCREF(encoding);
-#endif
- self->encoding = encoding;
-
- if (errors) {
-#if PY_MAJOR_VERSION >= 3
- errors = PyUnicode_AsASCIIString(errors);
-#else
- Py_INCREF(errors);
-#endif
- } else {
-#if PY_MAJOR_VERSION >= 3
- errors = PyBytes_FromString("strict");
-#else
- errors = PyString_FromString("strict");
-#endif
- if (errors == NULL)
- return -1;
- }
- self->errors = errors;
-
- return 0;
-}
-
-static PyObject *
-UnicodeResultProcessor_process(UnicodeResultProcessor *self, PyObject *value)
-{
- const char *encoding, *errors;
- char *str;
- Py_ssize_t len;
-
- if (value == Py_None)
- Py_RETURN_NONE;
-
-#if PY_MAJOR_VERSION >= 3
- if (PyBytes_AsStringAndSize(value, &str, &len))
- return NULL;
-
- encoding = PyBytes_AS_STRING(self->encoding);
- errors = PyBytes_AS_STRING(self->errors);
-#else
- if (PyString_AsStringAndSize(value, &str, &len))
- return NULL;
-
- encoding = PyString_AS_STRING(self->encoding);
- errors = PyString_AS_STRING(self->errors);
-#endif
-
- return PyUnicode_Decode(str, len, encoding, errors);
-}
-
-static PyObject *
-UnicodeResultProcessor_conditional_process(UnicodeResultProcessor *self, PyObject *value)
-{
- const char *encoding, *errors;
- char *str;
- Py_ssize_t len;
-
- if (value == Py_None)
- Py_RETURN_NONE;
-
-#if PY_MAJOR_VERSION >= 3
- if (PyUnicode_Check(value) == 1) {
- Py_INCREF(value);
- return value;
- }
-
- if (PyBytes_AsStringAndSize(value, &str, &len))
- return NULL;
-
- encoding = PyBytes_AS_STRING(self->encoding);
- errors = PyBytes_AS_STRING(self->errors);
-#else
-
- if (PyUnicode_Check(value) == 1) {
- Py_INCREF(value);
- return value;
- }
-
- if (PyString_AsStringAndSize(value, &str, &len))
- return NULL;
-
-
- encoding = PyString_AS_STRING(self->encoding);
- errors = PyString_AS_STRING(self->errors);
-#endif
-
- return PyUnicode_Decode(str, len, encoding, errors);
-}
-
-static void
-UnicodeResultProcessor_dealloc(UnicodeResultProcessor *self)
-{
- Py_XDECREF(self->encoding);
- Py_XDECREF(self->errors);
-#if PY_MAJOR_VERSION >= 3
- Py_TYPE(self)->tp_free((PyObject*)self);
-#else
- self->ob_type->tp_free((PyObject*)self);
-#endif
-}
-
-static PyMethodDef UnicodeResultProcessor_methods[] = {
- {"process", (PyCFunction)UnicodeResultProcessor_process, METH_O,
- "The value processor itself."},
- {"conditional_process", (PyCFunction)UnicodeResultProcessor_conditional_process, METH_O,
- "Conditional version of the value processor."},
- {NULL} /* Sentinel */
-};
-
-static PyTypeObject UnicodeResultProcessorType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "sqlalchemy.cprocessors.UnicodeResultProcessor", /* tp_name */
- sizeof(UnicodeResultProcessor), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)UnicodeResultProcessor_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "UnicodeResultProcessor objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- UnicodeResultProcessor_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)UnicodeResultProcessor_init, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
-
-/**************************
- * DecimalResultProcessor *
- **************************/
-
-static int
-DecimalResultProcessor_init(DecimalResultProcessor *self, PyObject *args,
- PyObject *kwds)
-{
- PyObject *type, *format;
-
-#if PY_MAJOR_VERSION >= 3
- if (!PyArg_ParseTuple(args, "OU", &type, &format))
-#else
- if (!PyArg_ParseTuple(args, "OS", &type, &format))
-#endif
- return -1;
-
- Py_INCREF(type);
- self->type = type;
-
- Py_INCREF(format);
- self->format = format;
-
- return 0;
-}
-
-static PyObject *
-DecimalResultProcessor_process(DecimalResultProcessor *self, PyObject *value)
-{
- PyObject *str, *result, *args;
-
- if (value == Py_None)
- Py_RETURN_NONE;
-
- /* Decimal does not accept float values directly */
- /* SQLite can also give us an integer here (see [ticket:2432]) */
- /* XXX: starting with Python 3.1, we could use Decimal.from_float(f),
- but the result wouldn't be the same */
-
- args = PyTuple_Pack(1, value);
- if (args == NULL)
- return NULL;
-
-#if PY_MAJOR_VERSION >= 3
- str = PyUnicode_Format(self->format, args);
-#else
- str = PyString_Format(self->format, args);
-#endif
-
- Py_DECREF(args);
- if (str == NULL)
- return NULL;
-
- result = PyObject_CallFunctionObjArgs(self->type, str, NULL);
- Py_DECREF(str);
- return result;
-}
-
-static void
-DecimalResultProcessor_dealloc(DecimalResultProcessor *self)
-{
- Py_XDECREF(self->type);
- Py_XDECREF(self->format);
-#if PY_MAJOR_VERSION >= 3
- Py_TYPE(self)->tp_free((PyObject*)self);
-#else
- self->ob_type->tp_free((PyObject*)self);
-#endif
-}
-
-static PyMethodDef DecimalResultProcessor_methods[] = {
- {"process", (PyCFunction)DecimalResultProcessor_process, METH_O,
- "The value processor itself."},
- {NULL} /* Sentinel */
-};
-
-static PyTypeObject DecimalResultProcessorType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "sqlalchemy.DecimalResultProcessor", /* tp_name */
- sizeof(DecimalResultProcessor), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)DecimalResultProcessor_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "DecimalResultProcessor objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- DecimalResultProcessor_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)DecimalResultProcessor_init, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
-
-static PyMethodDef module_methods[] = {
- {"int_to_boolean", int_to_boolean, METH_O,
- "Convert an integer to a boolean."},
- {"to_str", to_str, METH_O,
- "Convert any value to its string representation."},
- {"to_float", to_float, METH_O,
- "Convert any value to its floating point representation."},
- {"str_to_datetime", str_to_datetime, METH_O,
- "Convert an ISO string to a datetime.datetime object."},
- {"str_to_time", str_to_time, METH_O,
- "Convert an ISO string to a datetime.time object."},
- {"str_to_date", str_to_date, METH_O,
- "Convert an ISO string to a datetime.date object."},
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
-
-#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
-#define PyMODINIT_FUNC void
-#endif
-
-
-#if PY_MAJOR_VERSION >= 3
-
-static struct PyModuleDef module_def = {
- PyModuleDef_HEAD_INIT,
- MODULE_NAME,
- MODULE_DOC,
- -1,
- module_methods
-};
-
-#define INITERROR return NULL
-
-PyMODINIT_FUNC
-PyInit_cprocessors(void)
-
-#else
-
-#define INITERROR return
-
-PyMODINIT_FUNC
-initcprocessors(void)
-
-#endif
-
-{
- PyObject *m;
-
- UnicodeResultProcessorType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&UnicodeResultProcessorType) < 0)
- INITERROR;
-
- DecimalResultProcessorType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&DecimalResultProcessorType) < 0)
- INITERROR;
-
-#if PY_MAJOR_VERSION >= 3
- m = PyModule_Create(&module_def);
-#else
- m = Py_InitModule3(MODULE_NAME, module_methods, MODULE_DOC);
-#endif
- if (m == NULL)
- INITERROR;
-
- PyDateTime_IMPORT;
-
- Py_INCREF(&UnicodeResultProcessorType);
- PyModule_AddObject(m, "UnicodeResultProcessor",
- (PyObject *)&UnicodeResultProcessorType);
-
- Py_INCREF(&DecimalResultProcessorType);
- PyModule_AddObject(m, "DecimalResultProcessor",
- (PyObject *)&DecimalResultProcessorType);
-
-#if PY_MAJOR_VERSION >= 3
- return m;
-#endif
-}
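The C processors removed above shadow pure-Python equivalents in sqlalchemy/processors.py. As a rough reference for what they compute, a simplified Python sketch of the datetime parser and the string-mediated Decimal conversion — this is not the actual SQLAlchemy code, and it pads fractional seconds where the C sscanf reads at most six digits:

```python
import datetime
import re
from decimal import Decimal

_DATETIME_RE = re.compile(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)(?:\.(\d+))?')


def str_to_datetime(value):
    """Parse 'YYYY-MM-DD HH:MM:SS[.ffffff]', returning None for None."""
    if value is None:
        return None
    match = _DATETIME_RE.match(value)
    if match is None:
        raise ValueError("Couldn't parse datetime string: %r" % (value,))
    year, month, day, hour, minute, second, frac = match.groups()
    microsecond = int((frac or '0').ljust(6, '0')[:6])   # pad '.5' to 500000
    return datetime.datetime(int(year), int(month), int(day),
                             int(hour), int(minute), int(second), microsecond)


def to_decimal(value, scale=10):
    """Go through a formatted string, since Decimal(float) keeps binary noise."""
    if value is None:
        return None
    return Decimal("%.*f" % (scale, value))


print(str_to_datetime('2000-01-01 12:30:45.5'))   # 2000-01-01 12:30:45.500000
print(to_decimal(1.1, 4))                          # Decimal('1.1000')
```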
diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c
deleted file mode 100644
index 218c7b80..00000000
--- a/lib/sqlalchemy/cextension/resultproxy.c
+++ /dev/null
@@ -1,718 +0,0 @@
-/*
-resultproxy.c
-Copyright (C) 2010-2014 the SQLAlchemy authors and contributors
-Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com
-
-This module is part of SQLAlchemy and is released under
-the MIT License: http://www.opensource.org/licenses/mit-license.php
-*/
-
-#include <Python.h>
-
-#define MODULE_NAME "cresultproxy"
-#define MODULE_DOC "Module containing C versions of core ResultProxy classes."
-
-#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
-typedef int Py_ssize_t;
-#define PY_SSIZE_T_MAX INT_MAX
-#define PY_SSIZE_T_MIN INT_MIN
-typedef Py_ssize_t (*lenfunc)(PyObject *);
-#define PyInt_FromSsize_t(x) PyInt_FromLong(x)
-typedef intargfunc ssizeargfunc;
-#endif
-
-
-/***********
- * Structs *
- ***********/
-
-typedef struct {
- PyObject_HEAD
- PyObject *parent;
- PyObject *row;
- PyObject *processors;
- PyObject *keymap;
-} BaseRowProxy;
-
-/****************
- * BaseRowProxy *
- ****************/
-
-static PyObject *
-safe_rowproxy_reconstructor(PyObject *self, PyObject *args)
-{
- PyObject *cls, *state, *tmp;
- BaseRowProxy *obj;
-
- if (!PyArg_ParseTuple(args, "OO", &cls, &state))
- return NULL;
-
- obj = (BaseRowProxy *)PyObject_CallMethod(cls, "__new__", "O", cls);
- if (obj == NULL)
- return NULL;
-
- tmp = PyObject_CallMethod((PyObject *)obj, "__setstate__", "O", state);
- if (tmp == NULL) {
- Py_DECREF(obj);
- return NULL;
- }
- Py_DECREF(tmp);
-
- if (obj->parent == NULL || obj->row == NULL ||
- obj->processors == NULL || obj->keymap == NULL) {
- PyErr_SetString(PyExc_RuntimeError,
- "__setstate__ for BaseRowProxy subclasses must set values "
- "for parent, row, processors and keymap");
- Py_DECREF(obj);
- return NULL;
- }
-
- return (PyObject *)obj;
-}
-
-static int
-BaseRowProxy_init(BaseRowProxy *self, PyObject *args, PyObject *kwds)
-{
- PyObject *parent, *row, *processors, *keymap;
-
- if (!PyArg_UnpackTuple(args, "BaseRowProxy", 4, 4,
- &parent, &row, &processors, &keymap))
- return -1;
-
- Py_INCREF(parent);
- self->parent = parent;
-
- if (!PySequence_Check(row)) {
- PyErr_SetString(PyExc_TypeError, "row must be a sequence");
- return -1;
- }
- Py_INCREF(row);
- self->row = row;
-
- if (!PyList_CheckExact(processors)) {
- PyErr_SetString(PyExc_TypeError, "processors must be a list");
- return -1;
- }
- Py_INCREF(processors);
- self->processors = processors;
-
- if (!PyDict_CheckExact(keymap)) {
- PyErr_SetString(PyExc_TypeError, "keymap must be a dict");
- return -1;
- }
- Py_INCREF(keymap);
- self->keymap = keymap;
-
- return 0;
-}
-
-/* We need the reduce method because otherwise the default implementation
- * does very weird stuff for pickle protocol 0 and 1. It calls
- * BaseRowProxy.__new__(RowProxy_instance) upon *pickling*.
- */
-static PyObject *
-BaseRowProxy_reduce(PyObject *self)
-{
- PyObject *method, *state;
- PyObject *module, *reconstructor, *cls;
-
- method = PyObject_GetAttrString(self, "__getstate__");
- if (method == NULL)
- return NULL;
-
- state = PyObject_CallObject(method, NULL);
- Py_DECREF(method);
- if (state == NULL)
- return NULL;
-
- module = PyImport_ImportModule("sqlalchemy.engine.result");
- if (module == NULL)
- return NULL;
-
- reconstructor = PyObject_GetAttrString(module, "rowproxy_reconstructor");
- Py_DECREF(module);
- if (reconstructor == NULL) {
- Py_DECREF(state);
- return NULL;
- }
-
- cls = PyObject_GetAttrString(self, "__class__");
- if (cls == NULL) {
- Py_DECREF(reconstructor);
- Py_DECREF(state);
- return NULL;
- }
-
- return Py_BuildValue("(N(NN))", reconstructor, cls, state);
-}
-
-static void
-BaseRowProxy_dealloc(BaseRowProxy *self)
-{
- Py_XDECREF(self->parent);
- Py_XDECREF(self->row);
- Py_XDECREF(self->processors);
- Py_XDECREF(self->keymap);
-#if PY_MAJOR_VERSION >= 3
- Py_TYPE(self)->tp_free((PyObject *)self);
-#else
- self->ob_type->tp_free((PyObject *)self);
-#endif
-}
-
-static PyObject *
-BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple)
-{
- Py_ssize_t num_values, num_processors;
- PyObject **valueptr, **funcptr, **resultptr;
- PyObject *func, *result, *processed_value, *values_fastseq;
-
- num_values = PySequence_Length(values);
- num_processors = PyList_Size(processors);
- if (num_values != num_processors) {
- PyErr_Format(PyExc_RuntimeError,
- "number of values in row (%d) differ from number of column "
- "processors (%d)",
- (int)num_values, (int)num_processors);
- return NULL;
- }
-
- if (astuple) {
- result = PyTuple_New(num_values);
- } else {
- result = PyList_New(num_values);
- }
- if (result == NULL)
- return NULL;
-
- values_fastseq = PySequence_Fast(values, "row must be a sequence");
- if (values_fastseq == NULL)
- return NULL;
-
- valueptr = PySequence_Fast_ITEMS(values_fastseq);
- funcptr = PySequence_Fast_ITEMS(processors);
- resultptr = PySequence_Fast_ITEMS(result);
- while (--num_values >= 0) {
- func = *funcptr;
- if (func != Py_None) {
- processed_value = PyObject_CallFunctionObjArgs(func, *valueptr,
- NULL);
- if (processed_value == NULL) {
- Py_DECREF(values_fastseq);
- Py_DECREF(result);
- return NULL;
- }
- *resultptr = processed_value;
- } else {
- Py_INCREF(*valueptr);
- *resultptr = *valueptr;
- }
- valueptr++;
- funcptr++;
- resultptr++;
- }
- Py_DECREF(values_fastseq);
- return result;
-}
-
-static PyListObject *
-BaseRowProxy_values(BaseRowProxy *self)
-{
- return (PyListObject *)BaseRowProxy_processvalues(self->row,
- self->processors, 0);
-}
-
-static PyObject *
-BaseRowProxy_iter(BaseRowProxy *self)
-{
- PyObject *values, *result;
-
- values = BaseRowProxy_processvalues(self->row, self->processors, 1);
- if (values == NULL)
- return NULL;
-
- result = PyObject_GetIter(values);
- Py_DECREF(values);
- if (result == NULL)
- return NULL;
-
- return result;
-}
-
-static Py_ssize_t
-BaseRowProxy_length(BaseRowProxy *self)
-{
- return PySequence_Length(self->row);
-}
-
-static PyObject *
-BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
-{
- PyObject *processors, *values;
- PyObject *processor, *value, *processed_value;
- PyObject *row, *record, *result, *indexobject;
- PyObject *exc_module, *exception, *cstr_obj;
-#if PY_MAJOR_VERSION >= 3
- PyObject *bytes;
-#endif
- char *cstr_key;
- long index;
- int key_fallback = 0;
- int tuple_check = 0;
-
-#if PY_MAJOR_VERSION < 3
- if (PyInt_CheckExact(key)) {
- index = PyInt_AS_LONG(key);
- }
-#endif
-
- if (PyLong_CheckExact(key)) {
- index = PyLong_AsLong(key);
- if ((index == -1) && PyErr_Occurred())
- /* -1 can be either the actual value, or an error flag. */
- return NULL;
- } else if (PySlice_Check(key)) {
- values = PyObject_GetItem(self->row, key);
- if (values == NULL)
- return NULL;
-
- processors = PyObject_GetItem(self->processors, key);
- if (processors == NULL) {
- Py_DECREF(values);
- return NULL;
- }
-
- result = BaseRowProxy_processvalues(values, processors, 1);
- Py_DECREF(values);
- Py_DECREF(processors);
- return result;
- } else {
- record = PyDict_GetItem((PyObject *)self->keymap, key);
- if (record == NULL) {
- record = PyObject_CallMethod(self->parent, "_key_fallback",
- "O", key);
- if (record == NULL)
- return NULL;
- key_fallback = 1;
- }
-
- indexobject = PyTuple_GetItem(record, 2);
- if (indexobject == NULL)
- return NULL;
-
- if (key_fallback) {
- Py_DECREF(record);
- }
-
- if (indexobject == Py_None) {
- exc_module = PyImport_ImportModule("sqlalchemy.exc");
- if (exc_module == NULL)
- return NULL;
-
- exception = PyObject_GetAttrString(exc_module,
- "InvalidRequestError");
- Py_DECREF(exc_module);
- if (exception == NULL)
- return NULL;
-
- // wow. this seems quite excessive.
- cstr_obj = PyObject_Str(key);
- if (cstr_obj == NULL)
- return NULL;
-
-/*
- FIXME: raise encoding error exception (in both versions below)
- if the key contains non-ascii chars, instead of an
- InvalidRequestError without any message like in the
- python version.
-*/
-#if PY_MAJOR_VERSION >= 3
- bytes = PyUnicode_AsASCIIString(cstr_obj);
- if (bytes == NULL)
- return NULL;
- cstr_key = PyBytes_AS_STRING(bytes);
-#else
- cstr_key = PyString_AsString(cstr_obj);
-#endif
- if (cstr_key == NULL) {
- Py_DECREF(cstr_obj);
- return NULL;
- }
- Py_DECREF(cstr_obj);
-
- PyErr_Format(exception,
- "Ambiguous column name '%.200s' in result set! "
- "try 'use_labels' option on select statement.", cstr_key);
- return NULL;
- }
-
-#if PY_MAJOR_VERSION >= 3
- index = PyLong_AsLong(indexobject);
-#else
- index = PyInt_AsLong(indexobject);
-#endif
- if ((index == -1) && PyErr_Occurred())
- /* -1 can be either the actual value, or an error flag. */
- return NULL;
- }
- processor = PyList_GetItem(self->processors, index);
- if (processor == NULL)
- return NULL;
-
- row = self->row;
- if (PyTuple_CheckExact(row)) {
- value = PyTuple_GetItem(row, index);
- tuple_check = 1;
- }
- else {
- value = PySequence_GetItem(row, index);
- tuple_check = 0;
- }
-
- if (value == NULL)
- return NULL;
-
- if (processor != Py_None) {
- processed_value = PyObject_CallFunctionObjArgs(processor, value, NULL);
- if (!tuple_check) {
- Py_DECREF(value);
- }
- return processed_value;
- } else {
- if (tuple_check) {
- Py_INCREF(value);
- }
- return value;
- }
-}
-
-static PyObject *
-BaseRowProxy_getitem(PyObject *self, Py_ssize_t i)
-{
- PyObject *index;
-
-#if PY_MAJOR_VERSION >= 3
- index = PyLong_FromSsize_t(i);
-#else
- index = PyInt_FromSsize_t(i);
-#endif
- return BaseRowProxy_subscript((BaseRowProxy*)self, index);
-}
-
-static PyObject *
-BaseRowProxy_getattro(BaseRowProxy *self, PyObject *name)
-{
- PyObject *tmp;
-#if PY_MAJOR_VERSION >= 3
- PyObject *err_bytes;
-#endif
-
- if (!(tmp = PyObject_GenericGetAttr((PyObject *)self, name))) {
- if (!PyErr_ExceptionMatches(PyExc_AttributeError))
- return NULL;
- PyErr_Clear();
- }
- else
- return tmp;
-
- tmp = BaseRowProxy_subscript(self, name);
- if (tmp == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) {
-
-#if PY_MAJOR_VERSION >= 3
- err_bytes = PyUnicode_AsASCIIString(name);
- if (err_bytes == NULL)
- return NULL;
- PyErr_Format(
- PyExc_AttributeError,
- "Could not locate column in row for column '%.200s'",
- PyBytes_AS_STRING(err_bytes)
- );
-#else
- PyErr_Format(
- PyExc_AttributeError,
- "Could not locate column in row for column '%.200s'",
- PyString_AsString(name)
- );
-#endif
- return NULL;
- }
- return tmp;
-}
-
-/***********************
- * getters and setters *
- ***********************/
-
-static PyObject *
-BaseRowProxy_getparent(BaseRowProxy *self, void *closure)
-{
- Py_INCREF(self->parent);
- return self->parent;
-}
-
-static int
-BaseRowProxy_setparent(BaseRowProxy *self, PyObject *value, void *closure)
-{
- PyObject *module, *cls;
-
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "Cannot delete the 'parent' attribute");
- return -1;
- }
-
- module = PyImport_ImportModule("sqlalchemy.engine.result");
- if (module == NULL)
- return -1;
-
- cls = PyObject_GetAttrString(module, "ResultMetaData");
- Py_DECREF(module);
- if (cls == NULL)
- return -1;
-
- if (PyObject_IsInstance(value, cls) != 1) {
- PyErr_SetString(PyExc_TypeError,
- "The 'parent' attribute value must be an instance of "
- "ResultMetaData");
- return -1;
- }
- Py_DECREF(cls);
- Py_XDECREF(self->parent);
- Py_INCREF(value);
- self->parent = value;
-
- return 0;
-}
-
-static PyObject *
-BaseRowProxy_getrow(BaseRowProxy *self, void *closure)
-{
- Py_INCREF(self->row);
- return self->row;
-}
-
-static int
-BaseRowProxy_setrow(BaseRowProxy *self, PyObject *value, void *closure)
-{
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "Cannot delete the 'row' attribute");
- return -1;
- }
-
- if (!PySequence_Check(value)) {
- PyErr_SetString(PyExc_TypeError,
- "The 'row' attribute value must be a sequence");
- return -1;
- }
-
- Py_XDECREF(self->row);
- Py_INCREF(value);
- self->row = value;
-
- return 0;
-}
-
-static PyObject *
-BaseRowProxy_getprocessors(BaseRowProxy *self, void *closure)
-{
- Py_INCREF(self->processors);
- return self->processors;
-}
-
-static int
-BaseRowProxy_setprocessors(BaseRowProxy *self, PyObject *value, void *closure)
-{
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "Cannot delete the 'processors' attribute");
- return -1;
- }
-
- if (!PyList_CheckExact(value)) {
- PyErr_SetString(PyExc_TypeError,
- "The 'processors' attribute value must be a list");
- return -1;
- }
-
- Py_XDECREF(self->processors);
- Py_INCREF(value);
- self->processors = value;
-
- return 0;
-}
-
-static PyObject *
-BaseRowProxy_getkeymap(BaseRowProxy *self, void *closure)
-{
- Py_INCREF(self->keymap);
- return self->keymap;
-}
-
-static int
-BaseRowProxy_setkeymap(BaseRowProxy *self, PyObject *value, void *closure)
-{
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "Cannot delete the 'keymap' attribute");
- return -1;
- }
-
- if (!PyDict_CheckExact(value)) {
- PyErr_SetString(PyExc_TypeError,
- "The 'keymap' attribute value must be a dict");
- return -1;
- }
-
- Py_XDECREF(self->keymap);
- Py_INCREF(value);
- self->keymap = value;
-
- return 0;
-}
-
-static PyGetSetDef BaseRowProxy_getseters[] = {
- {"_parent",
- (getter)BaseRowProxy_getparent, (setter)BaseRowProxy_setparent,
- "ResultMetaData",
- NULL},
- {"_row",
- (getter)BaseRowProxy_getrow, (setter)BaseRowProxy_setrow,
- "Original row tuple",
- NULL},
- {"_processors",
- (getter)BaseRowProxy_getprocessors, (setter)BaseRowProxy_setprocessors,
- "list of type processors",
- NULL},
- {"_keymap",
- (getter)BaseRowProxy_getkeymap, (setter)BaseRowProxy_setkeymap,
- "Key to (processor, index) dict",
- NULL},
- {NULL}
-};
-
-static PyMethodDef BaseRowProxy_methods[] = {
- {"values", (PyCFunction)BaseRowProxy_values, METH_NOARGS,
- "Return the values represented by this BaseRowProxy as a list."},
- {"__reduce__", (PyCFunction)BaseRowProxy_reduce, METH_NOARGS,
- "Pickle support method."},
- {NULL} /* Sentinel */
-};
-
-static PySequenceMethods BaseRowProxy_as_sequence = {
- (lenfunc)BaseRowProxy_length, /* sq_length */
- 0, /* sq_concat */
- 0, /* sq_repeat */
- (ssizeargfunc)BaseRowProxy_getitem, /* sq_item */
- 0, /* sq_slice */
- 0, /* sq_ass_item */
- 0, /* sq_ass_slice */
- 0, /* sq_contains */
- 0, /* sq_inplace_concat */
- 0, /* sq_inplace_repeat */
-};
-
-static PyMappingMethods BaseRowProxy_as_mapping = {
- (lenfunc)BaseRowProxy_length, /* mp_length */
- (binaryfunc)BaseRowProxy_subscript, /* mp_subscript */
- 0 /* mp_ass_subscript */
-};
-
-static PyTypeObject BaseRowProxyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "sqlalchemy.cresultproxy.BaseRowProxy", /* tp_name */
- sizeof(BaseRowProxy), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)BaseRowProxy_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &BaseRowProxy_as_sequence, /* tp_as_sequence */
- &BaseRowProxy_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- (getattrofunc)BaseRowProxy_getattro,/* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "BaseRowProxy is a abstract base class for RowProxy", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- (getiterfunc)BaseRowProxy_iter, /* tp_iter */
- 0, /* tp_iternext */
- BaseRowProxy_methods, /* tp_methods */
- 0, /* tp_members */
- BaseRowProxy_getseters, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)BaseRowProxy_init, /* tp_init */
- 0, /* tp_alloc */
- 0 /* tp_new */
-};
-
-static PyMethodDef module_methods[] = {
- {"safe_rowproxy_reconstructor", safe_rowproxy_reconstructor, METH_VARARGS,
- "reconstruct a RowProxy instance from its pickled form."},
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
-
-#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
-#define PyMODINIT_FUNC void
-#endif
-
-
-#if PY_MAJOR_VERSION >= 3
-
-static struct PyModuleDef module_def = {
- PyModuleDef_HEAD_INIT,
- MODULE_NAME,
- MODULE_DOC,
- -1,
- module_methods
-};
-
-#define INITERROR return NULL
-
-PyMODINIT_FUNC
-PyInit_cresultproxy(void)
-
-#else
-
-#define INITERROR return
-
-PyMODINIT_FUNC
-initcresultproxy(void)
-
-#endif
-
-{
- PyObject *m;
-
- BaseRowProxyType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&BaseRowProxyType) < 0)
- INITERROR;
-
-#if PY_MAJOR_VERSION >= 3
- m = PyModule_Create(&module_def);
-#else
- m = Py_InitModule3(MODULE_NAME, module_methods, MODULE_DOC);
-#endif
- if (m == NULL)
- INITERROR;
-
- Py_INCREF(&BaseRowProxyType);
- PyModule_AddObject(m, "BaseRowProxy", (PyObject *)&BaseRowProxyType);
-
-#if PY_MAJOR_VERSION >= 3
- return m;
-#endif
-}
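The __reduce__ / safe_rowproxy_reconstructor pair above is what makes BaseRowProxy picklable: pickling captures (reconstructor, (class, __getstate__() result)), and unpickling rebuilds the object via __new__ plus __setstate__, then verifies the four slots the C type relies on (exposed through the _parent/_row/_processors/_keymap getters above). A rough pure-Python equivalent of the reconstructor, shown only as a reading aid for the C code (the function SQLAlchemy actually ships in sqlalchemy/engine/result.py may differ)::

    def rowproxy_reconstructor(cls, state):
        # cls.__new__(cls) followed by __setstate__(state), as in the C version
        obj = cls.__new__(cls)
        obj.__setstate__(state)
        # the C reconstructor additionally checks that __setstate__ filled in
        # the slots BaseRowProxy depends on
        for attr in ('_parent', '_row', '_processors', '_keymap'):
            if getattr(obj, attr, None) is None:
                raise RuntimeError(
                    "__setstate__ for BaseRowProxy subclasses must set values "
                    "for parent, row, processors and keymap")
        return obj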
diff --git a/lib/sqlalchemy/cextension/utils.c b/lib/sqlalchemy/cextension/utils.c
deleted file mode 100644
index 377ba8a8..00000000
--- a/lib/sqlalchemy/cextension/utils.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
-utils.c
-Copyright (C) 2012-2014 the SQLAlchemy authors and contributors
-
-This module is part of SQLAlchemy and is released under
-the MIT License: http://www.opensource.org/licenses/mit-license.php
-*/
-
-#include <Python.h>
-
-#define MODULE_NAME "cutils"
-#define MODULE_DOC "Module containing C versions of utility functions."
-
-/*
- Given arguments from the calling form *multiparams, **params,
- return a list of bind parameter structures, usually a list of
- dictionaries.
-
- In the case of 'raw' execution which accepts positional parameters,
- it may be a list of tuples or lists.
-
- */
-static PyObject *
-distill_params(PyObject *self, PyObject *args)
-{
- PyObject *multiparams, *params;
- PyObject *enclosing_list, *double_enclosing_list;
- PyObject *zero_element, *zero_element_item;
- Py_ssize_t multiparam_size, zero_element_length;
-
- if (!PyArg_UnpackTuple(args, "_distill_params", 2, 2, &multiparams, &params)) {
- return NULL;
- }
-
- if (multiparams != Py_None) {
- multiparam_size = PyTuple_Size(multiparams);
- if (multiparam_size < 0) {
- return NULL;
- }
- }
- else {
- multiparam_size = 0;
- }
-
- if (multiparam_size == 0) {
- if (params != Py_None && PyDict_Size(params) != 0) {
- enclosing_list = PyList_New(1);
- if (enclosing_list == NULL) {
- return NULL;
- }
- Py_INCREF(params);
- if (PyList_SetItem(enclosing_list, 0, params) == -1) {
- Py_DECREF(params);
- Py_DECREF(enclosing_list);
- return NULL;
- }
- }
- else {
- enclosing_list = PyList_New(0);
- if (enclosing_list == NULL) {
- return NULL;
- }
- }
- return enclosing_list;
- }
- else if (multiparam_size == 1) {
- zero_element = PyTuple_GetItem(multiparams, 0);
- if (PyTuple_Check(zero_element) || PyList_Check(zero_element)) {
- zero_element_length = PySequence_Length(zero_element);
-
- if (zero_element_length != 0) {
- zero_element_item = PySequence_GetItem(zero_element, 0);
- if (zero_element_item == NULL) {
- return NULL;
- }
- }
- else {
- zero_element_item = NULL;
- }
-
- if (zero_element_length == 0 ||
- (
- PyObject_HasAttrString(zero_element_item, "__iter__") &&
- !PyObject_HasAttrString(zero_element_item, "strip")
- )
- ) {
- /*
- * execute(stmt, [{}, {}, {}, ...])
- * execute(stmt, [(), (), (), ...])
- */
- Py_XDECREF(zero_element_item);
- Py_INCREF(zero_element);
- return zero_element;
- }
- else {
- /*
- * execute(stmt, ("value", "value"))
- */
- Py_XDECREF(zero_element_item);
- enclosing_list = PyList_New(1);
- if (enclosing_list == NULL) {
- return NULL;
- }
- Py_INCREF(zero_element);
- if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) {
- Py_DECREF(zero_element);
- Py_DECREF(enclosing_list);
- return NULL;
- }
- return enclosing_list;
- }
- }
- else if (PyObject_HasAttrString(zero_element, "keys")) {
- /*
- * execute(stmt, {"key":"value"})
- */
- enclosing_list = PyList_New(1);
- if (enclosing_list == NULL) {
- return NULL;
- }
- Py_INCREF(zero_element);
- if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) {
- Py_DECREF(zero_element);
- Py_DECREF(enclosing_list);
- return NULL;
- }
- return enclosing_list;
- } else {
- enclosing_list = PyList_New(1);
- if (enclosing_list == NULL) {
- return NULL;
- }
- double_enclosing_list = PyList_New(1);
- if (double_enclosing_list == NULL) {
- Py_DECREF(enclosing_list);
- return NULL;
- }
- Py_INCREF(zero_element);
- if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) {
- Py_DECREF(zero_element);
- Py_DECREF(enclosing_list);
- Py_DECREF(double_enclosing_list);
- return NULL;
- }
- if (PyList_SetItem(double_enclosing_list, 0, enclosing_list) == -1) {
- Py_DECREF(zero_element);
- Py_DECREF(enclosing_list);
- Py_DECREF(double_enclosing_list);
- return NULL;
- }
- return double_enclosing_list;
- }
- }
- else {
- zero_element = PyTuple_GetItem(multiparams, 0);
- if (PyObject_HasAttrString(zero_element, "__iter__") &&
- !PyObject_HasAttrString(zero_element, "strip")
- ) {
- Py_INCREF(multiparams);
- return multiparams;
- }
- else {
- enclosing_list = PyList_New(1);
- if (enclosing_list == NULL) {
- return NULL;
- }
- Py_INCREF(multiparams);
- if (PyList_SetItem(enclosing_list, 0, multiparams) == -1) {
- Py_DECREF(multiparams);
- Py_DECREF(enclosing_list);
- return NULL;
- }
- return enclosing_list;
- }
- }
-}
-
-static PyMethodDef module_methods[] = {
- {"_distill_params", distill_params, METH_VARARGS,
- "Distill an execute() parameter structure."},
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
-
-#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
-#define PyMODINIT_FUNC void
-#endif
-
-#if PY_MAJOR_VERSION >= 3
-
-static struct PyModuleDef module_def = {
- PyModuleDef_HEAD_INIT,
- MODULE_NAME,
- MODULE_DOC,
- -1,
- module_methods
- };
-#endif
-
-
-#if PY_MAJOR_VERSION >= 3
-PyMODINIT_FUNC
-PyInit_cutils(void)
-#else
-PyMODINIT_FUNC
-initcutils(void)
-#endif
-{
- PyObject *m;
-
-#if PY_MAJOR_VERSION >= 3
- m = PyModule_Create(&module_def);
-#else
- m = Py_InitModule3(MODULE_NAME, module_methods, MODULE_DOC);
-#endif
-
-#if PY_MAJOR_VERSION >= 3
- if (m == NULL)
- return NULL;
- return m;
-#else
- if (m == NULL)
- return;
-#endif
-}
-
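distill_params() above normalizes the (*multiparams, **params) calling convention of Connection.execute() into a list of parameter sets. A pure-Python sketch of the same branching, given here only as a reading aid (SQLAlchemy ships an equivalent _distill_params() in sqlalchemy/engine/util.py, which this C function accelerates; this is not that exact code)::

    def distill_params(multiparams, params):
        if not multiparams:
            # execute(stmt) or execute(stmt, **params)
            return [params] if params else []
        elif len(multiparams) == 1:
            zero = multiparams[0]
            if isinstance(zero, (list, tuple)):
                if not zero or (hasattr(zero[0], '__iter__')
                                and not hasattr(zero[0], 'strip')):
                    # execute(stmt, [{}, {}, ...]) / execute(stmt, [(), (), ...])
                    return zero
                # execute(stmt, ("value", "value"))
                return [zero]
            elif hasattr(zero, 'keys'):
                # execute(stmt, {"key": "value"})
                return [zero]
            # execute(stmt, "value")
            return [[zero]]
        else:
            zero = multiparams[0]
            if hasattr(zero, '__iter__') and not hasattr(zero, 'strip'):
                # execute(stmt, {...}, {...}, ...)
                return multiparams
            # execute(stmt, "value", "value")
            return [multiparams]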
diff --git a/lib/sqlalchemy/connectors/__init__.py b/lib/sqlalchemy/connectors/__init__.py
deleted file mode 100644
index 761024fe..00000000
--- a/lib/sqlalchemy/connectors/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# connectors/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-class Connector(object):
- pass
diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py
deleted file mode 100644
index e5562a25..00000000
--- a/lib/sqlalchemy/connectors/mxodbc.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# connectors/mxodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-Provide an SQLAlchemy connector for the eGenix mxODBC commercial
-Python adapter for ODBC. This is not a free product, but eGenix
-provides SQLAlchemy with a license for use in continuous integration
-testing.
-
-This has been tested for use with mxODBC 3.1.2 on SQL Server 2005
-and 2008, using the SQL Server Native driver. However, it is
-possible for this to be used on other database platforms.
-
-For more info on mxODBC, see http://www.egenix.com/
-
-"""
-
-import sys
-import re
-import warnings
-
-from . import Connector
-
-
-class MxODBCConnector(Connector):
- driver = 'mxodbc'
-
- supports_sane_multi_rowcount = False
- supports_unicode_statements = True
- supports_unicode_binds = True
-
- supports_native_decimal = True
-
- @classmethod
- def dbapi(cls):
- # this classmethod will normally be replaced by an instance
- # attribute of the same name, so this is normally only called once.
- cls._load_mx_exceptions()
- platform = sys.platform
- if platform == 'win32':
- from mx.ODBC import Windows as module
- # this can be the string "linux2", and possibly others
- elif 'linux' in platform:
- from mx.ODBC import unixODBC as module
- elif platform == 'darwin':
- from mx.ODBC import iODBC as module
- else:
- raise ImportError("Unrecognized platform for mxODBC import")
- return module
-
- @classmethod
- def _load_mx_exceptions(cls):
- """ Import mxODBC exception classes into the module namespace,
- as if they had been imported normally. This is done here
- to avoid requiring all SQLAlchemy users to install mxODBC.
- """
- global InterfaceError, ProgrammingError
- from mx.ODBC import InterfaceError
- from mx.ODBC import ProgrammingError
-
- def on_connect(self):
- def connect(conn):
- conn.stringformat = self.dbapi.MIXED_STRINGFORMAT
- conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT
- conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT
- conn.errorhandler = self._error_handler()
- return connect
-
- def _error_handler(self):
- """ Return a handler that adjusts mxODBC's raised Warnings to
- emit Python standard warnings.
- """
- from mx.ODBC.Error import Warning as MxOdbcWarning
-
- def error_handler(connection, cursor, errorclass, errorvalue):
- if issubclass(errorclass, MxOdbcWarning):
- errorclass.__bases__ = (Warning,)
- warnings.warn(message=str(errorvalue),
- category=errorclass,
- stacklevel=2)
- else:
- raise errorclass(errorvalue)
- return error_handler
-
- def create_connect_args(self, url):
- """ Return a tuple of *args,**kwargs for creating a connection.
-
- The mxODBC 3.x connection constructor looks like this:
-
- connect(dsn, user='', password='',
- clear_auto_commit=1, errorhandler=None)
-
- This method translates the values in the provided uri
- into args and kwargs needed to instantiate an mxODBC Connection.
-
- The arg 'errorhandler' is not used by SQLAlchemy and will
- not be populated.
-
- """
- opts = url.translate_connect_args(username='user')
- opts.update(url.query)
- args = opts.pop('host')
- opts.pop('port', None)
- opts.pop('database', None)
- return (args,), opts
-
- def is_disconnect(self, e, connection, cursor):
- # TODO: eGenix recommends checking connection.closed here
- # Does that detect dropped connections ?
- if isinstance(e, self.dbapi.ProgrammingError):
- return "connection already closed" in str(e)
- elif isinstance(e, self.dbapi.Error):
- return '[08S01]' in str(e)
- else:
- return False
-
- def _get_server_version_info(self, connection):
- # eGenix suggests using conn.dbms_version instead
- # of what we're doing here
- dbapi_con = connection.connection
- version = []
- r = re.compile('[.\-]')
- # 18 == pyodbc.SQL_DBMS_VER
- for n in r.split(dbapi_con.getinfo(18)[1]):
- try:
- version.append(int(n))
- except ValueError:
- version.append(n)
- return tuple(version)
-
- def _get_direct(self, context):
- if context:
- native_odbc_execute = context.execution_options.\
- get('native_odbc_execute', 'auto')
- # default to direct=True in all cases, is more generally
- # compatible especially with SQL Server
- return False if native_odbc_execute is True else True
- else:
- return True
-
- def do_executemany(self, cursor, statement, parameters, context=None):
- cursor.executemany(
- statement, parameters, direct=self._get_direct(context))
-
- def do_execute(self, cursor, statement, parameters, context=None):
- cursor.execute(statement, parameters, direct=self._get_direct(context))
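To make create_connect_args() above concrete: for a DSN-style URL it produces the positional DSN plus keyword credentials expected by mxODBC's connect(dsn, user='', password=''). Illustration only, against an older SQLAlchemy that still ships sqlalchemy.connectors.mxodbc; building the arguments does not itself require mxODBC to be installed::

    from sqlalchemy.engine.url import make_url
    from sqlalchemy.connectors.mxodbc import MxODBCConnector

    url = make_url("mssql+mxodbc://scott:tiger@my_dsn")
    args, kwargs = MxODBCConnector().create_connect_args(url)
    # args   -> ('my_dsn',)
    # kwargs -> {'user': 'scott', 'password': 'tiger'}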
diff --git a/lib/sqlalchemy/connectors/mysqldb.py b/lib/sqlalchemy/connectors/mysqldb.py
deleted file mode 100644
index e4efb220..00000000
--- a/lib/sqlalchemy/connectors/mysqldb.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# connectors/mysqldb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Define behaviors common to MySQLdb dialects.
-
-Currently includes MySQL and Drizzle.
-
-"""
-
-from . import Connector
-from ..engine import base as engine_base, default
-from ..sql import operators as sql_operators
-from .. import exc, log, schema, sql, types as sqltypes, util, processors
-import re
-
-
-# the subclassing of Connector by all classes
-# here is not strictly necessary
-
-
-class MySQLDBExecutionContext(Connector):
-
- @property
- def rowcount(self):
- if hasattr(self, '_rowcount'):
- return self._rowcount
- else:
- return self.cursor.rowcount
-
-
-class MySQLDBCompiler(Connector):
- def visit_mod_binary(self, binary, operator, **kw):
- return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
-
- def post_process_text(self, text):
- return text.replace('%', '%%')
-
-
-class MySQLDBIdentifierPreparer(Connector):
-
- def _escape_identifier(self, value):
- value = value.replace(self.escape_quote, self.escape_to_quote)
- return value.replace("%", "%%")
-
-
-class MySQLDBConnector(Connector):
- driver = 'mysqldb'
- supports_unicode_statements = False
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = True
-
- supports_native_decimal = True
-
- default_paramstyle = 'format'
-
- @classmethod
- def dbapi(cls):
- # is overridden when pymysql is used
- return __import__('MySQLdb')
-
-
- def do_executemany(self, cursor, statement, parameters, context=None):
- rowcount = cursor.executemany(statement, parameters)
- if context is not None:
- context._rowcount = rowcount
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(database='db', username='user',
- password='passwd')
- opts.update(url.query)
-
- util.coerce_kw_type(opts, 'compress', bool)
- util.coerce_kw_type(opts, 'connect_timeout', int)
- util.coerce_kw_type(opts, 'read_timeout', int)
- util.coerce_kw_type(opts, 'client_flag', int)
- util.coerce_kw_type(opts, 'local_infile', int)
- # Note: using either of the below will cause all strings to be returned
- # as Unicode, both in raw SQL operations and with column types like
- # String and MSString.
- util.coerce_kw_type(opts, 'use_unicode', bool)
- util.coerce_kw_type(opts, 'charset', str)
-
- # Rich values 'cursorclass' and 'conv' are not supported via
- # query string.
-
- ssl = {}
- keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']
- for key in keys:
- if key in opts:
- ssl[key[4:]] = opts[key]
- util.coerce_kw_type(ssl, key[4:], str)
- del opts[key]
- if ssl:
- opts['ssl'] = ssl
-
- # FOUND_ROWS must be set in CLIENT_FLAGS to enable
- # supports_sane_rowcount.
- client_flag = opts.get('client_flag', 0)
- if self.dbapi is not None:
- try:
- CLIENT_FLAGS = __import__(
- self.dbapi.__name__ + '.constants.CLIENT'
- ).constants.CLIENT
- client_flag |= CLIENT_FLAGS.FOUND_ROWS
- except (AttributeError, ImportError):
- self.supports_sane_rowcount = False
- opts['client_flag'] = client_flag
- return [[], opts]
-
- def _get_server_version_info(self, connection):
- dbapi_con = connection.connection
- version = []
- r = re.compile('[.\-]')
- for n in r.split(dbapi_con.get_server_info()):
- try:
- version.append(int(n))
- except ValueError:
- version.append(n)
- return tuple(version)
-
- def _extract_error_code(self, exception):
- return exception.args[0]
-
- def _detect_charset(self, connection):
- """Sniff out the character set in use for connection results."""
-
- try:
- # note: the SQL here would be
- # "SHOW VARIABLES LIKE 'character_set%%'"
- cset_name = connection.connection.character_set_name
- except AttributeError:
- util.warn(
- "No 'character_set_name' can be detected with "
- "this MySQL-Python version; "
- "please upgrade to a recent version of MySQL-Python. "
- "Assuming latin1.")
- return 'latin1'
- else:
- return cset_name()
-
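As an illustration of create_connect_args() above: ssl_* query-string entries are folded into a single 'ssl' dictionary, and FOUND_ROWS is OR-ed into client_flag when MySQLdb's constants are importable. A sketch against an older SQLAlchemy that still ships this module (MySQLdb is not required just to build the option dict)::

    from sqlalchemy.engine.url import make_url
    from sqlalchemy.connectors.mysqldb import MySQLDBConnector

    url = make_url("mysql+mysqldb://scott:tiger@localhost/test"
                   "?charset=utf8&ssl_ca=/etc/ssl/ca.pem")
    args, opts = MySQLDBConnector().create_connect_args(url)
    # opts is roughly:
    # {'host': 'localhost', 'db': 'test', 'user': 'scott', 'passwd': 'tiger',
    #  'charset': 'utf8', 'ssl': {'ca': '/etc/ssl/ca.pem'},
    #  'client_flag': 0}   # gains FOUND_ROWS when MySQLdb's constants import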
diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py
deleted file mode 100644
index 284de288..00000000
--- a/lib/sqlalchemy/connectors/pyodbc.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# connectors/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from . import Connector
-from .. import util
-
-
-import sys
-import re
-
-
-class PyODBCConnector(Connector):
- driver = 'pyodbc'
-
- supports_sane_multi_rowcount = False
-
- if util.py2k:
- # PyODBC unicode is broken on UCS-4 builds
- supports_unicode = sys.maxunicode == 65535
- supports_unicode_statements = supports_unicode
-
- supports_native_decimal = True
- default_paramstyle = 'named'
-
- # for non-DSN connections, this should
- # hold the desired driver name
- pyodbc_driver_name = None
-
- # will be set to True after initialize()
- # if the freetds.so is detected
- freetds = False
-
- # will be set to the string version of
- # the FreeTDS driver if freetds is detected
- freetds_driver_version = None
-
- # will be set to True after initialize()
- # if the libessqlsrv.so is detected
- easysoft = False
-
- def __init__(self, supports_unicode_binds=None, **kw):
- super(PyODBCConnector, self).__init__(**kw)
- self._user_supports_unicode_binds = supports_unicode_binds
-
- @classmethod
- def dbapi(cls):
- return __import__('pyodbc')
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- opts.update(url.query)
-
- keys = opts
- query = url.query
-
- connect_args = {}
- for param in ('ansi', 'unicode_results', 'autocommit'):
- if param in keys:
- connect_args[param] = util.asbool(keys.pop(param))
-
- if 'odbc_connect' in keys:
- connectors = [util.unquote_plus(keys.pop('odbc_connect'))]
- else:
- dsn_connection = 'dsn' in keys or \
- ('host' in keys and 'database' not in keys)
- if dsn_connection:
- connectors = ['dsn=%s' % (keys.pop('host', '') or \
- keys.pop('dsn', ''))]
- else:
- port = ''
- if 'port' in keys and not 'port' in query:
- port = ',%d' % int(keys.pop('port'))
-
- connectors = ["DRIVER={%s}" %
- keys.pop('driver', self.pyodbc_driver_name),
- 'Server=%s%s' % (keys.pop('host', ''), port),
- 'Database=%s' % keys.pop('database', '')]
-
- user = keys.pop("user", None)
- if user:
- connectors.append("UID=%s" % user)
- connectors.append("PWD=%s" % keys.pop('password', ''))
- else:
- connectors.append("Trusted_Connection=Yes")
-
- # if set to 'Yes', the ODBC layer will try to automagically
- # convert textual data from your database encoding to your
- # client encoding. This should obviously be set to 'No' if
- # you query a cp1253 encoded database from a latin1 client...
- if 'odbc_autotranslate' in keys:
- connectors.append("AutoTranslate=%s" %
- keys.pop("odbc_autotranslate"))
-
- connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
- return [[";".join(connectors)], connect_args]
-
- def is_disconnect(self, e, connection, cursor):
- if isinstance(e, self.dbapi.ProgrammingError):
- return "The cursor's connection has been closed." in str(e) or \
- 'Attempt to use a closed connection.' in str(e)
- elif isinstance(e, self.dbapi.Error):
- return '[08S01]' in str(e)
- else:
- return False
-
- def initialize(self, connection):
- # determine FreeTDS first. can't issue SQL easily
- # without getting unicode_statements/binds set up.
-
- pyodbc = self.dbapi
-
- dbapi_con = connection.connection
-
- _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
- self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name
- ))
- self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name
- ))
-
- if self.freetds:
- self.freetds_driver_version = dbapi_con.getinfo(
- pyodbc.SQL_DRIVER_VER)
-
- self.supports_unicode_statements = (
- not util.py2k or
- (not self.freetds and not self.easysoft)
- )
-
- if self._user_supports_unicode_binds is not None:
- self.supports_unicode_binds = self._user_supports_unicode_binds
- elif util.py2k:
- self.supports_unicode_binds = (
- not self.freetds or self.freetds_driver_version >= '0.91'
- ) and not self.easysoft
- else:
- self.supports_unicode_binds = True
-
- # run other initialization which asks for user name, etc.
- super(PyODBCConnector, self).initialize(connection)
-
- def _dbapi_version(self):
- if not self.dbapi:
- return ()
- return self._parse_dbapi_version(self.dbapi.version)
-
- def _parse_dbapi_version(self, vers):
- m = re.match(
- r'(?:py.*-)?([\d\.]+)(?:-(\w+))?',
- vers
- )
- if not m:
- return ()
- vers = tuple([int(x) for x in m.group(1).split(".")])
- if m.group(2):
- vers += (m.group(2),)
- return vers
-
- def _get_server_version_info(self, connection):
- dbapi_con = connection.connection
- version = []
- r = re.compile('[.\-]')
- for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)):
- try:
- version.append(int(n))
- except ValueError:
- version.append(n)
- return tuple(version)
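For the hostname branch of create_connect_args() above, this is roughly the ODBC connection string that gets assembled. Illustration only, against an older SQLAlchemy that still ships sqlalchemy.connectors.pyodbc; pyodbc itself is not needed to build the string, and the driver name is a placeholder normally supplied by the concrete dialect::

    from sqlalchemy.engine.url import make_url
    from sqlalchemy.connectors.pyodbc import PyODBCConnector

    connector = PyODBCConnector()
    connector.pyodbc_driver_name = "SQL Server"   # placeholder; set by dialects

    url = make_url("mssql+pyodbc://scott:tiger@dbhost:1433/mydb")
    (connect_str,), connect_args = connector.create_connect_args(url)
    # connect_str is roughly:
    # "DRIVER={SQL Server};Server=dbhost,1433;Database=mydb;UID=scott;PWD=tiger"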
diff --git a/lib/sqlalchemy/connectors/zxJDBC.py b/lib/sqlalchemy/connectors/zxJDBC.py
deleted file mode 100644
index e0bbc573..00000000
--- a/lib/sqlalchemy/connectors/zxJDBC.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# connectors/zxJDBC.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-import sys
-from . import Connector
-
-
-class ZxJDBCConnector(Connector):
- driver = 'zxjdbc'
-
- supports_sane_rowcount = False
- supports_sane_multi_rowcount = False
-
- supports_unicode_binds = True
- supports_unicode_statements = sys.version > '2.5.0+'
- description_encoding = None
- default_paramstyle = 'qmark'
-
- jdbc_db_name = None
- jdbc_driver_name = None
-
- @classmethod
- def dbapi(cls):
- from com.ziclix.python.sql import zxJDBC
- return zxJDBC
-
- def _driver_kwargs(self):
- """Return kw arg dict to be sent to connect()."""
- return {}
-
- def _create_jdbc_url(self, url):
- """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
- return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
- url.port is not None
- and ':%s' % url.port or '',
- url.database)
-
- def create_connect_args(self, url):
- opts = self._driver_kwargs()
- opts.update(url.query)
- return [
- [self._create_jdbc_url(url),
- url.username, url.password,
- self.jdbc_driver_name],
- opts]
-
- def is_disconnect(self, e, connection, cursor):
- if not isinstance(e, self.dbapi.ProgrammingError):
- return False
- e = str(e)
- return 'connection is closed' in e or 'cursor is closed' in e
-
- def _get_server_version_info(self, connection):
- # use connection.connection.dbversion, and parse appropriately
- # to get a tuple
- raise NotImplementedError()
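The URL translation done by _create_jdbc_url() above can be exercised on its own; the db/driver names below are placeholders that a concrete zxjdbc dialect would normally provide, and actually connecting requires Jython with zxJDBC::

    from sqlalchemy.engine.url import make_url
    from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector

    connector = ZxJDBCConnector()
    connector.jdbc_db_name = "postgresql"                  # placeholder
    connector.jdbc_driver_name = "org.postgresql.Driver"   # placeholder

    url = make_url("postgresql+zxjdbc://scott:tiger@dbhost:5432/mydb")
    print(connector._create_jdbc_url(url))
    # -> jdbc:postgresql://dbhost:5432/mydb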
diff --git a/lib/sqlalchemy/databases/__init__.py b/lib/sqlalchemy/databases/__init__.py
deleted file mode 100644
index 915eefa4..00000000
--- a/lib/sqlalchemy/databases/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# databases/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Include imports from the sqlalchemy.dialects package for backwards
-compatibility with pre 0.6 versions.
-
-"""
-from ..dialects.sqlite import base as sqlite
-from ..dialects.postgresql import base as postgresql
-postgres = postgresql
-from ..dialects.mysql import base as mysql
-from ..dialects.drizzle import base as drizzle
-from ..dialects.oracle import base as oracle
-from ..dialects.firebird import base as firebird
-from ..dialects.mssql import base as mssql
-from ..dialects.sybase import base as sybase
-
-
-__all__ = (
- 'drizzle',
- 'firebird',
- 'mssql',
- 'mysql',
- 'postgresql',
- 'sqlite',
- 'oracle',
- 'sybase',
- )
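The module above exists purely so pre-0.6 import paths keep working; each name is a re-export of the corresponding dialect's base module. For example (assuming a SQLAlchemy version that still ships sqlalchemy.databases)::

    from sqlalchemy.databases import postgres
    from sqlalchemy.dialects.postgresql import base as postgresql

    # the legacy name and the dialect base module are the same module object
    assert postgres is postgresql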
diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py
deleted file mode 100644
index 974d4f78..00000000
--- a/lib/sqlalchemy/dialects/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# dialects/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-__all__ = (
- 'drizzle',
- 'firebird',
- 'mssql',
- 'mysql',
- 'oracle',
- 'postgresql',
- 'sqlite',
- 'sybase',
- )
-
-from .. import util
-
-def _auto_fn(name):
- """default dialect importer.
-
- plugs into the :class:`.PluginLoader`
- as a first-hit system.
-
- """
- if "." in name:
- dialect, driver = name.split(".")
- else:
- dialect = name
- driver = "base"
- try:
- module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects
- except ImportError:
- return None
-
- module = getattr(module, dialect)
- if hasattr(module, driver):
- module = getattr(module, driver)
- return lambda: module.dialect
- else:
- return None
-
-registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)
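The _auto_fn() hook above lets the PluginLoader resolve a "<dialect>.<driver>" name to a dialect class on first use, with no explicit registration. For example, with the bundled sqlite/pysqlite pairing::

    from sqlalchemy.dialects import registry

    # resolved via _auto_fn: imports sqlalchemy.dialects.sqlite, picks its
    # 'pysqlite' submodule, and returns that module's `dialect` class
    dialect_cls = registry.load("sqlite.pysqlite")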
diff --git a/lib/sqlalchemy/dialects/drizzle/__init__.py b/lib/sqlalchemy/dialects/drizzle/__init__.py
deleted file mode 100644
index 1392b8e2..00000000
--- a/lib/sqlalchemy/dialects/drizzle/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from sqlalchemy.dialects.drizzle import base, mysqldb
-
-base.dialect = mysqldb.dialect
-
-from sqlalchemy.dialects.drizzle.base import \
- BIGINT, BINARY, BLOB, \
- BOOLEAN, CHAR, DATE, \
- DATETIME, DECIMAL, DOUBLE, \
- ENUM, FLOAT, INTEGER, \
- NUMERIC, REAL, TEXT, \
- TIME, TIMESTAMP, VARBINARY, \
- VARCHAR, dialect
-
-__all__ = (
- 'BIGINT', 'BINARY', 'BLOB',
- 'BOOLEAN', 'CHAR', 'DATE',
- 'DATETIME', 'DECIMAL', 'DOUBLE',
- 'ENUM', 'FLOAT', 'INTEGER',
- 'NUMERIC', 'REAL', 'TEXT',
- 'TIME', 'TIMESTAMP', 'VARBINARY',
- 'VARCHAR', 'dialect'
-)
diff --git a/lib/sqlalchemy/dialects/drizzle/base.py b/lib/sqlalchemy/dialects/drizzle/base.py
deleted file mode 100644
index b5addb42..00000000
--- a/lib/sqlalchemy/dialects/drizzle/base.py
+++ /dev/null
@@ -1,498 +0,0 @@
-# drizzle/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-# Copyright (C) 2010-2011 Monty Taylor
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-"""
-
-.. dialect:: drizzle
- :name: Drizzle
-
-Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine
-is InnoDB (transactions, foreign-keys) rather than MyISAM. For more
-`Notable Differences `_, visit
-the `Drizzle Documentation `_.
-
-The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of
-the :doc:`SQLAlchemy MySQL ` documentation is also relevant.
-
-
-"""
-
-from sqlalchemy import exc
-from sqlalchemy import log
-from sqlalchemy import types as sqltypes
-from sqlalchemy.engine import reflection
-from sqlalchemy.dialects.mysql import base as mysql_dialect
-from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
- BLOB, BINARY, VARBINARY
-
-
-class _NumericType(object):
- """Base for Drizzle numeric types."""
-
- def __init__(self, **kw):
- super(_NumericType, self).__init__(**kw)
-
-
-class _FloatType(_NumericType, sqltypes.Float):
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- if isinstance(self, (REAL, DOUBLE)) and \
- (
- (precision is None and scale is not None) or
- (precision is not None and scale is None)
- ):
- raise exc.ArgumentError(
- "You must specify both precision and scale or omit "
- "both altogether.")
-
- super(_FloatType, self).__init__(precision=precision,
- asdecimal=asdecimal, **kw)
- self.scale = scale
-
-
-class _StringType(mysql_dialect._StringType):
- """Base for Drizzle string types."""
-
- def __init__(self, collation=None, binary=False, **kw):
- kw['national'] = False
- super(_StringType, self).__init__(collation=collation, binary=binary,
- **kw)
-
-
-class NUMERIC(_NumericType, sqltypes.NUMERIC):
- """Drizzle NUMERIC type."""
-
- __visit_name__ = 'NUMERIC'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a NUMERIC.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- """
-
- super(NUMERIC, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class DECIMAL(_NumericType, sqltypes.DECIMAL):
- """Drizzle DECIMAL type."""
-
- __visit_name__ = 'DECIMAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DECIMAL.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- """
- super(DECIMAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class DOUBLE(_FloatType):
- """Drizzle DOUBLE type."""
-
- __visit_name__ = 'DOUBLE'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DOUBLE.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- """
-
- super(DOUBLE, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class REAL(_FloatType, sqltypes.REAL):
- """Drizzle REAL type."""
-
- __visit_name__ = 'REAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a REAL.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- """
-
- super(REAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class FLOAT(_FloatType, sqltypes.FLOAT):
- """Drizzle FLOAT type."""
-
- __visit_name__ = 'FLOAT'
-
- def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
- """Construct a FLOAT.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- """
-
- super(FLOAT, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
- def bind_processor(self, dialect):
- return None
-
-
-class INTEGER(sqltypes.INTEGER):
- """Drizzle INTEGER type."""
-
- __visit_name__ = 'INTEGER'
-
- def __init__(self, **kw):
- """Construct an INTEGER."""
-
- super(INTEGER, self).__init__(**kw)
-
-
-class BIGINT(sqltypes.BIGINT):
- """Drizzle BIGINTEGER type."""
-
- __visit_name__ = 'BIGINT'
-
- def __init__(self, **kw):
- """Construct a BIGINTEGER."""
-
- super(BIGINT, self).__init__(**kw)
-
-
-class TIME(mysql_dialect.TIME):
- """Drizzle TIME type."""
-
-
-class TIMESTAMP(sqltypes.TIMESTAMP):
- """Drizzle TIMESTAMP type."""
-
- __visit_name__ = 'TIMESTAMP'
-
-
-class TEXT(_StringType, sqltypes.TEXT):
- """Drizzle TEXT type, for text up to 2^16 characters."""
-
- __visit_name__ = 'TEXT'
-
- def __init__(self, length=None, **kw):
- """Construct a TEXT.
-
- :param length: Optional, if provided the server may optimize storage
- by substituting the smallest TEXT type sufficient to store
- ``length`` characters.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
-
- super(TEXT, self).__init__(length=length, **kw)
-
-
-class VARCHAR(_StringType, sqltypes.VARCHAR):
- """Drizzle VARCHAR type, for variable-length character data."""
-
- __visit_name__ = 'VARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a VARCHAR.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
-
- super(VARCHAR, self).__init__(length=length, **kwargs)
-
-
-class CHAR(_StringType, sqltypes.CHAR):
- """Drizzle CHAR type, for fixed-length character data."""
-
- __visit_name__ = 'CHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a CHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
-
- super(CHAR, self).__init__(length=length, **kwargs)
-
-
-class ENUM(mysql_dialect.ENUM):
- """Drizzle ENUM type."""
-
- def __init__(self, *enums, **kw):
- """Construct an ENUM.
-
- Example:
-
- Column('myenum', ENUM("foo", "bar", "baz"))
-
- :param enums: The range of valid values for this ENUM. Values will be
- quoted when generating the schema according to the quoting flag (see
- below).
-
- :param strict: Defaults to False: ensure that a given value is in this
- ENUM's range of permissible values when inserting or updating rows.
- Note that Drizzle will not raise a fatal error if you attempt to
- store an out of range value- an alternate value will be stored
- instead.
- (See Drizzle ENUM documentation.)
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- :param quoting: Defaults to 'auto': automatically determine enum value
- quoting. If all enum values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
- """
-
- super(ENUM, self).__init__(*enums, **kw)
-
-
-class _DrizzleBoolean(sqltypes.Boolean):
- def get_dbapi_type(self, dbapi):
- return dbapi.NUMERIC
-
-
-colspecs = {
- sqltypes.Numeric: NUMERIC,
- sqltypes.Float: FLOAT,
- sqltypes.Time: TIME,
- sqltypes.Enum: ENUM,
- sqltypes.Boolean: _DrizzleBoolean,
-}
-
-
-# All the types we have in Drizzle
-ischema_names = {
- 'BIGINT': BIGINT,
- 'BINARY': BINARY,
- 'BLOB': BLOB,
- 'BOOLEAN': BOOLEAN,
- 'CHAR': CHAR,
- 'DATE': DATE,
- 'DATETIME': DATETIME,
- 'DECIMAL': DECIMAL,
- 'DOUBLE': DOUBLE,
- 'ENUM': ENUM,
- 'FLOAT': FLOAT,
- 'INT': INTEGER,
- 'INTEGER': INTEGER,
- 'NUMERIC': NUMERIC,
- 'TEXT': TEXT,
- 'TIME': TIME,
- 'TIMESTAMP': TIMESTAMP,
- 'VARBINARY': VARBINARY,
- 'VARCHAR': VARCHAR,
-}
-
-
-class DrizzleCompiler(mysql_dialect.MySQLCompiler):
-
- def visit_typeclause(self, typeclause):
- type_ = typeclause.type.dialect_impl(self.dialect)
- if isinstance(type_, sqltypes.Integer):
- return 'INTEGER'
- else:
- return super(DrizzleCompiler, self).visit_typeclause(typeclause)
-
- def visit_cast(self, cast, **kwargs):
- type_ = self.process(cast.typeclause)
- if type_ is None:
- return self.process(cast.clause)
-
- return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
-
-
-class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler):
- pass
-
-
-class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler):
- def _extend_numeric(self, type_, spec):
- return spec
-
- def _extend_string(self, type_, defaults, spec):
- """Extend a string-type declaration with standard SQL
- COLLATE annotations and Drizzle specific extensions.
-
- """
-
- def attr(name):
- return getattr(type_, name, defaults.get(name))
-
- if attr('collation'):
- collation = 'COLLATE %s' % type_.collation
- elif attr('binary'):
- collation = 'BINARY'
- else:
- collation = None
-
- return ' '.join([c for c in (spec, collation)
- if c is not None])
-
- def visit_NCHAR(self, type):
- raise NotImplementedError("Drizzle does not support NCHAR")
-
- def visit_NVARCHAR(self, type):
- raise NotImplementedError("Drizzle does not support NVARCHAR")
-
- def visit_FLOAT(self, type_):
- if type_.scale is not None and type_.precision is not None:
- return "FLOAT(%s, %s)" % (type_.precision, type_.scale)
- else:
- return "FLOAT"
-
- def visit_BOOLEAN(self, type_):
- return "BOOLEAN"
-
- def visit_BLOB(self, type_):
- return "BLOB"
-
-
-class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext):
- pass
-
-
-class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer):
- pass
-
-
-@log.class_logger
-class DrizzleDialect(mysql_dialect.MySQLDialect):
- """Details of the Drizzle dialect.
-
- Not used directly in application code.
- """
-
- name = 'drizzle'
-
- _supports_cast = True
- supports_sequences = False
- supports_native_boolean = True
- supports_views = False
-
- default_paramstyle = 'format'
- colspecs = colspecs
-
- statement_compiler = DrizzleCompiler
- ddl_compiler = DrizzleDDLCompiler
- type_compiler = DrizzleTypeCompiler
- ischema_names = ischema_names
- preparer = DrizzleIdentifierPreparer
-
- def on_connect(self):
- """Force autocommit - Drizzle Bug#707842 doesn't set this properly"""
-
- def connect(conn):
- conn.autocommit(False)
- return connect
-
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
- """Return a Unicode SHOW TABLES from a given schema."""
-
- if schema is not None:
- current_schema = schema
- else:
- current_schema = self.default_schema_name
-
- charset = 'utf8'
- rp = connection.execute("SHOW TABLES FROM %s" %
- self.identifier_preparer.quote_identifier(current_schema))
- return [row[0] for row in self._compat_fetchall(rp, charset=charset)]
-
- @reflection.cache
- def get_view_names(self, connection, schema=None, **kw):
- raise NotImplementedError
-
- def _detect_casing(self, connection):
- """Sniff out identifier case sensitivity.
-
- Cached per-connection. This value can not change without a server
- restart.
- """
-
- return 0
-
- def _detect_collations(self, connection):
- """Pull the active COLLATIONS list from the server.
-
- Cached per-connection.
- """
-
- collations = {}
- charset = self._connection_charset
- rs = connection.execute(
- 'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM'
- ' data_dictionary.COLLATIONS')
- for row in self._compat_fetchall(rs, charset):
- collations[row[0]] = row[1]
- return collations
-
- def _detect_ansiquotes(self, connection):
- """Detect and adjust for the ANSI_QUOTES sql mode."""
-
- self._server_ansiquotes = False
- self._backslash_escapes = False
-
-
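A sketch of how the Drizzle column types above are used at the DDL level; compiling the statement needs neither a Drizzle server nor a DBAPI, assuming a SQLAlchemy version that still bundles this dialect (the exact rendered DDL may vary by version)::

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects.drizzle import CHAR, ENUM, NUMERIC
    from sqlalchemy.dialects.drizzle import base as drizzle_base

    metadata = MetaData()
    widget = Table('widget', metadata,
                   Column('code', CHAR(4, collation='utf8_bin')),
                   Column('state', ENUM('new', 'used', 'scrap')),
                   Column('price', NUMERIC(10, 2)))

    # renders Drizzle-flavored DDL, e.g. CHAR(4) COLLATE utf8_bin
    print(CreateTable(widget).compile(dialect=drizzle_base.DrizzleDialect()))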
diff --git a/lib/sqlalchemy/dialects/drizzle/mysqldb.py b/lib/sqlalchemy/dialects/drizzle/mysqldb.py
deleted file mode 100644
index 7d91cc36..00000000
--- a/lib/sqlalchemy/dialects/drizzle/mysqldb.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-.. dialect:: drizzle+mysqldb
- :name: MySQL-Python
- :dbapi: mysqldb
- :connectstring: drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
- :url: http://sourceforge.net/projects/mysql-python
-
-
-"""
-
-from sqlalchemy.dialects.drizzle.base import (
- DrizzleDialect,
- DrizzleExecutionContext,
- DrizzleCompiler,
- DrizzleIdentifierPreparer)
-from sqlalchemy.connectors.mysqldb import (
- MySQLDBExecutionContext,
- MySQLDBCompiler,
- MySQLDBIdentifierPreparer,
- MySQLDBConnector)
-
-
-class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext,
- DrizzleExecutionContext):
- pass
-
-
-class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
- pass
-
-
-class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer,
- DrizzleIdentifierPreparer):
- pass
-
-
-class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
- execution_ctx_cls = DrizzleExecutionContext_mysqldb
- statement_compiler = DrizzleCompiler_mysqldb
- preparer = DrizzleIdentifierPreparer_mysqldb
-
- def _detect_charset(self, connection):
- """Sniff out the character set in use for connection results."""
-
- return 'utf8'
-
-
-dialect = DrizzleDialect_mysqldb
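The connect string documented above is used like any other SQLAlchemy URL; actually connecting requires a reachable Drizzle server and the MySQL-Python (MySQLdb) DBAPI, which create_engine() will import when it resolves the dialect::

    from sqlalchemy import create_engine

    # dialect resolution imports MySQLdb at this point
    engine = create_engine("drizzle+mysqldb://scott:tiger@localhost/test")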
diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py
deleted file mode 100644
index 094ac3e8..00000000
--- a/lib/sqlalchemy/dialects/firebird/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# firebird/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy.dialects.firebird import base, kinterbasdb, fdb
-
-base.dialect = fdb.dialect
-
-from sqlalchemy.dialects.firebird.base import \
- SMALLINT, BIGINT, FLOAT, FLOAT, DATE, TIME, \
- TEXT, NUMERIC, FLOAT, TIMESTAMP, VARCHAR, CHAR, BLOB,\
- dialect
-
-__all__ = (
- 'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
- 'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
- 'dialect'
-)
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
deleted file mode 100644
index 21db57b6..00000000
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ /dev/null
@@ -1,738 +0,0 @@
-# firebird/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: firebird
- :name: Firebird
-
-Firebird Dialects
------------------
-
-Firebird offers two distinct dialects_ (not to be confused with a
-SQLAlchemy ``Dialect``):
-
-dialect 1
- This is the old syntax and behaviour, inherited from Interbase pre-6.0.
-
-dialect 3
- This is the newer and supported syntax, introduced in Interbase 6.0.
-
-The SQLAlchemy Firebird dialect detects these versions and
-adjusts its representation of SQL accordingly. However,
-support for dialect 1 is not well tested and probably has
-incompatibilities.
-
-Locking Behavior
-----------------
-
-Firebird locks tables aggressively. For this reason, a DROP TABLE may
-hang until other transactions are released. SQLAlchemy does its best
-to release transactions as quickly as possible. The most common cause
-of hanging transactions is a non-fully consumed result set, i.e.::
-
- result = engine.execute("select * from table")
- row = result.fetchone()
- return
-
-Where above, the ``ResultProxy`` has not been fully consumed. The
-connection will be returned to the pool and the transactional state
-rolled back once the Python garbage collector reclaims the objects
-which hold onto the connection, which often occurs asynchronously.
-The above use case can be alleviated by calling ``first()`` on the
-``ResultProxy`` which will fetch the first row and immediately close
-all remaining cursor/connection resources.
-
-RETURNING support
------------------
-
-Firebird 2.0 supports returning a result set from inserts, and 2.1
-extends that to deletes and updates. This is generically exposed by
-the SQLAlchemy ``returning()`` method, such as::
-
- # INSERT..RETURNING
- result = table.insert().returning(table.c.col1, table.c.col2).\\
- values(name='foo')
- print result.fetchall()
-
- # UPDATE..RETURNING
- raises = empl.update().returning(empl.c.id, empl.c.salary).\\
- where(empl.c.sales>100).\\
- values(dict(salary=empl.c.salary * 1.1))
- print raises.fetchall()
-
-
-.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
-
-"""
-
-import datetime
-
-from sqlalchemy import schema as sa_schema
-from sqlalchemy import exc, types as sqltypes, sql, util
-from sqlalchemy.sql import expression
-from sqlalchemy.engine import base, default, reflection
-from sqlalchemy.sql import compiler
-
-
-from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
- SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
-
-
-RESERVED_WORDS = set([
- "active", "add", "admin", "after", "all", "alter", "and", "any", "as",
- "asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
- "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
- "character", "character_length", "char_length", "check", "close",
- "collate", "column", "commit", "committed", "computed", "conditional",
- "connect", "constraint", "containing", "count", "create", "cross",
- "cstring", "current", "current_connection", "current_date",
- "current_role", "current_time", "current_timestamp",
- "current_transaction", "current_user", "cursor", "database", "date",
- "day", "dec", "decimal", "declare", "default", "delete", "desc",
- "descending", "disconnect", "distinct", "do", "domain", "double",
- "drop", "else", "end", "entry_point", "escape", "exception",
- "execute", "exists", "exit", "external", "extract", "fetch", "file",
- "filter", "float", "for", "foreign", "from", "full", "function",
- "gdscode", "generator", "gen_id", "global", "grant", "group",
- "having", "hour", "if", "in", "inactive", "index", "inner",
- "input_type", "insensitive", "insert", "int", "integer", "into", "is",
- "isolation", "join", "key", "leading", "left", "length", "level",
- "like", "long", "lower", "manual", "max", "maximum_segment", "merge",
- "min", "minute", "module_name", "month", "names", "national",
- "natural", "nchar", "no", "not", "null", "numeric", "octet_length",
- "of", "on", "only", "open", "option", "or", "order", "outer",
- "output_type", "overflow", "page", "pages", "page_size", "parameter",
- "password", "plan", "position", "post_event", "precision", "primary",
- "privileges", "procedure", "protected", "rdb$db_key", "read", "real",
- "record_version", "recreate", "recursive", "references", "release",
- "reserv", "reserving", "retain", "returning_values", "returns",
- "revoke", "right", "rollback", "rows", "row_count", "savepoint",
- "schema", "second", "segment", "select", "sensitive", "set", "shadow",
- "shared", "singular", "size", "smallint", "snapshot", "some", "sort",
- "sqlcode", "stability", "start", "starting", "starts", "statistics",
- "sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
- "to", "trailing", "transaction", "trigger", "trim", "uncommitted",
- "union", "unique", "update", "upper", "user", "using", "value",
- "values", "varchar", "variable", "varying", "view", "wait", "when",
- "where", "while", "with", "work", "write", "year",
- ])
-
-
-class _StringType(sqltypes.String):
- """Base for Firebird string types."""
-
- def __init__(self, charset=None, **kw):
- self.charset = charset
- super(_StringType, self).__init__(**kw)
-
-
-class VARCHAR(_StringType, sqltypes.VARCHAR):
- """Firebird VARCHAR type"""
- __visit_name__ = 'VARCHAR'
-
- def __init__(self, length=None, **kwargs):
- super(VARCHAR, self).__init__(length=length, **kwargs)
-
-
-class CHAR(_StringType, sqltypes.CHAR):
- """Firebird CHAR type"""
- __visit_name__ = 'CHAR'
-
- def __init__(self, length=None, **kwargs):
- super(CHAR, self).__init__(length=length, **kwargs)
-
-
-class _FBDateTime(sqltypes.DateTime):
- def bind_processor(self, dialect):
- def process(value):
- if type(value) == datetime.date:
- return datetime.datetime(value.year, value.month, value.day)
- else:
- return value
- return process
-
-colspecs = {
- sqltypes.DateTime: _FBDateTime
-}
-
-ischema_names = {
- 'SHORT': SMALLINT,
- 'LONG': INTEGER,
- 'QUAD': FLOAT,
- 'FLOAT': FLOAT,
- 'DATE': DATE,
- 'TIME': TIME,
- 'TEXT': TEXT,
- 'INT64': BIGINT,
- 'DOUBLE': FLOAT,
- 'TIMESTAMP': TIMESTAMP,
- 'VARYING': VARCHAR,
- 'CSTRING': CHAR,
- 'BLOB': BLOB,
- }
-
-
-# TODO: date conversion types (should be implemented as _FBDateTime,
-# _FBDate, etc. as bind/result functionality is required)
-
-class FBTypeCompiler(compiler.GenericTypeCompiler):
- def visit_boolean(self, type_):
- return self.visit_SMALLINT(type_)
-
- def visit_datetime(self, type_):
- return self.visit_TIMESTAMP(type_)
-
- def visit_TEXT(self, type_):
- return "BLOB SUB_TYPE 1"
-
- def visit_BLOB(self, type_):
- return "BLOB SUB_TYPE 0"
-
- def _extend_string(self, type_, basic):
- charset = getattr(type_, 'charset', None)
- if charset is None:
- return basic
- else:
- return '%s CHARACTER SET %s' % (basic, charset)
-
- def visit_CHAR(self, type_):
- basic = super(FBTypeCompiler, self).visit_CHAR(type_)
- return self._extend_string(type_, basic)
-
- def visit_VARCHAR(self, type_):
- if not type_.length:
- raise exc.CompileError(
- "VARCHAR requires a length on dialect %s" %
- self.dialect.name)
- basic = super(FBTypeCompiler, self).visit_VARCHAR(type_)
- return self._extend_string(type_, basic)
-
-
-class FBCompiler(sql.compiler.SQLCompiler):
- """Firebird specific idiosyncrasies"""
-
- ansi_bind_rules = True
-
- #def visit_contains_op_binary(self, binary, operator, **kw):
-    # can't use CONTAINING because it's case insensitive.
-
-    #def visit_notcontains_op_binary(self, binary, operator, **kw):
-    # can't use NOT CONTAINING because it's case insensitive.
-
- def visit_now_func(self, fn, **kw):
- return "CURRENT_TIMESTAMP"
-
- def visit_startswith_op_binary(self, binary, operator, **kw):
- return '%s STARTING WITH %s' % (
- binary.left._compiler_dispatch(self, **kw),
- binary.right._compiler_dispatch(self, **kw))
-
- def visit_notstartswith_op_binary(self, binary, operator, **kw):
- return '%s NOT STARTING WITH %s' % (
- binary.left._compiler_dispatch(self, **kw),
- binary.right._compiler_dispatch(self, **kw))
-
- def visit_mod_binary(self, binary, operator, **kw):
- return "mod(%s, %s)" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw))
-
- def visit_alias(self, alias, asfrom=False, **kwargs):
- if self.dialect._version_two:
- return super(FBCompiler, self).\
- visit_alias(alias, asfrom=asfrom, **kwargs)
- else:
- # Override to not use the AS keyword which FB 1.5 does not like
- if asfrom:
- alias_name = isinstance(alias.name,
- expression._truncated_label) and \
- self._truncated_identifier("alias",
- alias.name) or alias.name
-
- return self.process(
- alias.original, asfrom=asfrom, **kwargs) + \
- " " + \
- self.preparer.format_alias(alias, alias_name)
- else:
- return self.process(alias.original, **kwargs)
-
- def visit_substring_func(self, func, **kw):
- s = self.process(func.clauses.clauses[0])
- start = self.process(func.clauses.clauses[1])
- if len(func.clauses.clauses) > 2:
- length = self.process(func.clauses.clauses[2])
- return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
- else:
- return "SUBSTRING(%s FROM %s)" % (s, start)
-
- def visit_length_func(self, function, **kw):
- if self.dialect._version_two:
- return "char_length" + self.function_argspec(function)
- else:
- return "strlen" + self.function_argspec(function)
-
- visit_char_length_func = visit_length_func
-
- def function_argspec(self, func, **kw):
- # TODO: this probably will need to be
- # narrowed to a fixed list, some no-arg functions
- # may require parens - see similar example in the oracle
- # dialect
- if func.clauses is not None and len(func.clauses):
- return self.process(func.clause_expr, **kw)
- else:
- return ""
-
- def default_from(self):
- return " FROM rdb$database"
-
- def visit_sequence(self, seq):
- return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
-
- def get_select_precolumns(self, select):
- """Called when building a ``SELECT`` statement, position is just
- before column list Firebird puts the limit and offset right
- after the ``SELECT``...
- """
-
- result = ""
- if select._limit:
- result += "FIRST %s " % self.process(sql.literal(select._limit))
- if select._offset:
- result += "SKIP %s " % self.process(sql.literal(select._offset))
- if select._distinct:
- result += "DISTINCT "
- return result
-
- def limit_clause(self, select):
- """Already taken care of in the `get_select_precolumns` method."""
-
- return ""
-
- def returning_clause(self, stmt, returning_cols):
- columns = [
- self._label_select_column(None, c, True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
-
- return 'RETURNING ' + ', '.join(columns)
-
-
-class FBDDLCompiler(sql.compiler.DDLCompiler):
- """Firebird syntactic idiosyncrasies"""
-
- def visit_create_sequence(self, create):
- """Generate a ``CREATE GENERATOR`` statement for the sequence."""
-
- # no syntax for these
- # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
- if create.element.start is not None:
-            raise NotImplementedError(
- "Firebird SEQUENCE doesn't support START WITH")
- if create.element.increment is not None:
-            raise NotImplementedError(
- "Firebird SEQUENCE doesn't support INCREMENT BY")
-
- if self.dialect._version_two:
- return "CREATE SEQUENCE %s" % \
- self.preparer.format_sequence(create.element)
- else:
- return "CREATE GENERATOR %s" % \
- self.preparer.format_sequence(create.element)
-
- def visit_drop_sequence(self, drop):
- """Generate a ``DROP GENERATOR`` statement for the sequence."""
-
- if self.dialect._version_two:
- return "DROP SEQUENCE %s" % \
- self.preparer.format_sequence(drop.element)
- else:
- return "DROP GENERATOR %s" % \
- self.preparer.format_sequence(drop.element)
-
-
-class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
- """Install Firebird specific reserved words."""
-
- reserved_words = RESERVED_WORDS
- illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(['_'])
-
- def __init__(self, dialect):
- super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
-
-
-class FBExecutionContext(default.DefaultExecutionContext):
- def fire_sequence(self, seq, type_):
- """Get the next value from the sequence using ``gen_id()``."""
-
- return self._execute_scalar(
- "SELECT gen_id(%s, 1) FROM rdb$database" %
- self.dialect.identifier_preparer.format_sequence(seq),
- type_
- )
-
-
-class FBDialect(default.DefaultDialect):
- """Firebird dialect"""
-
- name = 'firebird'
-
- max_identifier_length = 31
-
- supports_sequences = True
- sequences_optional = False
- supports_default_values = True
- postfetch_lastrowid = False
-
- supports_native_boolean = False
-
- requires_name_normalize = True
- supports_empty_insert = False
-
- statement_compiler = FBCompiler
- ddl_compiler = FBDDLCompiler
- preparer = FBIdentifierPreparer
- type_compiler = FBTypeCompiler
- execution_ctx_cls = FBExecutionContext
-
- colspecs = colspecs
- ischema_names = ischema_names
-
- construct_arguments = []
-
-    # defaults to dialect ver. 3,
-    # will be autodetected upon
-    # first connect
- _version_two = True
-
- def initialize(self, connection):
- super(FBDialect, self).initialize(connection)
- self._version_two = ('firebird' in self.server_version_info and \
- self.server_version_info >= (2, )
- ) or \
- ('interbase' in self.server_version_info and \
- self.server_version_info >= (6, )
- )
-
- if not self._version_two:
- # TODO: whatever other pre < 2.0 stuff goes here
- self.ischema_names = ischema_names.copy()
- self.ischema_names['TIMESTAMP'] = sqltypes.DATE
- self.colspecs = {
- sqltypes.DateTime: sqltypes.DATE
- }
-
- self.implicit_returning = self._version_two and \
- self.__dict__.get('implicit_returning', True)
-
- def normalize_name(self, name):
- # Remove trailing spaces: FB uses a CHAR() type,
- # that is padded with spaces
- name = name and name.rstrip()
- if name is None:
- return None
- elif name.upper() == name and \
- not self.identifier_preparer._requires_quotes(name.lower()):
- return name.lower()
- else:
- return name
-
- def denormalize_name(self, name):
- if name is None:
- return None
- elif name.lower() == name and \
- not self.identifier_preparer._requires_quotes(name.lower()):
- return name.upper()
- else:
- return name
-
- def has_table(self, connection, table_name, schema=None):
- """Return ``True`` if the given table exists, ignoring
- the `schema`."""
-
- tblqry = """
- SELECT 1 AS has_table FROM rdb$database
- WHERE EXISTS (SELECT rdb$relation_name
- FROM rdb$relations
- WHERE rdb$relation_name=?)
- """
- c = connection.execute(tblqry, [self.denormalize_name(table_name)])
- return c.first() is not None
-
- def has_sequence(self, connection, sequence_name, schema=None):
- """Return ``True`` if the given sequence (generator) exists."""
-
- genqry = """
- SELECT 1 AS has_sequence FROM rdb$database
- WHERE EXISTS (SELECT rdb$generator_name
- FROM rdb$generators
- WHERE rdb$generator_name=?)
- """
- c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
- return c.first() is not None
-
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
- # there are two queries commonly mentioned for this.
- # this one, using view_blr, is at the Firebird FAQ among other places:
- # http://www.firebirdfaq.org/faq174/
- s = """
- select rdb$relation_name
- from rdb$relations
- where rdb$view_blr is null
- and (rdb$system_flag is null or rdb$system_flag = 0);
- """
-
- # the other query is this one. It's not clear if there's really
- # any difference between these two. This link:
- # http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
- # states them as interchangeable. Some discussion at [ticket:2898]
- # SELECT DISTINCT rdb$relation_name
- # FROM rdb$relation_fields
- # WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
-
- return [self.normalize_name(row[0]) for row in connection.execute(s)]
-
- @reflection.cache
- def get_view_names(self, connection, schema=None, **kw):
- # see http://www.firebirdfaq.org/faq174/
- s = """
- select rdb$relation_name
- from rdb$relations
- where rdb$view_blr is not null
- and (rdb$system_flag is null or rdb$system_flag = 0);
- """
- return [self.normalize_name(row[0]) for row in connection.execute(s)]
-
- @reflection.cache
- def get_view_definition(self, connection, view_name, schema=None, **kw):
- qry = """
- SELECT rdb$view_source AS view_source
- FROM rdb$relations
- WHERE rdb$relation_name=?
- """
- rp = connection.execute(qry, [self.denormalize_name(view_name)])
- row = rp.first()
- if row:
- return row['view_source']
- else:
- return None
-
- @reflection.cache
- def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- # Query to extract the PK/FK constrained fields of the given table
- keyqry = """
- SELECT se.rdb$field_name AS fname
- FROM rdb$relation_constraints rc
- JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
- WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
- """
- tablename = self.denormalize_name(table_name)
- # get primary key fields
- c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
- pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
- return {'constrained_columns': pkfields, 'name': None}
-
- @reflection.cache
- def get_column_sequence(self, connection,
- table_name, column_name,
- schema=None, **kw):
- tablename = self.denormalize_name(table_name)
- colname = self.denormalize_name(column_name)
- # Heuristic-query to determine the generator associated to a PK field
- genqry = """
- SELECT trigdep.rdb$depended_on_name AS fgenerator
- FROM rdb$dependencies tabdep
- JOIN rdb$dependencies trigdep
- ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
- AND trigdep.rdb$depended_on_type=14
- AND trigdep.rdb$dependent_type=2
- JOIN rdb$triggers trig ON
- trig.rdb$trigger_name=tabdep.rdb$dependent_name
- WHERE tabdep.rdb$depended_on_name=?
- AND tabdep.rdb$depended_on_type=0
- AND trig.rdb$trigger_type=1
- AND tabdep.rdb$field_name=?
- AND (SELECT count(*)
- FROM rdb$dependencies trigdep2
- WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
- """
- genr = connection.execute(genqry, [tablename, colname]).first()
- if genr is not None:
- return dict(name=self.normalize_name(genr['fgenerator']))
-
- @reflection.cache
- def get_columns(self, connection, table_name, schema=None, **kw):
- # Query to extract the details of all the fields of the given table
- tblqry = """
- SELECT r.rdb$field_name AS fname,
- r.rdb$null_flag AS null_flag,
- t.rdb$type_name AS ftype,
- f.rdb$field_sub_type AS stype,
- f.rdb$field_length/
- COALESCE(cs.rdb$bytes_per_character,1) AS flen,
- f.rdb$field_precision AS fprec,
- f.rdb$field_scale AS fscale,
- COALESCE(r.rdb$default_source,
- f.rdb$default_source) AS fdefault
- FROM rdb$relation_fields r
- JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
- JOIN rdb$types t
- ON t.rdb$type=f.rdb$field_type AND
- t.rdb$field_name='RDB$FIELD_TYPE'
- LEFT JOIN rdb$character_sets cs ON
- f.rdb$character_set_id=cs.rdb$character_set_id
- WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
- ORDER BY r.rdb$field_position
- """
- # get the PK, used to determine the eventual associated sequence
- pk_constraint = self.get_pk_constraint(connection, table_name)
- pkey_cols = pk_constraint['constrained_columns']
-
- tablename = self.denormalize_name(table_name)
- # get all of the fields for this table
- c = connection.execute(tblqry, [tablename])
- cols = []
- while True:
- row = c.fetchone()
- if row is None:
- break
- name = self.normalize_name(row['fname'])
- orig_colname = row['fname']
-
- # get the data type
- colspec = row['ftype'].rstrip()
- coltype = self.ischema_names.get(colspec)
- if coltype is None:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (colspec, name))
- coltype = sqltypes.NULLTYPE
- elif issubclass(coltype, Integer) and row['fprec'] != 0:
- coltype = NUMERIC(
- precision=row['fprec'],
- scale=row['fscale'] * -1)
- elif colspec in ('VARYING', 'CSTRING'):
- coltype = coltype(row['flen'])
- elif colspec == 'TEXT':
- coltype = TEXT(row['flen'])
- elif colspec == 'BLOB':
- if row['stype'] == 1:
- coltype = TEXT()
- else:
- coltype = BLOB()
- else:
- coltype = coltype()
-
- # does it have a default value?
- defvalue = None
- if row['fdefault'] is not None:
- # the value comes down as "DEFAULT 'value'": there may be
- # more than one whitespace around the "DEFAULT" keyword
- # and it may also be lower case
- # (see also http://tracker.firebirdsql.org/browse/CORE-356)
- defexpr = row['fdefault'].lstrip()
- assert defexpr[:8].rstrip().upper() == \
- 'DEFAULT', "Unrecognized default value: %s" % \
- defexpr
- defvalue = defexpr[8:].strip()
- if defvalue == 'NULL':
- # Redundant
- defvalue = None
- col_d = {
- 'name': name,
- 'type': coltype,
- 'nullable': not bool(row['null_flag']),
- 'default': defvalue,
- 'autoincrement': defvalue is None
- }
-
- if orig_colname.lower() == orig_colname:
- col_d['quote'] = True
-
- # if the PK is a single field, try to see if its linked to
- # a sequence thru a trigger
- if len(pkey_cols) == 1 and name == pkey_cols[0]:
- seq_d = self.get_column_sequence(connection, tablename, name)
- if seq_d is not None:
- col_d['sequence'] = seq_d
-
- cols.append(col_d)
- return cols
-
- @reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None, **kw):
- # Query to extract the details of each UK/FK of the given table
- fkqry = """
- SELECT rc.rdb$constraint_name AS cname,
- cse.rdb$field_name AS fname,
- ix2.rdb$relation_name AS targetrname,
- se.rdb$field_name AS targetfname
- FROM rdb$relation_constraints rc
- JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
- JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
- JOIN rdb$index_segments cse ON
- cse.rdb$index_name=ix1.rdb$index_name
- JOIN rdb$index_segments se
- ON se.rdb$index_name=ix2.rdb$index_name
- AND se.rdb$field_position=cse.rdb$field_position
- WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
- ORDER BY se.rdb$index_name, se.rdb$field_position
- """
- tablename = self.denormalize_name(table_name)
-
- c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
- fks = util.defaultdict(lambda: {
- 'name': None,
- 'constrained_columns': [],
- 'referred_schema': None,
- 'referred_table': None,
- 'referred_columns': []
- })
-
- for row in c:
- cname = self.normalize_name(row['cname'])
- fk = fks[cname]
- if not fk['name']:
- fk['name'] = cname
- fk['referred_table'] = self.normalize_name(row['targetrname'])
- fk['constrained_columns'].append(
- self.normalize_name(row['fname']))
- fk['referred_columns'].append(
- self.normalize_name(row['targetfname']))
- return list(fks.values())
-
- @reflection.cache
- def get_indexes(self, connection, table_name, schema=None, **kw):
- qry = """
- SELECT ix.rdb$index_name AS index_name,
- ix.rdb$unique_flag AS unique_flag,
- ic.rdb$field_name AS field_name
- FROM rdb$indices ix
- JOIN rdb$index_segments ic
- ON ix.rdb$index_name=ic.rdb$index_name
- LEFT OUTER JOIN rdb$relation_constraints
- ON rdb$relation_constraints.rdb$index_name =
- ic.rdb$index_name
- WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
- AND rdb$relation_constraints.rdb$constraint_type IS NULL
- ORDER BY index_name, ic.rdb$field_position
- """
- c = connection.execute(qry, [self.denormalize_name(table_name)])
-
- indexes = util.defaultdict(dict)
- for row in c:
- indexrec = indexes[row['index_name']]
- if 'name' not in indexrec:
- indexrec['name'] = self.normalize_name(row['index_name'])
- indexrec['column_names'] = []
- indexrec['unique'] = bool(row['unique_flag'])
-
- indexrec['column_names'].append(
- self.normalize_name(row['field_name']))
-
- return list(indexes.values())
-
diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py
deleted file mode 100644
index 4d94ef0d..00000000
--- a/lib/sqlalchemy/dialects/firebird/fdb.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# firebird/fdb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: firebird+fdb
- :name: fdb
-    :dbapi: fdb
- :connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
- :url: http://pypi.python.org/pypi/fdb/
-
-    fdb is a kinterbasdb-compatible DBAPI for Firebird.
-
- .. versionadded:: 0.8 - Support for the fdb Firebird driver.
-
- .. versionchanged:: 0.9 - The fdb dialect is now the default dialect
- under the ``firebird://`` URL space, as ``fdb`` is now the official
- Python driver for Firebird.
-
-Arguments
-----------
-
-The ``fdb`` dialect is based on the :mod:`sqlalchemy.dialects.firebird.kinterbasdb`
-dialect, however it does not accept every argument that Kinterbasdb does.
-
-* ``enable_rowcount`` - True by default, setting this to False disables
- the usage of "cursor.rowcount" with the
- Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
- after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
- ResultProxy will return -1 for result.rowcount. The rationale here is
- that Kinterbasdb requires a second round trip to the database when
- .rowcount is called - since SQLA's resultproxy automatically closes
- the cursor after a non-result-returning statement, rowcount must be
- called, if at all, before the result object is returned. Additionally,
- cursor.rowcount may not return correct results with older versions
- of Firebird, and setting this flag to False will also cause the
- SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
- per-execution basis using the ``enable_rowcount`` option with
- :meth:`.Connection.execution_options`::
-
- conn = engine.connect().execution_options(enable_rowcount=True)
- r = conn.execute(stmt)
- print r.rowcount
-
-* ``retaining`` - False by default. Setting this to True will pass the
- ``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
- methods of the DBAPI connection, which can improve performance in some
- situations, but apparently with significant caveats.
- Please read the fdb and/or kinterbasdb DBAPI documentation in order to
-  understand the implications of this flag.  A usage sketch is shown below.
-
- .. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying
- transaction retaining behavior - in 0.8 it defaults to ``True``
- for backwards compatibility.
-
- .. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
- In 0.8 it defaulted to ``True``.
-
- .. seealso::
-
- http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information
- on the "retaining" flag.
-
-"""
-
-from .kinterbasdb import FBDialect_kinterbasdb
-from ... import util
-
-
-class FBDialect_fdb(FBDialect_kinterbasdb):
-
- def __init__(self, enable_rowcount=True,
- retaining=False, **kwargs):
- super(FBDialect_fdb, self).__init__(
- enable_rowcount=enable_rowcount,
- retaining=retaining, **kwargs)
-
- @classmethod
- def dbapi(cls):
- return __import__('fdb')
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if opts.get('port'):
- opts['host'] = "%s/%s" % (opts['host'], opts['port'])
- del opts['port']
- opts.update(url.query)
-
- util.coerce_kw_type(opts, 'type_conv', int)
-
- return ([], opts)
-
- def _get_server_version_info(self, connection):
- """Get the version of the Firebird server used by a connection.
-
- Returns a tuple of (`major`, `minor`, `build`), three integers
- representing the version of the attached server.
- """
-
- # This is the simpler approach (the other uses the services api),
- # that for backward compatibility reasons returns a string like
- # LI-V6.3.3.12981 Firebird 2.0
- # where the first version is a fake one resembling the old
- # Interbase signature.
-
- isc_info_firebird_version = 103
- fbconn = connection.connection
-
- version = fbconn.db_info(isc_info_firebird_version)
-
- return self._parse_version_info(version)
-
-dialect = FBDialect_fdb
diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
deleted file mode 100644
index b8a83a07..00000000
--- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# firebird/kinterbasdb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: firebird+kinterbasdb
- :name: kinterbasdb
- :dbapi: kinterbasdb
- :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]
- :url: http://firebirdsql.org/index.php?op=devel&sub=python
-
-Arguments
-----------
-
-The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
-arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect. In addition, it
-also accepts the following:
-
-* ``type_conv`` - select the kind of mapping done on the types: by default
- SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
- the linked documents below for further information.
-
-* ``concurrency_level`` - set the backend policy with regards to threading
- issues: by default SQLAlchemy uses policy 1. See the linked documents
-  below for further information; a configuration sketch follows.
-
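-For example, a minimal sketch of overriding both values at engine
-configuration time - dialect keyword arguments are accepted by
-:func:`.create_engine` (the URL below is hypothetical)::
-
-    from sqlalchemy import create_engine
-
-    engine = create_engine(
-        "firebird+kinterbasdb://user:password@localhost/path/to/db",
-        type_conv=200, concurrency_level=1)
-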
-.. seealso::
-
- http://sourceforge.net/projects/kinterbasdb
-
- http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
-
- http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
-
-"""
-
-from .base import FBDialect, FBExecutionContext
-from ... import util, types as sqltypes
-from re import match
-import decimal
-
-
-class _kinterbasdb_numeric(object):
- def bind_processor(self, dialect):
- def process(value):
- if isinstance(value, decimal.Decimal):
- return str(value)
- else:
- return value
- return process
-
-class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
- pass
-
-class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
- pass
-
-
-class FBExecutionContext_kinterbasdb(FBExecutionContext):
- @property
- def rowcount(self):
- if self.execution_options.get('enable_rowcount',
- self.dialect.enable_rowcount):
- return self.cursor.rowcount
- else:
- return -1
-
-
-class FBDialect_kinterbasdb(FBDialect):
- driver = 'kinterbasdb'
- supports_sane_rowcount = False
- supports_sane_multi_rowcount = False
- execution_ctx_cls = FBExecutionContext_kinterbasdb
-
- supports_native_decimal = True
-
- colspecs = util.update_copy(
- FBDialect.colspecs,
- {
- sqltypes.Numeric: _FBNumeric_kinterbasdb,
- sqltypes.Float: _FBFloat_kinterbasdb,
- }
-
- )
-
- def __init__(self, type_conv=200, concurrency_level=1,
- enable_rowcount=True,
- retaining=False, **kwargs):
- super(FBDialect_kinterbasdb, self).__init__(**kwargs)
- self.enable_rowcount = enable_rowcount
- self.type_conv = type_conv
- self.concurrency_level = concurrency_level
- self.retaining = retaining
- if enable_rowcount:
- self.supports_sane_rowcount = True
-
- @classmethod
- def dbapi(cls):
- return __import__('kinterbasdb')
-
- def do_execute(self, cursor, statement, parameters, context=None):
-        # kinterbasdb does not accept None, but wants an empty list
- # when there are no arguments.
- cursor.execute(statement, parameters or [])
-
- def do_rollback(self, dbapi_connection):
- dbapi_connection.rollback(self.retaining)
-
- def do_commit(self, dbapi_connection):
- dbapi_connection.commit(self.retaining)
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if opts.get('port'):
- opts['host'] = "%s/%s" % (opts['host'], opts['port'])
- del opts['port']
- opts.update(url.query)
-
- util.coerce_kw_type(opts, 'type_conv', int)
-
- type_conv = opts.pop('type_conv', self.type_conv)
- concurrency_level = opts.pop('concurrency_level',
- self.concurrency_level)
-
- if self.dbapi is not None:
- initialized = getattr(self.dbapi, 'initialized', None)
- if initialized is None:
- # CVS rev 1.96 changed the name of the attribute:
- # http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
- # Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
- initialized = getattr(self.dbapi, '_initialized', False)
- if not initialized:
- self.dbapi.init(type_conv=type_conv,
- concurrency_level=concurrency_level)
- return ([], opts)
-
- def _get_server_version_info(self, connection):
- """Get the version of the Firebird server used by a connection.
-
- Returns a tuple of (`major`, `minor`, `build`), three integers
- representing the version of the attached server.
- """
-
- # This is the simpler approach (the other uses the services api),
- # that for backward compatibility reasons returns a string like
- # LI-V6.3.3.12981 Firebird 2.0
- # where the first version is a fake one resembling the old
- # Interbase signature.
-
- fbconn = connection.connection
- version = fbconn.server_version
-
- return self._parse_version_info(version)
-
- def _parse_version_info(self, version):
-        m = match(r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
- if not m:
- raise AssertionError(
- "Could not determine version from string '%s'" % version)
-
-        if m.group(5) is not None:
- return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
- else:
- return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
-
- def is_disconnect(self, e, connection, cursor):
- if isinstance(e, (self.dbapi.OperationalError,
- self.dbapi.ProgrammingError)):
- msg = str(e)
- return ('Unable to complete network request to host' in msg or
- 'Invalid connection state' in msg or
- 'Invalid cursor state' in msg or
- 'connection shutdown' in msg)
- else:
- return False
-
-dialect = FBDialect_kinterbasdb
diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py
deleted file mode 100644
index 7a2dfa60..00000000
--- a/lib/sqlalchemy/dialects/mssql/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# mssql/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
- pymssql, zxjdbc, mxodbc
-
-base.dialect = pyodbc.dialect
-
-from sqlalchemy.dialects.mssql.base import \
- INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
- NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\
- DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
- BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\
- MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect
-
-
-__all__ = (
- 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
- 'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
- 'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
- 'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
- 'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
-)
diff --git a/lib/sqlalchemy/dialects/mssql/adodbapi.py b/lib/sqlalchemy/dialects/mssql/adodbapi.py
deleted file mode 100644
index 95cf4242..00000000
--- a/lib/sqlalchemy/dialects/mssql/adodbapi.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# mssql/adodbapi.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mssql+adodbapi
- :name: adodbapi
- :dbapi: adodbapi
- :connectstring: mssql+adodbapi://:@
- :url: http://adodbapi.sourceforge.net/
-
-.. note::
-
-    The adodbapi dialect is not implemented in SQLAlchemy versions 0.6 and
- above at this time.
-
-"""
-import datetime
-from sqlalchemy import types as sqltypes, util
-from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
-import sys
-
-
-class MSDateTime_adodbapi(MSDateTime):
- def result_processor(self, dialect, coltype):
- def process(value):
- # adodbapi will return datetimes with empty time
- # values as datetime.date() objects.
- # Promote them back to full datetime.datetime()
- if type(value) is datetime.date:
- return datetime.datetime(value.year, value.month, value.day)
- return value
- return process
-
-
-class MSDialect_adodbapi(MSDialect):
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = True
- supports_unicode = sys.maxunicode == 65535
- supports_unicode_statements = True
- driver = 'adodbapi'
-
- @classmethod
- def import_dbapi(cls):
- import adodbapi as module
- return module
-
- colspecs = util.update_copy(
- MSDialect.colspecs,
- {
- sqltypes.DateTime: MSDateTime_adodbapi
- }
- )
-
- def create_connect_args(self, url):
- keys = url.query
-
- connectors = ["Provider=SQLOLEDB"]
- if 'port' in keys:
- connectors.append("Data Source=%s, %s" %
- (keys.get("host"), keys.get("port")))
- else:
- connectors.append("Data Source=%s" % keys.get("host"))
- connectors.append("Initial Catalog=%s" % keys.get("database"))
- user = keys.get("user")
- if user:
- connectors.append("User Id=%s" % user)
- connectors.append("Password=%s" % keys.get("password", ""))
- else:
- connectors.append("Integrated Security=SSPI")
- return [[";".join(connectors)], {}]
-
- def is_disconnect(self, e, connection, cursor):
- return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
- "'connection failure'" in str(e)
-
-dialect = MSDialect_adodbapi
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
deleted file mode 100644
index 522cb5ce..00000000
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ /dev/null
@@ -1,1550 +0,0 @@
-# mssql/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mssql
- :name: Microsoft SQL Server
-
-
-Auto Increment Behavior
------------------------
-
-``IDENTITY`` columns are supported by using SQLAlchemy
-``schema.Sequence()`` objects. In other words::
-
- from sqlalchemy import Table, Integer, Sequence, Column
-
- Table('test', metadata,
- Column('id', Integer,
- Sequence('blah',100,10), primary_key=True),
- Column('name', String(20))
- ).create(some_engine)
-
-would yield::
-
- CREATE TABLE test (
- id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
- name VARCHAR(20) NULL,
- )
-
-Note that the ``start`` and ``increment`` values for sequences are
-optional and will default to 1,1.
-
-Implicit ``autoincrement`` behavior works the same in MSSQL as it
-does in other dialects and results in an ``IDENTITY`` column.
-
-* Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for
-  ``INSERT`` statements), as sketched below
-
-* Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on
- ``INSERT``
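-
-For example, a minimal sketch of supplying an explicit value for the
-``IDENTITY`` column in the table above (assuming it were assigned to a
-variable ``test``) - the dialect wraps the statement with
-``SET IDENTITY_INSERT`` automatically::
-
-    with some_engine.connect() as conn:
-        conn.execute(test.insert(), id=5, name='some name')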
-
-Collation Support
------------------
-
-Character collations are supported by the base string types,
-specified by the string argument "collation"::
-
- from sqlalchemy import VARCHAR
- Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
-
-When such a column is associated with a :class:`.Table`, the
-CREATE TABLE statement for this column will yield::
-
- login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
-
-.. versionadded:: 0.8 Character collations are now part of the base string
- types.
-
-LIMIT/OFFSET Support
---------------------
-
-MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
-supported directly through the ``TOP`` Transact SQL keyword::
-
- select.limit
-
-will yield::
-
- SELECT TOP n
-
-If using SQL Server 2005 or above, LIMIT with OFFSET
-support is available through the ``ROW_NUMBER OVER`` construct.
-For versions below 2005, LIMIT with OFFSET usage will fail.
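-
-For example, a minimal sketch using Core constructs (``some_table`` and
-``conn`` are hypothetical); note that an ``order_by()`` is required whenever
-an OFFSET is used, as the dialect wraps the query with ``ROW_NUMBER() OVER``::
-
-    from sqlalchemy import select
-
-    stmt = select([some_table]).order_by(some_table.c.id).limit(10).offset(20)
-    result = conn.execute(stmt)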
-
-Nullability
------------
-MSSQL has support for three levels of column nullability. The default
-nullability allows nulls and is explicit in the CREATE TABLE
-construct::
-
- name VARCHAR(20) NULL
-
-If ``nullable=None`` is specified then no specification is made. In
-other words the database's configured default is used. This will
-render::
-
- name VARCHAR(20)
-
-If ``nullable`` is ``True`` or ``False`` then the column will be
-``NULL`` or ``NOT NULL`` respectively.
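-
-For example, a minimal sketch of the three settings on hypothetical columns::
-
-    Column('a', String(20), nullable=True)    # a VARCHAR(20) NULL
-    Column('b', String(20), nullable=False)   # b VARCHAR(20) NOT NULL
-    Column('c', String(20), nullable=None)    # c VARCHAR(20)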
-
-Date / Time Handling
---------------------
-DATE and TIME are supported. Bind parameters are converted
-to datetime.datetime() objects as required by most MSSQL drivers,
-and results are processed from strings if needed.
-The DATE and TIME types are not available for MSSQL 2005 and
-previous - if a server version below 2008 is detected, DDL
-for these types will be issued as DATETIME.
-
-.. _mssql_indexes:
-
-Clustered Index Support
------------------------
-
-The MSSQL dialect supports clustered indexes (and primary keys) via the
-``mssql_clustered`` option. This option is available to :class:`.Index`,
-:class:`.UniqueConstraint` and :class:`.PrimaryKeyConstraint`.
-
-To generate a clustered index::
-
- Index("my_index", table.c.x, mssql_clustered=True)
-
-which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
-
-.. versionadded:: 0.8
-
-To generate a clustered primary key use::
-
- Table('my_table', metadata,
- Column('x', ...),
- Column('y', ...),
- PrimaryKeyConstraint("x", "y", mssql_clustered=True))
-
-which will render the table, for example, as::
-
- CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL, PRIMARY KEY CLUSTERED (x, y))
-
-Similarly, we can generate a clustered unique constraint using::
-
- Table('my_table', metadata,
- Column('x', ...),
- Column('y', ...),
- PrimaryKeyConstraint("x"),
- UniqueConstraint("y", mssql_clustered=True),
- )
-
-.. versionadded:: 0.9.2
-
-MSSQL-Specific Index Options
------------------------------
-
-In addition to clustering, the MSSQL dialect supports other special options
-for :class:`.Index`.
-
-INCLUDE
-^^^^^^^
-
-The ``mssql_include`` option renders INCLUDE(colname) for the given string names::
-
- Index("my_index", table.c.x, mssql_include=['y'])
-
-would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
-
-.. versionadded:: 0.8
-
-Index ordering
-^^^^^^^^^^^^^^
-
-Index ordering is available via functional expressions, such as::
-
- Index("my_index", table.c.x.desc())
-
-would render the index as ``CREATE INDEX my_index ON table (x DESC)``
-
-.. versionadded:: 0.8
-
-.. seealso::
-
- :ref:`schema_indexes_functional`
-
-Compatibility Levels
---------------------
-MSSQL supports the notion of setting compatibility levels at the
-database level. This allows, for instance, to run a database that
-is compatible with SQL2000 while running on a SQL2005 database
-server. ``server_version_info`` will always return the database
-server version information (in this case SQL2005) and not the
-compatibility level information. Because of this, if running under
-a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
-statements that are unable to be parsed by the database server.
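-
-For example, a minimal sketch illustrating that the reported version reflects
-the actual server rather than the compatibility level (``engine`` is an
-already-configured engine; the value shown is hypothetical)::
-
-    conn = engine.connect()
-    print engine.dialect.server_version_info   # e.g. (9, 0, 1399) for SQL2005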
-
-Triggers
---------
-
-SQLAlchemy by default uses OUTPUT INSERTED to get at newly
-generated primary key values via IDENTITY columns or other
-server side defaults. MS-SQL does not
-allow the usage of OUTPUT INSERTED on tables that have triggers.
-To disable the usage of OUTPUT INSERTED on a per-table basis,
-specify ``implicit_returning=False`` for each :class:`.Table`
-which has triggers::
-
- Table('mytable', metadata,
- Column('id', Integer, primary_key=True),
- # ...,
- implicit_returning=False
- )
-
-Declarative form::
-
- class MyClass(Base):
- # ...
- __table_args__ = {'implicit_returning':False}
-
-
-This option can also be specified engine-wide using the
-``implicit_returning=False`` argument on :func:`.create_engine`.
-
-Enabling Snapshot Isolation
----------------------------
-
-While not necessarily specific to SQLAlchemy, SQL Server has a default transaction
-isolation mode that locks entire tables, and causes even mildly concurrent
-applications to have long held locks and frequent deadlocks.
-Enabling snapshot isolation for the database as a whole is recommended
-for modern levels of concurrency support. This is accomplished via the
-following ALTER DATABASE commands executed at the SQL prompt::
-
- ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
-
- ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
-
-Background on SQL Server snapshot isolation is available at
-http://msdn.microsoft.com/en-us/library/ms175095.aspx.
-
-Known Issues
-------------
-
-* No support for more than one ``IDENTITY`` column per table
-* reflection of indexes does not work with versions older than
- SQL Server 2005
-
-"""
-import datetime
-import operator
-import re
-
-from ... import sql, schema as sa_schema, exc, util
-from ...sql import compiler, expression, \
- util as sql_util, cast
-from ... import engine
-from ...engine import reflection, default
-from ... import types as sqltypes
-from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
- FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
- VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
-
-
-from ...util import update_wrapper
-from . import information_schema as ischema
-
-MS_2008_VERSION = (10,)
-MS_2005_VERSION = (9,)
-MS_2000_VERSION = (8,)
-
-RESERVED_WORDS = set(
- ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
- 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
- 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
- 'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
- 'containstable', 'continue', 'convert', 'create', 'cross', 'current',
- 'current_date', 'current_time', 'current_timestamp', 'current_user',
- 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
- 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
- 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
- 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
- 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
- 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
- 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
- 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
- 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
- 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
- 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
- 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
- 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
- 'reconfigure', 'references', 'replication', 'restore', 'restrict',
- 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
- 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
- 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
- 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
- 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
- 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
- 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
- 'writetext',
- ])
-
-
-class REAL(sqltypes.REAL):
- __visit_name__ = 'REAL'
-
- def __init__(self, **kw):
- # REAL is a synonym for FLOAT(24) on SQL server
- kw['precision'] = 24
- super(REAL, self).__init__(**kw)
-
-
-class TINYINT(sqltypes.Integer):
- __visit_name__ = 'TINYINT'
-
-
-# MSSQL DATE/TIME types have varied behavior, sometimes returning
-# strings. MSDate/TIME check for everything, and always
-# filter bind parameters into datetime objects (required by pyodbc,
-# not sure about other dialects).
-
-class _MSDate(sqltypes.Date):
- def bind_processor(self, dialect):
- def process(value):
- if type(value) == datetime.date:
- return datetime.datetime(value.year, value.month, value.day)
- else:
- return value
- return process
-
- _reg = re.compile(r"(\d+)-(\d+)-(\d+)")
-
- def result_processor(self, dialect, coltype):
- def process(value):
- if isinstance(value, datetime.datetime):
- return value.date()
- elif isinstance(value, util.string_types):
- return datetime.date(*[
- int(x or 0)
- for x in self._reg.match(value).groups()
- ])
- else:
- return value
- return process
-
-
-class TIME(sqltypes.TIME):
- def __init__(self, precision=None, **kwargs):
- self.precision = precision
- super(TIME, self).__init__()
-
- __zero_date = datetime.date(1900, 1, 1)
-
- def bind_processor(self, dialect):
- def process(value):
- if isinstance(value, datetime.datetime):
- value = datetime.datetime.combine(
- self.__zero_date, value.time())
- elif isinstance(value, datetime.time):
- value = datetime.datetime.combine(self.__zero_date, value)
- return value
- return process
-
- _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
-
- def result_processor(self, dialect, coltype):
- def process(value):
- if isinstance(value, datetime.datetime):
- return value.time()
- elif isinstance(value, util.string_types):
- return datetime.time(*[
- int(x or 0)
- for x in self._reg.match(value).groups()])
- else:
- return value
- return process
-_MSTime = TIME
-
-
-class _DateTimeBase(object):
- def bind_processor(self, dialect):
- def process(value):
- if type(value) == datetime.date:
- return datetime.datetime(value.year, value.month, value.day)
- else:
- return value
- return process
-
-
-class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
- pass
-
-
-class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
- __visit_name__ = 'SMALLDATETIME'
-
-
-class DATETIME2(_DateTimeBase, sqltypes.DateTime):
- __visit_name__ = 'DATETIME2'
-
- def __init__(self, precision=None, **kw):
- super(DATETIME2, self).__init__(**kw)
- self.precision = precision
-
-
-# TODO: is this not an Interval ?
-class DATETIMEOFFSET(sqltypes.TypeEngine):
- __visit_name__ = 'DATETIMEOFFSET'
-
- def __init__(self, precision=None, **kwargs):
- self.precision = precision
-
-
-class _StringType(object):
- """Base for MSSQL string types."""
-
- def __init__(self, collation=None):
- super(_StringType, self).__init__(collation=collation)
-
-
-
-
-class NTEXT(sqltypes.UnicodeText):
- """MSSQL NTEXT type, for variable-length unicode text up to 2^30
- characters."""
-
- __visit_name__ = 'NTEXT'
-
-
-
-class IMAGE(sqltypes.LargeBinary):
- __visit_name__ = 'IMAGE'
-
-
-class BIT(sqltypes.TypeEngine):
- __visit_name__ = 'BIT'
-
-
-class MONEY(sqltypes.TypeEngine):
- __visit_name__ = 'MONEY'
-
-
-class SMALLMONEY(sqltypes.TypeEngine):
- __visit_name__ = 'SMALLMONEY'
-
-
-class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
- __visit_name__ = "UNIQUEIDENTIFIER"
-
-
-class SQL_VARIANT(sqltypes.TypeEngine):
- __visit_name__ = 'SQL_VARIANT'
-
-# old names.
-MSDateTime = _MSDateTime
-MSDate = _MSDate
-MSReal = REAL
-MSTinyInteger = TINYINT
-MSTime = TIME
-MSSmallDateTime = SMALLDATETIME
-MSDateTime2 = DATETIME2
-MSDateTimeOffset = DATETIMEOFFSET
-MSText = TEXT
-MSNText = NTEXT
-MSString = VARCHAR
-MSNVarchar = NVARCHAR
-MSChar = CHAR
-MSNChar = NCHAR
-MSBinary = BINARY
-MSVarBinary = VARBINARY
-MSImage = IMAGE
-MSBit = BIT
-MSMoney = MONEY
-MSSmallMoney = SMALLMONEY
-MSUniqueIdentifier = UNIQUEIDENTIFIER
-MSVariant = SQL_VARIANT
-
-ischema_names = {
- 'int': INTEGER,
- 'bigint': BIGINT,
- 'smallint': SMALLINT,
- 'tinyint': TINYINT,
- 'varchar': VARCHAR,
- 'nvarchar': NVARCHAR,
- 'char': CHAR,
- 'nchar': NCHAR,
- 'text': TEXT,
- 'ntext': NTEXT,
- 'decimal': DECIMAL,
- 'numeric': NUMERIC,
- 'float': FLOAT,
- 'datetime': DATETIME,
- 'datetime2': DATETIME2,
- 'datetimeoffset': DATETIMEOFFSET,
- 'date': DATE,
- 'time': TIME,
- 'smalldatetime': SMALLDATETIME,
- 'binary': BINARY,
- 'varbinary': VARBINARY,
- 'bit': BIT,
- 'real': REAL,
- 'image': IMAGE,
- 'timestamp': TIMESTAMP,
- 'money': MONEY,
- 'smallmoney': SMALLMONEY,
- 'uniqueidentifier': UNIQUEIDENTIFIER,
- 'sql_variant': SQL_VARIANT,
-}
-
-
-class MSTypeCompiler(compiler.GenericTypeCompiler):
- def _extend(self, spec, type_, length=None):
- """Extend a string-type declaration with standard SQL
- COLLATE annotations.
-
- """
-
- if getattr(type_, 'collation', None):
- collation = 'COLLATE %s' % type_.collation
- else:
- collation = None
-
- if not length:
- length = type_.length
-
- if length:
- spec = spec + "(%s)" % length
-
- return ' '.join([c for c in (spec, collation)
- if c is not None])
-
- def visit_FLOAT(self, type_):
- precision = getattr(type_, 'precision', None)
- if precision is None:
- return "FLOAT"
- else:
- return "FLOAT(%(precision)s)" % {'precision': precision}
-
- def visit_TINYINT(self, type_):
- return "TINYINT"
-
- def visit_DATETIMEOFFSET(self, type_):
- if type_.precision:
- return "DATETIMEOFFSET(%s)" % type_.precision
- else:
- return "DATETIMEOFFSET"
-
- def visit_TIME(self, type_):
- precision = getattr(type_, 'precision', None)
- if precision:
- return "TIME(%s)" % precision
- else:
- return "TIME"
-
- def visit_DATETIME2(self, type_):
- precision = getattr(type_, 'precision', None)
- if precision:
- return "DATETIME2(%s)" % precision
- else:
- return "DATETIME2"
-
- def visit_SMALLDATETIME(self, type_):
- return "SMALLDATETIME"
-
- def visit_unicode(self, type_):
- return self.visit_NVARCHAR(type_)
-
- def visit_unicode_text(self, type_):
- return self.visit_NTEXT(type_)
-
- def visit_NTEXT(self, type_):
- return self._extend("NTEXT", type_)
-
- def visit_TEXT(self, type_):
- return self._extend("TEXT", type_)
-
- def visit_VARCHAR(self, type_):
- return self._extend("VARCHAR", type_, length=type_.length or 'max')
-
- def visit_CHAR(self, type_):
- return self._extend("CHAR", type_)
-
- def visit_NCHAR(self, type_):
- return self._extend("NCHAR", type_)
-
- def visit_NVARCHAR(self, type_):
- return self._extend("NVARCHAR", type_, length=type_.length or 'max')
-
- def visit_date(self, type_):
- if self.dialect.server_version_info < MS_2008_VERSION:
- return self.visit_DATETIME(type_)
- else:
- return self.visit_DATE(type_)
-
- def visit_time(self, type_):
- if self.dialect.server_version_info < MS_2008_VERSION:
- return self.visit_DATETIME(type_)
- else:
- return self.visit_TIME(type_)
-
- def visit_large_binary(self, type_):
- return self.visit_IMAGE(type_)
-
- def visit_IMAGE(self, type_):
- return "IMAGE"
-
- def visit_VARBINARY(self, type_):
- return self._extend(
- "VARBINARY",
- type_,
- length=type_.length or 'max')
-
- def visit_boolean(self, type_):
- return self.visit_BIT(type_)
-
- def visit_BIT(self, type_):
- return "BIT"
-
- def visit_MONEY(self, type_):
- return "MONEY"
-
- def visit_SMALLMONEY(self, type_):
- return 'SMALLMONEY'
-
- def visit_UNIQUEIDENTIFIER(self, type_):
- return "UNIQUEIDENTIFIER"
-
- def visit_SQL_VARIANT(self, type_):
- return 'SQL_VARIANT'
-
-
-class MSExecutionContext(default.DefaultExecutionContext):
- _enable_identity_insert = False
- _select_lastrowid = False
- _result_proxy = None
- _lastrowid = None
-
- def pre_exec(self):
- """Activate IDENTITY_INSERT if needed."""
-
- if self.isinsert:
- tbl = self.compiled.statement.table
- seq_column = tbl._autoincrement_column
- insert_has_sequence = seq_column is not None
-
- if insert_has_sequence:
- self._enable_identity_insert = \
- seq_column.key in self.compiled_parameters[0]
- else:
- self._enable_identity_insert = False
-
- self._select_lastrowid = insert_has_sequence and \
- not self.compiled.returning and \
- not self._enable_identity_insert and \
- not self.executemany
-
- if self._enable_identity_insert:
- self.root_connection._cursor_execute(self.cursor,
- "SET IDENTITY_INSERT %s ON" %
- self.dialect.identifier_preparer.format_table(tbl),
- (), self)
-
- def post_exec(self):
- """Disable IDENTITY_INSERT if enabled."""
-
- conn = self.root_connection
- if self._select_lastrowid:
- if self.dialect.use_scope_identity:
- conn._cursor_execute(self.cursor,
- "SELECT scope_identity() AS lastrowid", (), self)
- else:
- conn._cursor_execute(self.cursor,
- "SELECT @@identity AS lastrowid", (), self)
- # fetchall() ensures the cursor is consumed without closing it
- row = self.cursor.fetchall()[0]
- self._lastrowid = int(row[0])
-
- if (self.isinsert or self.isupdate or self.isdelete) and \
- self.compiled.returning:
- self._result_proxy = engine.FullyBufferedResultProxy(self)
-
- if self._enable_identity_insert:
- conn._cursor_execute(self.cursor,
- "SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.
- format_table(self.compiled.statement.table),
- (), self)
-
- def get_lastrowid(self):
- return self._lastrowid
-
- def handle_dbapi_exception(self, e):
- if self._enable_identity_insert:
- try:
- self.cursor.execute(
- "SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.\
- format_table(self.compiled.statement.table)
- )
- except:
- pass
-
- def get_result_proxy(self):
- if self._result_proxy:
- return self._result_proxy
- else:
- return engine.ResultProxy(self)
-
-
-class MSSQLCompiler(compiler.SQLCompiler):
- returning_precedes_values = True
-
- extract_map = util.update_copy(
- compiler.SQLCompiler.extract_map,
- {
- 'doy': 'dayofyear',
- 'dow': 'weekday',
- 'milliseconds': 'millisecond',
- 'microseconds': 'microsecond'
- })
-
- def __init__(self, *args, **kwargs):
- self.tablealiases = {}
- super(MSSQLCompiler, self).__init__(*args, **kwargs)
-
- def visit_now_func(self, fn, **kw):
- return "CURRENT_TIMESTAMP"
-
- def visit_current_date_func(self, fn, **kw):
- return "GETDATE()"
-
- def visit_length_func(self, fn, **kw):
- return "LEN%s" % self.function_argspec(fn, **kw)
-
- def visit_char_length_func(self, fn, **kw):
- return "LEN%s" % self.function_argspec(fn, **kw)
-
- def visit_concat_op_binary(self, binary, operator, **kw):
- return "%s + %s" % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw))
-
- def visit_true(self, expr, **kw):
- return '1'
-
- def visit_false(self, expr, **kw):
- return '0'
-
- def visit_match_op_binary(self, binary, operator, **kw):
- return "CONTAINS (%s, %s)" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw))
-
- def get_select_precolumns(self, select):
- """ MS-SQL puts TOP, it's version of LIMIT here """
- if select._distinct or select._limit is not None:
- s = select._distinct and "DISTINCT " or ""
-
- # ODBC drivers and possibly others
- # don't support bind params in the SELECT clause on SQL Server.
- # so have to use literal here.
- if select._limit is not None:
- if not select._offset:
- s += "TOP %d " % select._limit
- return s
- return compiler.SQLCompiler.get_select_precolumns(self, select)
-
- def get_from_hint_text(self, table, text):
- return text
-
- def get_crud_hint_text(self, table, text):
- return text
-
- def limit_clause(self, select):
- # Limit in mssql is after the select keyword
- return ""
-
- def visit_select(self, select, **kwargs):
- """Look for ``LIMIT`` and OFFSET in a select statement, and if
- so tries to wrap it in a subquery with ``row_number()`` criterion.
-
- """
- if select._offset and not getattr(select, '_mssql_visit', None):
- # to use ROW_NUMBER(), an ORDER BY is required.
- if not select._order_by_clause.clauses:
- raise exc.CompileError('MSSQL requires an order_by when '
- 'using an offset.')
-
- _offset = select._offset
- _limit = select._limit
- _order_by_clauses = select._order_by_clause.clauses
- select = select._generate()
- select._mssql_visit = True
- select = select.column(
- sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
- .label("mssql_rn")
- ).order_by(None).alias()
-
- mssql_rn = sql.column('mssql_rn')
- limitselect = sql.select([c for c in select.c if
- c.key != 'mssql_rn'])
- limitselect.append_whereclause(mssql_rn > _offset)
- if _limit is not None:
- limitselect.append_whereclause(mssql_rn <= (_limit + _offset))
- return self.process(limitselect, iswrapper=True, **kwargs)
- else:
- return compiler.SQLCompiler.visit_select(self, select, **kwargs)
-
- def _schema_aliased_table(self, table):
- if getattr(table, 'schema', None) is not None:
- if table not in self.tablealiases:
- self.tablealiases[table] = table.alias()
- return self.tablealiases[table]
- else:
- return None
-
- def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
- if mssql_aliased is table or iscrud:
- return super(MSSQLCompiler, self).visit_table(table, **kwargs)
-
- # alias schema-qualified tables
- alias = self._schema_aliased_table(table)
- if alias is not None:
- return self.process(alias, mssql_aliased=table, **kwargs)
- else:
- return super(MSSQLCompiler, self).visit_table(table, **kwargs)
-
- def visit_alias(self, alias, **kwargs):
- # translate for schema-qualified table aliases
- kwargs['mssql_aliased'] = alias.original
- return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
-
- def visit_extract(self, extract, **kw):
- field = self.extract_map.get(extract.field, extract.field)
- return 'DATEPART("%s", %s)' % \
- (field, self.process(extract.expr, **kw))
-
- def visit_savepoint(self, savepoint_stmt):
- return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)
-
- def visit_rollback_to_savepoint(self, savepoint_stmt):
- return ("ROLLBACK TRANSACTION %s"
- % self.preparer.format_savepoint(savepoint_stmt))
-
- def visit_column(self, column, add_to_result_map=None, **kwargs):
- if column.table is not None and \
- (not self.isupdate and not self.isdelete) or self.is_subquery():
- # translate for schema-qualified table aliases
- t = self._schema_aliased_table(column.table)
- if t is not None:
- converted = expression._corresponding_column_or_error(
- t, column)
- if add_to_result_map is not None:
- add_to_result_map(
- column.name,
- column.name,
- (column, column.name, column.key),
- column.type
- )
-
- return super(MSSQLCompiler, self).\
- visit_column(converted, **kwargs)
-
- return super(MSSQLCompiler, self).visit_column(
- column, add_to_result_map=add_to_result_map, **kwargs)
-
- def visit_binary(self, binary, **kwargs):
- """Move bind parameters to the right-hand side of an operator, where
- possible.
-
- """
- if (
- isinstance(binary.left, expression.BindParameter)
- and binary.operator == operator.eq
- and not isinstance(binary.right, expression.BindParameter)
- ):
- return self.process(
- expression.BinaryExpression(binary.right,
- binary.left,
- binary.operator),
- **kwargs)
- return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
-
- def returning_clause(self, stmt, returning_cols):
-
- if self.isinsert or self.isupdate:
- target = stmt.table.alias("inserted")
- else:
- target = stmt.table.alias("deleted")
-
- adapter = sql_util.ClauseAdapter(target)
-
- columns = [
- self._label_select_column(None, adapter.traverse(c),
- True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
-
- return 'OUTPUT ' + ', '.join(columns)
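
A minimal sketch (not from the original file) of the kind of statement this OUTPUT rendering supports, assuming a hypothetical table ``t``::

    from sqlalchemy import Table, Column, Integer, String, MetaData

    t = Table('t', MetaData(),
              Column('id', Integer, primary_key=True),
              Column('name', String(30)))

    stmt = t.insert().values(name='x').returning(t.c.id)
    # expected MSSQL rendering (roughly):
    #   INSERT INTO t (name) OUTPUT inserted.id VALUES (:name)
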
-
- def get_cte_preamble(self, recursive):
- # SQL Server finds it too inconvenient to accept
- # an entirely optional, SQL standard specified,
- # "RECURSIVE" word with their "WITH",
- # so here we go
- return "WITH"
-
- def label_select_column(self, select, column, asfrom):
- if isinstance(column, expression.Function):
- return column.label(None)
- else:
- return super(MSSQLCompiler, self).\
- label_select_column(select, column, asfrom)
-
- def for_update_clause(self, select):
- # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
- # SQLAlchemy doesn't use
- return ''
-
- def order_by_clause(self, select, **kw):
- order_by = self.process(select._order_by_clause, **kw)
-
- # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
- if order_by and (not self.is_subquery() or select._limit):
- return " ORDER BY " + order_by
- else:
- return ""
-
- def update_from_clause(self, update_stmt,
- from_table, extra_froms,
- from_hints,
- **kw):
- """Render the UPDATE..FROM clause specific to MSSQL.
-
- In MSSQL, if the UPDATE statement involves an alias of the table to
- be updated, then the table itself must be added to the FROM list as
- well. Otherwise, it is optional. Here, we add it regardless.
-
- """
- return "FROM " + ', '.join(
- t._compiler_dispatch(self, asfrom=True,
- fromhints=from_hints, **kw)
- for t in [from_table] + extra_froms)
-
-
-class MSSQLStrictCompiler(MSSQLCompiler):
- """A subclass of MSSQLCompiler which disables the usage of bind
- parameters where not allowed natively by MS-SQL.
-
- A dialect may use this compiler on a platform where native
- binds are used.
-
- """
- ansi_bind_rules = True
-
- def visit_in_op_binary(self, binary, operator, **kw):
- kw['literal_binds'] = True
- return "%s IN %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
-
- def visit_notin_op_binary(self, binary, operator, **kw):
- kw['literal_binds'] = True
- return "%s NOT IN %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
-
- def render_literal_value(self, value, type_):
- """
- For date and datetime values, convert to a string
- format acceptable to MSSQL. That seems to be the
- so-called ODBC canonical date format which looks
- like this:
-
- yyyy-mm-dd hh:mi:ss.mmm(24h)
-
- For other data types, call the base class implementation.
- """
- # datetime and date are both subclasses of datetime.date
- if issubclass(type(value), datetime.date):
- # SQL Server wants single quotes around the date string.
- return "'" + str(value) + "'"
- else:
- return super(MSSQLStrictCompiler, self).\
- render_literal_value(value, type_)
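
For instance (illustrative only), a Python ``datetime`` literal would be rendered as a quoted ODBC-canonical string by the rule above::

    import datetime

    value = datetime.datetime(2014, 3, 5, 14, 30, 0)
    literal = "'" + str(value) + "'"
    # literal == "'2014-03-05 14:30:00'"
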
-
-
-class MSDDLCompiler(compiler.DDLCompiler):
- def get_column_specification(self, column, **kwargs):
- colspec = (self.preparer.format_column(column) + " "
- + self.dialect.type_compiler.process(column.type))
-
- if column.nullable is not None:
- if not column.nullable or column.primary_key or \
- isinstance(column.default, sa_schema.Sequence):
- colspec += " NOT NULL"
- else:
- colspec += " NULL"
-
- if column.table is None:
- raise exc.CompileError(
- "mssql requires Table-bound columns "
- "in order to generate DDL")
-
- # install an IDENTITY Sequence if we have either an explicit Sequence or an implicit IDENTITY column
- if isinstance(column.default, sa_schema.Sequence):
- if column.default.start == 0:
- start = 0
- else:
- start = column.default.start or 1
-
- colspec += " IDENTITY(%s,%s)" % (start, column.default.increment or 1)
- elif column is column.table._autoincrement_column:
- colspec += " IDENTITY(1,1)"
- else:
- default = self.get_column_default_string(column)
- if default is not None:
- colspec += " DEFAULT " + default
-
- return colspec
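
A hedged example of the IDENTITY handling above (the table and sequence names are invented for illustration)::

    from sqlalchemy import Table, Column, Integer, MetaData, Sequence

    t = Table('t', MetaData(),
              Column('id', Integer,
                     Sequence('t_id_seq', start=1000, increment=5),
                     primary_key=True))
    # expected column DDL fragment: "id INTEGER NOT NULL IDENTITY(1000,5)"
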
-
- def visit_create_index(self, create, include_schema=False):
- index = create.element
- self._verify_index_table(index)
- preparer = self.preparer
- text = "CREATE "
- if index.unique:
- text += "UNIQUE "
-
- # handle clustering option
- if index.dialect_options['mssql']['clustered']:
- text += "CLUSTERED "
-
- text += "INDEX %s ON %s (%s)" \
- % (
- self._prepared_index_name(index,
- include_schema=include_schema),
- preparer.format_table(index.table),
- ', '.join(
- self.sql_compiler.process(expr,
- include_table=False, literal_binds=True) for
- expr in index.expressions)
- )
-
- # handle other included columns
- if index.dialect_options['mssql']['include']:
- inclusions = [index.table.c[col]
- if isinstance(col, util.string_types) else col
- for col in index.dialect_options['mssql']['include']]
-
- text += " INCLUDE (%s)" \
- % ', '.join([preparer.quote(c.name)
- for c in inclusions])
-
- return text
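
For example (a sketch, not from the original source), the ``mssql_clustered`` and ``mssql_include`` options consumed here would be supplied on the :class:`.Index` itself::

    from sqlalchemy import Table, Column, Integer, String, MetaData, Index

    t = Table('t', MetaData(),
              Column('x', Integer),
              Column('y', String(50)))
    Index('ix_t_x', t.c.x, mssql_clustered=True, mssql_include=['y'])
    # expected DDL (roughly):
    #   CREATE CLUSTERED INDEX ix_t_x ON t (x) INCLUDE ([y])
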
-
- def visit_drop_index(self, drop):
- return "\nDROP INDEX %s ON %s" % (
- self._prepared_index_name(drop.element, include_schema=False),
- self.preparer.format_table(drop.element.table)
- )
-
- def visit_primary_key_constraint(self, constraint):
- if len(constraint) == 0:
- return ''
- text = ""
- if constraint.name is not None:
- text += "CONSTRAINT %s " % \
- self.preparer.format_constraint(constraint)
- text += "PRIMARY KEY "
-
- if constraint.dialect_options['mssql']['clustered']:
- text += "CLUSTERED "
-
- text += "(%s)" % ', '.join(self.preparer.quote(c.name)
- for c in constraint)
- text += self.define_constraint_deferrability(constraint)
- return text
-
- def visit_unique_constraint(self, constraint):
- if len(constraint) == 0:
- return ''
- text = ""
- if constraint.name is not None:
- text += "CONSTRAINT %s " % \
- self.preparer.format_constraint(constraint)
- text += "UNIQUE "
-
- if constraint.dialect_options['mssql']['clustered']:
- text += "CLUSTERED "
-
- text += "(%s)" % ', '.join(self.preparer.quote(c.name)
- for c in constraint)
- text += self.define_constraint_deferrability(constraint)
- return text
-
-class MSIdentifierPreparer(compiler.IdentifierPreparer):
- reserved_words = RESERVED_WORDS
-
- def __init__(self, dialect):
- super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
- final_quote=']')
-
- def _escape_identifier(self, value):
- return value
-
- def quote_schema(self, schema, force=None):
- """Prepare a quoted table and schema name."""
- result = '.'.join([self.quote(x, force) for x in schema.split('.')])
- return result
-
-
-def _db_plus_owner_listing(fn):
- def wrap(dialect, connection, schema=None, **kw):
- dbname, owner = _owner_plus_db(dialect, schema)
- return _switch_db(dbname, connection, fn, dialect, connection,
- dbname, owner, schema, **kw)
- return update_wrapper(wrap, fn)
-
-
-def _db_plus_owner(fn):
- def wrap(dialect, connection, tablename, schema=None, **kw):
- dbname, owner = _owner_plus_db(dialect, schema)
- return _switch_db(dbname, connection, fn, dialect, connection,
- tablename, dbname, owner, schema, **kw)
- return update_wrapper(wrap, fn)
-
-
-def _switch_db(dbname, connection, fn, *arg, **kw):
- if dbname:
- current_db = connection.scalar("select db_name()")
- connection.execute("use %s" % dbname)
- try:
- return fn(*arg, **kw)
- finally:
- if dbname:
- connection.execute("use %s" % current_db)
-
-
-def _owner_plus_db(dialect, schema):
- if not schema:
- return None, dialect.default_schema_name
- elif "." in schema:
- return schema.split(".", 1)
- else:
- return None, schema
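
A small sketch of the database/owner splitting performed by the helpers above (the schema values are hypothetical; the dialect is replaced by its ``default_schema_name`` for illustration)::

    def _owner_plus_db_demo(default_schema_name, schema):
        # mirrors _owner_plus_db() above
        if not schema:
            return None, default_schema_name
        elif "." in schema:
            return tuple(schema.split(".", 1))
        else:
            return None, schema

    assert _owner_plus_db_demo("dbo", None) == (None, "dbo")
    assert _owner_plus_db_demo("dbo", "mydb.dbo") == ("mydb", "dbo")
    assert _owner_plus_db_demo("dbo", "sales") == (None, "sales")
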
-
-
-class MSDialect(default.DefaultDialect):
- name = 'mssql'
- supports_default_values = True
- supports_empty_insert = False
- execution_ctx_cls = MSExecutionContext
- use_scope_identity = True
- max_identifier_length = 128
- schema_name = "dbo"
-
- colspecs = {
- sqltypes.DateTime: _MSDateTime,
- sqltypes.Date: _MSDate,
- sqltypes.Time: TIME,
- }
-
- ischema_names = ischema_names
-
- supports_native_boolean = False
- supports_unicode_binds = True
- postfetch_lastrowid = True
-
- server_version_info = ()
-
- statement_compiler = MSSQLCompiler
- ddl_compiler = MSDDLCompiler
- type_compiler = MSTypeCompiler
- preparer = MSIdentifierPreparer
-
- construct_arguments = [
- (sa_schema.PrimaryKeyConstraint, {
- "clustered": False
- }),
- (sa_schema.UniqueConstraint, {
- "clustered": False
- }),
- (sa_schema.Index, {
- "clustered": False,
- "include": None
- })
- ]
-
- def __init__(self,
- query_timeout=None,
- use_scope_identity=True,
- max_identifier_length=None,
- schema_name="dbo", **opts):
- self.query_timeout = int(query_timeout or 0)
- self.schema_name = schema_name
-
- self.use_scope_identity = use_scope_identity
- self.max_identifier_length = int(max_identifier_length or 0) or \
- self.max_identifier_length
- super(MSDialect, self).__init__(**opts)
-
- def do_savepoint(self, connection, name):
- # give the DBAPI a push
- connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
- super(MSDialect, self).do_savepoint(connection, name)
-
- def do_release_savepoint(self, connection, name):
- # SQL Server does not support RELEASE SAVEPOINT
- pass
-
- def initialize(self, connection):
- super(MSDialect, self).initialize(connection)
- if self.server_version_info[0] not in list(range(8, 17)):
- # FreeTDS with version 4.2 seems to report here
- # a number like "95.10.255". Don't know what
- # that is. So emit warning.
- util.warn(
- "Unrecognized server version info '%s'. Version specific "
- "behaviors may not function properly. If using ODBC "
- "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
- "is configured in the FreeTDS configuration." %
- ".".join(str(x) for x in self.server_version_info))
- if self.server_version_info >= MS_2005_VERSION and \
- 'implicit_returning' not in self.__dict__:
- self.implicit_returning = True
-
- def _get_default_schema_name(self, connection):
- user_name = connection.scalar("SELECT user_name()")
- if user_name is not None:
- # now, get the default schema
- query = sql.text("""
- SELECT default_schema_name FROM
- sys.database_principals
- WHERE name = :name
- AND type = 'S'
- """)
- try:
- default_schema_name = connection.scalar(query, name=user_name)
- if default_schema_name is not None:
- return util.text_type(default_schema_name)
- except:
- pass
- return self.schema_name
-
- @_db_plus_owner
- def has_table(self, connection, tablename, dbname, owner, schema):
- columns = ischema.columns
-
- whereclause = columns.c.table_name == tablename
-
- if owner:
- whereclause = sql.and_(whereclause,
- columns.c.table_schema == owner)
- s = sql.select([columns], whereclause)
- c = connection.execute(s)
- return c.first() is not None
-
- @reflection.cache
- def get_schema_names(self, connection, **kw):
- s = sql.select([ischema.schemata.c.schema_name],
- order_by=[ischema.schemata.c.schema_name]
- )
- schema_names = [r[0] for r in connection.execute(s)]
- return schema_names
-
- @reflection.cache
- @_db_plus_owner_listing
- def get_table_names(self, connection, dbname, owner, schema, **kw):
- tables = ischema.tables
- s = sql.select([tables.c.table_name],
- sql.and_(
- tables.c.table_schema == owner,
- tables.c.table_type == 'BASE TABLE'
- ),
- order_by=[tables.c.table_name]
- )
- table_names = [r[0] for r in connection.execute(s)]
- return table_names
-
- @reflection.cache
- @_db_plus_owner_listing
- def get_view_names(self, connection, dbname, owner, schema, **kw):
- tables = ischema.tables
- s = sql.select([tables.c.table_name],
- sql.and_(
- tables.c.table_schema == owner,
- tables.c.table_type == 'VIEW'
- ),
- order_by=[tables.c.table_name]
- )
- view_names = [r[0] for r in connection.execute(s)]
- return view_names
-
- @reflection.cache
- @_db_plus_owner
- def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
- # using system catalogs, don't support index reflection
- # below MS 2005
- if self.server_version_info < MS_2005_VERSION:
- return []
-
- rp = connection.execute(
- sql.text("select ind.index_id, ind.is_unique, ind.name "
- "from sys.indexes as ind join sys.tables as tab on "
- "ind.object_id=tab.object_id "
- "join sys.schemas as sch on sch.schema_id=tab.schema_id "
- "where tab.name = :tabname "
- "and sch.name=:schname "
- "and ind.is_primary_key=0",
- bindparams=[
- sql.bindparam('tabname', tablename,
- sqltypes.String(convert_unicode=True)),
- sql.bindparam('schname', owner,
- sqltypes.String(convert_unicode=True))
- ],
- typemap={
- 'name': sqltypes.Unicode()
- }
- )
- )
- indexes = {}
- for row in rp:
- indexes[row['index_id']] = {
- 'name': row['name'],
- 'unique': row['is_unique'] == 1,
- 'column_names': []
- }
- rp = connection.execute(
- sql.text(
- "select ind_col.index_id, ind_col.object_id, col.name "
- "from sys.columns as col "
- "join sys.tables as tab on tab.object_id=col.object_id "
- "join sys.index_columns as ind_col on "
- "(ind_col.column_id=col.column_id and "
- "ind_col.object_id=tab.object_id) "
- "join sys.schemas as sch on sch.schema_id=tab.schema_id "
- "where tab.name=:tabname "
- "and sch.name=:schname",
- bindparams=[
- sql.bindparam('tabname', tablename,
- sqltypes.String(convert_unicode=True)),
- sql.bindparam('schname', owner,
- sqltypes.String(convert_unicode=True))
- ],
- typemap={'name': sqltypes.Unicode()}
- ),
- )
- for row in rp:
- if row['index_id'] in indexes:
- indexes[row['index_id']]['column_names'].append(row['name'])
-
- return list(indexes.values())
-
- @reflection.cache
- @_db_plus_owner
- def get_view_definition(self, connection, viewname, dbname, owner, schema, **kw):
- rp = connection.execute(
- sql.text(
- "select definition from sys.sql_modules as mod, "
- "sys.views as views, "
- "sys.schemas as sch"
- " where "
- "mod.object_id=views.object_id and "
- "views.schema_id=sch.schema_id and "
- "views.name=:viewname and sch.name=:schname",
- bindparams=[
- sql.bindparam('viewname', viewname,
- sqltypes.String(convert_unicode=True)),
- sql.bindparam('schname', owner,
- sqltypes.String(convert_unicode=True))
- ]
- )
- )
-
- if rp:
- view_def = rp.scalar()
- return view_def
-
- @reflection.cache
- @_db_plus_owner
- def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
- # Get base columns
- columns = ischema.columns
- if owner:
- whereclause = sql.and_(columns.c.table_name == tablename,
- columns.c.table_schema == owner)
- else:
- whereclause = columns.c.table_name == tablename
- s = sql.select([columns], whereclause,
- order_by=[columns.c.ordinal_position])
-
- c = connection.execute(s)
- cols = []
- while True:
- row = c.fetchone()
- if row is None:
- break
- (name, type, nullable, charlen,
- numericprec, numericscale, default, collation) = (
- row[columns.c.column_name],
- row[columns.c.data_type],
- row[columns.c.is_nullable] == 'YES',
- row[columns.c.character_maximum_length],
- row[columns.c.numeric_precision],
- row[columns.c.numeric_scale],
- row[columns.c.column_default],
- row[columns.c.collation_name]
- )
- coltype = self.ischema_names.get(type, None)
-
- kwargs = {}
- if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
- MSNText, MSBinary, MSVarBinary,
- sqltypes.LargeBinary):
- kwargs['length'] = charlen
- if collation:
- kwargs['collation'] = collation
- if coltype == MSText or \
- (coltype in (MSString, MSNVarchar) and charlen == -1):
- kwargs.pop('length')
-
- if coltype is None:
- util.warn(
- "Did not recognize type '%s' of column '%s'" %
- (type, name))
- coltype = sqltypes.NULLTYPE
- else:
- if issubclass(coltype, sqltypes.Numeric) and \
- coltype is not MSReal:
- kwargs['scale'] = numericscale
- kwargs['precision'] = numericprec
-
- coltype = coltype(**kwargs)
- cdict = {
- 'name': name,
- 'type': coltype,
- 'nullable': nullable,
- 'default': default,
- 'autoincrement': False,
- }
- cols.append(cdict)
- # autoincrement and identity
- colmap = {}
- for col in cols:
- colmap[col['name']] = col
- # We also run an sp_columns to check for identity columns:
- cursor = connection.execute("sp_columns @table_name = '%s', "
- "@table_owner = '%s'"
- % (tablename, owner))
- ic = None
- while True:
- row = cursor.fetchone()
- if row is None:
- break
- (col_name, type_name) = row[3], row[5]
- if type_name.endswith("identity") and col_name in colmap:
- ic = col_name
- colmap[col_name]['autoincrement'] = True
- colmap[col_name]['sequence'] = dict(
- name='%s_identity' % col_name)
- break
- cursor.close()
-
- if ic is not None and self.server_version_info >= MS_2005_VERSION:
- table_fullname = "%s.%s" % (owner, tablename)
- cursor = connection.execute(
- "select ident_seed('%s'), ident_incr('%s')"
- % (table_fullname, table_fullname)
- )
-
- row = cursor.first()
- if row is not None and row[0] is not None:
- colmap[ic]['sequence'].update({
- 'start': int(row[0]),
- 'increment': int(row[1])
- })
- return cols
-
- @reflection.cache
- @_db_plus_owner
- def get_pk_constraint(self, connection, tablename, dbname, owner, schema, **kw):
- pkeys = []
- TC = ischema.constraints
- C = ischema.key_constraints.alias('C')
-
- # Primary key constraints
- s = sql.select([C.c.column_name, TC.c.constraint_type, C.c.constraint_name],
- sql.and_(TC.c.constraint_name == C.c.constraint_name,
- TC.c.table_schema == C.c.table_schema,
- C.c.table_name == tablename,
- C.c.table_schema == owner)
- )
- c = connection.execute(s)
- constraint_name = None
- for row in c:
- if 'PRIMARY' in row[TC.c.constraint_type.name]:
- pkeys.append(row[0])
- if constraint_name is None:
- constraint_name = row[C.c.constraint_name.name]
- return {'constrained_columns': pkeys, 'name': constraint_name}
-
- @reflection.cache
- @_db_plus_owner
- def get_foreign_keys(self, connection, tablename, dbname, owner, schema, **kw):
- RR = ischema.ref_constraints
- C = ischema.key_constraints.alias('C')
- R = ischema.key_constraints.alias('R')
-
- # Foreign key constraints
- s = sql.select([C.c.column_name,
- R.c.table_schema, R.c.table_name, R.c.column_name,
- RR.c.constraint_name, RR.c.match_option,
- RR.c.update_rule,
- RR.c.delete_rule],
- sql.and_(C.c.table_name == tablename,
- C.c.table_schema == owner,
- C.c.constraint_name == RR.c.constraint_name,
- R.c.constraint_name ==
- RR.c.unique_constraint_name,
- C.c.ordinal_position == R.c.ordinal_position
- ),
- order_by=[RR.c.constraint_name, R.c.ordinal_position]
- )
-
- # group rows by constraint ID, to handle multi-column FKs
- fkeys = []
- fknm, scols, rcols = (None, [], [])
-
- def fkey_rec():
- return {
- 'name': None,
- 'constrained_columns': [],
- 'referred_schema': None,
- 'referred_table': None,
- 'referred_columns': []
- }
-
- fkeys = util.defaultdict(fkey_rec)
-
- for r in connection.execute(s).fetchall():
- scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
-
- rec = fkeys[rfknm]
- rec['name'] = rfknm
- if not rec['referred_table']:
- rec['referred_table'] = rtbl
- if schema is not None or owner != rschema:
- if dbname:
- rschema = dbname + "." + rschema
- rec['referred_schema'] = rschema
-
- local_cols, remote_cols = \
- rec['constrained_columns'],\
- rec['referred_columns']
-
- local_cols.append(scol)
- remote_cols.append(rcol)
-
- return list(fkeys.values())
diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py
deleted file mode 100644
index 26e70f7f..00000000
--- a/lib/sqlalchemy/dialects/mssql/information_schema.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# mssql/information_schema.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-# TODO: should be using the sys. catalog with SQL Server, not information schema
-
-from ... import Table, MetaData, Column
-from ...types import String, Unicode, UnicodeText, Integer, TypeDecorator
-from ... import cast
-from ... import util
-from ...sql import expression
-from ...ext.compiler import compiles
-
-ischema = MetaData()
-
-class CoerceUnicode(TypeDecorator):
- impl = Unicode
-
- def process_bind_param(self, value, dialect):
- if util.py2k and isinstance(value, util.binary_type):
- value = value.decode(dialect.encoding)
- return value
-
- def bind_expression(self, bindvalue):
- return _cast_on_2005(bindvalue)
-
-class _cast_on_2005(expression.ColumnElement):
- def __init__(self, bindvalue):
- self.bindvalue = bindvalue
-
-@compiles(_cast_on_2005)
-def _compile(element, compiler, **kw):
- from . import base
- if compiler.dialect.server_version_info < base.MS_2005_VERSION:
- return compiler.process(element.bindvalue, **kw)
- else:
- return compiler.process(cast(element.bindvalue, Unicode), **kw)
-
-schemata = Table("SCHEMATA", ischema,
- Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
- Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
- Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
- schema="INFORMATION_SCHEMA")
-
-tables = Table("TABLES", ischema,
- Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"),
- schema="INFORMATION_SCHEMA")
-
-columns = Table("COLUMNS", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
- Column("IS_NULLABLE", Integer, key="is_nullable"),
- Column("DATA_TYPE", String, key="data_type"),
- Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
- Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"),
- Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
- Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
- Column("COLUMN_DEFAULT", Integer, key="column_default"),
- Column("COLLATION_NAME", String, key="collation_name"),
- schema="INFORMATION_SCHEMA")
-
-constraints = Table("TABLE_CONSTRAINTS", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"),
- schema="INFORMATION_SCHEMA")
-
-column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- schema="INFORMATION_SCHEMA")
-
-key_constraints = Table("KEY_COLUMN_USAGE", ischema,
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
- schema="INFORMATION_SCHEMA")
-
-ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
- Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
- Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
- Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
- # TODO: is CATLOG misspelled ?
- Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode,
- key="unique_constraint_catalog"),
-
- Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode,
- key="unique_constraint_schema"),
- Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode,
- key="unique_constraint_name"),
- Column("MATCH_OPTION", String, key="match_option"),
- Column("UPDATE_RULE", String, key="update_rule"),
- Column("DELETE_RULE", String, key="delete_rule"),
- schema="INFORMATION_SCHEMA")
-
-views = Table("VIEWS", ischema,
- Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
- Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
- Column("TABLE_NAME", CoerceUnicode, key="table_name"),
- Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
- Column("CHECK_OPTION", String, key="check_option"),
- Column("IS_UPDATABLE", String, key="is_updatable"),
- schema="INFORMATION_SCHEMA")
diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py
deleted file mode 100644
index 5b686c47..00000000
--- a/lib/sqlalchemy/dialects/mssql/mxodbc.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# mssql/mxodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mssql+mxodbc
- :name: mxODBC
- :dbapi: mxodbc
- :connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
- :url: http://www.egenix.com/
-
-Execution Modes
----------------
-
-mxODBC features two styles of statement execution, using the
-``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
-an extension to the DBAPI specification). The former makes use of a particular
-API call specific to the SQL Server Native Client ODBC driver known as
-SQLDescribeParam, while the latter does not.
-
-mxODBC apparently only makes repeated use of a single prepared statement
-when SQLDescribeParam is used. The advantage to prepared statement reuse is
-one of performance. The disadvantage is that SQLDescribeParam has a limited
-set of scenarios in which bind parameters are understood, including that they
-cannot be placed within the argument lists of function calls, anywhere outside
-the FROM, or even within subqueries within the FROM clause - making the usage
-of bind parameters within SELECT statements impossible for all but the most
-simplistic statements.
-
-For this reason, the mxODBC dialect uses the "native" mode by default only for
-INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
-all other statements.
-
-This behavior can be controlled via
-:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
-``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
-value of ``True`` will unconditionally use native bind parameters and a value
-of ``False`` will unconditionally use string-escaped parameters.
-
-"""
-
-
-from ... import types as sqltypes
-from ...connectors.mxodbc import MxODBCConnector
-from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc
-from .base import (MSDialect,
- MSSQLStrictCompiler,
- _MSDateTime, _MSDate, _MSTime)
-
-
-class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
- """Include pyodbc's numeric processor.
- """
-
-
-class _MSDate_mxodbc(_MSDate):
- def bind_processor(self, dialect):
- def process(value):
- if value is not None:
- return "%s-%s-%s" % (value.year, value.month, value.day)
- else:
- return None
- return process
-
-
-class _MSTime_mxodbc(_MSTime):
- def bind_processor(self, dialect):
- def process(value):
- if value is not None:
- return "%s:%s:%s" % (value.hour, value.minute, value.second)
- else:
- return None
- return process
-
-
-class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
- """
- The pyodbc execution context is useful for enabling
- SELECT SCOPE_IDENTITY in cases where OUTPUT clause
- does not work (tables with insert triggers).
- """
- #todo - investigate whether the pyodbc execution context
- # is really only being used in cases where OUTPUT
- # won't work.
-
-
-class MSDialect_mxodbc(MxODBCConnector, MSDialect):
-
- # this is only needed if "native ODBC" mode is used,
- # which is now disabled by default.
- #statement_compiler = MSSQLStrictCompiler
-
- execution_ctx_cls = MSExecutionContext_mxodbc
-
- # flag used by _MSNumeric_mxodbc
- _need_decimal_fix = True
-
- colspecs = {
- sqltypes.Numeric: _MSNumeric_mxodbc,
- sqltypes.DateTime: _MSDateTime,
- sqltypes.Date: _MSDate_mxodbc,
- sqltypes.Time: _MSTime_mxodbc,
- }
-
- def __init__(self, description_encoding=None, **params):
- super(MSDialect_mxodbc, self).__init__(**params)
- self.description_encoding = description_encoding
-
-dialect = MSDialect_mxodbc
diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
deleted file mode 100644
index 0182fee1..00000000
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# mssql/pymssql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mssql+pymssql
- :name: pymssql
- :dbapi: pymssql
- :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?charset=utf8
- :url: http://pymssql.org/
-
-pymssql is a Python module that provides a Python DBAPI interface around
-`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
-Linux, MacOSX and Windows platforms.
-
-"""
-from .base import MSDialect
-from ... import types as sqltypes, util, processors
-import re
-
-
-class _MSNumeric_pymssql(sqltypes.Numeric):
- def result_processor(self, dialect, type_):
- if not self.asdecimal:
- return processors.to_float
- else:
- return sqltypes.Numeric.result_processor(self, dialect, type_)
-
-
-class MSDialect_pymssql(MSDialect):
- supports_sane_rowcount = False
- driver = 'pymssql'
-
- colspecs = util.update_copy(
- MSDialect.colspecs,
- {
- sqltypes.Numeric: _MSNumeric_pymssql,
- sqltypes.Float: sqltypes.Float,
- }
- )
-
- @classmethod
- def dbapi(cls):
- module = __import__('pymssql')
- # pymssql doesn't have a Binary method. we use string
- # TODO: monkeypatching here is less than ideal
- module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
-
- client_ver = tuple(int(x) for x in module.__version__.split("."))
- if client_ver < (1, ):
- util.warn("The pymssql dialect expects at least "
- "the 1.0 series of the pymssql DBAPI.")
- return module
-
- def __init__(self, **params):
- super(MSDialect_pymssql, self).__init__(**params)
- self.use_scope_identity = True
-
- def _get_server_version_info(self, connection):
- vers = connection.scalar("select @@version")
- m = re.match(
- r"Microsoft SQL Server.*? - (\d+).(\d+).(\d+).(\d+)", vers)
- if m:
- return tuple(int(x) for x in m.group(1, 2, 3, 4))
- else:
- return None
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- opts.update(url.query)
- port = opts.pop('port', None)
- if port and 'host' in opts:
- opts['host'] = "%s:%s" % (opts['host'], port)
- return [[], opts]
-
- def is_disconnect(self, e, connection, cursor):
- for msg in (
- "Adaptive Server connection timed out",
- "Net-Lib error during Connection reset by peer",
- "message 20003", # connection timeout
- "Error 10054",
- "Not connected to any MS SQL server",
- "Connection is closed"
- ):
- if msg in str(e):
- return True
- else:
- return False
-
-dialect = MSDialect_pymssql
diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py
deleted file mode 100644
index 8c43eb8a..00000000
--- a/lib/sqlalchemy/dialects/mssql/pyodbc.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# mssql/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mssql+pyodbc
- :name: PyODBC
- :dbapi: pyodbc
- :connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
- :url: http://pypi.python.org/pypi/pyodbc/
-
-Additional Connection Examples
--------------------------------
-
-Examples of pyodbc connection string URLs:
-
-* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
- The connection string that is created will appear like::
-
- dsn=mydsn;Trusted_Connection=Yes
-
-* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named
- ``mydsn`` passing in the ``UID`` and ``PWD`` information. The
- connection string that is created will appear like::
-
- dsn=mydsn;UID=user;PWD=pass
-
-* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects
- using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD``
- information, plus the additional connection configuration option
- ``LANGUAGE``. The connection string that is created will appear
- like::
-
- dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
-
-* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
- that would appear like::
-
- DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
-
-* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
- string which includes the port
- information using the comma syntax. This will create the following
- connection string::
-
- DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
-
-* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
- string that includes the port
- information as a separate ``port`` keyword. This will create the
- following connection string::
-
- DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
-
-* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection
- string that includes a custom
- ODBC driver name. This will create the following connection string::
-
- DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass
-
-If you require a connection string that is outside the options
-presented above, use the ``odbc_connect`` keyword to pass in a
-urlencoded connection string. What gets passed in will be urldecoded
-and passed directly.
-
-For example::
-
- mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb
-
-would create the following connection string::
-
- dsn=mydsn;Database=db
-
-Encoding your connection string can be easily accomplished through
-the python shell. For example::
-
- >>> import urllib
- >>> urllib.quote_plus('dsn=mydsn;Database=db')
- 'dsn%3Dmydsn%3BDatabase%3Ddb'
-
-Unicode Binds
--------------
-
-The current state of PyODBC on a unix backend with FreeTDS and/or
-EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
-versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
-alter how strings are received. The PyODBC dialect attempts to use all the information
-it knows to determine whether or not a Python unicode literal can be
-passed directly to the PyODBC driver or not; while SQLAlchemy can encode
-these to bytestrings first, some users have reported that PyODBC mis-handles
-bytestrings for certain encodings and requires a Python unicode object,
-while the author has observed widespread cases where a Python unicode
-is completely misinterpreted by PyODBC, particularly when dealing with
-the information schema tables used in table reflection, and the value
-must first be encoded to a bytestring.
-
-It is for this reason that whether or not unicode literals for bound
-parameters are sent to PyODBC can be controlled using the
-``supports_unicode_binds`` parameter to ``create_engine()``. When
-left at its default of ``None``, the PyODBC dialect will use its
-best guess as to whether or not the driver deals with unicode literals
-well. When ``False``, unicode literals will be encoded first, and when
-``True`` unicode literals will be passed straight through. This is an interim
-flag that hopefully should not be needed when the unicode situation stabilizes
-for unix + PyODBC.
-
-.. versionadded:: 0.7.7
- ``supports_unicode_binds`` parameter to ``create_engine()``\ .
-
-"""
-
-from .base import MSExecutionContext, MSDialect
-from ...connectors.pyodbc import PyODBCConnector
-from ... import types as sqltypes, util
-import decimal
-
-class _ms_numeric_pyodbc(object):
-
- """Turns Decimals with adjusted() < 0 or > 7 into strings.
-
- The routines here are needed for older pyodbc versions
- as well as current mxODBC versions.
-
- """
-
- def bind_processor(self, dialect):
-
- super_process = super(_ms_numeric_pyodbc, self).\
- bind_processor(dialect)
-
- if not dialect._need_decimal_fix:
- return super_process
-
- def process(value):
- if self.asdecimal and \
- isinstance(value, decimal.Decimal):
-
- adjusted = value.adjusted()
- if adjusted < 0:
- return self._small_dec_to_string(value)
- elif adjusted > 7:
- return self._large_dec_to_string(value)
-
- if super_process:
- return super_process(value)
- else:
- return value
- return process
-
- # these routines needed for older versions of pyodbc.
- # as of 2.1.8 this logic is integrated.
-
- def _small_dec_to_string(self, value):
- return "%s0.%s%s" % (
- (value < 0 and '-' or ''),
- '0' * (abs(value.adjusted()) - 1),
- "".join([str(nint) for nint in value.as_tuple()[1]]))
-
- def _large_dec_to_string(self, value):
- _int = value.as_tuple()[1]
- if 'E' in str(value):
- result = "%s%s%s" % (
- (value < 0 and '-' or ''),
- "".join([str(s) for s in _int]),
- "0" * (value.adjusted() - (len(_int) - 1)))
- else:
- if (len(_int) - 1) > value.adjusted():
- result = "%s%s.%s" % (
- (value < 0 and '-' or ''),
- "".join(
- [str(s) for s in _int][0:value.adjusted() + 1]),
- "".join(
- [str(s) for s in _int][value.adjusted() + 1:]))
- else:
- result = "%s%s" % (
- (value < 0 and '-' or ''),
- "".join(
- [str(s) for s in _int][0:value.adjusted() + 1]))
- return result
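
Illustrative expectations for the two conversion helpers above (a sketch; the values are arbitrary)::

    import decimal

    small = decimal.Decimal("1E-14")   # adjusted() == -14
    large = decimal.Decimal("1E+12")   # adjusted() == 12
    # _small_dec_to_string(small) -> "0.00000000000001"
    # _large_dec_to_string(large) -> "1000000000000"
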
-
-class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
- pass
-
-class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
- pass
-
-class MSExecutionContext_pyodbc(MSExecutionContext):
- _embedded_scope_identity = False
-
- def pre_exec(self):
- """where appropriate, issue "select scope_identity()" in the same
- statement.
-
- Background on why "scope_identity()" is preferable to "@@identity":
- http://msdn.microsoft.com/en-us/library/ms190315.aspx
-
- Background on why we attempt to embed "scope_identity()" into the same
- statement as the INSERT:
- http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
-
- """
-
- super(MSExecutionContext_pyodbc, self).pre_exec()
-
- # don't embed the scope_identity select into an
- # "INSERT .. DEFAULT VALUES"
- if self._select_lastrowid and \
- self.dialect.use_scope_identity and \
- len(self.parameters[0]):
- self._embedded_scope_identity = True
-
- self.statement += "; select scope_identity()"
-
- def post_exec(self):
- if self._embedded_scope_identity:
- # Fetch the last inserted id from the manipulated statement
- # We may have to skip over a number of result sets with
- # no data (due to triggers, etc.)
- while True:
- try:
- # fetchall() ensures the cursor is consumed
- # without closing it (FreeTDS particularly)
- row = self.cursor.fetchall()[0]
- break
- except self.dialect.dbapi.Error as e:
- # no way around this - nextset() consumes the previous set
- # so we need to just keep flipping
- self.cursor.nextset()
-
- self._lastrowid = int(row[0])
- else:
- super(MSExecutionContext_pyodbc, self).post_exec()
-
-
-class MSDialect_pyodbc(PyODBCConnector, MSDialect):
-
- execution_ctx_cls = MSExecutionContext_pyodbc
-
- pyodbc_driver_name = 'SQL Server'
-
- colspecs = util.update_copy(
- MSDialect.colspecs,
- {
- sqltypes.Numeric: _MSNumeric_pyodbc,
- sqltypes.Float: _MSFloat_pyodbc
- }
- )
-
- def __init__(self, description_encoding=None, **params):
- super(MSDialect_pyodbc, self).__init__(**params)
- self.description_encoding = description_encoding
- self.use_scope_identity = self.use_scope_identity and \
- self.dbapi and \
- hasattr(self.dbapi.Cursor, 'nextset')
- self._need_decimal_fix = self.dbapi and \
- self._dbapi_version() < (2, 1, 8)
-
-dialect = MSDialect_pyodbc
diff --git a/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
deleted file mode 100644
index 706eef3a..00000000
--- a/lib/sqlalchemy/dialects/mssql/zxjdbc.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# mssql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mssql+zxjdbc
- :name: zxJDBC for Jython
- :dbapi: zxjdbc
- :connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
- :driverurl: http://jtds.sourceforge.net/
-
-
-"""
-from ...connectors.zxJDBC import ZxJDBCConnector
-from .base import MSDialect, MSExecutionContext
-from ... import engine
-
-
-class MSExecutionContext_zxjdbc(MSExecutionContext):
-
- _embedded_scope_identity = False
-
- def pre_exec(self):
- super(MSExecutionContext_zxjdbc, self).pre_exec()
- # scope_identity after the fact returns null in jTDS so we must
- # embed it
- if self._select_lastrowid and self.dialect.use_scope_identity:
- self._embedded_scope_identity = True
- self.statement += "; SELECT scope_identity()"
-
- def post_exec(self):
- if self._embedded_scope_identity:
- while True:
- try:
- row = self.cursor.fetchall()[0]
- break
- except self.dialect.dbapi.Error:
- self.cursor.nextset()
- self._lastrowid = int(row[0])
-
- if (self.isinsert or self.isupdate or self.isdelete) and \
- self.compiled.returning:
- self._result_proxy = engine.FullyBufferedResultProxy(self)
-
- if self._enable_identity_insert:
- table = self.dialect.identifier_preparer.format_table(
- self.compiled.statement.table)
- self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
-
-
-class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
- jdbc_db_name = 'jtds:sqlserver'
- jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
-
- execution_ctx_cls = MSExecutionContext_zxjdbc
-
- def _get_server_version_info(self, connection):
- return tuple(
- int(x)
- for x in connection.connection.dbversion.split('.')
- )
-
-dialect = MSDialect_zxjdbc
diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py
deleted file mode 100644
index 4eb8cc6d..00000000
--- a/lib/sqlalchemy/dialects/mysql/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# mysql/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from . import base, mysqldb, oursql, \
- pyodbc, zxjdbc, mysqlconnector, pymysql,\
- gaerdbms, cymysql
-
-# default dialect
-base.dialect = mysqldb.dialect
-
-from .base import \
- BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
- DECIMAL, DOUBLE, ENUM, DECIMAL,\
- FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
- MEDIUMINT, MEDIUMTEXT, NCHAR, \
- NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
- TINYBLOB, TINYINT, TINYTEXT,\
- VARBINARY, VARCHAR, YEAR, dialect
-
-__all__ = (
-'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE',
-'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT',
-'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP',
-'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect'
-)
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
deleted file mode 100644
index ba6e7b62..00000000
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ /dev/null
@@ -1,3078 +0,0 @@
-# mysql/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: mysql
- :name: MySQL
-
-Supported Versions and Features
--------------------------------
-
-SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
-However, no heroic measures are taken to work around major missing
-SQL features - if your server version does not support sub-selects, for
-example, they won't work in SQLAlchemy either.
-
-See the official MySQL documentation for detailed information about features
-supported in any given server release.
-
-.. _mysql_connection_timeouts:
-
-Connection Timeouts
--------------------
-
-MySQL features an automatic connection close behavior, for connections that have
-been idle for eight hours or more. To circumvent this issue, use the
-``pool_recycle`` option which controls the maximum age of any connection::
-
- engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
-
-.. _mysql_storage_engines:
-
-CREATE TABLE arguments including Storage Engines
-------------------------------------------------
-
-MySQL's CREATE TABLE syntax includes a wide array of special options,
-including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``, ``INSERT_METHOD``, and many more.
-To accommodate the rendering of these arguments, specify the form
-``mysql_argument_name="value"``. For example, to specify a table with
-``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE`` of ``1024``::
-
- Table('mytable', metadata,
- Column('data', String(32)),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- mysql_key_block_size="1024"
- )
-
-The MySQL dialect will normally transfer any keyword specified as ``mysql_keyword_name``
-to be rendered as ``KEYWORD_NAME`` in the ``CREATE TABLE`` statement. A handful
-of these names will render with a space instead of an underscore; to support this,
-the MySQL dialect has awareness of these particular names, which include
-``DATA DIRECTORY`` (e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
-``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g. ``mysql_index_directory``).
-
-The most common argument is ``mysql_engine``, which refers to the storage engine
-for the table. Historically, MySQL server installations would default
-to ``MyISAM`` for this value, although newer versions may be defaulting
-to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
-of transactions and foreign keys.
-
-A :class:`.Table` that is created in a MySQL database with a storage engine
-of ``MyISAM`` will be essentially non-transactional, meaning any INSERT/UPDATE/DELETE
-statement referring to this table will be invoked as autocommit. It also will have no
-support for foreign key constraints; while the ``CREATE TABLE`` statement
-accepts foreign key options, when using the ``MyISAM`` storage engine these
-arguments are discarded. Reflecting such a table will also produce no
-foreign key constraint information.
-
-For fully atomic transactions as well as support for foreign key
-constraints, all participating ``CREATE TABLE`` statements must specify a
-transactional engine, which in the vast majority of cases is ``InnoDB``.
-
-.. seealso::
-
- `The InnoDB Storage Engine
- `The InnoDB Storage Engine
- <http://dev.mysql.com/doc/refman/5.5/en/innodb-storage-engine.html>`_ -
-
-Case Sensitivity and Table Reflection
--------------------------------------
-
-MySQL has inconsistent support for case-sensitive identifier
-names, basing support on specific details of the underlying
-operating system. However, it has been observed that no matter
-what case sensitivity behavior is present, the names of tables in
-foreign key declarations are *always* received from the database
-as all-lower case, making it impossible to accurately reflect a
-schema where inter-related tables use mixed-case identifier names.
-
-Therefore it is strongly advised that table names be declared as
-all lower case both within SQLAlchemy as well as on the MySQL
-database itself, especially if database reflection features are
-to be used.
-
-Transaction Isolation Level
----------------------------
-
-:func:`.create_engine` accepts an ``isolation_level``
-parameter which results in the command ``SET SESSION
-TRANSACTION ISOLATION LEVEL <level>`` being invoked for
-every new connection. Valid values for this parameter are
-``READ COMMITTED``, ``READ UNCOMMITTED``,
-``REPEATABLE READ``, and ``SERIALIZABLE``::
-
- engine = create_engine(
- "mysql://scott:tiger@localhost/test",
- isolation_level="READ UNCOMMITTED"
- )
-
-.. versionadded:: 0.7.6
-
-AUTO_INCREMENT Behavior
------------------------
-
-When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
-the first :class:`.Integer` primary key column which is not marked as a foreign key::
-
- >>> t = Table('mytable', metadata,
- ... Column('mytable_id', Integer, primary_key=True)
- ... )
- >>> t.create()
- CREATE TABLE mytable (
- mytable_id INTEGER NOT NULL AUTO_INCREMENT,
- PRIMARY KEY (mytable_id)
- )
-
-You can disable this behavior by passing ``False`` to the :paramref:`~.Column.autoincrement`
-argument of :class:`.Column`. This flag can also be used to enable
-auto-increment on a secondary column in a multi-column key for some storage
-engines::
-
- Table('mytable', metadata,
- Column('gid', Integer, primary_key=True, autoincrement=False),
- Column('id', Integer, primary_key=True)
- )
-
-Ansi Quoting Style
-------------------
-
-MySQL features two varieties of identifier "quoting style", one using
-backticks and the other using quotes, e.g. ```some_identifier``` vs.
-``"some_identifier"``. All MySQL dialects detect which version
-is in use by checking the value of ``sql_mode`` when a connection is first
-established with a particular :class:`.Engine`. This quoting style comes
-into play when rendering table and column names as well as when reflecting
-existing database structures. The detection is entirely automatic and
-no special configuration is needed to use either quoting style.
-
-.. versionchanged:: 0.6 detection of ANSI quoting style is entirely automatic,
- there's no longer any end-user ``create_engine()`` options in this regard.
-
-MySQL SQL Extensions
---------------------
-
-Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
-function and operator support::
-
- table.select(table.c.password==func.md5('plaintext'))
- table.select(table.c.username.op('regexp')('^[a-d]'))
-
-And of course any valid MySQL statement can be executed as a string as well.
-
-Some limited direct support for MySQL extensions to SQL is currently
-available.
-
-* SELECT pragma::
-
- select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
-
-* UPDATE with LIMIT::
-
- update(..., mysql_limit=10)
-
-rowcount Support
-----------------
-
-SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
-usual definition of "number of rows matched by an UPDATE or DELETE" statement.
-This is in contradiction to the default setting on most MySQL DBAPI drivers,
-which is "number of rows actually modified/deleted". For this reason, the
-SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
-is overridden using DBAPI-specific options
-(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
-OurSQL driver).
-
-See also:
-
-:attr:`.ResultProxy.rowcount`
-
-
-CAST Support
-------------
-
-MySQL documents the CAST operator as available in version 4.0.2. When using the
-SQLAlchemy :func:`.cast` function, SQLAlchemy
-will not render the CAST token on MySQL before this version, based on server version
-detection, instead rendering the internal expression directly.
-
-CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't
-add all datatype support until 4.1.1. If your application falls into this
-narrow area, the behavior of CAST can be controlled using the
-:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
-
- from sqlalchemy.sql.expression import Cast
- from sqlalchemy.ext.compiler import compiles
-
- @compiles(Cast, 'mysql')
- def _check_mysql_version(element, compiler, **kw):
- if compiler.dialect.server_version_info < (4, 1, 0):
- return compiler.process(element.clause, **kw)
- else:
- return compiler.visit_cast(element, **kw)
-
-The above function, which only needs to be declared once
-within an application, overrides the compilation of the
-:func:`.cast` construct to check for version 4.1.0 before
-fully rendering CAST; else the internal element of the
-construct is rendered directly.
-
-
-.. _mysql_indexes:
-
-MySQL Specific Index Options
-----------------------------
-
-MySQL-specific extensions to the :class:`.Index` construct are available.
-
-Index Length
-~~~~~~~~~~~~~
-
-MySQL provides an option to create index entries with a certain length, where
-"length" refers to the number of characters or bytes in each value which will
-become part of the index. SQLAlchemy provides this feature via the
-``mysql_length`` parameter::
-
- Index('my_index', my_table.c.data, mysql_length=10)
-
- Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4, 'b': 9})
-
-Prefix lengths are given in characters for nonbinary string types and in bytes
-for binary string types. The value passed to the keyword argument *must* be
-either an integer (and, thus, specify the same prefix length value for all
-columns of the index) or a dict in which keys are column names and values are
-prefix length values for corresponding columns. MySQL only allows a length for
-a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY, VARBINARY and
-BLOB.
-
-.. versionadded:: 0.8.2 ``mysql_length`` may now be specified as a dictionary
- for use with composite indexes.
-
-Index Types
-~~~~~~~~~~~~~
-
-Some MySQL storage engines permit you to specify an index type when creating
-an index or primary key constraint. SQLAlchemy provides this feature via the
-``mysql_using`` parameter on :class:`.Index`::
-
- Index('my_index', my_table.c.data, mysql_using='hash')
-
-As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
-
- PrimaryKeyConstraint("data", mysql_using='hash')
-
-The value passed to the keyword argument will be simply passed through to the
-underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
-type for your MySQL storage engine.
-
-More information can be found at:
-
-http://dev.mysql.com/doc/refman/5.0/en/create-index.html
-
-http://dev.mysql.com/doc/refman/5.0/en/create-table.html
-
-.. _mysql_foreign_keys:
-
-MySQL Foreign Keys
-------------------
-
-MySQL's behavior regarding foreign keys has some important caveats.
-
-Foreign Key Arguments to Avoid
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
-or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
-:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of these keywords being
-rendered in a DDL expression, which will then raise an error on MySQL.
-In order to use these keywords on a foreign key while having them ignored
-on a MySQL backend, use a custom compile rule::
-
- from sqlalchemy.ext.compiler import compiles
- from sqlalchemy.schema import ForeignKeyConstraint
-
- @compiles(ForeignKeyConstraint, "mysql")
- def process(element, compiler, **kw):
- element.deferrable = element.initially = None
- return compiler.visit_foreign_key_constraint(element, **kw)
-
-.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
- the ``deferrable`` or ``initially`` keyword arguments of :class:`.ForeignKeyConstraint`
- and :class:`.ForeignKey`.
-
-The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
-by SQLAlchemy in conjunction with the MySQL backend. This argument is silently
-ignored by MySQL, but in addition has the effect of ON UPDATE and ON DELETE options
-also being ignored by the backend. Therefore MATCH should never be used with the
-MySQL backend; as is the case with DEFERRABLE and INITIALLY, custom compilation
-rules can be used to correct a MySQL ForeignKeyConstraint at DDL definition time.
-
-.. versionadded:: 0.9.0 - the MySQL backend will raise a :class:`.CompileError`
- when the ``match`` keyword is used with :class:`.ForeignKeyConstraint`
- or :class:`.ForeignKey`.
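-
-As a sketch only (not part of this dialect), a compile rule in the same style
-as the one shown above could blank out the ``match`` argument at DDL
-compilation time, should the keyword need to remain in place for other
-backends::
-
-    from sqlalchemy.ext.compiler import compiles
-    from sqlalchemy.schema import ForeignKeyConstraint
-
-    @compiles(ForeignKeyConstraint, "mysql")
-    def process(element, compiler, **kw):
-        # assumed here: clearing .match prevents the MATCH clause (and the
-        # associated CompileError) from being emitted for MySQL only
-        element.match = None
-        return compiler.visit_foreign_key_constraint(element, **kw)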
-
-Reflection of Foreign Key Constraints
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Not all MySQL storage engines support foreign keys. When using the
-very common ``MyISAM`` MySQL storage engine, the information loaded by table
-reflection will not include foreign keys. For these tables, you may supply a
-:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
-
- Table('mytable', metadata,
- ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
- autoload=True
- )
-
-.. seealso::
-
- :ref:`mysql_storage_engines`
-
-"""
-
-import datetime
-import re
-import sys
-
-from ... import schema as sa_schema
-from ... import exc, log, sql, util
-from ...sql import compiler
-from array import array as _array
-
-from ...engine import reflection
-from ...engine import default
-from ... import types as sqltypes
-from ...util import topological
-from ...types import DATE, BOOLEAN, \
- BLOB, BINARY, VARBINARY
-
-RESERVED_WORDS = set(
- ['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
- 'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
- 'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
- 'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
- 'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
- 'current_user', 'cursor', 'database', 'databases', 'day_hour',
- 'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
- 'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
- 'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
- 'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
- 'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
- 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
- 'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
- 'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
- 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
- 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
- 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
- 'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
- 'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
- 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
- 'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
- 'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
- 'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
- 'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
- 'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
- 'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
- 'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
- 'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
- 'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
- 'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
- 'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
- 'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
- 'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
- 'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
- 'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
-
- 'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
-
- 'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1
-
- 'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
- 'read_only', 'read_write', # 5.1
-
- 'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue',
- 'resignal', 'signal', 'slow', # 5.5
-
- 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
- 'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6
-
- ])
-
-AUTOCOMMIT_RE = re.compile(
- r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
- re.I | re.UNICODE)
-SET_RE = re.compile(
- r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
- re.I | re.UNICODE)
-
-
-class _NumericType(object):
- """Base for MySQL numeric types.
-
- This is the base both for NUMERIC as well as INTEGER, hence
- it's a mixin.
-
- """
-
- def __init__(self, unsigned=False, zerofill=False, **kw):
- self.unsigned = unsigned
- self.zerofill = zerofill
- super(_NumericType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_NumericType, sqltypes.Numeric])
-
-class _FloatType(_NumericType, sqltypes.Float):
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- if isinstance(self, (REAL, DOUBLE)) and \
- (
- (precision is None and scale is not None) or
- (precision is not None and scale is None)
- ):
- raise exc.ArgumentError(
- "You must specify both precision and scale or omit "
- "both altogether.")
- super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw)
- self.scale = scale
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_FloatType, _NumericType, sqltypes.Float])
-
-class _IntegerType(_NumericType, sqltypes.Integer):
- def __init__(self, display_width=None, **kw):
- self.display_width = display_width
- super(_IntegerType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_IntegerType, _NumericType, sqltypes.Integer])
-
-class _StringType(sqltypes.String):
- """Base for MySQL string types."""
-
- def __init__(self, charset=None, collation=None,
- ascii=False, binary=False, unicode=False,
- national=False, **kw):
- self.charset = charset
-
- # allow collate= or collation=
- kw.setdefault('collation', kw.pop('collate', collation))
-
- self.ascii = ascii
- self.unicode = unicode
- self.binary = binary
- self.national = national
- super(_StringType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_StringType, sqltypes.String])
-
-class NUMERIC(_NumericType, sqltypes.NUMERIC):
- """MySQL NUMERIC type."""
-
- __visit_name__ = 'NUMERIC'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a NUMERIC.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(NUMERIC, self).__init__(precision=precision,
- scale=scale, asdecimal=asdecimal, **kw)
-
-
-class DECIMAL(_NumericType, sqltypes.DECIMAL):
- """MySQL DECIMAL type."""
-
- __visit_name__ = 'DECIMAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DECIMAL.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(DECIMAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class DOUBLE(_FloatType):
- """MySQL DOUBLE type."""
-
- __visit_name__ = 'DOUBLE'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DOUBLE.
-
- .. note::
-
- The :class:`.DOUBLE` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits. Specify
- either ``scale=n`` or ``decimal_return_scale=n`` in order to change
- this scale, or ``asdecimal=False`` to return values directly as
- Python floating points.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(DOUBLE, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class REAL(_FloatType, sqltypes.REAL):
- """MySQL REAL type."""
-
- __visit_name__ = 'REAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a REAL.
-
- .. note::
-
- The :class:`.REAL` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits. Specify
- either ``scale=n`` or ``decimal_return_scale=n`` in order to change
- this scale, or ``asdecimal=False`` to return values directly as
- Python floating points.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(REAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class FLOAT(_FloatType, sqltypes.FLOAT):
- """MySQL FLOAT type."""
-
- __visit_name__ = 'FLOAT'
-
- def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
- """Construct a FLOAT.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(FLOAT, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
- def bind_processor(self, dialect):
- return None
-
-
-class INTEGER(_IntegerType, sqltypes.INTEGER):
- """MySQL INTEGER type."""
-
- __visit_name__ = 'INTEGER'
-
- def __init__(self, display_width=None, **kw):
- """Construct an INTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(INTEGER, self).__init__(display_width=display_width, **kw)
-
-
-class BIGINT(_IntegerType, sqltypes.BIGINT):
- """MySQL BIGINTEGER type."""
-
- __visit_name__ = 'BIGINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a BIGINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(BIGINT, self).__init__(display_width=display_width, **kw)
-
-
-class MEDIUMINT(_IntegerType):
- """MySQL MEDIUMINTEGER type."""
-
- __visit_name__ = 'MEDIUMINT'
-
- def __init__(self, display_width=None, **kw):
-        """Construct a MEDIUMINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
-
-
-class TINYINT(_IntegerType):
- """MySQL TINYINT type."""
-
- __visit_name__ = 'TINYINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a TINYINT.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(TINYINT, self).__init__(display_width=display_width, **kw)
-
-
-class SMALLINT(_IntegerType, sqltypes.SMALLINT):
- """MySQL SMALLINTEGER type."""
-
- __visit_name__ = 'SMALLINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a SMALLINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
-          left-padded with zeros. Note that this does not affect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(SMALLINT, self).__init__(display_width=display_width, **kw)
-
-
-class BIT(sqltypes.TypeEngine):
- """MySQL BIT type.
-
-    This type requires MySQL 5.0.3 or greater for MyISAM tables, and 5.0.5 or
-    greater for MEMORY, InnoDB and BDB tables. For older versions, use a
-    MSTinyInteger() type.
-
- """
-
- __visit_name__ = 'BIT'
-
- def __init__(self, length=None):
- """Construct a BIT.
-
- :param length: Optional, number of bits.
-
- """
- self.length = length
-
- def result_processor(self, dialect, coltype):
-        """Convert MySQL's 64-bit, variable-length binary string to a long.
-
- TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
- already do this, so this logic should be moved to those dialects.
-
- """
-
- def process(value):
- if value is not None:
- v = 0
- for i in map(ord, value):
- v = v << 8 | i
- return v
- return value
- return process
-
-
-class TIME(sqltypes.TIME):
- """MySQL TIME type. """
-
- __visit_name__ = 'TIME'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL TIME type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the TIME type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8 The MySQL-specific TIME
- type as well as fractional seconds support.
-
- """
- super(TIME, self).__init__(timezone=timezone)
- self.fsp = fsp
-
- def result_processor(self, dialect, coltype):
- time = datetime.time
-
- def process(value):
- # convert from a timedelta value
- if value is not None:
- microseconds = value.microseconds
- seconds = value.seconds
- minutes = seconds // 60
- return time(minutes // 60,
- minutes % 60,
- seconds - minutes * 60,
- microsecond=microseconds)
- else:
- return None
- return process
-
-
-class TIMESTAMP(sqltypes.TIMESTAMP):
- """MySQL TIMESTAMP type.
-
- """
-
- __visit_name__ = 'TIMESTAMP'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL TIMESTAMP type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6.4 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the TIMESTAMP type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
- with fractional seconds support.
-
- """
- super(TIMESTAMP, self).__init__(timezone=timezone)
- self.fsp = fsp
-
-
-class DATETIME(sqltypes.DATETIME):
- """MySQL DATETIME type.
-
- """
-
- __visit_name__ = 'DATETIME'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL DATETIME type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6.4 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the DATETIME type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
- with fractional seconds support.
-
- """
- super(DATETIME, self).__init__(timezone=timezone)
- self.fsp = fsp
-
-
-class YEAR(sqltypes.TypeEngine):
- """MySQL YEAR type, for single byte storage of years 1901-2155."""
-
- __visit_name__ = 'YEAR'
-
- def __init__(self, display_width=None):
- self.display_width = display_width
-
-
-class TEXT(_StringType, sqltypes.TEXT):
- """MySQL TEXT type, for text up to 2^16 characters."""
-
- __visit_name__ = 'TEXT'
-
- def __init__(self, length=None, **kw):
- """Construct a TEXT.
-
- :param length: Optional, if provided the server may optimize storage
- by substituting the smallest TEXT type sufficient to store
- ``length`` characters.
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(TEXT, self).__init__(length=length, **kw)
-
-
-class TINYTEXT(_StringType):
- """MySQL TINYTEXT type, for text up to 2^8 characters."""
-
- __visit_name__ = 'TINYTEXT'
-
- def __init__(self, **kwargs):
- """Construct a TINYTEXT.
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(TINYTEXT, self).__init__(**kwargs)
-
-
-class MEDIUMTEXT(_StringType):
- """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
-
- __visit_name__ = 'MEDIUMTEXT'
-
- def __init__(self, **kwargs):
- """Construct a MEDIUMTEXT.
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(MEDIUMTEXT, self).__init__(**kwargs)
-
-
-class LONGTEXT(_StringType):
- """MySQL LONGTEXT type, for text up to 2^32 characters."""
-
- __visit_name__ = 'LONGTEXT'
-
- def __init__(self, **kwargs):
- """Construct a LONGTEXT.
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(LONGTEXT, self).__init__(**kwargs)
-
-
-class VARCHAR(_StringType, sqltypes.VARCHAR):
- """MySQL VARCHAR type, for variable-length character data."""
-
- __visit_name__ = 'VARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a VARCHAR.
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(VARCHAR, self).__init__(length=length, **kwargs)
-
-
-class CHAR(_StringType, sqltypes.CHAR):
- """MySQL CHAR type, for fixed-length character data."""
-
- __visit_name__ = 'CHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a CHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- super(CHAR, self).__init__(length=length, **kwargs)
-
- @classmethod
- def _adapt_string_for_cast(self, type_):
- # copy the given string type into a CHAR
- # for the purposes of rendering a CAST expression
- type_ = sqltypes.to_instance(type_)
- if isinstance(type_, sqltypes.CHAR):
- return type_
- elif isinstance(type_, _StringType):
- return CHAR(
- length=type_.length,
- charset=type_.charset,
- collation=type_.collation,
- ascii=type_.ascii,
- binary=type_.binary,
- unicode=type_.unicode,
- national=False # not supported in CAST
- )
- else:
- return CHAR(length=type_.length)
-
-class NVARCHAR(_StringType, sqltypes.NVARCHAR):
- """MySQL NVARCHAR type.
-
- For variable-length character data in the server's configured national
- character set.
- """
-
- __visit_name__ = 'NVARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct an NVARCHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- kwargs['national'] = True
- super(NVARCHAR, self).__init__(length=length, **kwargs)
-
-
-class NCHAR(_StringType, sqltypes.NCHAR):
- """MySQL NCHAR type.
-
- For fixed-length character data in the server's configured national
- character set.
- """
-
- __visit_name__ = 'NCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct an NCHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- kwargs['national'] = True
- super(NCHAR, self).__init__(length=length, **kwargs)
-
-
-class TINYBLOB(sqltypes._Binary):
- """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
-
- __visit_name__ = 'TINYBLOB'
-
-
-class MEDIUMBLOB(sqltypes._Binary):
- """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
-
- __visit_name__ = 'MEDIUMBLOB'
-
-
-class LONGBLOB(sqltypes._Binary):
- """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
-
- __visit_name__ = 'LONGBLOB'
-
-class _EnumeratedValues(_StringType):
- def _init_values(self, values, kw):
- self.quoting = kw.pop('quoting', 'auto')
-
- if self.quoting == 'auto' and len(values):
- # What quoting character are we using?
- q = None
- for e in values:
- if len(e) == 0:
- self.quoting = 'unquoted'
- break
- elif q is None:
- q = e[0]
-
- if len(e) == 1 or e[0] != q or e[-1] != q:
- self.quoting = 'unquoted'
- break
- else:
- self.quoting = 'quoted'
-
- if self.quoting == 'quoted':
- util.warn_deprecated(
- 'Manually quoting %s value literals is deprecated. Supply '
- 'unquoted values and use the quoting= option in cases of '
- 'ambiguity.' % self.__class__.__name__)
-
- values = self._strip_values(values)
-
- self._enumerated_values = values
- length = max([len(v) for v in values] + [0])
- return values, length
-
- @classmethod
- def _strip_values(cls, values):
- strip_values = []
- for a in values:
- if a[0:1] == '"' or a[0:1] == "'":
- # strip enclosing quotes and unquote interior
- a = a[1:-1].replace(a[0] * 2, a[0])
- strip_values.append(a)
- return strip_values
-
-class ENUM(sqltypes.Enum, _EnumeratedValues):
- """MySQL ENUM type."""
-
- __visit_name__ = 'ENUM'
-
- def __init__(self, *enums, **kw):
- """Construct an ENUM.
-
- E.g.::
-
- Column('myenum', ENUM("foo", "bar", "baz"))
-
- :param enums: The range of valid values for this ENUM. Values will be
- quoted when generating the schema according to the quoting flag (see
- below).
-
- :param strict: Defaults to False: ensure that a given value is in this
- ENUM's range of permissible values when inserting or updating rows.
- Note that MySQL will not raise a fatal error if you attempt to store
-          an out of range value; an alternate value will be stored instead.
- (See MySQL ENUM documentation.)
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- :param quoting: Defaults to 'auto': automatically determine enum value
- quoting. If all enum values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
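-        For example, under the default ``quoting='auto'`` setting, the
-        following two declarations are treated as equivalent (the second,
-        pre-quoted form emits a deprecation warning)::
-
-            Column('myenum', ENUM("foo", "bar", "baz"))
-            Column('myenum', ENUM("'foo'", "'bar'", "'baz'"))
-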
- """
- values, length = self._init_values(enums, kw)
- self.strict = kw.pop('strict', False)
- kw.pop('metadata', None)
- kw.pop('schema', None)
- kw.pop('name', None)
- kw.pop('quote', None)
- kw.pop('native_enum', None)
- kw.pop('inherit_schema', None)
- _StringType.__init__(self, length=length, **kw)
- sqltypes.Enum.__init__(self, *values)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[ENUM, _StringType, sqltypes.Enum])
-
- def bind_processor(self, dialect):
- super_convert = super(ENUM, self).bind_processor(dialect)
-
- def process(value):
- if self.strict and value is not None and value not in self.enums:
- raise exc.InvalidRequestError('"%s" not a valid value for '
- 'this enum' % value)
- if super_convert:
- return super_convert(value)
- else:
- return value
- return process
-
- def adapt(self, cls, **kw):
- if issubclass(cls, ENUM):
- kw['strict'] = self.strict
- return sqltypes.Enum.adapt(self, cls, **kw)
-
-
-class SET(_EnumeratedValues):
- """MySQL SET type."""
-
- __visit_name__ = 'SET'
-
- def __init__(self, *values, **kw):
- """Construct a SET.
-
- E.g.::
-
- Column('myset', SET("foo", "bar", "baz"))
-
- :param values: The range of valid values for this SET. Values will be
- quoted when generating the schema according to the quoting flag (see
- below).
-
- .. versionchanged:: 0.9.0 quoting is applied automatically to
- :class:`.mysql.SET` in the same way as for :class:`.mysql.ENUM`.
-
- :param charset: Optional, a column-level character set for this string
-          value. Takes precedence over 'ascii' or 'unicode' short-hand.
-
-        :param collation: Optional, a column-level collation for this string
-          value. Takes precedence over 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- :param quoting: Defaults to 'auto': automatically determine enum value
- quoting. If all enum values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
- .. versionadded:: 0.9.0
-
- """
- values, length = self._init_values(values, kw)
- self.values = tuple(values)
-
- kw.setdefault('length', length)
- super(SET, self).__init__(**kw)
-
- def result_processor(self, dialect, coltype):
- def process(value):
- # The good news:
- # No ',' quoting issues- commas aren't allowed in SET values
- # The bad news:
- # Plenty of driver inconsistencies here.
- if isinstance(value, set):
- # ..some versions convert '' to an empty set
- if not value:
- value.add('')
- return value
- # ...and some versions return strings
- if value is not None:
- return set(value.split(','))
- else:
- return value
- return process
-
- def bind_processor(self, dialect):
- super_convert = super(SET, self).bind_processor(dialect)
-
- def process(value):
- if value is None or isinstance(value, util.int_types + util.string_types):
- pass
- else:
- if None in value:
- value = set(value)
- value.remove(None)
- value.add('')
- value = ','.join(value)
- if super_convert:
- return super_convert(value)
- else:
- return value
- return process
-
-# old names
-MSTime = TIME
-MSSet = SET
-MSEnum = ENUM
-MSLongBlob = LONGBLOB
-MSMediumBlob = MEDIUMBLOB
-MSTinyBlob = TINYBLOB
-MSBlob = BLOB
-MSBinary = BINARY
-MSVarBinary = VARBINARY
-MSNChar = NCHAR
-MSNVarChar = NVARCHAR
-MSChar = CHAR
-MSString = VARCHAR
-MSLongText = LONGTEXT
-MSMediumText = MEDIUMTEXT
-MSTinyText = TINYTEXT
-MSText = TEXT
-MSYear = YEAR
-MSTimeStamp = TIMESTAMP
-MSBit = BIT
-MSSmallInteger = SMALLINT
-MSTinyInteger = TINYINT
-MSMediumInteger = MEDIUMINT
-MSBigInteger = BIGINT
-MSNumeric = NUMERIC
-MSDecimal = DECIMAL
-MSDouble = DOUBLE
-MSReal = REAL
-MSFloat = FLOAT
-MSInteger = INTEGER
-
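-# Map generic SQLAlchemy types to the MySQL-specific implementations defined
-# above; the dialect consults this mapping when adapting column types.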
-colspecs = {
- _IntegerType: _IntegerType,
- _NumericType: _NumericType,
- _FloatType: _FloatType,
- sqltypes.Numeric: NUMERIC,
- sqltypes.Float: FLOAT,
- sqltypes.Time: TIME,
- sqltypes.Enum: ENUM,
-}
-
-# Everything 3.23 through 5.1 excepting OpenGIS types.
-ischema_names = {
- 'bigint': BIGINT,
- 'binary': BINARY,
- 'bit': BIT,
- 'blob': BLOB,
- 'boolean': BOOLEAN,
- 'char': CHAR,
- 'date': DATE,
- 'datetime': DATETIME,
- 'decimal': DECIMAL,
- 'double': DOUBLE,
- 'enum': ENUM,
- 'fixed': DECIMAL,
- 'float': FLOAT,
- 'int': INTEGER,
- 'integer': INTEGER,
- 'longblob': LONGBLOB,
- 'longtext': LONGTEXT,
- 'mediumblob': MEDIUMBLOB,
- 'mediumint': MEDIUMINT,
- 'mediumtext': MEDIUMTEXT,
- 'nchar': NCHAR,
- 'nvarchar': NVARCHAR,
- 'numeric': NUMERIC,
- 'set': SET,
- 'smallint': SMALLINT,
- 'text': TEXT,
- 'time': TIME,
- 'timestamp': TIMESTAMP,
- 'tinyblob': TINYBLOB,
- 'tinyint': TINYINT,
- 'tinytext': TINYTEXT,
- 'varbinary': VARBINARY,
- 'varchar': VARCHAR,
- 'year': YEAR,
-}
-
-
-class MySQLExecutionContext(default.DefaultExecutionContext):
-
- def should_autocommit_text(self, statement):
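-        # textual statements matching AUTOCOMMIT_RE (INSERT/UPDATE/DDL and
-        # similar) are treated as requiring a COMMIT when executed in
-        # "autocommit" mode.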
- return AUTOCOMMIT_RE.match(statement)
-
-
-class MySQLCompiler(compiler.SQLCompiler):
-
- render_table_with_column_in_update_from = True
- """Overridden from base SQLCompiler value"""
-
- extract_map = compiler.SQLCompiler.extract_map.copy()
- extract_map.update({'milliseconds': 'millisecond'})
-
- def visit_random_func(self, fn, **kw):
- return "rand%s" % self.function_argspec(fn)
-
- def visit_utc_timestamp_func(self, fn, **kw):
- return "UTC_TIMESTAMP"
-
- def visit_sysdate_func(self, fn, **kw):
- return "SYSDATE()"
-
- def visit_concat_op_binary(self, binary, operator, **kw):
- return "concat(%s, %s)" % (self.process(binary.left),
- self.process(binary.right))
-
- def visit_match_op_binary(self, binary, operator, **kw):
- return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \
- (self.process(binary.left), self.process(binary.right))
-
- def get_from_hint_text(self, table, text):
- return text
-
- def visit_typeclause(self, typeclause):
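-        # MySQL CAST() accepts only a limited set of target types; map the
-        # SQLAlchemy type onto one of those, or return None so that
-        # visit_cast() falls back to rendering the un-CAST expression.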
- type_ = typeclause.type.dialect_impl(self.dialect)
- if isinstance(type_, sqltypes.Integer):
- if getattr(type_, 'unsigned', False):
- return 'UNSIGNED INTEGER'
- else:
- return 'SIGNED INTEGER'
- elif isinstance(type_, sqltypes.TIMESTAMP):
- return 'DATETIME'
- elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime,
- sqltypes.Date, sqltypes.Time)):
- return self.dialect.type_compiler.process(type_)
- elif isinstance(type_, sqltypes.String) and not isinstance(type_, (ENUM, SET)):
- adapted = CHAR._adapt_string_for_cast(type_)
- return self.dialect.type_compiler.process(adapted)
- elif isinstance(type_, sqltypes._Binary):
- return 'BINARY'
- elif isinstance(type_, sqltypes.NUMERIC):
- return self.dialect.type_compiler.process(
- type_).replace('NUMERIC', 'DECIMAL')
- else:
- return None
-
- def visit_cast(self, cast, **kwargs):
- # No cast until 4, no decimals until 5.
- if not self.dialect._supports_cast:
- return self.process(cast.clause.self_group())
-
- type_ = self.process(cast.typeclause)
- if type_ is None:
- return self.process(cast.clause.self_group())
-
- return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
-
- def render_literal_value(self, value, type_):
- value = super(MySQLCompiler, self).render_literal_value(value, type_)
- if self.dialect._backslash_escapes:
- value = value.replace('\\', '\\\\')
- return value
-
- def get_select_precolumns(self, select):
- """Add special MySQL keywords in place of DISTINCT.
-
- .. note::
-
- this usage is deprecated. :meth:`.Select.prefix_with`
- should be used for special keywords at the start
- of a SELECT.
-
- """
- if isinstance(select._distinct, util.string_types):
- return select._distinct.upper() + " "
- elif select._distinct:
- return "DISTINCT "
- else:
- return ""
-
- def visit_join(self, join, asfrom=False, **kwargs):
- return ''.join(
- (self.process(join.left, asfrom=True, **kwargs),
- (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
- self.process(join.right, asfrom=True, **kwargs),
- " ON ",
- self.process(join.onclause, **kwargs)))
-
- def for_update_clause(self, select):
- if select._for_update_arg.read:
- return " LOCK IN SHARE MODE"
- else:
- return " FOR UPDATE"
-
- def limit_clause(self, select):
- # MySQL supports:
-        #   LIMIT <limit>
-        #   LIMIT <offset>, <limit>
-        # and in server versions > 3.3:
-        #   LIMIT <limit> OFFSET <offset>
- # The latter is more readable for offsets but we're stuck with the
- # former until we can refine dialects by server revision.
-
- limit, offset = select._limit, select._offset
-
- if (limit, offset) == (None, None):
- return ''
- elif offset is not None:
- # As suggested by the MySQL docs, need to apply an
- # artificial limit if one wasn't provided
- # http://dev.mysql.com/doc/refman/5.0/en/select.html
- if limit is None:
- # hardwire the upper limit. Currently
- # needed by OurSQL with Python 3
- # (https://bugs.launchpad.net/oursql/+bug/686232),
- # but also is consistent with the usage of the upper
- # bound as part of MySQL's "syntax" for OFFSET with
- # no LIMIT
- return ' \n LIMIT %s, %s' % (
- self.process(sql.literal(offset)),
- "18446744073709551615")
- else:
- return ' \n LIMIT %s, %s' % (
- self.process(sql.literal(offset)),
- self.process(sql.literal(limit)))
- else:
- # No offset provided, so just use the limit
- return ' \n LIMIT %s' % (self.process(sql.literal(limit)),)
-
- def update_limit_clause(self, update_stmt):
- limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
- if limit:
- return "LIMIT %s" % limit
- else:
- return None
-
- def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
- return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
- for t in [from_table] + list(extra_froms))
-
- def update_from_clause(self, update_stmt, from_table,
- extra_froms, from_hints, **kw):
- return None
-
-
-# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
-# Starting with MySQL 4.1.2, these indexes are created automatically.
-# In older versions, the indexes must be created explicitly or the
-# creation of foreign key constraints fails."
-
-class MySQLDDLCompiler(compiler.DDLCompiler):
- def create_table_constraints(self, table):
- """Get table constraints."""
- constraint_string = super(
- MySQLDDLCompiler, self).create_table_constraints(table)
-
- # why self.dialect.name and not 'mysql'? because of drizzle
- is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \
- table.dialect_options[self.dialect.name]['engine'].lower() == 'innodb'
-
- auto_inc_column = table._autoincrement_column
-
- if is_innodb and \
- auto_inc_column is not None and \
- auto_inc_column is not list(table.primary_key)[0]:
- if constraint_string:
- constraint_string += ", \n\t"
- constraint_string += "KEY %s (%s)" % (
- self.preparer.quote(
- "idx_autoinc_%s" % auto_inc_column.name
- ),
- self.preparer.format_column(auto_inc_column)
- )
-
- return constraint_string
-
- def get_column_specification(self, column, **kw):
- """Builds column DDL."""
-
- colspec = [self.preparer.format_column(column),
- self.dialect.type_compiler.process(column.type)
- ]
-
- default = self.get_column_default_string(column)
- if default is not None:
- colspec.append('DEFAULT ' + default)
-
- is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP)
- if not column.nullable and not is_timestamp:
- colspec.append('NOT NULL')
-
- elif column.nullable and is_timestamp and default is None:
- colspec.append('NULL')
-
- if column is column.table._autoincrement_column and \
- column.server_default is None:
- colspec.append('AUTO_INCREMENT')
-
- return ' '.join(colspec)
-
- def post_create_table(self, table):
- """Build table-level CREATE options like ENGINE and COLLATE."""
-
- table_opts = []
-
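-        # collect keyword arguments of the form <dialectname>_<option>
-        # (e.g. ``mysql_engine='InnoDB'``) and upper-case the option names
-        # for rendering as table-level CREATE options.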
- opts = dict(
- (
- k[len(self.dialect.name) + 1:].upper(),
- v
- )
- for k, v in table.kwargs.items()
- if k.startswith('%s_' % self.dialect.name)
- )
-
- for opt in topological.sort([
- ('DEFAULT_CHARSET', 'COLLATE'),
- ('DEFAULT_CHARACTER_SET', 'COLLATE'),
- ('PARTITION_BY', 'PARTITIONS'), # only for test consistency
- ], opts):
- arg = opts[opt]
- if opt in _options_of_type_string:
- arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
-
- if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
- 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
- 'DEFAULT_CHARSET',
- 'DEFAULT_COLLATE', 'PARTITION_BY'):
- opt = opt.replace('_', ' ')
-
- joiner = '='
- if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
- 'CHARACTER SET', 'COLLATE', 'PARTITION BY', 'PARTITIONS'):
- joiner = ' '
-
- table_opts.append(joiner.join((opt, arg)))
- return ' '.join(table_opts)
-
- def visit_create_index(self, create):
- index = create.element
- self._verify_index_table(index)
- preparer = self.preparer
- table = preparer.format_table(index.table)
- columns = [self.sql_compiler.process(expr, include_table=False,
- literal_binds=True)
- for expr in index.expressions]
-
- name = self._prepared_index_name(index)
-
- text = "CREATE "
- if index.unique:
- text += "UNIQUE "
- text += "INDEX %s ON %s " % (name, table)
-
- length = index.dialect_options['mysql']['length']
- if length is not None:
-
- if isinstance(length, dict):
- # length value can be a (column_name --> integer value) mapping
- # specifying the prefix length for each column of the index
- columns = ', '.join(
- ('%s(%d)' % (col, length[col])
- if col in length else '%s' % col)
- for col in columns
- )
- else:
- # or can be an integer value specifying the same
- # prefix length for all columns of the index
- columns = ', '.join(
- '%s(%d)' % (col, length)
- for col in columns
- )
- else:
- columns = ', '.join(columns)
- text += '(%s)' % columns
-
- using = index.dialect_options['mysql']['using']
- if using is not None:
- text += " USING %s" % (preparer.quote(using))
-
- return text
-
- def visit_primary_key_constraint(self, constraint):
- text = super(MySQLDDLCompiler, self).\
- visit_primary_key_constraint(constraint)
- using = constraint.dialect_options['mysql']['using']
- if using:
- text += " USING %s" % (self.preparer.quote(using))
- return text
-
- def visit_drop_index(self, drop):
- index = drop.element
-
- return "\nDROP INDEX %s ON %s" % (
- self._prepared_index_name(index,
- include_schema=False),
- self.preparer.format_table(index.table))
-
- def visit_drop_constraint(self, drop):
- constraint = drop.element
- if isinstance(constraint, sa_schema.ForeignKeyConstraint):
- qual = "FOREIGN KEY "
- const = self.preparer.format_constraint(constraint)
- elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
- qual = "PRIMARY KEY "
- const = ""
- elif isinstance(constraint, sa_schema.UniqueConstraint):
- qual = "INDEX "
- const = self.preparer.format_constraint(constraint)
- else:
- qual = ""
- const = self.preparer.format_constraint(constraint)
- return "ALTER TABLE %s DROP %s%s" % \
- (self.preparer.format_table(constraint.table),
- qual, const)
-
- def define_constraint_match(self, constraint):
- if constraint.match is not None:
- raise exc.CompileError(
- "MySQL ignores the 'MATCH' keyword while at the same time "
- "causes ON UPDATE/ON DELETE clauses to be ignored.")
- return ""
-
-class MySQLTypeCompiler(compiler.GenericTypeCompiler):
- def _extend_numeric(self, type_, spec):
- "Extend a numeric-type declaration with MySQL specific extensions."
-
- if not self._mysql_type(type_):
- return spec
-
- if type_.unsigned:
- spec += ' UNSIGNED'
- if type_.zerofill:
- spec += ' ZEROFILL'
- return spec
-
- def _extend_string(self, type_, defaults, spec):
- """Extend a string-type declaration with standard SQL CHARACTER SET /
- COLLATE annotations and MySQL specific extensions.
-
- """
-
- def attr(name):
- return getattr(type_, name, defaults.get(name))
-
- if attr('charset'):
- charset = 'CHARACTER SET %s' % attr('charset')
- elif attr('ascii'):
- charset = 'ASCII'
- elif attr('unicode'):
- charset = 'UNICODE'
- else:
- charset = None
-
- if attr('collation'):
- collation = 'COLLATE %s' % type_.collation
- elif attr('binary'):
- collation = 'BINARY'
- else:
- collation = None
-
- if attr('national'):
- # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
- return ' '.join([c for c in ('NATIONAL', spec, collation)
- if c is not None])
- return ' '.join([c for c in (spec, charset, collation)
- if c is not None])
-
- def _mysql_type(self, type_):
- return isinstance(type_, (_StringType, _NumericType))
-
- def visit_NUMERIC(self, type_):
- if type_.precision is None:
- return self._extend_numeric(type_, "NUMERIC")
- elif type_.scale is None:
- return self._extend_numeric(type_,
- "NUMERIC(%(precision)s)" %
- {'precision': type_.precision})
- else:
- return self._extend_numeric(type_,
- "NUMERIC(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
-
- def visit_DECIMAL(self, type_):
- if type_.precision is None:
- return self._extend_numeric(type_, "DECIMAL")
- elif type_.scale is None:
- return self._extend_numeric(type_,
- "DECIMAL(%(precision)s)" %
- {'precision': type_.precision})
- else:
- return self._extend_numeric(type_,
- "DECIMAL(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
-
- def visit_DOUBLE(self, type_):
- if type_.precision is not None and type_.scale is not None:
- return self._extend_numeric(type_,
- "DOUBLE(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
- else:
- return self._extend_numeric(type_, 'DOUBLE')
-
- def visit_REAL(self, type_):
- if type_.precision is not None and type_.scale is not None:
- return self._extend_numeric(type_,
- "REAL(%(precision)s, %(scale)s)" %
- {'precision': type_.precision,
- 'scale': type_.scale})
- else:
- return self._extend_numeric(type_, 'REAL')
-
- def visit_FLOAT(self, type_):
- if self._mysql_type(type_) and \
- type_.scale is not None and \
- type_.precision is not None:
- return self._extend_numeric(type_,
- "FLOAT(%s, %s)" % (type_.precision, type_.scale))
- elif type_.precision is not None:
- return self._extend_numeric(type_,
- "FLOAT(%s)" % (type_.precision,))
- else:
- return self._extend_numeric(type_, "FLOAT")
-
- def visit_INTEGER(self, type_):
- if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "INTEGER(%(display_width)s)" %
- {'display_width': type_.display_width})
- else:
- return self._extend_numeric(type_, "INTEGER")
-
- def visit_BIGINT(self, type_):
- if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "BIGINT(%(display_width)s)" %
- {'display_width': type_.display_width})
- else:
- return self._extend_numeric(type_, "BIGINT")
-
- def visit_MEDIUMINT(self, type_):
- if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "MEDIUMINT(%(display_width)s)" %
- {'display_width': type_.display_width})
- else:
- return self._extend_numeric(type_, "MEDIUMINT")
-
- def visit_TINYINT(self, type_):
- if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "TINYINT(%s)" % type_.display_width)
- else:
- return self._extend_numeric(type_, "TINYINT")
-
- def visit_SMALLINT(self, type_):
- if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "SMALLINT(%(display_width)s)" %
- {'display_width': type_.display_width}
- )
- else:
- return self._extend_numeric(type_, "SMALLINT")
-
- def visit_BIT(self, type_):
- if type_.length is not None:
- return "BIT(%s)" % type_.length
- else:
- return "BIT"
-
- def visit_DATETIME(self, type_):
- if getattr(type_, 'fsp', None):
- return "DATETIME(%d)" % type_.fsp
- else:
- return "DATETIME"
-
- def visit_DATE(self, type_):
- return "DATE"
-
- def visit_TIME(self, type_):
- if getattr(type_, 'fsp', None):
- return "TIME(%d)" % type_.fsp
- else:
- return "TIME"
-
- def visit_TIMESTAMP(self, type_):
- if getattr(type_, 'fsp', None):
- return "TIMESTAMP(%d)" % type_.fsp
- else:
- return "TIMESTAMP"
-
- def visit_YEAR(self, type_):
- if type_.display_width is None:
- return "YEAR"
- else:
- return "YEAR(%s)" % type_.display_width
-
- def visit_TEXT(self, type_):
- if type_.length:
- return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
- else:
- return self._extend_string(type_, {}, "TEXT")
-
- def visit_TINYTEXT(self, type_):
- return self._extend_string(type_, {}, "TINYTEXT")
-
- def visit_MEDIUMTEXT(self, type_):
- return self._extend_string(type_, {}, "MEDIUMTEXT")
-
- def visit_LONGTEXT(self, type_):
- return self._extend_string(type_, {}, "LONGTEXT")
-
- def visit_VARCHAR(self, type_):
- if type_.length:
- return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
- else:
- raise exc.CompileError(
- "VARCHAR requires a length on dialect %s" %
- self.dialect.name)
-
- def visit_CHAR(self, type_):
- if type_.length:
- return self._extend_string(type_, {}, "CHAR(%(length)s)" %
- {'length': type_.length})
- else:
- return self._extend_string(type_, {}, "CHAR")
-
- def visit_NVARCHAR(self, type_):
- # We'll actually generate the equiv. "NATIONAL VARCHAR" instead
- # of "NVARCHAR".
- if type_.length:
- return self._extend_string(type_, {'national': True},
- "VARCHAR(%(length)s)" % {'length': type_.length})
- else:
- raise exc.CompileError(
- "NVARCHAR requires a length on dialect %s" %
- self.dialect.name)
-
- def visit_NCHAR(self, type_):
- # We'll actually generate the equiv.
- # "NATIONAL CHAR" instead of "NCHAR".
- if type_.length:
- return self._extend_string(type_, {'national': True},
- "CHAR(%(length)s)" % {'length': type_.length})
- else:
- return self._extend_string(type_, {'national': True}, "CHAR")
-
- def visit_VARBINARY(self, type_):
- return "VARBINARY(%d)" % type_.length
-
- def visit_large_binary(self, type_):
- return self.visit_BLOB(type_)
-
- def visit_enum(self, type_):
- if not type_.native_enum:
- return super(MySQLTypeCompiler, self).visit_enum(type_)
- else:
- return self._visit_enumerated_values("ENUM", type_, type_.enums)
-
- def visit_BLOB(self, type_):
- if type_.length:
- return "BLOB(%d)" % type_.length
- else:
- return "BLOB"
-
- def visit_TINYBLOB(self, type_):
- return "TINYBLOB"
-
- def visit_MEDIUMBLOB(self, type_):
- return "MEDIUMBLOB"
-
- def visit_LONGBLOB(self, type_):
- return "LONGBLOB"
-
- def _visit_enumerated_values(self, name, type_, enumerated_values):
- quoted_enums = []
- for e in enumerated_values:
- quoted_enums.append("'%s'" % e.replace("'", "''"))
- return self._extend_string(type_, {}, "%s(%s)" % (
- name, ",".join(quoted_enums))
- )
-
- def visit_ENUM(self, type_):
- return self._visit_enumerated_values("ENUM", type_,
- type_._enumerated_values)
-
- def visit_SET(self, type_):
- return self._visit_enumerated_values("SET", type_,
- type_._enumerated_values)
-
-    def visit_BOOLEAN(self, type_):
- return "BOOL"
-
-
-class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
-
- reserved_words = RESERVED_WORDS
-
- def __init__(self, dialect, server_ansiquotes=False, **kw):
- if not server_ansiquotes:
- quote = "`"
- else:
- quote = '"'
-
- super(MySQLIdentifierPreparer, self).__init__(
- dialect,
- initial_quote=quote,
- escape_quote=quote)
-
- def _quote_free_identifiers(self, *ids):
- """Unilaterally identifier-quote any number of strings."""
-
- return tuple([self.quote_identifier(i) for i in ids if i is not None])
-
-
-@log.class_logger
-class MySQLDialect(default.DefaultDialect):
- """Details of the MySQL dialect. Not used directly in application code."""
-
- name = 'mysql'
- supports_alter = True
-
- # identifiers are 64, however aliases can be 255...
- max_identifier_length = 255
- max_index_name_length = 64
-
- supports_native_enum = True
-
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = False
- supports_multivalues_insert = True
-
- default_paramstyle = 'format'
- colspecs = colspecs
-
- statement_compiler = MySQLCompiler
- ddl_compiler = MySQLDDLCompiler
- type_compiler = MySQLTypeCompiler
- ischema_names = ischema_names
- preparer = MySQLIdentifierPreparer
-
- # default SQL compilation settings -
- # these are modified upon initialize(),
- # i.e. first connect
- _backslash_escapes = True
- _server_ansiquotes = False
-
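-    # dialect-specific keyword arguments accepted by these constructs,
-    # e.g. Table(..., mysql_engine='InnoDB'), Index(..., mysql_length=10)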
- construct_arguments = [
- (sa_schema.Table, {
- "*": None
- }),
- (sql.Update, {
- "limit": None
- }),
- (sa_schema.PrimaryKeyConstraint, {
- "using": None
- }),
- (sa_schema.Index, {
- "using": None,
- "length": None,
- })
- ]
-
- def __init__(self, isolation_level=None, **kwargs):
- kwargs.pop('use_ansiquotes', None) # legacy
- default.DefaultDialect.__init__(self, **kwargs)
- self.isolation_level = isolation_level
-
- def on_connect(self):
- if self.isolation_level is not None:
- def connect(conn):
- self.set_isolation_level(conn, self.isolation_level)
- return connect
- else:
- return None
-
- _isolation_lookup = set(['SERIALIZABLE',
- 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
-
- def set_isolation_level(self, connection, level):
- level = level.replace('_', ' ')
- if level not in self._isolation_lookup:
- raise exc.ArgumentError(
- "Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
- )
- cursor = connection.cursor()
- cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
- cursor.execute("COMMIT")
- cursor.close()
-
- def get_isolation_level(self, connection):
- cursor = connection.cursor()
- cursor.execute('SELECT @@tx_isolation')
- val = cursor.fetchone()[0]
- cursor.close()
- if util.py3k and isinstance(val, bytes):
- val = val.decode()
- return val.upper().replace("-", " ")
-
- def do_commit(self, dbapi_connection):
- """Execute a COMMIT."""
-
- # COMMIT/ROLLBACK were introduced in 3.23.15.
- # Yes, we have at least one user who has to talk to these old versions!
- #
- # Ignore commit/rollback if support isn't present, otherwise even basic
- # operations via autocommit fail.
- try:
- dbapi_connection.commit()
- except:
- if self.server_version_info < (3, 23, 15):
- args = sys.exc_info()[1].args
- if args and args[0] == 1064:
- return
- raise
-
- def do_rollback(self, dbapi_connection):
- """Execute a ROLLBACK."""
-
- try:
- dbapi_connection.rollback()
- except:
- if self.server_version_info < (3, 23, 15):
- args = sys.exc_info()[1].args
- if args and args[0] == 1064:
- return
- raise
-
- def do_begin_twophase(self, connection, xid):
- connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
-
- def do_prepare_twophase(self, connection, xid):
- connection.execute(sql.text("XA END :xid"), xid=xid)
- connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
-
- def do_rollback_twophase(self, connection, xid, is_prepared=True,
- recover=False):
- if not is_prepared:
- connection.execute(sql.text("XA END :xid"), xid=xid)
- connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
-
- def do_commit_twophase(self, connection, xid, is_prepared=True,
- recover=False):
- if not is_prepared:
- self.do_prepare_twophase(connection, xid)
- connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
-
- def do_recover_twophase(self, connection):
- resultset = connection.execute("XA RECOVER")
- return [row['data'][0:row['gtrid_length']] for row in resultset]
-
- def is_disconnect(self, e, connection, cursor):
- if isinstance(e, self.dbapi.OperationalError):
- return self._extract_error_code(e) in \
- (2006, 2013, 2014, 2045, 2055)
- elif isinstance(e, self.dbapi.InterfaceError):
- # if underlying connection is closed,
- # this is the error you get
- return "(0, '')" in str(e)
- else:
- return False
-
- def _compat_fetchall(self, rp, charset=None):
- """Proxy result rows to smooth over MySQL-Python driver
- inconsistencies."""
-
- return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
-
- def _compat_fetchone(self, rp, charset=None):
- """Proxy a result row to smooth over MySQL-Python driver
- inconsistencies."""
-
- return _DecodingRowProxy(rp.fetchone(), charset)
-
- def _compat_first(self, rp, charset=None):
- """Proxy a result row to smooth over MySQL-Python driver
- inconsistencies."""
-
- return _DecodingRowProxy(rp.first(), charset)
-
- def _extract_error_code(self, exception):
- raise NotImplementedError()
-
- def _get_default_schema_name(self, connection):
- return connection.execute('SELECT DATABASE()').scalar()
-
- def has_table(self, connection, table_name, schema=None):
- # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
- # on macosx (and maybe win?) with multibyte table names.
- #
- # TODO: if this is not a problem on win, make the strategy swappable
- # based on platform. DESCRIBE is slower.
-
- # [ticket:726]
- # full_name = self.identifier_preparer.format_table(table,
- # use_schema=True)
-
- full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
- schema, table_name))
-
- st = "DESCRIBE %s" % full_name
- rs = None
- try:
- try:
- rs = connection.execute(st)
- have = rs.fetchone() is not None
- rs.close()
- return have
- except exc.DBAPIError as e:
- if self._extract_error_code(e.orig) == 1146:
- return False
- raise
- finally:
- if rs:
- rs.close()
-
- def initialize(self, connection):
- self._connection_charset = self._detect_charset(connection)
- self._detect_ansiquotes(connection)
- if self._server_ansiquotes:
- # if ansiquotes == True, build a new IdentifierPreparer
- # with the new setting
- self.identifier_preparer = self.preparer(self,
- server_ansiquotes=self._server_ansiquotes)
-
- default.DefaultDialect.initialize(self, connection)
-
- @property
- def _supports_cast(self):
- return self.server_version_info is None or \
- self.server_version_info >= (4, 0, 2)
-
- @reflection.cache
- def get_schema_names(self, connection, **kw):
- rp = connection.execute("SHOW schemas")
- return [r[0] for r in rp]
-
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
- """Return a Unicode SHOW TABLES from a given schema."""
- if schema is not None:
- current_schema = schema
- else:
- current_schema = self.default_schema_name
-
- charset = self._connection_charset
- if self.server_version_info < (5, 0, 2):
- rp = connection.execute("SHOW TABLES FROM %s" %
- self.identifier_preparer.quote_identifier(current_schema))
- return [row[0] for
- row in self._compat_fetchall(rp, charset=charset)]
- else:
- rp = connection.execute("SHOW FULL TABLES FROM %s" %
- self.identifier_preparer.quote_identifier(current_schema))
-
- return [row[0]
- for row in self._compat_fetchall(rp, charset=charset)
- if row[1] == 'BASE TABLE']
-
- @reflection.cache
- def get_view_names(self, connection, schema=None, **kw):
- if self.server_version_info < (5, 0, 2):
- raise NotImplementedError
- if schema is None:
- schema = self.default_schema_name
- if self.server_version_info < (5, 0, 2):
- return self.get_table_names(connection, schema)
- charset = self._connection_charset
- rp = connection.execute("SHOW FULL TABLES FROM %s" %
- self.identifier_preparer.quote_identifier(schema))
- return [row[0]
- for row in self._compat_fetchall(rp, charset=charset)
- if row[1] in ('VIEW', 'SYSTEM VIEW')]
-
- @reflection.cache
- def get_table_options(self, connection, table_name, schema=None, **kw):
-
- parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
- return parsed_state.table_options
-
- @reflection.cache
- def get_columns(self, connection, table_name, schema=None, **kw):
- parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
- return parsed_state.columns
-
- @reflection.cache
- def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
- for key in parsed_state.keys:
- if key['type'] == 'PRIMARY':
- # There can be only one.
- cols = [s[0] for s in key['columns']]
- return {'constrained_columns': cols, 'name': None}
- return {'constrained_columns': [], 'name': None}
-
- @reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None, **kw):
-
- parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
- default_schema = None
-
- fkeys = []
-
- for spec in parsed_state.constraints:
- # only FOREIGN KEYs
- ref_name = spec['table'][-1]
- ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema
-
- if not ref_schema:
- if default_schema is None:
- default_schema = \
- connection.dialect.default_schema_name
- if schema == default_schema:
- ref_schema = schema
-
- loc_names = spec['local']
- ref_names = spec['foreign']
-
- con_kw = {}
- for opt in ('onupdate', 'ondelete'):
- if spec.get(opt, False):
- con_kw[opt] = spec[opt]
-
- fkey_d = {
- 'name': spec['name'],
- 'constrained_columns': loc_names,
- 'referred_schema': ref_schema,
- 'referred_table': ref_name,
- 'referred_columns': ref_names,
- 'options': con_kw
- }
- fkeys.append(fkey_d)
- return fkeys
-
- @reflection.cache
- def get_indexes(self, connection, table_name, schema=None, **kw):
-
- parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
-
- indexes = []
- for spec in parsed_state.keys:
- unique = False
- flavor = spec['type']
- if flavor == 'PRIMARY':
- continue
- if flavor == 'UNIQUE':
- unique = True
- elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
- pass
- else:
- self.logger.info(
- "Converting unknown KEY type %s to a plain KEY" % flavor)
- pass
- index_d = {}
- index_d['name'] = spec['name']
- index_d['column_names'] = [s[0] for s in spec['columns']]
- index_d['unique'] = unique
- index_d['type'] = flavor
- indexes.append(index_d)
- return indexes
-
- @reflection.cache
- def get_unique_constraints(self, connection, table_name,
- schema=None, **kw):
- parsed_state = self._parsed_state_or_create(
- connection, table_name, schema, **kw)
-
- return [
- {
- 'name': key['name'],
- 'column_names': [col[0] for col in key['columns']]
- }
- for key in parsed_state.keys
- if key['type'] == 'UNIQUE'
- ]
-
- @reflection.cache
- def get_view_definition(self, connection, view_name, schema=None, **kw):
-
- charset = self._connection_charset
- full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
- schema, view_name))
- sql = self._show_create_table(connection, None, charset,
- full_name=full_name)
- return sql
-
- def _parsed_state_or_create(self, connection, table_name,
- schema=None, **kw):
- return self._setup_parser(
- connection,
- table_name,
- schema,
- info_cache=kw.get('info_cache', None)
- )
-
- @util.memoized_property
- def _tabledef_parser(self):
- """return the MySQLTableDefinitionParser, generate if needed.
-
- The deferred creation ensures that the dialect has
- retrieved server version information first.
-
- """
- if (self.server_version_info < (4, 1) and self._server_ansiquotes):
- # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
- preparer = self.preparer(self, server_ansiquotes=False)
- else:
- preparer = self.identifier_preparer
- return MySQLTableDefinitionParser(self, preparer)
-
- @reflection.cache
- def _setup_parser(self, connection, table_name, schema=None, **kw):
- charset = self._connection_charset
- parser = self._tabledef_parser
- full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
- schema, table_name))
- sql = self._show_create_table(connection, None, charset,
- full_name=full_name)
- if sql.startswith('CREATE ALGORITHM'):
- # Adapt views to something table-like.
- columns = self._describe_table(connection, None, charset,
- full_name=full_name)
- sql = parser._describe_to_create(table_name, columns)
- return parser.parse(sql, charset)
-
- def _detect_charset(self, connection):
- raise NotImplementedError()
-
- def _detect_casing(self, connection):
- """Sniff out identifier case sensitivity.
-
- Cached per-connection. This value can not change without a server
- restart.
-
- """
- # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
-
- charset = self._connection_charset
- row = self._compat_first(connection.execute(
- "SHOW VARIABLES LIKE 'lower_case_table_names'"),
- charset=charset)
- if not row:
- cs = 0
- else:
- # 4.0.15 returns OFF or ON according to [ticket:489]
- # 3.23 doesn't, 4.0.27 doesn't..
- if row[1] == 'OFF':
- cs = 0
- elif row[1] == 'ON':
- cs = 1
- else:
- cs = int(row[1])
- return cs
-
- def _detect_collations(self, connection):
- """Pull the active COLLATIONS list from the server.
-
- Cached per-connection.
- """
-
- collations = {}
- if self.server_version_info < (4, 1, 0):
- pass
- else:
- charset = self._connection_charset
- rs = connection.execute('SHOW COLLATION')
- for row in self._compat_fetchall(rs, charset):
- collations[row[0]] = row[1]
- return collations
-
- def _detect_ansiquotes(self, connection):
- """Detect and adjust for the ANSI_QUOTES sql mode."""
-
- row = self._compat_first(
- connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
- charset=self._connection_charset)
-
- if not row:
- mode = ''
- else:
- mode = row[1] or ''
- # 4.0
- if mode.isdigit():
- mode_no = int(mode)
- mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
-
- self._server_ansiquotes = 'ANSI_QUOTES' in mode
-
- # as of MySQL 5.0.1
- self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
-
-
- def _show_create_table(self, connection, table, charset=None,
- full_name=None):
- """Run SHOW CREATE TABLE for a ``Table``."""
-
- if full_name is None:
- full_name = self.identifier_preparer.format_table(table)
- st = "SHOW CREATE TABLE %s" % full_name
-
- rp = None
- try:
- rp = connection.execute(st)
- except exc.DBAPIError as e:
- if self._extract_error_code(e.orig) == 1146:
- raise exc.NoSuchTableError(full_name)
- else:
- raise
- row = self._compat_first(rp, charset=charset)
- if not row:
- raise exc.NoSuchTableError(full_name)
- return row[1].strip()
-
- def _describe_table(self, connection, table, charset=None,
- full_name=None):
- """Run DESCRIBE for a ``Table`` and return processed rows."""
-
- if full_name is None:
- full_name = self.identifier_preparer.format_table(table)
- st = "DESCRIBE %s" % full_name
-
- rp, rows = None, None
- try:
- try:
- rp = connection.execute(st)
- except exc.DBAPIError as e:
- if self._extract_error_code(e.orig) == 1146:
- raise exc.NoSuchTableError(full_name)
- else:
- raise
- rows = self._compat_fetchall(rp, charset=charset)
- finally:
- if rp:
- rp.close()
- return rows
-
-
-class ReflectedState(object):
- """Stores raw information about a SHOW CREATE TABLE statement."""
-
- def __init__(self):
- self.columns = []
- self.table_options = {}
- self.table_name = None
- self.keys = []
- self.constraints = []
-
-
-@log.class_logger
-class MySQLTableDefinitionParser(object):
- """Parses the results of a SHOW CREATE TABLE statement."""
-
- def __init__(self, dialect, preparer):
- self.dialect = dialect
- self.preparer = preparer
- self._prep_regexes()
-
- def parse(self, show_create, charset):
- state = ReflectedState()
- state.charset = charset
- for line in re.split(r'\r?\n', show_create):
- if line.startswith(' ' + self.preparer.initial_quote):
- self._parse_column(line, state)
- # a regular table options line
- elif line.startswith(') '):
- self._parse_table_options(line, state)
- # an ANSI-mode table options line
- elif line == ')':
- pass
- elif line.startswith('CREATE '):
- self._parse_table_name(line, state)
- # Not present in real reflection, but may be if
- # loading from a file.
- elif not line:
- pass
- else:
- type_, spec = self._parse_constraints(line)
- if type_ is None:
- util.warn("Unknown schema content: %r" % line)
- elif type_ == 'key':
- state.keys.append(spec)
- elif type_ == 'constraint':
- state.constraints.append(spec)
- else:
- pass
- return state
-
- def _parse_constraints(self, line):
- """Parse a KEY or CONSTRAINT line.
-
- :param line: A line of SHOW CREATE TABLE output
- """
-
- # KEY
- m = self._re_key.match(line)
- if m:
- spec = m.groupdict()
- # convert columns into name, length pairs
- spec['columns'] = self._parse_keyexprs(spec['columns'])
- return 'key', spec
-
- # CONSTRAINT
- m = self._re_constraint.match(line)
- if m:
- spec = m.groupdict()
- spec['table'] = \
- self.preparer.unformat_identifiers(spec['table'])
- spec['local'] = [c[0]
- for c in self._parse_keyexprs(spec['local'])]
- spec['foreign'] = [c[0]
- for c in self._parse_keyexprs(spec['foreign'])]
- return 'constraint', spec
-
- # PARTITION and SUBPARTITION
- m = self._re_partition.match(line)
- if m:
- # Punt!
- return 'partition', line
-
- # No match.
- return (None, line)
-
- def _parse_table_name(self, line, state):
- """Extract the table name.
-
- :param line: The first line of SHOW CREATE TABLE
- """
-
- regex, cleanup = self._pr_name
- m = regex.match(line)
- if m:
- state.table_name = cleanup(m.group('name'))
-
- def _parse_table_options(self, line, state):
- """Build a dictionary of all reflected table-level options.
-
- :param line: The final line of SHOW CREATE TABLE output.
- """
-
- options = {}
-
- if not line or line == ')':
- pass
-
- else:
- rest_of_line = line[:]
- for regex, cleanup in self._pr_options:
- m = regex.search(rest_of_line)
- if not m:
- continue
- directive, value = m.group('directive'), m.group('val')
- if cleanup:
- value = cleanup(value)
- options[directive.lower()] = value
- rest_of_line = regex.sub('', rest_of_line)
-
- for nope in ('auto_increment', 'data directory', 'index directory'):
- options.pop(nope, None)
-
- for opt, val in options.items():
- state.table_options['%s_%s' % (self.dialect.name, opt)] = val
-
- def _parse_column(self, line, state):
- """Extract column details.
-
- Falls back to a 'minimal support' variant if full parse fails.
-
- :param line: Any column-bearing line from SHOW CREATE TABLE
- """
-
- spec = None
- m = self._re_column.match(line)
- if m:
- spec = m.groupdict()
- spec['full'] = True
- else:
- m = self._re_column_loose.match(line)
- if m:
- spec = m.groupdict()
- spec['full'] = False
- if not spec:
- util.warn("Unknown column definition %r" % line)
- return
- if not spec['full']:
- util.warn("Incomplete reflection of column definition %r" % line)
-
- name, type_, args, notnull = \
- spec['name'], spec['coltype'], spec['arg'], spec['notnull']
-
- try:
- col_type = self.dialect.ischema_names[type_]
- except KeyError:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (type_, name))
- col_type = sqltypes.NullType
-
- # Column type positional arguments eg. varchar(32)
- if args is None or args == '':
- type_args = []
- elif args[0] == "'" and args[-1] == "'":
- type_args = self._re_csv_str.findall(args)
- else:
- type_args = [int(v) for v in self._re_csv_int.findall(args)]
-
- # Column type keyword options
- type_kw = {}
- for kw in ('unsigned', 'zerofill'):
- if spec.get(kw, False):
- type_kw[kw] = True
- for kw in ('charset', 'collate'):
- if spec.get(kw, False):
- type_kw[kw] = spec[kw]
-
- if issubclass(col_type, _EnumeratedValues):
- type_args = _EnumeratedValues._strip_values(type_args)
-
- type_instance = col_type(*type_args, **type_kw)
-
- col_args, col_kw = [], {}
-
- # NOT NULL
- col_kw['nullable'] = True
- if spec.get('notnull', False):
- col_kw['nullable'] = False
-
- # AUTO_INCREMENT
- if spec.get('autoincr', False):
- col_kw['autoincrement'] = True
- elif issubclass(col_type, sqltypes.Integer):
- col_kw['autoincrement'] = False
-
- # DEFAULT
- default = spec.get('default', None)
-
- if default == 'NULL':
- # eliminates the need to deal with this later.
- default = None
-
- col_d = dict(name=name, type=type_instance, default=default)
- col_d.update(col_kw)
- state.columns.append(col_d)
-
- def _describe_to_create(self, table_name, columns):
- """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
-
- DESCRIBE is a much simpler reflection and is sufficient for
- reflecting views for runtime use. This method formats DDL
- for columns only; keys are omitted.
-
- :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
- SHOW FULL COLUMNS FROM rows must be rearranged for use with
- this function.
- """
-
- buffer = []
- for row in columns:
- (name, col_type, nullable, default, extra) = \
- [row[i] for i in (0, 1, 2, 4, 5)]
-
- line = [' ']
- line.append(self.preparer.quote_identifier(name))
- line.append(col_type)
- if not nullable:
- line.append('NOT NULL')
- if default:
- if 'auto_increment' in default:
- pass
- elif (col_type.startswith('timestamp') and
- default.startswith('C')):
- line.append('DEFAULT')
- line.append(default)
- elif default == 'NULL':
- line.append('DEFAULT')
- line.append(default)
- else:
- line.append('DEFAULT')
- line.append("'%s'" % default.replace("'", "''"))
- if extra:
- line.append(extra)
-
- buffer.append(' '.join(line))
-
- return ''.join([('CREATE TABLE %s (\n' %
- self.preparer.quote_identifier(table_name)),
- ',\n'.join(buffer),
- '\n) '])
-
- def _parse_keyexprs(self, identifiers):
- """Unpack '"col"(2),"col" ASC'-ish strings into components."""
-
- return self._re_keyexprs.findall(identifiers)
-
- def _prep_regexes(self):
- """Pre-compile regular expressions."""
-
- self._re_columns = []
- self._pr_options = []
-
- _final = self.preparer.final_quote
-
- quotes = dict(zip(('iq', 'fq', 'esc_fq'),
- [re.escape(s) for s in
- (self.preparer.initial_quote,
- _final,
- self.preparer._escape_identifier(_final))]))
-
- self._pr_name = _pr_compile(
- r'^CREATE (?:\w+ +)?TABLE +'
- r'^CREATE (?:\w+ +)?TABLE +'
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
- self.preparer._unescape_identifier)
-
- # `col`,`col2`(32),`col3`(15) DESC
- #
- # Note: ASC and DESC aren't reflected, so we'll punt...
- self._re_keyexprs = _re_compile(
- r'(?:'
- r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
- r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
-
- # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
- self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
-
- # 123 or 123,456
- self._re_csv_int = _re_compile(r'\d+')
-
- # `colname` <type> [type opts]
- # (NOT NULL | NULL)
- # DEFAULT ('value' | CURRENT_TIMESTAMP...)
- # COMMENT 'comment'
- # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
- # STORAGE (DISK|MEMORY)
- self._re_column = _re_compile(
- r'  '
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'(?P<coltype>\w+)'
- r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
- r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
- r'(?: +(?P<unsigned>UNSIGNED))?'
- r'(?: +(?P<zerofill>ZEROFILL))?'
- r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
- r'(?: +COLLATE +(?P<collate>[\w_]+))?'
- r'(?: +(?P<notnull>NOT NULL))?'
- r'(?: +DEFAULT +(?P<default>'
- r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
- r'(?: +ON UPDATE \w+)?)'
- r'))?'
- r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
- r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
- r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
- r'(?: +STORAGE +(?P<storage>\w+))?'
- r'(?: +(?P<extra>.*))?'
- r',?$'
- % quotes
- )
-
- # Fallback, try to parse as little as possible
- self._re_column_loose = _re_compile(
- r'  '
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'(?P<coltype>\w+)'
- r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
- r'.*?(?P<notnull>NOT NULL)?'
- % quotes
- )
-
- # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
- # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
- # KEY_BLOCK_SIZE size | WITH PARSER name
- self._re_key = _re_compile(
- r'  '
- r'(?:(?P<type>\S+) )?KEY'
- r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
- r'(?: +USING +(?P<using_pre>\S+))?'
- r' +\((?P<columns>.+?)\)'
- r'(?: +USING +(?P<using_post>\S+))?'
- r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
- r'(?: +WITH PARSER +(?P<parser>\S+))?'
- r',?$'
- % quotes
- )
-
- # CONSTRAINT `name` FOREIGN KEY (`local_col`)
- # REFERENCES `remote` (`remote_col`)
- # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
- # ON DELETE CASCADE ON UPDATE RESTRICT
- #
- # unique constraints come back as KEYs
- kw = quotes.copy()
- kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
- self._re_constraint = _re_compile(
- r'  '
- r'CONSTRAINT +'
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'FOREIGN KEY +'
- r'\((?P<local>[^\)]+?)\) REFERENCES +'
- r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
- r'\((?P<foreign>[^\)]+?)\)'
- r'(?: +(?P<match>MATCH \w+))?'
- r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
- r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
- % kw
- )
-
- # PARTITION
- #
- # punt!
- self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
-
- # Table-level options (COLLATE, ENGINE, etc.)
- # Do the string options first, since they have quoted
- # strings we need to get rid of.
- for option in _options_of_type_string:
- self._add_option_string(option)
-
- for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
- 'AVG_ROW_LENGTH', 'CHARACTER SET',
- 'DEFAULT CHARSET', 'CHECKSUM',
- 'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
- 'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
- 'KEY_BLOCK_SIZE'):
- self._add_option_word(option)
-
- self._add_option_regex('UNION', r'\([^\)]+\)')
- self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
- self._add_option_regex('RAID_TYPE',
- r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
-
- _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
-
- def _add_option_string(self, directive):
- regex = (r'(?P<directive>%s)%s'
- r"'(?P<val>(?:[^']|'')*?)'(?!')" %
- (re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(regex, lambda v:
- v.replace("\\\\", "\\").replace("''", "'")))
-
- def _add_option_word(self, directive):
- regex = (r'(?P<directive>%s)%s'
- r'(?P<val>\w+)' %
- (re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(regex))
-
- def _add_option_regex(self, directive, regex):
- regex = (r'(?P<directive>%s)%s'
- r'(?P<val>%s)' %
- (re.escape(directive), self._optional_equals, regex))
- self._pr_options.append(_pr_compile(regex))
-
-_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
- 'PASSWORD', 'CONNECTION')
-
-
-
-class _DecodingRowProxy(object):
- """Return unicode-decoded values based on type inspection.
-
- Smooth over data type issues (esp. with alpha driver versions) and
- normalize strings as Unicode regardless of user-configured driver
- encoding settings.
-
- """
-
- # Some MySQL-python versions can return some columns as
- # sets.Set(['value']) (seriously) but thankfully that doesn't
- # seem to come up in DDL queries.
-
- def __init__(self, rowproxy, charset):
- self.rowproxy = rowproxy
- self.charset = charset
-
- def __getitem__(self, index):
- item = self.rowproxy[index]
- if isinstance(item, _array):
- item = item.tostring()
-
- if self.charset and isinstance(item, util.binary_type):
- return item.decode(self.charset)
- else:
- return item
-
- def __getattr__(self, attr):
- item = getattr(self.rowproxy, attr)
- if isinstance(item, _array):
- item = item.tostring()
- if self.charset and isinstance(item, util.binary_type):
- return item.decode(self.charset)
- else:
- return item
-
-
-def _pr_compile(regex, cleanup=None):
- """Prepare a 2-tuple of compiled regex and callable."""
-
- return (_re_compile(regex), cleanup)
-
-
-def _re_compile(regex):
- """Compile a string to regex, I and UNICODE."""
-
- return re.compile(regex, re.I | re.UNICODE)
diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py
deleted file mode 100644
index 49728045..00000000
--- a/lib/sqlalchemy/dialects/mysql/cymysql.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# mysql/cymysql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: mysql+cymysql
- :name: CyMySQL
- :dbapi: cymysql
- :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
- :url: https://github.com/nakagami/CyMySQL
-
-"""
-import re
-
-from .mysqldb import MySQLDialect_mysqldb
-from .base import (BIT, MySQLDialect)
-from ... import util
-
-class _cymysqlBIT(BIT):
- def result_processor(self, dialect, coltype):
- """Convert a MySQL's 64 bit, variable length binary string to a long.
- """
-
- def process(value):
- if value is not None:
- v = 0
- for i in util.iterbytes(value):
- v = v << 8 | i
- return v
- return value
- return process
-
-
-class MySQLDialect_cymysql(MySQLDialect_mysqldb):
- driver = 'cymysql'
-
- description_encoding = None
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = False
- supports_unicode_statements = True
-
- colspecs = util.update_copy(
- MySQLDialect.colspecs,
- {
- BIT: _cymysqlBIT,
- }
- )
-
- @classmethod
- def dbapi(cls):
- return __import__('cymysql')
-
- def _get_server_version_info(self, connection):
- dbapi_con = connection.connection
- version = []
- r = re.compile('[.\-]')
- for n in r.split(dbapi_con.server_version):
- try:
- version.append(int(n))
- except ValueError:
- version.append(n)
- return tuple(version)
-
- def _detect_charset(self, connection):
- return connection.connection.charset
-
- def _extract_error_code(self, exception):
- return exception.errno
-
- def is_disconnect(self, e, connection, cursor):
- if isinstance(e, self.dbapi.OperationalError):
- return self._extract_error_code(e) in \
- (2006, 2013, 2014, 2045, 2055)
- elif isinstance(e, self.dbapi.InterfaceError):
- # if underlying connection is closed,
- # this is the error you get
- return True
- else:
- return False
-
-dialect = MySQLDialect_cymysql
diff --git a/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
deleted file mode 100644
index 13203fce..00000000
--- a/lib/sqlalchemy/dialects/mysql/gaerdbms.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# mysql/gaerdbms.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""
-.. dialect:: mysql+gaerdbms
- :name: Google Cloud SQL
- :dbapi: rdbms
- :connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
- :url: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide
-
- This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with minimal
- changes.
-
- .. versionadded:: 0.7.8
-
-
-Pooling
--------
-
-Google App Engine connections appear to be randomly recycled,
-so the dialect does not pool connections. The :class:`.NullPool`
-implementation is installed within the :class:`.Engine` by
-default.
-
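-A typical engine is therefore created without any pool-related arguments; the
-database, project and instance names below are illustrative::
-
- create_engine('mysql+gaerdbms:///mydb?instance=myproject:myinstance')
-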
-"""
-
-import os
-
-from .mysqldb import MySQLDialect_mysqldb
-from ...pool import NullPool
-import re
-
-
-def _is_dev_environment():
- return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
-
-
-class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
-
- @classmethod
- def dbapi(cls):
- # from django:
- # http://code.google.com/p/googleappengine/source/
- # browse/trunk/python/google/storage/speckle/
- # python/django/backend/base.py#118
- # see also [ticket:2649]
- # see also http://stackoverflow.com/q/14224679/34549
- from google.appengine.api import apiproxy_stub_map
-
- if _is_dev_environment():
- from google.appengine.api import rdbms_mysqldb
- return rdbms_mysqldb
- elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
- from google.storage.speckle.python.api import rdbms_apiproxy
- return rdbms_apiproxy
- else:
- from google.storage.speckle.python.api import rdbms_googleapi
- return rdbms_googleapi
-
- @classmethod
- def get_pool_class(cls, url):
- # Cloud SQL connections die at any moment
- return NullPool
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args()
- if not _is_dev_environment():
- # 'dsn' and 'instance' are because we are skipping
- # the traditional google.api.rdbms wrapper
- opts['dsn'] = ''
- opts['instance'] = url.query['instance']
- return [], opts
-
- def _extract_error_code(self, exception):
- match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
- # The rdbms api will wrap then re-raise some types of errors
- # making this regex return no matches.
- code = match.group(1) or match.group(2) if match else None
- if code:
- return int(code)
-
-dialect = MySQLDialect_gaerdbms
diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
deleted file mode 100644
index 3536c3ad..00000000
--- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# mysql/mysqlconnector.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: mysql+mysqlconnector
- :name: MySQL Connector/Python
- :dbapi: myconnpy
- :connectstring: mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
- :url: http://dev.mysql.com/downloads/connector/python/
-
-
-"""
-
-from .base import (MySQLDialect,
- MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer,
- BIT)
-
-from ... import util
-
-
-class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
-
- def get_lastrowid(self):
- return self.cursor.lastrowid
-
-
-class MySQLCompiler_mysqlconnector(MySQLCompiler):
- def visit_mod_binary(self, binary, operator, **kw):
- return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
-
- def post_process_text(self, text):
- return text.replace('%', '%%')
-
-
-class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
-
- def _escape_identifier(self, value):
- value = value.replace(self.escape_quote, self.escape_to_quote)
- return value.replace("%", "%%")
-
-
-class _myconnpyBIT(BIT):
- def result_processor(self, dialect, coltype):
- """MySQL-connector already converts mysql bits, so."""
-
- return None
-
-
-class MySQLDialect_mysqlconnector(MySQLDialect):
- driver = 'mysqlconnector'
-
- if util.py2k:
- supports_unicode_statements = False
- supports_unicode_binds = True
-
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = True
-
- supports_native_decimal = True
-
- default_paramstyle = 'format'
- execution_ctx_cls = MySQLExecutionContext_mysqlconnector
- statement_compiler = MySQLCompiler_mysqlconnector
-
- preparer = MySQLIdentifierPreparer_mysqlconnector
-
- colspecs = util.update_copy(
- MySQLDialect.colspecs,
- {
- BIT: _myconnpyBIT,
- }
- )
-
- @classmethod
- def dbapi(cls):
- from mysql import connector
- return connector
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
-
- opts.update(url.query)
-
- util.coerce_kw_type(opts, 'buffered', bool)
- util.coerce_kw_type(opts, 'raise_on_warnings', bool)
- opts.setdefault('buffered', True)
- opts.setdefault('raise_on_warnings', True)
-
- # FOUND_ROWS must be set in ClientFlag to enable
- # supports_sane_rowcount.
- if self.dbapi is not None:
- try:
- from mysql.connector.constants import ClientFlag
- client_flags = opts.get('client_flags', ClientFlag.get_default())
- client_flags |= ClientFlag.FOUND_ROWS
- opts['client_flags'] = client_flags
- except:
- pass
- return [[], opts]
-
- def _get_server_version_info(self, connection):
- dbapi_con = connection.connection
- version = dbapi_con.get_server_version()
- return tuple(version)
-
- def _detect_charset(self, connection):
- return connection.connection.charset
-
- def _extract_error_code(self, exception):
- return exception.errno
-
- def is_disconnect(self, e, connection, cursor):
- errnos = (2006, 2013, 2014, 2045, 2055, 2048)
- exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
- if isinstance(e, exceptions):
- return e.errno in errnos or \
- "MySQL Connection not available." in str(e)
- else:
- return False
-
- def _compat_fetchall(self, rp, charset=None):
- return rp.fetchall()
-
- def _compat_fetchone(self, rp, charset=None):
- return rp.fetchone()
-
-dialect = MySQLDialect_mysqlconnector
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py
deleted file mode 100644
index 7fb63f13..00000000
--- a/lib/sqlalchemy/dialects/mysql/mysqldb.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# mysql/mysqldb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: mysql+mysqldb
- :name: MySQL-Python
- :dbapi: mysqldb
- :connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
- :url: http://sourceforge.net/projects/mysql-python
-
-
-Unicode
--------
-
-MySQLdb requires a "charset" parameter to be passed in order for it
-to handle non-ASCII characters correctly. When this parameter is passed,
-MySQLdb will also implicitly set the "use_unicode" flag to true, which means
-that it will return Python unicode objects instead of bytestrings.
-However, SQLAlchemy's decode process, when C extensions are enabled,
-is orders of magnitude faster than that of MySQLdb as it does not call into
-Python functions to do so. Therefore, the **recommended URL to use for
-unicode** will include both charset and use_unicode=0::
-
- create_engine("mysql+mysqldb://user:pass@host/dbname?charset=utf8&use_unicode=0")
-
-As of this writing, MySQLdb only runs on Python 2. It is not known how
-MySQLdb behaves on Python 3 with regard to unicode decoding.
-
-
-Known Issues
--------------
-
-MySQL-python version 1.2.2 has a serious memory leak related
-to unicode conversion, a feature which is disabled via ``use_unicode=0``.
-It is strongly advised to use the latest version of MySQL-Python.
-
-"""
-
-from .base import (MySQLDialect, MySQLExecutionContext,
- MySQLCompiler, MySQLIdentifierPreparer)
-from ...connectors.mysqldb import (
- MySQLDBExecutionContext,
- MySQLDBCompiler,
- MySQLDBIdentifierPreparer,
- MySQLDBConnector
- )
-from .base import TEXT
-from ... import sql
-
-class MySQLExecutionContext_mysqldb(MySQLDBExecutionContext, MySQLExecutionContext):
- pass
-
-
-class MySQLCompiler_mysqldb(MySQLDBCompiler, MySQLCompiler):
- pass
-
-
-class MySQLIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, MySQLIdentifierPreparer):
- pass
-
-
-class MySQLDialect_mysqldb(MySQLDBConnector, MySQLDialect):
- execution_ctx_cls = MySQLExecutionContext_mysqldb
- statement_compiler = MySQLCompiler_mysqldb
- preparer = MySQLIdentifierPreparer_mysqldb
-
- def _check_unicode_returns(self, connection):
- # work around issue fixed in
- # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
- # specific issue w/ the utf8_bin collation and unicode returns
-
- has_utf8_bin = connection.scalar(
- "show collation where %s = 'utf8' and %s = 'utf8_bin'"
- % (
- self.identifier_preparer.quote("Charset"),
- self.identifier_preparer.quote("Collation")
- ))
- if has_utf8_bin:
- additional_tests = [
- sql.collate(sql.cast(
- sql.literal_column(
- "'test collated returns'"),
- TEXT(charset='utf8')), "utf8_bin")
- ]
- else:
- additional_tests = []
- return super(MySQLDBConnector, self)._check_unicode_returns(
- connection, additional_tests)
-
-dialect = MySQLDialect_mysqldb
diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py
deleted file mode 100644
index e6b50f33..00000000
--- a/lib/sqlalchemy/dialects/mysql/oursql.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# mysql/oursql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: mysql+oursql
- :name: OurSQL
- :dbapi: oursql
- :connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
- :url: http://packages.python.org/oursql/
-
-Unicode
--------
-
-oursql defaults to using ``utf8`` as the connection charset, but other
-encodings may be used instead. Like the MySQL-Python driver, unicode support
-can be completely disabled::
-
- # oursql sets the connection charset to utf8 automatically; all strings come
- # back as utf8 str
- create_engine('mysql+oursql:///mydb?use_unicode=0')
-
-To not automatically use ``utf8`` and instead use whatever the connection
-defaults to, there is a separate parameter::
-
- # use the default connection charset; all strings come back as unicode
- create_engine('mysql+oursql:///mydb?default_charset=1')
-
- # use latin1 as the connection charset; all strings come back as unicode
- create_engine('mysql+oursql:///mydb?charset=latin1')
-"""
-
-import re
-
-from .base import (BIT, MySQLDialect, MySQLExecutionContext)
-from ... import types as sqltypes, util
-
-
-class _oursqlBIT(BIT):
- def result_processor(self, dialect, coltype):
- """oursql already converts mysql bits, so."""
-
- return None
-
-
-class MySQLExecutionContext_oursql(MySQLExecutionContext):
-
- @property
- def plain_query(self):
- return self.execution_options.get('_oursql_plain_query', False)
-
-
-class MySQLDialect_oursql(MySQLDialect):
- driver = 'oursql'
-
- if util.py2k:
- supports_unicode_binds = True
- supports_unicode_statements = True
-
- supports_native_decimal = True
-
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = True
- execution_ctx_cls = MySQLExecutionContext_oursql
-
- colspecs = util.update_copy(
- MySQLDialect.colspecs,
- {
- sqltypes.Time: sqltypes.Time,
- BIT: _oursqlBIT,
- }
- )
-
- @classmethod
- def dbapi(cls):
- return __import__('oursql')
-
- def do_execute(self, cursor, statement, parameters, context=None):
- """Provide an implementation of *cursor.execute(statement, parameters)*."""
-
- if context and context.plain_query:
- cursor.execute(statement, plain_query=True)
- else:
- cursor.execute(statement, parameters)
-
- def do_begin(self, connection):
- connection.cursor().execute('BEGIN', plain_query=True)
-
- def _xa_query(self, connection, query, xid):
- if util.py2k:
- arg = connection.connection._escape_string(xid)
- else:
- charset = self._connection_charset
- arg = connection.connection._escape_string(xid.encode(charset)).decode(charset)
- arg = "'%s'" % arg
- connection.execution_options(_oursql_plain_query=True).execute(query % arg)
-
- # Because mysql is bad, these methods have to be
- # reimplemented to use _PlainQuery. Basically, some queries
- # refuse to return any data if they're run through
- # the parameterized query API, or refuse to be parameterized
- # in the first place.
- def do_begin_twophase(self, connection, xid):
- self._xa_query(connection, 'XA BEGIN %s', xid)
-
- def do_prepare_twophase(self, connection, xid):
- self._xa_query(connection, 'XA END %s', xid)
- self._xa_query(connection, 'XA PREPARE %s', xid)
-
- def do_rollback_twophase(self, connection, xid, is_prepared=True,
- recover=False):
- if not is_prepared:
- self._xa_query(connection, 'XA END %s', xid)
- self._xa_query(connection, 'XA ROLLBACK %s', xid)
-
- def do_commit_twophase(self, connection, xid, is_prepared=True,
- recover=False):
- if not is_prepared:
- self.do_prepare_twophase(connection, xid)
- self._xa_query(connection, 'XA COMMIT %s', xid)
-
- # Q: why didn't we need all these "plain_query" overrides earlier ?
- # am i on a newer/older version of OurSQL ?
- def has_table(self, connection, table_name, schema=None):
- return MySQLDialect.has_table(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- table_name,
- schema
- )
-
- def get_table_options(self, connection, table_name, schema=None, **kw):
- return MySQLDialect.get_table_options(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- table_name,
- schema=schema,
- **kw
- )
-
- def get_columns(self, connection, table_name, schema=None, **kw):
- return MySQLDialect.get_columns(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- table_name,
- schema=schema,
- **kw
- )
-
- def get_view_names(self, connection, schema=None, **kw):
- return MySQLDialect.get_view_names(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- schema=schema,
- **kw
- )
-
- def get_table_names(self, connection, schema=None, **kw):
- return MySQLDialect.get_table_names(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- schema
- )
-
- def get_schema_names(self, connection, **kw):
- return MySQLDialect.get_schema_names(
- self,
- connection.connect().execution_options(_oursql_plain_query=True),
- **kw
- )
-
- def initialize(self, connection):
- return MySQLDialect.initialize(
- self,
- connection.execution_options(_oursql_plain_query=True)
- )
-
- def _show_create_table(self, connection, table, charset=None,
- full_name=None):
- return MySQLDialect._show_create_table(
- self,
- connection.contextual_connect(close_with_result=True).
- execution_options(_oursql_plain_query=True),
- table, charset, full_name
- )
-
- def is_disconnect(self, e, connection, cursor):
- if isinstance(e, self.dbapi.ProgrammingError):
- return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
- else:
- return e.errno in (2006, 2013, 2014, 2045, 2055)
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(database='db', username='user',
- password='passwd')
- opts.update(url.query)
-
- util.coerce_kw_type(opts, 'port', int)
- util.coerce_kw_type(opts, 'compress', bool)
- util.coerce_kw_type(opts, 'autoping', bool)
- util.coerce_kw_type(opts, 'raise_on_warnings', bool)
-
- util.coerce_kw_type(opts, 'default_charset', bool)
- if opts.pop('default_charset', False):
- opts['charset'] = None
- else:
- util.coerce_kw_type(opts, 'charset', str)
- opts['use_unicode'] = opts.get('use_unicode', True)
- util.coerce_kw_type(opts, 'use_unicode', bool)
-
- # FOUND_ROWS must be set in CLIENT_FLAGS to enable
- # supports_sane_rowcount.
- opts.setdefault('found_rows', True)
-
- ssl = {}
- for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
- 'ssl_capath', 'ssl_cipher']:
- if key in opts:
- ssl[key[4:]] = opts[key]
- util.coerce_kw_type(ssl, key[4:], str)
- del opts[key]
- if ssl:
- opts['ssl'] = ssl
-
- return [[], opts]
-
- def _get_server_version_info(self, connection):
- dbapi_con = connection.connection
- version = []
- r = re.compile('[.\-]')
- for n in r.split(dbapi_con.server_info):
- try:
- version.append(int(n))
- except ValueError:
- version.append(n)
- return tuple(version)
-
- def _extract_error_code(self, exception):
- return exception.errno
-
- def _detect_charset(self, connection):
- """Sniff out the character set in use for connection results."""
-
- return connection.connection.charset
-
- def _compat_fetchall(self, rp, charset=None):
- """oursql isn't super-broken like MySQLdb, yaaay."""
- return rp.fetchall()
-
- def _compat_fetchone(self, rp, charset=None):
- """oursql isn't super-broken like MySQLdb, yaaay."""
- return rp.fetchone()
-
- def _compat_first(self, rp, charset=None):
- return rp.first()
-
-
-dialect = MySQLDialect_oursql
diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py
deleted file mode 100644
index 7989203c..00000000
--- a/lib/sqlalchemy/dialects/mysql/pymysql.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# mysql/pymysql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: mysql+pymysql
- :name: PyMySQL
- :dbapi: pymysql
- :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
- :url: http://code.google.com/p/pymysql/
-
-MySQL-Python Compatibility
---------------------------
-
-The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
-and targets 100% compatibility. Most behavioral notes for MySQL-python apply to
-the pymysql driver as well.
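-
-For example, a typical URL; the host and database names here are illustrative::
-
- create_engine("mysql+pymysql://user:password@somehost/dbname?charset=utf8")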
-
-"""
-
-from .mysqldb import MySQLDialect_mysqldb
-from ...util import py3k
-
-class MySQLDialect_pymysql(MySQLDialect_mysqldb):
- driver = 'pymysql'
-
- description_encoding = None
- if py3k:
- supports_unicode_statements = True
-
-
- @classmethod
- def dbapi(cls):
- return __import__('pymysql')
-
- if py3k:
- def _extract_error_code(self, exception):
- if isinstance(exception.args[0], Exception):
- exception = exception.args[0]
- return exception.args[0]
-
-dialect = MySQLDialect_pymysql
diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py
deleted file mode 100644
index e60e39ce..00000000
--- a/lib/sqlalchemy/dialects/mysql/pyodbc.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# mysql/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-
-.. dialect:: mysql+pyodbc
- :name: PyODBC
- :dbapi: pyodbc
- :connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
- :url: http://pypi.python.org/pypi/pyodbc/
-
-
-Limitations
------------
-
-The mysql-pyodbc dialect is subject to unresolved character encoding issues
-which exist within the current ODBC drivers available.
-(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
-of OurSQL, MySQLdb, or MySQL-connector/Python.
-
-"""
-
-from .base import MySQLDialect, MySQLExecutionContext
-from ...connectors.pyodbc import PyODBCConnector
-from ... import util
-import re
-
-
-class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
-
- def get_lastrowid(self):
- cursor = self.create_cursor()
- cursor.execute("SELECT LAST_INSERT_ID()")
- lastrowid = cursor.fetchone()[0]
- cursor.close()
- return lastrowid
-
-
-class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
- supports_unicode_statements = False
- execution_ctx_cls = MySQLExecutionContext_pyodbc
-
- pyodbc_driver_name = "MySQL"
-
- def __init__(self, **kw):
- # deal with http://code.google.com/p/pyodbc/issues/detail?id=25
- kw.setdefault('convert_unicode', True)
- super(MySQLDialect_pyodbc, self).__init__(**kw)
-
- def _detect_charset(self, connection):
- """Sniff out the character set in use for connection results."""
-
- # Prefer 'character_set_results' for the current connection over the
- # value in the driver. SET NAMES or individual variable SETs will
- # change the charset without updating the driver's view of the world.
- #
- # If it's decided that issuing that sort of SQL leaves you SOL, then
- # this can prefer the driver value.
- rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
- opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
- for key in ('character_set_connection', 'character_set'):
- if opts.get(key, None):
- return opts[key]
-
- util.warn("Could not detect the connection character set. Assuming latin1.")
- return 'latin1'
-
- def _extract_error_code(self, exception):
- m = re.compile(r"\((\d+)\)").search(str(exception.args))
- c = m.group(1)
- if c:
- return int(c)
- else:
- return None
-
-dialect = MySQLDialect_pyodbc
diff --git a/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
deleted file mode 100644
index b5fcfbda..00000000
--- a/lib/sqlalchemy/dialects/mysql/zxjdbc.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# mysql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: mysql+zxjdbc
- :name: zxjdbc for Jython
- :dbapi: zxjdbc
- :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
- :driverurl: http://dev.mysql.com/downloads/connector/j/
-
-Character Sets
---------------
-
-SQLAlchemy zxjdbc dialects pass unicode straight through to the
-zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
-MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
-``characterEncoding`` connection property to ``UTF-8``. It may be
-overridden via a ``create_engine`` URL parameter.
-
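-For example, to send ``latin1`` instead; the host and database names are
-illustrative::
-
- create_engine("mysql+zxjdbc://user:password@somehost/dbname?characterEncoding=latin1")
-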
-"""
-import re
-
-from ... import types as sqltypes, util
-from ...connectors.zxJDBC import ZxJDBCConnector
-from .base import BIT, MySQLDialect, MySQLExecutionContext
-
-
-class _ZxJDBCBit(BIT):
- def result_processor(self, dialect, coltype):
- """Converts boolean or byte arrays from MySQL Connector/J to longs."""
- def process(value):
- if value is None:
- return value
- if isinstance(value, bool):
- return int(value)
- v = 0
- for i in value:
- v = v << 8 | (i & 0xff)
- value = v
- return value
- return process
-
-
-class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
- def get_lastrowid(self):
- cursor = self.create_cursor()
- cursor.execute("SELECT LAST_INSERT_ID()")
- lastrowid = cursor.fetchone()[0]
- cursor.close()
- return lastrowid
-
-
-class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
- jdbc_db_name = 'mysql'
- jdbc_driver_name = 'com.mysql.jdbc.Driver'
-
- execution_ctx_cls = MySQLExecutionContext_zxjdbc
-
- colspecs = util.update_copy(
- MySQLDialect.colspecs,
- {
- sqltypes.Time: sqltypes.Time,
- BIT: _ZxJDBCBit
- }
- )
-
- def _detect_charset(self, connection):
- """Sniff out the character set in use for connection results."""
- # Prefer 'character_set_results' for the current connection over the
- # value in the driver. SET NAMES or individual variable SETs will
- # change the charset without updating the driver's view of the world.
- #
- # If it's decided that issuing that sort of SQL leaves you SOL, then
- # this can prefer the driver value.
- rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
- opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
- for key in ('character_set_connection', 'character_set'):
- if opts.get(key, None):
- return opts[key]
-
- util.warn("Could not detect the connection character set. Assuming latin1.")
- return 'latin1'
-
- def _driver_kwargs(self):
- """return kw arg dict to be sent to connect()."""
- return dict(characterEncoding='UTF-8', yearIsDateType='false')
-
- def _extract_error_code(self, exception):
- # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
- # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
- m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
- c = m.group(1)
- if c:
- return int(c)
-
- def _get_server_version_info(self, connection):
- dbapi_con = connection.connection
- version = []
- r = re.compile('[.\-]')
- for n in r.split(dbapi_con.dbversion):
- try:
- version.append(int(n))
- except ValueError:
- version.append(n)
- return tuple(version)
-
-dialect = MySQLDialect_zxjdbc
diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py
deleted file mode 100644
index b75762ab..00000000
--- a/lib/sqlalchemy/dialects/oracle/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# oracle/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc
-
-base.dialect = cx_oracle.dialect
-
-from sqlalchemy.dialects.oracle.base import \
- VARCHAR, NVARCHAR, CHAR, DATE, NUMBER,\
- BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\
- FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\
- VARCHAR2, NVARCHAR2, ROWID, dialect
-
-
-__all__ = (
-'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER',
-'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
-'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
-'VARCHAR2', 'NVARCHAR2', 'ROWID'
-)
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
deleted file mode 100644
index 8bacb885..00000000
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ /dev/null
@@ -1,1291 +0,0 @@
-# oracle/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: oracle
- :name: Oracle
-
- Oracle versions 8 through the current release (11g at the time of this writing) are supported.
-
-Connect Arguments
------------------
-
-The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
-affect the behavior of the dialect regardless of driver in use.
-
-* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
- to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins.
-
-* ``optimize_limits`` - defaults to ``False``. see the section on LIMIT/OFFSET.
-
-* ``use_binds_for_limits`` - defaults to ``True``. see the section on LIMIT/OFFSET.
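-
-For example, these flags may be combined on a single engine; the DSN and
-credentials below are illustrative::
-
- engine = create_engine("oracle://scott:tiger@dsn",
-                        use_ansi=True,
-                        optimize_limits=True,
-                        use_binds_for_limits=False)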
-
-Auto Increment Behavior
------------------------
-
-SQLAlchemy Table objects which include integer primary keys are usually assumed to have
-"autoincrementing" behavior, meaning they can generate their own primary key values upon
-INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
-to produce these values. With the Oracle dialect, *a sequence must always be explicitly
-specified to enable autoincrement*. This is divergent from the majority of documentation
-examples which assume the usage of an autoincrement-capable database. To specify sequences,
-use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
-
- t = Table('mytable', metadata,
- Column('id', Integer, Sequence('id_seq'), primary_key=True),
- Column(...), ...
- )
-
-This step is also required when using table reflection, i.e. autoload=True::
-
- t = Table('mytable', metadata,
- Column('id', Integer, Sequence('id_seq'), primary_key=True),
- autoload=True
- )
-
-Identifier Casing
------------------
-
-In Oracle, the data dictionary represents all case insensitive identifier names
-using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
-name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
-to and from those two formats during schema level communication, such as reflection of
-tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
-case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
-against data dictionary data received from Oracle, so unless identifier names have been
-truly created as case sensitive (i.e. using quoted names), all lowercase names should be
-used on the SQLAlchemy side.
-
-
-LIMIT/OFFSET Support
---------------------
-
-Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
-a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
-is taken from
-http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
-
-There are two options which affect its behavior:
-
-* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this
- optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
-* the values passed for the limit/offset are sent as bound parameters. Some users have observed
- that Oracle produces a poor query plan when the values are sent as binds and not
- rendered literally. To render the limit/offset values literally within the SQL
- statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
-
-Some users have reported better performance when the entirely different approach of a
-window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
-that the majority of users don't observe this). To suit this case the
-method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
-http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
-which installs a select compiler that overrides the generation of limit/offset with
-a window function.
-
-.. _oracle_returning:
-
-RETURNING Support
------------------
-
-The Oracle database supports a limited form of RETURNING, in order to retrieve result
-sets of matched rows from INSERT, UPDATE and DELETE statements. Oracle's
-RETURNING..INTO syntax only supports one row being returned, as it relies upon
-OUT parameters in order to function. In addition, supported DBAPIs have further
-limitations (see :ref:`cx_oracle_returning`).
-
-SQLAlchemy's "implicit returning" feature, which employs RETURNING within an INSERT
-and sometimes an UPDATE statement in order to fetch newly generated primary key values
-and other SQL defaults and expressions, is normally enabled on the Oracle
-backend. By default, "implicit returning" typically only fetches the value of a
-single ``nextval(some_seq)`` expression embedded into an INSERT in order to increment
-a sequence within an INSERT statement and get the value back at the same time.
-To disable this feature across the board, specify ``implicit_returning=False`` to
-:func:`.create_engine`::
-
- engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False)
-
-Implicit returning can also be disabled on a table-by-table basis as a table option::
-
- # Core Table
- my_table = Table("my_table", metadata, ..., implicit_returning=False)
-
-
- # declarative
- class MyClass(Base):
- __tablename__ = 'my_table'
- __table_args__ = {"implicit_returning": False}
-
-.. seealso::
-
- :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on implicit returning.
-
-ON UPDATE CASCADE
------------------
-
-Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
-is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .
-
-When using the SQLAlchemy ORM, the ORM has a limited ability to issue cascading
-updates manually - specify :class:`.ForeignKey` objects using the
-``deferrable=True, initially='deferred'`` keyword arguments,
-and specify ``passive_updates=False`` on each :func:`.relationship`.
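-
-A minimal declarative sketch of this configuration (the table, class and column
-names are illustrative only)::
-
-    from sqlalchemy import Column, ForeignKey, Integer
-    from sqlalchemy.ext.declarative import declarative_base
-    from sqlalchemy.orm import relationship
-
-    Base = declarative_base()
-
-    class Parent(Base):
-        __tablename__ = 'parent'
-        id = Column(Integer, primary_key=True)
-
-        # the ORM emits the cascading UPDATE itself, rather than relying
-        # on the database to do it
-        children = relationship("Child", passive_updates=False)
-
-    class Child(Base):
-        __tablename__ = 'child'
-        id = Column(Integer, primary_key=True)
-        parent_id = Column(
-            Integer,
-            ForeignKey('parent.id', deferrable=True, initially='deferred'))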
-
-Oracle 8 Compatibility
-----------------------
-
-When Oracle 8 is detected, the dialect internally configures itself to the following
-behaviors:
-
-* the use_ansi flag is set to False. This has the effect of converting all
- JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
- makes use of Oracle's (+) operator.
-
-* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
-  the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
-  instead. This is because these types don't seem to work correctly on Oracle 8
-  even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
-  and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
-
-* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
- encodes all Python unicode objects to "string" before passing in as bind parameters.
-
-Synonym/DBLINK Reflection
--------------------------
-
-When using reflection with Table objects, the dialect can optionally search for tables
-indicated by synonyms, either in local or remote schemas or accessed over DBLINK,
-by passing the flag ``oracle_resolve_synonyms=True`` as a
-keyword argument to the :class:`.Table` construct::
-
- some_table = Table('some_table', autoload=True,
- autoload_with=some_engine,
- oracle_resolve_synonyms=True)
-
-When this flag is set, the given name (such as ``some_table`` above) will
-be searched not just in the ``ALL_TABLES`` view, but also within the
-``ALL_SYNONYMS`` view to see if this name is actually a synonym to another name.
-If the synonym is located and refers to a DBLINK, the oracle dialect knows
-how to locate the table's information using DBLINK syntax (e.g. ``@dblink``).
-
-``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
-accepted, including methods such as :meth:`.MetaData.reflect` and
-:meth:`.Inspector.get_columns`.
-
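-For example, to apply the flag during a :meth:`.MetaData.reflect` operation
-(a sketch; ``some_engine`` is assumed to exist)::
-
-    from sqlalchemy import MetaData
-
-    metadata = MetaData()
-    metadata.reflect(some_engine, oracle_resolve_synonyms=True)
-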
-If synonyms are not in use, this flag should be left disabled.
-
-DateTime Compatibility
-----------------------
-
-Oracle has no datatype known as ``DATETIME``; it instead has only ``DATE``,
-which can actually store a date and time value. For this reason, the Oracle
-dialect provides a type :class:`.oracle.DATE` which is a subclass of
-:class:`.DateTime`. This type has no special behavior, and is present
-only as a "marker"; additionally, when a database column
-is reflected and the type is reported as ``DATE``, the time-supporting
-:class:`.oracle.DATE` type is used.
-
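-As a short illustration of the relationship between these types (the table and
-column names are illustrative)::
-
-    from sqlalchemy import Column, DateTime, MetaData, Table
-    from sqlalchemy.dialects import oracle
-
-    # renders "created_at DATE" in Oracle DDL
-    t = Table('t', MetaData(), Column('created_at', DateTime))
-
-    # a reflected DATE column comes back as oracle.DATE, which subclasses
-    # the generic DateTime type
-    assert issubclass(oracle.DATE, DateTime)
-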
-.. versionchanged:: 0.9.4 Added :class:`.oracle.DATE` to subclass
- :class:`.DateTime`. This is a change as previous versions
- would reflect a ``DATE`` column as :class:`.types.DATE`, which subclasses
- :class:`.Date`. The only significance here is for schemes that are
- examining the type of column for use in special Python translations or
- for migrating schemas to other database backends.
-
-"""
-
-import re
-
-from sqlalchemy import util, sql
-from sqlalchemy.engine import default, base, reflection
-from sqlalchemy.sql import compiler, visitors, expression
-from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
-from sqlalchemy import types as sqltypes, schema as sa_schema
-from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
- BLOB, CLOB, TIMESTAMP, FLOAT
-
-RESERVED_WORDS = \
- set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '\
- 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '\
- 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '\
- 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '\
- 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '\
- 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '\
- 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '\
- 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '\
- 'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split())
-
-NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER '
- 'CURRENT_TIME CURRENT_TIMESTAMP'.split())
-
-
-class RAW(sqltypes._Binary):
- __visit_name__ = 'RAW'
-OracleRaw = RAW
-
-
-class NCLOB(sqltypes.Text):
- __visit_name__ = 'NCLOB'
-
-
-class VARCHAR2(VARCHAR):
- __visit_name__ = 'VARCHAR2'
-
-NVARCHAR2 = NVARCHAR
-
-
-class NUMBER(sqltypes.Numeric, sqltypes.Integer):
- __visit_name__ = 'NUMBER'
-
- def __init__(self, precision=None, scale=None, asdecimal=None):
- if asdecimal is None:
- asdecimal = bool(scale and scale > 0)
-
- super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
-
- def adapt(self, impltype):
- ret = super(NUMBER, self).adapt(impltype)
- # leave a hint for the DBAPI handler
- ret._is_oracle_number = True
- return ret
-
- @property
- def _type_affinity(self):
- if bool(self.scale and self.scale > 0):
- return sqltypes.Numeric
- else:
- return sqltypes.Integer
-
-
-class DOUBLE_PRECISION(sqltypes.Numeric):
- __visit_name__ = 'DOUBLE_PRECISION'
-
- def __init__(self, precision=None, scale=None, asdecimal=None):
- if asdecimal is None:
- asdecimal = False
-
- super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
-
-
-class BFILE(sqltypes.LargeBinary):
- __visit_name__ = 'BFILE'
-
-
-class LONG(sqltypes.Text):
- __visit_name__ = 'LONG'
-
-class DATE(sqltypes.DateTime):
- """Provide the oracle DATE type.
-
- This type has no special Python behavior, except that it subclasses
- :class:`.types.DateTime`; this is to suit the fact that the Oracle
- ``DATE`` type supports a time value.
-
- .. versionadded:: 0.9.4
-
- """
- __visit_name__ = 'DATE'
-
-
- def _compare_type_affinity(self, other):
- return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
-
-
-class INTERVAL(sqltypes.TypeEngine):
- __visit_name__ = 'INTERVAL'
-
- def __init__(self,
- day_precision=None,
- second_precision=None):
- """Construct an INTERVAL.
-
- Note that only DAY TO SECOND intervals are currently supported.
- This is due to a lack of support for YEAR TO MONTH intervals
- within available DBAPIs (cx_oracle and zxjdbc).
-
- :param day_precision: the day precision value. this is the number of digits
- to store for the day field. Defaults to "2"
- :param second_precision: the second precision value. this is the number of digits
- to store for the fractional seconds field. Defaults to "6".
-
- """
- self.day_precision = day_precision
- self.second_precision = second_precision
-
- @classmethod
- def _adapt_from_generic_interval(cls, interval):
- return INTERVAL(day_precision=interval.day_precision,
- second_precision=interval.second_precision)
-
- @property
- def _type_affinity(self):
- return sqltypes.Interval
-
-
-class ROWID(sqltypes.TypeEngine):
- """Oracle ROWID type.
-
- When used in a cast() or similar, generates ROWID.
-
- """
- __visit_name__ = 'ROWID'
-
-
-class _OracleBoolean(sqltypes.Boolean):
- def get_dbapi_type(self, dbapi):
- return dbapi.NUMBER
-
-colspecs = {
- sqltypes.Boolean: _OracleBoolean,
- sqltypes.Interval: INTERVAL,
- sqltypes.DateTime: DATE
-}
-
-ischema_names = {
- 'VARCHAR2': VARCHAR,
- 'NVARCHAR2': NVARCHAR,
- 'CHAR': CHAR,
- 'DATE': DATE,
- 'NUMBER': NUMBER,
- 'BLOB': BLOB,
- 'BFILE': BFILE,
- 'CLOB': CLOB,
- 'NCLOB': NCLOB,
- 'TIMESTAMP': TIMESTAMP,
- 'TIMESTAMP WITH TIME ZONE': TIMESTAMP,
- 'INTERVAL DAY TO SECOND': INTERVAL,
- 'RAW': RAW,
- 'FLOAT': FLOAT,
- 'DOUBLE PRECISION': DOUBLE_PRECISION,
- 'LONG': LONG,
-}
-
-
-class OracleTypeCompiler(compiler.GenericTypeCompiler):
- # Note:
- # Oracle DATE == DATETIME
- # Oracle does not allow milliseconds in DATE
- # Oracle does not support TIME columns
-
- def visit_datetime(self, type_):
- return self.visit_DATE(type_)
-
- def visit_float(self, type_):
- return self.visit_FLOAT(type_)
-
- def visit_unicode(self, type_):
- if self.dialect._supports_nchar:
- return self.visit_NVARCHAR2(type_)
- else:
- return self.visit_VARCHAR2(type_)
-
- def visit_INTERVAL(self, type_):
- return "INTERVAL DAY%s TO SECOND%s" % (
- type_.day_precision is not None and
- "(%d)" % type_.day_precision or
- "",
- type_.second_precision is not None and
- "(%d)" % type_.second_precision or
- "",
- )
-
- def visit_LONG(self, type_):
- return "LONG"
-
- def visit_TIMESTAMP(self, type_):
- if type_.timezone:
- return "TIMESTAMP WITH TIME ZONE"
- else:
- return "TIMESTAMP"
-
- def visit_DOUBLE_PRECISION(self, type_):
- return self._generate_numeric(type_, "DOUBLE PRECISION")
-
- def visit_NUMBER(self, type_, **kw):
- return self._generate_numeric(type_, "NUMBER", **kw)
-
- def _generate_numeric(self, type_, name, precision=None, scale=None):
- if precision is None:
- precision = type_.precision
-
- if scale is None:
- scale = getattr(type_, 'scale', None)
-
- if precision is None:
- return name
- elif scale is None:
- n = "%(name)s(%(precision)s)"
- return n % {'name': name, 'precision': precision}
- else:
- n = "%(name)s(%(precision)s, %(scale)s)"
- return n % {'name': name, 'precision': precision, 'scale': scale}
-
- def visit_string(self, type_):
- return self.visit_VARCHAR2(type_)
-
- def visit_VARCHAR2(self, type_):
- return self._visit_varchar(type_, '', '2')
-
- def visit_NVARCHAR2(self, type_):
- return self._visit_varchar(type_, 'N', '2')
- visit_NVARCHAR = visit_NVARCHAR2
-
- def visit_VARCHAR(self, type_):
- return self._visit_varchar(type_, '', '')
-
- def _visit_varchar(self, type_, n, num):
- if not type_.length:
- return "%(n)sVARCHAR%(two)s" % {'two': num, 'n': n}
- elif not n and self.dialect._supports_char_length:
- varchar = "VARCHAR%(two)s(%(length)s CHAR)"
- return varchar % {'length': type_.length, 'two': num}
- else:
- varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
- return varchar % {'length': type_.length, 'two': num, 'n': n}
-
- def visit_text(self, type_):
- return self.visit_CLOB(type_)
-
- def visit_unicode_text(self, type_):
- if self.dialect._supports_nchar:
- return self.visit_NCLOB(type_)
- else:
- return self.visit_CLOB(type_)
-
- def visit_large_binary(self, type_):
- return self.visit_BLOB(type_)
-
- def visit_big_integer(self, type_):
- return self.visit_NUMBER(type_, precision=19)
-
- def visit_boolean(self, type_):
- return self.visit_SMALLINT(type_)
-
- def visit_RAW(self, type_):
- if type_.length:
- return "RAW(%(length)s)" % {'length': type_.length}
- else:
- return "RAW"
-
- def visit_ROWID(self, type_):
- return "ROWID"
-
-
-class OracleCompiler(compiler.SQLCompiler):
- """Oracle compiler modifies the lexical structure of Select
- statements to work under non-ANSI configured Oracle databases, if
- the use_ansi flag is False.
- """
-
- compound_keywords = util.update_copy(
- compiler.SQLCompiler.compound_keywords,
- {
- expression.CompoundSelect.EXCEPT: 'MINUS'
- }
- )
-
- def __init__(self, *args, **kwargs):
- self.__wheres = {}
- self._quoted_bind_names = {}
- super(OracleCompiler, self).__init__(*args, **kwargs)
-
- def visit_mod_binary(self, binary, operator, **kw):
- return "mod(%s, %s)" % (self.process(binary.left, **kw),
- self.process(binary.right, **kw))
-
- def visit_now_func(self, fn, **kw):
- return "CURRENT_TIMESTAMP"
-
- def visit_char_length_func(self, fn, **kw):
- return "LENGTH" + self.function_argspec(fn, **kw)
-
- def visit_match_op_binary(self, binary, operator, **kw):
- return "CONTAINS (%s, %s)" % (self.process(binary.left),
- self.process(binary.right))
-
- def visit_true(self, expr, **kw):
- return '1'
-
- def visit_false(self, expr, **kw):
- return '0'
-
- def get_select_hint_text(self, byfroms):
- return " ".join(
- "/*+ %s */" % text for table, text in byfroms.items()
- )
-
- def function_argspec(self, fn, **kw):
- if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
- return compiler.SQLCompiler.function_argspec(self, fn, **kw)
- else:
- return ""
-
- def default_from(self):
- """Called when a ``SELECT`` statement has no froms,
- and no ``FROM`` clause is to be appended.
-
- The Oracle compiler tacks a "FROM DUAL" to the statement.
- """
-
- return " FROM DUAL"
-
- def visit_join(self, join, **kwargs):
- if self.dialect.use_ansi:
- return compiler.SQLCompiler.visit_join(self, join, **kwargs)
- else:
- kwargs['asfrom'] = True
- if isinstance(join.right, expression.FromGrouping):
- right = join.right.element
- else:
- right = join.right
- return self.process(join.left, **kwargs) + \
- ", " + self.process(right, **kwargs)
-
-
- def _get_nonansi_join_whereclause(self, froms):
- clauses = []
-
- def visit_join(join):
- if join.isouter:
- def visit_binary(binary):
- if binary.operator == sql_operators.eq:
- if join.right.is_derived_from(binary.left.table):
- binary.left = _OuterJoinColumn(binary.left)
- elif join.right.is_derived_from(binary.right.table):
- binary.right = _OuterJoinColumn(binary.right)
- clauses.append(visitors.cloned_traverse(join.onclause, {},
- {'binary': visit_binary}))
- else:
- clauses.append(join.onclause)
-
- for j in join.left, join.right:
- if isinstance(j, expression.Join):
- visit_join(j)
- elif isinstance(j, expression.FromGrouping):
- visit_join(j.element)
-
- for f in froms:
- if isinstance(f, expression.Join):
- visit_join(f)
-
- if not clauses:
- return None
- else:
- return sql.and_(*clauses)
-
- def visit_outer_join_column(self, vc):
- return self.process(vc.column) + "(+)"
-
- def visit_sequence(self, seq):
- return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
-
- def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
- """Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
-
- if asfrom or ashint:
- alias_name = isinstance(alias.name, expression._truncated_label) and \
- self._truncated_identifier("alias", alias.name) or alias.name
-
- if ashint:
- return alias_name
- elif asfrom:
- return self.process(alias.original, asfrom=asfrom, **kwargs) + \
- " " + self.preparer.format_alias(alias, alias_name)
- else:
- return self.process(alias.original, **kwargs)
-
- def returning_clause(self, stmt, returning_cols):
- columns = []
- binds = []
- for i, column in enumerate(expression._select_iterables(returning_cols)):
- if column.type._has_column_expression:
- col_expr = column.type.column_expression(column)
- else:
- col_expr = column
- outparam = sql.outparam("ret_%d" % i, type_=column.type)
- self.binds[outparam.key] = outparam
- binds.append(self.bindparam_string(self._truncate_bindparam(outparam)))
- columns.append(self.process(col_expr, within_columns_clause=False))
- self.result_map[outparam.key] = (
- outparam.key,
- (column, getattr(column, 'name', None),
- getattr(column, 'key', None)),
- column.type
- )
-
- return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
-
- def _TODO_visit_compound_select(self, select):
- """Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
- pass
-
- def visit_select(self, select, **kwargs):
- """Look for ``LIMIT`` and OFFSET in a select statement, and if
- so tries to wrap it in a subquery with ``rownum`` criterion.
- """
-
- if not getattr(select, '_oracle_visit', None):
- if not self.dialect.use_ansi:
- froms = self._display_froms_for_select(
- select, kwargs.get('asfrom', False))
- whereclause = self._get_nonansi_join_whereclause(froms)
- if whereclause is not None:
- select = select.where(whereclause)
- select._oracle_visit = True
-
- if select._limit is not None or select._offset is not None:
- # See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
- #
- # Generalized form of an Oracle pagination query:
- # select ... from (
- # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
- # select distinct ... where ... order by ...
- # ) where ROWNUM <= :limit+:offset
- # ) where ora_rn > :offset
- # Outer select and "ROWNUM as ora_rn" can be dropped if limit=0
-
- # TODO: use annotations instead of clone + attr set ?
- select = select._generate()
- select._oracle_visit = True
-
- # Wrap the middle select and add the hint
- limitselect = sql.select([c for c in select.c])
- if select._limit and self.dialect.optimize_limits:
- limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit)
-
- limitselect._oracle_visit = True
- limitselect._is_wrapper = True
-
- # If needed, add the limiting clause
- if select._limit is not None:
- max_row = select._limit
- if select._offset is not None:
- max_row += select._offset
- if not self.dialect.use_binds_for_limits:
- max_row = sql.literal_column("%d" % max_row)
- limitselect.append_whereclause(
- sql.literal_column("ROWNUM") <= max_row)
-
- # If needed, add the ora_rn, and wrap again with offset.
- if select._offset is None:
- limitselect._for_update_arg = select._for_update_arg
- select = limitselect
- else:
- limitselect = limitselect.column(
- sql.literal_column("ROWNUM").label("ora_rn"))
- limitselect._oracle_visit = True
- limitselect._is_wrapper = True
-
- offsetselect = sql.select(
- [c for c in limitselect.c if c.key != 'ora_rn'])
- offsetselect._oracle_visit = True
- offsetselect._is_wrapper = True
-
- offset_value = select._offset
- if not self.dialect.use_binds_for_limits:
- offset_value = sql.literal_column("%d" % offset_value)
- offsetselect.append_whereclause(
- sql.literal_column("ora_rn") > offset_value)
-
- offsetselect._for_update_arg = select._for_update_arg
- select = offsetselect
-
- kwargs['iswrapper'] = getattr(select, '_is_wrapper', False)
- return compiler.SQLCompiler.visit_select(self, select, **kwargs)
-
- def limit_clause(self, select):
- return ""
-
- def for_update_clause(self, select):
- if self.is_subquery():
- return ""
-
- tmp = ' FOR UPDATE'
-
- if select._for_update_arg.of:
- tmp += ' OF ' + ', '.join(
- self.process(elem) for elem in
- select._for_update_arg.of
- )
-
- if select._for_update_arg.nowait:
- tmp += " NOWAIT"
-
- return tmp
-
-
-class OracleDDLCompiler(compiler.DDLCompiler):
-
- def define_constraint_cascades(self, constraint):
- text = ""
- if constraint.ondelete is not None:
- text += " ON DELETE %s" % constraint.ondelete
-
- # oracle has no ON UPDATE CASCADE -
- # its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html
- if constraint.onupdate is not None:
- util.warn(
- "Oracle does not contain native UPDATE CASCADE "
- "functionality - onupdates will not be rendered for foreign keys. "
- "Consider using deferrable=True, initially='deferred' or triggers.")
-
- return text
-
- def visit_create_index(self, create, **kw):
- return super(OracleDDLCompiler, self).\
- visit_create_index(create, include_schema=True)
-
-
-class OracleIdentifierPreparer(compiler.IdentifierPreparer):
-
- reserved_words = set([x.lower() for x in RESERVED_WORDS])
- illegal_initial_characters = set(range(0, 10)).union(["_", "$"])
-
- def _bindparam_requires_quotes(self, value):
- """Return True if the given identifier requires quoting."""
- lc_value = value.lower()
- return (lc_value in self.reserved_words
- or value[0] in self.illegal_initial_characters
- or not self.legal_characters.match(util.text_type(value))
- )
-
- def format_savepoint(self, savepoint):
- name = re.sub(r'^_+', '', savepoint.ident)
- return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
-
-
-class OracleExecutionContext(default.DefaultExecutionContext):
- def fire_sequence(self, seq, type_):
- return self._execute_scalar("SELECT " +
- self.dialect.identifier_preparer.format_sequence(seq) +
- ".nextval FROM DUAL", type_)
-
-
-class OracleDialect(default.DefaultDialect):
- name = 'oracle'
- supports_alter = True
- supports_unicode_statements = False
- supports_unicode_binds = False
- max_identifier_length = 30
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = False
-
- supports_sequences = True
- sequences_optional = False
- postfetch_lastrowid = False
-
- default_paramstyle = 'named'
- colspecs = colspecs
- ischema_names = ischema_names
- requires_name_normalize = True
-
- supports_default_values = False
- supports_empty_insert = False
-
- statement_compiler = OracleCompiler
- ddl_compiler = OracleDDLCompiler
- type_compiler = OracleTypeCompiler
- preparer = OracleIdentifierPreparer
- execution_ctx_cls = OracleExecutionContext
-
- reflection_options = ('oracle_resolve_synonyms', )
-
- construct_arguments = [
- (sa_schema.Table, {"resolve_synonyms": False})
- ]
-
- def __init__(self,
- use_ansi=True,
- optimize_limits=False,
- use_binds_for_limits=True,
- **kwargs):
- default.DefaultDialect.__init__(self, **kwargs)
- self.use_ansi = use_ansi
- self.optimize_limits = optimize_limits
- self.use_binds_for_limits = use_binds_for_limits
-
- def initialize(self, connection):
- super(OracleDialect, self).initialize(connection)
- self.implicit_returning = self.__dict__.get(
- 'implicit_returning',
- self.server_version_info > (10, )
- )
-
- if self._is_oracle_8:
- self.colspecs = self.colspecs.copy()
- self.colspecs.pop(sqltypes.Interval)
- self.use_ansi = False
-
- @property
- def _is_oracle_8(self):
- return self.server_version_info and \
- self.server_version_info < (9, )
-
- @property
- def _supports_char_length(self):
- return not self._is_oracle_8
-
- @property
- def _supports_nchar(self):
- return not self._is_oracle_8
-
- def do_release_savepoint(self, connection, name):
- # Oracle does not support RELEASE SAVEPOINT
- pass
-
- def has_table(self, connection, table_name, schema=None):
- if not schema:
- schema = self.default_schema_name
- cursor = connection.execute(
- sql.text("SELECT table_name FROM all_tables "
- "WHERE table_name = :name AND owner = :schema_name"),
- name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema))
- return cursor.first() is not None
-
- def has_sequence(self, connection, sequence_name, schema=None):
- if not schema:
- schema = self.default_schema_name
- cursor = connection.execute(
- sql.text("SELECT sequence_name FROM all_sequences "
- "WHERE sequence_name = :name AND sequence_owner = :schema_name"),
- name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema))
- return cursor.first() is not None
-
- def normalize_name(self, name):
- if name is None:
- return None
- if util.py2k:
- if isinstance(name, str):
- name = name.decode(self.encoding)
- if name.upper() == name and \
- not self.identifier_preparer._requires_quotes(name.lower()):
- return name.lower()
- else:
- return name
-
- def denormalize_name(self, name):
- if name is None:
- return None
- elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
- name = name.upper()
- if util.py2k:
- if not self.supports_unicode_binds:
- name = name.encode(self.encoding)
- else:
- name = unicode(name)
- return name
-
- def _get_default_schema_name(self, connection):
- return self.normalize_name(connection.execute('SELECT USER FROM DUAL').scalar())
-
- def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None):
- """search for a local synonym matching the given desired owner/name.
-
- if desired_owner is None, attempts to locate a distinct owner.
-
- returns the actual name, owner, dblink name, and synonym name if found.
- """
-
- q = "SELECT owner, table_owner, table_name, db_link, "\
- "synonym_name FROM all_synonyms WHERE "
- clauses = []
- params = {}
- if desired_synonym:
- clauses.append("synonym_name = :synonym_name")
- params['synonym_name'] = desired_synonym
- if desired_owner:
- clauses.append("owner = :desired_owner")
- params['desired_owner'] = desired_owner
- if desired_table:
- clauses.append("table_name = :tname")
- params['tname'] = desired_table
-
- q += " AND ".join(clauses)
-
- result = connection.execute(sql.text(q), **params)
- if desired_owner:
- row = result.first()
- if row:
- return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
- else:
- return None, None, None, None
- else:
- rows = result.fetchall()
- if len(rows) > 1:
- raise AssertionError("There are multiple tables visible to the schema, you must specify owner")
- elif len(rows) == 1:
- row = rows[0]
- return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
- else:
- return None, None, None, None
-
- @reflection.cache
- def _prepare_reflection_args(self, connection, table_name, schema=None,
- resolve_synonyms=False, dblink='', **kw):
-
- if resolve_synonyms:
- actual_name, owner, dblink, synonym = self._resolve_synonym(
- connection,
- desired_owner=self.denormalize_name(schema),
- desired_synonym=self.denormalize_name(table_name)
- )
- else:
- actual_name, owner, dblink, synonym = None, None, None, None
- if not actual_name:
- actual_name = self.denormalize_name(table_name)
-
- if dblink:
- # using user_db_links here since all_db_links appears
- # to have more restricted permissions.
- # http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
- # will need to hear from more users if we are doing
- # the right thing here. See [ticket:2619]
- owner = connection.scalar(
- sql.text("SELECT username FROM user_db_links "
- "WHERE db_link=:link"), link=dblink)
- dblink = "@" + dblink
- elif not owner:
- owner = self.denormalize_name(schema or self.default_schema_name)
-
- return (actual_name, owner, dblink or '', synonym)
-
- @reflection.cache
- def get_schema_names(self, connection, **kw):
- s = "SELECT username FROM all_users ORDER BY username"
- cursor = connection.execute(s,)
- return [self.normalize_name(row[0]) for row in cursor]
-
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
- schema = self.denormalize_name(schema or self.default_schema_name)
-
-        # note that table_names() isn't loading DBLINKed or synonym'ed tables
- if schema is None:
- schema = self.default_schema_name
- s = sql.text(
- "SELECT table_name FROM all_tables "
- "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') "
- "AND OWNER = :owner "
- "AND IOT_NAME IS NULL")
- cursor = connection.execute(s, owner=schema)
- return [self.normalize_name(row[0]) for row in cursor]
-
- @reflection.cache
- def get_view_names(self, connection, schema=None, **kw):
- schema = self.denormalize_name(schema or self.default_schema_name)
- s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
- cursor = connection.execute(s, owner=self.denormalize_name(schema))
- return [self.normalize_name(row[0]) for row in cursor]
-
- @reflection.cache
- def get_columns(self, connection, table_name, schema=None, **kw):
- """
-
- kw arguments can be:
-
- oracle_resolve_synonyms
-
- dblink
-
- """
-
- resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
- dblink = kw.get('dblink', '')
- info_cache = kw.get('info_cache')
-
- (table_name, schema, dblink, synonym) = \
- self._prepare_reflection_args(connection, table_name, schema,
- resolve_synonyms, dblink,
- info_cache=info_cache)
- columns = []
- if self._supports_char_length:
- char_length_col = 'char_length'
- else:
- char_length_col = 'data_length'
-
- params = {"table_name": table_name}
- text = "SELECT column_name, data_type, %(char_length_col)s, "\
- "data_precision, data_scale, "\
- "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
- "WHERE table_name = :table_name"
- if schema is not None:
- params['owner'] = schema
- text += " AND owner = :owner "
- text += " ORDER BY column_id"
- text = text % {'dblink': dblink, 'char_length_col': char_length_col}
-
- c = connection.execute(sql.text(text), **params)
-
- for row in c:
- (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
- (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5] == 'Y', row[6])
-
- if coltype == 'NUMBER':
- coltype = NUMBER(precision, scale)
- elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
- coltype = self.ischema_names.get(coltype)(length)
- elif 'WITH TIME ZONE' in coltype:
- coltype = TIMESTAMP(timezone=True)
- else:
- coltype = re.sub(r'\(\d+\)', '', coltype)
- try:
- coltype = self.ischema_names[coltype]
- except KeyError:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (coltype, colname))
- coltype = sqltypes.NULLTYPE
-
- cdict = {
- 'name': colname,
- 'type': coltype,
- 'nullable': nullable,
- 'default': default,
- 'autoincrement': default is None
- }
- if orig_colname.lower() == orig_colname:
- cdict['quote'] = True
-
- columns.append(cdict)
- return columns
-
- @reflection.cache
- def get_indexes(self, connection, table_name, schema=None,
- resolve_synonyms=False, dblink='', **kw):
-
- info_cache = kw.get('info_cache')
- (table_name, schema, dblink, synonym) = \
- self._prepare_reflection_args(connection, table_name, schema,
- resolve_synonyms, dblink,
- info_cache=info_cache)
- indexes = []
-
- params = {'table_name': table_name}
- text = \
- "SELECT a.index_name, a.column_name, b.uniqueness "\
- "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\
- "\nALL_INDEXES%(dblink)s b "\
- "\nWHERE "\
- "\na.index_name = b.index_name "\
- "\nAND a.table_owner = b.table_owner "\
- "\nAND a.table_name = b.table_name "\
- "\nAND a.table_name = :table_name "
-
- if schema is not None:
- params['schema'] = schema
- text += "AND a.table_owner = :schema "
-
- text += "ORDER BY a.index_name, a.column_position"
-
- text = text % {'dblink': dblink}
-
- q = sql.text(text)
- rp = connection.execute(q, **params)
- indexes = []
- last_index_name = None
- pk_constraint = self.get_pk_constraint(
- connection, table_name, schema, resolve_synonyms=resolve_synonyms,
- dblink=dblink, info_cache=kw.get('info_cache'))
- pkeys = pk_constraint['constrained_columns']
- uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
-
- oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
-
- def upper_name_set(names):
- return set([i.upper() for i in names])
-
- pk_names = upper_name_set(pkeys)
-
- def remove_if_primary_key(index):
- # don't include the primary key index
- if index is not None and \
- upper_name_set(index['column_names']) == pk_names:
- indexes.pop()
-
- index = None
- for rset in rp:
- if rset.index_name != last_index_name:
- remove_if_primary_key(index)
- index = dict(name=self.normalize_name(rset.index_name), column_names=[])
- indexes.append(index)
- index['unique'] = uniqueness.get(rset.uniqueness, False)
-
- # filter out Oracle SYS_NC names. could also do an outer join
- # to the all_tab_columns table and check for real col names there.
- if not oracle_sys_col.match(rset.column_name):
- index['column_names'].append(self.normalize_name(rset.column_name))
- last_index_name = rset.index_name
- remove_if_primary_key(index)
- return indexes
-
- @reflection.cache
- def _get_constraint_data(self, connection, table_name, schema=None,
- dblink='', **kw):
-
- params = {'table_name': table_name}
-
- text = \
- "SELECT"\
- "\nac.constraint_name,"\
- "\nac.constraint_type,"\
- "\nloc.column_name AS local_column,"\
- "\nrem.table_name AS remote_table,"\
- "\nrem.column_name AS remote_column,"\
- "\nrem.owner AS remote_owner,"\
- "\nloc.position as loc_pos,"\
- "\nrem.position as rem_pos"\
- "\nFROM all_constraints%(dblink)s ac,"\
- "\nall_cons_columns%(dblink)s loc,"\
- "\nall_cons_columns%(dblink)s rem"\
- "\nWHERE ac.table_name = :table_name"\
- "\nAND ac.constraint_type IN ('R','P')"
-
- if schema is not None:
- params['owner'] = schema
- text += "\nAND ac.owner = :owner"
-
- text += \
- "\nAND ac.owner = loc.owner"\
- "\nAND ac.constraint_name = loc.constraint_name"\
- "\nAND ac.r_owner = rem.owner(+)"\
- "\nAND ac.r_constraint_name = rem.constraint_name(+)"\
- "\nAND (rem.position IS NULL or loc.position=rem.position)"\
- "\nORDER BY ac.constraint_name, loc.position"
-
- text = text % {'dblink': dblink}
- rp = connection.execute(sql.text(text), **params)
- constraint_data = rp.fetchall()
- return constraint_data
-
- @reflection.cache
- def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
- dblink = kw.get('dblink', '')
- info_cache = kw.get('info_cache')
-
- (table_name, schema, dblink, synonym) = \
- self._prepare_reflection_args(connection, table_name, schema,
- resolve_synonyms, dblink,
- info_cache=info_cache)
- pkeys = []
- constraint_name = None
- constraint_data = self._get_constraint_data(connection, table_name,
- schema, dblink,
- info_cache=kw.get('info_cache'))
-
- for row in constraint_data:
- (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
- row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
- if cons_type == 'P':
- if constraint_name is None:
- constraint_name = self.normalize_name(cons_name)
- pkeys.append(local_column)
- return {'constrained_columns': pkeys, 'name': constraint_name}
-
- @reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None, **kw):
- """
-
- kw arguments can be:
-
- oracle_resolve_synonyms
-
- dblink
-
- """
-
- requested_schema = schema # to check later on
- resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
- dblink = kw.get('dblink', '')
- info_cache = kw.get('info_cache')
-
- (table_name, schema, dblink, synonym) = \
- self._prepare_reflection_args(connection, table_name, schema,
- resolve_synonyms, dblink,
- info_cache=info_cache)
-
- constraint_data = self._get_constraint_data(connection, table_name,
- schema, dblink,
- info_cache=kw.get('info_cache'))
-
- def fkey_rec():
- return {
- 'name': None,
- 'constrained_columns': [],
- 'referred_schema': None,
- 'referred_table': None,
- 'referred_columns': []
- }
-
- fkeys = util.defaultdict(fkey_rec)
-
- for row in constraint_data:
- (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
- row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
-
- if cons_type == 'R':
- if remote_table is None:
- # ticket 363
- util.warn(
- ("Got 'None' querying 'table_name' from "
- "all_cons_columns%(dblink)s - does the user have "
- "proper rights to the table?") % {'dblink': dblink})
- continue
-
- rec = fkeys[cons_name]
- rec['name'] = cons_name
- local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
-
- if not rec['referred_table']:
- if resolve_synonyms:
- ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
- self._resolve_synonym(
- connection,
- desired_owner=self.denormalize_name(remote_owner),
- desired_table=self.denormalize_name(remote_table)
- )
- if ref_synonym:
- remote_table = self.normalize_name(ref_synonym)
- remote_owner = self.normalize_name(ref_remote_owner)
-
- rec['referred_table'] = remote_table
-
- if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
- rec['referred_schema'] = remote_owner
-
- local_cols.append(local_column)
- remote_cols.append(remote_column)
-
- return list(fkeys.values())
-
- @reflection.cache
- def get_view_definition(self, connection, view_name, schema=None,
- resolve_synonyms=False, dblink='', **kw):
- info_cache = kw.get('info_cache')
- (view_name, schema, dblink, synonym) = \
- self._prepare_reflection_args(connection, view_name, schema,
- resolve_synonyms, dblink,
- info_cache=info_cache)
-
- params = {'view_name': view_name}
- text = "SELECT text FROM all_views WHERE view_name=:view_name"
-
- if schema is not None:
- text += " AND owner = :schema"
- params['schema'] = schema
-
- rp = connection.execute(sql.text(text), **params).scalar()
- if rp:
- if util.py2k:
- rp = rp.decode(self.encoding)
- return rp
- else:
- return None
-
-
-class _OuterJoinColumn(sql.ClauseElement):
- __visit_name__ = 'outer_join_column'
-
- def __init__(self, column):
- self.column = column
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
deleted file mode 100644
index b8ee90b5..00000000
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ /dev/null
@@ -1,941 +0,0 @@
-# oracle/cx_oracle.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-
-.. dialect:: oracle+cx_oracle
- :name: cx-Oracle
- :dbapi: cx_oracle
- :connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...]
- :url: http://cx-oracle.sourceforge.net/
-
-Additional Connect Arguments
-----------------------------
-
-When connecting with ``dbname`` present, the host, port, and dbname tokens are
-converted to a TNS name using
-the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken
-directly as a TNS name.
-
-Additional arguments which may be specified either as query string arguments
-on the URL, or as keyword arguments to :func:`.create_engine()` are the
-following; a brief usage sketch follows the list:
-
-* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``.
-
-* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
- to 50. This setting is significant with cx_Oracle as the contents of LOB
- objects are only readable within a "live" row (e.g. within a batch of
- 50 rows).
-
-* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
-
-* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
- all bind parameters. This is required for LOB datatypes but can be
- disabled to reduce overhead. Defaults to ``True``. Specific types
- can be excluded from this process using the ``exclude_setinputsizes``
- parameter.
-
-* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
-
-* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
-
-* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
- be excluded from the "auto setinputsizes" feature. The type names here
- must match DBAPI types that are found in the "cx_Oracle" module namespace,
- such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
- ``(STRING, UNICODE)``.
-
- .. versionadded:: 0.8 specific DBAPI types can be excluded from the
- auto_setinputsizes feature via the exclude_setinputsizes attribute.
-
-* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or alternatively
- an integer value. This value is only available as a URL query string
- argument.
-
-* ``threaded`` - enable multithreaded access to cx_oracle connections. Defaults
- to ``True``. Note that this is the opposite default of the cx_Oracle DBAPI
- itself.
-
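-A brief sketch passing a few of these flags as keyword arguments (the connect
-string is a placeholder)::
-
-    from sqlalchemy import create_engine
-
-    engine = create_engine(
-        "oracle+cx_oracle://scott:tiger@host:1521/dbname",
-        arraysize=100,
-        threaded=False,
-        coerce_to_decimal=False
-    )
-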
-.. _cx_oracle_unicode:
-
-Unicode
--------
-
-The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the ability
-to return string results as Python unicode objects natively.
-
-When used in Python 3, cx_Oracle returns all strings as Python unicode objects
-(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
-unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
-column values that are of type ``VARCHAR`` or other non-unicode string types,
-it will return values as Python strings (e.g. bytestrings).
-
-The cx_Oracle SQLAlchemy dialect presents two different options for the use case of
-returning ``VARCHAR`` column values as Python unicode objects under Python 2:
-
-* the cx_Oracle DBAPI has the ability to coerce all string results to Python
-  unicode objects unconditionally using output type handlers. This has
-  the advantage that the unicode conversion is global to all statements
-  at the cx_Oracle driver level, meaning it works with raw textual SQL
-  statements that have no typing information associated. However, this system
-  has been observed to incur significant performance overhead, not only because
-  it takes effect for all string values unconditionally, but also because
-  cx_Oracle under Python 2 seems to use a pure-Python function call in order
-  to do the decode operation, which under CPython can be orders of magnitude
-  slower than doing it using C functions alone.
-
-* SQLAlchemy has unicode-decoding services built in, and when using SQLAlchemy's
- C extensions, these functions do not use any Python function calls and
- are very fast. The disadvantage to this approach is that the unicode
- conversion only takes effect for statements where the :class:`.Unicode` type
- or :class:`.String` type with ``convert_unicode=True`` is explicitly
- associated with the result column. This is the case for any ORM or Core
- query or SQL expression as well as for a :func:`.text` construct that specifies
- output column types, so in the vast majority of cases this is not an issue.
- However, when sending a completely raw string to :meth:`.Connection.execute`,
- this typing information isn't present, unless the string is handled
- within a :func:`.text` construct that adds typing information.
-
-As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
-typing system. This keeps cx_Oracle's expensive Python 2 approach
-disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy detects
-that cx_Oracle is returning unicode objects natively and cx_Oracle's system
-is used.
-
-To re-enable cx_Oracle's output type handler under Python 2, the
-``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
-:func:`.create_engine`::
-
- engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True)
-
-Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results
-as Python unicode under Python 2 without using cx_Oracle's native handlers,
-the :func:`.text` feature can be used::
-
- from sqlalchemy import text, Unicode
- result = conn.execute(text("select username from user").columns(username=Unicode))
-
-.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used for
- unicode results of non-unicode datatypes in Python 2, after they were identified as a major
- performance bottleneck. SQLAlchemy's own unicode facilities are used
- instead.
-
-.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
- cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
-
-.. _cx_oracle_returning:
-
-RETURNING Support
------------------
-
-The cx_oracle DBAPI supports a limited subset of Oracle's already limited RETURNING support.
-In practice, results can only be guaranteed for at most one column being returned;
-this is the typical case when SQLAlchemy uses RETURNING to get just the value of a
-primary-key-associated sequence value. Additional column expressions will
-cause problems in a non-deterministic way, due to cx_oracle's lack of support for
-the OCI_DATA_AT_EXEC API which is required for more complex RETURNING scenarios.
-
-For this reason, stability may be enhanced by disabling RETURNING support completely;
-SQLAlchemy otherwise will use RETURNING to fetch newly sequence-generated
-primary keys. As illustrated in :ref:`oracle_returning`::
-
- engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False)
-
-.. seealso::
-
- http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 - OCI documentation for RETURNING
-
- http://sourceforge.net/mailarchive/message.php?msg_id=31338136 - cx_oracle developer commentary
-
-.. _cx_oracle_lob:
-
-LOB Objects
------------
-
-cx_oracle returns Oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts
-these to strings so that the interface of the Binary type is consistent with that of
-other backends, and so that the linkage to a live cursor is not needed in scenarios
-like result.fetchmany() and result.fetchall(). This means that by default, LOB
-objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live
-cursor is broken.
-
-To disable this processing, pass ``auto_convert_lobs=False`` to :func:`.create_engine()`.
-
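-For example (the DSN here is a placeholder)::
-
-    engine = create_engine("oracle+cx_oracle://dsn", auto_convert_lobs=False)
-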
-Two Phase Transaction Support
------------------------------
-
-Two Phase transactions are implemented using XA transactions, and are known
-to work in a rudimentary fashion with recent versions of cx_Oracle
-as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
-considered to be robust and should still be regarded as experimental.
-
-In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
-two phase which prevents
-a particular DBAPI connection from being consistently usable in both
-prepared transactions as well as traditional DBAPI usage patterns; therefore
-once a particular connection is used via :meth:`.Connection.begin_prepared`,
-all subsequent usages of the underlying DBAPI connection must be within
-the context of prepared transactions.
-
-The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
-connections. Therefore, due to the above glitch, a DBAPI connection that has
-been used in a two-phase operation, and is then returned to the pool, will
-not be usable in a non-two-phase context. To avoid this situation,
-the application can make one of several choices:
-
-* Disable connection pooling using :class:`.NullPool`, as shown in the sketch
-  following this list
-
-* Ensure that the particular :class:`.Engine` in use is only used
- for two-phase operations. A :class:`.Engine` bound to an ORM
- :class:`.Session` which includes ``twophase=True`` will consistently
- use the two-phase transaction style.
-
-* For ad-hoc two-phase operations without disabling pooling, the DBAPI
- connection in use can be evicted from the connection pool using the
- :meth:`.Connection.detach` method.
-
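-A sketch of the :class:`.NullPool` approach (the DSN is a placeholder;
-``allow_twophase`` defaults to ``True`` and is shown only for clarity)::
-
-    from sqlalchemy import create_engine
-    from sqlalchemy.pool import NullPool
-
-    # each checkout produces a fresh DBAPI connection, so a connection used
-    # for a two-phase transaction is never reused in a non-two-phase context
-    engine = create_engine(
-        "oracle+cx_oracle://scott:tiger@dsn",
-        poolclass=NullPool,
-        allow_twophase=True
-    )
-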
-.. versionchanged:: 0.8.0b2,0.7.10
- Support for cx_oracle prepared transactions has been implemented
- and tested.
-
-.. _cx_oracle_numeric:
-
-Precision Numerics
-------------------
-
-The SQLAlchemy dialect goes through a lot of steps to ensure
-that decimal numbers are sent and received with full accuracy.
-An "outputtypehandler" callable is associated with each
-cx_oracle connection object which detects numeric types and
-receives them as string values, instead of receiving a Python
-``float`` directly, which is then passed to the Python
-``Decimal`` constructor. The :class:`.Numeric` and
-:class:`.Float` types under the cx_oracle dialect are aware of
-this behavior, and will coerce the ``Decimal`` to ``float`` if
-the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
-optional on :class:`.Numeric`).
-
-Because the handler coerces to ``Decimal`` in all cases first,
-the feature can detract significantly from performance.
-If precision numerics aren't required, the decimal handling
-can be disabled by passing the flag ``coerce_to_decimal=False``
-to :func:`.create_engine`::
-
- engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
-
-.. versionadded:: 0.7.6
- Add the ``coerce_to_decimal`` flag.
-
-Another option for improving performance is to use the ``cdecimal`` library;
-see :class:`.Numeric` for additional notes.
-
-The handler attempts to use the "precision" and "scale"
-attributes of the result set column to best determine if
-subsequent incoming values should be received as ``Decimal`` as
-opposed to int (in which case no processing is added). There are
-several scenarios where OCI_ does not provide unambiguous data
-as to the numeric type, including some situations where
-individual rows may return a combination of floating point and
-integer values. Certain values for "precision" and "scale" have
-been observed to determine this scenario. When it occurs, the
-outputtypehandler receives as string and then passes off to a
-processing function which detects, for each returned value, if a
-decimal point is present, and if so converts to ``Decimal``,
-otherwise to int. The intention is that simple int-based
-statements like "SELECT my_seq.nextval() FROM DUAL" continue to
-return ints and not ``Decimal`` objects, and that any kind of
-floating point value is received as a string so that there is no
-floating point loss of precision.
-
-The "decimal point is present" logic itself is also sensitive to
-locale. Under OCI_, this is controlled by the NLS_LANG
-environment variable. Upon first connection, the dialect runs a
-test to determine the current "decimal" character, which can be
-a comma "," for european locales. From that point forward the
-outputtypehandler uses that character to represent a decimal
-point. Note that cx_oracle 5.0.3 or greater is required
-when dealing with numerics with locale settings that don't use
-a period "." as the decimal character.
-
-.. versionchanged:: 0.6.6
- The outputtypehandler supports the case where the locale uses a
- comma "," character to represent a decimal point.
-
-.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
-
-"""
-
-from __future__ import absolute_import
-
-from .base import OracleCompiler, OracleDialect, OracleExecutionContext
-from . import base as oracle
-from ...engine import result as _result
-from sqlalchemy import types as sqltypes, util, exc, processors
-import random
-import collections
-import decimal
-import re
-
-
-class _OracleNumeric(sqltypes.Numeric):
- def bind_processor(self, dialect):
- # cx_oracle accepts Decimal objects and floats
- return None
-
- def result_processor(self, dialect, coltype):
- # we apply a cx_oracle type handler to all connections
- # that converts floating point strings to Decimal().
- # However, in some subquery situations, Oracle doesn't
- # give us enough information to determine int or Decimal.
- # It could even be int/Decimal differently on each row,
- # regardless of the scale given for the originating type.
- # So we still need an old school isinstance() handler
- # here for decimals.
-
- if dialect.supports_native_decimal:
- if self.asdecimal:
- fstring = "%%.%df" % self._effective_decimal_return_scale
-
- def to_decimal(value):
- if value is None:
- return None
- elif isinstance(value, decimal.Decimal):
- return value
- else:
- return decimal.Decimal(fstring % value)
-
- return to_decimal
- else:
- if self.precision is None and self.scale is None:
- return processors.to_float
- elif not getattr(self, '_is_oracle_number', False) \
- and self.scale is not None:
- return processors.to_float
- else:
- return None
- else:
- # cx_oracle 4 behavior, will assume
- # floats
- return super(_OracleNumeric, self).\
- result_processor(dialect, coltype)
-
-
-class _OracleDate(sqltypes.Date):
- def bind_processor(self, dialect):
- return None
-
- def result_processor(self, dialect, coltype):
- def process(value):
- if value is not None:
- return value.date()
- else:
- return value
- return process
-
-
-class _LOBMixin(object):
- def result_processor(self, dialect, coltype):
- if not dialect.auto_convert_lobs:
- # return the cx_oracle.LOB directly.
- return None
-
- def process(value):
- if value is not None:
- return value.read()
- else:
- return value
- return process
-
-
-class _NativeUnicodeMixin(object):
- if util.py2k:
- def bind_processor(self, dialect):
- if dialect._cx_oracle_with_unicode:
- def process(value):
- if value is None:
- return value
- else:
- return unicode(value)
- return process
- else:
- return super(_NativeUnicodeMixin, self).bind_processor(dialect)
-
- # we apply a connection output handler that returns
- # unicode in all cases, so the "native_unicode" flag
- # will be set for the default String.result_processor.
-
-
-class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
- def get_dbapi_type(self, dbapi):
- return dbapi.FIXED_CHAR
-
-
-class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
- def get_dbapi_type(self, dbapi):
- return getattr(dbapi, 'UNICODE', dbapi.STRING)
-
-
-class _OracleText(_LOBMixin, sqltypes.Text):
- def get_dbapi_type(self, dbapi):
- return dbapi.CLOB
-
-
-class _OracleLong(oracle.LONG):
- # a raw LONG is a text type, but does *not*
- # get the LobMixin with cx_oracle.
-
- def get_dbapi_type(self, dbapi):
- return dbapi.LONG_STRING
-
-class _OracleString(_NativeUnicodeMixin, sqltypes.String):
- pass
-
-
-class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
- def get_dbapi_type(self, dbapi):
- return dbapi.NCLOB
-
- def result_processor(self, dialect, coltype):
- lob_processor = _LOBMixin.result_processor(self, dialect, coltype)
- if lob_processor is None:
- return None
-
- string_processor = sqltypes.UnicodeText.result_processor(self, dialect, coltype)
-
- if string_processor is None:
- return lob_processor
- else:
- def process(value):
- return string_processor(lob_processor(value))
- return process
-
-
-class _OracleInteger(sqltypes.Integer):
- def result_processor(self, dialect, coltype):
- def to_int(val):
- if val is not None:
- val = int(val)
- return val
- return to_int
-
-
-class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
- def get_dbapi_type(self, dbapi):
- return dbapi.BLOB
-
- def bind_processor(self, dialect):
- return None
-
-
-class _OracleInterval(oracle.INTERVAL):
- def get_dbapi_type(self, dbapi):
- return dbapi.INTERVAL
-
-
-class _OracleRaw(oracle.RAW):
- pass
-
-
-class _OracleRowid(oracle.ROWID):
- def get_dbapi_type(self, dbapi):
- return dbapi.ROWID
-
-
-class OracleCompiler_cx_oracle(OracleCompiler):
- def bindparam_string(self, name, **kw):
- quote = getattr(name, 'quote', None)
- if quote is True or quote is not False and \
- self.preparer._bindparam_requires_quotes(name):
- quoted_name = '"%s"' % name
- self._quoted_bind_names[name] = quoted_name
- return OracleCompiler.bindparam_string(self, quoted_name, **kw)
- else:
- return OracleCompiler.bindparam_string(self, name, **kw)
-
-
-class OracleExecutionContext_cx_oracle(OracleExecutionContext):
-
- def pre_exec(self):
- quoted_bind_names = \
- getattr(self.compiled, '_quoted_bind_names', None)
- if quoted_bind_names:
- if not self.dialect.supports_unicode_statements:
- # if DBAPI doesn't accept unicode statements,
- # keys in self.parameters would have been encoded
- # here. so convert names in quoted_bind_names
- # to encoded as well.
- quoted_bind_names = \
- dict(
- (fromname.encode(self.dialect.encoding),
- toname.encode(self.dialect.encoding))
- for fromname, toname in
- quoted_bind_names.items()
- )
- for param in self.parameters:
- for fromname, toname in quoted_bind_names.items():
- param[toname] = param[fromname]
- del param[fromname]
-
- if self.dialect.auto_setinputsizes:
- # cx_oracle really has issues when you setinputsizes
- # on String, including that outparams/RETURNING
- # breaks for varchars
- self.set_input_sizes(quoted_bind_names,
- exclude_types=self.dialect.exclude_setinputsizes
- )
-
- # if a single execute, check for outparams
- if len(self.compiled_parameters) == 1:
- for bindparam in self.compiled.binds.values():
- if bindparam.isoutparam:
- dbtype = bindparam.type.dialect_impl(self.dialect).\
- get_dbapi_type(self.dialect.dbapi)
- if not hasattr(self, 'out_parameters'):
- self.out_parameters = {}
- if dbtype is None:
- raise exc.InvalidRequestError(
- "Cannot create out parameter for parameter "
- "%r - it's type %r is not supported by"
- " cx_oracle" %
- (bindparam.key, bindparam.type)
- )
- name = self.compiled.bind_names[bindparam]
- self.out_parameters[name] = self.cursor.var(dbtype)
- self.parameters[0][quoted_bind_names.get(name, name)] = \
- self.out_parameters[name]
-
- def create_cursor(self):
- c = self._dbapi_connection.cursor()
- if self.dialect.arraysize:
- c.arraysize = self.dialect.arraysize
-
- return c
-
- def get_result_proxy(self):
- if hasattr(self, 'out_parameters') and self.compiled.returning:
- returning_params = dict(
- (k, v.getvalue())
- for k, v in self.out_parameters.items()
- )
- return ReturningResultProxy(self, returning_params)
-
- result = None
- if self.cursor.description is not None:
- for column in self.cursor.description:
- type_code = column[1]
- if type_code in self.dialect._cx_oracle_binary_types:
- result = _result.BufferedColumnResultProxy(self)
-
- if result is None:
- result = _result.ResultProxy(self)
-
- if hasattr(self, 'out_parameters'):
- if self.compiled_parameters is not None and \
- len(self.compiled_parameters) == 1:
- result.out_parameters = out_parameters = {}
-
- for bind, name in self.compiled.bind_names.items():
- if name in self.out_parameters:
- type = bind.type
- impl_type = type.dialect_impl(self.dialect)
- dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
- result_processor = impl_type.\
- result_processor(self.dialect,
- dbapi_type)
- if result_processor is not None:
- out_parameters[name] = \
- result_processor(self.out_parameters[name].getvalue())
- else:
- out_parameters[name] = self.out_parameters[name].getvalue()
- else:
- result.out_parameters = dict(
- (k, v.getvalue())
- for k, v in self.out_parameters.items()
- )
-
- return result
-
-
-class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle):
-    """Support WITH_UNICODE in Python 2.x.
-
- WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
- behavior under Python 2.x. This mode in some cases disallows
- and in other cases silently passes corrupted data when
- non-Python-unicode strings (a.k.a. plain old Python strings)
- are passed as arguments to connect(), the statement sent to execute(),
- or any of the bind parameter keys or values sent to execute().
- This optional context therefore ensures that all statements are
- passed as Python unicode objects.
-
- """
- def __init__(self, *arg, **kw):
- OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
- self.statement = util.text_type(self.statement)
-
- def _execute_scalar(self, stmt):
- return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
- _execute_scalar(util.text_type(stmt))
-
-
-class ReturningResultProxy(_result.FullyBufferedResultProxy):
- """Result proxy which stuffs the _returning clause + outparams into the fetch."""
-
- def __init__(self, context, returning_params):
- self._returning_params = returning_params
- super(ReturningResultProxy, self).__init__(context)
-
- def _cursor_description(self):
- returning = self.context.compiled.returning
- return [
- ("ret_%d" % i, None)
- for i, col in enumerate(returning)
- ]
-
- def _buffer_rows(self):
- return collections.deque([tuple(self._returning_params["ret_%d" % i]
- for i, c in enumerate(self._returning_params))])
-
-
-class OracleDialect_cx_oracle(OracleDialect):
- execution_ctx_cls = OracleExecutionContext_cx_oracle
- statement_compiler = OracleCompiler_cx_oracle
-
- driver = "cx_oracle"
-
-    colspecs = {
- sqltypes.Numeric: _OracleNumeric,
- sqltypes.Date: _OracleDate, # generic type, assume datetime.date is desired
- sqltypes.LargeBinary: _OracleBinary,
- sqltypes.Boolean: oracle._OracleBoolean,
- sqltypes.Interval: _OracleInterval,
- oracle.INTERVAL: _OracleInterval,
- sqltypes.Text: _OracleText,
- sqltypes.String: _OracleString,
- sqltypes.UnicodeText: _OracleUnicodeText,
- sqltypes.CHAR: _OracleChar,
-
- # a raw LONG is a text type, but does *not*
- # get the LobMixin with cx_oracle.
- oracle.LONG: _OracleLong,
-
- # this is only needed for OUT parameters.
- # it would be nice if we could not use it otherwise.
- sqltypes.Integer: _OracleInteger,
-
- oracle.RAW: _OracleRaw,
- sqltypes.Unicode: _OracleNVarChar,
- sqltypes.NVARCHAR: _OracleNVarChar,
- oracle.ROWID: _OracleRowid,
- }
-
- execute_sequence_format = list
-
- def __init__(self,
- auto_setinputsizes=True,
- exclude_setinputsizes=("STRING", "UNICODE"),
- auto_convert_lobs=True,
- threaded=True,
- allow_twophase=True,
- coerce_to_decimal=True,
- coerce_to_unicode=False,
- arraysize=50, **kwargs):
- OracleDialect.__init__(self, **kwargs)
- self.threaded = threaded
- self.arraysize = arraysize
- self.allow_twophase = allow_twophase
- self.supports_timestamp = self.dbapi is None or \
- hasattr(self.dbapi, 'TIMESTAMP')
- self.auto_setinputsizes = auto_setinputsizes
- self.auto_convert_lobs = auto_convert_lobs
-
- if hasattr(self.dbapi, 'version'):
- self.cx_oracle_ver = tuple([int(x) for x in
- self.dbapi.version.split('.')])
- else:
- self.cx_oracle_ver = (0, 0, 0)
-
- def types(*names):
- return set(
- getattr(self.dbapi, name, None) for name in names
- ).difference([None])
-
- self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
- self._cx_oracle_string_types = types("STRING", "UNICODE",
- "NCLOB", "CLOB")
- self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
- self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
- self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
-
- self.coerce_to_unicode = (
- self.cx_oracle_ver >= (5, 0) and
- coerce_to_unicode
- )
-
- self.supports_native_decimal = (
- self.cx_oracle_ver >= (5, 0) and
- coerce_to_decimal
- )
-
- self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)
-
- if self.cx_oracle_ver is None:
- # this occurs in tests with mock DBAPIs
- self._cx_oracle_string_types = set()
- self._cx_oracle_with_unicode = False
- elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'):
- # cx_Oracle WITH_UNICODE mode. *only* python
- # unicode objects accepted for anything
- self.supports_unicode_statements = True
- self.supports_unicode_binds = True
- self._cx_oracle_with_unicode = True
-
- if util.py2k:
- # There's really no reason to run with WITH_UNICODE under Python 2.x.
- # Give the user a hint.
- util.warn(
-                    "cx_Oracle is compiled under Python 2.x using the "
- "WITH_UNICODE flag. Consider recompiling cx_Oracle "
- "without this flag, which is in no way necessary for full "
- "support of Unicode. Otherwise, all string-holding bind "
- "parameters must be explicitly typed using SQLAlchemy's "
-                    "String type or one of its subtypes, "
- "or otherwise be passed as Python unicode. "
- "Plain Python strings passed as bind parameters will be "
- "silently corrupted by cx_Oracle."
- )
- self.execution_ctx_cls = \
- OracleExecutionContext_cx_oracle_with_unicode
- else:
- self._cx_oracle_with_unicode = False
-
- if self.cx_oracle_ver is None or \
- not self.auto_convert_lobs or \
- not hasattr(self.dbapi, 'CLOB'):
- self.dbapi_type_map = {}
- else:
-            # only use this for LOB objects; using it for strings, dates,
-            # etc. leads to a little too much magic, as reflection doesn't
-            # know whether it should expect encoded strings or unicodes, etc.
- self.dbapi_type_map = {
- self.dbapi.CLOB: oracle.CLOB(),
- self.dbapi.NCLOB: oracle.NCLOB(),
- self.dbapi.BLOB: oracle.BLOB(),
- self.dbapi.BINARY: oracle.RAW(),
- }
-
- @classmethod
- def dbapi(cls):
- import cx_Oracle
- return cx_Oracle
-
- def initialize(self, connection):
- super(OracleDialect_cx_oracle, self).initialize(connection)
- if self._is_oracle_8:
- self.supports_unicode_binds = False
- self._detect_decimal_char(connection)
-
- def _detect_decimal_char(self, connection):
- """detect if the decimal separator character is not '.', as
-        is the case with European locale settings for NLS_LANG.
-
- cx_oracle itself uses similar logic when it formats Python
- Decimal objects to strings on the bind side (as of 5.0.3),
- as Oracle sends/receives string numerics only in the
- current locale.
-
- """
- if self.cx_oracle_ver < (5,):
- # no output type handlers before version 5
- return
-
- cx_Oracle = self.dbapi
- conn = connection.connection
-
- # override the output_type_handler that's
- # on the cx_oracle connection with a plain
- # one on the cursor
-
- def output_type_handler(cursor, name, defaultType,
- size, precision, scale):
- return cursor.var(
- cx_Oracle.STRING,
- 255, arraysize=cursor.arraysize)
-
- cursor = conn.cursor()
- cursor.outputtypehandler = output_type_handler
- cursor.execute("SELECT 0.1 FROM DUAL")
- val = cursor.fetchone()[0]
- cursor.close()
- char = re.match(r"([\.,])", val).group(1)
- if char != '.':
- _detect_decimal = self._detect_decimal
- self._detect_decimal = \
- lambda value: _detect_decimal(value.replace(char, '.'))
- self._to_decimal = \
- lambda value: decimal.Decimal(value.replace(char, '.'))
-
- def _detect_decimal(self, value):
- if "." in value:
- return decimal.Decimal(value)
- else:
- return int(value)
-
- _to_decimal = decimal.Decimal
-
- def on_connect(self):
- if self.cx_oracle_ver < (5,):
- # no output type handlers before version 5
- return
-
- cx_Oracle = self.dbapi
-
- def output_type_handler(cursor, name, defaultType,
- size, precision, scale):
- # convert all NUMBER with precision + positive scale to Decimal
- # this almost allows "native decimal" mode.
- if self.supports_native_decimal and \
- defaultType == cx_Oracle.NUMBER and \
- precision and scale > 0:
- return cursor.var(
- cx_Oracle.STRING,
- 255,
- outconverter=self._to_decimal,
- arraysize=cursor.arraysize)
- # if NUMBER with zero precision and 0 or neg scale, this appears
- # to indicate "ambiguous". Use a slower converter that will
- # make a decision based on each value received - the type
- # may change from row to row (!). This kills
- # off "native decimal" mode, handlers still needed.
- elif self.supports_native_decimal and \
- defaultType == cx_Oracle.NUMBER \
- and not precision and scale <= 0:
- return cursor.var(
- cx_Oracle.STRING,
- 255,
- outconverter=self._detect_decimal,
- arraysize=cursor.arraysize)
- # allow all strings to come back natively as Unicode
- elif self.coerce_to_unicode and \
- defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
- return cursor.var(util.text_type, size, cursor.arraysize)
-
- def on_connect(conn):
- conn.outputtypehandler = output_type_handler
-
- return on_connect
-
- def create_connect_args(self, url):
- dialect_opts = dict(url.query)
- for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
- 'threaded', 'allow_twophase'):
- if opt in dialect_opts:
- util.coerce_kw_type(dialect_opts, opt, bool)
- setattr(self, opt, dialect_opts[opt])
-
- if url.database:
- # if we have a database, then we have a remote host
- port = url.port
- if port:
- port = int(port)
- else:
- port = 1521
- dsn = self.dbapi.makedsn(url.host, port, url.database)
- else:
- # we have a local tnsname
- dsn = url.host
-
- opts = dict(
- user=url.username,
- password=url.password,
- dsn=dsn,
- threaded=self.threaded,
- twophase=self.allow_twophase,
- )
-
- if util.py2k:
- if self._cx_oracle_with_unicode:
- for k, v in opts.items():
- if isinstance(v, str):
- opts[k] = unicode(v)
- else:
- for k, v in opts.items():
- if isinstance(v, unicode):
- opts[k] = str(v)
-
- if 'mode' in url.query:
- opts['mode'] = url.query['mode']
- if isinstance(opts['mode'], util.string_types):
- mode = opts['mode'].upper()
- if mode == 'SYSDBA':
- opts['mode'] = self.dbapi.SYSDBA
- elif mode == 'SYSOPER':
- opts['mode'] = self.dbapi.SYSOPER
- else:
- util.coerce_kw_type(opts, 'mode', int)
- return ([], opts)
-
- def _get_server_version_info(self, connection):
- return tuple(
- int(x)
- for x in connection.connection.version.split('.')
- )
-
- def is_disconnect(self, e, connection, cursor):
- error, = e.args
- if isinstance(e, self.dbapi.InterfaceError):
- return "not connected" in str(e)
- elif hasattr(error, 'code'):
- # ORA-00028: your session has been killed
- # ORA-03114: not connected to ORACLE
- # ORA-03113: end-of-file on communication channel
- # ORA-03135: connection lost contact
- # ORA-01033: ORACLE initialization or shutdown in progress
- # ORA-02396: exceeded maximum idle time, please connect again
- # TODO: Others ?
- return error.code in (28, 3114, 3113, 3135, 1033, 2396)
- else:
- return False
-
- def create_xid(self):
- """create a two-phase transaction ID.
-
- this id will be passed to do_begin_twophase(), do_rollback_twophase(),
- do_commit_twophase(). its format is unspecified."""
-
- id = random.randint(0, 2 ** 128)
- return (0x1234, "%032x" % id, "%032x" % 9)
-
- def do_executemany(self, cursor, statement, parameters, context=None):
- if isinstance(parameters, tuple):
- parameters = list(parameters)
- cursor.executemany(statement, parameters)
-
- def do_begin_twophase(self, connection, xid):
- connection.connection.begin(*xid)
-
- def do_prepare_twophase(self, connection, xid):
- result = connection.connection.prepare()
- connection.info['cx_oracle_prepared'] = result
-
- def do_rollback_twophase(self, connection, xid, is_prepared=True,
- recover=False):
- self.do_rollback(connection.connection)
-
- def do_commit_twophase(self, connection, xid, is_prepared=True,
- recover=False):
- if not is_prepared:
- self.do_commit(connection.connection)
- else:
- oci_prepared = connection.info['cx_oracle_prepared']
- if oci_prepared:
- self.do_commit(connection.connection)
-
- def do_recover_twophase(self, connection):
- connection.info.pop('cx_oracle_prepared', None)
-
-dialect = OracleDialect_cx_oracle
diff --git a/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
deleted file mode 100644
index 710645b2..00000000
--- a/lib/sqlalchemy/dialects/oracle/zxjdbc.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# oracle/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: oracle+zxjdbc
- :name: zxJDBC for Jython
- :dbapi: zxjdbc
- :connectstring: oracle+zxjdbc://user:pass@host/dbname
- :driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
-
-"""
-import decimal
-import re
-
-from sqlalchemy import sql, types as sqltypes, util
-from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
-from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
-from sqlalchemy.engine import result as _result
-from sqlalchemy.sql import expression
-import collections
-
-SQLException = zxJDBC = None
-
-
-class _ZxJDBCDate(sqltypes.Date):
-
- def result_processor(self, dialect, coltype):
- def process(value):
- if value is None:
- return None
- else:
- return value.date()
- return process
-
-
-class _ZxJDBCNumeric(sqltypes.Numeric):
-
- def result_processor(self, dialect, coltype):
- #XXX: does the dialect return Decimal or not???
- # if it does (in all cases), we could use a None processor as well as
- # the to_float generic processor
- if self.asdecimal:
- def process(value):
- if isinstance(value, decimal.Decimal):
- return value
- else:
- return decimal.Decimal(str(value))
- else:
- def process(value):
- if isinstance(value, decimal.Decimal):
- return float(value)
- else:
- return value
- return process
-
-
-class OracleCompiler_zxjdbc(OracleCompiler):
-
- def returning_clause(self, stmt, returning_cols):
- self.returning_cols = list(expression._select_iterables(returning_cols))
-
- # within_columns_clause=False so that labels (foo AS bar) don't render
- columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
- for c in self.returning_cols]
-
- if not hasattr(self, 'returning_parameters'):
- self.returning_parameters = []
-
- binds = []
- for i, col in enumerate(self.returning_cols):
- dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
- self.returning_parameters.append((i + 1, dbtype))
-
- bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
- self.binds[bindparam.key] = bindparam
- binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
-
- return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
-
-
-class OracleExecutionContext_zxjdbc(OracleExecutionContext):
-
- def pre_exec(self):
- if hasattr(self.compiled, 'returning_parameters'):
- # prepare a zxJDBC statement so we can grab its underlying
- # OraclePreparedStatement's getReturnResultSet later
- self.statement = self.cursor.prepare(self.statement)
-
- def get_result_proxy(self):
- if hasattr(self.compiled, 'returning_parameters'):
- rrs = None
- try:
- try:
- rrs = self.statement.__statement__.getReturnResultSet()
- next(rrs)
- except SQLException as sqle:
- msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
- if sqle.getSQLState() is not None:
- msg += ' [SQLState: %s]' % sqle.getSQLState()
- raise zxJDBC.Error(msg)
- else:
- row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
- for index, dbtype in self.compiled.returning_parameters)
- return ReturningResultProxy(self, row)
- finally:
- if rrs is not None:
- try:
- rrs.close()
- except SQLException:
- pass
- self.statement.close()
-
- return _result.ResultProxy(self)
-
- def create_cursor(self):
- cursor = self._dbapi_connection.cursor()
- cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
- return cursor
-
-
-class ReturningResultProxy(_result.FullyBufferedResultProxy):
-
- """ResultProxy backed by the RETURNING ResultSet results."""
-
- def __init__(self, context, returning_row):
- self._returning_row = returning_row
- super(ReturningResultProxy, self).__init__(context)
-
- def _cursor_description(self):
- ret = []
- for c in self.context.compiled.returning_cols:
- if hasattr(c, 'name'):
- ret.append((c.name, c.type))
- else:
- ret.append((c.anon_label, c.type))
- return ret
-
- def _buffer_rows(self):
- return collections.deque([self._returning_row])
-
-
-class ReturningParam(object):
-
- """A bindparam value representing a RETURNING parameter.
-
- Specially handled by OracleReturningDataHandler.
- """
-
- def __init__(self, type):
- self.type = type
-
- def __eq__(self, other):
- if isinstance(other, ReturningParam):
- return self.type == other.type
- return NotImplemented
-
- def __ne__(self, other):
- if isinstance(other, ReturningParam):
- return self.type != other.type
- return NotImplemented
-
- def __repr__(self):
- kls = self.__class__
- return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
- self.type)
-
-
-class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
- jdbc_db_name = 'oracle'
- jdbc_driver_name = 'oracle.jdbc.OracleDriver'
-
- statement_compiler = OracleCompiler_zxjdbc
- execution_ctx_cls = OracleExecutionContext_zxjdbc
-
- colspecs = util.update_copy(
- OracleDialect.colspecs,
- {
- sqltypes.Date: _ZxJDBCDate,
- sqltypes.Numeric: _ZxJDBCNumeric
- }
- )
-
- def __init__(self, *args, **kwargs):
- super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
- global SQLException, zxJDBC
- from java.sql import SQLException
- from com.ziclix.python.sql import zxJDBC
- from com.ziclix.python.sql.handler import OracleDataHandler
-
- class OracleReturningDataHandler(OracleDataHandler):
- """zxJDBC DataHandler that specially handles ReturningParam."""
-
- def setJDBCObject(self, statement, index, object, dbtype=None):
- if type(object) is ReturningParam:
- statement.registerReturnParameter(index, object.type)
- elif dbtype is None:
- OracleDataHandler.setJDBCObject(
- self, statement, index, object)
- else:
- OracleDataHandler.setJDBCObject(
- self, statement, index, object, dbtype)
- self.DataHandler = OracleReturningDataHandler
-
- def initialize(self, connection):
- super(OracleDialect_zxjdbc, self).initialize(connection)
- self.implicit_returning = connection.connection.driverversion >= '10.2'
-
- def _create_jdbc_url(self, url):
- return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)
-
- def _get_server_version_info(self, connection):
- version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
- return tuple(int(x) for x in version.split('.'))
-
-dialect = OracleDialect_zxjdbc
diff --git a/lib/sqlalchemy/dialects/postgres.py b/lib/sqlalchemy/dialects/postgres.py
deleted file mode 100644
index 6ed7e18b..00000000
--- a/lib/sqlalchemy/dialects/postgres.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# dialects/postgres.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-# backwards compat with the old name
-from sqlalchemy.util import warn_deprecated
-
-warn_deprecated(
- "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'. "
-    "The new URL format is postgresql[+driver]://<user>:<pass>@<host>/<dbname>"
- )
-
-from sqlalchemy.dialects.postgresql import *
-from sqlalchemy.dialects.postgresql import base
diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py
deleted file mode 100644
index 180e9fc7..00000000
--- a/lib/sqlalchemy/dialects/postgresql/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# postgresql/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from . import base, psycopg2, pg8000, pypostgresql, zxjdbc
-
-base.dialect = psycopg2.dialect
-
-from .base import \
- INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
- INET, CIDR, UUID, BIT, MACADDR, DOUBLE_PRECISION, TIMESTAMP, TIME, \
- DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
- TSVECTOR
-from .constraints import ExcludeConstraint
-from .hstore import HSTORE, hstore
-from .json import JSON, JSONElement
-from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
- TSTZRANGE
-
-__all__ = (
- 'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
- 'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR',
- 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
- 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
- 'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
- 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONElement'
-)
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
deleted file mode 100644
index f69a6e01..00000000
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ /dev/null
@@ -1,2367 +0,0 @@
-# postgresql/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: postgresql
- :name: PostgreSQL
-
-
-Sequences/SERIAL
-----------------
-
-PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
-of creating new primary key values for integer-based primary key columns. When
-creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
-integer-based primary key columns, which generates a sequence and server side
-default corresponding to the column.
-
-To specify a specific named sequence to be used for primary key generation,
-use the :func:`~sqlalchemy.schema.Sequence` construct::
-
- Table('sometable', metadata,
- Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
- )
-
-When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
-having the "last insert identifier" available, a RETURNING clause is added to
-the INSERT statement which specifies the primary key columns should be
-returned after the statement completes. The RETURNING functionality only takes
-place if Postgresql 8.2 or later is in use. As a fallback approach, the
-sequence, whether specified explicitly or implicitly via ``SERIAL``, is
-executed independently beforehand, the returned value to be used in the
-subsequent insert. Note that when an
-:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
-"executemany" semantics, the "last inserted identifier" functionality does not
-apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
-case.
-
-To disable the usage of RETURNING by default, specify the flag
-``implicit_returning=False`` to :func:`.create_engine`.
-
-.. _postgresql_isolation_level:
-
-Transaction Isolation Level
----------------------------
-
-All Postgresql dialects support setting of transaction isolation level
-both via a dialect-specific parameter ``isolation_level``
-accepted by :func:`.create_engine`,
-as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`.
-When using a non-psycopg2 dialect, this feature works by issuing the
-command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL
-<level>`` for each new connection.
-
-To set isolation level using :func:`.create_engine`::
-
- engine = create_engine(
- "postgresql+pg8000://scott:tiger@localhost/test",
- isolation_level="READ UNCOMMITTED"
- )
-
-To set using per-connection execution options::
-
- connection = engine.connect()
- connection = connection.execution_options(isolation_level="READ COMMITTED")
-
-Valid values for ``isolation_level`` include:
-
-* ``READ COMMITTED``
-* ``READ UNCOMMITTED``
-* ``REPEATABLE READ``
-* ``SERIALIZABLE``
-
-The :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect also offers the special level ``AUTOCOMMIT``. See
-:ref:`psycopg2_isolation_level` for details.
-
-.. _postgresql_schema_reflection:
-
-Remote-Schema Table Introspection and Postgresql search_path
-------------------------------------------------------------
-
-The Postgresql dialect can reflect tables from any schema. The
-:paramref:`.Table.schema` argument, or alternatively the
-:paramref:`.MetaData.reflect.schema` argument determines which schema will
-be searched for the table or tables. The reflected :class:`.Table` objects
-will in all cases retain this ``.schema`` attribute as was specified. However,
-with regards to tables which these :class:`.Table` objects refer to via
-foreign key constraint, a decision must be made as to how the ``.schema``
-is represented in those remote tables, in the case where that remote
-schema name is also a member of the current
-`Postgresql search path `_.
-
-By default, the Postgresql dialect mimics the behavior encouraged by
-Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
-returns a sample definition for a particular foreign key constraint,
-omitting the referenced schema name from that definition when the name is
-also in the Postgresql schema search path. The interaction below
-illustrates this behavior::
-
- test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
- CREATE TABLE
- test=> CREATE TABLE referring(
- test(> id INTEGER PRIMARY KEY,
- test(> referred_id INTEGER REFERENCES test_schema.referred(id));
- CREATE TABLE
- test=> SET search_path TO public, test_schema;
- test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
- test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
- test-> WHERE c.relname='referring' AND r.contype = 'f'
- test-> ;
- pg_get_constraintdef
- ---------------------------------------------------
- FOREIGN KEY (referred_id) REFERENCES referred(id)
- (1 row)
-
-Above, we created a table ``referred`` as a member of the remote schema ``test_schema``, however
-when we added ``test_schema`` to the PG ``search_path`` and then asked ``pg_get_constraintdef()``
-for the ``FOREIGN KEY`` syntax, ``test_schema`` was not included in the
-output of the function.
-
-On the other hand, if we set the search path back to the typical default
-of ``public``::
-
- test=> SET search_path TO public;
- SET
-
-The same query against ``pg_get_constraintdef()`` now returns the fully
-schema-qualified name for us::
-
- test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
- test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
- test-> WHERE c.relname='referring' AND r.contype = 'f';
- pg_get_constraintdef
- ---------------------------------------------------------------
- FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
- (1 row)
-
-SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
-in order to determine the remote schema name. That is, if our ``search_path``
-were set to include ``test_schema``, and we invoked a table
-reflection process as follows::
-
- >>> from sqlalchemy import Table, MetaData, create_engine
- >>> engine = create_engine("postgresql://scott:tiger@localhost/test")
- >>> with engine.connect() as conn:
- ... conn.execute("SET search_path TO test_schema, public")
- ... meta = MetaData()
- ... referring = Table('referring', meta, autoload=True, autoload_with=conn)
- ...
-
-
-The above process would deliver to the :attr:`.MetaData.tables` collection
-the ``referred`` table named **without** the schema::
-
- >>> meta.tables['referred'].schema is None
- True
-
-To alter the behavior of reflection such that the referred schema is maintained
-regardless of the ``search_path`` setting, use the ``postgresql_ignore_search_path``
-option, which can be specified as a dialect-specific argument to both
-:class:`.Table` as well as :meth:`.MetaData.reflect`::
-
- >>> with engine.connect() as conn:
- ... conn.execute("SET search_path TO test_schema, public")
- ... meta = MetaData()
- ... referring = Table('referring', meta, autoload=True, autoload_with=conn,
- ... postgresql_ignore_search_path=True)
- ...
-
-
-We will now have ``test_schema.referred`` stored as schema-qualified::
-
- >>> meta.tables['test_schema.referred'].schema
- 'test_schema'
-
-.. sidebar:: Best Practices for Postgresql Schema reflection
-
- The description of Postgresql schema reflection behavior is complex, and is
- the product of many years of dealing with widely varied use cases and user preferences.
- But in fact, there's no need to understand any of it if you just stick to the simplest
- use pattern: leave the ``search_path`` set to its default of ``public`` only, never refer
- to the name ``public`` as an explicit schema name otherwise, and
- refer to all other schema names explicitly when building
- up a :class:`.Table` object. The options described here are only for those users
- who can't, or prefer not to, stay within these guidelines.
-
-Note that **in all cases**, the "default" schema is always reflected as ``None``.
-The "default" schema on Postgresql is that which is returned by the
-Postgresql ``current_schema()`` function. On a typical Postgresql installation,
-this is the name ``public``. So a table that refers to another which is
-in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute
-set to ``None``.
-
-.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
- dialect-level option accepted by :class:`.Table` and :meth:`.MetaData.reflect`.
-
-
-.. seealso::
-
- `The Schema Search Path `_ - on the Postgresql website.
-
-INSERT/UPDATE...RETURNING
--------------------------
-
-The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
-``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
-for single-row INSERT statements in order to fetch newly generated
-primary key identifiers. To specify an explicit ``RETURNING`` clause,
-use the :meth:`._UpdateBase.returning` method on a per-statement basis::
-
- # INSERT..RETURNING
- result = table.insert().returning(table.c.col1, table.c.col2).\\
- values(name='foo')
- print result.fetchall()
-
- # UPDATE..RETURNING
- result = table.update().returning(table.c.col1, table.c.col2).\\
- where(table.c.name=='foo').values(name='bar')
- print result.fetchall()
-
- # DELETE..RETURNING
- result = table.delete().returning(table.c.col1, table.c.col2).\\
- where(table.c.name=='foo')
- print result.fetchall()
-
-.. _postgresql_match:
-
-Full Text Search
-----------------
-
-SQLAlchemy makes available the Postgresql ``@@`` operator via the
-:meth:`.ColumnElement.match` method on any textual column expression.
-On a Postgresql dialect, an expression like the following::
-
- select([sometable.c.text.match("search string")])
-
-will emit to the database::
-
- SELECT text @@ to_tsquery('search string') FROM table
-
-The Postgresql text search functions such as ``to_tsquery()``
-and ``to_tsvector()`` are available
-explicitly using the standard :attr:`.func` construct. For example::
-
- select([
- func.to_tsvector('fat cats ate rats').match('cat & rat')
- ])
-
-Emits the equivalent of::
-
- SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
-
-The :class:`.postgresql.TSVECTOR` type can provide for explicit CAST::
-
- from sqlalchemy.dialects.postgresql import TSVECTOR
- from sqlalchemy import select, cast
- select([cast("some text", TSVECTOR)])
-
-produces a statement equivalent to::
-
- SELECT CAST('some text' AS TSVECTOR) AS anon_1
-
-
-FROM ONLY ...
-------------------------
-
-The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
-table in an inheritance hierarchy. This can be used to produce the
-``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
-syntaxes. It uses SQLAlchemy's hints mechanism::
-
- # SELECT ... FROM ONLY ...
- result = table.select().with_hint(table, 'ONLY', 'postgresql')
- print result.fetchall()
-
- # UPDATE ONLY ...
- table.update(values=dict(foo='bar')).with_hint('ONLY',
- dialect_name='postgresql')
-
- # DELETE FROM ONLY ...
- table.delete().with_hint('ONLY', dialect_name='postgresql')
-
-.. _postgresql_indexes:
-
-Postgresql-Specific Index Options
----------------------------------
-
-Several extensions to the :class:`.Index` construct are available, specific
-to the PostgreSQL dialect.
-
-Partial Indexes
-^^^^^^^^^^^^^^^^
-
-Partial indexes add criteria to the index definition so that the index is
-applied to a subset of rows. These can be specified on :class:`.Index`
-using the ``postgresql_where`` keyword argument::
-
- Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)
-
-Operator Classes
-^^^^^^^^^^^^^^^^^
-
-PostgreSQL allows the specification of an *operator class* for each column of
-an index (see
-http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
-The :class:`.Index` construct allows these to be specified via the
-``postgresql_ops`` keyword argument::
-
- Index('my_index', my_table.c.id, my_table.c.data,
- postgresql_ops={
- 'data': 'text_pattern_ops',
- 'id': 'int4_ops'
- })
-
-.. versionadded:: 0.7.2
- ``postgresql_ops`` keyword argument to :class:`.Index` construct.
-
-Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
-the :class:`.Column`, i.e. the name used to access it from the ``.c``
-collection of :class:`.Table`, which can be configured to be different than
-the actual name of the column as expressed in the database.
-
-Index Types
-^^^^^^^^^^^^
-
-PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
-as the ability for users to create their own (see
-http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
-specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
-
- Index('my_index', my_table.c.data, postgresql_using='gin')
-
-The value passed to the keyword argument will be simply passed through to the
-underlying CREATE INDEX command, so it *must* be a valid index type for your
-version of PostgreSQL.
-
-"""
-from collections import defaultdict
-import re
-
-from ... import sql, schema, exc, util
-from ...engine import default, reflection
-from ...sql import compiler, expression, operators
-from ... import types as sqltypes
-
-try:
- from uuid import UUID as _python_UUID
-except ImportError:
- _python_UUID = None
-
-from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
- CHAR, TEXT, FLOAT, NUMERIC, \
- DATE, BOOLEAN, REAL
-
-RESERVED_WORDS = set(
- ["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
- "asymmetric", "both", "case", "cast", "check", "collate", "column",
- "constraint", "create", "current_catalog", "current_date",
- "current_role", "current_time", "current_timestamp", "current_user",
- "default", "deferrable", "desc", "distinct", "do", "else", "end",
- "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
- "having", "in", "initially", "intersect", "into", "leading", "limit",
- "localtime", "localtimestamp", "new", "not", "null", "of", "off", "offset",
- "old", "on", "only", "or", "order", "placing", "primary", "references",
- "returning", "select", "session_user", "some", "symmetric", "table",
- "then", "to", "trailing", "true", "union", "unique", "user", "using",
- "variadic", "when", "where", "window", "with", "authorization",
- "between", "binary", "cross", "current_schema", "freeze", "full",
- "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
- "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
- ])
-
-_DECIMAL_TYPES = (1231, 1700)
-_FLOAT_TYPES = (700, 701, 1021, 1022)
-_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
-
-
-class BYTEA(sqltypes.LargeBinary):
- __visit_name__ = 'BYTEA'
-
-
-class DOUBLE_PRECISION(sqltypes.Float):
- __visit_name__ = 'DOUBLE_PRECISION'
-
-
-class INET(sqltypes.TypeEngine):
- __visit_name__ = "INET"
-PGInet = INET
-
-
-class CIDR(sqltypes.TypeEngine):
- __visit_name__ = "CIDR"
-PGCidr = CIDR
-
-
-class MACADDR(sqltypes.TypeEngine):
- __visit_name__ = "MACADDR"
-PGMacAddr = MACADDR
-
-
-class TIMESTAMP(sqltypes.TIMESTAMP):
- def __init__(self, timezone=False, precision=None):
- super(TIMESTAMP, self).__init__(timezone=timezone)
- self.precision = precision
-
-
-class TIME(sqltypes.TIME):
- def __init__(self, timezone=False, precision=None):
- super(TIME, self).__init__(timezone=timezone)
- self.precision = precision
-
-
-class INTERVAL(sqltypes.TypeEngine):
- """Postgresql INTERVAL type.
-
- The INTERVAL type may not be supported on all DBAPIs.
- It is known to work on psycopg2 and not pg8000 or zxjdbc.
-
- """
- __visit_name__ = 'INTERVAL'
-
- def __init__(self, precision=None):
- self.precision = precision
-
- @classmethod
- def _adapt_from_generic_interval(cls, interval):
- return INTERVAL(precision=interval.second_precision)
-
- @property
- def _type_affinity(self):
- return sqltypes.Interval
-
-PGInterval = INTERVAL
-
-
-class BIT(sqltypes.TypeEngine):
- __visit_name__ = 'BIT'
-
- def __init__(self, length=None, varying=False):
- if not varying:
- # BIT without VARYING defaults to length 1
- self.length = length or 1
- else:
- # but BIT VARYING can be unlimited-length, so no default
- self.length = length
- self.varying = varying
-
-PGBit = BIT
-
-
-class UUID(sqltypes.TypeEngine):
- """Postgresql UUID type.
-
- Represents the UUID column type, interpreting
- data either as natively returned by the DBAPI
- or as Python uuid objects.
-
- The UUID type may not be supported on all DBAPIs.
- It is known to work on psycopg2 and not pg8000.
-
- """
- __visit_name__ = 'UUID'
-
- def __init__(self, as_uuid=False):
- """Construct a UUID type.
-
-
- :param as_uuid=False: if True, values will be interpreted
- as Python uuid objects, converting to/from string via the
- DBAPI.
-
- """
- if as_uuid and _python_UUID is None:
- raise NotImplementedError(
- "This version of Python does not support the native UUID type."
- )
- self.as_uuid = as_uuid
-
- def bind_processor(self, dialect):
- if self.as_uuid:
- def process(value):
- if value is not None:
- value = util.text_type(value)
- return value
- return process
- else:
- return None
-
- def result_processor(self, dialect, coltype):
- if self.as_uuid:
- def process(value):
- if value is not None:
- value = _python_UUID(value)
- return value
- return process
- else:
- return None
-
-PGUuid = UUID
-
-class TSVECTOR(sqltypes.TypeEngine):
- """The :class:`.postgresql.TSVECTOR` type implements the Postgresql
- text search type TSVECTOR.
-
- It can be used to do full text queries on natural language
- documents.
-
- .. versionadded:: 0.9.0
-
- .. seealso::
-
- :ref:`postgresql_match`
-
- """
- __visit_name__ = 'TSVECTOR'
-
-
-
-class _Slice(expression.ColumnElement):
- __visit_name__ = 'slice'
- type = sqltypes.NULLTYPE
-
- def __init__(self, slice_, source_comparator):
- self.start = source_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.start)
- self.stop = source_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.stop)
-
-
-class Any(expression.ColumnElement):
- """Represent the clause ``left operator ANY (right)``. ``right`` must be
- an array expression.
-
- .. seealso::
-
- :class:`.postgresql.ARRAY`
-
- :meth:`.postgresql.ARRAY.Comparator.any` - ARRAY-bound method
-
- """
- __visit_name__ = 'any'
-
- def __init__(self, left, right, operator=operators.eq):
- self.type = sqltypes.Boolean()
- self.left = expression._literal_as_binds(left)
- self.right = right
- self.operator = operator
-
-
-class All(expression.ColumnElement):
- """Represent the clause ``left operator ALL (right)``. ``right`` must be
- an array expression.
-
- .. seealso::
-
- :class:`.postgresql.ARRAY`
-
- :meth:`.postgresql.ARRAY.Comparator.all` - ARRAY-bound method
-
- """
- __visit_name__ = 'all'
-
- def __init__(self, left, right, operator=operators.eq):
- self.type = sqltypes.Boolean()
- self.left = expression._literal_as_binds(left)
- self.right = right
- self.operator = operator
-
-
-class array(expression.Tuple):
- """A Postgresql ARRAY literal.
-
- This is used to produce ARRAY literals in SQL expressions, e.g.::
-
- from sqlalchemy.dialects.postgresql import array
- from sqlalchemy.dialects import postgresql
- from sqlalchemy import select, func
-
- stmt = select([
- array([1,2]) + array([3,4,5])
- ])
-
- print stmt.compile(dialect=postgresql.dialect())
-
- Produces the SQL::
-
-        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
-            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
-
- An instance of :class:`.array` will always have the datatype
- :class:`.ARRAY`. The "inner" type of the array is inferred from
- the values present, unless the ``type_`` keyword argument is passed::
-
- array(['foo', 'bar'], type_=CHAR)
-
- .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
-
- See also:
-
- :class:`.postgresql.ARRAY`
-
- """
- __visit_name__ = 'array'
-
- def __init__(self, clauses, **kw):
- super(array, self).__init__(*clauses, **kw)
- self.type = ARRAY(self.type)
-
- def _bind_param(self, operator, obj):
- return array(*[
- expression.BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
- for o in obj
- ])
-
- def self_group(self, against=None):
- return self
-
-
-class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
- """Postgresql ARRAY type.
-
- Represents values as Python lists.
-
- An :class:`.ARRAY` type is constructed given the "type"
- of element::
-
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer))
- )
-
- The above type represents an N-dimensional array,
- meaning Postgresql will interpret values with any number
- of dimensions automatically. To produce an INSERT
- construct that passes in a 1-dimensional array of integers::
-
- connection.execute(
- mytable.insert(),
- data=[1,2,3]
- )
-
- The :class:`.ARRAY` type can be constructed given a fixed number
- of dimensions::
-
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer, dimensions=2))
- )
-
- This has the effect of the :class:`.ARRAY` type
- specifying that number of bracketed blocks when a :class:`.Table`
- is used in a CREATE TABLE statement, or when the type is used
- within a :func:`.expression.cast` construct; it also causes
- the bind parameter and result set processing of the type
- to optimize itself to expect exactly that number of dimensions.
- Note that Postgresql itself still allows N dimensions with such a type.
-
- SQL expressions of type :class:`.ARRAY` have support for "index" and
- "slice" behavior. The Python ``[]`` operator works normally here, given
- integer indexes or slices. Note that Postgresql arrays default
- to 1-based indexing. The operator produces binary expression
- constructs which will produce the appropriate SQL, both for
- SELECT statements::
-
- select([mytable.c.data[5], mytable.c.data[2:7]])
-
- as well as UPDATE statements when the :meth:`.Update.values` method
- is used::
-
- mytable.update().values({
- mytable.c.data[5]: 7,
- mytable.c.data[2:7]: [1, 2, 3]
- })
-
- :class:`.ARRAY` provides special methods for containment operations,
- e.g.::
-
- mytable.c.data.contains([1, 2])
-
- For a full list of special methods see :class:`.ARRAY.Comparator`.
-
- .. versionadded:: 0.8 Added support for index and slice operations
- to the :class:`.ARRAY` type, including support for UPDATE
- statements, and special array containment operations.
-
- The :class:`.ARRAY` type may not be supported on all DBAPIs.
- It is known to work on psycopg2 and not pg8000.
-
- See also:
-
- :class:`.postgresql.array` - produce a literal array value.
-
- """
- __visit_name__ = 'ARRAY'
-
- class Comparator(sqltypes.Concatenable.Comparator):
- """Define comparison operations for :class:`.ARRAY`."""
-
- def __getitem__(self, index):
- if isinstance(index, slice):
- index = _Slice(index, self)
- return_type = self.type
- else:
- return_type = self.type.item_type
- return self._binary_operate(self.expr, operators.getitem, index,
- result_type=return_type)
-
- def any(self, other, operator=operators.eq):
- """Return ``other operator ANY (array)`` clause.
-
-            Argument places are switched, because ANY requires the array
-            expression to be on the right-hand side.
-
- E.g.::
-
- from sqlalchemy.sql import operators
-
- conn.execute(
- select([table.c.data]).where(
- table.c.data.any(7, operator=operators.lt)
- )
- )
-
- :param other: expression to be compared
- :param operator: an operator object from the
- :mod:`sqlalchemy.sql.operators`
- package, defaults to :func:`.operators.eq`.
-
- .. seealso::
-
- :class:`.postgresql.Any`
-
- :meth:`.postgresql.ARRAY.Comparator.all`
-
- """
- return Any(other, self.expr, operator=operator)
-
- def all(self, other, operator=operators.eq):
- """Return ``other operator ALL (array)`` clause.
-
-            Argument places are switched, because ALL requires the array
-            expression to be on the right-hand side.
-
- E.g.::
-
- from sqlalchemy.sql import operators
-
- conn.execute(
- select([table.c.data]).where(
- table.c.data.all(7, operator=operators.lt)
- )
- )
-
- :param other: expression to be compared
- :param operator: an operator object from the
- :mod:`sqlalchemy.sql.operators`
- package, defaults to :func:`.operators.eq`.
-
- .. seealso::
-
- :class:`.postgresql.All`
-
- :meth:`.postgresql.ARRAY.Comparator.any`
-
- """
- return All(other, self.expr, operator=operator)
-
- def contains(self, other, **kwargs):
- """Boolean expression. Test if elements are a superset of the
- elements of the argument array expression.
- """
- return self.expr.op('@>')(other)
-
- def contained_by(self, other):
- """Boolean expression. Test if elements are a proper subset of the
- elements of the argument array expression.
- """
- return self.expr.op('<@')(other)
-
- def overlap(self, other):
- """Boolean expression. Test if array has elements in common with
- an argument array expression.
- """
- return self.expr.op('&&')(other)
-
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, operators.custom_op):
- if op.opstring in ['@>', '<@', '&&']:
- return op, sqltypes.Boolean
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
- comparator_factory = Comparator
-
- def __init__(self, item_type, as_tuple=False, dimensions=None):
- """Construct an ARRAY.
-
- E.g.::
-
- Column('myarray', ARRAY(Integer))
-
- Arguments are:
-
- :param item_type: The data type of items of this array. Note that
- dimensionality is irrelevant here, so multi-dimensional arrays like
-          ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
- ``ARRAY(ARRAY(Integer))`` or such.
-
- :param as_tuple=False: Specify whether return results
- should be converted to tuples from lists. DBAPIs such
- as psycopg2 return lists by default. When tuples are
- returned, the results are hashable.
-
- :param dimensions: if non-None, the ARRAY will assume a fixed
- number of dimensions. This will cause the DDL emitted for this
- ARRAY to include the exact number of bracket clauses ``[]``,
- and will also optimize the performance of the type overall.
- Note that PG arrays are always implicitly "non-dimensioned",
- meaning they can store any number of dimensions no matter how
- they were declared.
-
- """
- if isinstance(item_type, ARRAY):
- raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
- "handles multi-dimensional arrays of basetype")
- if isinstance(item_type, type):
- item_type = item_type()
- self.item_type = item_type
- self.as_tuple = as_tuple
- self.dimensions = dimensions
-
- @property
- def python_type(self):
- return list
-
- def compare_values(self, x, y):
- return x == y
-
- def _proc_array(self, arr, itemproc, dim, collection):
- if dim is None:
- arr = list(arr)
- if dim == 1 or dim is None and (
- # this has to be (list, tuple), or at least
- # not hasattr('__iter__'), since Py3K strings
- # etc. have __iter__
- not arr or not isinstance(arr[0], (list, tuple))):
- if itemproc:
- return collection(itemproc(x) for x in arr)
- else:
- return collection(arr)
- else:
- return collection(
- self._proc_array(
- x, itemproc,
- dim - 1 if dim is not None else None,
- collection)
- for x in arr
- )
-
- def bind_processor(self, dialect):
- item_proc = self.item_type.\
- dialect_impl(dialect).\
- bind_processor(dialect)
-
- def process(value):
- if value is None:
- return value
- else:
- return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- list)
- return process
-
- def result_processor(self, dialect, coltype):
- item_proc = self.item_type.\
- dialect_impl(dialect).\
- result_processor(dialect, coltype)
-
- def process(value):
- if value is None:
- return value
- else:
- return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- tuple if self.as_tuple else list)
- return process
-
-PGArray = ARRAY
-
-
-class ENUM(sqltypes.Enum):
- """Postgresql ENUM type.
-
- This is a subclass of :class:`.types.Enum` which includes
- support for PG's ``CREATE TYPE``.
-
- :class:`~.postgresql.ENUM` is used automatically when
- using the :class:`.types.Enum` type on PG assuming
- the ``native_enum`` is left as ``True``. However, the
- :class:`~.postgresql.ENUM` class can also be instantiated
- directly in order to access some additional Postgresql-specific
- options, namely finer control over whether or not
- ``CREATE TYPE`` should be emitted.
-
- Note that both :class:`.types.Enum` as well as
- :class:`~.postgresql.ENUM` feature create/drop
- methods; the base :class:`.types.Enum` type ultimately
- delegates to the :meth:`~.postgresql.ENUM.create` and
- :meth:`~.postgresql.ENUM.drop` methods present here.
-
- """
-
- def __init__(self, *enums, **kw):
- """Construct an :class:`~.postgresql.ENUM`.
-
- Arguments are the same as that of
- :class:`.types.Enum`, but also including
- the following parameters.
-
- :param create_type: Defaults to True.
- Indicates that ``CREATE TYPE`` should be
- emitted, after optionally checking for the
- presence of the type, when the parent
- table is being created; and additionally
- that ``DROP TYPE`` is called when the table
- is dropped. When ``False``, no check
- will be performed and no ``CREATE TYPE``
- or ``DROP TYPE`` is emitted, unless
- :meth:`~.postgresql.ENUM.create`
- or :meth:`~.postgresql.ENUM.drop`
- are called directly.
- Setting to ``False`` is helpful
- when invoking a creation scheme to a SQL file
- without access to the actual database -
- the :meth:`~.postgresql.ENUM.create` and
- :meth:`~.postgresql.ENUM.drop` methods can
- be used to emit SQL to a target bind.
-
- .. versionadded:: 0.7.4
-
- """
- self.create_type = kw.pop("create_type", True)
- super(ENUM, self).__init__(*enums, **kw)
-
- def create(self, bind=None, checkfirst=True):
- """Emit ``CREATE TYPE`` for this
- :class:`~.postgresql.ENUM`.
-
- If the underlying dialect does not support
- Postgresql CREATE TYPE, no action is taken.
-
- :param bind: a connectable :class:`.Engine`,
- :class:`.Connection`, or similar object to emit
- SQL.
- :param checkfirst: if ``True``, a query against
- the PG catalog will be first performed to see
- if the type does not exist already before
- creating.
-
- """
- if not bind.dialect.supports_native_enum:
- return
-
- if not checkfirst or \
- not bind.dialect.has_type(bind, self.name, schema=self.schema):
- bind.execute(CreateEnumType(self))
-
- def drop(self, bind=None, checkfirst=True):
- """Emit ``DROP TYPE`` for this
- :class:`~.postgresql.ENUM`.
-
- If the underlying dialect does not support
- Postgresql DROP TYPE, no action is taken.
-
- :param bind: a connectable :class:`.Engine`,
- :class:`.Connection`, or similar object to emit
- SQL.
- :param checkfirst: if ``True``, a query against
- the PG catalog will be first performed to see
- if the type actually exists before dropping.
-
- """
- if not bind.dialect.supports_native_enum:
- return
-
- if not checkfirst or \
- bind.dialect.has_type(bind, self.name, schema=self.schema):
- bind.execute(DropEnumType(self))
-
- def _check_for_name_in_memos(self, checkfirst, kw):
- """Look in the 'ddl runner' for 'memos', then
- note our name in that collection.
-
- This to ensure a particular named enum is operated
- upon only once within any kind of create/drop
- sequence without relying upon "checkfirst".
-
- """
- if not self.create_type:
- return True
- if '_ddl_runner' in kw:
- ddl_runner = kw['_ddl_runner']
- if '_pg_enums' in ddl_runner.memo:
- pg_enums = ddl_runner.memo['_pg_enums']
- else:
- pg_enums = ddl_runner.memo['_pg_enums'] = set()
- present = self.name in pg_enums
- pg_enums.add(self.name)
- return present
- else:
- return False
-
- def _on_table_create(self, target, bind, checkfirst, **kw):
- if not self._check_for_name_in_memos(checkfirst, kw):
- self.create(bind=bind, checkfirst=checkfirst)
-
- def _on_metadata_create(self, target, bind, checkfirst, **kw):
- if self.metadata is not None and \
- not self._check_for_name_in_memos(checkfirst, kw):
- self.create(bind=bind, checkfirst=checkfirst)
-
- def _on_metadata_drop(self, target, bind, checkfirst, **kw):
- if not self._check_for_name_in_memos(checkfirst, kw):
- self.drop(bind=bind, checkfirst=checkfirst)
-
-colspecs = {
- sqltypes.Interval: INTERVAL,
- sqltypes.Enum: ENUM,
-}
-
-ischema_names = {
- 'integer': INTEGER,
- 'bigint': BIGINT,
- 'smallint': SMALLINT,
- 'character varying': VARCHAR,
- 'character': CHAR,
- '"char"': sqltypes.String,
- 'name': sqltypes.String,
- 'text': TEXT,
- 'numeric': NUMERIC,
- 'float': FLOAT,
- 'real': REAL,
- 'inet': INET,
- 'cidr': CIDR,
- 'uuid': UUID,
- 'bit': BIT,
- 'bit varying': BIT,
- 'macaddr': MACADDR,
- 'double precision': DOUBLE_PRECISION,
- 'timestamp': TIMESTAMP,
- 'timestamp with time zone': TIMESTAMP,
- 'timestamp without time zone': TIMESTAMP,
- 'time with time zone': TIME,
- 'time without time zone': TIME,
- 'date': DATE,
- 'time': TIME,
- 'bytea': BYTEA,
- 'boolean': BOOLEAN,
- 'interval': INTERVAL,
- 'interval year to month': INTERVAL,
- 'interval day to second': INTERVAL,
- 'tsvector' : TSVECTOR
-}
-
-
-class PGCompiler(compiler.SQLCompiler):
-
- def visit_array(self, element, **kw):
- return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
-
- def visit_slice(self, element, **kw):
- return "%s:%s" % (
- self.process(element.start, **kw),
- self.process(element.stop, **kw),
- )
-
- def visit_any(self, element, **kw):
- return "%s%sANY (%s)" % (
- self.process(element.left, **kw),
- compiler.OPERATORS[element.operator],
- self.process(element.right, **kw)
- )
-
- def visit_all(self, element, **kw):
- return "%s%sALL (%s)" % (
- self.process(element.left, **kw),
- compiler.OPERATORS[element.operator],
- self.process(element.right, **kw)
- )
-
- def visit_getitem_binary(self, binary, operator, **kw):
- return "%s[%s]" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw)
- )
-
- def visit_match_op_binary(self, binary, operator, **kw):
- return "%s @@ to_tsquery(%s)" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw))
-
- def visit_ilike_op_binary(self, binary, operator, **kw):
- escape = binary.modifiers.get("escape", None)
-
- return '%s ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
- + (
- ' ESCAPE ' +
- self.render_literal_value(escape, sqltypes.STRINGTYPE)
- if escape else ''
- )
-
- def visit_notilike_op_binary(self, binary, operator, **kw):
- escape = binary.modifiers.get("escape", None)
- return '%s NOT ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
- + (
- ' ESCAPE ' +
- self.render_literal_value(escape, sqltypes.STRINGTYPE)
- if escape else ''
- )
-
- def render_literal_value(self, value, type_):
- value = super(PGCompiler, self).render_literal_value(value, type_)
-
- if self.dialect._backslash_escapes:
- value = value.replace('\\', '\\\\')
- return value
-
- def visit_sequence(self, seq):
- return "nextval('%s')" % self.preparer.format_sequence(seq)
-
- def limit_clause(self, select):
- text = ""
- if select._limit is not None:
- text += " \n LIMIT " + self.process(sql.literal(select._limit))
- if select._offset is not None:
- if select._limit is None:
- text += " \n LIMIT ALL"
- text += " OFFSET " + self.process(sql.literal(select._offset))
- return text
-
- def format_from_hint_text(self, sqltext, table, hint, iscrud):
- if hint.upper() != 'ONLY':
- raise exc.CompileError("Unrecognized hint: %r" % hint)
- return "ONLY " + sqltext
-
- def get_select_precolumns(self, select):
- if select._distinct is not False:
- if select._distinct is True:
- return "DISTINCT "
- elif isinstance(select._distinct, (list, tuple)):
- return "DISTINCT ON (" + ', '.join(
- [self.process(col) for col in select._distinct]
- ) + ") "
- else:
- return "DISTINCT ON (" + self.process(select._distinct) + ") "
- else:
- return ""
-
- def for_update_clause(self, select):
-
- if select._for_update_arg.read:
- tmp = " FOR SHARE"
- else:
- tmp = " FOR UPDATE"
-
- if select._for_update_arg.of:
- tables = util.OrderedSet(
- c.table if isinstance(c, expression.ColumnClause)
- else c for c in select._for_update_arg.of)
- tmp += " OF " + ", ".join(
- self.process(table, ashint=True)
- for table in tables
- )
-
- if select._for_update_arg.nowait:
- tmp += " NOWAIT"
-
- return tmp
-
- def returning_clause(self, stmt, returning_cols):
-
- columns = [
- self._label_select_column(None, c, True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
-
- return 'RETURNING ' + ', '.join(columns)
-
-
- def visit_substring_func(self, func, **kw):
- s = self.process(func.clauses.clauses[0], **kw)
- start = self.process(func.clauses.clauses[1], **kw)
- if len(func.clauses.clauses) > 2:
- length = self.process(func.clauses.clauses[2], **kw)
- return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
- else:
- return "SUBSTRING(%s FROM %s)" % (s, start)
-
-class PGDDLCompiler(compiler.DDLCompiler):
- def get_column_specification(self, column, **kwargs):
-
- colspec = self.preparer.format_column(column)
- impl_type = column.type.dialect_impl(self.dialect)
- if column.primary_key and \
- column is column.table._autoincrement_column and \
- (
- self.dialect.supports_smallserial or
- not isinstance(impl_type, sqltypes.SmallInteger)
- ) and (
- column.default is None or
- (
- isinstance(column.default, schema.Sequence) and
- column.default.optional
- )):
- if isinstance(impl_type, sqltypes.BigInteger):
- colspec += " BIGSERIAL"
- elif isinstance(impl_type, sqltypes.SmallInteger):
- colspec += " SMALLSERIAL"
- else:
- colspec += " SERIAL"
- else:
- colspec += " " + self.dialect.type_compiler.process(column.type)
- default = self.get_column_default_string(column)
- if default is not None:
- colspec += " DEFAULT " + default
-
- if not column.nullable:
- colspec += " NOT NULL"
- return colspec
-
- def visit_create_enum_type(self, create):
- type_ = create.element
-
- return "CREATE TYPE %s AS ENUM (%s)" % (
- self.preparer.format_type(type_),
- ", ".join(
- self.sql_compiler.process(sql.literal(e), literal_binds=True)
- for e in type_.enums)
- )
-
- def visit_drop_enum_type(self, drop):
- type_ = drop.element
-
- return "DROP TYPE %s" % (
- self.preparer.format_type(type_)
- )
-
- def visit_create_index(self, create):
- preparer = self.preparer
- index = create.element
- self._verify_index_table(index)
- text = "CREATE "
- if index.unique:
- text += "UNIQUE "
- text += "INDEX %s ON %s " % (
- self._prepared_index_name(index,
- include_schema=False),
- preparer.format_table(index.table)
- )
-
- using = index.dialect_options['postgresql']['using']
- if using:
- text += "USING %s " % preparer.quote(using)
-
- ops = index.dialect_options["postgresql"]["ops"]
- text += "(%s)" \
- % (
- ', '.join([
- self.sql_compiler.process(
- expr.self_group()
- if not isinstance(expr, expression.ColumnClause)
- else expr,
- include_table=False, literal_binds=True) +
- (c.key in ops and (' ' + ops[c.key]) or '')
- for expr, c in zip(index.expressions, index.columns)])
- )
-
- whereclause = index.dialect_options["postgresql"]["where"]
-
- if whereclause is not None:
- where_compiled = self.sql_compiler.process(
- whereclause, include_table=False,
- literal_binds=True)
- text += " WHERE " + where_compiled
- return text
-
- def visit_exclude_constraint(self, constraint):
- text = ""
- if constraint.name is not None:
- text += "CONSTRAINT %s " % \
- self.preparer.format_constraint(constraint)
- elements = []
- for c in constraint.columns:
- op = constraint.operators[c.name]
- elements.append(self.preparer.quote(c.name) + ' WITH '+op)
- text += "EXCLUDE USING %s (%s)" % (constraint.using, ', '.join(elements))
- if constraint.where is not None:
- text += ' WHERE (%s)' % self.sql_compiler.process(
- constraint.where,
- literal_binds=True)
- text += self.define_constraint_deferrability(constraint)
- return text
-
-
-class PGTypeCompiler(compiler.GenericTypeCompiler):
- def visit_TSVECTOR(self, type):
- return "TSVECTOR"
-
- def visit_INET(self, type_):
- return "INET"
-
- def visit_CIDR(self, type_):
- return "CIDR"
-
- def visit_MACADDR(self, type_):
- return "MACADDR"
-
- def visit_FLOAT(self, type_):
- if not type_.precision:
- return "FLOAT"
- else:
- return "FLOAT(%(precision)s)" % {'precision': type_.precision}
-
- def visit_DOUBLE_PRECISION(self, type_):
- return "DOUBLE PRECISION"
-
- def visit_BIGINT(self, type_):
- return "BIGINT"
-
- def visit_HSTORE(self, type_):
- return "HSTORE"
-
- def visit_JSON(self, type_):
- return "JSON"
-
- def visit_INT4RANGE(self, type_):
- return "INT4RANGE"
-
- def visit_INT8RANGE(self, type_):
- return "INT8RANGE"
-
- def visit_NUMRANGE(self, type_):
- return "NUMRANGE"
-
- def visit_DATERANGE(self, type_):
- return "DATERANGE"
-
- def visit_TSRANGE(self, type_):
- return "TSRANGE"
-
- def visit_TSTZRANGE(self, type_):
- return "TSTZRANGE"
-
- def visit_datetime(self, type_):
- return self.visit_TIMESTAMP(type_)
-
- def visit_enum(self, type_):
- if not type_.native_enum or not self.dialect.supports_native_enum:
- return super(PGTypeCompiler, self).visit_enum(type_)
- else:
- return self.visit_ENUM(type_)
-
- def visit_ENUM(self, type_):
- return self.dialect.identifier_preparer.format_type(type_)
-
- def visit_TIMESTAMP(self, type_):
- return "TIMESTAMP%s %s" % (
- getattr(type_, 'precision', None) and "(%d)" %
- type_.precision or "",
- (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
- )
-
- def visit_TIME(self, type_):
- return "TIME%s %s" % (
- getattr(type_, 'precision', None) and "(%d)" %
- type_.precision or "",
- (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
- )
-
- def visit_INTERVAL(self, type_):
- if type_.precision is not None:
- return "INTERVAL(%d)" % type_.precision
- else:
- return "INTERVAL"
-
- def visit_BIT(self, type_):
- if type_.varying:
- compiled = "BIT VARYING"
- if type_.length is not None:
- compiled += "(%d)" % type_.length
- else:
- compiled = "BIT(%d)" % type_.length
- return compiled
-
- def visit_UUID(self, type_):
- return "UUID"
-
- def visit_large_binary(self, type_):
- return self.visit_BYTEA(type_)
-
- def visit_BYTEA(self, type_):
- return "BYTEA"
-
- def visit_ARRAY(self, type_):
- return self.process(type_.item_type) + ('[]' * (type_.dimensions
- if type_.dimensions
- is not None else 1))
-
-
-class PGIdentifierPreparer(compiler.IdentifierPreparer):
-
- reserved_words = RESERVED_WORDS
-
- def _unquote_identifier(self, value):
- if value[0] == self.initial_quote:
- value = value[1:-1].\
- replace(self.escape_to_quote, self.escape_quote)
- return value
-
- def format_type(self, type_, use_schema=True):
- if not type_.name:
- raise exc.CompileError("Postgresql ENUM type requires a name.")
-
- name = self.quote(type_.name)
- if not self.omit_schema and use_schema and type_.schema is not None:
- name = self.quote_schema(type_.schema) + "." + name
- return name
-
-
-class PGInspector(reflection.Inspector):
-
- def __init__(self, conn):
- reflection.Inspector.__init__(self, conn)
-
- def get_table_oid(self, table_name, schema=None):
- """Return the oid from `table_name` and `schema`."""
-
- return self.dialect.get_table_oid(self.bind, table_name, schema,
- info_cache=self.info_cache)
-
-
-class CreateEnumType(schema._CreateDropBase):
- __visit_name__ = "create_enum_type"
-
-
-class DropEnumType(schema._CreateDropBase):
- __visit_name__ = "drop_enum_type"
-
-
-class PGExecutionContext(default.DefaultExecutionContext):
- def fire_sequence(self, seq, type_):
- return self._execute_scalar(("select nextval('%s')" % \
- self.dialect.identifier_preparer.format_sequence(seq)), type_)
-
- def get_insert_default(self, column):
- if column.primary_key and column is column.table._autoincrement_column:
- if column.server_default and column.server_default.has_argument:
-
- # pre-execute passive defaults on primary key columns
- return self._execute_scalar("select %s" %
- column.server_default.arg, column.type)
-
- elif (column.default is None or
- (column.default.is_sequence and
- column.default.optional)):
-
- # execute the sequence associated with a SERIAL primary
- # key column. for non-primary-key SERIAL, the ID just
- # generates server side.
-
- try:
- seq_name = column._postgresql_seq_name
- except AttributeError:
- tab = column.table.name
- col = column.name
- tab = tab[0:29 + max(0, (29 - len(col)))]
- col = col[0:29 + max(0, (29 - len(tab)))]
- name = "%s_%s_seq" % (tab, col)
- column._postgresql_seq_name = seq_name = name
-
- sch = column.table.schema
- if sch is not None:
- exc = "select nextval('\"%s\".\"%s\"')" % \
- (sch, seq_name)
- else:
- exc = "select nextval('\"%s\"')" % \
- (seq_name, )
-
- return self._execute_scalar(exc, column.type)
-
- return super(PGExecutionContext, self).get_insert_default(column)
-
-
-class PGDialect(default.DefaultDialect):
- name = 'postgresql'
- supports_alter = True
- max_identifier_length = 63
- supports_sane_rowcount = True
-
- supports_native_enum = True
- supports_native_boolean = True
- supports_smallserial = True
-
- supports_sequences = True
- sequences_optional = True
- preexecute_autoincrement_sequences = True
- postfetch_lastrowid = False
-
- supports_default_values = True
- supports_empty_insert = False
- supports_multivalues_insert = True
- default_paramstyle = 'pyformat'
- ischema_names = ischema_names
- colspecs = colspecs
-
- statement_compiler = PGCompiler
- ddl_compiler = PGDDLCompiler
- type_compiler = PGTypeCompiler
- preparer = PGIdentifierPreparer
- execution_ctx_cls = PGExecutionContext
- inspector = PGInspector
- isolation_level = None
-
- construct_arguments = [
- (schema.Index, {
- "using": False,
- "where": None,
- "ops": {}
- }),
- (schema.Table, {
- "ignore_search_path": False
- })
- ]
-
- reflection_options = ('postgresql_ignore_search_path', )
-
- _backslash_escapes = True
-
- def __init__(self, isolation_level=None, json_serializer=None,
- json_deserializer=None, **kwargs):
- default.DefaultDialect.__init__(self, **kwargs)
- self.isolation_level = isolation_level
- self._json_deserializer = json_deserializer
- self._json_serializer = json_serializer
-
- def initialize(self, connection):
- super(PGDialect, self).initialize(connection)
- self.implicit_returning = self.server_version_info > (8, 2) and \
- self.__dict__.get('implicit_returning', True)
- self.supports_native_enum = self.server_version_info >= (8, 3)
- if not self.supports_native_enum:
- self.colspecs = self.colspecs.copy()
- # pop base Enum type
- self.colspecs.pop(sqltypes.Enum, None)
- # psycopg2, others may have placed ENUM here as well
- self.colspecs.pop(ENUM, None)
-
- # http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
- self.supports_smallserial = self.server_version_info >= (9, 2)
-
- self._backslash_escapes = self.server_version_info < (8, 2) or \
- connection.scalar(
- "show standard_conforming_strings"
- ) == 'off'
-
- def on_connect(self):
- if self.isolation_level is not None:
- def connect(conn):
- self.set_isolation_level(conn, self.isolation_level)
- return connect
- else:
- return None
-
- _isolation_lookup = set(['SERIALIZABLE',
- 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
-
- def set_isolation_level(self, connection, level):
- level = level.replace('_', ' ')
- if level not in self._isolation_lookup:
- raise exc.ArgumentError(
- "Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
- )
- cursor = connection.cursor()
- cursor.execute(
- "SET SESSION CHARACTERISTICS AS TRANSACTION "
- "ISOLATION LEVEL %s" % level)
- cursor.execute("COMMIT")
- cursor.close()
-
- def get_isolation_level(self, connection):
- cursor = connection.cursor()
- cursor.execute('show transaction isolation level')
- val = cursor.fetchone()[0]
- cursor.close()
- return val.upper()
-
- def do_begin_twophase(self, connection, xid):
- self.do_begin(connection.connection)
-
- def do_prepare_twophase(self, connection, xid):
- connection.execute("PREPARE TRANSACTION '%s'" % xid)
-
- def do_rollback_twophase(self, connection, xid,
- is_prepared=True, recover=False):
- if is_prepared:
- if recover:
- #FIXME: ugly hack to get out of transaction
- # context when committing recoverable transactions
- # Must find out a way how to make the dbapi not
- # open a transaction.
- connection.execute("ROLLBACK")
- connection.execute("ROLLBACK PREPARED '%s'" % xid)
- connection.execute("BEGIN")
- self.do_rollback(connection.connection)
- else:
- self.do_rollback(connection.connection)
-
- def do_commit_twophase(self, connection, xid,
- is_prepared=True, recover=False):
- if is_prepared:
- if recover:
- connection.execute("ROLLBACK")
- connection.execute("COMMIT PREPARED '%s'" % xid)
- connection.execute("BEGIN")
- self.do_rollback(connection.connection)
- else:
- self.do_commit(connection.connection)
-
- def do_recover_twophase(self, connection):
- resultset = connection.execute(
- sql.text("SELECT gid FROM pg_prepared_xacts"))
- return [row[0] for row in resultset]
-
- def _get_default_schema_name(self, connection):
- return connection.scalar("select current_schema()")
-
- def has_schema(self, connection, schema):
- query = "select nspname from pg_namespace where lower(nspname)=:schema"
- cursor = connection.execute(
- sql.text(
- query,
- bindparams=[
- sql.bindparam(
- 'schema', util.text_type(schema.lower()),
- type_=sqltypes.Unicode)]
- )
- )
-
- return bool(cursor.first())
-
- def has_table(self, connection, table_name, schema=None):
- # seems like case gets folded in pg_class...
- if schema is None:
- cursor = connection.execute(
- sql.text(
- "select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=current_schema() and "
- "relname=:name",
- bindparams=[
- sql.bindparam('name', util.text_type(table_name),
- type_=sqltypes.Unicode)]
- )
- )
- else:
- cursor = connection.execute(
- sql.text(
- "select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=:schema and "
- "relname=:name",
- bindparams=[
- sql.bindparam('name',
- util.text_type(table_name), type_=sqltypes.Unicode),
- sql.bindparam('schema',
- util.text_type(schema), type_=sqltypes.Unicode)]
- )
- )
- return bool(cursor.first())
-
- def has_sequence(self, connection, sequence_name, schema=None):
- if schema is None:
- cursor = connection.execute(
- sql.text(
- "SELECT relname FROM pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where relkind='S' and "
- "n.nspname=current_schema() "
- "and relname=:name",
- bindparams=[
- sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode)
- ]
- )
- )
- else:
- cursor = connection.execute(
- sql.text(
- "SELECT relname FROM pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where relkind='S' and "
- "n.nspname=:schema and relname=:name",
- bindparams=[
- sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode),
- sql.bindparam('schema',
- util.text_type(schema), type_=sqltypes.Unicode)
- ]
- )
- )
-
- return bool(cursor.first())
-
- def has_type(self, connection, type_name, schema=None):
- if schema is not None:
- query = """
- SELECT EXISTS (
- SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
- WHERE t.typnamespace = n.oid
- AND t.typname = :typname
- AND n.nspname = :nspname
- )
- """
- query = sql.text(query)
- else:
- query = """
- SELECT EXISTS (
- SELECT * FROM pg_catalog.pg_type t
- WHERE t.typname = :typname
- AND pg_type_is_visible(t.oid)
- )
- """
- query = sql.text(query)
- query = query.bindparams(
- sql.bindparam('typname',
- util.text_type(type_name), type_=sqltypes.Unicode),
- )
- if schema is not None:
- query = query.bindparams(
- sql.bindparam('nspname',
- util.text_type(schema), type_=sqltypes.Unicode),
- )
- cursor = connection.execute(query)
- return bool(cursor.scalar())
-
- def _get_server_version_info(self, connection):
- v = connection.execute("select version()").scalar()
- m = re.match(
- '.*(?:PostgreSQL|EnterpriseDB) '
- '(\d+)\.(\d+)(?:\.(\d+))?(?:\.\d+)?(?:devel)?',
- v)
- if not m:
- raise AssertionError(
- "Could not determine version from string '%s'" % v)
- return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
-
- @reflection.cache
- def get_table_oid(self, connection, table_name, schema=None, **kw):
- """Fetch the oid for schema.table_name.
-
- Several reflection methods require the table oid. The idea for using
- this method is that it can be fetched one time and cached for
- subsequent calls.
-
- """
- table_oid = None
- if schema is not None:
- schema_where_clause = "n.nspname = :schema"
- else:
- schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
- query = """
- SELECT c.oid
- FROM pg_catalog.pg_class c
- LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- WHERE (%s)
- AND c.relname = :table_name AND c.relkind in ('r','v')
- """ % schema_where_clause
- # Since we're binding to unicode, table_name and schema_name must be
- # unicode.
- table_name = util.text_type(table_name)
- if schema is not None:
- schema = util.text_type(schema)
- s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
- s = s.columns(oid=sqltypes.Integer)
- if schema:
- s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode))
- c = connection.execute(s, table_name=table_name, schema=schema)
- table_oid = c.scalar()
- if table_oid is None:
- raise exc.NoSuchTableError(table_name)
- return table_oid
-
- @reflection.cache
- def get_schema_names(self, connection, **kw):
- s = """
- SELECT nspname
- FROM pg_namespace
- ORDER BY nspname
- """
- rp = connection.execute(s)
- # what about system tables?
-
- if util.py2k:
- schema_names = [row[0].decode(self.encoding) for row in rp \
- if not row[0].startswith('pg_')]
- else:
- schema_names = [row[0] for row in rp \
- if not row[0].startswith('pg_')]
- return schema_names
-
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
- if schema is not None:
- current_schema = schema
- else:
- current_schema = self.default_schema_name
-
- result = connection.execute(
- sql.text("SELECT relname FROM pg_class c "
- "WHERE relkind = 'r' "
- "AND '%s' = (select nspname from pg_namespace n "
- "where n.oid = c.relnamespace) " %
- current_schema,
- typemap={'relname': sqltypes.Unicode}
- )
- )
- return [row[0] for row in result]
-
- @reflection.cache
- def get_view_names(self, connection, schema=None, **kw):
- if schema is not None:
- current_schema = schema
- else:
- current_schema = self.default_schema_name
- s = """
- SELECT relname
- FROM pg_class c
- WHERE relkind = 'v'
- AND '%(schema)s' = (select nspname from pg_namespace n
- where n.oid = c.relnamespace)
- """ % dict(schema=current_schema)
-
- if util.py2k:
- view_names = [row[0].decode(self.encoding)
- for row in connection.execute(s)]
- else:
- view_names = [row[0] for row in connection.execute(s)]
- return view_names
-
- @reflection.cache
- def get_view_definition(self, connection, view_name, schema=None, **kw):
- if schema is not None:
- current_schema = schema
- else:
- current_schema = self.default_schema_name
- s = """
- SELECT definition FROM pg_views
- WHERE schemaname = :schema
- AND viewname = :view_name
- """
- rp = connection.execute(sql.text(s),
- view_name=view_name, schema=current_schema)
- if rp:
- if util.py2k:
- view_def = rp.scalar().decode(self.encoding)
- else:
- view_def = rp.scalar()
- return view_def
-
- @reflection.cache
- def get_columns(self, connection, table_name, schema=None, **kw):
-
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
- SQL_COLS = """
- SELECT a.attname,
- pg_catalog.format_type(a.atttypid, a.atttypmod),
- (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
- FROM pg_catalog.pg_attrdef d
- WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
- AND a.atthasdef)
- AS DEFAULT,
- a.attnotnull, a.attnum, a.attrelid as table_oid
- FROM pg_catalog.pg_attribute a
- WHERE a.attrelid = :table_oid
- AND a.attnum > 0 AND NOT a.attisdropped
- ORDER BY a.attnum
- """
- s = sql.text(SQL_COLS,
- bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
- typemap={'attname': sqltypes.Unicode, 'default': sqltypes.Unicode}
- )
- c = connection.execute(s, table_oid=table_oid)
- rows = c.fetchall()
- domains = self._load_domains(connection)
- enums = self._load_enums(connection)
-
- # format columns
- columns = []
- for name, format_type, default, notnull, attnum, table_oid in rows:
- column_info = self._get_column_info(
- name, format_type, default, notnull, domains, enums, schema)
- columns.append(column_info)
- return columns
-
- def _get_column_info(self, name, format_type, default,
- notnull, domains, enums, schema):
- ## strip (*) from character varying(5), timestamp(5)
- # with time zone, geometry(POLYGON), etc.
- attype = re.sub(r'\(.*\)', '', format_type)
-
- # strip '[]' from integer[], etc.
- attype = re.sub(r'\[\]', '', attype)
-
- nullable = not notnull
- is_array = format_type.endswith('[]')
- charlen = re.search('\(([\d,]+)\)', format_type)
- if charlen:
- charlen = charlen.group(1)
- args = re.search('\((.*)\)', format_type)
- if args and args.group(1):
- args = tuple(re.split('\s*,\s*', args.group(1)))
- else:
- args = ()
- kwargs = {}
-
- if attype == 'numeric':
- if charlen:
- prec, scale = charlen.split(',')
- args = (int(prec), int(scale))
- else:
- args = ()
- elif attype == 'double precision':
- args = (53, )
- elif attype == 'integer':
- args = ()
- elif attype in ('timestamp with time zone',
- 'time with time zone'):
- kwargs['timezone'] = True
- if charlen:
- kwargs['precision'] = int(charlen)
- args = ()
- elif attype in ('timestamp without time zone',
- 'time without time zone', 'time'):
- kwargs['timezone'] = False
- if charlen:
- kwargs['precision'] = int(charlen)
- args = ()
- elif attype == 'bit varying':
- kwargs['varying'] = True
- if charlen:
- args = (int(charlen),)
- else:
- args = ()
- elif attype in ('interval', 'interval year to month',
- 'interval day to second'):
- if charlen:
- kwargs['precision'] = int(charlen)
- args = ()
- elif charlen:
- args = (int(charlen),)
-
- while True:
- if attype in self.ischema_names:
- coltype = self.ischema_names[attype]
- break
- elif attype in enums:
- enum = enums[attype]
- coltype = ENUM
- if "." in attype:
- kwargs['schema'], kwargs['name'] = attype.split('.')
- else:
- kwargs['name'] = attype
- args = tuple(enum['labels'])
- break
- elif attype in domains:
- domain = domains[attype]
- attype = domain['attype']
- # A table can't override whether the domain is nullable.
- nullable = domain['nullable']
- if domain['default'] and not default:
- # It can, however, override the default
- # value, but can't set it to null.
- default = domain['default']
- continue
- else:
- coltype = None
- break
-
- if coltype:
- coltype = coltype(*args, **kwargs)
- if is_array:
- coltype = ARRAY(coltype)
- else:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (attype, name))
- coltype = sqltypes.NULLTYPE
- # adjust the default value
- autoincrement = False
- if default is not None:
- match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
- if match is not None:
- autoincrement = True
- # the default is related to a Sequence
- sch = schema
- if '.' not in match.group(2) and sch is not None:
- # unconditionally quote the schema name. this could
- # later be enhanced to obey quoting rules /
- # "quote schema"
- default = match.group(1) + \
- ('"%s"' % sch) + '.' + \
- match.group(2) + match.group(3)
-
- column_info = dict(name=name, type=coltype, nullable=nullable,
- default=default, autoincrement=autoincrement)
- return column_info
-
- @reflection.cache
- def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
-
- if self.server_version_info < (8, 4):
- PK_SQL = """
- SELECT a.attname
- FROM
- pg_class t
- join pg_index ix on t.oid = ix.indrelid
- join pg_attribute a
- on t.oid=a.attrelid AND %s
- WHERE
- t.oid = :table_oid and ix.indisprimary = 't'
- ORDER BY a.attnum
- """ % self._pg_index_any("a.attnum", "ix.indkey")
-
- else:
- # unnest() and generate_subscripts() both introduced in
- # version 8.4
- PK_SQL = """
- SELECT a.attname
- FROM pg_attribute a JOIN (
- SELECT unnest(ix.indkey) attnum,
- generate_subscripts(ix.indkey, 1) ord
- FROM pg_index ix
- WHERE ix.indrelid = :table_oid AND ix.indisprimary
- ) k ON a.attnum=k.attnum
- WHERE a.attrelid = :table_oid
- ORDER BY k.ord
- """
- t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
- c = connection.execute(t, table_oid=table_oid)
- cols = [r[0] for r in c.fetchall()]
-
- PK_CONS_SQL = """
- SELECT conname
- FROM pg_catalog.pg_constraint r
- WHERE r.conrelid = :table_oid AND r.contype = 'p'
- ORDER BY 1
- """
- t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode})
- c = connection.execute(t, table_oid=table_oid)
- name = c.scalar()
-
- return {'constrained_columns': cols, 'name': name}
-
- @reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None,
- postgresql_ignore_search_path=False, **kw):
- preparer = self.identifier_preparer
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
-
- FK_SQL = """
- SELECT r.conname,
- pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
- n.nspname as conschema
- FROM pg_catalog.pg_constraint r,
- pg_namespace n,
- pg_class c
-
- WHERE r.conrelid = :table AND
- r.contype = 'f' AND
- c.oid = confrelid AND
- n.oid = c.relnamespace
- ORDER BY 1
- """
- # http://www.postgresql.org/docs/9.0/static/sql-createtable.html
- FK_REGEX = re.compile(
- r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
- r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
- r'[\s]?(ON UPDATE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
- r'[\s]?(ON DELETE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
- r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
- r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
- )
-
- t = sql.text(FK_SQL, typemap={
- 'conname': sqltypes.Unicode,
- 'condef': sqltypes.Unicode})
- c = connection.execute(t, table=table_oid)
- fkeys = []
- for conname, condef, conschema in c.fetchall():
- m = re.search(FK_REGEX, condef).groups()
-
- constrained_columns, referred_schema, \
- referred_table, referred_columns, \
- _, match, _, onupdate, _, ondelete, \
- deferrable, _, initially = m
-
- if deferrable is not None:
- deferrable = True if deferrable == 'DEFERRABLE' else False
- constrained_columns = [preparer._unquote_identifier(x)
- for x in re.split(r'\s*,\s*', constrained_columns)]
-
- if postgresql_ignore_search_path:
- # when ignoring search path, we use the actual schema
- # provided it isn't the "default" schema
- if conschema != self.default_schema_name:
- referred_schema = conschema
- else:
- referred_schema = schema
- elif referred_schema:
- # referred_schema is the schema that we regexp'ed from
- # pg_get_constraintdef(). If the schema is in the search
- # path, pg_get_constraintdef() will give us None.
- referred_schema = \
- preparer._unquote_identifier(referred_schema)
- elif schema is not None and schema == conschema:
- # If the actual schema matches the schema of the table
- # we're reflecting, then we will use that.
- referred_schema = schema
-
- referred_table = preparer._unquote_identifier(referred_table)
- referred_columns = [preparer._unquote_identifier(x)
- for x in re.split(r'\s*,\s*', referred_columns)]
- fkey_d = {
- 'name': conname,
- 'constrained_columns': constrained_columns,
- 'referred_schema': referred_schema,
- 'referred_table': referred_table,
- 'referred_columns': referred_columns,
- 'options': {
- 'onupdate': onupdate,
- 'ondelete': ondelete,
- 'deferrable': deferrable,
- 'initially': initially,
- 'match': match
- }
- }
- fkeys.append(fkey_d)
- return fkeys
-
- def _pg_index_any(self, col, compare_to):
- if self.server_version_info < (8, 1):
- # http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us
- # "In CVS tip you could replace this with "attnum = ANY (indkey)".
- # Unfortunately, most array support doesn't work on int2vector in
- # pre-8.1 releases, so I think you're kinda stuck with the above
- # for now.
- # regards, tom lane"
- return "(%s)" % " OR ".join(
- "%s[%d] = %s" % (compare_to, ind, col)
- for ind in range(0, 10)
- )
- else:
- return "%s = ANY(%s)" % (col, compare_to)
-
- @reflection.cache
- def get_indexes(self, connection, table_name, schema, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
-
- # cast indkey as varchar since it's an int2vector,
- # returned as a list by some drivers such as pypostgresql
-
- IDX_SQL = """
- SELECT
- i.relname as relname,
- ix.indisunique, ix.indexprs, ix.indpred,
- a.attname, a.attnum, ix.indkey%s
- FROM
- pg_class t
- join pg_index ix on t.oid = ix.indrelid
- join pg_class i on i.oid=ix.indexrelid
- left outer join
- pg_attribute a
- on t.oid=a.attrelid and %s
- WHERE
- t.relkind = 'r'
- and t.oid = :table_oid
- and ix.indisprimary = 'f'
- ORDER BY
- t.relname,
- i.relname
- """ % (
- # version 8.3 here was based on observing the
- # cast does not work in PG 8.2.4, does work in 8.3.0.
- # nothing in PG changelogs regarding this.
- "::varchar" if self.server_version_info >= (8, 3) else "",
- self._pg_index_any("a.attnum", "ix.indkey")
- )
-
- t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
- c = connection.execute(t, table_oid=table_oid)
-
- indexes = defaultdict(lambda: defaultdict(dict))
-
- sv_idx_name = None
- for row in c.fetchall():
- idx_name, unique, expr, prd, col, col_num, idx_key = row
-
- if expr:
- if idx_name != sv_idx_name:
- util.warn(
- "Skipped unsupported reflection of "
- "expression-based index %s"
- % idx_name)
- sv_idx_name = idx_name
- continue
-
- if prd and not idx_name == sv_idx_name:
- util.warn(
- "Predicate of partial index %s ignored during reflection"
- % idx_name)
- sv_idx_name = idx_name
-
- index = indexes[idx_name]
- if col is not None:
- index['cols'][col_num] = col
- index['key'] = [int(k.strip()) for k in idx_key.split()]
- index['unique'] = unique
-
- return [
- {'name': name,
- 'unique': idx['unique'],
- 'column_names': [idx['cols'][i] for i in idx['key']]}
- for name, idx in indexes.items()
- ]
-
- @reflection.cache
- def get_unique_constraints(self, connection, table_name,
- schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
-
- UNIQUE_SQL = """
- SELECT
- cons.conname as name,
- cons.conkey as key,
- a.attnum as col_num,
- a.attname as col_name
- FROM
- pg_catalog.pg_constraint cons
- join pg_attribute a
- on cons.conrelid = a.attrelid AND a.attnum = ANY(cons.conkey)
- WHERE
- cons.conrelid = :table_oid AND
- cons.contype = 'u'
- """
-
- t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode})
- c = connection.execute(t, table_oid=table_oid)
-
- uniques = defaultdict(lambda: defaultdict(dict))
- for row in c.fetchall():
- uc = uniques[row.name]
- uc["key"] = row.key
- uc["cols"][row.col_num] = row.col_name
-
- return [
- {'name': name,
- 'column_names': [uc["cols"][i] for i in uc["key"]]}
- for name, uc in uniques.items()
- ]
-
- def _load_enums(self, connection):
- if not self.supports_native_enum:
- return {}
-
- ## Load data types for enums:
- SQL_ENUMS = """
- SELECT t.typname as "name",
- -- no enum defaults in 8.4 at least
- -- t.typdefault as "default",
- pg_catalog.pg_type_is_visible(t.oid) as "visible",
- n.nspname as "schema",
- e.enumlabel as "label"
- FROM pg_catalog.pg_type t
- LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
- LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
- WHERE t.typtype = 'e'
- ORDER BY "name", e.oid -- e.oid gives us label order
- """
-
- s = sql.text(SQL_ENUMS, typemap={
- 'attname': sqltypes.Unicode,
- 'label': sqltypes.Unicode})
- c = connection.execute(s)
-
- enums = {}
- for enum in c.fetchall():
- if enum['visible']:
- # 'visible' just means whether or not the enum is in a
- # schema that's on the search path -- or not overridden by
- # a schema with higher precedence. If it's not visible,
- # it will be prefixed with the schema-name when it's used.
- name = enum['name']
- else:
- name = "%s.%s" % (enum['schema'], enum['name'])
-
- if name in enums:
- enums[name]['labels'].append(enum['label'])
- else:
- enums[name] = {
- 'labels': [enum['label']],
- }
-
- return enums
-
- def _load_domains(self, connection):
- ## Load data types for domains:
- SQL_DOMAINS = """
- SELECT t.typname as "name",
- pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
- not t.typnotnull as "nullable",
- t.typdefault as "default",
- pg_catalog.pg_type_is_visible(t.oid) as "visible",
- n.nspname as "schema"
- FROM pg_catalog.pg_type t
- LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
- WHERE t.typtype = 'd'
- """
-
- s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode})
- c = connection.execute(s)
-
- domains = {}
- for domain in c.fetchall():
- ## strip (30) from character varying(30)
- attype = re.search('([^\(]+)', domain['attype']).group(1)
- if domain['visible']:
- # 'visible' just means whether or not the domain is in a
- # schema that's on the search path -- or not overridden by
- # a schema with higher precedence. If it's not visible,
- # it will be prefixed with the schema-name when it's used.
- name = domain['name']
- else:
- name = "%s.%s" % (domain['schema'], domain['name'])
-
- domains[name] = {
- 'attype': attype,
- 'nullable': domain['nullable'],
- 'default': domain['default']
- }
-
- return domains
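
As a hedged illustration of the PostgreSQL-specific Index arguments consumed above by PGDDLCompiler.visit_create_index (the "using", "where" and "ops" entries registered in PGDialect.construct_arguments), a minimal sketch follows; the table, column and index names are invented for the example::

    from sqlalchemy import Column, Integer, MetaData, String, Table, Index
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.schema import CreateIndex

    metadata = MetaData()
    docs = Table(
        'docs', metadata,
        Column('id', Integer, primary_key=True),
        Column('body', String),
    )

    # postgresql_ops selects an operator class per column; postgresql_where
    # produces a partial index.  postgresql_using would emit "USING <method>".
    idx = Index(
        'ix_docs_body', docs.c.body,
        postgresql_ops={'body': 'text_pattern_ops'},
        postgresql_where=docs.c.id > 100,
    )

    print(CreateIndex(idx).compile(dialect=postgresql.dialect()))
    # roughly: CREATE INDEX ix_docs_body ON docs (body text_pattern_ops)
    #          WHERE id > 100
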
diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/constraints.py
deleted file mode 100644
index f45cef1a..00000000
--- a/lib/sqlalchemy/dialects/postgresql/constraints.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from sqlalchemy.schema import ColumnCollectionConstraint
-from sqlalchemy.sql import expression
-
-class ExcludeConstraint(ColumnCollectionConstraint):
- """A table-level EXCLUDE constraint.
-
- Defines an EXCLUDE constraint as described in the `postgres
- documentation`__.
-
- __ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
- """
-
- __visit_name__ = 'exclude_constraint'
-
- where = None
-
- def __init__(self, *elements, **kw):
- """
- :param \*elements:
- A sequence of two tuples of the form ``(column, operator)`` where
- column must be a column name or Column object and operator must
- be a string containing the operator to use.
-
- :param name:
- Optional, the in-database name of this constraint.
-
- :param deferrable:
- Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
- issuing DDL for this constraint.
-
- :param initially:
- Optional string. If set, emit INITIALLY when issuing DDL
- for this constraint.
-
- :param using:
- Optional string. If set, emit USING when issuing DDL
- for this constraint. Defaults to 'gist'.
-
- :param where:
- Optional string. If set, emit WHERE when issuing DDL
- for this constraint.
-
- """
- ColumnCollectionConstraint.__init__(
- self,
- *[col for col, op in elements],
- name=kw.get('name'),
- deferrable=kw.get('deferrable'),
- initially=kw.get('initially')
- )
- self.operators = {}
- for col_or_string, op in elements:
- name = getattr(col_or_string, 'name', col_or_string)
- self.operators[name] = op
- self.using = kw.get('using', 'gist')
- where = kw.get('where')
- if where:
- self.where = expression._literal_as_text(where)
-
- def copy(self, **kw):
- elements = [(col, self.operators[col])
- for col in self.columns.keys()]
- c = self.__class__(*elements,
- name=self.name,
- deferrable=self.deferrable,
- initially=self.initially)
- c.dispatch._update(self.dispatch)
- return c
-
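
A short, hedged usage sketch for the ExcludeConstraint defined above; the table, column and constraint names are invented, and the ``=`` operator over an integer column needs the btree_gist extension on the server side (the DDL itself compiles regardless)::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import ExcludeConstraint, INT4RANGE

    metadata = MetaData()
    bookings = Table(
        'bookings', metadata,
        Column('room', Integer),
        Column('during', INT4RANGE),
        # one (column, operator) pair per element; "using" defaults to 'gist'
        ExcludeConstraint(('room', '='), ('during', '&&'),
                          name='excl_room_during'),
    )
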
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
deleted file mode 100644
index 76562088..00000000
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ /dev/null
@@ -1,369 +0,0 @@
-# postgresql/hstore.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-import re
-
-from .base import ARRAY, ischema_names
-from ... import types as sqltypes
-from ...sql import functions as sqlfunc
-from ...sql.operators import custom_op
-from ... import util
-
-__all__ = ('HSTORE', 'hstore')
-
-# My best guess at the parsing rules of hstore literals, since no formal
-# grammar is given. This is mostly reverse engineered from PG's input parser
-# behavior.
-HSTORE_PAIR_RE = re.compile(r"""
-(
- "(?P (\\ . | [^"])* )" # Quoted key
-)
-[ ]* => [ ]* # Pair operator, optional adjoining whitespace
-(
- (?P NULL ) # NULL value
- | "(?P (\\ . | [^"])* )" # Quoted value
-)
-""", re.VERBOSE)
-
-HSTORE_DELIMITER_RE = re.compile(r"""
-[ ]* , [ ]*
-""", re.VERBOSE)
-
-
-def _parse_error(hstore_str, pos):
- """format an unmarshalling error."""
-
- ctx = 20
- hslen = len(hstore_str)
-
- parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
- residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
-
- if len(parsed_tail) > ctx:
- parsed_tail = '[...]' + parsed_tail[1:]
- if len(residual) > ctx:
- residual = residual[:-1] + '[...]'
-
- return "After %r, could not parse residual at position %d: %r" % (
- parsed_tail, pos, residual)
-
-
-def _parse_hstore(hstore_str):
- """Parse an hstore from it's literal string representation.
-
- Attempts to approximate PG's hstore input parsing rules as closely as
- possible. Although currently this is not strictly necessary, since the
- current implementation of hstore's output syntax is stricter than what it
- accepts as input, the documentation makes no guarantees that will always
- be the case.
-
-
-
- """
- result = {}
- pos = 0
- pair_match = HSTORE_PAIR_RE.match(hstore_str)
-
- while pair_match is not None:
- key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\")
- if pair_match.group('value_null'):
- value = None
- else:
- value = pair_match.group('value').replace(r'\"', '"').replace("\\\\", "\\")
- result[key] = value
-
- pos += pair_match.end()
-
- delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
- if delim_match is not None:
- pos += delim_match.end()
-
- pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
-
- if pos != len(hstore_str):
- raise ValueError(_parse_error(hstore_str, pos))
-
- return result
-
-
-def _serialize_hstore(val):
- """Serialize a dictionary into an hstore literal. Keys and values must
- both be strings (except None for values).
-
- """
- def esc(s, position):
- if position == 'value' and s is None:
- return 'NULL'
- elif isinstance(s, util.string_types):
- return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
- else:
- raise ValueError("%r in %s position is not a string." %
- (s, position))
-
- return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
- for k, v in val.items())
-
-
-class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
- """Represent the Postgresql HSTORE type.
-
- The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
-
- data_table = Table('data_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', HSTORE)
- )
-
- with engine.connect() as conn:
- conn.execute(
- data_table.insert(),
- data = {"key1": "value1", "key2": "value2"}
- )
-
- :class:`.HSTORE` provides for a wide range of operations, including:
-
- * Index operations::
-
- data_table.c.data['some key'] == 'some value'
-
- * Containment operations::
-
- data_table.c.data.has_key('some key')
-
- data_table.c.data.has_all(['one', 'two', 'three'])
-
- * Concatenation::
-
- data_table.c.data + {"k1": "v1"}
-
- For a full list of special methods see :class:`.HSTORE.comparator_factory`.
-
- For usage with the SQLAlchemy ORM, it may be desirable to combine
- the usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary
- type, now part of the :mod:`sqlalchemy.ext.mutable`
- extension. This extension will allow "in-place" changes to the
- dictionary, e.g. addition of new keys or replacement/removal of existing
- keys to/from the current dictionary, to produce events which will be detected
- by the unit of work::
-
- from sqlalchemy.ext.mutable import MutableDict
-
- class MyClass(Base):
- __tablename__ = 'data_table'
-
- id = Column(Integer, primary_key=True)
- data = Column(MutableDict.as_mutable(HSTORE))
-
- my_object = session.query(MyClass).one()
-
- # in-place mutation, requires Mutable extension
- # in order for the ORM to detect
- my_object.data['some_key'] = 'some value'
-
- session.commit()
-
- When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
- will not be alerted to any changes to the contents of an existing dictionary,
- unless that dictionary value is re-assigned to the HSTORE-attribute itself,
- thus generating a change event.
-
- .. versionadded:: 0.8
-
- .. seealso::
-
- :class:`.hstore` - render the Postgresql ``hstore()`` function.
-
-
- """
-
- __visit_name__ = 'HSTORE'
-
- class comparator_factory(sqltypes.Concatenable.Comparator):
- """Define comparison operations for :class:`.HSTORE`."""
-
- def has_key(self, other):
- """Boolean expression. Test for presence of a key. Note that the
- key may be a SQLA expression.
- """
- return self.expr.op('?')(other)
-
- def has_all(self, other):
- """Boolean expression. Test for presence of all keys in the PG
- array.
- """
- return self.expr.op('?&')(other)
-
- def has_any(self, other):
- """Boolean expression. Test for presence of any key in the PG
- array.
- """
- return self.expr.op('?|')(other)
-
- def defined(self, key):
- """Boolean expression. Test for presence of a non-NULL value for
- the key. Note that the key may be a SQLA expression.
- """
- return _HStoreDefinedFunction(self.expr, key)
-
- def contains(self, other, **kwargs):
- """Boolean expression. Test if keys are a superset of the keys of
- the argument hstore expression.
- """
- return self.expr.op('@>')(other)
-
- def contained_by(self, other):
- """Boolean expression. Test if keys are a proper subset of the
- keys of the argument hstore expression.
- """
- return self.expr.op('<@')(other)
-
- def __getitem__(self, other):
- """Text expression. Get the value at a given key. Note that the
- key may be a SQLA expression.
- """
- return self.expr.op('->', precedence=5)(other)
-
- def delete(self, key):
- """HStore expression. Returns the contents of this hstore with the
- given key deleted. Note that the key may be a SQLA expression.
- """
- if isinstance(key, dict):
- key = _serialize_hstore(key)
- return _HStoreDeleteFunction(self.expr, key)
-
- def slice(self, array):
- """HStore expression. Returns a subset of an hstore defined by
- array of keys.
- """
- return _HStoreSliceFunction(self.expr, array)
-
- def keys(self):
- """Text array expression. Returns array of keys."""
- return _HStoreKeysFunction(self.expr)
-
- def vals(self):
- """Text array expression. Returns array of values."""
- return _HStoreValsFunction(self.expr)
-
- def array(self):
- """Text array expression. Returns array of alternating keys and
- values.
- """
- return _HStoreArrayFunction(self.expr)
-
- def matrix(self):
- """Text array expression. Returns array of [key, value] pairs."""
- return _HStoreMatrixFunction(self.expr)
-
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, custom_op):
- if op.opstring in ['?', '?&', '?|', '@>', '<@']:
- return op, sqltypes.Boolean
- elif op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
- def bind_processor(self, dialect):
- if util.py2k:
- encoding = dialect.encoding
- def process(value):
- if isinstance(value, dict):
- return _serialize_hstore(value).encode(encoding)
- else:
- return value
- else:
- def process(value):
- if isinstance(value, dict):
- return _serialize_hstore(value)
- else:
- return value
- return process
-
- def result_processor(self, dialect, coltype):
- if util.py2k:
- encoding = dialect.encoding
- def process(value):
- if value is not None:
- return _parse_hstore(value.decode(encoding))
- else:
- return value
- else:
- def process(value):
- if value is not None:
- return _parse_hstore(value)
- else:
- return value
- return process
-
-
-ischema_names['hstore'] = HSTORE
-
-
-class hstore(sqlfunc.GenericFunction):
- """Construct an hstore value within a SQL expression using the
- Postgresql ``hstore()`` function.
-
- The :class:`.hstore` function accepts one or two arguments as described
- in the Postgresql documentation.
-
- E.g.::
-
- from sqlalchemy.dialects.postgresql import array, hstore
-
- select([hstore('key1', 'value1')])
-
- select([
- hstore(
- array(['key1', 'key2', 'key3']),
- array(['value1', 'value2', 'value3'])
- )
- ])
-
- .. versionadded:: 0.8
-
- .. seealso::
-
- :class:`.HSTORE` - the Postgresql ``HSTORE`` datatype.
-
- """
- type = HSTORE
- name = 'hstore'
-
-
-class _HStoreDefinedFunction(sqlfunc.GenericFunction):
- type = sqltypes.Boolean
- name = 'defined'
-
-
-class _HStoreDeleteFunction(sqlfunc.GenericFunction):
- type = HSTORE
- name = 'delete'
-
-
-class _HStoreSliceFunction(sqlfunc.GenericFunction):
- type = HSTORE
- name = 'slice'
-
-
-class _HStoreKeysFunction(sqlfunc.GenericFunction):
- type = ARRAY(sqltypes.Text)
- name = 'akeys'
-
-
-class _HStoreValsFunction(sqlfunc.GenericFunction):
- type = ARRAY(sqltypes.Text)
- name = 'avals'
-
-
-class _HStoreArrayFunction(sqlfunc.GenericFunction):
- type = ARRAY(sqltypes.Text)
- name = 'hstore_to_array'
-
-
-class _HStoreMatrixFunction(sqlfunc.GenericFunction):
- type = ARRAY(sqltypes.Text)
- name = 'hstore_to_matrix'
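
To connect the comparator methods above to a concrete (hypothetical) table, a minimal sketch of querying an HSTORE column — has_key() renders the ``?`` operator and indexing renders ``->``, per _adapt_expression::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import HSTORE

    metadata = MetaData()
    data_table = Table(
        'data_table', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', HSTORE),
    )

    # "... WHERE data ? :data_1", selecting "data -> :data_2"
    stmt = select([data_table.c.data['some key']]).where(
        data_table.c.data.has_key('some key'))
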
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
deleted file mode 100644
index 2e29185e..00000000
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# postgresql/json.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from __future__ import absolute_import
-
-import json
-
-from .base import ischema_names
-from ... import types as sqltypes
-from ...sql.operators import custom_op
-from ... import sql
-from ...sql import elements
-from ... import util
-
-__all__ = ('JSON', 'JSONElement')
-
-
-class JSONElement(elements.BinaryExpression):
- """Represents accessing an element of a :class:`.JSON` value.
-
- The :class:`.JSONElement` is produced whenever using the Python index
- operator on an expression that has the type :class:`.JSON`::
-
- expr = mytable.c.json_data['some_key']
-
- The expression typically compiles to a JSON access such as ``col -> key``.
- Modifiers are then available for typing behavior, including :meth:`.JSONElement.cast`
- and :attr:`.JSONElement.astext`.
-
- """
- def __init__(self, left, right, astext=False, opstring=None, result_type=None):
- self._astext = astext
- if opstring is None:
- if hasattr(right, '__iter__') and \
- not isinstance(right, util.string_types):
- opstring = "#>"
- right = "{%s}" % (", ".join(util.text_type(elem) for elem in right))
- else:
- opstring = "->"
-
- self._json_opstring = opstring
- operator = custom_op(opstring, precedence=5)
- right = left._check_literal(left, operator, right)
- super(JSONElement, self).__init__(left, right, operator, type_=result_type)
-
- @property
- def astext(self):
- """Convert this :class:`.JSONElement` to use the 'astext' operator
- when evaluated.
-
- E.g.::
-
- select([data_table.c.data['some key'].astext])
-
- .. seealso::
-
- :meth:`.JSONElement.cast`
-
- """
- if self._astext:
- return self
- else:
- return JSONElement(
- self.left,
- self.right,
- astext=True,
- opstring=self._json_opstring + ">",
- result_type=sqltypes.String(convert_unicode=True)
- )
-
- def cast(self, type_):
- """Convert this :class:`.JSONElement` to apply both the 'astext' operator
- as well as an explicit type cast when evaluated.
-
- E.g.::
-
- select([data_table.c.data['some key'].cast(Integer)])
-
- .. seealso::
-
- :attr:`.JSONElement.astext`
-
- """
- if not self._astext:
- return self.astext.cast(type_)
- else:
- return sql.cast(self, type_)
-
-
-class JSON(sqltypes.TypeEngine):
- """Represent the Postgresql JSON type.
-
- The :class:`.JSON` type stores arbitrary JSON format data, e.g.::
-
- data_table = Table('data_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', JSON)
- )
-
- with engine.connect() as conn:
- conn.execute(
- data_table.insert(),
- data = {"key1": "value1", "key2": "value2"}
- )
-
- :class:`.JSON` provides several operations:
-
- * Index operations::
-
- data_table.c.data['some key']
-
- * Index operations returning text (required for text comparison)::
-
- data_table.c.data['some key'].astext == 'some value'
-
- * Index operations with a built-in CAST call::
-
- data_table.c.data['some key'].cast(Integer) == 5
-
- * Path index operations::
-
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')]
-
- * Path index operations returning text (required for text comparison)::
-
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value'
-
- Index operations return an instance of :class:`.JSONElement`, which represents
- an expression such as ``column -> index``. This element then defines
- methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast`
- for setting up type behavior.
-
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect
- in-place mutations to the structure. In order to detect these, the
- :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
- allow "in-place" changes to the datastructure to produce events which
- will be detected by the unit of work. See the example at :class:`.HSTORE`
- for a simple example involving a dictionary.
-
- Custom serializers and deserializers are specified at the dialect level,
- that is using :func:`.create_engine`. The reason for this is that when
- using psycopg2, the DBAPI only allows serializers at the per-cursor
- or per-connection level. E.g.::
-
- engine = create_engine("postgresql://scott:tiger@localhost/test",
- json_serializer=my_serialize_fn,
- json_deserializer=my_deserialize_fn
- )
-
- When using the psycopg2 dialect, the json_deserializer is registered
- against the database using ``psycopg2.extras.register_default_json``.
-
- .. versionadded:: 0.9
-
- """
-
- __visit_name__ = 'JSON'
-
- class comparator_factory(sqltypes.Concatenable.Comparator):
- """Define comparison operations for :class:`.JSON`."""
-
- def __getitem__(self, other):
- """Get the value at a given key."""
-
- return JSONElement(self.expr, other)
-
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, custom_op):
- if op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
- def bind_processor(self, dialect):
- json_serializer = dialect._json_serializer or json.dumps
- if util.py2k:
- encoding = dialect.encoding
- def process(value):
- return json_serializer(value).encode(encoding)
- else:
- def process(value):
- return json_serializer(value)
- return process
-
- def result_processor(self, dialect, coltype):
- json_deserializer = dialect._json_deserializer or json.loads
- if util.py2k:
- encoding = dialect.encoding
- def process(value):
- return json_deserializer(value.decode(encoding))
- else:
- def process(value):
- return json_deserializer(value)
- return process
-
-
-ischema_names['json'] = JSON
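
The JSON docstring above defers to the HSTORE example for in-place mutation tracking; a hedged ORM sketch of that combination (class and table names invented) might look like::

    from sqlalchemy import Column, Integer
    from sqlalchemy.dialects.postgresql import JSON
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.mutable import MutableDict

    Base = declarative_base()

    class DataRecord(Base):
        __tablename__ = 'data_table'
        id = Column(Integer, primary_key=True)
        # MutableDict lets the unit of work detect in-place changes such as
        # record.data['some_key'] = 'some value'
        data = Column(MutableDict.as_mutable(JSON))
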
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
deleted file mode 100644
index bc73f975..00000000
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# postgresql/pg8000.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: postgresql+pg8000
- :name: pg8000
- :dbapi: pg8000
- :connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
- :url: http://pybrary.net/pg8000/
-
-Unicode
--------
-
-pg8000 requires that the postgresql client encoding be
-configured in the postgresql.conf file in order to use encodings
-other than ascii. Set this value to the same value as the
-"encoding" parameter on create_engine(), usually "utf-8".
-
-Interval
---------
-
-Passing data from/to the Interval type is not supported as of
-yet.
-
-"""
-from ... import util, exc
-import decimal
-from ... import processors
-from ... import types as sqltypes
-from .base import PGDialect, \
- PGCompiler, PGIdentifierPreparer, PGExecutionContext,\
- _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES
-
-
-class _PGNumeric(sqltypes.Numeric):
- def result_processor(self, dialect, coltype):
- if self.asdecimal:
- if coltype in _FLOAT_TYPES:
- return processors.to_decimal_processor_factory(
- decimal.Decimal,
- self._effective_decimal_return_scale)
- elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
- # pg8000 returns Decimal natively for 1700
- return None
- else:
- raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
- else:
- if coltype in _FLOAT_TYPES:
- # pg8000 returns float natively for 701
- return None
- elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
- return processors.to_float
- else:
- raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
-
-
-class _PGNumericNoBind(_PGNumeric):
- def bind_processor(self, dialect):
- return None
-
-
-class PGExecutionContext_pg8000(PGExecutionContext):
- pass
-
-
-class PGCompiler_pg8000(PGCompiler):
- def visit_mod_binary(self, binary, operator, **kw):
- return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
-
- def post_process_text(self, text):
- if '%%' in text:
- util.warn("The SQLAlchemy postgresql dialect "
- "now automatically escapes '%' in text() "
- "expressions to '%%'.")
- return text.replace('%', '%%')
-
-
-class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
- def _escape_identifier(self, value):
- value = value.replace(self.escape_quote, self.escape_to_quote)
- return value.replace('%', '%%')
-
-
-class PGDialect_pg8000(PGDialect):
- driver = 'pg8000'
-
- supports_unicode_statements = True
-
- supports_unicode_binds = True
-
- default_paramstyle = 'format'
- supports_sane_multi_rowcount = False
- execution_ctx_cls = PGExecutionContext_pg8000
- statement_compiler = PGCompiler_pg8000
- preparer = PGIdentifierPreparer_pg8000
- description_encoding = 'use_encoding'
-
- colspecs = util.update_copy(
- PGDialect.colspecs,
- {
- sqltypes.Numeric: _PGNumericNoBind,
- sqltypes.Float: _PGNumeric
- }
- )
-
- @classmethod
- def dbapi(cls):
- return __import__('pg8000').dbapi
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['port'] = int(opts['port'])
- opts.update(url.query)
- return ([], opts)
-
- def is_disconnect(self, e, connection, cursor):
- return "connection is closed" in str(e)
-
-dialect = PGDialect_pg8000
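
A hedged connection sketch for the pg8000 dialect above; the host, credentials and database name are placeholders::

    from sqlalchemy import create_engine

    # create_connect_args() coerces "port" to int; "encoding" should match
    # the client encoding configured in postgresql.conf, per the note above.
    engine = create_engine(
        "postgresql+pg8000://user:password@localhost:5432/dbname",
        encoding='utf-8')
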
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
deleted file mode 100644
index ac177062..00000000
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ /dev/null
@@ -1,515 +0,0 @@
-# postgresql/psycopg2.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: postgresql+psycopg2
- :name: psycopg2
- :dbapi: psycopg2
- :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
- :url: http://pypi.python.org/pypi/psycopg2/
-
-psycopg2 Connect Arguments
------------------------------------
-
-psycopg2-specific keyword arguments which are accepted by
-:func:`.create_engine()` are:
-
-* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
- statements which support this feature. What this essentially means from a
- psycopg2 point of view is that the cursor is created using a name, e.g.
- ``connection.cursor('some name')``, which has the effect that result rows are
- not immediately pre-fetched and buffered after statement execution, but are
- instead left on the server and only retrieved as needed. SQLAlchemy's
- :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
- behavior when this feature is enabled, such that groups of 100 rows at a
- time are fetched over the wire to reduce conversational overhead.
- Note that the ``stream_results=True`` execution option is a more targeted
- way of enabling this mode on a per-execution basis.
-* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
- per connection. True by default.
-* ``isolation_level``: This option, available for all Postgresql dialects,
- includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
- dialect. See :ref:`psycopg2_isolation_level`.
-
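
A short, hedged sketch of passing the keyword arguments listed above to ``create_engine()`` (URL and credentials are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        server_side_cursors=True,      # named (server side) cursors for SELECTs
        use_native_unicode=True,       # the default, shown for illustration
        isolation_level="AUTOCOMMIT",  # psycopg2-supported level, see below
    )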
-
-Unix Domain Connections
-------------------------
-
-psycopg2 supports connecting via Unix domain connections. When the ``host``
-portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
-which specifies Unix-domain communication rather than TCP/IP communication::
-
- create_engine("postgresql+psycopg2://user:password@/dbname")
-
-By default, the connection is made to a Unix-domain socket
-in ``/tmp``, or whatever socket directory was specified when PostgreSQL
-was built. This value can be overridden by passing a pathname to psycopg2,
-using ``host`` as an additional keyword argument::
-
- create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
-
-See also:
-
-`PQconnectdbParams `_
-
-Per-Statement/Connection Execution Options
--------------------------------------------
-
-The following DBAPI-specific options are respected when used with
-:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
-:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
-
-* isolation_level - Set the transaction isolation level for the lifespan of a
- :class:`.Connection` (can only be set on a connection, not a statement
- or query). See :ref:`psycopg2_isolation_level`.
-
-* stream_results - Enable or disable usage of psycopg2 server side cursors -
- this feature makes use of "named" cursors in combination with special
- result handling methods so that result rows are not fully buffered.
- If ``None`` or not set, the ``server_side_cursors`` option of the
- :class:`.Engine` is used.
-
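As a sketch of the two options just described (the URL is a placeholder and a running Postgresql server is assumed)::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    conn = engine.connect()

    # isolation_level applies to the Connection for its remaining lifespan
    conn = conn.execution_options(isolation_level="SERIALIZABLE")

    # stream_results requests a server side (named) cursor for this statement
    result = conn.execution_options(stream_results=True).execute("select 1")
    conn.close()
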
-Unicode
--------
-
-By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
-extension, such that the DBAPI receives and returns all strings as Python
-Unicode objects directly - SQLAlchemy passes these values through without
-change. Psycopg2 here will encode/decode string values based on the
-current "client encoding" setting; by default this is the value in
-the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
-Typically, this can be changed to ``utf-8``, as a more useful default::
-
- #client_encoding = sql_ascii # actually, defaults to database
- # encoding
- client_encoding = utf8
-
-A second way to affect the client encoding is to set it within Psycopg2
-locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
-method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
-on all new connections based on the value passed to
-:func:`.create_engine` using the ``client_encoding`` parameter::
-
- engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
-
-This overrides the encoding specified in the Postgresql client configuration.
-
-.. versionadded:: 0.7.3
- The psycopg2-specific ``client_encoding`` parameter to
- :func:`.create_engine`.
-
-SQLAlchemy can also be instructed to skip the usage of the psycopg2
-``UNICODE`` extension and to instead utilize its own unicode encode/decode
-services, which are normally reserved only for those DBAPIs that don't
-fully support unicode directly. Passing ``use_native_unicode=False`` to
-:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
-SQLAlchemy will instead encode data itself into Python bytestrings on the way
-in and coerce from bytes on the way back,
-using the value of the :func:`.create_engine` ``encoding`` parameter, which
-defaults to ``utf-8``.
-SQLAlchemy's own unicode encode/decode functionality is steadily becoming
-obsolete as more DBAPIs support unicode fully along with the approach of
-Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
-
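For completeness, a sketch of disabling the extension as described (URL is a placeholder)::

    from sqlalchemy import create_engine

    # SQLAlchemy performs the encode/decode itself using the ``encoding``
    # parameter, which defaults to utf-8
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        use_native_unicode=False,
        encoding="utf-8",
    )
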
-Transactions
-------------
-
-The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
-
-.. _psycopg2_isolation_level:
-
-Psycopg2 Transaction Isolation Level
--------------------------------------
-
-As discussed in :ref:`postgresql_isolation_level`,
-all Postgresql dialects support setting of transaction isolation level
-both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
-as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`.
-When using the psycopg2 dialect, these options make use of
-psycopg2's ``set_isolation_level()`` connection method, rather than
-emitting a Postgresql directive; this is because psycopg2's API-level
-setting is always emitted at the start of each transaction in any case.
-
-The psycopg2 dialect supports these constants for isolation level:
-
-* ``READ COMMITTED``
-* ``READ UNCOMMITTED``
-* ``REPEATABLE READ``
-* ``SERIALIZABLE``
-* ``AUTOCOMMIT``
-
-.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
- psycopg2.
-
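A brief sketch showing both ways of selecting one of the levels above (URL is a placeholder)::

    from sqlalchemy import create_engine

    # engine-wide default isolation level
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="REPEATABLE READ",
    )

    # per-connection override, including AUTOCOMMIT
    conn = engine.connect().execution_options(isolation_level="AUTOCOMMIT")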
-
-NOTICE logging
----------------
-
-The psycopg2 dialect will log Postgresql NOTICE messages via the
-``sqlalchemy.dialects.postgresql`` logger::
-
- import logging
- logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
-
-.. _psycopg2_hstore:
-
-HSTORE type
-------------
-
-The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the
-HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
-by default when it is detected that the target database has the HSTORE
-type set up for use. In other words, when the dialect makes the first
-connection, a sequence like the following is performed:
-
-1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``.
- If this function returns a list of HSTORE identifiers, we then determine that
- the ``HSTORE`` extension is present.
-
-2. If the ``use_native_hstore`` flag is at its default of ``True``, and
- we've detected that ``HSTORE`` oids are available, the
- ``psycopg2.extensions.register_hstore()`` extension is invoked for all
- connections.
-
-The ``register_hstore()`` extension has the effect of **all Python dictionaries
-being accepted as parameters regardless of the type of target column in SQL**.
-The dictionaries are converted by this extension into a textual HSTORE expression.
-If this behavior is not desired, disable the
-use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows::
-
- engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
- use_native_hstore=False)
-
-The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()``
-extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE
-string format, on both the parameter side and the result side, will take
-place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which
-may be more performant.
-
-"""
-from __future__ import absolute_import
-
-import re
-import logging
-
-from ... import util, exc
-import decimal
-from ... import processors
-from ...engine import result as _result
-from ...sql import expression
-from ... import types as sqltypes
-from .base import PGDialect, PGCompiler, \
- PGIdentifierPreparer, PGExecutionContext, \
- ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
- _INT_TYPES
-from .hstore import HSTORE
-from .json import JSON
-
-
-logger = logging.getLogger('sqlalchemy.dialects.postgresql')
-
-
-class _PGNumeric(sqltypes.Numeric):
- def bind_processor(self, dialect):
- return None
-
- def result_processor(self, dialect, coltype):
- if self.asdecimal:
- if coltype in _FLOAT_TYPES:
- return processors.to_decimal_processor_factory(
- decimal.Decimal,
- self._effective_decimal_return_scale)
- elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
- # psycopg2 returns Decimal natively for 1700
- return None
- else:
- raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
- else:
- if coltype in _FLOAT_TYPES:
- # psycopg2 returns float natively for 701
- return None
- elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
- return processors.to_float
- else:
- raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
-
-
-class _PGEnum(ENUM):
- def result_processor(self, dialect, coltype):
- if util.py2k and self.convert_unicode is True:
- # we can't easily use PG's extensions here because
- # the OID is on the fly, and we need to give it a python
- # function anyway - not really worth it.
- self.convert_unicode = "force_nocheck"
- return super(_PGEnum, self).result_processor(dialect, coltype)
-
-class _PGHStore(HSTORE):
- def bind_processor(self, dialect):
- if dialect._has_native_hstore:
- return None
- else:
- return super(_PGHStore, self).bind_processor(dialect)
-
- def result_processor(self, dialect, coltype):
- if dialect._has_native_hstore:
- return None
- else:
- return super(_PGHStore, self).result_processor(dialect, coltype)
-
-
-class _PGJSON(JSON):
-
- def result_processor(self, dialect, coltype):
- if dialect._has_native_json:
- return None
- else:
- return super(_PGJSON, self).result_processor(dialect, coltype)
-
-# When we're handed literal SQL, ensure it's a SELECT-query. Since
-# 8.3, combining cursors and "FOR UPDATE" has been fine.
-SERVER_SIDE_CURSOR_RE = re.compile(
- r'\s*SELECT',
- re.I | re.UNICODE)
-
-_server_side_id = util.counter()
-
-
-class PGExecutionContext_psycopg2(PGExecutionContext):
- def create_cursor(self):
- # TODO: coverage for server side cursors + select.for_update()
-
- if self.dialect.server_side_cursors:
- is_server_side = \
- self.execution_options.get('stream_results', True) and (
- (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
- or \
- (
- (not self.compiled or
- isinstance(self.compiled.statement, expression.TextClause))
- and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
- )
- )
- else:
- is_server_side = \
- self.execution_options.get('stream_results', False)
-
- self.__is_server_side = is_server_side
- if is_server_side:
- # use server-side cursors:
- # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
- ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
- return self._dbapi_connection.cursor(ident)
- else:
- return self._dbapi_connection.cursor()
-
- def get_result_proxy(self):
- # TODO: ouch
- if logger.isEnabledFor(logging.INFO):
- self._log_notices(self.cursor)
-
- if self.__is_server_side:
- return _result.BufferedRowResultProxy(self)
- else:
- return _result.ResultProxy(self)
-
- def _log_notices(self, cursor):
- for notice in cursor.connection.notices:
- # NOTICE messages have a
- # newline character at the end
- logger.info(notice.rstrip())
-
- cursor.connection.notices[:] = []
-
-
-class PGCompiler_psycopg2(PGCompiler):
- def visit_mod_binary(self, binary, operator, **kw):
- return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
-
- def post_process_text(self, text):
- return text.replace('%', '%%')
-
-
-class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
- def _escape_identifier(self, value):
- value = value.replace(self.escape_quote, self.escape_to_quote)
- return value.replace('%', '%%')
-
-
-class PGDialect_psycopg2(PGDialect):
- driver = 'psycopg2'
- if util.py2k:
- supports_unicode_statements = False
-
- default_paramstyle = 'pyformat'
- supports_sane_multi_rowcount = False # set to true based on psycopg2 version
- execution_ctx_cls = PGExecutionContext_psycopg2
- statement_compiler = PGCompiler_psycopg2
- preparer = PGIdentifierPreparer_psycopg2
- psycopg2_version = (0, 0)
-
- _has_native_hstore = False
- _has_native_json = False
-
- colspecs = util.update_copy(
- PGDialect.colspecs,
- {
- sqltypes.Numeric: _PGNumeric,
- ENUM: _PGEnum, # needs force_unicode
- sqltypes.Enum: _PGEnum, # needs force_unicode
- HSTORE: _PGHStore,
- JSON: _PGJSON
- }
- )
-
- def __init__(self, server_side_cursors=False, use_native_unicode=True,
- client_encoding=None,
- use_native_hstore=True,
- **kwargs):
- PGDialect.__init__(self, **kwargs)
- self.server_side_cursors = server_side_cursors
- self.use_native_unicode = use_native_unicode
- self.use_native_hstore = use_native_hstore
- self.supports_unicode_binds = use_native_unicode
- self.client_encoding = client_encoding
- if self.dbapi and hasattr(self.dbapi, '__version__'):
- m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
- self.dbapi.__version__)
- if m:
- self.psycopg2_version = tuple(
- int(x)
- for x in m.group(1, 2, 3)
- if x is not None)
-
- def initialize(self, connection):
- super(PGDialect_psycopg2, self).initialize(connection)
- self._has_native_hstore = self.use_native_hstore and \
- self._hstore_oids(connection.connection) \
- is not None
- self._has_native_json = self.psycopg2_version >= (2, 5)
-
- # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
- self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9)
-
- @classmethod
- def dbapi(cls):
- import psycopg2
- return psycopg2
-
- @util.memoized_property
- def _isolation_lookup(self):
- from psycopg2 import extensions
- return {
- 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
- 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
- 'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
- 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
- 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
- }
-
- def set_isolation_level(self, connection, level):
- try:
- level = self._isolation_lookup[level.replace('_', ' ')]
- except KeyError:
- raise exc.ArgumentError(
- "Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
- )
-
- connection.set_isolation_level(level)
-
- def on_connect(self):
- from psycopg2 import extras, extensions
-
- fns = []
- if self.client_encoding is not None:
- def on_connect(conn):
- conn.set_client_encoding(self.client_encoding)
- fns.append(on_connect)
-
- if self.isolation_level is not None:
- def on_connect(conn):
- self.set_isolation_level(conn, self.isolation_level)
- fns.append(on_connect)
-
- if self.dbapi and self.use_native_unicode:
- def on_connect(conn):
- extensions.register_type(extensions.UNICODE, conn)
- extensions.register_type(extensions.UNICODEARRAY, conn)
- fns.append(on_connect)
-
- if self.dbapi and self.use_native_hstore:
- def on_connect(conn):
- hstore_oids = self._hstore_oids(conn)
- if hstore_oids is not None:
- oid, array_oid = hstore_oids
- if util.py2k:
- extras.register_hstore(conn, oid=oid,
- array_oid=array_oid,
- unicode=True)
- else:
- extras.register_hstore(conn, oid=oid,
- array_oid=array_oid)
- fns.append(on_connect)
-
- if self.dbapi and self._json_deserializer:
- def on_connect(conn):
- extras.register_default_json(conn, loads=self._json_deserializer)
- fns.append(on_connect)
-
- if fns:
- def on_connect(conn):
- for fn in fns:
- fn(conn)
- return on_connect
- else:
- return None
-
- @util.memoized_instancemethod
- def _hstore_oids(self, conn):
- if self.psycopg2_version >= (2, 4):
- from psycopg2 import extras
- oids = extras.HstoreAdapter.get_oids(conn)
- if oids is not None and oids[0]:
- return oids[0:2]
- return None
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['port'] = int(opts['port'])
- opts.update(url.query)
- return ([], opts)
-
- def is_disconnect(self, e, connection, cursor):
- if isinstance(e, self.dbapi.Error):
- str_e = str(e).partition("\n")[0]
- for msg in [
- # these error messages from libpq: interfaces/libpq/fe-misc.c
- # and interfaces/libpq/fe-secure.c.
- # TODO: these are sent through gettext in libpq and we can't
- # check within other locales - consider using connection.closed
- 'terminating connection',
- 'closed the connection',
- 'connection not open',
- 'could not receive data from server',
- 'could not send data to server',
- # psycopg2 client errors, psycopg2/connection.h, psycopg2/cursor.h
- 'connection already closed',
- 'cursor already closed',
- # not sure where this path is originally from, it may
- # be obsolete. It really says "losed", not "closed".
- 'losed the connection unexpectedly'
- ]:
- idx = str_e.find(msg)
- if idx >= 0 and '"' not in str_e[:idx]:
- return True
- return False
-
-dialect = PGDialect_psycopg2
diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
deleted file mode 100644
index f030d2c1..00000000
--- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# postgresql/pypostgresql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: postgresql+pypostgresql
- :name: py-postgresql
- :dbapi: pypostgresql
- :connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
- :url: http://python.projects.pgfoundry.org/
-
-
-"""
-from ... import util
-from ... import types as sqltypes
-from .base import PGDialect, PGExecutionContext
-from ... import processors
-
-
-class PGNumeric(sqltypes.Numeric):
- def bind_processor(self, dialect):
- return processors.to_str
-
- def result_processor(self, dialect, coltype):
- if self.asdecimal:
- return None
- else:
- return processors.to_float
-
-
-class PGExecutionContext_pypostgresql(PGExecutionContext):
- pass
-
-
-class PGDialect_pypostgresql(PGDialect):
- driver = 'pypostgresql'
-
- supports_unicode_statements = True
- supports_unicode_binds = True
- description_encoding = None
- default_paramstyle = 'pyformat'
-
- # requires trunk version to support sane rowcounts
- # TODO: use dbapi version information to set this flag appropriately
- supports_sane_rowcount = True
- supports_sane_multi_rowcount = False
-
- execution_ctx_cls = PGExecutionContext_pypostgresql
- colspecs = util.update_copy(
- PGDialect.colspecs,
- {
- sqltypes.Numeric: PGNumeric,
-
- # prevents PGNumeric from being used
- sqltypes.Float: sqltypes.Float,
- }
- )
-
- @classmethod
- def dbapi(cls):
- from postgresql.driver import dbapi20
- return dbapi20
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['port'] = int(opts['port'])
- else:
- opts['port'] = 5432
- opts.update(url.query)
- return ([], opts)
-
- def is_disconnect(self, e, connection, cursor):
- return "connection is closed" in str(e)
-
-dialect = PGDialect_pypostgresql
diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py
deleted file mode 100644
index 57b0c4c3..00000000
--- a/lib/sqlalchemy/dialects/postgresql/ranges.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from .base import ischema_names
-from ... import types as sqltypes
-
-__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE')
-
-class RangeOperators(object):
- """
- This mixin provides functionality for the Range Operators
- listed in Table 9-44 of the `postgres documentation`__ for Range
- Functions and Operators. It is used by all the range types
- provided in the ``postgres`` dialect and can likely be used for
- any range types you create yourself.
-
- __ http://www.postgresql.org/docs/devel/static/functions-range.html
-
- No extra support is provided for the Range Functions listed in
- Table 9-45 of the postgres documentation. For these, the normal
- :func:`~sqlalchemy.sql.expression.func` object should be used.
-
- .. versionadded:: 0.8.2 Support for Postgresql RANGE operations.
-
- """
-
- class comparator_factory(sqltypes.Concatenable.Comparator):
- """Define comparison operations for range types."""
-
- def __ne__(self, other):
- "Boolean expression. Returns true if two ranges are not equal"
- return self.expr.op('<>')(other)
-
- def contains(self, other, **kw):
- """Boolean expression. Returns true if the right hand operand,
- which can be an element or a range, is contained within the
- column.
- """
- return self.expr.op('@>')(other)
-
- def contained_by(self, other):
- """Boolean expression. Returns true if the column is contained
- within the right hand operand.
- """
- return self.expr.op('<@')(other)
-
- def overlaps(self, other):
- """Boolean expression. Returns true if the column overlaps
- (has points in common with) the right hand operand.
- """
- return self.expr.op('&&')(other)
-
- def strictly_left_of(self, other):
- """Boolean expression. Returns true if the column is strictly
- left of the right hand operand.
- """
- return self.expr.op('<<')(other)
-
- __lshift__ = strictly_left_of
-
- def strictly_right_of(self, other):
- """Boolean expression. Returns true if the column is strictly
- right of the right hand operand.
- """
- return self.expr.op('>>')(other)
-
- __rshift__ = strictly_right_of
-
- def not_extend_right_of(self, other):
- """Boolean expression. Returns true if the range in the column
- does not extend right of the range in the operand.
- """
- return self.expr.op('&<')(other)
-
- def not_extend_left_of(self, other):
- """Boolean expression. Returns true if the range in the column
- does not extend left of the range in the operand.
- """
- return self.expr.op('&>')(other)
-
- def adjacent_to(self, other):
- """Boolean expression. Returns true if the range in the column
- is adjacent to the range in the operand.
- """
- return self.expr.op('-|-')(other)
-
- def __add__(self, other):
- """Range expression. Returns the union of the two ranges.
- Will raise an exception if the resulting range is not
- contiguous.
- """
- return self.expr.op('+')(other)
-
-class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
- """Represent the Postgresql INT4RANGE type.
-
- .. versionadded:: 0.8.2
-
- """
-
- __visit_name__ = 'INT4RANGE'
-
-ischema_names['int4range'] = INT4RANGE
-
-class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
- """Represent the Postgresql INT8RANGE type.
-
- .. versionadded:: 0.8.2
-
- """
-
- __visit_name__ = 'INT8RANGE'
-
-ischema_names['int8range'] = INT8RANGE
-
-class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
- """Represent the Postgresql NUMRANGE type.
-
- .. versionadded:: 0.8.2
-
- """
-
- __visit_name__ = 'NUMRANGE'
-
-ischema_names['numrange'] = NUMRANGE
-
-class DATERANGE(RangeOperators, sqltypes.TypeEngine):
- """Represent the Postgresql DATERANGE type.
-
- .. versionadded:: 0.8.2
-
- """
-
- __visit_name__ = 'DATERANGE'
-
-ischema_names['daterange'] = DATERANGE
-
-class TSRANGE(RangeOperators, sqltypes.TypeEngine):
- """Represent the Postgresql TSRANGE type.
-
- .. versionadded:: 0.8.2
-
- """
-
- __visit_name__ = 'TSRANGE'
-
-ischema_names['tsrange'] = TSRANGE
-
-class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
- """Represent the Postgresql TSTZRANGE type.
-
- .. versionadded:: 0.8.2
-
- """
-
- __visit_name__ = 'TSTZRANGE'
-
-ischema_names['tstzrange'] = TSTZRANGE
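
An illustrative sketch of the operator methods defined by ``RangeOperators``, using a hypothetical table; the generated SQL uses the Postgresql range operators listed above::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import INT4RANGE

    metadata = MetaData()
    bookings = Table('bookings', metadata,
                     Column('id', Integer, primary_key=True),
                     Column('during', INT4RANGE))

    # "during @> %(during_1)s" - the range contains the element 7
    stmt = select([bookings]).where(bookings.c.during.contains(7))

    # "during && %(during_1)s" - the two ranges overlap
    stmt = select([bookings]).where(bookings.c.during.overlaps('[10,20)'))
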
diff --git a/lib/sqlalchemy/dialects/postgresql/zxjdbc.py b/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
deleted file mode 100644
index 67e7d53e..00000000
--- a/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# postgresql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: postgresql+zxjdbc
- :name: zxJDBC for Jython
- :dbapi: zxjdbc
- :connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
- :driverurl: http://jdbc.postgresql.org/
-
-
-"""
-from ...connectors.zxJDBC import ZxJDBCConnector
-from .base import PGDialect, PGExecutionContext
-
-
-class PGExecutionContext_zxjdbc(PGExecutionContext):
-
- def create_cursor(self):
- cursor = self._dbapi_connection.cursor()
- cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
- return cursor
-
-
-class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
- jdbc_db_name = 'postgresql'
- jdbc_driver_name = 'org.postgresql.Driver'
-
- execution_ctx_cls = PGExecutionContext_zxjdbc
-
- supports_native_decimal = True
-
- def __init__(self, *args, **kwargs):
- super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
- from com.ziclix.python.sql.handler import PostgresqlDataHandler
- self.DataHandler = PostgresqlDataHandler
-
- def _get_server_version_info(self, connection):
- parts = connection.connection.dbversion.split('.')
- return tuple(int(x) for x in parts)
-
-dialect = PGDialect_zxjdbc
diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py
deleted file mode 100644
index 80846c9e..00000000
--- a/lib/sqlalchemy/dialects/sqlite/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# sqlite/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy.dialects.sqlite import base, pysqlite
-
-# default dialect
-base.dialect = pysqlite.dialect
-
-from sqlalchemy.dialects.sqlite.base import (
- BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER, REAL,
- NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect,
-)
-
-__all__ = ('BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL',
- 'FLOAT', 'INTEGER', 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME',
- 'TIMESTAMP', 'VARCHAR', 'REAL', 'dialect')
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
deleted file mode 100644
index 90df9c19..00000000
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ /dev/null
@@ -1,1049 +0,0 @@
-# sqlite/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: sqlite
- :name: SQLite
-
-
-Date and Time Types
--------------------
-
-SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
-not provide out of the box functionality for translating values between Python
-`datetime` objects and a SQLite-supported format. SQLAlchemy's own
-:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
-and parsing functionality when SQLite is used. The implementation classes are
-:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
-These types represent dates and times as ISO formatted strings, which also
-nicely support ordering. There's no reliance on typical "libc" internals for
-these functions so historical dates are fully supported.
-
-Auto Incrementing Behavior
---------------------------
-
-Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
-
-Two things to note:
-
-* The AUTOINCREMENT keyword is **not** required for SQLite tables to
- generate primary key values automatically. AUTOINCREMENT only means that the
- algorithm used to generate ROWID values should be slightly different.
-* SQLite does **not** generate primary key (i.e. ROWID) values, even for
- one column, if the table has a composite (i.e. multi-column) primary key.
- This is regardless of the AUTOINCREMENT keyword being present or not.
-
-To specifically render the AUTOINCREMENT keyword on the primary key column when
-rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
-construct::
-
- Table('sometable', metadata,
- Column('id', Integer, primary_key=True),
- sqlite_autoincrement=True)
-
-Transaction Isolation Level
----------------------------
-
-:func:`.create_engine` accepts an ``isolation_level`` parameter which results
-in the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
-connection. Valid values for this parameter are ``SERIALIZABLE`` and ``READ
-UNCOMMITTED`` corresponding to a value of 0 and 1, respectively. See the
-section :ref:`pysqlite_serializable` for an important workaround when using
-serializable isolation with Pysqlite.
-
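A brief sketch of the parameter described above; each new connection will emit the corresponding ``PRAGMA read_uncommitted`` command::

    from sqlalchemy import create_engine

    # "READ UNCOMMITTED" maps to PRAGMA read_uncommitted = 1
    engine = create_engine("sqlite:///some.db",
                           isolation_level="READ UNCOMMITTED")
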
-Database Locking Behavior / Concurrency
----------------------------------------
-
-Note that SQLite is not designed for a high level of concurrency. The database
-itself, being a file, is locked completely during write operations and within
-transactions, meaning exactly one connection has exclusive access to the
-database during this period - all other connections will be blocked during this
-time.
-
-The Python DBAPI specification also calls for a connection model that is always
-in a transaction; there is no BEGIN method, only commit and rollback. This
-implies that a SQLite DBAPI driver would technically allow only serialized
-access to a particular database file at all times. The pysqlite driver attempts
-to ameliorate this by deferring the actual BEGIN statement until the first DML
-(INSERT, UPDATE, or DELETE) is received within a transaction. While this breaks
-serializable isolation, it at least delays the exclusive locking inherent in
-SQLite's design.
-
-SQLAlchemy's default mode of usage with the ORM is known as "autocommit=False",
-which means the moment the :class:`.Session` begins to be used, a transaction
-is begun. As the :class:`.Session` is used, the autoflush feature, also on by
-default, will flush out pending changes to the database before each query. The
-effect of this is that a :class:`.Session` used in its default mode will often
-emit DML early on, long before the transaction is actually committed. This
-again will have the effect of serializing access to the SQLite database. If
-highly concurrent reads are desired against the SQLite database, it is advised
-that the autoflush feature be disabled, and potentially even that autocommit be
-re-enabled, which has the effect of each SQL statement and flush committing
-changes immediately.
-
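As a hedged sketch of that suggestion, a Session configured with autoflush disabled (and optionally autocommit re-enabled) defers DML for as long as possible::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite:///some.db")

    # autoflush=False avoids emitting DML before each query; adding
    # autocommit=True would additionally commit each flush immediately
    Session = sessionmaker(bind=engine, autoflush=False)
    session = Session()
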
-For more information on SQLite's lack of concurrency by design, please see
-`Situations Where Another RDBMS May Work Better - High Concurrency
-`_ near the bottom of the page.
-
-.. _sqlite_foreign_keys:
-
-Foreign Key Support
--------------------
-
-SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
-however by default these constraints have no effect on the operation of the
-table.
-
-Constraint checking on SQLite has three prerequisites:
-
-* At least version 3.6.19 of SQLite must be in use
-* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
- or SQLITE_OMIT_TRIGGER symbols enabled.
-* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections
- before use.
-
-SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
-new connections through the usage of events::
-
- from sqlalchemy.engine import Engine
- from sqlalchemy import event
-
- @event.listens_for(Engine, "connect")
- def set_sqlite_pragma(dbapi_connection, connection_record):
- cursor = dbapi_connection.cursor()
- cursor.execute("PRAGMA foreign_keys=ON")
- cursor.close()
-
-.. seealso::
-
- `SQLite Foreign Key Support `_ - on
- the SQLite web site.
-
- :ref:`event_toplevel` - SQLAlchemy event API.
-
-.. _sqlite_type_reflection:
-
-Type Reflection
----------------
-
-SQLite types are unlike those of most other database backends, in that
-the string name of the type usually does not correspond to a "type" in a
-one-to-one fashion. Instead, SQLite links per-column typing behavior
-to one of five so-called "type affinities" based on a string matching
-pattern for the type.
-
-SQLAlchemy's reflection process, when inspecting types, uses a simple
-lookup table to link the keywords returned to provided SQLAlchemy types.
-This lookup table is present within the SQLite dialect as it is for all
-other dialects. However, the SQLite dialect has a different "fallback"
-routine for when a particular type name is not located in the lookup map;
-it instead implements the SQLite "type affinity" scheme located at
-http://www.sqlite.org/datatype3.html section 2.1.
-
-The provided typemap will make direct associations from an exact string
-name match for the following types:
-
-:class:`~.types.BIGINT`, :class:`~.types.BLOB`,
-:class:`~.types.BOOLEAN`, :class:`~.types.CHAR`,
-:class:`~.types.DATE`, :class:`~.types.DATETIME`,
-:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`,
-:class:`~.types.INTEGER`, :class:`~.types.NUMERIC`,
-:class:`~.types.REAL`, :class:`~.types.SMALLINT`,
-:class:`~.types.TEXT`, :class:`~.types.TIME`,
-:class:`~.types.TIMESTAMP`, :class:`~.types.VARCHAR`,
-:class:`~.types.NVARCHAR`, :class:`~.types.NCHAR`
-
-When a type name does not match one of the above types, the "type affinity"
-lookup is used instead:
-
-* :class:`~.types.INTEGER` is returned if the type name includes the
- string ``INT``
-* :class:`~.types.TEXT` is returned if the type name includes the
- string ``CHAR``, ``CLOB`` or ``TEXT``
-* :class:`~.types.NullType` is returned if the type name includes the
- string ``BLOB``
-* :class:`~.types.REAL` is returned if the type name includes the string
- ``REAL``, ``FLOA`` or ``DOUB``.
-* Otherwise, the :class:`~.types.NUMERIC` type is used.
-
-.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
- columns.
-
-"""
-
-import datetime
-import re
-
-from ... import processors
-from ... import sql, exc
-from ... import types as sqltypes, schema as sa_schema
-from ... import util
-from ...engine import default, reflection
-from ...sql import compiler
-
-from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, REAL,
- NUMERIC, SMALLINT, TEXT, TIMESTAMP, VARCHAR)
-
-
-class _DateTimeMixin(object):
- _reg = None
- _storage_format = None
-
- def __init__(self, storage_format=None, regexp=None, **kw):
- super(_DateTimeMixin, self).__init__(**kw)
- if regexp is not None:
- self._reg = re.compile(regexp)
- if storage_format is not None:
- self._storage_format = storage_format
-
- def adapt(self, cls, **kw):
- if issubclass(cls, _DateTimeMixin):
- if self._storage_format:
- kw["storage_format"] = self._storage_format
- if self._reg:
- kw["regexp"] = self._reg
- return super(_DateTimeMixin, self).adapt(cls, **kw)
-
- def literal_processor(self, dialect):
- bp = self.bind_processor(dialect)
- def process(value):
- return "'%s'" % bp(value)
- return process
-
-
-class DATETIME(_DateTimeMixin, sqltypes.DateTime):
- """Represent a Python datetime object in SQLite using a string.
-
- The default string storage format is::
-
- "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d"
-
- e.g.::
-
- 2011-03-15 12:05:57.10558
-
- The storage format can be customized to some degree using the
- ``storage_format`` and ``regexp`` parameters, such as::
-
- import re
- from sqlalchemy.dialects.sqlite import DATETIME
-
- dt = DATETIME(
- storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d",
- regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
- )
-
- :param storage_format: format string which will be applied to the dict with
- keys year, month, day, hour, minute, second, and microsecond.
-
- :param regexp: regular expression which will be applied to incoming result
- rows. If the regexp contains named groups, the resulting match dict is
- applied to the Python datetime() constructor as keyword arguments.
- Otherwise, if positional groups are used, the datetime() constructor
- is called with positional arguments via
- ``*map(int, match_obj.groups(0))``.
- """
-
- _storage_format = (
- "%(year)04d-%(month)02d-%(day)02d "
- "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
- )
-
- def __init__(self, *args, **kwargs):
- truncate_microseconds = kwargs.pop('truncate_microseconds', False)
- super(DATETIME, self).__init__(*args, **kwargs)
- if truncate_microseconds:
- assert 'storage_format' not in kwargs, "You can specify only "\
- "one of truncate_microseconds or storage_format."
- assert 'regexp' not in kwargs, "You can specify only one of "\
- "truncate_microseconds or regexp."
- self._storage_format = (
- "%(year)04d-%(month)02d-%(day)02d "
- "%(hour)02d:%(minute)02d:%(second)02d"
- )
-
- def bind_processor(self, dialect):
- datetime_datetime = datetime.datetime
- datetime_date = datetime.date
- format = self._storage_format
-
- def process(value):
- if value is None:
- return None
- elif isinstance(value, datetime_datetime):
- return format % {
- 'year': value.year,
- 'month': value.month,
- 'day': value.day,
- 'hour': value.hour,
- 'minute': value.minute,
- 'second': value.second,
- 'microsecond': value.microsecond,
- }
- elif isinstance(value, datetime_date):
- return format % {
- 'year': value.year,
- 'month': value.month,
- 'day': value.day,
- 'hour': 0,
- 'minute': 0,
- 'second': 0,
- 'microsecond': 0,
- }
- else:
- raise TypeError("SQLite DateTime type only accepts Python "
- "datetime and date objects as input.")
- return process
-
- def result_processor(self, dialect, coltype):
- if self._reg:
- return processors.str_to_datetime_processor_factory(
- self._reg, datetime.datetime)
- else:
- return processors.str_to_datetime
-
-
-class DATE(_DateTimeMixin, sqltypes.Date):
- """Represent a Python date object in SQLite using a string.
-
- The default string storage format is::
-
- "%(year)04d-%(month)02d-%(day)02d"
-
- e.g.::
-
- 2011-03-15
-
- The storage format can be customized to some degree using the
- ``storage_format`` and ``regexp`` parameters, such as::
-
- import re
- from sqlalchemy.dialects.sqlite import DATE
-
- d = DATE(
- storage_format="%(month)02d/%(day)02d/%(year)04d",
- regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
- )
-
- :param storage_format: format string which will be applied to the
- dict with keys year, month, and day.
-
- :param regexp: regular expression which will be applied to
- incoming result rows. If the regexp contains named groups, the
- resulting match dict is applied to the Python date() constructor
- as keyword arguments. Otherwise, if positional groups are used, the
- date() constructor is called with positional arguments via
- ``*map(int, match_obj.groups(0))``.
- """
-
- _storage_format = "%(year)04d-%(month)02d-%(day)02d"
-
- def bind_processor(self, dialect):
- datetime_date = datetime.date
- format = self._storage_format
-
- def process(value):
- if value is None:
- return None
- elif isinstance(value, datetime_date):
- return format % {
- 'year': value.year,
- 'month': value.month,
- 'day': value.day,
- }
- else:
- raise TypeError("SQLite Date type only accepts Python "
- "date objects as input.")
- return process
-
- def result_processor(self, dialect, coltype):
- if self._reg:
- return processors.str_to_datetime_processor_factory(
- self._reg, datetime.date)
- else:
- return processors.str_to_date
-
-
-class TIME(_DateTimeMixin, sqltypes.Time):
- """Represent a Python time object in SQLite using a string.
-
- The default string storage format is::
-
- "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
-
- e.g.::
-
- 12:05:57.10558
-
- The storage format can be customized to some degree using the
- ``storage_format`` and ``regexp`` parameters, such as::
-
- import re
- from sqlalchemy.dialects.sqlite import TIME
-
- t = TIME(
- storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
- regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
- )
-
- :param storage_format: format string which will be applied to the dict with
- keys hour, minute, second, and microsecond.
-
- :param regexp: regular expression which will be applied to incoming result
- rows. If the regexp contains named groups, the resulting match dict is
- applied to the Python time() constructor as keyword arguments. Otherwise,
- if positional groups are used, the time() constructor is called with
- positional arguments via ``*map(int, match_obj.groups(0))``.
- """
-
- _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
-
- def __init__(self, *args, **kwargs):
- truncate_microseconds = kwargs.pop('truncate_microseconds', False)
- super(TIME, self).__init__(*args, **kwargs)
- if truncate_microseconds:
- assert 'storage_format' not in kwargs, "You can specify only "\
- "one of truncate_microseconds or storage_format."
- assert 'regexp' not in kwargs, "You can specify only one of "\
- "truncate_microseconds or regexp."
- self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
-
- def bind_processor(self, dialect):
- datetime_time = datetime.time
- format = self._storage_format
-
- def process(value):
- if value is None:
- return None
- elif isinstance(value, datetime_time):
- return format % {
- 'hour': value.hour,
- 'minute': value.minute,
- 'second': value.second,
- 'microsecond': value.microsecond,
- }
- else:
- raise TypeError("SQLite Time type only accepts Python "
- "time objects as input.")
- return process
-
- def result_processor(self, dialect, coltype):
- if self._reg:
- return processors.str_to_datetime_processor_factory(
- self._reg, datetime.time)
- else:
- return processors.str_to_time
-
-colspecs = {
- sqltypes.Date: DATE,
- sqltypes.DateTime: DATETIME,
- sqltypes.Time: TIME,
-}
-
-ischema_names = {
- 'BIGINT': sqltypes.BIGINT,
- 'BLOB': sqltypes.BLOB,
- 'BOOL': sqltypes.BOOLEAN,
- 'BOOLEAN': sqltypes.BOOLEAN,
- 'CHAR': sqltypes.CHAR,
- 'DATE': sqltypes.DATE,
- 'DATETIME': sqltypes.DATETIME,
- 'DOUBLE': sqltypes.FLOAT,
- 'DECIMAL': sqltypes.DECIMAL,
- 'FLOAT': sqltypes.FLOAT,
- 'INT': sqltypes.INTEGER,
- 'INTEGER': sqltypes.INTEGER,
- 'NUMERIC': sqltypes.NUMERIC,
- 'REAL': sqltypes.REAL,
- 'SMALLINT': sqltypes.SMALLINT,
- 'TEXT': sqltypes.TEXT,
- 'TIME': sqltypes.TIME,
- 'TIMESTAMP': sqltypes.TIMESTAMP,
- 'VARCHAR': sqltypes.VARCHAR,
- 'NVARCHAR': sqltypes.NVARCHAR,
- 'NCHAR': sqltypes.NCHAR,
-}
-
-
-class SQLiteCompiler(compiler.SQLCompiler):
- extract_map = util.update_copy(
- compiler.SQLCompiler.extract_map,
- {
- 'month': '%m',
- 'day': '%d',
- 'year': '%Y',
- 'second': '%S',
- 'hour': '%H',
- 'doy': '%j',
- 'minute': '%M',
- 'epoch': '%s',
- 'dow': '%w',
- 'week': '%W',
- })
-
- def visit_now_func(self, fn, **kw):
- return "CURRENT_TIMESTAMP"
-
- def visit_localtimestamp_func(self, func, **kw):
- return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
-
- def visit_true(self, expr, **kw):
- return '1'
-
- def visit_false(self, expr, **kw):
- return '0'
-
- def visit_char_length_func(self, fn, **kw):
- return "length%s" % self.function_argspec(fn)
-
- def visit_cast(self, cast, **kwargs):
- if self.dialect.supports_cast:
- return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
- else:
- return self.process(cast.clause, **kwargs)
-
- def visit_extract(self, extract, **kw):
- try:
- return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
- self.extract_map[extract.field],
- self.process(extract.expr, **kw)
- )
- except KeyError:
- raise exc.CompileError(
- "%s is not a valid extract argument." % extract.field)
-
- def limit_clause(self, select):
- text = ""
- if select._limit is not None:
- text += "\n LIMIT " + self.process(sql.literal(select._limit))
- if select._offset is not None:
- if select._limit is None:
- text += "\n LIMIT " + self.process(sql.literal(-1))
- text += " OFFSET " + self.process(sql.literal(select._offset))
- else:
- text += " OFFSET " + self.process(sql.literal(0))
- return text
-
- def for_update_clause(self, select):
- # sqlite has no "FOR UPDATE" AFAICT
- return ''
-
-
-class SQLiteDDLCompiler(compiler.DDLCompiler):
-
- def get_column_specification(self, column, **kwargs):
- coltype = self.dialect.type_compiler.process(column.type)
- colspec = self.preparer.format_column(column) + " " + coltype
- default = self.get_column_default_string(column)
- if default is not None:
- colspec += " DEFAULT " + default
-
- if not column.nullable:
- colspec += " NOT NULL"
-
- if (column.primary_key and
- column.table.dialect_options['sqlite']['autoincrement'] and
- len(column.table.primary_key.columns) == 1 and
- issubclass(column.type._type_affinity, sqltypes.Integer) and
- not column.foreign_keys):
- colspec += " PRIMARY KEY AUTOINCREMENT"
-
- return colspec
-
- def visit_primary_key_constraint(self, constraint):
- # for columns with sqlite_autoincrement=True,
- # the PRIMARY KEY constraint can only be inline
- # with the column itself.
- if len(constraint.columns) == 1:
- c = list(constraint)[0]
- if (c.primary_key and
- c.table.dialect_options['sqlite']['autoincrement'] and
- issubclass(c.type._type_affinity, sqltypes.Integer) and
- not c.foreign_keys):
- return None
-
- return super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
- constraint)
-
- def visit_foreign_key_constraint(self, constraint):
-
- local_table = list(constraint._elements.values())[0].parent.table
- remote_table = list(constraint._elements.values())[0].column.table
-
- if local_table.schema != remote_table.schema:
- return None
- else:
- return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
- constraint)
-
- def define_constraint_remote_table(self, constraint, table, preparer):
- """Format the remote table clause of a CREATE CONSTRAINT clause."""
-
- return preparer.format_table(table, use_schema=False)
-
- def visit_create_index(self, create):
- return super(SQLiteDDLCompiler, self).visit_create_index(
- create, include_table_schema=False)
-
-
-class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
- def visit_large_binary(self, type_):
- return self.visit_BLOB(type_)
-
-
-class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
- reserved_words = set([
- 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
- 'attach', 'autoincrement', 'before', 'begin', 'between', 'by',
- 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',
- 'conflict', 'constraint', 'create', 'cross', 'current_date',
- 'current_time', 'current_timestamp', 'database', 'default',
- 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
- 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
- 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
- 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
- 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
- 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
- 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on',
- 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
- 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict',
- 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp',
- 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union',
- 'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual',
- 'when', 'where',
- ])
-
- def format_index(self, index, use_schema=True, name=None):
- """Prepare a quoted index and schema name."""
-
- if name is None:
- name = index.name
- result = self.quote(name, index.quote)
- if (not self.omit_schema and
- use_schema and
- getattr(index.table, "schema", None)):
- result = self.quote_schema(index.table.schema,
- index.table.quote_schema) + "." + result
- return result
-
-
-class SQLiteExecutionContext(default.DefaultExecutionContext):
- @util.memoized_property
- def _preserve_raw_colnames(self):
- return self.execution_options.get("sqlite_raw_colnames", False)
-
- def _translate_colname(self, colname):
- # adjust for dotted column names. SQLite in the case of UNION may store
- # col names as "tablename.colname" in cursor.description
- if not self._preserve_raw_colnames and "." in colname:
- return colname.split(".")[1], colname
- else:
- return colname, None
-
-
-class SQLiteDialect(default.DefaultDialect):
- name = 'sqlite'
- supports_alter = False
- supports_unicode_statements = True
- supports_unicode_binds = True
- supports_default_values = True
- supports_empty_insert = False
- supports_cast = True
- supports_multivalues_insert = True
- supports_right_nested_joins = False
-
- default_paramstyle = 'qmark'
- execution_ctx_cls = SQLiteExecutionContext
- statement_compiler = SQLiteCompiler
- ddl_compiler = SQLiteDDLCompiler
- type_compiler = SQLiteTypeCompiler
- preparer = SQLiteIdentifierPreparer
- ischema_names = ischema_names
- colspecs = colspecs
- isolation_level = None
-
- construct_arguments = [
- (sa_schema.Table, {
- "autoincrement": False
- })
- ]
-
- _broken_fk_pragma_quotes = False
-
- def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
- default.DefaultDialect.__init__(self, **kwargs)
- self.isolation_level = isolation_level
-
- # this flag used by pysqlite dialect, and perhaps others in the future,
- # to indicate the driver is handling date/timestamp conversions (and
- # perhaps datetime/time as well on some hypothetical driver ?)
- self.native_datetime = native_datetime
-
- if self.dbapi is not None:
- self.supports_default_values = (
- self.dbapi.sqlite_version_info >= (3, 3, 8))
- self.supports_cast = (
- self.dbapi.sqlite_version_info >= (3, 2, 3))
- self.supports_multivalues_insert = (
- # http://www.sqlite.org/releaselog/3_7_11.html
- self.dbapi.sqlite_version_info >= (3, 7, 11))
- # see http://www.sqlalchemy.org/trac/ticket/2568
- # as well as http://www.sqlite.org/src/info/600482d161
- self._broken_fk_pragma_quotes = (
- self.dbapi.sqlite_version_info < (3, 6, 14))
-
- _isolation_lookup = {
- 'READ UNCOMMITTED': 1,
- 'SERIALIZABLE': 0,
- }
-
- def set_isolation_level(self, connection, level):
- try:
- isolation_level = self._isolation_lookup[level.replace('_', ' ')]
- except KeyError:
- raise exc.ArgumentError(
- "Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
- )
- cursor = connection.cursor()
- cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
- cursor.close()
-
- def get_isolation_level(self, connection):
- cursor = connection.cursor()
- cursor.execute('PRAGMA read_uncommitted')
- res = cursor.fetchone()
- if res:
- value = res[0]
- else:
- # http://www.sqlite.org/changes.html#version_3_3_3
- # "Optional READ UNCOMMITTED isolation (instead of the
- # default isolation level of SERIALIZABLE) and
- # table level locking when database connections
- # share a common cache.""
- # pre-SQLite 3.3.0 default to 0
- value = 0
- cursor.close()
- if value == 0:
- return "SERIALIZABLE"
- elif value == 1:
- return "READ UNCOMMITTED"
- else:
- assert False, "Unknown isolation level %s" % value
-
- def on_connect(self):
- if self.isolation_level is not None:
- def connect(conn):
- self.set_isolation_level(conn, self.isolation_level)
- return connect
- else:
- return None
-
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
- if schema is not None:
- qschema = self.identifier_preparer.quote_identifier(schema)
- master = '%s.sqlite_master' % qschema
- s = ("SELECT name FROM %s "
- "WHERE type='table' ORDER BY name") % (master,)
- rs = connection.execute(s)
- else:
- try:
- s = ("SELECT name FROM "
- " (SELECT * FROM sqlite_master UNION ALL "
- " SELECT * FROM sqlite_temp_master) "
- "WHERE type='table' ORDER BY name")
- rs = connection.execute(s)
- except exc.DBAPIError:
- s = ("SELECT name FROM sqlite_master "
- "WHERE type='table' ORDER BY name")
- rs = connection.execute(s)
-
- return [row[0] for row in rs]
-
- def has_table(self, connection, table_name, schema=None):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- qtable = quote(table_name)
- statement = "%stable_info(%s)" % (pragma, qtable)
- cursor = _pragma_cursor(connection.execute(statement))
- row = cursor.fetchone()
-
- # consume remaining rows, to work around
- # http://www.sqlite.org/cvstrac/tktview?tn=1884
- while not cursor.closed and cursor.fetchone() is not None:
- pass
-
- return row is not None
-
- @reflection.cache
- def get_view_names(self, connection, schema=None, **kw):
- if schema is not None:
- qschema = self.identifier_preparer.quote_identifier(schema)
- master = '%s.sqlite_master' % qschema
- s = ("SELECT name FROM %s "
- "WHERE type='view' ORDER BY name") % (master,)
- rs = connection.execute(s)
- else:
- try:
- s = ("SELECT name FROM "
- " (SELECT * FROM sqlite_master UNION ALL "
- " SELECT * FROM sqlite_temp_master) "
- "WHERE type='view' ORDER BY name")
- rs = connection.execute(s)
- except exc.DBAPIError:
- s = ("SELECT name FROM sqlite_master "
- "WHERE type='view' ORDER BY name")
- rs = connection.execute(s)
-
- return [row[0] for row in rs]
-
- @reflection.cache
- def get_view_definition(self, connection, view_name, schema=None, **kw):
- if schema is not None:
- qschema = self.identifier_preparer.quote_identifier(schema)
- master = '%s.sqlite_master' % qschema
- s = ("SELECT sql FROM %s WHERE name = '%s'"
- "AND type='view'") % (master, view_name)
- rs = connection.execute(s)
- else:
- try:
- s = ("SELECT sql FROM "
- " (SELECT * FROM sqlite_master UNION ALL "
- " SELECT * FROM sqlite_temp_master) "
- "WHERE name = '%s' "
- "AND type='view'") % view_name
- rs = connection.execute(s)
- except exc.DBAPIError:
- s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
- "AND type='view'") % view_name
- rs = connection.execute(s)
-
- result = rs.fetchall()
- if result:
- return result[0].sql
-
- @reflection.cache
- def get_columns(self, connection, table_name, schema=None, **kw):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- qtable = quote(table_name)
- statement = "%stable_info(%s)" % (pragma, qtable)
- c = _pragma_cursor(connection.execute(statement))
-
- rows = c.fetchall()
- columns = []
- for row in rows:
- (name, type_, nullable, default, primary_key) = (
- row[1], row[2].upper(), not row[3], row[4], row[5])
-
- columns.append(self._get_column_info(name, type_, nullable,
- default, primary_key))
- return columns
-
- def _get_column_info(self, name, type_, nullable, default, primary_key):
- coltype = self._resolve_type_affinity(type_)
-
- if default is not None:
- default = util.text_type(default)
-
- return {
- 'name': name,
- 'type': coltype,
- 'nullable': nullable,
- 'default': default,
- 'autoincrement': default is None,
- 'primary_key': primary_key,
- }
-
- def _resolve_type_affinity(self, type_):
- """Return a data type from a reflected column, using affinity tules.
-
- SQLite's goal for universal compatability introduces some complexity
- during reflection, as a column's defined type might not actually be a
- type that SQLite understands - or indeed, my not be defined *at all*.
- Internally, SQLite handles this with a 'data type affinity' for each
- column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
- 'REAL', or 'NONE' (raw bits). The algorithm that determines this is
- listed in http://www.sqlite.org/datatype3.html section 2.1.
-
- This method allows SQLAlchemy to support that algorithm, while still
- providing access to smarter reflection utilities by regcognizing
- column definitions that SQLite only supports through affinity (like
- DATE and DOUBLE).
-
- """
- match = re.match(r'([\w ]+)(\(.*?\))?', type_)
- if match:
- coltype = match.group(1)
- args = match.group(2)
- else:
- coltype = ''
- args = ''
-
- if coltype in self.ischema_names:
- coltype = self.ischema_names[coltype]
- elif 'INT' in coltype:
- coltype = sqltypes.INTEGER
- elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype:
- coltype = sqltypes.TEXT
- elif 'BLOB' in coltype or not coltype:
- coltype = sqltypes.NullType
- elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype:
- coltype = sqltypes.REAL
- else:
- coltype = sqltypes.NUMERIC
-
- if args is not None:
- args = re.findall(r'(\d+)', args)
- try:
- coltype = coltype(*[int(a) for a in args])
- except TypeError:
- util.warn(
- "Could not instantiate type %s with "
- "reflected arguments %s; using no arguments." %
- (coltype, args))
- coltype = coltype()
- else:
- coltype = coltype()
-
- return coltype
-
- @reflection.cache
- def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- cols = self.get_columns(connection, table_name, schema, **kw)
- pkeys = []
- for col in cols:
- if col['primary_key']:
- pkeys.append(col['name'])
- return {'constrained_columns': pkeys, 'name': None}
-
- @reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None, **kw):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- qtable = quote(table_name)
- statement = "%sforeign_key_list(%s)" % (pragma, qtable)
- c = _pragma_cursor(connection.execute(statement))
- fkeys = []
- fks = {}
- while True:
- row = c.fetchone()
- if row is None:
- break
- (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
-
- self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol)
- return fkeys
-
- def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol):
- # sqlite won't return rcol if the table was created with REFERENCES
- # |