author    morpheus65535 <[email protected]>  2018-09-16 20:27:00 -0400
committer morpheus65535 <[email protected]>  2018-09-16 20:33:04 -0400
commit    0f061f21226f91883c841f85ceef31b30981277a
tree      a1350723ae688ccbae4d4ca564cc4175ccc73996 /libs/dogpile
parent    8b681d8a151a3b41d3aaa5bfdd7a082bdda7896c
Include dependencies and remove requirements.txt
Diffstat (limited to 'libs/dogpile')
-rw-r--r--  libs/dogpile/__init__.py                  |    4
-rw-r--r--  libs/dogpile/cache/__init__.py            |    4
-rw-r--r--  libs/dogpile/cache/api.py                 |  215
-rw-r--r--  libs/dogpile/cache/backends/__init__.py   |   22
-rw-r--r--  libs/dogpile/cache/backends/file.py       |  447
-rw-r--r--  libs/dogpile/cache/backends/memcached.py  |  364
-rw-r--r--  libs/dogpile/cache/backends/memory.py     |  124
-rw-r--r--  libs/dogpile/cache/backends/null.py       |   62
-rw-r--r--  libs/dogpile/cache/backends/redis.py      |  183
-rw-r--r--  libs/dogpile/cache/exception.py           |   25
-rw-r--r--  libs/dogpile/cache/plugins/__init__.py    |    0
-rw-r--r--  libs/dogpile/cache/plugins/mako_cache.py  |   90
-rw-r--r--  libs/dogpile/cache/proxy.py               |   95
-rw-r--r--  libs/dogpile/cache/region.py              | 1518
-rw-r--r--  libs/dogpile/cache/util.py                |  146
-rw-r--r--  libs/dogpile/core.py                      |   17
-rw-r--r--  libs/dogpile/lock.py                      |  189
-rw-r--r--  libs/dogpile/util/__init__.py             |    4
-rw-r--r--  libs/dogpile/util/compat.py               |   65
-rw-r--r--  libs/dogpile/util/langhelpers.py          |  123
-rw-r--r--  libs/dogpile/util/nameregistry.py         |   84
-rw-r--r--  libs/dogpile/util/readwrite_lock.py       |  132
22 files changed, 3913 insertions(+), 0 deletions(-)
diff --git a/libs/dogpile/__init__.py b/libs/dogpile/__init__.py
new file mode 100644
index 000000000..dd4472abd
--- /dev/null
+++ b/libs/dogpile/__init__.py
@@ -0,0 +1,4 @@
+__version__ = '0.6.7'
+
+from .lock import Lock # noqa
+from .lock import NeedRegenerationException # noqa
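(Editor's sketch, not part of the diff: the two names exported above are the whole core API. The following assumes the dogpile 0.6.x ``Lock(mutex, creator, value_and_created_fn, expiretime)`` signature; the one-slot dict store and key name are illustrative only::

    import threading
    import time

    from dogpile import Lock, NeedRegenerationException

    store = {}  # hypothetical one-slot cache: {"key": (value, created_at)}
    mutex = threading.Lock()

    def get_value():
        # signal dogpile that nothing is cached yet
        if "key" not in store:
            raise NeedRegenerationException()
        return store["key"]  # (value, creation timestamp)

    def create_value():
        value = "computed"  # stand-in for an expensive computation
        store["key"] = (value, time.time())
        return value

    # only one thread regenerates at a time; other threads keep serving
    # the stale value, if one exists, until the new value is ready
    with Lock(mutex, create_value, get_value, expiretime=30) as value:
        print(value)

)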
diff --git a/libs/dogpile/cache/__init__.py b/libs/dogpile/cache/__init__.py
new file mode 100644
index 000000000..fb57cbcc2
--- /dev/null
+++ b/libs/dogpile/cache/__init__.py
@@ -0,0 +1,4 @@
+from .region import CacheRegion, register_backend, make_region # noqa
+
+# backwards compat
+from .. import __version__ # noqa
diff --git a/libs/dogpile/cache/api.py b/libs/dogpile/cache/api.py
new file mode 100644
index 000000000..d66e5a707
--- /dev/null
+++ b/libs/dogpile/cache/api.py
@@ -0,0 +1,215 @@
+import operator
+from ..util.compat import py3k
+
+
+class NoValue(object):
+ """Describe a missing cache value.
+
+ The :attr:`.NO_VALUE` module global
+ should be used.
+
+ """
+ @property
+ def payload(self):
+ return self
+
+ def __repr__(self):
+ """Ensure __repr__ is a consistent value in case NoValue is used to
+ fill another cache key.
+
+ """
+ return '<dogpile.cache.api.NoValue object>'
+
+ if py3k:
+ def __bool__(self): # pragma NO COVERAGE
+ return False
+ else:
+ def __nonzero__(self): # pragma NO COVERAGE
+ return False
+
+
+NO_VALUE = NoValue()
+"""Value returned from ``get()`` that describes
+a key not present."""
+
+
+class CachedValue(tuple):
+ """Represent a value stored in the cache.
+
+ :class:`.CachedValue` is a two-tuple of
+ ``(payload, metadata)``, where ``metadata``
+ is dogpile.cache's tracking information (
+ currently the creation time). The metadata
+ and tuple structure are pickleable, if
+ the backend requires serialization.
+
+ """
+ payload = property(operator.itemgetter(0))
+ """Named accessor for the payload."""
+
+ metadata = property(operator.itemgetter(1))
+ """Named accessor for the dogpile.cache metadata dictionary."""
+
+ def __new__(cls, payload, metadata):
+ return tuple.__new__(cls, (payload, metadata))
+
+ def __reduce__(self):
+ return CachedValue, (self.payload, self.metadata)
+
+
+class CacheBackend(object):
+ """Base class for backend implementations."""
+
+ key_mangler = None
+ """Key mangling function.
+
+ May be None, or otherwise declared
+ as an ordinary instance method.
+
+ """
+
+ def __init__(self, arguments): # pragma NO COVERAGE
+ """Construct a new :class:`.CacheBackend`.
+
+ Subclasses should override this to
+ handle the given arguments.
+
+ :param arguments: The ``arguments`` parameter
+ passed to :func:`.make_region`.
+
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def from_config_dict(cls, config_dict, prefix):
+ prefix_len = len(prefix)
+ return cls(
+ dict(
+ (key[prefix_len:], config_dict[key])
+ for key in config_dict
+ if key.startswith(prefix)
+ )
+ )
+
+ def has_lock_timeout(self):
+ return False
+
+ def get_mutex(self, key):
+ """Return an optional mutexing object for the given key.
+
+ This object need only provide an ``acquire()``
+ and ``release()`` method.
+
+ May return ``None``, in which case the dogpile
+ lock will use a regular ``threading.Lock``
+ object to mutex concurrent threads for
+ value creation. The default implementation
+ returns ``None``.
+
+ Different backends may want to provide various
+ kinds of "mutex" objects, such as those which
+ link to lock files, distributed mutexes,
+ memcached semaphores, etc. Whatever
+ kind of system is best suited for the scope
+ and behavior of the caching backend.
+
+ A mutex that takes the key into account will
+ allow multiple regenerate operations across
+ keys to proceed simultaneously, while a mutex
+ that does not will serialize regenerate operations
+ to just one at a time across all keys in the region.
+ The latter approach, or a variant that involves
+ a modulus of the given key's hash value,
+ can be used as a means of throttling the total
+ number of value recreation operations that may
+ proceed at one time.
+
+ """
+ return None
+
+ def get(self, key): # pragma NO COVERAGE
+ """Retrieve a value from the cache.
+
+ The returned value should be an instance of
+ :class:`.CachedValue`, or ``NO_VALUE`` if
+ not present.
+
+ """
+ raise NotImplementedError()
+
+ def get_multi(self, keys): # pragma NO COVERAGE
+ """Retrieve multiple values from the cache.
+
+ The returned value should be a list, corresponding
+ to the list of keys given.
+
+ .. versionadded:: 0.5.0
+
+ """
+ raise NotImplementedError()
+
+ def set(self, key, value): # pragma NO COVERAGE
+ """Set a value in the cache.
+
+ The key will be whatever was passed
+ to the region, processed by the
+ "key mangling" function, if any.
+ The value will always be an instance
+ of :class:`.CachedValue`.
+
+ """
+ raise NotImplementedError()
+
+ def set_multi(self, mapping): # pragma NO COVERAGE
+ """Set multiple values in the cache.
+
+ ``mapping`` is a dict in which
+ the key will be whatever was passed
+ to the region, processed by the
+ "key mangling" function, if any.
+ The value will always be an instance
+ of :class:`.CachedValue`.
+
+ When implementing a new :class:`.CacheBackend` or customizing via
+ :class:`.ProxyBackend`, be aware that when this method is invoked by
+ :meth:`.Region.get_or_create_multi`, the ``mapping`` values are the
+ same ones returned to the upstream caller. If the subclass alters the
+ values in any way, it must not do so 'in-place' on the ``mapping`` dict
+ -- that will have the undesirable effect of modifying the returned
+ values as well.
+
+ .. versionadded:: 0.5.0
+
+ """
+ raise NotImplementedError()
+
+ def delete(self, key): # pragma NO COVERAGE
+ """Delete a value from the cache.
+
+ The key will be whatever was passed
+ to the region, processed by the
+ "key mangling" function, if any.
+
+ The behavior here should be idempotent,
+ that is, can be called any number of times
+ regardless of whether or not the
+ key exists.
+ """
+ raise NotImplementedError()
+
+ def delete_multi(self, keys): # pragma NO COVERAGE
+ """Delete multiple values from the cache.
+
+ The key will be whatever was passed
+ to the region, processed by the
+ "key mangling" function, if any.
+
+ The behavior here should be idempotent,
+ that is, can be called any number of times
+ regardless of whether or not the
+ key exists.
+
+ .. versionadded:: 0.5.0
+
+ """
+ raise NotImplementedError()
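(Editor's sketch, not part of the diff: to make the contract above concrete, a minimal backend satisfying this interface with a plain dictionary; ``DictBackend`` and its ``cache_dict`` argument are hypothetical names, not part of dogpile.cache::

    from dogpile.cache.api import CacheBackend, NO_VALUE

    class DictBackend(CacheBackend):
        # hypothetical in-process backend implementing the API above
        def __init__(self, arguments):
            self._store = arguments.get("cache_dict", {})

        def get(self, key):
            return self._store.get(key, NO_VALUE)

        def get_multi(self, keys):
            return [self._store.get(key, NO_VALUE) for key in keys]

        def set(self, key, value):
            self._store[key] = value

        def set_multi(self, mapping):
            # per the note above, never mutate the mapping values in place
            self._store.update(mapping)

        def delete(self, key):
            self._store.pop(key, None)

        def delete_multi(self, keys):
            for key in keys:
                self._store.pop(key, None)

)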
diff --git a/libs/dogpile/cache/backends/__init__.py b/libs/dogpile/cache/backends/__init__.py
new file mode 100644
index 000000000..041f05a3e
--- /dev/null
+++ b/libs/dogpile/cache/backends/__init__.py
@@ -0,0 +1,22 @@
+from dogpile.cache.region import register_backend
+
+register_backend(
+ "dogpile.cache.null", "dogpile.cache.backends.null", "NullBackend")
+register_backend(
+ "dogpile.cache.dbm", "dogpile.cache.backends.file", "DBMBackend")
+register_backend(
+ "dogpile.cache.pylibmc", "dogpile.cache.backends.memcached",
+ "PylibmcBackend")
+register_backend(
+ "dogpile.cache.bmemcached", "dogpile.cache.backends.memcached",
+ "BMemcachedBackend")
+register_backend(
+ "dogpile.cache.memcached", "dogpile.cache.backends.memcached",
+ "MemcachedBackend")
+register_backend(
+ "dogpile.cache.memory", "dogpile.cache.backends.memory", "MemoryBackend")
+register_backend(
+ "dogpile.cache.memory_pickle", "dogpile.cache.backends.memory",
+ "MemoryPickleBackend")
+register_backend(
+ "dogpile.cache.redis", "dogpile.cache.backends.redis", "RedisBackend")
diff --git a/libs/dogpile/cache/backends/file.py b/libs/dogpile/cache/backends/file.py
new file mode 100644
index 000000000..309c055a2
--- /dev/null
+++ b/libs/dogpile/cache/backends/file.py
@@ -0,0 +1,447 @@
+"""
+File Backends
+------------------
+
+Provides backends that deal with local filesystem access.
+
+"""
+
+from __future__ import with_statement
+from ..api import CacheBackend, NO_VALUE
+from contextlib import contextmanager
+from ...util import compat
+from ... import util
+import os
+
+__all__ = 'DBMBackend', 'FileLock', 'AbstractFileLock'
+
+
+class DBMBackend(CacheBackend):
+ """A file-backend using a dbm file to store keys.
+
+ Basic usage::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.dbm',
+ expiration_time = 3600,
+ arguments = {
+ "filename":"/path/to/cachefile.dbm"
+ }
+ )
+
+ DBM access is provided using the Python ``anydbm`` module,
+ which selects a platform-specific dbm module to use.
+ This may be made more configurable in a future
+ release.
+
+ Note that different dbm modules have different behaviors.
+ Some dbm implementations handle their own locking, while
+ others don't. The :class:`.DBMBackend` uses a read/write
+ lockfile by default, which is compatible even with those
+ DBM implementations for which this is unnecessary,
+ though the behavior can be disabled.
+
+ The DBM backend by default makes use of two lockfiles.
+ One is in order to protect the DBM file itself from
+ concurrent writes, the other is to coordinate
+ value creation (i.e. the dogpile lock). By default,
+ these lockfiles use the ``flock()`` system call
+ for locking; this is **only available on Unix
+ platforms**. An alternative lock implementation, such as one
+ which is based on threads or uses a third-party system
+ such as `portalocker <https://pypi.python.org/pypi/portalocker>`_,
+ can be dropped in using the ``lock_factory`` argument
+ in conjunction with the :class:`.AbstractFileLock` base class.
+
+ Currently, the dogpile lock is against the entire
+ DBM file, not per key. This means there can
+ only be one "creator" job running at a time
+ per dbm file.
+
+ A future improvement might be to have the dogpile lock
+ using a filename that's based on a modulus of the key.
+ Locking on a filename that uniquely corresponds to the
+ key is problematic, since it's not generally safe to
+ delete lockfiles as the application runs, implying an
+ unlimited number of key-based files would need to be
+ created and never deleted.
+
+ Parameters to the ``arguments`` dictionary are
+ below.
+
+ :param filename: path of the filename in which to
+ create the DBM file. Note that some dbm backends
+ will change this name to have additional suffixes.
+ :param rw_lockfile: the name of the file to use for
+ read/write locking. If omitted, a default name
+ is used by appending the suffix ".rw.lock" to the
+ DBM filename. If False, then no lock is used.
+ :param dogpile_lockfile: the name of the file to use
+ for value creation, i.e. the dogpile lock. If
+ omitted, a default name is used by appending the
+ suffix ".dogpile.lock" to the DBM filename. If
+ False, then dogpile.cache uses the default dogpile
+ lock, a plain thread-based mutex.
+ :param lock_factory: a function or class which provides
+ for a read/write lock. Defaults to :class:`.FileLock`.
+ Custom implementations need to implement context-manager
+ based ``read()`` and ``write()`` functions - the
+ :class:`.AbstractFileLock` class is provided as a base class
+ which provides these methods based on individual read/write lock
+ functions. E.g. to replace the lock with the dogpile.core
+ :class:`.ReadWriteMutex`::
+
+ from dogpile.core.readwrite_lock import ReadWriteMutex
+ from dogpile.cache.backends.file import AbstractFileLock
+
+ class MutexLock(AbstractFileLock):
+ def __init__(self, filename):
+ self.mutex = ReadWriteMutex()
+
+ def acquire_read_lock(self, wait):
+ ret = self.mutex.acquire_read_lock(wait)
+ return wait or ret
+
+ def acquire_write_lock(self, wait):
+ ret = self.mutex.acquire_write_lock(wait)
+ return wait or ret
+
+ def release_read_lock(self):
+ return self.mutex.release_read_lock()
+
+ def release_write_lock(self):
+ return self.mutex.release_write_lock()
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ "dogpile.cache.dbm",
+ expiration_time=300,
+ arguments={
+ "filename": "file.dbm",
+ "lock_factory": MutexLock
+ }
+ )
+
+ While the included :class:`.FileLock` uses ``os.flock()``, a
+ windows-compatible implementation can be built using a library
+ such as `portalocker <https://pypi.python.org/pypi/portalocker>`_.
+
+ .. versionadded:: 0.5.2
+
+
+
+ """
+ def __init__(self, arguments):
+ self.filename = os.path.abspath(
+ os.path.normpath(arguments['filename'])
+ )
+ dir_, filename = os.path.split(self.filename)
+
+ self.lock_factory = arguments.get("lock_factory", FileLock)
+ self._rw_lock = self._init_lock(
+ arguments.get('rw_lockfile'),
+ ".rw.lock", dir_, filename)
+ self._dogpile_lock = self._init_lock(
+ arguments.get('dogpile_lockfile'),
+ ".dogpile.lock",
+ dir_, filename,
+ util.KeyReentrantMutex.factory)
+
+ # TODO: make this configurable
+ if compat.py3k:
+ import dbm
+ else:
+ import anydbm as dbm
+ self.dbmmodule = dbm
+ self._init_dbm_file()
+
+ def _init_lock(self, argument, suffix, basedir, basefile, wrapper=None):
+ if argument is None:
+ lock = self.lock_factory(os.path.join(basedir, basefile + suffix))
+ elif argument is not False:
+ lock = self.lock_factory(
+ os.path.abspath(
+ os.path.normpath(argument)
+ ))
+ else:
+ return None
+ if wrapper:
+ lock = wrapper(lock)
+ return lock
+
+ def _init_dbm_file(self):
+ exists = os.access(self.filename, os.F_OK)
+ if not exists:
+ for ext in ('db', 'dat', 'pag', 'dir'):
+ if os.access(self.filename + os.extsep + ext, os.F_OK):
+ exists = True
+ break
+ if not exists:
+ fh = self.dbmmodule.open(self.filename, 'c')
+ fh.close()
+
+ def get_mutex(self, key):
+ # using one dogpile for the whole file. Other ways
+ # to do this might be using a set of files keyed to a
+ # hash/modulus of the key. the issue is it's never
+ # really safe to delete a lockfile as this can
+ # break other processes trying to get at the file
+ # at the same time - so handling unlimited keys
+ # can't imply unlimited filenames
+ if self._dogpile_lock:
+ return self._dogpile_lock(key)
+ else:
+ return None
+
+ @contextmanager
+ def _use_rw_lock(self, write):
+ if self._rw_lock is None:
+ yield
+ elif write:
+ with self._rw_lock.write():
+ yield
+ else:
+ with self._rw_lock.read():
+ yield
+
+ @contextmanager
+ def _dbm_file(self, write):
+ with self._use_rw_lock(write):
+ dbm = self.dbmmodule.open(
+ self.filename,
+ "w" if write else "r")
+ yield dbm
+ dbm.close()
+
+ def get(self, key):
+ with self._dbm_file(False) as dbm:
+ if hasattr(dbm, 'get'):
+ value = dbm.get(key, NO_VALUE)
+ else:
+ # gdbm objects lack a .get method
+ try:
+ value = dbm[key]
+ except KeyError:
+ value = NO_VALUE
+ if value is not NO_VALUE:
+ value = compat.pickle.loads(value)
+ return value
+
+ def get_multi(self, keys):
+ return [self.get(key) for key in keys]
+
+ def set(self, key, value):
+ with self._dbm_file(True) as dbm:
+ dbm[key] = compat.pickle.dumps(value,
+ compat.pickle.HIGHEST_PROTOCOL)
+
+ def set_multi(self, mapping):
+ with self._dbm_file(True) as dbm:
+ for key, value in mapping.items():
+ dbm[key] = compat.pickle.dumps(value,
+ compat.pickle.HIGHEST_PROTOCOL)
+
+ def delete(self, key):
+ with self._dbm_file(True) as dbm:
+ try:
+ del dbm[key]
+ except KeyError:
+ pass
+
+ def delete_multi(self, keys):
+ with self._dbm_file(True) as dbm:
+ for key in keys:
+ try:
+ del dbm[key]
+ except KeyError:
+ pass
+
+
+class AbstractFileLock(object):
+ """Coordinate read/write access to a file.
+
+ This is typically a file-based lock, but it doesn't have to be.
+
+ The default implementation here is :class:`.FileLock`.
+
+ Implementations should provide the following methods::
+
+ * __init__()
+ * acquire_read_lock()
+ * acquire_write_lock()
+ * release_read_lock()
+ * release_write_lock()
+
+ The ``__init__()`` method accepts a single argument "filename", which
+ may be used as the "lock file", for those implementations that use a lock
+ file.
+
+ Note that multithreaded environments must provide a thread-safe
+ version of this lock. The recommended approach for file-
+ descriptor-based locks is to use a Python ``threading.local()`` so
+ that a unique file descriptor is held per thread. See the source
+ code of :class:`.FileLock` for an implementation example.
+
+
+ """
+
+ def __init__(self, filename):
+ """Constructor, is given the filename of a potential lockfile.
+
+ The usage of this filename is optional and no file is
+ created by default.
+
+ Raises ``NotImplementedError`` by default, must be
+ implemented by subclasses.
+ """
+ raise NotImplementedError()
+
+ def acquire(self, wait=True):
+ """Acquire the "write" lock.
+
+ This is a direct call to :meth:`.AbstractFileLock.acquire_write_lock`.
+
+ """
+ return self.acquire_write_lock(wait)
+
+ def release(self):
+ """Release the "write" lock.
+
+ This is a direct call to :meth:`.AbstractFileLock.release_write_lock`.
+
+ """
+ self.release_write_lock()
+
+ @contextmanager
+ def read(self):
+ """Provide a context manager for the "read" lock.
+
+ This method makes use of :meth:`.AbstractFileLock.acquire_read_lock`
+ and :meth:`.AbstractFileLock.release_read_lock`
+
+ """
+
+ self.acquire_read_lock(True)
+ try:
+ yield
+ finally:
+ self.release_read_lock()
+
+ @contextmanager
+ def write(self):
+ """Provide a context manager for the "write" lock.
+
+ This method makes use of :meth:`.AbstractFileLock.acquire_write_lock`
+ and :meth:`.AbstractFileLock.release_write_lock`
+
+ """
+
+ self.acquire_write_lock(True)
+ try:
+ yield
+ finally:
+ self.release_write_lock()
+
+ @property
+ def is_open(self):
+ """optional method."""
+ raise NotImplementedError()
+
+ def acquire_read_lock(self, wait):
+ """Acquire a 'reader' lock.
+
+ Raises ``NotImplementedError`` by default, must be
+ implemented by subclasses.
+ """
+ raise NotImplementedError()
+
+ def acquire_write_lock(self, wait):
+ """Acquire a 'write' lock.
+
+ Raises ``NotImplementedError`` by default, must be
+ implemented by subclasses.
+ """
+ raise NotImplementedError()
+
+ def release_read_lock(self):
+ """Release a 'reader' lock.
+
+ Raises ``NotImplementedError`` by default, must be
+ implemented by subclasses.
+ """
+ raise NotImplementedError()
+
+ def release_write_lock(self):
+ """Release a 'writer' lock.
+
+ Raises ``NotImplementedError`` by default, must be
+ implemented by subclasses.
+ """
+ raise NotImplementedError()
+
+
+class FileLock(AbstractFileLock):
+ """Use lockfiles to coordinate read/write access to a file.
+
+ Only works on Unix systems, using
+ `fcntl.flock() <http://docs.python.org/library/fcntl.html>`_.
+
+ """
+
+ def __init__(self, filename):
+ self._filedescriptor = compat.threading.local()
+ self.filename = filename
+
+ @util.memoized_property
+ def _module(self):
+ import fcntl
+ return fcntl
+
+ @property
+ def is_open(self):
+ return hasattr(self._filedescriptor, 'fileno')
+
+ def acquire_read_lock(self, wait):
+ return self._acquire(wait, os.O_RDONLY, self._module.LOCK_SH)
+
+ def acquire_write_lock(self, wait):
+ return self._acquire(wait, os.O_WRONLY, self._module.LOCK_EX)
+
+ def release_read_lock(self):
+ self._release()
+
+ def release_write_lock(self):
+ self._release()
+
+ def _acquire(self, wait, wrflag, lockflag):
+ wrflag |= os.O_CREAT
+ fileno = os.open(self.filename, wrflag)
+ try:
+ if not wait:
+ lockflag |= self._module.LOCK_NB
+ self._module.flock(fileno, lockflag)
+ except IOError:
+ os.close(fileno)
+ if not wait:
+ # this is typically
+ # "[Errno 35] Resource temporarily unavailable",
+ # because of LOCK_NB
+ return False
+ else:
+ raise
+ else:
+ self._filedescriptor.fileno = fileno
+ return True
+
+ def _release(self):
+ try:
+ fileno = self._filedescriptor.fileno
+ except AttributeError:
+ return
+ else:
+ self._module.flock(fileno, self._module.LOCK_UN)
+ os.close(fileno)
+ del self._filedescriptor.fileno
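(Editor's sketch, not part of the diff: putting the pieces above together, a region backed by this DBM backend. The filename is illustrative, and ``rw_lockfile=False`` assumes the platform's dbm module handles its own locking, per the docstring above::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.dbm",
        expiration_time=600,
        arguments={
            "filename": "/tmp/demo.dbm",
            "rw_lockfile": False,  # skip flock(); rely on the dbm module
        }
    )

    region.set("answer", 42)
    print(region.get("answer"))  # 42, until expired or deleted

)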
diff --git a/libs/dogpile/cache/backends/memcached.py b/libs/dogpile/cache/backends/memcached.py
new file mode 100644
index 000000000..6758a9980
--- /dev/null
+++ b/libs/dogpile/cache/backends/memcached.py
@@ -0,0 +1,364 @@
+"""
+Memcached Backends
+------------------
+
+Provides backends for talking to `memcached <http://memcached.org>`_.
+
+"""
+
+from ..api import CacheBackend, NO_VALUE
+from ...util import compat
+from ... import util
+import random
+import time
+
+__all__ = 'GenericMemcachedBackend', 'MemcachedBackend',\
+ 'PylibmcBackend', 'BMemcachedBackend', 'MemcachedLock'
+
+
+class MemcachedLock(object):
+ """Simple distributed lock using memcached.
+
+ This is an adaptation of the lock featured at
+ http://amix.dk/blog/post/19386
+
+ """
+
+ def __init__(self, client_fn, key, timeout=0):
+ self.client_fn = client_fn
+ self.key = "_lock" + key
+ self.timeout = timeout
+
+ def acquire(self, wait=True):
+ client = self.client_fn()
+ i = 0
+ while True:
+ if client.add(self.key, 1, self.timeout):
+ return True
+ elif not wait:
+ return False
+ else:
+ sleep_time = (((i + 1) * random.random()) + 2 ** i) / 2.5
+ time.sleep(sleep_time)
+ if i < 15:
+ i += 1
+
+ def release(self):
+ client = self.client_fn()
+ client.delete(self.key)
+
+
+class GenericMemcachedBackend(CacheBackend):
+ """Base class for memcached backends.
+
+ This base class accepts a number of parameters
+ common to all backends.
+
+ :param url: the string URL to connect to. Can be a single
+ string or a list of strings. This is the only argument
+ that's required.
+ :param distributed_lock: boolean, when True, will use a
+ memcached-lock as the dogpile lock (see :class:`.MemcachedLock`).
+ Use this when multiple
+ processes will be talking to the same memcached instance.
+ When left at False, dogpile will coordinate on a regular
+ threading mutex.
+ :param lock_timeout: integer, number of seconds after acquiring a lock that
+ memcached should expire it. This argument is only valid when
+ ``distributed_lock`` is ``True``.
+
+ .. versionadded:: 0.5.7
+
+ :param memcached_expire_time: integer, when present will
+ be passed as the ``time`` parameter to ``pylibmc.Client.set``.
+ This is used to set the memcached expiry time for a value.
+
+ .. note::
+
+ This parameter is **different** from Dogpile's own
+ ``expiration_time``, which is the number of seconds after
+ which Dogpile will consider the value to be expired.
+ When Dogpile considers a value to be expired,
+ it **continues to use the value** until generation
+ of a new value is complete, when using
+ :meth:`.CacheRegion.get_or_create`.
+ Therefore, if you are setting ``memcached_expire_time``, you'll
+ want to make sure it is greater than ``expiration_time``
+ by at least enough seconds for new values to be generated,
+ else the value won't be available during a regeneration,
+ forcing all threads to wait for a regeneration each time
+ a value expires.
+
+ The :class:`.GenericMemcachedBackend` uses a ``threading.local()``
+ object to store individual client objects per thread,
+ as most modern memcached clients do not appear to be inherently
+ threadsafe.
+
+ In particular, ``threading.local()`` has the advantage over pylibmc's
+ built-in thread pool in that it automatically discards objects
+ associated with a particular thread when that thread ends.
+
+ """
+
+ set_arguments = {}
+ """Additional arguments which will be passed
+ to the :meth:`set` method."""
+
+ def __init__(self, arguments):
+ self._imports()
+ # using a plain threading.local here. threading.local
+ # automatically deletes the __dict__ when a thread ends,
+ # so the idea is that this is superior to pylibmc's
+ # own ThreadMappedPool which doesn't handle this
+ # automatically.
+ self.url = util.to_list(arguments['url'])
+ self.distributed_lock = arguments.get('distributed_lock', False)
+ self.lock_timeout = arguments.get('lock_timeout', 0)
+ self.memcached_expire_time = arguments.get(
+ 'memcached_expire_time', 0)
+
+ def has_lock_timeout(self):
+ return self.lock_timeout != 0
+
+ def _imports(self):
+ """client library imports go here."""
+ raise NotImplementedError()
+
+ def _create_client(self):
+ """Creation of a Client instance goes here."""
+ raise NotImplementedError()
+
+ @util.memoized_property
+ def _clients(self):
+ backend = self
+
+ class ClientPool(compat.threading.local):
+ def __init__(self):
+ self.memcached = backend._create_client()
+
+ return ClientPool()
+
+ @property
+ def client(self):
+ """Return the memcached client.
+
+ This uses a threading.local by
+ default as it appears most modern
+ memcached libs aren't inherently
+ threadsafe.
+
+ """
+ return self._clients.memcached
+
+ def get_mutex(self, key):
+ if self.distributed_lock:
+ return MemcachedLock(lambda: self.client, key,
+ timeout=self.lock_timeout)
+ else:
+ return None
+
+ def get(self, key):
+ value = self.client.get(key)
+ if value is None:
+ return NO_VALUE
+ else:
+ return value
+
+ def get_multi(self, keys):
+ values = self.client.get_multi(keys)
+ return [
+ NO_VALUE if key not in values
+ else values[key] for key in keys
+ ]
+
+ def set(self, key, value):
+ self.client.set(
+ key,
+ value,
+ **self.set_arguments
+ )
+
+ def set_multi(self, mapping):
+ self.client.set_multi(
+ mapping,
+ **self.set_arguments
+ )
+
+ def delete(self, key):
+ self.client.delete(key)
+
+ def delete_multi(self, keys):
+ self.client.delete_multi(keys)
+
+
+class MemcacheArgs(object):
+ """Mixin which provides support for the 'time' argument to set(),
+ 'min_compress_len' to other methods.
+
+ """
+ def __init__(self, arguments):
+ self.min_compress_len = arguments.get('min_compress_len', 0)
+
+ self.set_arguments = {}
+ if "memcached_expire_time" in arguments:
+ self.set_arguments["time"] = arguments["memcached_expire_time"]
+ if "min_compress_len" in arguments:
+ self.set_arguments["min_compress_len"] = \
+ arguments["min_compress_len"]
+ super(MemcacheArgs, self).__init__(arguments)
+
+pylibmc = None
+
+
+class PylibmcBackend(MemcacheArgs, GenericMemcachedBackend):
+ """A backend for the
+ `pylibmc <http://sendapatch.se/projects/pylibmc/index.html>`_
+ memcached client.
+
+ A configuration illustrating several of the optional
+ arguments described in the pylibmc documentation::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.pylibmc',
+ expiration_time = 3600,
+ arguments = {
+ 'url':["127.0.0.1"],
+ 'binary':True,
+ 'behaviors':{"tcp_nodelay": True,"ketama":True}
+ }
+ )
+
+ Arguments accepted here include those of
+ :class:`.GenericMemcachedBackend`, as well as
+ those below.
+
+ :param binary: sets the ``binary`` flag understood by
+ ``pylibmc.Client``.
+ :param behaviors: a dictionary which will be passed to
+ ``pylibmc.Client`` as the ``behaviors`` parameter.
+ :param min_compress_len: Integer, will be passed as the
+ ``min_compress_len`` parameter to the ``pylibmc.Client.set``
+ method.
+
+ """
+
+ def __init__(self, arguments):
+ self.binary = arguments.get('binary', False)
+ self.behaviors = arguments.get('behaviors', {})
+ super(PylibmcBackend, self).__init__(arguments)
+
+ def _imports(self):
+ global pylibmc
+ import pylibmc # noqa
+
+ def _create_client(self):
+ return pylibmc.Client(
+ self.url,
+ binary=self.binary,
+ behaviors=self.behaviors
+ )
+
+memcache = None
+
+
+class MemcachedBackend(MemcacheArgs, GenericMemcachedBackend):
+ """A backend using the standard
+ `Python-memcached <http://www.tummy.com/Community/software/\
+ python-memcached/>`_
+ library.
+
+ Example::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.memcached',
+ expiration_time = 3600,
+ arguments = {
+ 'url':"127.0.0.1:11211"
+ }
+ )
+
+ """
+ def _imports(self):
+ global memcache
+ import memcache # noqa
+
+ def _create_client(self):
+ return memcache.Client(self.url)
+
+
+bmemcached = None
+
+
+class BMemcachedBackend(GenericMemcachedBackend):
+ """A backend for the
+ `python-binary-memcached <https://github.com/jaysonsantos/\
+ python-binary-memcached>`_
+ memcached client.
+
+ This is a pure Python memcached client which
+ includes the ability to authenticate with a memcached
+ server using SASL.
+
+ A typical configuration using username/password::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.bmemcached',
+ expiration_time = 3600,
+ arguments = {
+ 'url':["127.0.0.1"],
+ 'username':'scott',
+ 'password':'tiger'
+ }
+ )
+
+ Arguments which can be passed to the ``arguments``
+ dictionary include:
+
+ :param username: optional username, will be used for
+ SASL authentication.
+ :param password: optional password, will be used for
+ SASL authentication.
+
+ """
+ def __init__(self, arguments):
+ self.username = arguments.get('username', None)
+ self.password = arguments.get('password', None)
+ super(BMemcachedBackend, self).__init__(arguments)
+
+ def _imports(self):
+ global bmemcached
+ import bmemcached
+
+ class RepairBMemcachedAPI(bmemcached.Client):
+ """Repairs BMemcached's non-standard method
+ signatures, which were fixed in BMemcached
+ ef206ed4473fec3b639e.
+
+ """
+
+ def add(self, key, value, timeout=0):
+ try:
+ return super(RepairBMemcachedAPI, self).add(
+ key, value, timeout)
+ except ValueError:
+ return False
+
+ self.Client = RepairBMemcachedAPI
+
+ def _create_client(self):
+ return self.Client(
+ self.url,
+ username=self.username,
+ password=self.password
+ )
+
+ def delete_multi(self, keys):
+ """python-binary-memcached api does not implements delete_multi"""
+ for key in keys:
+ self.delete(key)
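(Editor's sketch, not part of the diff: tying the options above together for multi-process use. The address is illustrative, and ``memcached_expire_time`` is deliberately set higher than ``expiration_time``, per the note in :class:`.GenericMemcachedBackend`::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.memcached",
        expiration_time=300,  # dogpile's own soft expiry
        arguments={
            "url": "127.0.0.1:11211",
            "distributed_lock": True,      # MemcachedLock across processes
            "memcached_expire_time": 420,  # headroom for regeneration
        }
    )

)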
diff --git a/libs/dogpile/cache/backends/memory.py b/libs/dogpile/cache/backends/memory.py
new file mode 100644
index 000000000..e2083f7f0
--- /dev/null
+++ b/libs/dogpile/cache/backends/memory.py
@@ -0,0 +1,124 @@
+"""
+Memory Backends
+---------------
+
+Provides simple dictionary-based backends.
+
+The two backends are :class:`.MemoryBackend` and :class:`.MemoryPickleBackend`;
+the latter applies a serialization step to cached values while the former
+places the value as given into the dictionary.
+
+"""
+
+from ..api import CacheBackend, NO_VALUE
+from ...util.compat import pickle
+
+
+class MemoryBackend(CacheBackend):
+ """A backend that uses a plain dictionary.
+
+ There is no size management, and values which
+ are placed into the dictionary will remain
+ until explicitly removed. Note that
+ Dogpile's expiration of items is based on
+ timestamps and does not remove them from
+ the cache.
+
+ E.g.::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.memory'
+ )
+
+
+ To use a Python dictionary of your choosing,
+ it can be passed in with the ``cache_dict``
+ argument::
+
+ my_dictionary = {}
+ region = make_region().configure(
+ 'dogpile.cache.memory',
+ arguments={
+ "cache_dict":my_dictionary
+ }
+ )
+
+
+ """
+ pickle_values = False
+
+ def __init__(self, arguments):
+ self._cache = arguments.pop("cache_dict", {})
+
+ def get(self, key):
+ value = self._cache.get(key, NO_VALUE)
+ if value is not NO_VALUE and self.pickle_values:
+ value = pickle.loads(value)
+ return value
+
+ def get_multi(self, keys):
+ ret = [
+ self._cache.get(key, NO_VALUE)
+ for key in keys]
+ if self.pickle_values:
+ ret = [
+ pickle.loads(value)
+ if value is not NO_VALUE else value
+ for value in ret
+ ]
+ return ret
+
+ def set(self, key, value):
+ if self.pickle_values:
+ value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
+ self._cache[key] = value
+
+ def set_multi(self, mapping):
+ pickle_values = self.pickle_values
+ for key, value in mapping.items():
+ if pickle_values:
+ value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
+ self._cache[key] = value
+
+ def delete(self, key):
+ self._cache.pop(key, None)
+
+ def delete_multi(self, keys):
+ for key in keys:
+ self._cache.pop(key, None)
+
+
+class MemoryPickleBackend(MemoryBackend):
+ """A backend that uses a plain dictionary, but serializes objects on
+ :meth:`.MemoryBackend.set` and deserializes on :meth:`.MemoryBackend.get`.
+
+ E.g.::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.memory_pickle'
+ )
+
+ The use of pickle to serialize cached values means the object
+ placed in the cache is a copy of the original given object, so
+ that any subsequent changes to the given object aren't reflected
+ in the cached value; the backend thus behaves the same way
+ as other backends which make use of serialization.
+
+ The serialization is performed via pickle, and incurs the same
+ performance hit in doing so as that of other backends; in this way
+ the :class:`.MemoryPickleBackend` performance is somewhere in between
+ that of the pure :class:`.MemoryBackend` and the remote server oriented
+ backends such as that of Memcached or Redis.
+
+ Pickle behavior here is the same as that of the Redis backend, using
+ either ``cPickle`` or ``pickle`` and specifying ``HIGHEST_PROTOCOL``
+ upon serialization.
+
+ .. versionadded:: 0.5.3
+
+ """
+ pickle_values = True
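(Editor's sketch, not part of the diff: the difference between the two memory backends is easiest to see with a mutable value::

    from dogpile.cache import make_region

    plain = make_region().configure("dogpile.cache.memory")
    pickled = make_region().configure("dogpile.cache.memory_pickle")

    data = {"n": 1}
    plain.set("k", data)
    pickled.set("k", data)
    data["n"] = 2  # mutate the original after caching

    print(plain.get("k"))    # {'n': 2} - the same object was stored
    print(pickled.get("k"))  # {'n': 1} - a pickled copy is insulated

)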
diff --git a/libs/dogpile/cache/backends/null.py b/libs/dogpile/cache/backends/null.py
new file mode 100644
index 000000000..603cca3f6
--- /dev/null
+++ b/libs/dogpile/cache/backends/null.py
@@ -0,0 +1,62 @@
+"""
+Null Backend
+-------------
+
+The Null backend does not do any caching at all. It can be
+used to test behavior without caching, or as a means of disabling
+caching for a region that is otherwise used normally.
+
+.. versionadded:: 0.5.4
+
+"""
+
+from ..api import CacheBackend, NO_VALUE
+
+
+__all__ = ['NullBackend']
+
+
+class NullLock(object):
+ def acquire(self, wait=True):
+ return True
+
+ def release(self):
+ pass
+
+
+class NullBackend(CacheBackend):
+ """A "null" backend that effectively disables all cache operations.
+
+ Basic usage::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.null'
+ )
+
+ """
+
+ def __init__(self, arguments):
+ pass
+
+ def get_mutex(self, key):
+ return NullLock()
+
+ def get(self, key):
+ return NO_VALUE
+
+ def get_multi(self, keys):
+ return [NO_VALUE for k in keys]
+
+ def set(self, key, value):
+ pass
+
+ def set_multi(self, mapping):
+ pass
+
+ def delete(self, key):
+ pass
+
+ def delete_multi(self, keys):
+ pass
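(Editor's sketch, not part of the diff: because the null backend still hands out a no-op mutex, decorators keep working unchanged. One use is switching caching off via configuration; the environment variable is hypothetical::

    import os

    from dogpile.cache import make_region

    backend = ("dogpile.cache.null"
               if os.environ.get("DISABLE_CACHE")
               else "dogpile.cache.memory")
    region = make_region().configure(backend)

    @region.cache_on_arguments()
    def double(x):
        return x * 2  # with the null backend, recomputed on every call

)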
diff --git a/libs/dogpile/cache/backends/redis.py b/libs/dogpile/cache/backends/redis.py
new file mode 100644
index 000000000..d665320a7
--- /dev/null
+++ b/libs/dogpile/cache/backends/redis.py
@@ -0,0 +1,183 @@
+"""
+Redis Backends
+------------------
+
+Provides backends for talking to `Redis <http://redis.io>`_.
+
+"""
+
+from __future__ import absolute_import
+from ..api import CacheBackend, NO_VALUE
+from ...util.compat import pickle, u
+
+redis = None
+
+__all__ = 'RedisBackend',
+
+
+class RedisBackend(CacheBackend):
+ """A `Redis <http://redis.io/>`_ backend, using the
+ `redis-py <http://pypi.python.org/pypi/redis/>`_ backend.
+
+ Example configuration::
+
+ from dogpile.cache import make_region
+
+ region = make_region().configure(
+ 'dogpile.cache.redis',
+ arguments = {
+ 'host': 'localhost',
+ 'port': 6379,
+ 'db': 0,
+ 'redis_expiration_time': 60*60*2, # 2 hours
+ 'distributed_lock': True
+ }
+ )
+
+ Arguments accepted in the arguments dictionary:
+
+ :param url: string. If provided, will override separate host/port/db
+ params. The format is that accepted by ``StrictRedis.from_url()``.
+
+ .. versionadded:: 0.4.1
+
+ :param host: string, default is ``localhost``.
+
+ :param password: string, default is no password.
+
+ .. versionadded:: 0.4.1
+
+ :param port: integer, default is ``6379``.
+
+ :param db: integer, default is ``0``.
+
+ :param redis_expiration_time: integer, number of seconds after setting
+ a value that Redis should expire it. This should be larger than dogpile's
+ cache expiration. By default no expiration is set.
+
+ :param distributed_lock: boolean, when True, will use a
+ redis-lock as the dogpile lock.
+ Use this when multiple
+ processes will be talking to the same redis instance.
+ When left at False, dogpile will coordinate on a regular
+ threading mutex.
+
+ :param lock_timeout: integer, number of seconds after acquiring a lock that
+ Redis should expire it. This argument is only valid when
+ ``distributed_lock`` is ``True``.
+
+ .. versionadded:: 0.5.0
+
+ :param socket_timeout: float, seconds for socket timeout.
+ Default is None (no timeout).
+
+ .. versionadded:: 0.5.4
+
+ :param lock_sleep: integer, number of seconds to sleep after failing to
+ acquire a lock. This argument is only valid when
+ ``distributed_lock`` is ``True``.
+
+ .. versionadded:: 0.5.0
+
+ :param connection_pool: ``redis.ConnectionPool`` object. If provided,
+ this object supersedes other connection arguments passed to the
+ ``redis.StrictRedis`` instance, including url and/or host as well as
+ socket_timeout, and will be passed to ``redis.StrictRedis`` as the
+ source of connectivity.
+
+ .. versionadded:: 0.5.4
+
+
+ """
+
+ def __init__(self, arguments):
+ arguments = arguments.copy()
+ self._imports()
+ self.url = arguments.pop('url', None)
+ self.host = arguments.pop('host', 'localhost')
+ self.password = arguments.pop('password', None)
+ self.port = arguments.pop('port', 6379)
+ self.db = arguments.pop('db', 0)
+ self.distributed_lock = arguments.get('distributed_lock', False)
+ self.socket_timeout = arguments.pop('socket_timeout', None)
+
+ self.lock_timeout = arguments.get('lock_timeout', None)
+ self.lock_sleep = arguments.get('lock_sleep', 0.1)
+
+ self.redis_expiration_time = arguments.pop('redis_expiration_time', 0)
+ self.connection_pool = arguments.get('connection_pool', None)
+ self.client = self._create_client()
+
+ def _imports(self):
+ # defer imports until backend is used
+ global redis
+ import redis # noqa
+
+ def _create_client(self):
+ if self.connection_pool is not None:
+ # the connection pool already has all other connection
+ # options present within, so here we disregard socket_timeout
+ # and others.
+ return redis.StrictRedis(connection_pool=self.connection_pool)
+
+ args = {}
+ if self.socket_timeout:
+ args['socket_timeout'] = self.socket_timeout
+
+ if self.url is not None:
+ args.update(url=self.url)
+ return redis.StrictRedis.from_url(**args)
+ else:
+ args.update(
+ host=self.host, password=self.password,
+ port=self.port, db=self.db
+ )
+ return redis.StrictRedis(**args)
+
+ def get_mutex(self, key):
+ if self.distributed_lock:
+ return self.client.lock(u('_lock{0}').format(key),
+ self.lock_timeout, self.lock_sleep)
+ else:
+ return None
+
+ def get(self, key):
+ value = self.client.get(key)
+ if value is None:
+ return NO_VALUE
+ return pickle.loads(value)
+
+ def get_multi(self, keys):
+ if not keys:
+ return []
+ values = self.client.mget(keys)
+ return [
+ pickle.loads(v) if v is not None else NO_VALUE
+ for v in values]
+
+ def set(self, key, value):
+ if self.redis_expiration_time:
+ self.client.setex(key, self.redis_expiration_time,
+ pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
+ else:
+ self.client.set(key, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
+
+ def set_multi(self, mapping):
+ mapping = dict(
+ (k, pickle.dumps(v, pickle.HIGHEST_PROTOCOL))
+ for k, v in mapping.items()
+ )
+
+ if not self.redis_expiration_time:
+ self.client.mset(mapping)
+ else:
+ pipe = self.client.pipeline()
+ for key, value in mapping.items():
+ pipe.setex(key, self.redis_expiration_time, value)
+ pipe.execute()
+
+ def delete(self, key):
+ self.client.delete(key)
+
+ def delete_multi(self, keys):
+ self.client.delete(*keys)
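(Editor's sketch, not part of the diff: a region combining the arguments above with :meth:`.CacheRegion.get_or_create`; the URL and key are illustrative::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.redis",
        expiration_time=120,
        arguments={
            "url": "redis://localhost:6379/0",  # supersedes host/port/db
            "distributed_lock": True,
            "lock_timeout": 30,  # lock self-expires if a worker dies
        }
    )

    def load_user():
        return {"id": 42}  # stand-in for a database query

    user = region.get_or_create("user:42", load_user)

)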
diff --git a/libs/dogpile/cache/exception.py b/libs/dogpile/cache/exception.py
new file mode 100644
index 000000000..adeb1b4a3
--- /dev/null
+++ b/libs/dogpile/cache/exception.py
@@ -0,0 +1,25 @@
+"""Exception classes for dogpile.cache."""
+
+
+class DogpileCacheException(Exception):
+ """Base Exception for dogpile.cache exceptions to inherit from."""
+
+
+class RegionAlreadyConfigured(DogpileCacheException):
+ """CacheRegion instance is already configured."""
+
+
+class RegionNotConfigured(DogpileCacheException):
+ """CacheRegion instance has not been configured."""
+
+
+class ValidationError(DogpileCacheException):
+ """Error validating a value or option."""
+
+
+class PluginNotFound(DogpileCacheException):
+ """The specified plugin could not be found.
+
+ .. versionadded:: 0.6.4
+
+ """
diff --git a/libs/dogpile/cache/plugins/__init__.py b/libs/dogpile/cache/plugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libs/dogpile/cache/plugins/__init__.py
diff --git a/libs/dogpile/cache/plugins/mako_cache.py b/libs/dogpile/cache/plugins/mako_cache.py
new file mode 100644
index 000000000..61f4ffaf3
--- /dev/null
+++ b/libs/dogpile/cache/plugins/mako_cache.py
@@ -0,0 +1,90 @@
+"""
+Mako Integration
+----------------
+
+dogpile.cache includes a `Mako <http://www.makotemplates.org>`_ plugin
+that replaces `Beaker <http://beaker.groovie.org>`_
+as the cache backend.
+Set up a Mako template lookup using the "dogpile.cache" cache implementation
+and a region dictionary::
+
+ from dogpile.cache import make_region
+ from mako.lookup import TemplateLookup
+
+ my_regions = {
+ "local":make_region().configure(
+ "dogpile.cache.dbm",
+ expiration_time=360,
+ arguments={"filename":"file.dbm"}
+ ),
+ "memcached":make_region().configure(
+ "dogpile.cache.pylibmc",
+ expiration_time=3600,
+ arguments={"url":["127.0.0.1"]}
+ )
+ }
+
+ mako_lookup = TemplateLookup(
+ directories=["/myapp/templates"],
+ cache_impl="dogpile.cache",
+ cache_args={
+ 'regions':my_regions
+ }
+ )
+
+To use the above configuration in a template, use the ``cached=True``
+argument on any Mako tag which accepts it, in conjunction with the
+name of the desired region as the ``cache_region`` argument::
+
+ <%def name="mysection()" cached="True" cache_region="memcached">
+ some content that's cached
+ </%def>
+
+
+"""
+from mako.cache import CacheImpl
+
+
+class MakoPlugin(CacheImpl):
+ """A Mako ``CacheImpl`` which talks to dogpile.cache."""
+
+ def __init__(self, cache):
+ super(MakoPlugin, self).__init__(cache)
+ try:
+ self.regions = self.cache.template.cache_args['regions']
+ except KeyError:
+ raise KeyError(
+ "'cache_regions' argument is required on the "
+ "Mako Lookup or Template object for usage "
+ "with the dogpile.cache plugin.")
+
+ def _get_region(self, **kw):
+ try:
+ region = kw['region']
+ except KeyError:
+ raise KeyError(
+ "'cache_region' argument must be specified with 'cache=True'"
+ "within templates for usage with the dogpile.cache plugin.")
+ try:
+ return self.regions[region]
+ except KeyError:
+ raise KeyError("No such region '%s'" % region)
+
+ def get_and_replace(self, key, creation_function, **kw):
+ expiration_time = kw.pop("timeout", None)
+ return self._get_region(**kw).get_or_create(
+ key, creation_function,
+ expiration_time=expiration_time)
+
+ def get_or_create(self, key, creation_function, **kw):
+ return self.get_and_replace(key, creation_function, **kw)
+
+ def put(self, key, value, **kw):
+ self._get_region(**kw).put(key, value)
+
+ def get(self, key, **kw):
+ expiration_time = kw.pop("timeout", None)
+ return self._get_region(**kw).get(key, expiration_time=expiration_time)
+
+ def invalidate(self, key, **kw):
+ self._get_region(**kw).delete(key)
diff --git a/libs/dogpile/cache/proxy.py b/libs/dogpile/cache/proxy.py
new file mode 100644
index 000000000..15c6b5746
--- /dev/null
+++ b/libs/dogpile/cache/proxy.py
@@ -0,0 +1,95 @@
+"""
+Proxy Backends
+------------------
+
+Provides a utility and a decorator class that allow for modifying the behavior
+of different backends without altering the class itself or having to extend the
+base backend.
+
+.. versionadded:: 0.5.0 Added support for the :class:`.ProxyBackend` class.
+
+"""
+
+from .api import CacheBackend
+
+
+class ProxyBackend(CacheBackend):
+ """A decorator class for altering the functionality of backends.
+
+ Basic usage::
+
+ from dogpile.cache import make_region
+ from dogpile.cache.proxy import ProxyBackend
+
+ class MyFirstProxy(ProxyBackend):
+ def get(self, key):
+ # ... custom code goes here ...
+ return self.proxied.get(key)
+
+ def set(self, key, value):
+ # ... custom code goes here ...
+ self.proxied.set(key)
+
+ class MySecondProxy(ProxyBackend):
+ def get(self, key):
+ # ... custom code goes here ...
+ return self.proxied.get(key)
+
+
+ region = make_region().configure(
+ 'dogpile.cache.dbm',
+ expiration_time = 3600,
+ arguments = {
+ "filename":"/path/to/cachefile.dbm"
+ },
+ wrap = [ MyFirstProxy, MySecondProxy ]
+ )
+
+ Classes that extend :class:`.ProxyBackend` can be stacked
+ together. The ``.proxied`` property will always
+ point to either the concrete backend instance or
+ the next proxy in the chain that a method can be
+ delegated towards.
+
+ .. versionadded:: 0.5.0
+
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.proxied = None
+
+ def wrap(self, backend):
+ ''' Take a backend as an argument and set up the self.proxied property.
+ Return an object that can be used as a backend by a :class:`.CacheRegion`
+ object.
+ '''
+ assert(
+ isinstance(backend, CacheBackend) or
+ isinstance(backend, ProxyBackend))
+ self.proxied = backend
+ return self
+
+ #
+ # Delegate any functions that are not already overridden to
+ # the proxied backend
+ #
+ def get(self, key):
+ return self.proxied.get(key)
+
+ def set(self, key, value):
+ self.proxied.set(key, value)
+
+ def delete(self, key):
+ self.proxied.delete(key)
+
+ def get_multi(self, keys):
+ return self.proxied.get_multi(keys)
+
+ def set_multi(self, mapping):
+ self.proxied.set_multi(mapping)
+
+ def delete_multi(self, keys):
+ self.proxied.delete_multi(keys)
+
+ def get_mutex(self, key):
+ return self.proxied.get_mutex(key)
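(Editor's sketch, not part of the diff: a concrete instance of the pattern above, a proxy that logs cache misses; ``LoggingProxy`` is a hypothetical name, not part of the library::

    import logging

    from dogpile.cache import make_region
    from dogpile.cache.api import NO_VALUE
    from dogpile.cache.proxy import ProxyBackend

    log = logging.getLogger(__name__)

    class LoggingProxy(ProxyBackend):
        def get(self, key):
            value = self.proxied.get(key)
            if value is NO_VALUE:
                log.debug("cache miss: %s", key)
            return value

    region = make_region().configure(
        "dogpile.cache.memory",
        wrap=[LoggingProxy],  # wraps the memory backend
    )

)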
diff --git a/libs/dogpile/cache/region.py b/libs/dogpile/cache/region.py
new file mode 100644
index 000000000..a16a2a151
--- /dev/null
+++ b/libs/dogpile/cache/region.py
@@ -0,0 +1,1518 @@
+from __future__ import with_statement
+from .. import Lock, NeedRegenerationException
+from ..util import NameRegistry
+from . import exception
+from ..util import PluginLoader, memoized_property, coerce_string_conf
+from .util import function_key_generator, function_multi_key_generator
+from .api import NO_VALUE, CachedValue
+from .proxy import ProxyBackend
+from ..util import compat
+import time
+import datetime
+from numbers import Number
+from functools import wraps
+import threading
+
+_backend_loader = PluginLoader("dogpile.cache")
+register_backend = _backend_loader.register
+from . import backends # noqa
+
+value_version = 1
+"""An integer placed in the :class:`.CachedValue`
+so that new versions of dogpile.cache can detect cached
+values from a previous, backwards-incompatible version.
+
+"""
+
+
+class RegionInvalidationStrategy(object):
+ """Region invalidation strategy interface
+
+ Implement this interface and pass implementation instance
+ to :meth:`.CacheRegion.configure` to override default region invalidation.
+
+ Example::
+
+ class CustomInvalidationStrategy(RegionInvalidationStrategy):
+
+ def __init__(self):
+ self._soft_invalidated = None
+ self._hard_invalidated = None
+
+ def invalidate(self, hard=None):
+ if hard:
+ self._soft_invalidated = None
+ self._hard_invalidated = time.time()
+ else:
+ self._soft_invalidated = time.time()
+ self._hard_invalidated = None
+
+ def is_invalidated(self, timestamp):
+ return ((self._soft_invalidated and
+ timestamp < self._soft_invalidated) or
+ (self._hard_invalidated and
+ timestamp < self._hard_invalidated))
+
+ def was_hard_invalidated(self):
+ return bool(self._hard_invalidated)
+
+ def is_hard_invalidated(self, timestamp):
+ return (self._hard_invalidated and
+ timestamp < self._hard_invalidated)
+
+ def was_soft_invalidated(self):
+ return bool(self._soft_invalidated)
+
+ def is_soft_invalidated(self, timestamp):
+ return (self._soft_invalidated and
+ timestamp < self._soft_invalidated)
+
+ The custom implementation is injected into a :class:`.CacheRegion`
+ at configure time using the
+ :paramref:`.CacheRegion.configure.region_invalidator` parameter::
+
+ region = CacheRegion()
+
+ region = region.configure(region_invalidator=CustomInvalidationStrategy())
+
+ Invalidation strategies that wish to have access to the
+ :class:`.CacheRegion` itself should construct the invalidator given the
+ region as an argument::
+
+ class MyInvalidator(RegionInvalidationStrategy):
+ def __init__(self, region):
+ self.region = region
+ # ...
+
+ # ...
+
+ region = CacheRegion()
+ region = region.configure(region_invalidator=MyInvalidator(region))
+
+ .. versionadded:: 0.6.2
+
+ .. seealso::
+
+ :paramref:`.CacheRegion.configure.region_invalidator`
+
+ """
+
+ def invalidate(self, hard=True):
+ """Region invalidation.
+
+ :class:`.CacheRegion` propagated call.
+ The default invalidation system works by setting
+ a current timestamp (using ``time.time()``) to consider all older
+ timestamps effectively invalidated.
+
+ """
+
+ raise NotImplementedError()
+
+ def is_hard_invalidated(self, timestamp):
+ """Check timestamp to determine if it was hard invalidated.
+
+ :return: Boolean. True if ``timestamp`` is older than
+ the last region invalidation time and region is invalidated
+ in hard mode.
+
+ """
+
+ raise NotImplementedError()
+
+ def is_soft_invalidated(self, timestamp):
+ """Check timestamp to determine if it was soft invalidated.
+
+ :return: Boolean. True if ``timestamp`` is older than
+ the last region invalidation time and region is invalidated
+ in soft mode.
+
+ """
+
+ raise NotImplementedError()
+
+ def is_invalidated(self, timestamp):
+ """Check timestamp to determine if it was invalidated.
+
+ :return: Boolean. True if ``timestamp`` is older than
+ the last region invalidation time.
+
+ """
+
+ raise NotImplementedError()
+
+ def was_soft_invalidated(self):
+ """Indicate the region was invalidated in soft mode.
+
+ :return: Boolean. True if region was invalidated in soft mode.
+
+ """
+
+ raise NotImplementedError()
+
+ def was_hard_invalidated(self):
+ """Indicate the region was invalidated in hard mode.
+
+ :return: Boolean. True if region was invalidated in hard mode.
+
+ """
+
+ raise NotImplementedError()
+
+
+class DefaultInvalidationStrategy(RegionInvalidationStrategy):
+
+ def __init__(self):
+ self._is_hard_invalidated = None
+ self._invalidated = None
+
+ def invalidate(self, hard=True):
+ self._is_hard_invalidated = bool(hard)
+ self._invalidated = time.time()
+
+ def is_invalidated(self, timestamp):
+ return (self._invalidated is not None and
+ timestamp < self._invalidated)
+
+ def was_hard_invalidated(self):
+ return self._is_hard_invalidated is True
+
+ def is_hard_invalidated(self, timestamp):
+ return self.was_hard_invalidated() and self.is_invalidated(timestamp)
+
+ def was_soft_invalidated(self):
+ return self._is_hard_invalidated is False
+
+ def is_soft_invalidated(self, timestamp):
+ return self.was_soft_invalidated() and self.is_invalidated(timestamp)
+
+
+class CacheRegion(object):
+ """A front end to a particular cache backend.
+
+ :param name: Optional, a string name for the region.
+ This isn't used internally
+ but can be accessed via the ``.name`` attribute, helpful
+ for configuring a region from a config file.
+ :param function_key_generator: Optional. A
+ function that will produce a "cache key" given
+ a data creation function and arguments, when using
+ the :meth:`.CacheRegion.cache_on_arguments` method.
+ The structure of this function
+ should be two levels: given the data creation function,
+ return a new function that generates the key based on
+ the given arguments. Such as::
+
+ def my_key_generator(namespace, fn, **kw):
+ fname = fn.__name__
+ def generate_key(*arg):
+ return namespace + "_" + fname + "_".join(str(s) for s in arg)
+ return generate_key
+
+
+ region = make_region(
+ function_key_generator = my_key_generator
+ ).configure(
+ "dogpile.cache.dbm",
+ expiration_time=300,
+ arguments={
+ "filename":"file.dbm"
+ }
+ )
+
+ The ``namespace`` is that passed to
+ :meth:`.CacheRegion.cache_on_arguments`. It's not consulted
+ outside this function, so in fact can be of any form.
+ For example, it can be passed as a tuple, used to specify
+ arguments to pluck from \**kw::
+
+ def my_key_generator(namespace, fn):
+ def generate_key(*arg, **kw):
+ return ":".join(
+ [kw[k] for k in namespace] +
+ [str(x) for x in arg]
+ )
+ return generate_key
+
+
+ Where the decorator might be used as::
+
+ @my_region.cache_on_arguments(namespace=('x', 'y'))
+ def my_function(a, b, **kw):
+ return my_data()
+
+ .. seealso::
+
+ :func:`.function_key_generator` - default key generator
+
+ :func:`.kwarg_function_key_generator` - optional gen that also
+ uses keyword arguments
+
+ :param function_multi_key_generator: Optional.
+ Similar to the ``function_key_generator`` parameter, but used in
+ :meth:`.CacheRegion.cache_multi_on_arguments`. The generated function
+ should return a list of keys. For example::
+
+ def my_multi_key_generator(namespace, fn, **kw):
+ namespace = fn.__name__ + (namespace or '')
+
+ def generate_keys(*args):
+ return [namespace + ':' + str(a) for a in args]
+
+ return generate_keys
+
+ :param key_mangler: Function which will be used on all incoming
+ keys before passing to the backend. Defaults to ``None``,
+ in which case the key mangling function recommended by
+ the cache backend will be used. A typical mangler
+ is the SHA1 mangler found at :func:`.sha1_mangle_key`
+ which coerces keys into a SHA1
+ hash, so that the string length is fixed. To
+ disable all key mangling, set to ``False``. Another typical
+ mangler is the built-in Python function ``str``, which can be used
+ to convert non-string or Unicode keys to bytestrings, which is
+ needed when using a backend such as bsddb or dbm under Python 2.x
+ in conjunction with Unicode keys.
+ :param async_creation_runner: A callable that, when specified,
+ will be passed to and called by dogpile.lock when
+ there is a stale value present in the cache. It will be passed the
+ mutex and is responsible for releasing that mutex when finished.
+ This can be used to defer the computation of expensive creator
+ functions to later points in the future by way of, for example, a
+ background thread, a long-running queue, or a task manager system
+ like Celery.
+
+ For a specific example using async_creation_runner, new values can
+ be created in a background thread like so::
+
+ import threading
+
+ def async_creation_runner(cache, somekey, creator, mutex):
+ ''' Used by dogpile.core:Lock when appropriate '''
+ def runner():
+ try:
+ value = creator()
+ cache.set(somekey, value)
+ finally:
+ mutex.release()
+
+ thread = threading.Thread(target=runner)
+ thread.start()
+
+
+ region = make_region(
+ async_creation_runner=async_creation_runner,
+ ).configure(
+ 'dogpile.cache.memcached',
+ expiration_time=5,
+ arguments={
+ 'url': '127.0.0.1:11211',
+ 'distributed_lock': True,
+ }
+ )
+
+ Remember that the first request for a key with no associated
+ value will always block; async_creator will not be invoked.
+ However, subsequent requests for cached-but-expired values will
+ still return promptly. They will be refreshed by whatever
+ asynchronous means the provided async_creation_runner callable
+ implements.
+
+ By default the async_creation_runner is disabled and is set
+ to ``None``.
+
+ .. versionadded:: 0.4.2 added the async_creation_runner
+ feature.
+
+ """
+
+ def __init__(
+ self,
+ name=None,
+ function_key_generator=function_key_generator,
+ function_multi_key_generator=function_multi_key_generator,
+ key_mangler=None,
+ async_creation_runner=None,
+ ):
+ """Construct a new :class:`.CacheRegion`."""
+ self.name = name
+ self.function_key_generator = function_key_generator
+ self.function_multi_key_generator = function_multi_key_generator
+ self.key_mangler = self._user_defined_key_mangler = key_mangler
+ self.async_creation_runner = async_creation_runner
+ self.region_invalidator = DefaultInvalidationStrategy()
+
+ def configure(
+ self, backend,
+ expiration_time=None,
+ arguments=None,
+ _config_argument_dict=None,
+ _config_prefix=None,
+ wrap=None,
+ replace_existing_backend=False,
+ region_invalidator=None
+ ):
+ """Configure a :class:`.CacheRegion`.
+
+ The :class:`.CacheRegion` itself
+ is returned.
+
+ :param backend: Required. This is the name of the
+ :class:`.CacheBackend` to use, and is resolved by loading
+ the class from the ``dogpile.cache`` entrypoint.
+
+ :param expiration_time: Optional. The expiration time passed
+ to the dogpile system. May be passed as an integer number
+ of seconds, or as a ``datetime.timedelta`` value.
+
+        .. versionadded:: 0.5.0
+ ``expiration_time`` may be optionally passed as a
+ ``datetime.timedelta`` value.
+
+ The :meth:`.CacheRegion.get_or_create`
+ method as well as the :meth:`.CacheRegion.cache_on_arguments`
+ decorator (though note: **not** the :meth:`.CacheRegion.get`
+ method) will call upon the value creation function after this
+ time period has passed since the last generation.
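+
+        E.g., against a hypothetical, already-created region::
+
+            import datetime
+
+            region.configure(
+                'dogpile.cache.memory',
+                expiration_time=datetime.timedelta(hours=1)
+            )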
+
+ :param arguments: Optional. The structure here is passed
+ directly to the constructor of the :class:`.CacheBackend`
+ in use, though is typically a dictionary.
+
+ :param wrap: Optional. A list of :class:`.ProxyBackend`
+ classes and/or instances, each of which will be applied
+ in a chain to ultimately wrap the original backend,
+ so that custom functionality augmentation can be applied.
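+
+         E.g., a sketch using a hypothetical ``LoggingProxy`` subclass
+         of :class:`.ProxyBackend`::
+
+             region.configure(
+                 'dogpile.cache.memory',
+                 wrap=[LoggingProxy]
+             )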
+
+ .. versionadded:: 0.5.0
+
+ .. seealso::
+
+ :ref:`changing_backend_behavior`
+
+ :param replace_existing_backend: if True, the existing cache backend
+ will be replaced. Without this flag, an exception is raised if
+ a backend is already configured.
+
+ .. versionadded:: 0.5.7
+
+ :param region_invalidator: Optional. Override default invalidation
+ strategy with custom implementation of
+ :class:`.RegionInvalidationStrategy`.
+
+ .. versionadded:: 0.6.2
+
+ """
+
+ if "backend" in self.__dict__ and not replace_existing_backend:
+ raise exception.RegionAlreadyConfigured(
+ "This region is already "
+ "configured with backend: %s. "
+ "Specify replace_existing_backend=True to replace."
+ % self.backend)
+
+ try:
+ backend_cls = _backend_loader.load(backend)
+ except PluginLoader.NotFound:
+ raise exception.PluginNotFound(
+ "Couldn't find cache plugin to load: %s" % backend)
+
+ if _config_argument_dict:
+ self.backend = backend_cls.from_config_dict(
+ _config_argument_dict,
+ _config_prefix
+ )
+ else:
+ self.backend = backend_cls(arguments or {})
+
+ if not expiration_time or isinstance(expiration_time, Number):
+ self.expiration_time = expiration_time
+ elif isinstance(expiration_time, datetime.timedelta):
+ self.expiration_time = int(
+ compat.timedelta_total_seconds(expiration_time))
+ else:
+ raise exception.ValidationError(
+ 'expiration_time is not a number or timedelta.')
+
+ if not self._user_defined_key_mangler:
+ self.key_mangler = self.backend.key_mangler
+
+ self._lock_registry = NameRegistry(self._create_mutex)
+
+ if getattr(wrap, '__iter__', False):
+ for wrapper in reversed(wrap):
+ self.wrap(wrapper)
+
+ if region_invalidator:
+ self.region_invalidator = region_invalidator
+
+ return self
+
+ def wrap(self, proxy):
+ ''' Takes a ProxyBackend instance or class and wraps the
+ attached backend. '''
+
+ # if we were passed a type rather than an instance then
+ # initialize it.
+ if type(proxy) == type:
+ proxy = proxy()
+
+ if not issubclass(type(proxy), ProxyBackend):
+ raise TypeError("Type %s is not a valid ProxyBackend"
+ % type(proxy))
+
+ self.backend = proxy.wrap(self.backend)
+
+ def _mutex(self, key):
+ return self._lock_registry.get(key)
+
+ class _LockWrapper(object):
+ """weakref-capable wrapper for threading.Lock"""
+ def __init__(self):
+ self.lock = threading.Lock()
+
+ def acquire(self, wait=True):
+ return self.lock.acquire(wait)
+
+ def release(self):
+ self.lock.release()
+
+ def _create_mutex(self, key):
+ mutex = self.backend.get_mutex(key)
+ if mutex is not None:
+ return mutex
+ else:
+ return self._LockWrapper()
+
+ # cached value
+ _actual_backend = None
+
+ @property
+ def actual_backend(self):
+ """Return the ultimate backend underneath any proxies.
+
+ The backend might be the result of one or more ``proxy.wrap``
+ applications. If so, derive the actual underlying backend.
+
+ .. versionadded:: 0.6.6
+
+ """
+ if self._actual_backend is None:
+ _backend = self.backend
+ while hasattr(_backend, 'proxied'):
+ _backend = _backend.proxied
+ self._actual_backend = _backend
+ return self._actual_backend
+
+ def invalidate(self, hard=True):
+ """Invalidate this :class:`.CacheRegion`.
+
+ The default invalidation system works by setting
+ a current timestamp (using ``time.time()``)
+ representing the "minimum creation time" for
+ a value. Any retrieved value whose creation
+ time is prior to this timestamp
+ is considered to be stale. It does not
+ affect the data in the cache in any way, and is
+ **local to this instance of :class:`.CacheRegion`.**
+
+ .. warning::
+
+ The :meth:`.CacheRegion.invalidate` method's default mode of
+ operation is to set a timestamp **local to this CacheRegion
+ in this Python process only**. It does not impact other Python
+ processes or regions as the timestamp is **only stored locally in
+ memory**. To implement invalidation where the
+ timestamp is stored in the cache or similar so that all Python
+ processes can be affected by an invalidation timestamp, implement a
+ custom :class:`.RegionInvalidationStrategy`.
+
+ Once set, the invalidation time is honored by
+ the :meth:`.CacheRegion.get_or_create`,
+ :meth:`.CacheRegion.get_or_create_multi` and
+ :meth:`.CacheRegion.get` methods.
+
+ The method supports both "hard" and "soft" invalidation
+ options. With "hard" invalidation,
+ :meth:`.CacheRegion.get_or_create` will force an immediate
+ regeneration of the value which all getters will wait for.
+ With "soft" invalidation, subsequent getters will return the
+ "old" value until the new one is available.
+
+ Usage of "soft" invalidation requires that the region or the method
+ is given a non-None expiration time.
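+
+        E.g.::
+
+            region.invalidate()            # hard invalidation
+            region.invalidate(hard=False)  # soft invalidation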
+
+ .. versionadded:: 0.3.0
+
+ :param hard: if True, cache values will all require immediate
+ regeneration; dogpile logic won't be used. If False, the
+ creation time of existing values will be pushed back before
+ the expiration time so that a return+regen will be invoked.
+
+ .. versionadded:: 0.5.1
+
+ """
+ self.region_invalidator.invalidate(hard)
+
+ def configure_from_config(self, config_dict, prefix):
+ """Configure from a configuration dictionary
+ and a prefix.
+
+ Example::
+
+ local_region = make_region()
+ memcached_region = make_region()
+
+ # regions are ready to use for function
+ # decorators, but not yet for actual caching
+
+ # later, when config is available
+ myconfig = {
+ "cache.local.backend":"dogpile.cache.dbm",
+ "cache.local.arguments.filename":"/path/to/dbmfile.dbm",
+ "cache.memcached.backend":"dogpile.cache.pylibmc",
+ "cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
+ }
+ local_region.configure_from_config(myconfig, "cache.local.")
+ memcached_region.configure_from_config(myconfig,
+ "cache.memcached.")
+
+ """
+ config_dict = coerce_string_conf(config_dict)
+ return self.configure(
+ config_dict["%sbackend" % prefix],
+ expiration_time=config_dict.get(
+ "%sexpiration_time" % prefix, None),
+ _config_argument_dict=config_dict,
+ _config_prefix="%sarguments." % prefix,
+ wrap=config_dict.get(
+ "%swrap" % prefix, None),
+ replace_existing_backend=config_dict.get(
+ "%sreplace_existing_backend" % prefix, False),
+ )
+
+ @memoized_property
+ def backend(self):
+ raise exception.RegionNotConfigured(
+ "No backend is configured on this region.")
+
+ @property
+ def is_configured(self):
+ """Return True if the backend has been configured via the
+ :meth:`.CacheRegion.configure` method already.
+
+ .. versionadded:: 0.5.1
+
+ """
+ return 'backend' in self.__dict__
+
+ def get(self, key, expiration_time=None, ignore_expiration=False):
+ """Return a value from the cache, based on the given key.
+
+ If the value is not present, the method returns the token
+        ``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is distinct from
+        ``None``, so that a cached value of ``None`` can be distinguished
+        from a missing value.
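+
+        E.g., a sketch with a hypothetical ``compute_value()``
+        fallback::
+
+            value = region.get("some key")
+            if value is NO_VALUE:
+                value = compute_value()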
+
+ By default, the configured expiration time of the
+ :class:`.CacheRegion`, or alternatively the expiration
+ time supplied by the ``expiration_time`` argument,
+ is tested against the creation time of the retrieved
+ value versus the current time (as reported by ``time.time()``).
+ If stale, the cached value is ignored and the ``NO_VALUE``
+ token is returned. Passing the flag ``ignore_expiration=True``
+ bypasses the expiration time check.
+
+ .. versionchanged:: 0.3.0
+ :meth:`.CacheRegion.get` now checks the value's creation time
+ against the expiration time, rather than returning
+ the value unconditionally.
+
+ The method also interprets the cached value in terms
+ of the current "invalidation" time as set by
+ the :meth:`.invalidate` method. If a value is present,
+ but its creation time is older than the current
+ invalidation time, the ``NO_VALUE`` token is returned.
+ Passing the flag ``ignore_expiration=True`` bypasses
+ the invalidation time check.
+
+ .. versionadded:: 0.3.0
+ Support for the :meth:`.CacheRegion.invalidate`
+ method.
+
+ :param key: Key to be retrieved. While it's typical for a key to be a
+ string, it is ultimately passed directly down to the cache backend,
+         after being optionally processed by the key_mangler function, so it can
+ be of any type recognized by the backend or by the key_mangler
+ function, if present.
+
+ :param expiration_time: Optional expiration time value
+ which will supersede that configured on the :class:`.CacheRegion`
+ itself.
+
+ .. versionadded:: 0.3.0
+
+ :param ignore_expiration: if ``True``, the value is returned
+ from the cache if present, regardless of configured
+ expiration times or whether or not :meth:`.invalidate`
+ was called.
+
+ .. versionadded:: 0.3.0
+
+ """
+
+ if self.key_mangler:
+ key = self.key_mangler(key)
+ value = self.backend.get(key)
+ value = self._unexpired_value_fn(
+ expiration_time, ignore_expiration)(value)
+
+ return value.payload
+
+ def _unexpired_value_fn(self, expiration_time, ignore_expiration):
+ if ignore_expiration:
+ return lambda value: value
+ else:
+ if expiration_time is None:
+ expiration_time = self.expiration_time
+
+ current_time = time.time()
+
+ def value_fn(value):
+ if value is NO_VALUE:
+ return value
+ elif expiration_time is not None and \
+ current_time - value.metadata["ct"] > expiration_time:
+ return NO_VALUE
+ elif self.region_invalidator.is_invalidated(
+ value.metadata["ct"]):
+ return NO_VALUE
+ else:
+ return value
+
+ return value_fn
+
+ def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
+ """Return multiple values from the cache, based on the given keys.
+
+ Returns values as a list matching the keys given.
+
+ E.g.::
+
+ values = region.get_multi(["one", "two", "three"])
+
+ To convert values to a dictionary, use ``zip()``::
+
+ keys = ["one", "two", "three"]
+ values = region.get_multi(keys)
+ dictionary = dict(zip(keys, values))
+
+        Keys which aren't present in the cache are returned as
+        the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
+        but is distinct from ``None``, so that a cached value of
+        ``None`` can be distinguished from a missing value.
+
+ By default, the configured expiration time of the
+ :class:`.CacheRegion`, or alternatively the expiration
+ time supplied by the ``expiration_time`` argument,
+ is tested against the creation time of the retrieved
+ value versus the current time (as reported by ``time.time()``).
+ If stale, the cached value is ignored and the ``NO_VALUE``
+ token is returned. Passing the flag ``ignore_expiration=True``
+ bypasses the expiration time check.
+
+ .. versionadded:: 0.5.0
+
+ """
+ if not keys:
+ return []
+
+ if self.key_mangler:
+ keys = list(map(lambda key: self.key_mangler(key), keys))
+
+ backend_values = self.backend.get_multi(keys)
+
+ _unexpired_value_fn = self._unexpired_value_fn(
+ expiration_time, ignore_expiration)
+ return [
+ value.payload if value is not NO_VALUE else value
+ for value in
+ (
+ _unexpired_value_fn(value) for value in
+ backend_values
+ )
+ ]
+
+ def get_or_create(
+ self, key, creator, expiration_time=None, should_cache_fn=None):
+ """Return a cached value based on the given key.
+
+ If the value does not exist or is considered to be expired
+ based on its creation time, the given
+ creation function may or may not be used to recreate the value
+ and persist the newly generated value in the cache.
+
+        Whether or not the function is used depends on whether the
+        *dogpile lock* can be acquired. If it can't, it means
+        a different thread or process is already running a creation
+        function for this key against the cache. When the dogpile
+        lock cannot be acquired, the method will block if no
+        previous value is available, until the lock is released and
+        a new value is available. If a previous value
+ is available, that value is returned immediately without blocking.
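+
+        E.g., a minimal sketch, assuming a hypothetical
+        ``somedatabase``::
+
+            def create_value():
+                return somedatabase.query("some key")
+
+            value = region.get_or_create("some key", create_value)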
+
+ If the :meth:`.invalidate` method has been called, and
+ the retrieved value's timestamp is older than the invalidation
+ timestamp, the value is unconditionally prevented from
+ being returned. The method will attempt to acquire the dogpile
+ lock to generate a new value, or will wait
+ until the lock is released to return the new value.
+
+ .. versionchanged:: 0.3.0
+ The value is unconditionally regenerated if the creation
+ time is older than the last call to :meth:`.invalidate`.
+
+ :param key: Key to be retrieved. While it's typical for a key to be a
+ string, it is ultimately passed directly down to the cache backend,
+         after being optionally processed by the key_mangler function, so it can
+ be of any type recognized by the backend or by the key_mangler
+ function, if present.
+
+ :param creator: function which creates a new value.
+
+        :param expiration_time: optional expiration time which will override
+ the expiration time already configured on this :class:`.CacheRegion`
+ if not None. To set no expiration, use the value -1.
+
+ :param should_cache_fn: optional callable function which will receive
+ the value returned by the "creator", and will then return True or
+ False, indicating if the value should actually be cached or not. If
+ it returns False, the value is still returned, but isn't cached.
+ E.g.::
+
+ def dont_cache_none(value):
+ return value is not None
+
+ value = region.get_or_create("some key",
+ create_value,
+ should_cache_fn=dont_cache_none)
+
+ Above, the function returns the value of create_value() if
+         the cache is invalid; however, if the return value is None,
+ it won't be cached.
+
+ .. versionadded:: 0.4.3
+
+ .. seealso::
+
+ :meth:`.CacheRegion.cache_on_arguments` - applies
+ :meth:`.get_or_create` to any function using a decorator.
+
+ :meth:`.CacheRegion.get_or_create_multi` - multiple key/value
+ version
+
+ """
+ orig_key = key
+ if self.key_mangler:
+ key = self.key_mangler(key)
+
+ def get_value():
+ value = self.backend.get(key)
+ if (value is NO_VALUE or value.metadata['v'] != value_version or
+ self.region_invalidator.is_hard_invalidated(
+ value.metadata["ct"])):
+ raise NeedRegenerationException()
+ ct = value.metadata["ct"]
+ if self.region_invalidator.is_soft_invalidated(ct):
+ ct = time.time() - expiration_time - .0001
+
+ return value.payload, ct
+
+ def gen_value():
+ created_value = creator()
+ value = self._value(created_value)
+
+ if not should_cache_fn or \
+ should_cache_fn(created_value):
+ self.backend.set(key, value)
+
+ return value.payload, value.metadata["ct"]
+
+ if expiration_time is None:
+ expiration_time = self.expiration_time
+
+ if (expiration_time is None and
+ self.region_invalidator.was_soft_invalidated()):
+ raise exception.DogpileCacheException(
+ "Non-None expiration time required "
+ "for soft invalidation")
+
+ if expiration_time == -1:
+ expiration_time = None
+
+ if self.async_creation_runner:
+ def async_creator(mutex):
+ return self.async_creation_runner(
+ self, orig_key, creator, mutex)
+ else:
+ async_creator = None
+
+ with Lock(
+ self._mutex(key),
+ gen_value,
+ get_value,
+ expiration_time,
+ async_creator) as value:
+ return value
+
+ def get_or_create_multi(
+ self, keys, creator, expiration_time=None, should_cache_fn=None):
+ """Return a sequence of cached values based on a sequence of keys.
+
+ The behavior for generation of values based on keys corresponds
+ to that of :meth:`.Region.get_or_create`, with the exception that
+ the ``creator()`` function may be asked to generate any subset of
+ the given keys. The list of keys to be generated is passed to
+ ``creator()``, and ``creator()`` should return the generated values
+ as a sequence corresponding to the order of the keys.
+
+ The method uses the same approach as :meth:`.Region.get_multi`
+ and :meth:`.Region.set_multi` to get and set values from the
+ backend.
+
+ If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
+ that modifies values, take note this function invokes
+ ``.set_multi()`` for newly generated values using the same values it
+ returns to the calling function. A correct implementation of
+ ``.set_multi()`` will not modify values in-place on the submitted
+ ``mapping`` dict.
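+
+        E.g., a minimal sketch, assuming a hypothetical
+        ``somedatabase``::
+
+            def create_values(*keys):
+                return [somedatabase.query(key) for key in keys]
+
+            values = region.get_or_create_multi(
+                ["k1", "k2", "k3"], create_values)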
+
+ :param keys: Sequence of keys to be retrieved.
+
+ :param creator: function which accepts a sequence of keys and
+ returns a sequence of new values.
+
+        :param expiration_time: optional expiration time which will override
+ the expiration time already configured on this :class:`.CacheRegion`
+ if not None. To set no expiration, use the value -1.
+
+ :param should_cache_fn: optional callable function which will receive
+ each value returned by the "creator", and will then return True or
+ False, indicating if the value should actually be cached or not. If
+ it returns False, the value is still returned, but isn't cached.
+
+ .. versionadded:: 0.5.0
+
+ .. seealso::
+
+ :meth:`.CacheRegion.cache_multi_on_arguments`
+
+ :meth:`.CacheRegion.get_or_create`
+
+ """
+
+ def get_value(key):
+ value = values.get(key, NO_VALUE)
+
+ if (value is NO_VALUE or value.metadata['v'] != value_version or
+ self.region_invalidator.is_hard_invalidated(
+ value.metadata['ct'])):
+ # dogpile.core understands a 0 here as
+ # "the value is not available", e.g.
+ # _has_value() will return False.
+ return value.payload, 0
+ else:
+ ct = value.metadata["ct"]
+ if self.region_invalidator.is_soft_invalidated(ct):
+ ct = time.time() - expiration_time - .0001
+
+ return value.payload, ct
+
+ def gen_value():
+ raise NotImplementedError()
+
+ def async_creator(key, mutex):
+ mutexes[key] = mutex
+
+ if expiration_time is None:
+ expiration_time = self.expiration_time
+
+ if (expiration_time is None and
+ self.region_invalidator.was_soft_invalidated()):
+ raise exception.DogpileCacheException(
+ "Non-None expiration time required "
+ "for soft invalidation")
+
+ if expiration_time == -1:
+ expiration_time = None
+
+ mutexes = {}
+
+ sorted_unique_keys = sorted(set(keys))
+
+ if self.key_mangler:
+ mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
+ else:
+ mangled_keys = sorted_unique_keys
+
+ orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
+
+ values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
+
+ for orig_key, mangled_key in orig_to_mangled.items():
+ with Lock(
+ self._mutex(mangled_key),
+ gen_value,
+ lambda: get_value(mangled_key),
+ expiration_time,
+ async_creator=lambda mutex: async_creator(orig_key, mutex)
+ ):
+ pass
+ try:
+ if mutexes:
+                # sort the keys; the idea is to prevent deadlocks,
+                # though we haven't been able to simulate one.
+ keys_to_get = sorted(mutexes)
+ new_values = creator(*keys_to_get)
+
+ values_w_created = dict(
+ (orig_to_mangled[k], self._value(v))
+ for k, v in zip(keys_to_get, new_values)
+ )
+
+ if not should_cache_fn:
+ self.backend.set_multi(values_w_created)
+ else:
+ values_to_cache = dict(
+ (k, v)
+ for k, v in values_w_created.items()
+ if should_cache_fn(v[0])
+ )
+
+ if values_to_cache:
+ self.backend.set_multi(values_to_cache)
+
+ values.update(values_w_created)
+ return [values[orig_to_mangled[k]].payload for k in keys]
+ finally:
+ for mutex in mutexes.values():
+ mutex.release()
+
+ def _value(self, value):
+ """Return a :class:`.CachedValue` given a value."""
+ return CachedValue(
+ value,
+ {
+ "ct": time.time(),
+ "v": value_version
+ })
+
+ def set(self, key, value):
+ """Place a new value in the cache under the given key."""
+
+ if self.key_mangler:
+ key = self.key_mangler(key)
+ self.backend.set(key, self._value(value))
+
+ def set_multi(self, mapping):
+ """Place new values in the cache under the given keys.
+
+ .. versionadded:: 0.5.0
+
+ """
+ if not mapping:
+ return
+
+ if self.key_mangler:
+ mapping = dict((
+ self.key_mangler(k), self._value(v))
+ for k, v in mapping.items())
+ else:
+ mapping = dict((k, self._value(v)) for k, v in mapping.items())
+ self.backend.set_multi(mapping)
+
+ def delete(self, key):
+ """Remove a value from the cache.
+
+ This operation is idempotent (can be called multiple times, or on a
+ non-existent key, safely)
+ """
+
+ if self.key_mangler:
+ key = self.key_mangler(key)
+
+ self.backend.delete(key)
+
+ def delete_multi(self, keys):
+ """Remove multiple values from the cache.
+
+ This operation is idempotent (can be called multiple times, or on a
+ non-existent key, safely)
+
+ .. versionadded:: 0.5.0
+
+ """
+
+ if self.key_mangler:
+ keys = list(map(lambda key: self.key_mangler(key), keys))
+
+ self.backend.delete_multi(keys)
+
+ def cache_on_arguments(
+ self, namespace=None,
+ expiration_time=None,
+ should_cache_fn=None,
+ to_str=compat.string_type,
+ function_key_generator=None):
+ """A function decorator that will cache the return
+ value of the function using a key derived from the
+ function itself and its arguments.
+
+ The decorator internally makes use of the
+ :meth:`.CacheRegion.get_or_create` method to access the
+ cache and conditionally call the function. See that
+ method for additional behavioral details.
+
+ E.g.::
+
+ @someregion.cache_on_arguments()
+ def generate_something(x, y):
+ return somedatabase.query(x, y)
+
+ The decorated function can then be called normally, where
+ data will be pulled from the cache region unless a new
+ value is needed::
+
+ result = generate_something(5, 6)
+
+ The function is also given an attribute ``invalidate()``, which
+ provides for invalidation of the value. Pass to ``invalidate()``
+ the same arguments you'd pass to the function itself to represent
+ a particular value::
+
+ generate_something.invalidate(5, 6)
+
+ Another attribute ``set()`` is added to provide extra caching
+ possibilities relative to the function. This is a convenience
+ method for :meth:`.CacheRegion.set` which will store a given
+ value directly without calling the decorated function.
+ The value to be cached is passed as the first argument, and the
+ arguments which would normally be passed to the function
+ should follow::
+
+ generate_something.set(3, 5, 6)
+
+ The above example is equivalent to calling
+ ``generate_something(5, 6)``, if the function were to produce
+ the value ``3`` as the value to be cached.
+
+ .. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
+
+ Similar to ``set()`` is ``refresh()``. This attribute will
+ invoke the decorated function and populate a new value into
+ the cache with the new value, as well as returning that value::
+
+ newvalue = generate_something.refresh(5, 6)
+
+ .. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
+ function.
+
+        ``original()``, on the other hand, will invoke the decorated function
+ without any caching::
+
+ newvalue = generate_something.original(5, 6)
+
+ .. versionadded:: 0.6.0 Added ``original()`` method to decorated
+ function.
+
+ Lastly, the ``get()`` method returns either the value cached
+ for the given key, or the token ``NO_VALUE`` if no such key
+ exists::
+
+ value = generate_something.get(5, 6)
+
+ .. versionadded:: 0.5.3 Added ``get()`` method to decorated
+ function.
+
+ The default key generation will use the name
+ of the function, the module name for the function,
+ the arguments passed, as well as an optional "namespace"
+ parameter in order to generate a cache key.
+
+ Given a function ``one`` inside the module
+ ``myapp.tools``::
+
+ @region.cache_on_arguments(namespace="foo")
+ def one(a, b):
+ return a + b
+
+ Above, calling ``one(3, 4)`` will produce a
+ cache key as follows::
+
+ myapp.tools:one|foo|3 4
+
+ The key generator will ignore an initial argument
+ of ``self`` or ``cls``, making the decorator suitable
+ (with caveats) for use with instance or class methods.
+ Given the example::
+
+ class MyClass(object):
+ @region.cache_on_arguments(namespace="foo")
+ def one(self, a, b):
+ return a + b
+
+ The cache key above for ``MyClass().one(3, 4)`` will
+ again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
+ the name ``self`` is skipped.
+
+ The ``namespace`` parameter is optional, and is used
+ normally to disambiguate two functions of the same
+ name within the same module, as can occur when decorating
+ instance or class methods as below::
+
+ class MyClass(object):
+ @region.cache_on_arguments(namespace='MC')
+ def somemethod(self, x, y):
+ ""
+
+ class MyOtherClass(object):
+ @region.cache_on_arguments(namespace='MOC')
+ def somemethod(self, x, y):
+ ""
+
+ Above, the ``namespace`` parameter disambiguates
+ between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
+ Python class declaration mechanics otherwise prevent
+ the decorator from having awareness of the ``MyClass``
+ and ``MyOtherClass`` names, as the function is received
+ by the decorator before it becomes an instance method.
+
+ The function key generation can be entirely replaced
+ on a per-region basis using the ``function_key_generator``
+ argument present on :func:`.make_region` and
+        :class:`.CacheRegion`. It defaults to
+ :func:`.function_key_generator`.
+
+ :param namespace: optional string argument which will be
+ established as part of the cache key. This may be needed
+ to disambiguate functions of the same name within the same
+ source file, such as those
+ associated with classes - note that the decorator itself
+ can't see the parent class on a function as the class is
+ being declared.
+
+ :param expiration_time: if not None, will override the normal
+ expiration time.
+
+ May be specified as a callable, taking no arguments, that
+ returns a value to be used as the ``expiration_time``. This callable
+ will be called whenever the decorated function itself is called, in
+ caching or retrieving. Thus, this can be used to
+ determine a *dynamic* expiration time for the cached function
+ result. Example use cases include "cache the result until the
+ end of the day, week or time period" and "cache until a certain date
+ or time passes".
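+
+         E.g., a sketch assuming a hypothetical
+         ``seconds_until_midnight()`` helper::
+
+             @region.cache_on_arguments(
+                 expiration_time=seconds_until_midnight)
+             def generate_something(x):
+                 return somedatabase.query(x)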
+
+ .. versionchanged:: 0.5.0
+ ``expiration_time`` may be passed as a callable to
+ :meth:`.CacheRegion.cache_on_arguments`.
+
+ :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
+
+ .. versionadded:: 0.4.3
+
+ :param to_str: callable, will be called on each function argument
+ in order to convert to a string. Defaults to ``str()``. If the
+ function accepts non-ascii unicode arguments on Python 2.x, the
+ ``unicode()`` builtin can be substituted, but note this will
+ produce unicode cache keys which may require key mangling before
+ reaching the cache.
+
+ .. versionadded:: 0.5.0
+
+ :param function_key_generator: a function that will produce a
+ "cache key". This function will supersede the one configured on the
+ :class:`.CacheRegion` itself.
+
+ .. versionadded:: 0.5.5
+
+ .. seealso::
+
+ :meth:`.CacheRegion.cache_multi_on_arguments`
+
+ :meth:`.CacheRegion.get_or_create`
+
+ """
+ expiration_time_is_callable = compat.callable(expiration_time)
+
+ if function_key_generator is None:
+ function_key_generator = self.function_key_generator
+
+ def decorator(fn):
+ if to_str is compat.string_type:
+ # backwards compatible
+ key_generator = function_key_generator(namespace, fn)
+ else:
+ key_generator = function_key_generator(
+ namespace, fn,
+ to_str=to_str)
+
+ @wraps(fn)
+ def decorate(*arg, **kw):
+ key = key_generator(*arg, **kw)
+
+ @wraps(fn)
+ def creator():
+ return fn(*arg, **kw)
+ timeout = expiration_time() if expiration_time_is_callable \
+ else expiration_time
+ return self.get_or_create(key, creator, timeout,
+ should_cache_fn)
+
+ def invalidate(*arg, **kw):
+ key = key_generator(*arg, **kw)
+ self.delete(key)
+
+ def set_(value, *arg, **kw):
+ key = key_generator(*arg, **kw)
+ self.set(key, value)
+
+ def get(*arg, **kw):
+ key = key_generator(*arg, **kw)
+ return self.get(key)
+
+ def refresh(*arg, **kw):
+ key = key_generator(*arg, **kw)
+ value = fn(*arg, **kw)
+ self.set(key, value)
+ return value
+
+ decorate.set = set_
+ decorate.invalidate = invalidate
+ decorate.refresh = refresh
+ decorate.get = get
+ decorate.original = fn
+
+ return decorate
+ return decorator
+
+ def cache_multi_on_arguments(
+ self, namespace=None, expiration_time=None,
+ should_cache_fn=None,
+ asdict=False, to_str=compat.string_type,
+ function_multi_key_generator=None):
+ """A function decorator that will cache multiple return
+ values from the function using a sequence of keys derived from the
+ function itself and the arguments passed to it.
+
+ This method is the "multiple key" analogue to the
+ :meth:`.CacheRegion.cache_on_arguments` method.
+
+ Example::
+
+ @someregion.cache_multi_on_arguments()
+ def generate_something(*keys):
+ return [
+ somedatabase.query(key)
+ for key in keys
+ ]
+
+ The decorated function can be called normally. The decorator
+ will produce a list of cache keys using a mechanism similar to
+ that of :meth:`.CacheRegion.cache_on_arguments`, combining the
+ name of the function with the optional namespace and with the
+ string form of each key. It will then consult the cache using
+ the same mechanism as that of :meth:`.CacheRegion.get_multi`
+ to retrieve all current values; the originally passed keys
+ corresponding to those values which aren't generated or need
+ regeneration will be assembled into a new argument list, and
+ the decorated function is then called with that subset of
+ arguments.
+
+ The returned result is a list::
+
+ result = generate_something("key1", "key2", "key3")
+
+ The decorator internally makes use of the
+ :meth:`.CacheRegion.get_or_create_multi` method to access the
+ cache and conditionally call the function. See that
+ method for additional behavioral details.
+
+ Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
+ :meth:`.CacheRegion.cache_multi_on_arguments` works only with
+ a single function signature, one which takes a simple list of
+ keys as arguments.
+
+ Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
+ is also provided with a ``set()`` method, which here accepts a
+ mapping of keys and values to set in the cache::
+
+ generate_something.set({"k1": "value1",
+ "k2": "value2", "k3": "value3"})
+
+ ...an ``invalidate()`` method, which has the effect of deleting
+ the given sequence of keys using the same mechanism as that of
+ :meth:`.CacheRegion.delete_multi`::
+
+ generate_something.invalidate("k1", "k2", "k3")
+
+ ...a ``refresh()`` method, which will call the creation
+ function, cache the new values, and return them::
+
+ values = generate_something.refresh("k1", "k2", "k3")
+
+ ...and a ``get()`` method, which will return values
+ based on the given arguments::
+
+ values = generate_something.get("k1", "k2", "k3")
+
+ .. versionadded:: 0.5.3 Added ``get()`` method to decorated
+ function.
+
+ Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
+ have the same meaning as those passed to
+ :meth:`.CacheRegion.cache_on_arguments`.
+
+ :param namespace: optional string argument which will be
+ established as part of each cache key.
+
+ :param expiration_time: if not None, will override the normal
+ expiration time. May be passed as an integer or a
+ callable.
+
+ :param should_cache_fn: passed to
+ :meth:`.CacheRegion.get_or_create_multi`. This function is given a
+ value as returned by the creator, and only if it returns True will
+ that value be placed in the cache.
+
+ :param asdict: if ``True``, the decorated function should return
+ its result as a dictionary of keys->values, and the final result
+ of calling the decorated function will also be a dictionary.
+ If left at its default value of ``False``, the decorated function
+ should return its result as a list of values, and the final
+ result of calling the decorated function will also be a list.
+
+         When ``asdict==True``, if the dictionary returned by the decorated
+ function is missing keys, those keys will not be cached.
+
+ :param to_str: callable, will be called on each function argument
+ in order to convert to a string. Defaults to ``str()``. If the
+ function accepts non-ascii unicode arguments on Python 2.x, the
+ ``unicode()`` builtin can be substituted, but note this will
+ produce unicode cache keys which may require key mangling before
+ reaching the cache.
+
+ .. versionadded:: 0.5.0
+
+ :param function_multi_key_generator: a function that will produce a
+ list of keys. This function will supersede the one configured on the
+ :class:`.CacheRegion` itself.
+
+ .. versionadded:: 0.5.5
+
+ .. seealso::
+
+ :meth:`.CacheRegion.cache_on_arguments`
+
+ :meth:`.CacheRegion.get_or_create_multi`
+
+ """
+ expiration_time_is_callable = compat.callable(expiration_time)
+
+ if function_multi_key_generator is None:
+ function_multi_key_generator = self.function_multi_key_generator
+
+ def decorator(fn):
+ key_generator = function_multi_key_generator(
+ namespace, fn,
+ to_str=to_str)
+
+ @wraps(fn)
+ def decorate(*arg, **kw):
+ cache_keys = arg
+ keys = key_generator(*arg, **kw)
+ key_lookup = dict(zip(keys, cache_keys))
+
+ @wraps(fn)
+ def creator(*keys_to_create):
+ return fn(*[key_lookup[k] for k in keys_to_create])
+
+ timeout = expiration_time() if expiration_time_is_callable \
+ else expiration_time
+
+ if asdict:
+ def dict_create(*keys):
+ d_values = creator(*keys)
+ return [
+ d_values.get(key_lookup[k], NO_VALUE)
+ for k in keys]
+
+ def wrap_cache_fn(value):
+ if value is NO_VALUE:
+ return False
+ elif not should_cache_fn:
+ return True
+ else:
+ return should_cache_fn(value)
+
+ result = self.get_or_create_multi(
+ keys, dict_create, timeout, wrap_cache_fn)
+ result = dict(
+ (k, v) for k, v in zip(cache_keys, result)
+ if v is not NO_VALUE)
+ else:
+ result = self.get_or_create_multi(
+ keys, creator, timeout,
+ should_cache_fn)
+
+ return result
+
+ def invalidate(*arg):
+ keys = key_generator(*arg)
+ self.delete_multi(keys)
+
+ def set_(mapping):
+ keys = list(mapping)
+ gen_keys = key_generator(*keys)
+ self.set_multi(dict(
+ (gen_key, mapping[key])
+ for gen_key, key
+ in zip(gen_keys, keys))
+ )
+
+ def get(*arg):
+ keys = key_generator(*arg)
+ return self.get_multi(keys)
+
+ def refresh(*arg):
+ keys = key_generator(*arg)
+ values = fn(*arg)
+ if asdict:
+ self.set_multi(
+ dict(zip(keys, [values[a] for a in arg]))
+ )
+ return values
+ else:
+ self.set_multi(
+ dict(zip(keys, values))
+ )
+ return values
+
+ decorate.set = set_
+ decorate.invalidate = invalidate
+ decorate.refresh = refresh
+ decorate.get = get
+
+ return decorate
+ return decorator
+
+
+def make_region(*arg, **kw):
+ """Instantiate a new :class:`.CacheRegion`.
+
+ Currently, :func:`.make_region` is a passthrough
+ to :class:`.CacheRegion`. See that class for
+ constructor arguments.
+
+ """
+ return CacheRegion(*arg, **kw)
diff --git a/libs/dogpile/cache/util.py b/libs/dogpile/cache/util.py
new file mode 100644
index 000000000..4b56f6429
--- /dev/null
+++ b/libs/dogpile/cache/util.py
@@ -0,0 +1,146 @@
+from hashlib import sha1
+import inspect
+from ..util import compat
+from ..util import langhelpers
+
+
+def function_key_generator(namespace, fn, to_str=compat.string_type):
+ """Return a function that generates a string
+ key, based on a given function as well as
+ arguments to the returned function itself.
+
+ This is used by :meth:`.CacheRegion.cache_on_arguments`
+ to generate a cache key from a decorated function.
+
+ An alternate function may be used by specifying
+ the :paramref:`.CacheRegion.function_key_generator` argument
+ for :class:`.CacheRegion`.
+
+ .. seealso::
+
+ :func:`.kwarg_function_key_generator` - similar function that also
+ takes keyword arguments into account
+
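+    E.g., with a hypothetical function ``myfunc(x, y)`` defined in a
+    module ``mymodule``::
+
+        gen = function_key_generator(None, myfunc)
+        gen("a", "b")  # returns "mymodule:myfunc|a b"
+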
+ """
+
+ if namespace is None:
+ namespace = '%s:%s' % (fn.__module__, fn.__name__)
+ else:
+ namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
+
+ args = inspect.getargspec(fn)
+ has_self = args[0] and args[0][0] in ('self', 'cls')
+
+ def generate_key(*args, **kw):
+ if kw:
+ raise ValueError(
+ "dogpile.cache's default key creation "
+ "function does not accept keyword arguments.")
+ if has_self:
+ args = args[1:]
+
+ return namespace + "|" + " ".join(map(to_str, args))
+ return generate_key
+
+
+def function_multi_key_generator(namespace, fn, to_str=compat.string_type):
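+    # Like function_key_generator, but the returned function generates
+    # one key per positional argument, for use with
+    # CacheRegion.cache_multi_on_arguments().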
+
+ if namespace is None:
+ namespace = '%s:%s' % (fn.__module__, fn.__name__)
+ else:
+ namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
+
+ args = inspect.getargspec(fn)
+ has_self = args[0] and args[0][0] in ('self', 'cls')
+
+ def generate_keys(*args, **kw):
+ if kw:
+ raise ValueError(
+ "dogpile.cache's default key creation "
+ "function does not accept keyword arguments.")
+ if has_self:
+ args = args[1:]
+ return [namespace + "|" + key for key in map(to_str, args)]
+ return generate_keys
+
+
+def kwarg_function_key_generator(namespace, fn, to_str=compat.string_type):
+ """Return a function that generates a string
+ key, based on a given function as well as
+ arguments to the returned function itself.
+
+    For kwargs passed in, we will build a dict of
+    all argname (key) / argvalue (value) pairs,
+    including default args from the argspec, and then
+    alphabetize the list before generating the
+    key.
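+
+    E.g., with a hypothetical ``myfunc(x, y=2)`` defined in a module
+    ``mymodule``::
+
+        gen = kwarg_function_key_generator(None, myfunc)
+        gen(1)       # "mymodule:myfunc|1 2"
+        gen(1, y=3)  # "mymodule:myfunc|1 3"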
+
+ .. versionadded:: 0.6.2
+
+ .. seealso::
+
+ :func:`.function_key_generator` - default key generation function
+
+ """
+
+ if namespace is None:
+ namespace = '%s:%s' % (fn.__module__, fn.__name__)
+ else:
+ namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
+
+ argspec = inspect.getargspec(fn)
+ default_list = list(argspec.defaults or [])
+ # Reverse the list, as we want to compare the argspec by negative index,
+ # meaning default_list[0] should be args[-1], which works well with
+ # enumerate()
+ default_list.reverse()
+ # use idx*-1 to create the correct right-lookup index.
+ args_with_defaults = dict((argspec.args[(idx*-1)], default)
+ for idx, default in enumerate(default_list, 1))
+ if argspec.args and argspec.args[0] in ('self', 'cls'):
+ arg_index_start = 1
+ else:
+ arg_index_start = 0
+
+ def generate_key(*args, **kwargs):
+ as_kwargs = dict(
+ [(argspec.args[idx], arg)
+ for idx, arg in enumerate(args[arg_index_start:],
+ arg_index_start)])
+ as_kwargs.update(kwargs)
+ for arg, val in args_with_defaults.items():
+ if arg not in as_kwargs:
+ as_kwargs[arg] = val
+
+ argument_values = [as_kwargs[key]
+ for key in sorted(as_kwargs.keys())]
+ return namespace + '|' + " ".join(map(to_str, argument_values))
+ return generate_key
+
+
+def sha1_mangle_key(key):
+ """a SHA1 key mangler."""
+
+ return sha1(key).hexdigest()
+
+
+def length_conditional_mangler(length, mangler):
+ """a key mangler that mangles if the length of the key is
+ past a certain threshold.
+
+ """
+ def mangle(key):
+ if len(key) >= length:
+ return mangler(key)
+ else:
+ return key
+ return mangle
+
+# in the 0.6 release these functions were moved to the dogpile.util namespace.
+# They are linked here to maintain compatibility with older versions.
+
+coerce_string_conf = langhelpers.coerce_string_conf
+KeyReentrantMutex = langhelpers.KeyReentrantMutex
+memoized_property = langhelpers.memoized_property
+PluginLoader = langhelpers.PluginLoader
+to_list = langhelpers.to_list
diff --git a/libs/dogpile/core.py b/libs/dogpile/core.py
new file mode 100644
index 000000000..2bcfaf813
--- /dev/null
+++ b/libs/dogpile/core.py
@@ -0,0 +1,17 @@
+"""Compatibility namespace for those using dogpile.core.
+
+As of dogpile.cache 0.6.0, dogpile.core as a separate package
+is no longer used by dogpile.cache.
+
+Note that this namespace will not take effect if an actual
+dogpile.core installation is present.
+
+"""
+
+from .util import nameregistry # noqa
+from .util import readwrite_lock # noqa
+from .util.readwrite_lock import ReadWriteMutex # noqa
+from .util.nameregistry import NameRegistry # noqa
+from .lock import Lock # noqa
+from .lock import NeedRegenerationException # noqa
+from . import __version__ # noqa
diff --git a/libs/dogpile/lock.py b/libs/dogpile/lock.py
new file mode 100644
index 000000000..2ac22dcfe
--- /dev/null
+++ b/libs/dogpile/lock.py
@@ -0,0 +1,189 @@
+import time
+import logging
+
+log = logging.getLogger(__name__)
+
+
+class NeedRegenerationException(Exception):
+ """An exception that when raised in the 'with' block,
+ forces the 'has_value' flag to False and incurs a
+ regeneration of the value.
+
+ """
+
+NOT_REGENERATED = object()
+
+
+class Lock(object):
+ """Dogpile lock class.
+
+ Provides an interface around an arbitrary mutex
+ that allows one thread/process to be elected as
+ the creator of a new value, while other threads/processes
+ continue to return the previous version
+ of that value.
+
+ :param mutex: A mutex object that provides ``acquire()``
+ and ``release()`` methods.
+ :param creator: Callable which returns a tuple of the form
+ (new_value, creation_time). "new_value" should be a newly
+ generated value representing completed state. "creation_time"
+ should be a floating point time value which is relative
+ to Python's ``time.time()`` call, representing the time
+ at which the value was created. This time value should
+ be associated with the created value.
+ :param value_and_created_fn: Callable which returns
+ a tuple of the form (existing_value, creation_time). This
+      should return what the last local call to the ``creator()``
+ callable has returned, i.e. the value and the creation time,
+ which would be assumed here to be from a cache. If the
+ value is not available, the :class:`.NeedRegenerationException`
+ exception should be thrown.
+ :param expiretime: Expiration time in seconds. Set to
+ ``None`` for never expires. This timestamp is compared
+ to the creation_time result and ``time.time()`` to determine if
+ the value returned by value_and_created_fn is "expired".
+ :param async_creator: A callable. If specified, this callable will be
+ passed the mutex as an argument and is responsible for releasing the mutex
+ after it finishes some asynchronous value creation. The intent is for
+ this to be used to defer invocation of the creator callable until some
+ later time.
+
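+    A minimal usage sketch, where ``get_value`` raises
+    :class:`.NeedRegenerationException` to indicate no value is
+    present yet::
+
+        import threading
+        import time
+
+        def get_value():
+            raise NeedRegenerationException()
+
+        def gen_value():
+            return "new value", time.time()
+
+        with Lock(threading.Lock(), gen_value, get_value, 60) as value:
+            print(value)  # "new value"
+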
+ """
+
+ def __init__(
+ self,
+ mutex,
+ creator,
+ value_and_created_fn,
+ expiretime,
+ async_creator=None,
+ ):
+ self.mutex = mutex
+ self.creator = creator
+ self.value_and_created_fn = value_and_created_fn
+ self.expiretime = expiretime
+ self.async_creator = async_creator
+
+ def _is_expired(self, createdtime):
+ """Return true if the expiration time is reached, or no
+ value is available."""
+
+ return not self._has_value(createdtime) or (
+ self.expiretime is not None and
+ time.time() - createdtime > self.expiretime
+ )
+
+ def _has_value(self, createdtime):
+ """Return true if the creation function has proceeded
+ at least once."""
+ return createdtime > 0
+
+ def _enter(self):
+ value_fn = self.value_and_created_fn
+
+ try:
+ value = value_fn()
+ value, createdtime = value
+ except NeedRegenerationException:
+ log.debug("NeedRegenerationException")
+ value = NOT_REGENERATED
+ createdtime = -1
+
+ generated = self._enter_create(value, createdtime)
+
+ if generated is not NOT_REGENERATED:
+ generated, createdtime = generated
+ return generated
+ elif value is NOT_REGENERATED:
+ # we called upon the creator, and it said that it
+ # didn't regenerate. this typically means another
+ # thread is running the creation function, and that the
+ # cache should still have a value. However,
+ # we don't have a value at all, which is unusual since we just
+ # checked for it, so check again (TODO: is this a real codepath?)
+ try:
+ value, createdtime = value_fn()
+ return value
+ except NeedRegenerationException:
+ raise Exception(
+ "Generation function should "
+ "have just been called by a concurrent "
+ "thread.")
+ else:
+ return value
+
+ def _enter_create(self, value, createdtime):
+ if not self._is_expired(createdtime):
+ return NOT_REGENERATED
+
+ _async = False
+
+ if self._has_value(createdtime):
+ has_value = True
+ if not self.mutex.acquire(False):
+ log.debug(
+ "creation function in progress "
+ "elsewhere, returning")
+ return NOT_REGENERATED
+ else:
+ has_value = False
+ log.debug("no value, waiting for create lock")
+ self.mutex.acquire()
+
+ try:
+ log.debug("value creation lock %r acquired" % self.mutex)
+
+ if not has_value:
+ # we entered without a value, or at least with "creationtime ==
+ # 0". Run the "getter" function again, to see if another
+ # thread has already generated the value while we waited on the
+ # mutex, or if the caller is otherwise telling us there is a
+ # value already which allows us to use async regeneration. (the
+ # latter is used by the multi-key routine).
+ try:
+ value, createdtime = self.value_and_created_fn()
+ except NeedRegenerationException:
+ # nope, nobody created the value, we're it.
+ # we must create it right now
+ pass
+ else:
+ has_value = True
+ # caller is telling us there is a value and that we can
+ # use async creation if it is expired.
+ if not self._is_expired(createdtime):
+ # it's not expired, return it
+ log.debug("Concurrent thread created the value")
+ return value, createdtime
+
+ # otherwise it's expired, call creator again
+
+ if has_value and self.async_creator:
+ # we have a value we can return, safe to use async_creator
+ log.debug("Passing creation lock to async runner")
+
+ # so...run it!
+ self.async_creator(self.mutex)
+ _async = True
+
+ # and return the expired value for now
+ return value, createdtime
+
+ # it's expired, and it's our turn to create it synchronously, *or*,
+ # there's no value at all, and we have to create it synchronously
+ log.debug(
+ "Calling creation function for %s value",
+ "not-yet-present" if not has_value else
+ "previously expired"
+ )
+ return self.creator()
+ finally:
+ if not _async:
+ self.mutex.release()
+ log.debug("Released creation lock")
+
+ def __enter__(self):
+ return self._enter()
+
+ def __exit__(self, type, value, traceback):
+ pass
diff --git a/libs/dogpile/util/__init__.py b/libs/dogpile/util/__init__.py
new file mode 100644
index 000000000..91b075207
--- /dev/null
+++ b/libs/dogpile/util/__init__.py
@@ -0,0 +1,4 @@
+from .nameregistry import NameRegistry # noqa
+from .readwrite_lock import ReadWriteMutex # noqa
+from .langhelpers import PluginLoader, memoized_property, \
+ coerce_string_conf, to_list, KeyReentrantMutex # noqa
diff --git a/libs/dogpile/util/compat.py b/libs/dogpile/util/compat.py
new file mode 100644
index 000000000..d29bb1dac
--- /dev/null
+++ b/libs/dogpile/util/compat.py
@@ -0,0 +1,65 @@
+import sys
+
+py2k = sys.version_info < (3, 0)
+py3k = sys.version_info >= (3, 0)
+py32 = sys.version_info >= (3, 2)
+py27 = sys.version_info >= (2, 7)
+jython = sys.platform.startswith('java')
+win32 = sys.platform.startswith('win')
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading # noqa
+
+
+if py3k: # pragma: no cover
+ string_types = str,
+ text_type = str
+ string_type = str
+
+ if py32:
+ callable = callable
+ else:
+ def callable(fn):
+ return hasattr(fn, '__call__')
+
+ def u(s):
+ return s
+
+ def ue(s):
+ return s
+
+ import configparser
+ import io
+ import _thread as thread
+else:
+ string_types = basestring,
+ text_type = unicode
+ string_type = str
+
+ def u(s):
+ return unicode(s, "utf-8")
+
+ def ue(s):
+ return unicode(s, "unicode_escape")
+
+ import ConfigParser as configparser # noqa
+ import StringIO as io # noqa
+
+ callable = callable # noqa
+ import thread # noqa
+
+
+if py3k or jython:
+ import pickle
+else:
+ import cPickle as pickle # noqa
+
+
+def timedelta_total_seconds(td):
+ if py27:
+ return td.total_seconds()
+ else:
+ return (td.microseconds + (
+ td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
diff --git a/libs/dogpile/util/langhelpers.py b/libs/dogpile/util/langhelpers.py
new file mode 100644
index 000000000..4ff8e3e3e
--- /dev/null
+++ b/libs/dogpile/util/langhelpers.py
@@ -0,0 +1,123 @@
+import re
+import collections
+from . import compat
+
+
+def coerce_string_conf(d):
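+    # Coerce string config values to ints, floats, booleans and None
+    # where they match; all other values pass through unchanged.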
+ result = {}
+ for k, v in d.items():
+ if not isinstance(v, compat.string_types):
+ result[k] = v
+ continue
+
+ v = v.strip()
+ if re.match(r'^[-+]?\d+$', v):
+ result[k] = int(v)
+ elif re.match(r'^[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?$', v):
+ result[k] = float(v)
+ elif v.lower() in ('false', 'true'):
+ result[k] = v.lower() == 'true'
+ elif v == 'None':
+ result[k] = None
+ else:
+ result[k] = v
+ return result
+
+
+class PluginLoader(object):
+ def __init__(self, group):
+ self.group = group
+ self.impls = {}
+
+ def load(self, name):
+ if name in self.impls:
+ return self.impls[name]()
+ else: # pragma NO COVERAGE
+ import pkg_resources
+ for impl in pkg_resources.iter_entry_points(
+ self.group, name):
+ self.impls[name] = impl.load
+ return impl.load()
+ else:
+ raise self.NotFound(
+ "Can't load plugin %s %s" % (self.group, name)
+ )
+
+ def register(self, name, modulepath, objname):
+ def load():
+ mod = __import__(modulepath, fromlist=[objname])
+ return getattr(mod, objname)
+ self.impls[name] = load
+
+ class NotFound(Exception):
+ """The specified plugin could not be found."""
+
+
+class memoized_property(object):
+ """A read-only @property that is only evaluated once."""
+ def __init__(self, fget, doc=None):
+ self.fget = fget
+ self.__doc__ = doc or fget.__doc__
+ self.__name__ = fget.__name__
+
+ def __get__(self, obj, cls):
+ if obj is None:
+ return self
+ obj.__dict__[self.__name__] = result = self.fget(obj)
+ return result
+
+
+def to_list(x, default=None):
+ """Coerce to a list."""
+ if x is None:
+ return default
+ if not isinstance(x, (list, tuple)):
+ return [x]
+ else:
+ return x
+
+
+class KeyReentrantMutex(object):
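+    # Wraps a mutex so that a single thread may "acquire" it for many
+    # keys at once; the underlying mutex is acquired on the first key
+    # and released only when the thread's last key is released.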
+
+ def __init__(self, key, mutex, keys):
+ self.key = key
+ self.mutex = mutex
+ self.keys = keys
+
+ @classmethod
+ def factory(cls, mutex):
+ # this collection holds zero or one
+ # thread idents as the key; a set of
+ # keynames held as the value.
+ keystore = collections.defaultdict(set)
+
+ def fac(key):
+ return KeyReentrantMutex(key, mutex, keystore)
+ return fac
+
+ def acquire(self, wait=True):
+ current_thread = compat.threading.current_thread().ident
+ keys = self.keys.get(current_thread)
+ if keys is not None and \
+ self.key not in keys:
+ # current lockholder, new key. add it in
+ keys.add(self.key)
+ return True
+ elif self.mutex.acquire(wait=wait):
+ # after acquire, create new set and add our key
+ self.keys[current_thread].add(self.key)
+ return True
+ else:
+ return False
+
+ def release(self):
+ current_thread = compat.threading.current_thread().ident
+ keys = self.keys.get(current_thread)
+ assert keys is not None, "this thread didn't do the acquire"
+ assert self.key in keys, "No acquire held for key '%s'" % self.key
+ keys.remove(self.key)
+ if not keys:
+ # when list of keys empty, remove
+ # the thread ident and unlock.
+ del self.keys[current_thread]
+ self.mutex.release()
diff --git a/libs/dogpile/util/nameregistry.py b/libs/dogpile/util/nameregistry.py
new file mode 100644
index 000000000..a5102b238
--- /dev/null
+++ b/libs/dogpile/util/nameregistry.py
@@ -0,0 +1,84 @@
+from .compat import threading
+import weakref
+
+
+class NameRegistry(object):
+ """Generates and return an object, keeping it as a
+ singleton for a certain identifier for as long as its
+ strongly referenced.
+
+ e.g.::
+
+ class MyFoo(object):
+ "some important object."
+ def __init__(self, identifier):
+ self.identifier = identifier
+
+ registry = NameRegistry(MyFoo)
+
+ # thread 1:
+ my_foo = registry.get("foo1")
+
+ # thread 2
+ my_foo = registry.get("foo1")
+
+ Above, ``my_foo`` in both thread #1 and #2 will
+ be *the same object*. The constructor for
+ ``MyFoo`` will be called once, passing the
+ identifier ``foo1`` as the argument.
+
+ When thread 1 and thread 2 both complete or
+ otherwise delete references to ``my_foo``, the
+ object is *removed* from the :class:`.NameRegistry` as
+ a result of Python garbage collection.
+
+ :param creator: A function that will create a new
+ value, given the identifier passed to the :meth:`.NameRegistry.get`
+ method.
+
+ """
+ _locks = weakref.WeakValueDictionary()
+ _mutex = threading.RLock()
+
+ def __init__(self, creator):
+ """Create a new :class:`.NameRegistry`.
+
+
+ """
+ self._values = weakref.WeakValueDictionary()
+ self._mutex = threading.RLock()
+ self.creator = creator
+
+ def get(self, identifier, *args, **kw):
+ """Get and possibly create the value.
+
+ :param identifier: Hash key for the value.
+ If the creation function is called, this identifier
+ will also be passed to the creation function.
+ :param \*args, \**kw: Additional arguments which will
+ also be passed to the creation function if it is
+ called.
+
+ """
+ try:
+ if identifier in self._values:
+ return self._values[identifier]
+ else:
+ return self._sync_get(identifier, *args, **kw)
+ except KeyError:
+ return self._sync_get(identifier, *args, **kw)
+
+ def _sync_get(self, identifier, *args, **kw):
+ self._mutex.acquire()
+ try:
+ try:
+ if identifier in self._values:
+ return self._values[identifier]
+ else:
+ self._values[identifier] = value = self.creator(identifier, *args, **kw)
+ return value
+ except KeyError:
+ self._values[identifier] = value = self.creator(identifier, *args, **kw)
+ return value
+ finally:
+ self._mutex.release()
diff --git a/libs/dogpile/util/readwrite_lock.py b/libs/dogpile/util/readwrite_lock.py
new file mode 100644
index 000000000..9b953edb8
--- /dev/null
+++ b/libs/dogpile/util/readwrite_lock.py
@@ -0,0 +1,132 @@
+from .compat import threading
+
+import logging
+log = logging.getLogger(__name__)
+
+
+class LockError(Exception):
+ pass
+
+
+class ReadWriteMutex(object):
+ """A mutex which allows multiple readers, single writer.
+
+ :class:`.ReadWriteMutex` uses a Python ``threading.Condition``
+ to provide this functionality across threads within a process.
+
+ The Beaker package also contained a file-lock based version
+ of this concept, so that readers/writers could be synchronized
+ across processes with a common filesystem. A future Dogpile
+ release may include this additional class at some point.
+
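+    A minimal usage sketch::
+
+        mutex = ReadWriteMutex()
+
+        # any number of threads may hold the read lock concurrently
+        mutex.acquire_read_lock()
+        try:
+            pass  # read shared state
+        finally:
+            mutex.release_read_lock()
+
+        # only one thread at a time may hold the write lock
+        mutex.acquire_write_lock()
+        try:
+            pass  # mutate shared state
+        finally:
+            mutex.release_write_lock()
+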
+ """
+
+ def __init__(self):
+ # counts how many asynchronous methods are executing
+ self.async_ = 0
+
+ # pointer to thread that is the current sync operation
+ self.current_sync_operation = None
+
+ # condition object to lock on
+ self.condition = threading.Condition(threading.Lock())
+
+ def acquire_read_lock(self, wait=True):
+ """Acquire the 'read' lock."""
+ self.condition.acquire()
+ try:
+ # see if a synchronous operation is waiting to start
+ # or is already running, in which case we wait (or just
+ # give up and return)
+ if wait:
+ while self.current_sync_operation is not None:
+ self.condition.wait()
+ else:
+ if self.current_sync_operation is not None:
+ return False
+
+ self.async_ += 1
+ log.debug("%s acquired read lock", self)
+ finally:
+ self.condition.release()
+
+ if not wait:
+ return True
+
+ def release_read_lock(self):
+ """Release the 'read' lock."""
+ self.condition.acquire()
+ try:
+ self.async_ -= 1
+
+ # check if we are the last asynchronous reader thread
+ # out the door.
+ if self.async_ == 0:
+ # yes. so if a sync operation is waiting, notifyAll to wake
+ # it up
+ if self.current_sync_operation is not None:
+ self.condition.notifyAll()
+ elif self.async_ < 0:
+ raise LockError("Synchronizer error - too many "
+ "release_read_locks called")
+ log.debug("%s released read lock", self)
+ finally:
+ self.condition.release()
+
+ def acquire_write_lock(self, wait=True):
+ """Acquire the 'write' lock."""
+ self.condition.acquire()
+ try:
+            # here, we are not yet the current synchronous operation;
+            # after returning (having waited if necessary), we will be.
+
+ if wait:
+ # if another sync is working, wait
+ while self.current_sync_operation is not None:
+ self.condition.wait()
+ else:
+ # if another sync is working,
+ # we dont want to wait, so forget it
+ if self.current_sync_operation is not None:
+ return False
+
+ # establish ourselves as the current sync
+ # this indicates to other read/write operations
+ # that they should wait until this is None again
+ self.current_sync_operation = threading.currentThread()
+
+ # now wait again for asyncs to finish
+ if self.async_ > 0:
+ if wait:
+ # wait
+ self.condition.wait()
+ else:
+ # we dont want to wait, so forget it
+ self.current_sync_operation = None
+ return False
+ log.debug("%s acquired write lock", self)
+ finally:
+ self.condition.release()
+
+ if not wait:
+ return True
+
+ def release_write_lock(self):
+ """Release the 'write' lock."""
+ self.condition.acquire()
+ try:
+ if self.current_sync_operation is not threading.currentThread():
+ raise LockError("Synchronizer error - current thread doesn't "
+ "have the write lock")
+
+ # reset the current sync operation so
+ # another can get it
+ self.current_sync_operation = None
+
+ # tell everyone to get ready
+ self.condition.notifyAll()
+
+ log.debug("%s released write lock", self)
+ finally:
+ # everyone go !!
+ self.condition.release()