def test_acquire_fail_on_conflict(self):
    """A second backend cannot acquire a key that is already held."""
    key = "lock"
    duration = 60

    # Grab the lock first through an independent backend on the same cluster.
    competitor = RedisLockBackend(self.cluster)
    competitor.acquire(key, duration)

    # The backend under test must now refuse to acquire the same key.
    with pytest.raises(Exception):
        self.backend.acquire(key, duration)
def test_acquire_fail_on_conflict(self):
    """Acquiring a key that another backend already holds raises."""
    # Another backend on the same cluster takes the lock first.
    holder = RedisLockBackend(self.cluster)
    holder.acquire('lock', 60)
    # A conflicting acquire on the backend under test must fail.
    with pytest.raises(Exception):
        self.backend.acquire('lock', 60)
def __init__(self, **options):
    """Configure the Redis digests backend.

    Recognized options (each is popped before the remainder is passed to
    the base class):

    - ``namespace``: key prefix for all digest data (default ``'d'``).
    - ``ttl``: expiry, in seconds, applied to stored data (default 1 hour).
    """
    self.cluster, options = get_cluster_from_options('SENTRY_DIGESTS_OPTIONS', options)
    self.locks = LockManager(RedisLockBackend(self.cluster))

    self.namespace = options.pop('namespace', 'd')

    # Time-to-live (seconds) for records, timelines, and digests. Keep this
    # comfortably high: processed data is deleted explicitly, so the TTL only
    # guards against stale leftovers after a configuration error. It must be
    # larger than the maximum scheduling delay, or data could be evicted
    # before it is processed.
    self.ttl = options.pop('ttl', 60 * 60)

    super(RedisBackend, self).__init__(**options)
""" from __future__ import absolute_import from threading import local from raven.contrib.django.models import client from sentry.utils import redis from sentry.utils.locking.backends.redis import RedisLockBackend from sentry.utils.locking.manager import LockManager class State(local): request = None data = {} env = State() # COMPAT from sentry import search, tsdb # NOQA from .buffer import backend as buffer # NOQA from .digests import backend as digests # NOQA from .nodestore import backend as nodestore # NOQA from .quotas import backend as quotas # NOQA from .ratelimits import backend as ratelimiter # NOQA raven = client locks = LockManager(RedisLockBackend(redis.clusters.get('default')))
def backend(self):
    """Build a Redis lock backend bound to this object's cluster."""
    lock_backend = RedisLockBackend(self.cluster)
    return lock_backend