Example #1
def test_redis_raise_or_lock_locked_and_expired(redis, backend):
    lock = RedisLock(redis, "test", timeout=1)
    lock.acquire()
    time.sleep(1)  # wait for lock to expire

    backend.raise_or_lock(key="test", timeout=60)
    assert redis.get("test") is not None
Example #2
class LockManager:

    def __init__(self, lock_name, lock_timeout=10):
        self.lock_name = lock_name
        self.lock_timeout = lock_timeout
        self.is_lock_free = False

    def __enter__(self):
        self.lock = Lock(
            redis_client,
            self.lock_name,
            blocking_timeout=1,
            timeout=self.lock_timeout,
        )
        try:
            self.is_lock_free = self.lock.acquire(blocking=False)
        except LockError:
            log.error(
                'lock acquire error',
                extra={'data': {'lock_name': self.lock_name}},
                exc_info=True,
            )
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_lock_free:
            try:
                self.lock.release()
            except LockError:
                log.error(
                    'lock release error',
                    extra={'data': {'lock_name': self.lock_name}},
                    exc_info=True,
                )
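A minimal usage sketch for this context manager, assuming the module-level redis_client and log the class relies on are configured; do_work() is a hypothetical stand-in for the critical section:

with LockManager('refresh-report', lock_timeout=30) as manager:
    if manager.is_lock_free:
        do_work()  # runs only while this process holds the lock
    # if the lock was busy, we fall through without doing the work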
Example #3
 def acquire(self, lox_name, lock_id, expires_seconds=None):
     key = self.key(lox_name, lock_id)
     lock = Lock(self.connection, key, timeout=expires_seconds)
     # retry logic is handled in core.lock, so no blocking here
     if lock.acquire(blocking=False):
         return BackendLock(key, lox_name, lock_id, provider_lock=lock)
     else:
         raise LockInUseException("Lock {} has been acquired previously, possibly by another thread/process, and is not available.".format(key))
Example #4
def test_delay_expired(redis):
    lock = RedisLock(redis, "qo_example_a-1", timeout=1)
    lock.acquire()

    assert redis.get("qo_example_a-1") is not None

    time.sleep(1)
    example.delay(1)

    assert redis.get("qo_example_a-1") is None
Example #5
def test_redis_raise_or_lock_locked(redis, backend):
    # Set to expire in 30 seconds!
    lock = RedisLock(redis, "test", timeout=30)
    lock.acquire()

    with pytest.raises(AlreadyQueued) as e:
        backend.raise_or_lock(key="test", timeout=60)

    assert e.value.countdown == 30.0
    assert e.value.message == "Expires in 30.0 seconds"
Example #6
 def acquire(self, lox_name, lock_id, expires_seconds=None):
     key = self.key(lox_name, lock_id)
     lock = Lock(self.connection, key, timeout=expires_seconds)
     # retry logic is handled in core.lock, so no blocking here
     if lock.acquire(blocking=False):
         return BackendLock(key, lox_name, lock_id, provider_lock=lock)
     else:
         raise LockInUseException(
             "Lock {} has been acquired previously, possibly by another thread/process, and is not available."
             .format(key))
Example #7
def test_apply_async_expired(redis):
    lock = RedisLock(redis, "qo_example_a-1", timeout=1)
    lock.acquire()

    assert redis.get("qo_example_a-1") is not None

    time.sleep(1)
    example.apply_async(args=(1, ))

    assert redis.get("qo_example_a-1") is None
Example #8
 def __init__(self, ns: str, name: str, expiration: Union[int, float],
              redis_backend: RedisBackend) -> None:
     super().__init__(ns, name)
     self.__redis = redis_backend.get_redis_connection(ns)
     with _map_to_sdl_exception():
         redis_lockname = '{' + ns + '},' + self._lock_name
         self.__redis_lock = Lock(redis=self.__redis,
                                  name=redis_lockname,
                                  timeout=expiration)
         self._register_scripts()
Example #9
 def __enter__(self):
     self.lock = Lock(
         redis_client,
         self.lock_name,
         blocking_timeout=1,
         timeout=self.lock_timeout,
     )
     try:
         self.is_lock_free = self.lock.acquire(blocking=False)
     except LockError:
         log.error(
             'lock acquire error',
             extra={'data': {'lock_name': self.lock_name}},
             exc_info=True,
         )
     return self
Example #10
def aggregate_day_after_hour_aggregated(sender, left_boundary, right_boundary,
                                        **kwargs):
    aggregate_queryset = HourAggregate.objects.filter(
        timestamp__gte=left_boundary, timestamp__lt=right_boundary)

    aggregate_queryset.query.clear_ordering(force_empty=True)
    aggregate_queryset = aggregate_queryset.values(
        'admin_id', 'instance_id', 'instance_name',
        'source').annotate(value=Sum('value'))
    day_date = datetime(left_boundary.year,
                        left_boundary.month,
                        left_boundary.day,
                        tzinfo=pytz.utc)

    with Lock(redis=redis,
              name='day_aggregation_{}'.format(day_date.isoformat())):
        for aggregate_dict in aggregate_queryset.iterator():
            filter_data = {
                'timestamp': day_date,
                'instance_name': aggregate_dict.get('instance_name'),
                'source': aggregate_dict.get('source'),
                'admin_id': aggregate_dict.get('admin_id')
            }
            if not DayAggregate.objects.filter(**filter_data).update(
                    value=F('value') + aggregate_dict.get('value')):
                DayAggregate.objects.create(timestamp=day_date,
                                            **aggregate_dict)
Example #11
def get_hash_cache(name, key, ex=None, none_callback=None, *args, **kwargs):
    """
    name          : redis hash name
    key           : redis key name
    ex            : expiration time (in seconds)
    none_callback : callback invoked when the cached value is None
    args          : callback positional arguments
    kwargs        : callback keyword arguments
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        v = cli.hget(name, key)
        if v is None and none_callback:
            if DEBUG_CACHE:
                print("no redis cache hit. %s." % key)
            v = none_callback(*args, **kwargs)
            try:
                nv = JSONDataEncoder.encode(v)
            except Exception:
                nv = v
            cli.hset(name, key, nv)
            _set_expire_time(cli, name, ex)
        else:
            if DEBUG_CACHE:
                print("cache redis hit. %s." % key)
            try:
                v = JSONDataEncoder.decode(v)
            except Exception:
                pass
    return v
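A usage sketch under the same assumptions as the snippet above (a configured RedisPool and the module's _dist_lock_key helper); load_user is a hypothetical loader that runs only on a cache miss:

def load_user(user_id):
    return {'id': user_id, 'name': 'example'}

# On a miss, load_user(user_id=42) runs under the distributed lock and
# its JSON-encoded result is stored in the hash before being returned.
profile = get_hash_cache('users', '42', ex=300,
                         none_callback=load_user, user_id=42)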
Example #12
def get_hash_map_cache(name,
                       keys,
                       ex=None,
                       none_callback=None,
                       *args,
                       **kwargs):
    """
    name          : redis hash name
    keys          : key list
    ex            : expiration time (in seconds)
    none_callback : callback invoked when the cached values are None
    args          : callback positional arguments
    kwargs        : callback keyword arguments
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        v = cli.hmget(name, keys)
        # hmget returns a list (all Nones on a miss), never None itself
        if (v is None or all(x is None for x in v)) and none_callback:
            if DEBUG_CACHE:
                print("no redis cache hit. %s." % keys)
            v = none_callback(*args, **kwargs)
            cli.hmset(name, v)
            _set_expire_time(cli, name, ex)

        if v and (isinstance(keys, str) or
                  (isinstance(keys, list) and len(keys) == 1)):
            v = v[-1]
    return v
Example #13
    @classmethod
    def create_lock(cls, id, r_obj=None):
        import config
        from redis.lock import LuaLock as Lock

        redis_key = 'lock_%s_%s' % (cls.__name__, id)
        return Lock(r_obj or rds_tmp, redis_key,
                    sleep=config.REDIS_LOCK_SLEEP,
                    timeout=config.REDIS_LOCK_TIMEOUT)
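redis-py locks are context managers, so a class using this helper might guard a critical section as in the sketch below; Account and update_balance are hypothetical:

with Account.create_lock(42):
    # exclusive section; the lock is released on exit, or expires after
    # config.REDIS_LOCK_TIMEOUT seconds if this process dies
    update_balance(42)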
Example #14
def refresh_redis_cache(name, ex):
    """
        name          : redis key name
        ex            : expiration time (in seconds)
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        _set_expire_time(cli, name, ex)
Example #15
    def finalize(self):
        """
        Call before you generate a dataset
        This caches all the guids in the db at time of calling to a list
        creating a data structure which allows for fast access by the dataset object
        and fixing the length of the the dataset
        """

        lockname = self.key('lock')
        lk = Lock(self.redis, lockname)
        #logger.debug(f'getting lock {lockname}')
        lk.acquire()
        #logger.debug(f'got lock {lockname}')
        self.redis.set(self.key('finalized'), 'FINALIZED')
        if lk.owned():
            lk.release()
            #logger.debug(f'released lock {lockname}')

        self.episodes = []
        self.episode_len = []
        for episode in range(self.num_episodes()):
            self.episodes.append(self.redis.lindex(self.key('episodes'), episode).decode())

        self.episodes = sorted(self.episodes)

        for episode_id in self.episodes:
            self.episode_len.append(len(Episode(self, self.redis, episode_id)))

        self.episode_off = []
        offset = 0
        for l in self.episode_len:
            self.episode_off.append(offset)
            offset += l

        self.finalized = True
Example #16
 def __init__(self, unique_id=uuid4(), redis_server=None, *args, **kwargs):
     self._unique_id = unique_id
     self._redis_server = redis_server or StrictRedis()
     refresh_lock = Lock(redis=self._redis_server,
                         name='{0}_lock'.format(self._unique_id))
     super(RedisManagedOAuth2Mixin,
           self).__init__(*args, refresh_lock=refresh_lock, **kwargs)
     if self._access_token is None:
         self._get_and_update_current_tokens()
Example #17
def redis_lock_from_event(event):
    from django_redis import get_redis_connection
    from redis.lock import Lock

    if not hasattr(event, '_lock') or not event._lock:
        rc = get_redis_connection("redis")
        event._lock = Lock(redis=rc,
                           name='pretix_event_%s' % event.id,
                           timeout=LOCK_TIMEOUT)
    return event._lock
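A usage sketch, assuming a pretix event object as in the snippet; recalculate_quotas is a hypothetical operation that must not run concurrently for the same event:

event = Event.objects.get(pk=1)
with redis_lock_from_event(event):
    recalculate_quotas(event)  # guarded by the per-event lock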
Example #18
def release_lock(lock_name, token):
    """
    Release a lock

    Args:
        lock_name (str): The lock key in redis
        token (bytes): The unique id used

    Returns:
        bool: True if the lock was successfully released
    """
    # this is a StrictRedis instance, we need this for the script installation that LuaLock uses
    redis = caches['redis'].client.get_client()
    lock = LuaLock(redis, lock_name)
    try:
        lock.do_release(token)
    except LockError:
        # If the lock is expired we don't want to raise an error
        pass
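A sketch of the acquire side that pairs with this helper, assuming the same cache setup; redis-py stores the owner's unique id on lock.local.token after a successful acquire, which is what release_lock expects:

redis = caches['redis'].client.get_client()
lock = LuaLock(redis, 'my-lock', timeout=60)
if lock.acquire(blocking=False):
    token = lock.local.token  # unique id redis-py generated for this owner
    # ... do work, possibly finishing in another process ...
    release_lock('my-lock', token)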
Example #19
def set_hash_map_cache(name, mapping, ex=None):
    """
    name          : redis hash name
    mapping       : mapping dict
    ex            : expiration time (in seconds)
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        if DEBUG_CACHE:
            print "set redis hash map. %s %s" % (name, ex)
        cli.hmset(name, mapping)
        _set_expire_time(cli, name, ex)
    return True
Example #20
    def greenlet_scheduler(self):

        redis_scheduler_lock_key = "%s:schedulerlock" % get_current_config()["redis_prefix"]
        while True:
            with Lock(connections.redis,
                      redis_scheduler_lock_key,
                      timeout=self.config["scheduler_interval"] + 10,
                      blocking=False,
                      thread_local=False):
                self.scheduler.check()

            time.sleep(self.config["scheduler_interval"])
Example #21
class RedisLock:
    '''
    An awaitable redis Lock. The event loop won't be blocked when acquiring the lock.
    '''
    def __init__(self, redi, name: str = "", sleep: float = 0.02,
                 timeout: int = 5, blocking_timeout: int = 10,
                 thread_local: bool = False):
        self._lock = RLock(redi, name, sleep=sleep, timeout=timeout, blocking_timeout=blocking_timeout, thread_local=thread_local)

    async def __aenter__(self):
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, self._lock.acquire)
        return self

    async def __aexit__(self, *args):
        self._lock.release()

    async def acquire(self):
        '''Acquire the lock without locking the loop'''
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, self._lock.acquire)

    def release(self):
        '''Release the lock, this is not async for the sake of easier cleanup (e.g. registering `atexit`)'''
        self._lock.release()
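A usage sketch for the awaitable wrapper, assuming an existing redis-py client redi:

async def update_counter(redi):
    async with RedisLock(redi, name="counter-lock", timeout=5):
        # critical section; acquire() ran in an executor thread, so the
        # event loop stayed responsive while waiting for the lock
        pass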
Example #22
 def wrapper(*args, **kwargs):
     init_transaction()
     with Lock(redis_lock, get_trans_lock_name(), timeout=5):
         try:
             begin_transaction()
             result = func(*args, **kwargs)
             end_transaction()
             close_transaction(get_transaction_id())
             return result
         except Exception:
             rollback(get_transaction_id())
             raise
         finally:
             release_transaction()
Example #23
def set_cache(key, value, ex=None):
    """
    key           : redis key name
    value         : redis value
    ex            : expiration time (in seconds)
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(key), timeout=20):
        if DEBUG_CACHE:
            print("set redis cache. %s %s" % (key, ex))
        try:
            v = JSONDataEncoder.encode(value)
        except Exception:
            v = value
        cli.set(key, v, ex)
    return True
Example #24
@contextmanager
def redis_lock(name: str, **kwargs) -> Generator:
    """
    Acquire a Redis lock. This is a wrapper around redis.lock.Lock() that also works in tests (there, the lock is
    always granted without any checks).

    Relevant kwargs are:
     - blocking_timeout: how many seconds to try to acquire the lock. Use 0 for a non-blocking lock.
       The default is None, which means we wait forever.
     - timeout: how many seconds to keep the lock for. The default is None, which means it remains locked forever.

    Raises redis.exceptions.LockError if the lock couldn't be acquired or released.
    """
    if settings.DJTRIGGERS_REDIS_URL.startswith('redis'):  # pragma: no cover
        with Lock(redis=Redis.from_url(settings.DJTRIGGERS_REDIS_URL), name=name, **kwargs):
            yield
    else:
        yield
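A usage sketch (the lock name and sync_invoices are assumptions):

with redis_lock('sync-invoices', blocking_timeout=0, timeout=60):
    # raises redis.exceptions.LockError right away if another worker
    # holds the lock; otherwise the lock is held for at most 60 seconds
    sync_invoices()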
Example #25
    def raise_or_lock(self, key, timeout):
        """
        Checks if the task is locked and raises an exception, else locks
        the task. By default, the tasks and the key expire after 60 minutes.
        (meaning it will not be executed and the lock will clear).
        """
        acquired = Lock(self.redis,
                        key,
                        timeout=timeout,
                        blocking=self.blocking,
                        blocking_timeout=self.blocking_timeout).acquire()

        if not acquired:
            # Time remaining in milliseconds
            # https://redis.io/commands/pttl
            ttl = self.redis.pttl(key)
            raise AlreadyQueued(ttl / 1000.)
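A sketch of how a caller might use this method, assuming a configured backend instance as in the tests above:

try:
    backend.raise_or_lock(key='task:send-report', timeout=3600)
except AlreadyQueued as exc:
    # another worker holds the lock; exc.countdown is the remaining TTL
    print('already queued, retry in %.0f seconds' % exc.countdown)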
Example #26
    def end(self):
        if self.p is not None:
            self.p.execute()

        lockname = self.rollout.key('lock')
        lk = Lock(self.redis, lockname)
        #logger.debug(f'getting lock {lockname}')
        lk.acquire()
        #logger.debug(f'got lock {lockname}')

        if not self.redis.exists(self.rollout.key('finalized')):
            self.redis.lpush(self.rollout.key('episodes'), self.id)
            self.redis.incrby(self.rollout.key('steps'), len(self))

        if lk.owned():
            lk.release()
Example #27
def set_hash_cache(name, key, value, ex=None):
    """
    name          : redis hash name
    key           : redis key name
    value         : redis value
    ex            : expiration time (in seconds)
    """
    cli = RedisPool().connection()
    with Lock(cli, _dist_lock_key(name), timeout=20):
        if DEBUG_CACHE:
            print("set redis hash cache. %s %s" % (key, ex))
        try:
            v = JSONDataEncoder.encode(value)
        except Exception:
            v = value
        cli.hset(name, key, v)
        _set_expire_time(cli, name, ex)
    return True
Example #28
    def __init__(self,
                 persist_mode=False,
                 key_prefix='',
                 min_cache_time=5,
                 force_cache_time=False,
                 base='store',
                 path='cache',
                 redis_host='localhost',
                 redis_port=6379,
                 redis_db=1,
                 redis_file=None):
        self.__key_prefix = key_prefix
        self.__cache_key = '{}:cache'.format(key_prefix)
        self.__persist_mode = persist_mode
        self.__min_cache_time = min_cache_time
        self.__force_cache_time = force_cache_time
        self.__base_path = base
        self.__resource_cache = get_triple_store()
        self._r = get_kv(persist_mode,
                         redis_host,
                         redis_port,
                         redis_db,
                         redis_file,
                         base=base,
                         path=path)
        self.__lock = Lock(self._r, key_prefix)
        self.__mlock = TLock()
        self.__memory_graphs = {}
        self.__memory_order = []

        self.__resources_ts = {}

        # Clean temporal folders under 'base' (others than 'path' subfolder)
        for sub in filter(lambda x: x != path,
                          get_immediate_subdirectories(base)):
            shutil.rmtree('{}/{}'.format(self.__base_path, sub))

        for lock_key in self._r.keys('{}:l*'.format(self.__key_prefix)):
            self._r.delete(lock_key)

        self.__enabled = True
        self.__purge_th = Thread(target=self.__purge)
        self.__purge_th.daemon = True
        self.__purge_th.start()
Example #29
    def __init__(self,
                 persist_mode=None,
                 key_prefix='',
                 min_cache_time=5,
                 force_cache_time=False,
                 base='store',
                 path='cache',
                 redis_host='localhost',
                 redis_port=6379,
                 redis_db=1,
                 redis_file=None,
                 graph_memory_limit=5000):
        self.__key_prefix = key_prefix
        self.__cache_key = '{}:cache'.format(key_prefix)
        self.__persist_mode = persist_mode
        self.__min_cache_time = min_cache_time
        self.__force_cache_time = force_cache_time
        self.__base_path = base
        self._r = get_kv(persist_mode,
                         redis_host,
                         redis_port,
                         redis_db,
                         redis_file,
                         base=base,
                         path=path)
        self.__lock = Lock(self._r, key_prefix)
        self.__mlock = TLock()
        self.__graph_memory_limit = graph_memory_limit
        self.__memory_graphs = {}
        self.__memory_order = []

        self.__resources_ts = {}

        for lock_key in self._r.keys('{}:l*'.format(self.__key_prefix)):
            self._r.delete(lock_key)

        self._r.delete(key_prefix)

        self.__enabled = True
        self.__purge_th = Thread(target=self.__purge)
        self.__purge_th.daemon = True
        self.__purge_th.start()
Example #30
 def __init__(self, unique_id=uuid4(), redis_server=None, *args, **kwargs):
     """
     :param unique_id:
         An identifier for this auth object. Auth instances which wish to share tokens must use the same ID.
     :type unique_id:
         `unicode`
     :param redis_server:
         An instance of a Redis server, configured to talk to Redis.
     :type redis_server:
         :class:`Redis`
     """
     # pylint:disable=keyword-arg-before-vararg
     self._unique_id = unique_id
     self._redis_server = redis_server or StrictRedis()
     refresh_lock = Lock(redis=self._redis_server,
                         name='{0}_lock'.format(self._unique_id))
     super(RedisManagedOAuth2Mixin,
           self).__init__(*args, refresh_lock=refresh_lock, **kwargs)
     if self._access_token is None:
         self._get_and_update_current_tokens()
Example #31
def _send_smses(send_deferred=False, backend=None, limit=None):
    # Get lock so there is only one sms sender at the same time.
    if send_deferred:
        send_lock_name = 'smsgateway_send_sms_deferred'
    else:
        send_lock_name = 'smsgateway_send_sms'

    with Lock(redis=Redis.from_url(settings.SMSGATEWAY_REDIS_URL),
              name='smsgateway-' + send_lock_name,
              blocking_timeout=0):
        successes, failures = 0, 0
        try:
            # Get SMSes that need to be sent (deferred or non-deferred)
            if send_deferred:
                to_send = QueuedSMS.objects.filter(priority=PRIORITY_DEFERRED)
            else:
                to_send = QueuedSMS.objects.exclude(priority=PRIORITY_DEFERRED)

            if isinstance(limit, int):
                to_send = to_send[:limit]

            # Send each SMS
            for sms in to_send:
                if backend:
                    sms_using = backend
                else:
                    sms_using = None if sms.using == '__none__' else sms.using
                if send(sms.to, sms.content, sms.signature, sms_using,
                        sms.reliable):
                    # Successfully sent, remove from queue
                    sms.delete()
                    successes += 1
                else:
                    # Failed to send, defer SMS
                    sms.defer()
                    failures += 1
        finally:
            if successes and failures:
                statsd.gauge('smsgateway.success_rate', successes / failures)
            else:
                statsd.gauge('smsgateway.success_rate', 1)