async def unset_lock(self, resource, lock_identifier):
    """
    Unlock this instance

    :param resource: redis key to set
    :param lock_identifier: unique id of lock
    :raises: LockError if the lock resource acquired with different
        lock_identifier
    """
    try:
        with await self.connect() as redis:
            # Lua script checks the identifier and deletes the key only if
            # it matches, so another owner's lock is never removed.
            await redis.evalsha(
                self.unset_lock_script_sha1,
                keys=[resource],
                args=[lock_identifier])
    except aioredis.errors.ReplyError as exc:  # script fault
        self.log.debug('Can not unset lock "%s" on %s',
                       resource, repr(self))
        raise LockError('Can not unset lock') from exc
    except (aioredis.errors.RedisError, OSError) as exc:
        self.log.error('Can not unset lock "%s" on %s: %s',
                       resource, repr(self), repr(exc))
        # Bug fix: this message previously said 'Can not set lock',
        # copy-pasted from set_lock().
        raise LockError('Can not unset lock') from exc
    except asyncio.CancelledError:
        self.log.debug('Lock "%s" unset is cancelled on %s',
                       resource, repr(self))
        raise
    except Exception:
        self.log.exception('Can not unset lock "%s" on %s',
                           resource, repr(self))
        raise
    else:
        self.log.debug('Lock "%s" is unset on %s', resource, repr(self))
async def set_lock(self, resource, lock_identifier, lock_timeout):
    """
    Lock this instance and set lock expiration time to lock_timeout

    :param resource: redis key to set
    :param lock_identifier: unique id of lock
    :param lock_timeout: timeout for lock in seconds
    :raises: LockError if lock is not acquired
    """
    # Redis PX expiration wants milliseconds.
    expiration_ms = int(lock_timeout * 1000)
    try:
        with await self.connect() as redis:
            await redis.evalsha(
                self.set_lock_script_sha1,
                keys=[resource],
                args=[lock_identifier, expiration_ms])
    except aioredis.errors.ReplyError as exc:  # script fault
        self.log.debug('Can not set lock "%s" on %s', resource, repr(self))
        raise LockError('Can not set lock') from exc
    except (aioredis.errors.RedisError, OSError) as exc:
        self.log.error('Can not set lock "%s" on %s: %s',
                       resource, repr(self), repr(exc))
        raise LockError('Can not set lock') from exc
    except asyncio.CancelledError:
        self.log.debug('Lock "%s" is cancelled on %s', resource, repr(self))
        raise
    except Exception as exc:
        self.log.exception('Can not set lock "%s" on %s',
                           resource, repr(self))
        raise
    else:
        self.log.debug('Lock "%s" is set on %s', resource, repr(self))
async def unset_lock(self, resource, lock_identifier):
    """
    Tries to unset the lock to all the redis instances

    :param resource: The resource string name to lock
    :param lock_identifier: The id of the lock. A unique string
    :return float: The elapsed time that took to unlock the instances
        in seconds
    :raises: LockError if the lock has not matching identifier in more
        than (N/2 - 1) instances
    """
    start_time = time.time()
    successes = await asyncio.gather(
        *[instance.unset_lock(resource, lock_identifier)
          for instance in self.instances],
        return_exceptions=True)
    # An instance signals success by returning None; failures come back
    # as exception objects thanks to return_exceptions=True.
    successful_removes = sum(s is None for s in successes)  # fixed typo 'remvoes'
    elapsed_time = time.time() - start_time
    # Quorum: strictly more than half of the instances must have released.
    unlocked = successful_removes >= len(self.instances) // 2 + 1

    self.log.debug('Lock "%s" is unset on %d/%d instances in %s seconds',
                   resource, successful_removes, len(self.instances),
                   elapsed_time)

    if not unlocked:
        raise LockError('Can not release the lock')

    return elapsed_time
async def set_lock(self, resource, lock_identifier):
    """
    Tries to set the lock to all the redis instances

    :param resource: The resource string name to lock
    :param lock_identifier: The id of the lock. A unique string
    :return float: The elapsed time that took to lock the instances
        in seconds
    :raises: LockError if the lock has not been set to at least
        (N/2 + 1) instances
    """
    start_time = time.time()
    lock_timeout = self.lock_timeout

    successes = await asyncio.gather(*[
        instance.set_lock(resource, lock_identifier, lock_timeout)
        for instance in self.instances
    ], return_exceptions=True)
    # An instance signals success by returning None; failures come back
    # as exception objects thanks to return_exceptions=True.
    successful_sets = sum(s is None for s in successes)
    elapsed_time = time.time() - start_time
    # Quorum: strictly more than half of the instances must hold the lock.
    locked = successful_sets >= len(self.instances) // 2 + 1

    self.log.debug('Lock "%s" is set on %d/%d instances in %s seconds',
                   resource, successful_sets, len(self.instances),
                   elapsed_time)

    if not locked:
        raise LockError('Can not acquire the lock "%s"' % resource)

    return elapsed_time
async def get_lock_ttl(self, resource, lock_identifier=None):
    """
    Tries to get the lock from all the redis instances

    :param resource: The resource string name to fetch
    :param lock_identifier: The id of the lock. A unique string
    :return float: The TTL of that lock reported by redis
    :raises: LockError if the lock has not been set to at least
        (N/2 + 1) instances
    """
    start_time = time.monotonic()
    successes = await asyncio.gather(*[
        instance.get_lock_ttl(resource, lock_identifier)
        for instance in self.instances
    ], return_exceptions=True)
    successful_list = [s for s in successes if not isinstance(s, Exception)]
    # TODO: should check if all the values are approx. the same with
    # math.isclose instead of exact equality.
    # Quorum: strictly more than half of the instances must answer.
    locked = len(successful_list) >= len(self.instances) // 2 + 1
    success = all_equal(successful_list) and locked
    elapsed_time = time.monotonic() - start_time

    self.log.debug('Lock "%s" is set on %d/%d instances in %s seconds',
                   resource, len(successful_list), len(self.instances),
                   elapsed_time)

    if not success:
        raise LockError('Could not fetch the TTL for lock "%s"' % resource)

    return successful_list[0]
async def lock(self, resource):
    """
    Tries to acquire the lock. If the lock is correctly acquired,
    the valid property of the returned lock is True.
    In case of fault the LockError exception will be raised

    :param resource: The string identifier of the resource to lock
    :return: :class:`aioredlock.Lock`
    :raises: LockError in case of fault
    """
    lock_identifier = str(uuid.uuid4())
    error = RuntimeError('Retry count less then one')

    try:
        # global try/except to catch CancelledError
        for n in range(self.retry_count):
            self.log.debug('Acquiring lock "%s" try %d/%d',
                           resource, n + 1, self.retry_count)
            if n != 0:
                # randomized backoff between retries
                delay = random.uniform(self.retry_delay_min,
                                       self.retry_delay_max)
                await asyncio.sleep(delay)
            try:
                elapsed_time = await self.redis.set_lock(
                    resource, lock_identifier)
            except LockError as exc:
                error = exc
                continue

            # Validity check from the Redlock algorithm: the lock must
            # still have useful lifetime left after acquisition overhead.
            if self.lock_timeout - elapsed_time - self.drift <= 0:
                error = LockError('Lock timeout')
                self.log.debug('Timeout in acquiring the lock "%s"',
                               resource)
                continue

            error = None
            break
        else:  # break never reached
            raise error

    except (Exception, asyncio.CancelledError):
        # Bug fix: since Python 3.8 CancelledError is not a subclass of
        # Exception, so it must be caught explicitly here or the partial
        # lock would never be cleaned up on task cancellation.
        # Cleanup in case of fault or cancellation will run in background.
        async def cleanup():
            self.log.debug('Cleaning up lock "%s"', resource)
            with contextlib.suppress(LockError):
                await self.redis.unset_lock(resource, lock_identifier)

        asyncio.ensure_future(cleanup())
        raise

    return Lock(self, resource, lock_identifier, valid=True)
async def get_lock_ttl(self, resource, lock_identifier, register_scripts=False):
    """
    Fetch this instance and set lock expiration time to lock_timeout

    :param resource: redis key to get
    :param lock_identifier: unique id of the lock to get
    :param register_scripts: register redis scripts; usually already
        done, so 'False'.
    :raises: LockError if lock is not available
    """
    try:
        with await self.connect() as redis:
            if register_scripts is True:
                await self._register_scripts(redis)
            ttl = await redis.evalsha(
                self.get_lock_ttl_script_sha1,
                keys=[resource],
                args=[lock_identifier])
    except aioredis.errors.ReplyError as exc:  # script fault
        # The server may have flushed its script cache; re-register the
        # scripts and retry exactly once.
        if exc.args[0].startswith('NOSCRIPT'):
            return await self.get_lock_ttl(
                resource, lock_identifier, register_scripts=True)
        self.log.debug('Can not get lock "%s" on %s', resource, repr(self))
        raise LockError('Can not get lock') from exc
    except (aioredis.errors.RedisError, OSError) as exc:
        self.log.error('Can not get lock "%s" on %s: %s',
                       resource, repr(self), repr(exc))
        raise LockError('Can not get lock') from exc
    except asyncio.CancelledError:
        self.log.debug('Lock "%s" is cancelled on %s', resource, repr(self))
        raise
    except Exception:
        self.log.exception('Can not get lock "%s" on %s',
                           resource, repr(self))
        raise
    else:
        self.log.debug('Lock "%s" with TTL %s is on %s',
                       resource, ttl, repr(self))
        return ttl
async def _set_lock(self, resource, lock_identifier, lease_time): error = RuntimeError('Retry count less then one') # Proportional drift time to the length of the lock # See https://redis.io/topics/distlock#is-the-algorithm-asynchronous for more info drift = lease_time * 0.01 + 0.002 try: # global try/except to catch CancelledError for n in range(self.retry_count): self.log.debug('Acquiring lock "%s" try %d/%d', resource, n + 1, self.retry_count) if n != 0: delay = random.uniform(self.retry_delay_min, self.retry_delay_max) await asyncio.sleep(delay) try: elapsed_time = await self.redis.set_lock( resource, lock_identifier, lease_time) except LockError as exc: error = exc continue if lease_time - elapsed_time - drift <= 0: error = LockError('Lock timeout') self.log.debug('Timeout in acquiring the lock "%s"', resource) continue error = None break else: # break never reached raise error except (Exception, asyncio.CancelledError): # cleanup in case of fault or cancellation will run in background async def cleanup(): self.log.debug('Cleaning up lock "%s"', resource) with contextlib.suppress(LockError): await self.redis.unset_lock(resource, lock_identifier) asyncio.ensure_future(cleanup()) raise