def extend(self, additional_time):
    """Coroutine: add ``additional_time`` seconds to a held lock.

    Raises:
        LockError: if the lock is not held or was created without a
            timeout.
    """
    if self.token is None:
        raise LockError("Cannot extend an unlocked lock")
    if self.timeout is None:
        raise LockError("Cannot extend a lock with no timeout")
    extended = yield self.do_extend(additional_time)
    raise gen.Return(extended)
def maybe_extend(self, timeout_factor=0.5):
    """Top the lock back up to its full timeout when less than
    ``timeout_factor`` of the timeout is left.

    Raises:
        LockError: if the lock was never acquired (or has no timeout),
            or has already expired.
    """
    if not self.expire_at:
        raise LockError('Lock was not acquired or no timeout')
    remaining = self.expire_at - time.time()
    if remaining < 0:
        raise LockError('Lock was already expired')
    # Only talk to the backend when the remaining TTL dropped below the
    # configured fraction of the full timeout.
    if remaining < self.timeout * timeout_factor:
        self.extend(self.timeout - remaining)
def reacquire(self):
    """Reset the TTL of an already held lock back to ``self.timeout``.

    Raises:
        LockError: if no token is held or the lock has no timeout.
    """
    if self.local.token is None:
        raise LockError("Cannot reacquire an unlocked lock")
    if self.timeout is None:
        raise LockError("Cannot reacquire a lock with no timeout")
    return self.do_reacquire()
def reacquire(self):
    """Re-arm the previously acquired lock with its original timeout.

    Raises:
        LockError: if the lock is not held or was created without a
            timeout.
    """
    token_held = self.local.token is not None
    if not token_held:
        raise LockError("Cannot reacquire an unlocked lock")
    if self.timeout is None:
        raise LockError("Cannot reacquire a lock with no timeout")
    return self.do_reacquire()
def extend(self, additional_time, replace_ttl=False):
    """Add more time to an already acquired lock.

    ``additional_time`` may be an int or a float; either way it is the
    number of seconds to add.

    ``replace_ttl``: when False (the default), ``additional_time`` is
    added to the lock's existing ttl; when True, the lock's ttl is
    replaced with ``additional_time``.

    Raises:
        LockError: if the lock is not held or has no timeout.
    """
    if self.local.token is None:
        raise LockError("Cannot extend an unlocked lock")
    # A lock created without a timeout cannot be given one after the fact.
    if self.timeout is None:
        raise LockError("Cannot extend a lock with no timeout")
    return self.do_extend(additional_time, replace_ttl)
def extend(self, additional_time):
    """Add ``additional_time`` seconds to an already acquired lock.

    ``additional_time`` can be an integer or a float.

    Raises:
        LockError: when the lock is not held, or has no timeout.
    """
    if self.token is None:
        raise LockError("Cannot extend an unlocked lock")
    if self.timeout is None:
        raise LockError("Cannot extend a lock with no timeout")
    return self.do_extend(additional_time)
def acquire(self, requested_value: int = 1, score: float = 0.0) -> float:
    """Acquire a shared, distributed semaphore for this instance id.

    Registers this instance in the semaphore's sorted set, then spins
    on the acquire Lua script until the requested value is granted.

    Args:
        requested_value: units of capacity to claim; must be between 1
            and the configured capacity.
        score: initial queue score; defaults to ``requested_value``
            when falsy (0.0).

    Returns:
        The final score, as a float, reported by the acquire script.

    Raises:
        ValueError: for an out-of-range ``requested_value`` or a
            negative ``score``.
        LockError: if already acquired, the queue entry timed out
            (script returned -2), or the semaphore parameters are
            missing in Redis (script returned -3).
    """
    if requested_value < 1 or requested_value > self._capacity:
        raise ValueError(f"'requested_value' must be between 1 and "
                         f"'capacity': {self._capacity}")
    # Re-entrant acquisition is not supported on the same instance.
    if self._acquired_value:
        raise LockError(
            "Cannot acquire an already acquired Semaphore instance")
    if not score:
        score = requested_value
    # NOTE: checked after the defaulting above, so only an explicitly
    # passed negative score reaches this branch.
    if score < 0:
        raise ValueError("'score' must be >= 0")
    (  # initialize by adding the iid to zset; set the name_iid key/value to 0
        self._redis.pipeline()  # type: ignore
        .zadd(self._zset_name, {
            self._iid: score
        }).pexpire(self._zset_name, self._timeoutms)  # may have been created
        .set(self._name_iid, 0, px=self._timeoutms).execute())
    while True:  # spin until acquired
        # Positive return -> acquired value; negative -> error code.
        self._acquired_value, score = self.lua_acquire(
            keys=[self._name],
            args=[self._iid, requested_value],
            client=self._redis,
        )
        if self._acquired_value > 0:
            return float(score)
        if self._acquired_value == -2:
            raise LockError("Semaphore queue entry timed out")
        if self._acquired_value == -3:
            raise LockError("Semaphore params not found in Redis")
        time.sleep(self._sleepms / 1000)  # sleep then spin
def release(self) -> Awaitable[None]:
    """Release the already acquired lock.

    Raises:
        LockError: if no token is currently held.
    """
    token = self.local.token
    if token is None:
        raise LockError("Cannot release an unlocked lock")
    # Drop the local token before handing the awaitable back to the caller.
    self.local.token = None
    return self.do_release(token)
def cache_based_lock(lock_name, timeout=3600, blocking_timeout=None):
    # Context-manager generator guarding a critical section with a
    # cache-backed lock (presumably wrapped with @contextmanager by the
    # caller -- the decorator is not visible here; TODO confirm).
    if hasattr(cache, 'lock'):
        # Backend supports native locking; delegate entirely to it.
        with cache.lock(lock_name, timeout=timeout,
                        blocking_timeout=blocking_timeout):
            yield None
    else:
        # cache does not support locking, we use a poor mans solution:
        # cache.add() only succeeds when the key is absent, so a
        # successful add means we now hold the "lock".
        is_free = cache.add(lock_name, 1, timeout)
        start_time = time()
        if not is_free:
            logger.warning(
                "Task is locked. Waiting for lock release of '{}'".format(
                    lock_name))
        # Poll every 0.5s until the key can be added (lock released or
        # its cache timeout expired).
        while not is_free:
            if blocking_timeout and time() - start_time > blocking_timeout:
                raise LockError('Blocking timeout')
            sleep(0.5)
            is_free = cache.add(lock_name, 1, timeout)
        logger.info("Task is free to run")
        lock_time = time()
        try:
            yield None
        finally:
            if time() < lock_time + timeout:
                # the lock has not yet timed out, so it should be ours;
                # if it already expired, another worker may hold it now
                # and deleting would release *their* lock.
                cache.delete(lock_name)
def reset_planning_window_for_team(org_code, team_id):
    # Clear all cached planning-env keys for this org/team under a redis
    # lock, then rebuild the planner with an unbounded planning window.
    team_env_key = "env_{}_{}".format(org_code, team_id)
    lock_key = "lock_env/env_{}_{}".format(org_code, team_id)
    log.debug(f"Trying to get lock for clearing env = {team_env_key}")
    flag = True
    with redis_conn.lock(lock_key, timeout=60, blocking_timeout=5) as lock:
        flag = False
        # Delete every key belonging to this team's environment namespace.
        for key in redis_conn.scan_iter(f"{team_env_key}/*"):
            redis_conn.delete(key)
    # NOTE(review): if the lock cannot be acquired, redis' lock context
    # manager raises before `flag` is cleared, so this guard looks
    # unreachable -- confirm whether a lock failure mode can fall
    # through to here.
    if flag:
        raise LockError("Cannot get a lock ")
    log.info(f"All redis records are cleared for env = {team_env_key}")
    # nbr_of_days_planning_window=-1: semantics defined by
    # get_active_planner (presumably "no limit") -- not visible here.
    planner = get_active_planner(org_code=org_code, team_id=team_id,
                                 start_day=config.DEFAULT_START_DAY,
                                 nbr_of_days_planning_window=-1,
                                 force_reload=True)
    rl_env = planner["planner_env"]
    # rl_env.replay_env_to_redis()
    result_info = {"status": "OK", "config": rl_env.config}
    return result_info, planner
def extend(self, additional_time, replace_ttl=False):
    """Give an already acquired lock more time to live.

    ``additional_time`` is a number of seconds (int or float).

    ``replace_ttl`` selects the mode: False (default) adds
    ``additional_time`` to the existing ttl, True makes
    ``additional_time`` the new ttl.

    Raises:
        LockError: when the lock is not held or has no timeout.
    """
    held = self.local.token is not None
    if not held:
        raise LockError("Cannot extend an unlocked lock")
    if self.timeout is None:
        raise LockError("Cannot extend a lock with no timeout")
    return self.do_extend(additional_time, replace_ttl)
def ony_one_running_task(task: CeleryTask,
                         lock_name_suffix: Optional[str] = None,
                         blocking_timeout: int = 1,
                         lock_timeout: Optional[int] = LOCK_TIMEOUT):
    """
    Ensures one running task at the same time, using `task` name as a
    unique key
    :param task: CeleryTask
    :param lock_name_suffix: A suffix for the lock name, in the case that the
        same task can be run at the same time when it has different arguments
    :param blocking_timeout: Waiting blocking timeout, it should be as small
        as possible to the worker can release the task
    :param lock_timeout: How long the lock will be stored, in case worker is
        halted so key is not stored forever in Redis
    :return: Instance of redis `Lock`
    :raises: LockError if lock cannot be acquired
    """
    if WORKER_STOPPED:
        raise LockError('Worker is stopping')
    redis = get_redis()
    lock_name = f'tasks:{task.name}'
    if lock_name_suffix:
        lock_name = f'{lock_name}:{lock_name_suffix}'
    with redis.lock(lock_name, blocking_timeout=blocking_timeout,
                    timeout=lock_timeout) as lock:
        ACTIVE_LOCKS.add(lock_name)
        try:
            yield lock
        finally:
            # Fix: if the guarded body raises, the exception is thrown into
            # this generator at the yield. Without the finally, the name
            # would stay in ACTIVE_LOCKS forever and the gevent DB
            # connection would never be returned to the pool.
            ACTIVE_LOCKS.remove(lock_name)
            close_gevent_db_connection()  # Need for django-db-geventpool
def do_extend(self, additional_time):
    """Run the extend Lua script for this lock.

    ``additional_time`` is converted from seconds to milliseconds.

    Raises:
        LockError: when the script reports the lock is no longer owned.
    """
    ttl_ms = int(additional_time * 1000)
    extended = self.lua_extend(keys=[self.name],
                               args=[self.local.token, ttl_ms],
                               client=self.redis)
    if not bool(extended):
        raise LockError("Cannot extend a lock that's no longer owned")
    return True
def release(self):
    """Release the already acquired lock.

    Raises:
        LockError: if no token is held by this thread.
    """
    token = self.local.token
    if token is None:
        raise LockError("Cannot release an unlocked lock")
    # Clear our local record of ownership before touching the backend.
    self.local.token = None
    self.do_release(token)
def release(self):
    """Release the lock held by this instance.

    The token is cleared even when the backend release fails.

    Raises:
        LockError: if no token is held.
    """
    held = self.token is not None
    if not held:
        raise LockError("Cannot release an unlocked lock")
    try:
        self.do_release()
    finally:
        self.token = None
def do_extend(self, additional_time):
    """Coroutine: run the extend Lua script, ttl given in seconds.

    Raises:
        LockError: when the script reports ownership was lost.
    """
    ttl_ms = int(additional_time * 1000)
    extended = yield self.lua_extend(keys=[self.name],
                                     args=[self.token, ttl_ms],
                                     client=self.redis)
    if not bool(extended):
        raise LockError("Cannot extend a lock that's no longer owned")
    raise gen.Return(True)
def release(self, token=None):
    """Release the lock.

    ``token`` may be supplied explicitly; otherwise the thread-local
    token is used.

    Raises:
        LockError: when neither an explicit nor a stored token exists.
    """
    actual = token if token is not None else self.local.token
    if actual is None:
        raise LockError("Cannot release an unlocked lock")
    self.local.token = None
    self.do_release(actual)
def release(self):
    """Release the previously acquired lock.

    Raises:
        LockError: if this thread holds no token.
    """
    held_token = self.local.token
    if held_token is None:
        raise LockError("Cannot release an unlocked lock")
    self.local.token = None
    self.do_release(held_token)
def do_extend(self, additional_time):
    # Coroutine: extend the lock's TTL by `additional_time` seconds
    # using a WATCH/MULTI transaction so the change only applies while
    # this token still owns the key.
    pipe = self.redis.pipeline()
    yield pipe.watch(self.name)
    lock_value = yield pipe.get(self.name)
    if lock_value != self.token:
        # Key holds someone else's token (or is gone).
        raise LockError("Cannot extend a lock that's no longer owned")
    expiration = yield pipe.pttl(self.name)
    if expiration is None or expiration < 0:
        # Key expired/evicted between get() and pttl(); pexpire() below
        # will then report failure on the missing key.
        expiration = 0
    pipe.multi()
    pipe.pexpire(self.name, expiration + int(additional_time * 1000))
    try:
        response = yield pipe.execute()
    except WatchError:
        # Watched key changed -> another client took the lock.
        raise LockError("Cannot extend a lock that's no longer owned")
    if not response[0]:
        # pexpire returned False: the key no longer exists.
        raise LockError("Cannot extend a lock that's no longer owned")
    raise gen.Return(True)
def redis_lock(redis_instance, key):
    """Context-manager generator: try to take a per-key redis lock.

    Yields a ``(lock_acquired, err)`` pair; ``err`` is a ``LockError``
    instance (not raised) when acquisition failed. The lock is always
    released on exit.
    """
    lock_key = "lock_{}".format(key)
    lock_id = str(uuid4())
    try:
        acquired = acquire_redis_lock(redis_instance, lock_key, lock_id)
        if acquired:
            err = None
        else:
            err = LockError(
                f"Unable to acquire redis lock in max tries, lock key: {lock_key}, lock_id: {lock_id}"
            )
        yield acquired, err
    finally:
        release_redis_lock(redis_instance, lock_key, lock_id)
def do_extend(self, additional_time):
    """Extend the held lock's TTL by ``additional_time`` seconds.

    Uses WATCH/MULTI so the extension is only applied while this token
    still owns the key.

    Raises:
        LockError: if the lock is no longer owned by our token.
    """
    additional_ms = int(additional_time * 1000)
    # Fix: use the pipeline as a context manager so pipe.reset() always
    # runs -- the original leaked the WATCHed connection on every raise
    # path (ownership lost, WatchError, missing key).
    with self.redis.pipeline() as pipe:
        pipe.watch(self.name)
        lock_value = pipe.get(self.name)
        if lock_value != self.token:
            raise LockError("Cannot extend a lock that's no longer owned")
        expiration = pipe.pttl(self.name)
        if expiration is None or expiration < 0:
            # Redis evicted the lock key between the previous get() and
            # now; we'll handle this when we call pexpire()
            expiration = 0
        pipe.multi()
        pipe.pexpire(self.name, expiration + additional_ms)
        try:
            response = pipe.execute()
        except WatchError:
            # someone else acquired the lock
            raise LockError("Cannot extend a lock that's no longer owned")
        if not response[0]:
            # pexpire returns False if the key doesn't exist
            raise LockError("Cannot extend a lock that's no longer owned")
    return True
def __init__(self, redis, name, timeout=None, sleep=0.1, blocking=True,
             blocking_timeout=None, io_loop=None):
    """Create a tornado-flavoured Lock named ``name`` on client ``redis``.

    ``io_loop`` defaults to the current IOLoop when not supplied.
    ``timeout`` is the lock's maximum life in seconds (None = until
    released); ``sleep`` is the per-iteration wait while blocking;
    ``blocking``/``blocking_timeout`` control acquisition behaviour.

    Raises:
        LockError: if ``sleep`` exceeds a non-falsy ``timeout``.
    """
    # Validate up front, before any state is stored.
    if timeout and sleep > timeout:
        raise LockError("'sleep' must be less than 'timeout'")
    self.io_loop = io_loop or IOLoop.current()
    self.redis = redis
    self.name = name
    self.timeout = timeout
    self.sleep = sleep
    self.blocking = blocking
    self.blocking_timeout = blocking_timeout
    # No token until the lock is actually acquired.
    self.token = None
def __init__(self, redis, name, timeout=None, sleep=0.1, blocking=True,
             blocking_timeout=None):
    """Create a new Lock named ``name`` using the client ``redis``.

    ``timeout`` -- maximum life of the lock in seconds (int or float);
    by default the lock stays held until ``release()`` is called.

    ``sleep`` -- seconds slept per loop iteration while the lock is in
    blocking mode and another client currently holds it.

    ``blocking`` -- whether ``acquire`` blocks until the lock is taken
    or fails immediately (``acquire`` returns False). Defaults to True
    and can be overridden per ``acquire`` call.

    ``blocking_timeout`` -- maximum seconds (int or float) to spend
    trying to acquire the lock; ``None`` means keep trying forever.
    """
    self.redis = redis
    self.name = name
    self.timeout = timeout
    self.sleep = sleep
    self.blocking = blocking
    self.blocking_timeout = blocking_timeout
    # The token lives in thread-local storage so each thread only sees
    # the token it set itself.
    self.local = threading.local()
    self.local.token = None
    if self.timeout and self.sleep > self.timeout:
        raise LockError("'sleep' must be less than 'timeout'")
def release(self) -> int:
    """Release an acquired semaphore.

    Returns:
        The released value, as an int.

    Raises:
        LockError: if this instance does not hold an acquired value.
    """
    if not self._acquired_value:
        raise LockError("Cannot release a Semaphore that is not acquired")
    released = self.lua_release(keys=[self._name],
                                args=[self._iid],
                                client=self._redis)
    # Mark this instance as no longer holding any capacity.
    self._acquired_value = 0
    return int(released)
def __init__(self, redis, name, timeout=None, sleep=0.1, blocking=True,
             blocking_timeout=None, thread_local=True):
    """Create a new Lock named ``name`` using the client ``redis``.

    ``timeout`` -- maximum life of the lock in seconds (int or float);
    by default the lock stays held until ``release()`` is called.

    ``sleep`` -- seconds slept per loop iteration while the lock is in
    blocking mode and another client currently holds it.

    ``blocking`` -- whether ``acquire`` blocks until the lock is taken
    or fails immediately (``acquire`` returns False and the lock is not
    acquired). Defaults to True and can be overridden per ``acquire``
    call.

    ``blocking_timeout`` -- maximum seconds (int or float) to spend
    trying to acquire the lock; ``None`` means keep trying forever.

    ``thread_local`` -- whether the lock token is kept in thread-local
    storage (the default), so a thread only ever sees the token it set
    itself. With a shared token, a thread whose lock already expired
    could observe -- and successfully release -- a token set by another
    thread that acquired the lock in the meantime (e.g. thread-1's lock
    times out at t=5, thread-2 acquires and stores token "xyz", then
    thread-1 calls release() at t=6 and would release thread-2's lock).
    Disable thread-local storage only for patterns where one thread
    acquires the lock and hands the Lock instance to a worker thread to
    release later; such cases are assumed to be uncommon.
    """
    self.redis = redis
    self.name = name
    self.timeout = timeout
    self.sleep = sleep
    self.blocking = blocking
    self.blocking_timeout = blocking_timeout
    self.thread_local = bool(thread_local)
    # dummy() shares a single token object; threading.local isolates it
    # per thread.
    self.local = threading.local() if self.thread_local else dummy()
    self.local.token = None
    if self.timeout and self.sleep > self.timeout:
        raise LockError("'sleep' must be less than 'timeout'")
def do_release(self):
    """Run the release Lua script for this lock.

    Raises:
        LockError: when the script reports ownership was lost.
    """
    released = self.lua_release(keys=[self.name],
                                args=[self.token],
                                client=self.redis)
    if not released:
        raise LockError("Cannot release a lock that's no longer owned")
def do_release(self, expected_token):
    """Coroutine: run the release Lua script with ``expected_token``.

    Raises:
        LockError: when the script reports the lock is owned elsewhere.
    """
    released = yield self.lua_release(keys=[self.name],
                                      args=[expected_token],
                                      client=self.redis)
    if not bool(released):
        raise LockError("Cannot release a lock that's no longer owned")
def execute_release(pipe):
    """Transaction body: delete the lock key only while we still own it.

    Reads ``name`` and ``token`` from the enclosing scope; raises
    LockError if the stored value no longer matches our token.
    """
    current = pipe.get(name)
    if current != token:
        raise LockError("Cannot release a lock that's no longer owned")
    pipe.delete(name)
def release(self):
    """Release the lock; the stored token is cleared first.

    Raises:
        LockError: if no token is held.
    """
    token = self.token
    if token is None:
        raise LockError("Cannot release an unlocked lock")
    self.token = None
    self.do_release(token)
def do_release(self, expected_token):
    """Coroutine: delete the lock key only if it still holds our token.

    Raises:
        LockError: when the stored value differs from ``expected_token``.
    """
    current = yield self.redis.get(self.name)
    if current != expected_token:
        raise LockError("Cannot release a lock that's no longer owned")
    yield self.redis.delete(self.name)