def test_expire_less_than_timeout(conn):
    lock = Lock(conn, "foobar", expire=1)
    pytest.raises(TimeoutTooLarge, lock.acquire, blocking=True, timeout=2)

    lock = Lock(conn, "foobar", expire=1, auto_renewal=True)
    lock.acquire(blocking=True, timeout=2)
    lock.release()
def check_with_ehall(jasdm: str, day: int, jc: str, zylxdm: str):
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock = Lock(redis, "Spider")
    if lock.acquire():
        try:
            save_cookies()
            save_time()
            cookies = json.loads(redis.hget("Spider", "cookies"))
            time_info = json.loads(redis.hget("Spider", "time_info"))
            redis.delete("Spider")
            res = requests.post(
                url="http://ehallapp.nnu.edu.cn/jwapp/sys/jsjy/modules/jsjysq/cxyzjskjyqk.do",
                cookies=cookies,
                data={
                    'XNXQDM': time_info['XNXQDM'][0],
                    'ZC': time_info['ZC'],
                    'JASDM': jasdm
                }).json()
            kcb = json.loads(
                res['datas']['cxyzjskjyqk']['rows'][0]['BY1'])[(day + 6) % 7]
            for row in kcb:
                if jc in row['JC'].split(',') and row['ZYLXDM'] in (zylxdm, ''):
                    return True  # data consistent, nothing to correct
            return False  # data inconsistent, needs updating
        finally:
            lock.release()
def autofill_na_labels():
    na_label_tracks = Track.query.filter(
        Track.label == u"Not Available").all()
    for na_track in na_label_tracks:
        other_track = Track.query.filter(db.and_(
            Track.artist == na_track.artist,
            Track.title == na_track.title,
            Track.album == na_track.album,
            Track.label != u"Not Available")).first()
        if other_track is not None:
            with Lock(redis_conn, 'track_{}'.format(other_track.id),
                      expire=60, auto_renewal=True):
                # update TrackLogs to point to other Track
                TrackLog.query.\
                    filter(TrackLog.track_id == na_track.id).\
                    update({TrackLog.track_id: other_track.id},
                           synchronize_session=False)

                na_lock = Lock(redis_conn, 'track_{}'.format(na_track.id),
                               expire=60, auto_renewal=True)
                if na_lock.acquire(timeout=1):
                    db.session.delete(na_track)
                    na_lock.release()

                db.session.commit()

                current_app.logger.info(
                    "Trackman: Found a track with a label for track ID "
                    "{0:d}, merged into {1:d}".format(
                        na_track.id, other_track.id))
def continuous_migration(skip_files=None):
    """Task to continuously migrate what is pushed up by Legacy."""
    if skip_files is None:
        skip_files = current_app.config.get(
            'RECORDS_MIGRATION_SKIP_FILES',
            False,
        )

    redis_url = current_app.config.get('CACHE_REDIS_URL')
    r = StrictRedis.from_url(redis_url)
    lock = Lock(r, 'continuous_migration', expire=120, auto_renewal=True)

    if lock.acquire(blocking=False):
        try:
            while r.llen('legacy_records'):
                raw_record = r.lrange('legacy_records', 0, 0)
                if raw_record:
                    migrate_and_insert_record(
                        zlib.decompress(raw_record[0]),
                        skip_files=skip_files,
                    )
                    db.session.commit()
                r.lpop('legacy_records')
        finally:
            lock.release()
    else:
        LOGGER.info("Continuous_migration already executed. Skipping.")
def test_expire_without_timeout(conn):
    first_lock = Lock(conn, 'expire', expire=2)
    second_lock = Lock(conn, 'expire', expire=1)
    first_lock.acquire()
    assert second_lock.acquire(blocking=False) is False
    assert second_lock.acquire() is True
    second_lock.release()
def test_reset(conn):
    lock = Lock(conn, "foobar")
    lock.reset()
    new_lock = Lock(conn, "foobar")
    new_lock.acquire(blocking=False)
    new_lock.release()
    pytest.raises(NotAcquired, lock.release)
def handle_overview(args: dict) -> dict:
    if not __Application.serve:
        return {
            'status': 1,
            'message': "service off",
            'service': "off",
            'data': []
        }
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock = Lock(redis, "Server-Overview")
    if lock.acquire():
        try:
            if 'jasdm' not in args.keys() or not redis.hexists(
                    "Overview", args['jasdm']):
                raise KeyError('jasdm')
            value = json.loads(redis.hget(name="Overview", key=args['jasdm']))
            return {
                'status': 0,
                'message': "ok",
                'service': "on",
                'data': value
            }
        finally:
            lock.release()
def test_reset(redis_server):
    conn = StrictRedis(unix_socket_path=UDS_PATH)
    with Lock(conn, "foobar") as lock:
        lock.reset()
        new_lock = Lock(conn, "foobar")
        new_lock.acquire(blocking=False)
        new_lock.release()
def test_signal_cleanup_on_release(conn):
    """After releasing a lock, the signal key should not remain."""
    lock = Lock(conn, 'foo')
    lock.acquire()
    lock.release()
    assert conn.llen('lock-signal:foo') == 0
    assert conn.exists('lock-signal:foo') == 0
def test_signal_expiration(conn):
    """Signal keys expire within two seconds after releasing the lock."""
    lock = Lock(conn, 'signal_expiration')
    lock.acquire()
    lock.release()
    time.sleep(2)
    assert conn.llen('lock-signal:signal_expiration') == 0
def distributed_lock(lock_name, expire=10, auto_renewal=True, blocking=False):
    """Context manager to acquire a lock visible by all processes.

    This lock is implemented through Redis in order to be globally visible.

    Args:
        lock_name (str): name of the lock to be acquired.
        expire (int): duration in seconds after which the lock is released
            if not renewed in the meantime.
        auto_renewal (bool): if ``True``, the lock is automatically renewed
            as long as the context manager is still active.
        blocking (bool): if ``True``, wait for the lock to be released. If
            ``False``, return immediately, raising
            :class:`DistributedLockError`.

    It is recommended to set ``expire`` to a small value and
    ``auto_renewal=True``, which ensures the lock gets released quickly in
    case the process is killed without limiting the time that can be spent
    holding the lock.

    Raises:
        DistributedLockError: when ``blocking`` is set to ``False`` and the
            lock is already acquired.
    """
    if not lock_name:
        raise ValueError("Lock name not specified.")

    redis_url = current_app.config.get("CACHE_REDIS_URL")
    redis = StrictRedis.from_url(redis_url)
    lock = Lock(redis, lock_name, expire=expire, auto_renewal=auto_renewal)

    if lock.acquire(blocking=blocking):
        try:
            yield
        finally:
            lock.release()
    else:
        raise DistributedLockError("Cannot acquire lock for %s", lock_name)
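# A minimal usage sketch for distributed_lock, assuming it is wrapped with
# contextlib.contextmanager at its definition site (not shown here) and that
# `process_batch` is a hypothetical task that must not run concurrently.
def process_batch_exclusively():
    try:
        with distributed_lock('process-batch', expire=10, auto_renewal=True):
            process_batch()
    except DistributedLockError:
        pass  # another process holds the lock; skip this run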
def _execute_task_group(self, queue, tasks, all_task_ids):
    """
    Executes the given tasks in the queue. Updates the heartbeat for task
    IDs passed in all_task_ids. This internal method is only meant to be
    called from within _process_from_queue.
    """
    log = self.log.bind(queue=queue)

    locks = []
    # Keep track of the acquired locks: If two tasks in the list require
    # the same lock we only acquire it once.
    lock_ids = set()

    ready_tasks = []
    for task in tasks:
        if task.get('lock', False):
            if task.get('lock_key'):
                kwargs = task.get('kwargs', {})
                lock_id = gen_unique_id(
                    task['func'],
                    None,
                    {key: kwargs.get(key) for key in task['lock_key']},
                )
            else:
                lock_id = gen_unique_id(
                    task['func'],
                    task.get('args', []),
                    task.get('kwargs', {}),
                )

            if lock_id not in lock_ids:
                lock = Lock(self.connection, self._key('lock', lock_id),
                            timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])
                acquired = lock.acquire(blocking=False)
                if acquired:
                    lock_ids.add(lock_id)
                    locks.append(lock)
                else:
                    log.info('could not acquire lock', task_id=task['id'])

                    # Reschedule the task
                    when = time.time() + self.config['LOCK_RETRY']
                    self._redis_move_task(queue, task['id'], ACTIVE,
                                          SCHEDULED, when)
                    # Make sure to remove it from this list so we don't
                    # re-add to the ACTIVE queue by updating the heartbeat.
                    all_task_ids.remove(task['id'])
                    continue

        ready_tasks.append(task)

    if not ready_tasks:
        return True, []

    success = self._execute(queue, ready_tasks, log, locks, all_task_ids)

    for lock in locks:
        lock.release()

    return success, ready_tasks
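# Illustrative sketch (not part of the worker): with `lock_key` set, only the
# listed kwargs contribute to the lock identity, so two tasks that differ in
# any other argument still contend for the same lock. The task dict is
# hypothetical; `gen_unique_id` is called exactly as in _execute_task_group.
def example_lock_id():
    task = {'func': 'send_email', 'lock': True, 'lock_key': ['user_id'],
            'kwargs': {'user_id': 7, 'body': 'hello'}}
    kwargs = task.get('kwargs', {})
    return gen_unique_id(
        task['func'],
        None,
        {key: kwargs.get(key) for key in task['lock_key']},
    )  # same value for every send_email task with user_id=7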
def test_owner_id(conn):
    unique_identifier = b"foobar-identifier"
    lock = Lock(conn, "foobar-tok", expire=TIMEOUT/4, id=unique_identifier)
    lock_id = lock.id
    assert lock_id == unique_identifier
    lock.acquire(blocking=False)
    assert lock.get_owner_id() == unique_identifier
    lock.release()
def test_signal_expiration(conn):
    """Signal keys expire within one second after releasing the lock."""
    lock = Lock(conn, 'foo')
    lock.acquire()
    lock.release()
    time.sleep(1)
    assert conn.llen('lock-signal:foo') == 0
    assert conn.exists('lock-signal:foo') == 0
def test_owner_id(redis_server):
    conn = StrictRedis(unix_socket_path=UDS_PATH)
    unique_identifier = b"foobar-identifier"
    lock = Lock(conn, "foobar-tok", expire=TIMEOUT/4, id=unique_identifier)
    lock_id = lock.id
    assert lock_id == unique_identifier
    lock.acquire(blocking=False)
    assert lock.get_owner_id() == unique_identifier
    lock.release()
def lock_redis(app):
    redis_url = app.config.get('CACHE_REDIS_URL')
    redis = StrictRedis.from_url(redis_url)
    lock = Lock(redis, 'my_lock', expire=60)
    lock.acquire(blocking=False)
    yield
    lock.release()
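# A hedged sketch of using lock_redis, assuming it is registered as a pytest
# fixture (the @pytest.fixture decorator is not shown above); the test below
# is hypothetical.
def test_runs_while_lock_is_held(lock_redis):
    # 'my_lock' is acquired before the test body runs and released afterwards.
    ...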
def delete_track(track):
    lock = Lock(redis_conn, 'track_{}'.format(track.id), expire=60,
                auto_renewal=True)
    if lock.acquire(timeout=1):
        # db.session.delete() returns None, so return True explicitly to
        # signal success to the caller.
        db.session.delete(track)
        lock.release()
        return True
    else:
        return False
def shuttle():
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock = Lock(redis, "Explore-Shuttle")
    if lock.acquire():
        try:
            return jsonify(
                json.loads(
                    redis.hget("Shuttle",
                               str(datetime.datetime.now().weekday()))))
        finally:
            lock.release()
def wrapper(*args, **kwargs):
    client = get_redis_connection()
    lock = Lock(client, lock_name, expire=expire)
    if lock.acquire(blocking=False):
        try:
            return func(*args, **kwargs)
        finally:
            lock.release()
    else:
        # Releasing unconditionally in a finally block would raise
        # NotAcquired here, so only log and skip.
        logger.warning('another instance of %s is running', func_path)
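# A hedged sketch of the decorator factory that would produce `wrapper` above;
# the factory name `run_once_across_workers` is an assumption, not from the
# original source. It shows where `lock_name`, `expire`, `func` and
# `func_path` come from.
def run_once_across_workers(lock_name, expire=60):
    def decorator(func):
        func_path = '%s.%s' % (func.__module__, func.__name__)

        def wrapper(*args, **kwargs):
            ...  # body as defined above

        return wrapper
    return decorator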
def test_reset_all(conn):
    lock1 = Lock(conn, "foobar1")
    lock2 = Lock(conn, "foobar2")
    lock1.acquire(blocking=False)
    lock2.acquire(blocking=False)
    reset_all(conn)
    lock1 = Lock(conn, "foobar1")
    lock2 = Lock(conn, "foobar2")
    lock1.acquire(blocking=False)
    lock2.acquire(blocking=False)
    lock1.release()
    lock2.release()
def test_signal_expiration(conn, signal_expire, method):
    """Signal keys expire ``signal_expire`` milliseconds after the lock is
    released or reset."""
    lock = Lock(conn, 'signal_expiration', signal_expire=signal_expire)
    lock.acquire()
    if method == 'release':
        lock.release()
    elif method == 'reset_all':
        reset_all(conn)
    time.sleep(0.5)
    assert conn.exists('lock-signal:signal_expiration')
    time.sleep((signal_expire - 500) / 1000.0)
    assert conn.llen('lock-signal:signal_expiration') == 0
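# In the test above, `signal_expire` (in milliseconds) and `method` are pytest
# parameters. A plausible parametrization is sketched below on a copy of the
# test; the exact values are assumptions, though signal_expire must exceed
# 500 ms for the sleeps to line up.
@pytest.mark.parametrize('signal_expire', [1500])
@pytest.mark.parametrize('method', ['release', 'reset_all'])
def test_signal_expiration_sketch(conn, signal_expire, method):
    ...  # body identical to test_signal_expiration above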
def test_reset_all(redis_server):
    conn = StrictRedis(unix_socket_path=UDS_PATH)
    lock1 = Lock(conn, "foobar1")
    lock2 = Lock(conn, "foobar2")
    lock1.acquire(blocking=False)
    lock2.acquire(blocking=False)
    reset_all(conn)
    lock1 = Lock(conn, "foobar1")
    lock2 = Lock(conn, "foobar2")
    lock1.acquire(blocking=False)
    lock2.acquire(blocking=False)
    lock1.release()
    lock2.release()
def test_auto_renewal(conn):
    lock = Lock(conn, 'lock_renewal', expire=3, auto_renewal=True)
    lock.acquire()

    assert isinstance(lock._lock_renewal_thread, InterruptableThread)
    assert not lock._lock_renewal_thread.should_exit
    assert lock._lock_renewal_interval == 2

    time.sleep(3)
    assert conn.get(lock._name) == lock.id, \
        "Key expired but it should have been getting renewed"

    lock.release()
    assert lock._lock_renewal_thread is None
def test_auto_renewal(conn):
    lock = Lock(conn, 'lock_renewal', expire=3, auto_renewal=True)
    lock.acquire()

    assert isinstance(lock._lock_renewal_thread, threading.Thread)
    assert not lock._lock_renewal_stop.is_set()
    assert isinstance(lock._lock_renewal_interval, float)
    assert lock._lock_renewal_interval == 2

    time.sleep(3)
    assert conn.get(lock._name) == lock.id, \
        "Key expired but it should have been getting renewed"

    lock.release()
    assert lock._lock_renewal_thread is None
def test_auto_renewal(conn):
    lock = Lock(conn, 'lock_renewal', expire=3, auto_renewal=True)
    lock.acquire()

    assert isinstance(lock._lock_renewal_thread, threading.Thread)
    assert not lock._lock_renewal_stop.is_set()
    assert isinstance(lock._lock_renewal_interval, float)
    assert lock._lock_renewal_interval == 2

    time.sleep(3)
    assert maybe_decode(conn.get(lock._name)) == lock.id, \
        "Key expired but it should have been getting renewed"

    lock.release()
    assert lock._lock_renewal_thread is None
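# A minimal application-side sketch of auto_renewal, assuming `conn` is a
# StrictRedis client and `long_running_job` is a hypothetical function: the
# renewal thread keeps extending the 5-second expiry until release() is
# called, so the lock survives jobs that outlive `expire`.
lock = Lock(conn, 'long-job', expire=5, auto_renewal=True)
if lock.acquire(blocking=False):
    try:
        long_running_job()  # may take much longer than `expire`
    finally:
        lock.release()  # also stops the renewal thread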
def test_given_id(conn):
    """It is possible to extend a lock using another instance of Lock
    with the same name.
    """
    name = 'foobar'
    key_name = 'lock:' + name
    orig = Lock(conn, name, expire=100, id=b"a")
    orig.acquire()
    pytest.raises(TypeError, Lock, conn, name, id=object())
    lock = Lock(conn, name, id=b"a")
    pytest.raises(AlreadyAcquired, lock.acquire)
    lock.extend(100)
    # this works, note that this ain't the object that acquired the lock
    lock.release()
    # and this fails because lock was released above
    pytest.raises(NotAcquired, orig.release)
    assert conn.ttl(key_name) == -2
def reset():
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock1 = Lock(redis, "Server-Empty")
    lock2 = Lock(redis, "Server-Overview")
    if lock1.acquire(blocking=False):
        try:
            redis.delete("Empty")
            reset_empty()
        finally:
            lock1.release()
    if lock2.acquire(blocking=False):
        try:
            redis.delete("Overview")
            reset_overview()
        finally:
            lock2.release()
def redis_locking_context(lock_name, expire=120, auto_renewal=True):
    """Locked Context Manager to perform operations on Redis."""
    if not lock_name:
        raise RedisLockError('Lock name not specified.')

    redis_url = app.config.get('CACHE_REDIS_URL')
    redis = StrictRedis.from_url(redis_url)
    lock = Lock(redis, lock_name, expire=expire, auto_renewal=auto_renewal)

    if lock.acquire(blocking=False):
        try:
            yield redis
        finally:
            lock.release()
    else:
        raise RedisLockError('Can not acquire Redis lock for %s', lock_name)
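# A minimal usage sketch for redis_locking_context, assuming it is decorated
# with contextlib.contextmanager where it is defined; the lock name and the
# operation inside the block are hypothetical.
try:
    with redis_locking_context('rebuild_index') as redis:
        redis.set('index_status', 'rebuilding')
except RedisLockError:
    pass  # another worker holds the lock; skip this run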
def test_expire(self):
    conn = StrictRedis(unix_socket_path=UDS_PATH)
    with Lock(conn, "foobar", expire=TIMEOUT/4):
        with TestProcess(sys.executable, __file__, 'daemon',
                         'test_expire') as proc:
            with self.dump_on_error(proc.read):
                name = 'lock:foobar'
                self.wait_for_strings(
                    proc.read, TIMEOUT,
                    'Getting %r ...' % name,
                    'Got lock for %r.' % name,
                    'Releasing %r.' % name,
                    'UNLOCK_SCRIPT not cached.',
                    'DIED.',
                )
    lock = Lock(conn, "foobar")
    try:
        self.assertEqual(lock.acquire(blocking=False), True)
    finally:
        lock.release()
def test_expire(conn):
    with Lock(conn, "foobar", expire=TIMEOUT/4):
        with TestProcess(sys.executable, HELPER, 'test_expire') as proc:
            with dump_on_error(proc.read):
                name = 'lock:foobar'
                wait_for_strings(
                    proc.read, TIMEOUT,
                    'Getting %r ...' % name,
                    'Got lock for %r.' % name,
                    'Releasing %r.' % name,
                    'UNLOCK_SCRIPT not cached.',
                    'DIED.',
                )

    lock = Lock(conn, "foobar")
    try:
        assert lock.acquire(blocking=False) is True
    finally:
        lock.release()
def legacy_orcid_arrays():
    """Generator to fetch token data from redis.

    Yields:
        list: user data in the form of [orcid, token, email, name]
    """
    redis_url = current_app.config.get('CACHE_REDIS_URL')
    r = StrictRedis.from_url(redis_url)
    lock = Lock(r, 'import_legacy_orcid_tokens', expire=120,
                auto_renewal=True)

    if lock.acquire(blocking=False):
        try:
            while r.llen('legacy_orcid_tokens'):
                yield loads(r.lrange('legacy_orcid_tokens', 0, 1)[0])
                r.lpop('legacy_orcid_tokens')
        finally:
            lock.release()
    else:
        logger.info("Import_legacy_orcid_tokens already executed. Skipping.")
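# A hedged sketch of consuming the generator above; `create_orcid_token` is a
# hypothetical per-record handler, not taken from the original source.
for orcid, token, email, name in legacy_orcid_arrays():
    create_orcid_token(orcid, token, email, name)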
def main():
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock = Lock(redis, "Spider")
    try:
        if lock.acquire(blocking=False):
            try:
                logging.info("Starting course information collection...")
                logging.info("Initializing the working environment...")
                redis.delete("Spider")
                # Collect basic information
                prepare()
                # Collect detailed information
                core()
                # Correct and merge the data
                correct_and_merge()
                # Move the data into the production environment
                if os.getenv("env") == "pro":
                    service.copy_to_pro()
                now = time.time() * 1000
                logging.info(
                    "Course information collection finished successfully. "
                    "Took %f seconds",
                    (int(now) - int(os.getenv("startup_time"))) / 1000
                )
            finally:
                lock.release()
        else:
            logging.warning("Terminated for another process locked [%s]",
                            "Spider")
    except (SystemExit, KeyboardInterrupt) as e:
        # Note: `except SystemExit or KeyboardInterrupt` only caught
        # SystemExit; a tuple is needed to catch both.
        raise e
    except Exception as e:
        __Application.send_email(
            subject="[NNU Classroom] Error Report",
            message=f"{type(e), e}\n"
                    f"{e.__traceback__.tb_frame.f_globals['__file__']}:"
                    f"{e.__traceback__.tb_lineno}\n"
        )
        logging.error(f"{type(e), e}")
        logging.info("Exit with code %d", -1)
        exit(-1)
    finally:
        # Clear cached data from Redis
        redis.delete("Spider")
def test_expire(conn):
    with Lock(conn, "foobar", expire=TIMEOUT / 4):
        with TestProcess(sys.executable, HELPER, "test_expire") as proc:
            with dump_on_error(proc.read):
                name = "lock:foobar"
                wait_for_strings(
                    proc.read, TIMEOUT,
                    "Getting %r ..." % name,
                    "Got lock for %r." % name,
                    "Releasing %r." % name,
                    "UNLOCK_SCRIPT not cached.",
                    "DIED.",
                )

    lock = Lock(conn, "foobar")
    try:
        assert lock.acquire(blocking=False) is True
    finally:
        lock.release()
def continuous_migration():
    """Task to continuously migrate what is pushed up by Legacy."""
    # XXX: temp redis url when we use continuous migration in kb8s
    redis_url = current_app.config.get("MIGRATION_REDIS_URL")
    if redis_url is None:
        redis_url = current_app.config.get("CACHE_REDIS_URL")

    LOGGER.debug("Connected to REDIS", redis_url=redis_url)

    r = StrictRedis.from_url(redis_url)
    lock = Lock(r, "continuous_migration", expire=120, auto_renewal=True)

    message = _next_message(r)
    if not message:
        LOGGER.debug("No records to migrate.")
        return

    if not lock.acquire(blocking=False):
        LOGGER.info("Continuous_migration already executed. Skipping.")
        return

    try:
        num_of_records = r.llen(QUEUE)
        LOGGER.info("Starting migration of records.",
                    records_total=num_of_records)
        while message:
            if message == b"END":
                r.lpop(QUEUE)
                task = migrate_from_mirror(disable_orcid_push=False)
                wait_for_all_tasks(task)
                LOGGER.info("Migration finished.")
                break
            raw_record = zlib.decompress(message)
            (recid,) = insert_into_mirror([raw_record])
            LOGGER.debug("Inserted record into mirror.", recid=recid)
            r.lpop(QUEUE)
            message = _next_message(r)
        else:
            LOGGER.info("Waiting for more records...")
    finally:
        lock.release()
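# A hedged sketch of the `_next_message` helper used above, inferred from its
# usage (peek at the queue head without popping; None when empty). This is an
# assumption, not the original implementation.
def _next_message(redis_client):
    head = redis_client.lrange(QUEUE, 0, 0)
    return head[0] if head else None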
def handle_empty(args: Dict[str, str]) -> Dict[str, Any]:
    if not __Application.serve:
        return {
            'status': 1,
            'message': "service off",
            'service': "off",
            'data': []
        }
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock = Lock(redis, "Server-Empty")
    if lock.acquire():
        try:
            if 'day' not in args.keys() or not args['day'].isdigit() or not (
                    0 <= int(args['day']) <= 6):
                raise KeyError('day')
            elif 'dqjc' not in args.keys() or not args['dqjc'].isdigit():
                raise KeyError('dqjc')
            elif 'jxl' not in args.keys() or not redis.hexists(
                    "Empty", f"{args['jxl']}_{args['day']}"):
                raise KeyError('jxl')
            jxl, day, dqjc = args['jxl'], int(args['day']), int(args['dqjc'])
            value = json.loads(
                redis.hget(name="Empty", key=f"{args['jxl']}_{args['day']}"))
            classrooms = []
            for classroom in value:
                if classroom['jc_ks'] <= dqjc <= classroom['jc_js']:
                    classrooms.append(classroom)
            for i in range(len(classrooms)):
                classrooms[i]['id'] = i + 1
            return {
                'status': 0,
                'message': "ok",
                'service': "on",
                'data': classrooms
            }
        finally:
            lock.release()
def release_locker(label_info: str, locker: redis_lock.Lock) -> Optional[bool]:
    try:
        if not locker:
            return False

        if label_info in CacheRedisAdapter.dictionary_locker \
                and CacheRedisAdapter.dictionary_locker[label_info]:
            locker.release()
            logger.warning(
                'CacheRedisAdapter release_locker: lock already available')
            return False

        CacheRedisAdapter.dictionary_locker[label_info] = locker
        locker.release()
        return True
    except Exception as ex:
        logger.error(
            'CacheRedisAdapter release_locker exception: {}'.format(ex))
        return None
def distributed_lock(lock_name, expire=10, auto_renewal=True, blocking=False):
    """Context manager to acquire a lock visible by all processes.

    This lock is implemented through Redis in order to be globally visible.

    Args:
        lock_name (str): name of the lock to be acquired.
        expire (int): duration in seconds after which the lock is released
            if not renewed in the meantime.
        auto_renewal (bool): if ``True``, the lock is automatically renewed
            as long as the context manager is still active.
        blocking (bool): if ``True``, wait for the lock to be released. If
            ``False``, return immediately, raising
            :class:`DistributedLockError`.

    It is recommended to set ``expire`` to a small value and
    ``auto_renewal=True``, which ensures the lock gets released quickly in
    case the process is killed without limiting the time that can be spent
    holding the lock.

    Raises:
        DistributedLockError: when ``blocking`` is set to ``False`` and the
            lock is already acquired.
    """
    if not lock_name:
        raise ValueError('Lock name not specified.')

    redis_url = app.config.get('CACHE_REDIS_URL')
    redis = StrictRedis.from_url(redis_url)
    lock = Lock(redis, lock_name, expire=expire, auto_renewal=auto_renewal)

    if lock.acquire(blocking=blocking):
        try:
            yield
        finally:
            lock.release()
    else:
        raise DistributedLockError('Cannot acquire lock for %s', lock_name)
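# Complementing the non-blocking sketch after the first distributed_lock
# definition: with blocking=True the context manager waits for the current
# holder instead of raising DistributedLockError. `refresh_cache` is a
# hypothetical function; contextlib.contextmanager is again assumed.
with distributed_lock('refresh-cache', blocking=True):
    refresh_cache()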
def test_release_from_nonblocking_leaving_garbage(conn):
    for _ in range(10):
        lock = Lock(conn, 'release_from_nonblocking')
        lock.acquire(blocking=False)
        lock.release()

    assert conn.llen('lock-signal:release_from_nonblocking') == 1
def test_bogus_release(conn):
    lock = Lock(conn, "foobar-tok")
    pytest.raises(NotAcquired, lock.release)
    lock.release(force=True)
def test_reset(conn):
    with Lock(conn, "foobar") as lock:
        lock.reset()
        new_lock = Lock(conn, "foobar")
        new_lock.acquire(blocking=False)
        new_lock.release()
def test_get_owner_id(conn):
    lock = Lock(conn, "foobar-tok")
    lock.acquire()
    assert lock.get_owner_id() == lock.id
    lock.release()
def test_bogus_release(conn):
    lock = Lock(conn, "foobar-tok")
    pytest.raises(NotAcquired, lock.release)
    lock.acquire()
    lock2 = Lock(conn, "foobar-tok", id=lock.id)
    lock2.release()