def test_signal_cleanup_on_reset_all(conn):
    """After resetting all locks, no signal keys should remain."""
    guard = Lock(conn, 'foo')
    guard.acquire()
    reset_all(conn)
    # The signal key must be fully gone: zero length and non-existent.
    assert conn.exists('lock-signal:foo') == 0
    assert conn.llen('lock-signal:foo') == 0
def reset_lock(database):
    """Forcibly release every redis lock held in the given database.

    :param database: The database you are modifying
    :return: None
    """
    redis_lock.reset_all(database)
def initialize():
    """Initialize the class-level Redis client used for caching.

    Reads HOST/PORT from CACHE_REDIS_CONFIGURATION, creates the client,
    clears any stale locks, resets stats and flushes the cache.

    :return: True on success (or when already initialized), False otherwise.
    """
    try:
        if CacheRedisAdapter.is_initialized:
            return True
        if "HOST" not in CACHE_REDIS_CONFIGURATION or "PORT" not in CACHE_REDIS_CONFIGURATION:
            return False
        host = CACHE_REDIS_CONFIGURATION["HOST"]
        port = CACHE_REDIS_CONFIGURATION["PORT"]
        CacheRedisAdapter.client_cache = redis.Redis(host=host, port=port)
        # BUG FIX: validate the client BEFORE first use — the original
        # called reset_all() on it and only then checked it for falsiness.
        if not CacheRedisAdapter.client_cache:
            logger.error('CacheRedisAdapter Initialization Failed')
            return False
        # Drop locks left over from a previous (possibly crashed) run.
        redis_lock.reset_all(redis_client=CacheRedisAdapter.client_cache)
        CacheRedisAdapter.client_cache.config_resetstat()
        # flushall wipes every key in every db — intentional fresh start.
        CacheRedisAdapter.client_cache.flushall()
        CacheRedisAdapter.is_initialized = True
        logger.info('CacheRedisAdapter Initialization Success')
        return True
    except Exception as ex:
        logger.error(
            'CacheRedisAdapter Initialization Exception: {}'.format(ex))
        return False
def test_reset_all_signalizes(make_conn, make_process):
    """Call to reset_all() causes LPUSH to all signal keys, so blocked waiters become unblocked."""
    def workerfn(unblocked):
        worker_conn = make_conn()
        first = Lock(worker_conn, 'lock1')
        second = Lock(worker_conn, 'lock2')
        if first.acquire() and second.acquire():
            unblocked.value = 1

    unblocked = multiprocessing.Value('B', 0)
    conn = make_conn()
    holders = [Lock(conn, name) for name in ('lock1', 'lock2')]
    for held in holders:
        held.acquire()
    worker = make_process(target=workerfn, args=(unblocked,))
    worker.start()
    worker.join(0.5)
    # Worker is still blocked on both locks; reset_all must wake it.
    reset_all(conn)
    worker.join(0.5)
    assert unblocked.value == 1
def handle(self, *args, **options):
    # Management-command entry point: import photos, then run the
    # processing pipeline stages strictly in order.
    self.import_photos(options['paths'])
    ensure_raw_processing_tasks()
    process_raw_tasks()
    process_generate_thumbnails_tasks()
    # Forcibly clear redis locks before classification; `r` is presumably
    # a module-level redis client — verify where it is defined.
    redis_lock.reset_all(r)
    process_classify_images_tasks()
    # NOTE(review): "handleLocaltion" looks like a typo for "handleLocation";
    # the method is defined elsewhere — confirm before renaming.
    self.handleLocaltion()
    self.handleObject()
def test_signal_expiration(conn, signal_expire, method):
    """Signal keys expire within two seconds after releasing the lock."""
    held = Lock(conn, 'signal_expiration', signal_expire=signal_expire)
    held.acquire()
    # Release via whichever mechanism the parametrized test selects.
    if method == 'reset_all':
        reset_all(conn)
    elif method == 'release':
        held.release()
    time.sleep(0.5)
    # Signal key exists right after release...
    assert conn.exists('lock-signal:signal_expiration')
    # ...and is gone once the expiry window has elapsed.
    time.sleep((signal_expire - 500) / 1000.0)
    assert conn.llen('lock-signal:signal_expiration') == 0
def test_reset_all(conn):
    """After reset_all(), previously-held lock names can be re-acquired."""
    names = ("foobar1", "foobar2")
    first_pair = [Lock(conn, name) for name in names]
    for held in first_pair:
        held.acquire(blocking=False)
    reset_all(conn)
    # Fresh Lock objects over the same names must acquire cleanly.
    second_pair = [Lock(conn, name) for name in names]
    for fresh in second_pair:
        fresh.acquire(blocking=False)
    for fresh in second_pair:
        fresh.release()
def reset_lock(database):
    """Forcibly release every redis lock held in the given database.

    :param database: The database you are modifying
    :return: None
    """
    # Fixed "Reseting" typo in the original log message.
    logger.info('Resetting database locks')
    logger.debug('Call Successful: %s', 'reset_lock: call successful', extra=d)
    redis_lock.reset_all(database)
    # BUG FIX: log "all locks have been reset" only after the reset ran;
    # the original emitted it before calling reset_all().
    logger.debug('Locking: %s', 'reset_lock: all locks have been reset', extra=d)
    logger.info('Database locks reset')
def test_reset_all(redis_server):
    """After reset_all(), previously-held lock names can be re-acquired."""
    conn = StrictRedis(unix_socket_path=UDS_PATH)
    names = ("foobar1", "foobar2")
    first_pair = [Lock(conn, name) for name in names]
    for held in first_pair:
        held.acquire(blocking=False)
    reset_all(conn)
    # Fresh Lock objects over the same names must acquire cleanly.
    second_pair = [Lock(conn, name) for name in names]
    for fresh in second_pair:
        fresh.acquire(blocking=False)
    for fresh in second_pair:
        fresh.release()
def reset_all_locks(config: dict) -> None:
    """
    A helper method to reset all locks

    Typically used to clean things up after crashes or on startup when
    not auto-expiring keys.

    Args:
        config(dict): The configuration details for the 'lock' section of the config file

    Returns:
        None
    """
    redis_cfg = config['redis']
    connection = StrictRedis(
        host=redis_cfg['host'],
        port=redis_cfg['port'],
        db=redis_cfg['db'],
    )
    redis_lock.reset_all(connection)
def run(
    self, project, port=8088, mode="sync", local=True, dev=True,
    config=None, workers_num=3, run_name=None, lock_type="redis"
):
    # Launch entry point: publish project settings into STATE, clear
    # stale redis locks, then start either the dev server or gunicorn.
    # Last path component of the absolute project path is the name.
    *_, project_name = os.path.abspath(project).split("/")
    STATE.mset(dict(
        project_name=project_name,
        project_path=os.path.expanduser(project),
    ))
    # loading worker config
    config_path = os.path.join(project, 'config.json') if config is None else config
    with open(config_path, "r") as f:
        STATE.mset(json.load(f))
    STATE.mset(dict(
        port=port,
        mode=mode,
        workers_num=workers_num,
        project=project,
        local=local,
        # Fresh hex run id unless an explicit run_name is supplied.
        run_id=uuid1().hex if run_name is None else run_name,
        lock_type=lock_type
    ))
    # Drop any locks left over from a previous run.
    redis_lock.reset_all(REDIS)
    if dev:
        STATE["status"] = "serving"
        self._run_dev()
    else:
        STATE["status"] = "idle"
        # NOTE(review): "_run_gunicron" looks like a typo for "_run_gunicorn";
        # the method is defined elsewhere — confirm before renaming.
        self._run_gunicron()
def test_reset_all_signalizes(make_conn, make_process):
    """Call to reset_all() causes LPUSH to all signal keys, so blocked waiters become unblocked."""
    def workerfn(unblocked):
        child_conn = make_conn()
        a = Lock(child_conn, 'lock1')
        b = Lock(child_conn, 'lock2')
        if a.acquire() and b.acquire():
            unblocked.value = 1

    unblocked = multiprocessing.Value('B', 0)
    conn = make_conn()
    blocker_one = Lock(conn, 'lock1')
    blocker_two = Lock(conn, 'lock2')
    blocker_one.acquire()
    blocker_two.acquire()
    worker = make_process(target=workerfn, args=(unblocked,))
    worker.start()
    worker.join(0.5)
    # The worker is blocked on both locks; reset_all must wake it up.
    reset_all(conn)
    worker.join(0.5)
    assert unblocked.value == 1
def reset_all(self):
    """Forcibly delete any locks that were left behind (e.g. after a
    crash). Use this with care.
    """
    # Delegates to the module-level reset_all with our private client.
    reset_all(self.__client)
def setUp(self, *args):
    # Clear any redis locks left over from earlier tests so each test
    # starts from a clean state.
    redis_lock.reset_all(get_redis_connection("default"))
    self.service = WeiboCaptureService('http://weibo.com/')
def setUp(self, *args):
    # Clear any redis locks left over from earlier tests so each test
    # starts from a clean state.
    redis_lock.reset_all(get_redis_connection("default"))
    self.service = BrowserService()
def reset(self):
    # Forcibly release every lock held through this instance's client
    # (typically used to clean up after a crash).
    redis_lock.reset_all(self._client)
def reset_lock():
    """Forcibly release every redis lock on the default connection."""
    connection = get_redis_connection()
    redis_lock.reset_all(connection)
def refresh_lock(self):
    """
    Reset all locks for the chrome resource.
    """
    # Forcibly clears every lock on the default redis connection.
    redis_lock.reset_all(get_redis_connection("default"))
def handle(self, *args, **options):
    # Management-command entry point: forcibly release all redis locks.
    # `redis_connection` is presumably a module-level client — verify.
    redis_lock.reset_all(redis_connection)
import redis
import redis_lock

# Per-key lock expiry in seconds; prevents a crashed writer from
# holding a lock forever.
LOCK_TIMEOUT = 3


class RedisClient(object):
    """Thin StrictRedis wrapper that guards set() with a redis lock."""

    def __init__(self, host='localhost', port=6379):
        self.connection = redis.StrictRedis(host, port)

    def keys(self, pattern):
        """Return all keys matching *pattern*."""
        return self.connection.keys(pattern)

    def get(self, key):
        """Return the value stored at *key* (or None)."""
        return self.connection.get(key)

    def set(self, key, value):
        """Set *key* under a per-key lock.

        Returns True on success; False when another writer holds the lock.
        """
        lock = redis_lock.Lock(self.connection, key, expire=LOCK_TIMEOUT)
        if lock.acquire(blocking=False):
            self.connection.set(key, value)
            lock.release()
            return True
        return False

    def delete(self, key):
        """Delete *key* from redis."""
        self.connection.delete(key)


redis_client = RedisClient()
# BUG FIX: redis_lock.reset_all() requires a redis client argument; the
# original called it bare (TypeError) before any connection existed.
# Clear stale locks through the module-level client's connection.
redis_lock.reset_all(redis_client.connection)
def clear_all(self):
    """Clears all locks"""
    # Critical-level log: while locks are cleared, concurrent operations
    # may collide — callers must understand the risk.
    self.logger.critical(
        'caution: clearing all locks; collision safety is voided')
    redis_lock.reset_all(self.client)
def setUp(self):
    # Clear any redis locks left over from earlier tests so each test
    # starts from a clean state.
    redis_lock.reset_all(get_redis_connection("default"))
def reset_locks():
    """Forcibly release all redis locks via a default local connection."""
    redis_lock.reset_all(StrictRedis())