@contextmanager  # required so the generator below can be used in a `with` block
def dlock(key, ttl, **kwargs):
    """
    Distributed lock.
    :param key: ID of the distributed lock
    :param ttl: time-to-live of the distributed lock (milliseconds)
    :param kwargs: optional keyword arguments
    :return: None
    """
    resource_servers = [{
        'host': REDIS_HOST,
        'port': REDIS_PORT,
        'db': REDIS_DB,
        'password': REDIS_PASSWORD
    }]
    dl = Redlock(resource_servers)
    # Acquire the lock
    lock = dl.lock(key, ttl)
    # if lock is False:
    #     detail = u'acquire lock[%s] error' % key
    #     raise AcquireLockError(detail)
    yield lock
    # Release the lock
    if isinstance(lock, Lock):
        dl.unlock(lock)
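# A minimal usage sketch for dlock() above (hypothetical key and critical
# section; assumes the REDIS_* constants are defined in the surrounding module):
with dlock('order:42', 5000) as lock:
    if lock is not False:  # dl.lock() returns False when acquisition fails
        pass  # critical section goes here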
class RedisLock(object):

    def __init__(self, redis_host='127.0.0.1', redis_port=6379):
        self._lock_manager = Redlock([{
            "host": redis_host,
            "port": redis_port,
            "db": 0,
        }])

    @contextmanager
    def lock(self, lock_id, timeout=10000):
        '''
        Context manager to acquire a global distributed lock across all
        applications that share the same Redis. Uses the Redis distlock
        implementation, see https://redis.io/topics/distlock

        @lock_id string unique id of the lock
        @timeout integer timeout after which the lock will be released
                 (in milliseconds)
        '''
        lock = self._lock_manager.lock(lock_id, timeout)
        if lock is False:
            raise LockFailed()
        try:
            yield lock
        finally:
            self._lock_manager.unlock(lock)
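# A minimal sketch of how RedisLock is meant to be used (hypothetical lock id;
# assumes a Redis server on localhost and the LockFailed exception from the
# surrounding module):
locker = RedisLock()
with locker.lock('nightly-report', timeout=30000):
    pass  # only one process across all replicas runs this block at a time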
class RedisClient(object):

    def __init__(self, host, port=6379, db=0):
        self.redis = redis.StrictRedis(host=host, port=port, db=db)
        self.redlock = Redlock([self.redis])

    def __getattr__(self, name):
        # Delegate unknown attributes to the underlying StrictRedis client
        method = getattr(self.redis, name)
        return method

    def acquireDLock(self, resource, ttl=300):
        """
        Acquire a distributed lock.
        ttl: expiry time of the lock in seconds, 300 by default.
        """
        resource = 'lock_' + resource
        try:
            return self.redlock.lock(resource, ttl * 1000)
        except MultipleRedlockException as e:
            logging.error('Error acquiring dlock: %s', e)
            return False

    def releaseDLock(self, lock):
        """Release a distributed lock."""
        try:
            self.redlock.unlock(lock)
            return True
        except MultipleRedlockException as e:
            logging.error('Error releasing dlock: %s', e)
            return False
def lightning_order_with_redlock() -> None:
    """
    Lightning order with Redlock algorithm.
    :return: None
    """
    r = redis.Redis()
    # "dlm" stands for "distributed lock manager"
    dlm = Redlock([{
        'host': 'localhost',
        'port': 6379,
        'db': 0
    }])
    lock = None
    try:
        # Try to acquire the lock; dlm.lock() retries a few times and then
        # returns False rather than blocking indefinitely
        lock = dlm.lock(LOCK_KEY, 30000)

        # Business code
        remaining = int(r.get('stock'))
        if remaining > 0:
            r.set('stock', str(remaining - 1))
            print(f'Deducted stock, {remaining - 1} remaining')
        else:
            print('Failed to deduct stock')
    except MultipleRedlockException as e:
        print(e)
    finally:
        # Release the lock; guard against an acquisition that returned False
        if lock:
            dlm.unlock(lock)
def setUp(self):
    try:
        self.redlock = Redlock([{"host": "localhost"}])
        self.dstlock = Redlock([{"host": "localhost", "port": 6379, "socket_timeout": 0.5},
                                {"host": "localhost", "port": 6380, "socket_timeout": 0.5},
                                {"host": "localhost", "port": 6381, "socket_timeout": 0.5}])
    except Exception:
        pass
class RedisStore(AbstractStore): """Redis-based backend for deployments with replicas > 1""" def __init__(self, url: str): logger.info("Connecting to Redis on {}..".format(url)) self._redis = redis.StrictRedis.from_url(url) self._redlock = Redlock([url]) def set(self, key, value): self._redis.set(key, json.dumps(value, separators=(",", ":"))) def get(self, key): value = self._redis.get(key) if value: return json.loads(value.decode("utf-8")) def acquire_lock(self): return self._redlock.lock("update", 10000) def release_lock(self, lock): self._redlock.unlock(lock) def publish(self, event_type, event_data): self._redis.publish( "default", "{}:{}".format(event_type, json.dumps(event_data, separators=(",", ":"))), ) def listen(self): p = self._redis.pubsub() p.subscribe("default") for message in p.listen(): if message["type"] == "message": event_type, data = message["data"].decode("utf-8").split( ":", 1) yield (event_type, json.loads(data)) def create_screen_token(self): """Generate a new screen token and store it in Redis""" data = generate_token_data() token = data["token"] self._redis.set("screen-tokens:{}".format(token), json.dumps(data)) return token def redeem_screen_token(self, token: str, remote_addr: str): """Validate the given token and bind it to the IP""" redis_key = "screen-tokens:{}".format(token) data = self._redis.get(redis_key) if not data: raise ValueError("Invalid token") data = json.loads(data.decode("utf-8")) data = check_token(token, remote_addr, data) self._redis.set(redis_key, json.dumps(data))
def select_gpu(redis_conf=None, timeout=10000, shuffle=True):
    """Sets the CUDA_VISIBLE_DEVICES environment variable

    :param redis_conf: Redis configuration passed to redlock-py
    :param timeout: Timeout of the lock in milliseconds, default 10000
    :param shuffle: Shuffles the available GPU list, default True
    """
    if len(os.environ.get('CUDA_VISIBLE_DEVICES', 'unset')) == 0:
        # Environment variable is set but empty
        return "", None

    gpu_status = subprocess.check_output(['nvidia-smi', 'pmon', '-c', '1']).decode()
    # Example of expected result from nvidia-smi:
    #
    # # gpu      pid  type    sm   mem   enc   dec   command
    # # Idx        #   C/G     %     %     %     %   name
    #     0    25729     C    94    57     0     0   python
    #     1        -     -     -     -     -     -   -
    gpu_status = [line.split() for line in gpu_status.split("\n")[2:-1]]

    # Check if a GPU is already used by the current process
    pid = os.getpid()
    gpu_pids = [line[:2] for line in gpu_status]
    for gpu, p in gpu_pids:
        if p == '-':
            continue
        if pid == int(p):
            return int(gpu), None

    # Keep only idle GPUs (no command running on them)
    gpu_status = [line for line in gpu_status if line[7] == '-']
    if shuffle:
        # Shuffle the GPU list
        random.shuffle(gpu_status)

    if redis_conf is None:
        redis_conf = {'unix_socket_path': '/var/run/redis/redis-server.sock'}
    dlm = Redlock([redis_conf])

    if len(gpu_status) > 0:
        for gpu_ in gpu_status:
            gpu = gpu_[0]
            gpu_lock = dlm.lock("{}:gpu{}".format(platform.node(), gpu), timeout)
            if gpu_lock is not False:
                os.environ['CUDA_VISIBLE_DEVICES'] = gpu

                def unlock():
                    return dlm.unlock(gpu_lock)

                return int(gpu), unlock
    raise Exception("No GPU available!")
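# A minimal sketch of driving select_gpu() above (assumes nvidia-smi is
# installed and Redis listens on the default unix socket path):
gpu, unlock = select_gpu(timeout=60000)
try:
    pass  # run the job on the selected GPU
finally:
    if unlock is not None:
        unlock()  # release the Redlock so other processes can claim the GPU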
def __init__(self, nl_rest_url, data_key, user="******"):
    self.user = user
    self._dlm = Redlock([
        {
            "host": "redis_trade",
            "port": 6379,
            "db": 0
        },
    ])
    self.error = None
    self.nl_rest_url = nl_rest_url
    self.data_key = data_key
    self.token = self._get_token
    self.data = self._get_data
def __init__(self, conf, prefix=None, ttl_sec=60, backlog_path=None):
    self.backlog_lock = Lock()
    self.conf = conf
    self.redlock = Redlock([{"host": conf['backend']['host'],
                             "port": int(conf['backend']['port']),
                             "db": 13,
                             "password": conf['backend']['password']}],
                           retry_count=100000, retry_delay=0.01)
    if prefix is not None:
        self.prefix = prefix + "-"
    else:
        self.prefix = ""  # keep the attribute defined when no prefix is given
    self.ttl = ttl_sec * 1000
    self.backlog_filename_pat = re.compile(
        self.prefix + r'([\w\d\.\-\\/]+?)\-backlog\.([\w\d\-]+)')
    self.backlog = {}
    # Should be an absolute path to the dir containing backlog pickles
    self.backlog_path = backlog_path
    self.is_dumping = False
    if not os.path.exists(self.backlog_path):
        os.makedirs(self.backlog_path)
    self.reload()
def redlock(self):
    return Redlock([{
        'host': 'localhost',
        'port': 6379,
        'db': 0
    }], retry_count=5)
def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
    self.__redis_connection = redis_connection
    if self.__redis_connection is None:
        self.__redis_connection = StrictRedis.from_url(
            current_app.conf.CELERY_REDIS_SCHEDULER_URL)
    self._schedule = EntryProxy(self.__redis_connection)
    self._locker = locker
    if self._locker is None:
        self._locker = Redlock([current_app.conf.CELERY_REDIS_SCHEDULER_URL])
    super(ChardScheduler, self).__init__(*args, **kwargs)
def setUp(self):
    super().setUp()
    self.lock = RedisLocker(current_app.config['REDIS_URL'])
    another_lock = Redlock([
        current_app.config['REDIS_URL'],
    ], retry_count=1)
    self.another_locker = another_lock
def __init__(self, *args, **kwargs):
    if hasattr(current_app.conf, 'CELERY_REDIS_SCHEDULER_URL'):
        logger.info('backend scheduler using %s',
                    current_app.conf.CELERY_REDIS_SCHEDULER_URL)
    else:
        logger.info('backend scheduler using %s', DEFAULT_REDIS_URI)

    self.update_interval = current_app.conf.get(
        'UPDATE_INTERVAL') or datetime.timedelta(seconds=10)

    # How long we should hold on to the redis lock, in seconds
    if 'CELERY_REDIS_SCHEDULER_LOCK_TTL' in current_app.conf:
        lock_ttl = current_app.conf.CELERY_REDIS_SCHEDULER_LOCK_TTL
    else:
        lock_ttl = 30

    if lock_ttl < self.update_interval.seconds:
        lock_ttl = self.update_interval.seconds * 2
    self.lock_ttl = lock_ttl

    self._dirty = set()  # keeping modified entries by name for sync later on
    self._schedule = {}  # keeping the dynamic schedule from the redis DB here
    # self.data is used for the statically configured schedule
    try:
        self.schedule_url = current_app.conf.CELERY_REDIS_SCHEDULER_URL
    except AttributeError:
        self.schedule_url = DEFAULT_REDIS_URI
    self.rdb = StrictRedis.from_url(self.schedule_url)
    logger.info('Setting RedLock provider to {}'.format(self.schedule_url))
    self.dlm = Redlock([self.rdb])
    self._secure_cronlock = lock_factory(self.dlm, 'celery:beat:task_lock',
                                         self.lock_ttl)
    self._last_updated = None
    self.Entry.scheduler = self
    self.Entry.rdb = self.rdb
    # This will launch setup_schedule if not lazy
    super(RedisScheduler, self).__init__(*args, **kwargs)
    logger.info('Scheduler ready')
class SlaveSpider(Spider):
    name = 'slave_spider'
    default_retry_count = 3
    default_retry_delay = 1

    def __init__(self):
        self.dlm = Redlock([{"host": "42.96.132.158", "port": 6379, "db": 0,
                             'password': "******"}])
        self.list = []
        self.driver = PhantomJS()

    def start_requests(self):
        self.load_payload()
        while True:
            my_lock = self.dlm.lock('URLSLOCK', 1000)
            while isinstance(my_lock, bool):
                # lock() returned False; retry instead of spinning forever on
                # a value that never changes
                time.sleep(0.5)
                my_lock = self.dlm.lock('URLSLOCK', 1000)
            # Re-check the queue length each iteration so the loop terminates
            while self.dlm.servers[0].llen('urls') > 0:
                url = self.dlm.servers[0].lpop('urls')
                urls = self.get_urls(url=url)
                for u in urls:
                    yield Request(url=u, callback=self.parse)
            self.dlm.unlock(my_lock)
            log.msg('the slave spider is over!', log.WARNING)

    def parse(self, response):
        log.msg(response.url + ' is over', log.INFO)

    def get_urls(self, url):
        for l in self.list:
            temp = url + l
            yield temp

    def load_payload(self):
        with open('Xspider/payload.txt', 'r') as f:
            for line in f.readlines():
                self.list.append(line.strip('\n'))
def __init__(self):
    global eval_pool
    config = get_config()
    self.redis_client = redis.StrictRedis(host=config.host(),
                                          port=config.port(), db=0)
    self.dlm = Redlock([
        {
            "host": "localhost",
            "port": 6379,
            "db": 0
        },
    ], retry_count=10)
class Bloom:

    def __init__(self, key, host='127.0.0.1', port=6379, db=0):
        self.key = key  # key of the redis bitmap
        self.redis_cli = redis.StrictRedis(host=host, port=port, db=db,
                                           charset='utf-8')
        self.red_lock = Redlock([
            {
                "host": host,
                "port": port,
                "db": db
            },
        ])

    def add(self, value):
        # Hold a per-value lock so concurrent adders cannot race on the bitmap
        value_unique_lock = self.red_lock.lock(value, 60 * 1000)
        if value_unique_lock is False:
            return False
        point_list = self.get_positions(value)
        for b in point_list:
            exists = self.redis_cli.getbit(self.key, b)
            if not exists:
                break
        else:
            # All bits already set: the value is (probably) already present
            return False
        for b in point_list:
            self.redis_cli.setbit(self.key, b, 1)
        return True

    def contains(self, value):
        point_list = self.get_positions(value)
        for b in point_list:
            exists = self.redis_cli.getbit(self.key, b)
            if not exists:
                return False
        return True

    @staticmethod
    def get_positions(value):
        # Eight murmur3 hashes with seeds 40..47 give eight bit positions
        return [mmh3.hash(value, seed, signed=False) for seed in range(40, 48)]
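# A minimal usage sketch for the Bloom filter above (hypothetical key and
# values; assumes a local Redis and the mmh3 package):
bf = Bloom('crawler:seen-urls')
if bf.add('https://example.com/page/1'):
    print('new value recorded')
print(bf.contains('https://example.com/page/1'))  # True (probabilistically)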
class RedisLocker(ILocker):

    def __init__(self, redis_con):
        self.dlm = Redlock([redis_con], retry_count=1)
        self._locks = {}

    def _lock(self, key, timeout):
        """
        Try to acquire the lock.
        If that fails, sleep (timeout // 10) milliseconds and try again.
        """
        lock_key = f'lock/{key}'
        sleep_time = timeout / (10 * 1000)  # timeout is in ms, sleep() wants s
        while True:
            lock = self.dlm.lock(lock_key, timeout)
            if lock:
                break
            time.sleep(sleep_time)
        self._locks[lock_key] = lock

    def _unlock(self, key):
        lock_key = f'lock/{key}'
        lock = self._locks[lock_key]
        self.dlm.unlock(lock)
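# A minimal sketch of driving RedisLocker above (hypothetical key; assumes
# the ILocker base either exposes public lock/unlock wrappers around
# _lock/_unlock or that calling the underscored methods is acceptable here):
locker = RedisLocker('redis://localhost:6379/0')
locker._lock('inventory', timeout=5000)
try:
    pass  # critical section
finally:
    locker._unlock('inventory')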
def __init__(self):
    config = get_config()
    # Use a list comprehension rather than map(): rediscluster may iterate
    # startup_nodes more than once, which a Python 3 map object cannot support
    startup_nodes = [
        {"host": x, "port": "%s" % config.port()}
        for x in config.cluster_nodes()
    ]
    self.redis_client = rediscluster.StrictRedisCluster(
        startup_nodes=startup_nodes, decode_responses=True)
    self.dlm = Redlock([
        {
            "host": "localhost",
            "port": 6379,
            "db": 0
        },
    ], retry_count=10)
def provide_default_basics(cls):
    """ provide default basics """
    config = Configuration.load(os.path.join(root_path, cls.CONF_PATH),
                                Ini).get(cls.CONF_SECTION)
    redis_host = config.get('host', cls.DEFAULT_DB_HOST)
    redis_port = int(config.get('port', cls.DEFAULT_DB_PORT))
    redis_db_num = int(config.get('dbnum', cls.DEFAULT_DB_NUM))
    redlock = Redlock([{
        'host': redis_host,
        'port': redis_port,
        'db': redis_db_num
    }])
    logger = Logger.get('lock', cls.LOG_PATH)
    return (logger, redlock)
class TestRedlock(APITestCase):

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        lock = self.redlock.lock("pants", 100)
        print(lock)
        self.assertEqual(lock.resource, "pants")
        self.redlock.unlock(lock)
        lock = self.redlock.lock("pants", 10)
        print(lock)
        self.redlock.unlock(lock)
class GPUManager:
    """Class for allocating GPUs."""

    def __init__(self, verbose: bool = False):
        self.lock_manager = Redlock([
            {
                "host": "localhost",
                "port": 6379,
                "db": 0
            },
        ])
        self.verbose = verbose

    def get_free_gpu(self):
        """
        If some GPUs are available, try reserving one by checking out an
        exclusive redis lock. If none are available or we can't get the lock,
        sleep and check again.
        """
        while True:
            gpu_ind = self._get_free_gpu()
            if gpu_ind is not None:
                return gpu_ind
            if self.verbose:
                print(f'pid {os.getpid()} sleeping')
            time.sleep(GPU_LOCK_TIMEOUT / 1000)

    def _get_free_gpu(self):
        try:
            available_gpu_inds = [
                gpu.index
                for gpu in gpustat.GPUStatCollection.new_query()
                if gpu.memory_used < 0.5 * gpu.memory_total
            ]
        except Exception:
            # Return a dummy GPU index if no CUDA GPUs are installed; callers
            # expect a plain int, not a list
            return 0
        if available_gpu_inds:
            gpu_ind = np.random.choice(available_gpu_inds)
            if self.verbose:
                print(f'pid {os.getpid()} picking gpu {gpu_ind}')
            if self.lock_manager.lock(f'gpu_{gpu_ind}', GPU_LOCK_TIMEOUT):
                return int(gpu_ind)
            if self.verbose:
                print(f"pid {os.getpid()} couldn't get lock")
        return None
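# A minimal sketch of using GPUManager above in a worker process (assumes the
# GPU_LOCK_TIMEOUT constant from the surrounding module, a local Redis, and
# the gpustat/numpy imports):
manager = GPUManager(verbose=True)
gpu_index = manager.get_free_gpu()  # blocks until a GPU lock is acquired
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_index)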
def __init__(self, name, **redis_kwargs):
    """The default connection parameters are:
    host='localhost', port=6379, db=0

    The work queue is identified by "name". The library may create other
    keys with "name" as a prefix.
    """
    self._db = redis.StrictRedis(**redis_kwargs)
    self.lock_manager = Redlock([redis_kwargs])
    # The session ID will uniquely identify this "worker".
    self._session = str(uuid.uuid4())
    # The work queue is implemented as two queues: main, and processing.
    # Work is initially in main, and moved to processing when a client
    # picks it up.
    self._main_q_key = name
    self._processing_q_key = name + ":processing"
    self._lease_key_prefix = name + ":leased_by_session:"
    self._gc_lock_key = name + ":gc-lock"
    self._gc_lock = RedisDistributedLock(self.lock_manager, self._gc_lock_key)
    self._leases = {}
def id(self):
    deviceIP = self.client.host
    self.session_id = self.get_redis_session(deviceIP)
    if not self.check_session_invalid(self.session_id, deviceIP):
        local_ip = self.get_local_ip()
        redisobj = RedisConfig()
        redis_server = redisobj.get_redis_servers(True)
        redis_conf_ls = [{
            "host": s["host"],
            "port": s["port"],
            "db": s["dbno"]
        } for s in redis_server]
        lock_mgmt = Redlock(redis_conf_ls)
        lock_name = (local_ip + "_" + deviceIP + '_' + self.username +
                     '_device_lock_calabash')
        device_lock = lock_mgmt.lock(lock_name, 30 * 1000)
        tmp_count = 0
        while (isinstance(device_lock, bool) and not device_lock
                and tmp_count < 1000):
            tmp_count += 1
            time.sleep(0.5)
            self.session_id = self.get_redis_session(deviceIP)
            if self.check_session_invalid(self.session_id, deviceIP):
                return self.session_id
            device_lock = lock_mgmt.lock(lock_name, 30 * 1000)
        self.session_id = self.get_redis_session(deviceIP)
        if self.check_session_invalid(self.session_id, deviceIP):
            # Release the lock if we acquired one before returning early
            if not isinstance(device_lock, bool):
                lock_mgmt.unlock(device_lock)
            return self.session_id
        self.authenticate(self.username, self.password)
        # Set the session id in redis
        self.set_session_to_redis(self.session_id, deviceIP)
        # Only unlock a real Lock; lock() returns False on failure
        if not isinstance(device_lock, bool):
            lock_mgmt.unlock(device_lock)
    return self.session_id
if len(sys.argv) < 4: print('Invoke the script in the form "python kafka-consumer.py kafka_connection_string topic traces_path"') exit() kafka_connection = sys.argv[1] topic = sys.argv[2] traces_path = sys.argv[3] if not traces_path[-1] == '/': traces_path += "/" # To consume messages consumer = KafkaConsumer(topic, group_id='traces_cache', bootstrap_servers=[kafka_connection]) consumer.commit() dlm = Redlock([{"host": "localhost", "port": 6379, "db": 0}, ]) for message in consumer: key = message.key.decode('utf-8') lock = False while not lock: lock = dlm.lock(key, 5000) with open(traces_path + key, 'ab') as f: f.write(message.value) dlm.unlock(lock) consumer.task_done(message) consumer.commit()
class ProbitScheduler(Scheduler):

    def __init__(self, redis_connection=None, locker=None, *args, **kwargs):
        self.__redis_connection = redis_connection
        if self.__redis_connection is None:
            self.__redis_connection = StrictRedis.from_url(
                current_app.conf.CELERY_REDIS_SCHEDULER_URL)
        self._schedule = EntryProxy(self.__redis_connection)
        self._locker = locker
        if self._locker is None:
            self._locker = Redlock(
                [current_app.conf.CELERY_REDIS_SCHEDULER_URL])
        super(ProbitScheduler, self).__init__(*args, **kwargs)

    def setup_schedule(self):
        self.install_default_entries(self._schedule)
        self._merge(self.app.conf.CELERYBEAT_SCHEDULE)

    def get_schedule(self):
        return self._schedule

    schedule = property(get_schedule)  # This isn't inherited anymore? Do we want to do this?

    def sync(self):
        # Reload the schedule from the collection
        self._schedule = EntryProxy(self.__redis_connection)

    # I'm not sure what reserve() is intended to do, but it does not do what
    # we need it to do, so we define a _lock() method as well.
    def maybe_due(self, entry, publisher=None):
        is_due, next_time_to_run = entry.is_due()
        if not is_due:
            return next_time_to_run
        lock = self._lock(entry.name)
        if not lock:
            return next_time_to_run
        try:
            # Now that we have the lock, double-check the timestamps on the
            # entry before executing it.
            entry = self._schedule.sync(entry.name)
            if entry is None:
                return next_time_to_run
            is_due, next_time_to_run = entry.is_due()
            if not is_due:
                return next_time_to_run
            return Scheduler.maybe_due(self, entry, publisher)
        finally:
            self._unlock(lock)

    def _lock(self, name):
        return self._locker.lock(name, 1000)

    def _unlock(self, lock):
        self._locker.unlock(lock)

    def _merge(self, schedule):
        """schedule_keys = self.__redis_connection.hgetall(ENTRY_LIST_KEY).keys()
        if len(schedule_keys) > 0:
            self.__redis_connection.hdel(ENTRY_LIST_KEY, *schedule_keys)"""
        for name, entry_dict in schedule.items():
            entry = ScheduleEntry(name, **entry_dict)
            if name not in self._schedule:
                self._schedule[name] = entry
            else:
                # _lock() the existing entry so that these values aren't
                # changed while we're merging them.
                lock = self._lock(name)
                if lock:
                    try:
                        existing = self._schedule.sync(name)
                        if existing:
                            entry.last_run_at = existing.last_run_at
                            entry.total_run_count = existing.total_run_count
                        self._schedule[name] = entry
                    finally:
                        self._unlock(lock)
if not traces_path[-1] == '/':
    traces_path += '/'
if not cache_path[-1] == '/':
    cache_path += '/'
if not storage_path[-1] == '/':
    storage_path += '/'

os.makedirs(cache_path, exist_ok=True)
os.makedirs(storage_path, exist_ok=True)

db = client[db_name]
dlm = Redlock([{"host": "localhost", "port": 6379, "db": 0}])

# Load plugins
plugins = ['time', 'choice', 'var']
analyzers = []
for plugin in plugins:
    mod = importlib.import_module('plugins.' + plugin)
    Analyzer = getattr(mod, 'Analyzer')
    analyzers.append(Analyzer())

gameplays = db.gameplays.find({'modified': True})
for gameplay in gameplays:
    try:
        gameplay_file = '%s' % gameplay['_id']
class ConsumerQueue(object):

    def __init__(self, redis_host=None, redis_port=None, redis_db=None,
                 producer_queue_id=None, consumer_queue_id=None):
        assert redis_host
        assert redis_port
        assert redis_db
        assert producer_queue_id
        assert consumer_queue_id
        pool = redis.ConnectionPool(host=redis_host, port=redis_port,
                                    db=redis_db)
        self._broker = redis.Redis(connection_pool=pool)
        self._consumer_queue_id = consumer_queue_id
        self._producer_queue_id = producer_queue_id
        self._lock_manager = Redlock([{
            'host': redis_host,
            'port': redis_port,
            'db': redis_db},
        ])

    def dequeue_task(self):
        """
        Pulls the descriptor for a task that is ready for processing.
        Returns a descriptor, or None if no tasks are present.
        """
        # Do we still have work items in this worker's queue?
        descriptor = self._broker.lindex(self._consumer_queue_id, 0)
        if not descriptor:
            # Check the global work queue for work
            descriptor = self._broker.rpoplpush(self._producer_queue_id,
                                                self._consumer_queue_id)
        return descriptor

    def retire_task(self, descriptor=None):
        """Mark a task as having been successfully dispatched."""
        assert descriptor
        popped_id = self._broker.lpop(self._consumer_queue_id)
        assert popped_id == descriptor
        return popped_id

    def lock_work_item(self, descriptor=None, lock_timeout=None):
        """Lock a task to prevent other consumers from double processing."""
        assert descriptor
        assert lock_timeout
        lock = self._lock_manager.lock(descriptor, lock_timeout)
        return lock

    def unlock_task(self, lock=None):
        assert lock
        self._lock_manager.unlock(lock)
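# A minimal sketch of a consumer loop built on ConsumerQueue above
# (hypothetical queue ids; assumes a local Redis):
queue = ConsumerQueue(redis_host='localhost', redis_port=6379, redis_db=0,
                      producer_queue_id='tasks', consumer_queue_id='tasks:w1')
descriptor = queue.dequeue_task()
if descriptor:
    lock = queue.lock_work_item(descriptor=descriptor, lock_timeout=10000)
    if lock:  # lock() returns False if another consumer holds the item
        try:
            # ... process the task described by `descriptor` ...
            queue.retire_task(descriptor=descriptor)
        finally:
            queue.unlock_task(lock=lock)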
class TestRedlock(unittest.TestCase):

    def setUp(self):
        self.redlock = Redlock([{"host": "localhost"}])

    def test_lock(self):
        lock = self.redlock.lock("pants", 100)
        self.assertEqual(lock.resource, "pants")
        self.redlock.unlock(lock)
        lock = self.redlock.lock("pants", 10)
        self.redlock.unlock(lock)

    def test_blocked(self):
        lock = self.redlock.lock("pants", 1000)
        bad = self.redlock.lock("pants", 10)
        self.assertFalse(bad)
        self.redlock.unlock(lock)

    def test_bad_connection_info(self):
        with self.assertRaises(Warning):
            Redlock([{"cat": "hog"}])

    def test_py3_compatible_encoding(self):
        lock = self.redlock.lock("pants", 1000)
        key = self.redlock.servers[0].get("pants")
        self.assertEqual(lock.key, key)

    def test_ttl_not_int_trigger_exception_value_error(self):
        with self.assertRaises(ValueError):
            self.redlock.lock("pants", 1000.0)

    def test_multiple_redlock_exception(self):
        ex1 = Exception("Redis connection error")
        ex2 = Exception("Redis command timed out")
        exc = MultipleRedlockException([ex1, ex2])
        exc_str = str(exc)
        self.assertIn('connection error', exc_str)
        self.assertIn('command timed out', exc_str)
class CacheManager(object):

    def __init__(self, redis_engine, lock_ttl=1000):
        self.redis = redis_engine
        self.redlock = Redlock([redis_engine])
        self.lock_ttl = lock_ttl

    @staticmethod
    def get_lock_key(key):
        return "%s:lock" % key

    def lock(self, key, nowait=False):
        lock_key = self.get_lock_key(key)
        acquired = False
        while not acquired:
            acquired = self.redlock.lock(lock_key, self.lock_ttl)
            if acquired or nowait:
                return acquired
            time.sleep(0.3)

    def unlock(self, lock):
        self.redlock.unlock(lock)

    def get_item(self, key, recover=None, ttl=None, arguments=([], {}), **kw):
        item_key = self.redis.get(key)
        value = self.redis.get(item_key) if item_key else None

        # Return None if we cannot recover the value
        if value is None and recover is None:
            return None

        # Try to recover the value
        if value is None:
            return self.set_item(key,
                                 lambda: recover(*arguments[0], **arguments[1]),
                                 ttl=ttl)
        else:
            return deserialize(value)

    def set_item(self, key, value, ttl=None):
        old_item_key = self.redis.get(key)
        if old_item_key is None:
            # This is a new item; lock it to prevent a concurrency violation
            lock = self.lock(key, nowait=True)
            if not lock:
                # It seems this item is being loaded in another thread, so
                # wait for that: make sure the object is reloaded, then
                # release the lock.
                self.unlock(self.lock(key))
                return self.get_item(key)
            # Check if the item was loaded in the meantime
            v = self.get_item(key, None)
            if v:
                self.unlock(lock)
                return v
        else:
            lock = None
        try:
            guid = uuid.uuid1()
            item_key = "%s:%s" % (key, guid)
            value = value() if callable(value) else value
            self.redis.set(item_key, serialize(value), ex=ttl)
            self.redis.set(key, item_key, ex=ttl)
            if old_item_key:
                self.redis.delete(old_item_key)
            return value
        finally:
            if lock:
                self.unlock(lock)

    def hget_item(self, key, dict_key, recover=None, ttl=None,
                  arguments=([], {}), **kw):
        item_key = self.redis.get(key)
        value = self.redis.hget(item_key, dict_key) if item_key else None

        # Return None if we cannot recover the value
        if value is None and recover is None:
            return None

        # Try to recover the value
        if value is None:
            return self.hset_item(key, dict_key,
                                  lambda: recover(*arguments[0], **arguments[1]),
                                  ttl=ttl)
        else:
            return deserialize(value)

    def hset_item(self, key, dict_key, value, ttl=None):
        old_item_key = self.redis.get(key)
        if old_item_key is None:
            # This is a new item; lock it to prevent a concurrency violation
            lock = self.lock(key, nowait=True)
            if not lock:
                # It seems this item is being loaded in another thread, so
                # wait for that: make sure the object is reloaded, then
                # release the lock.
                self.unlock(self.lock(key))
                return self.hget_item(key, dict_key)
            # Check if the item was loaded in the meantime
            v = self.hget_item(key, dict_key, None)
            if v:
                self.unlock(lock)
                return v
        else:
            lock = None
        try:
            guid = uuid.uuid1()
            item_key = "%s:%s" % (key, guid)
            value = value() if callable(value) else value
            self.redis.hset(item_key, dict_key, serialize(value))
            self.redis.expire(item_key, time=ttl)
            self.redis.set(key, item_key, ex=ttl)
            if old_item_key:
                self.redis.delete(old_item_key)
            return value
        finally:
            if lock:
                self.unlock(lock)

    def del_item(self, *keys):
        item_keys_to_remove = []
        for key in keys:
            item_key = self.redis.get(key)
            if item_key is not None:
                item_keys_to_remove.append(item_key)
        self.redis.delete(*item_keys_to_remove)
        self.redis.delete(*keys)

    def get_list(self, key, recover=None, ttl=None, arguments=([], {}),
                 key_extractor=None):
        # First, get the list of keys
        value = self.redis.get(key)
        if value is None and recover is None:
            return None, None

        # Try to recover the value
        if value is None:
            return self.set_list(
                key,
                lambda: recover(*arguments[0], **arguments[1]),
                ttl=ttl,
                key_extractor=key_extractor
            )
        else:
            metadata, item_keys = deserialize(value)
            return metadata, [self.get_item(item_key) for item_key in item_keys]

    def set_list(self, key, value, ttl=None, key_extractor=None):
        # Lock the key to prevent a concurrency violation
        lock = self.lock(key)
        # Check if the list was loaded in the meantime
        v = self.get_list(key)
        if v[1]:
            self.unlock(lock)
            return v
        # packed: (value_metadata, value)
        if key_extractor is None:
            def key_extractor(x):
                return x["id"]
        try:
            value_metadata, value = value() if callable(value) else value
            item_keys = []
            # collections.Iterable was removed in Python 3.10; use the abc
            assert isinstance(value, collections.abc.Iterable), \
                "Value must be iterable"
            for item_value in value:
                item_key = create_cache_key(key.split(":")[0],
                                            key_extractor(item_value))
                item_keys.append(item_key)
                self.set_item(item_key, item_value, ttl=ttl)
            self.redis.set(key, serialize((value_metadata, item_keys)), ex=ttl)
            return value_metadata, value
        finally:
            if lock:
                self.unlock(lock)

    def invalidate_item(self, key):
        item_key = self.redis.get(key)
        self.redis.delete(key)
        if item_key:
            self.redis.delete(item_key)

    def invalidate_list(self, key):
        self.redis.delete(key)

    def decorate(self, namespace, list_=False, ttl=None, key_extractor=None):
        def decorator(func):
            def wrapper(*args, **kwargs):
                cache_key = create_cache_key(namespace, kwargs)
                cache_params = dict(ttl=ttl, recover=func,
                                    arguments=(args, kwargs))
                if list_:
                    cache_method = self.get_list
                    cache_params["key_extractor"] = key_extractor
                else:
                    cache_method = self.get_item
                return cache_method(cache_key, **cache_params)
            return wrapper
        return decorator
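# A minimal sketch of the decorator-driven use of CacheManager above
# (hypothetical function and TTL; assumes a StrictRedis engine plus the
# serialize/deserialize/create_cache_key helpers from the surrounding module):
cache = CacheManager(redis.StrictRedis(), lock_ttl=2000)

@cache.decorate('users', ttl=60)
def load_user(user_id=None):
    return {"id": user_id, "name": "..."}  # e.g. an expensive DB query

load_user(user_id=1)  # first call populates the cache
load_user(user_id=1)  # second call is served from Redis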
# host = '42.96.132.158'
# psd = '******'
# r = redis.Redis(host=host, password=psd, db=0, port=6379, socket_timeout=10)
# url = r.lpop('urls')
# f = open('Xspider/payload.txt', 'r')
# list = []
# for line in f.readlines():
#     line = line.strip('\n')
#     url = url + line
#     list.append(url)
# print(list)
from redlock import Redlock
from redlock import MultipleRedlockException
import time

dlm = Redlock([{"host": "42.96.132.158", "port": 6379, "db": 0,
                'password': "******"}])
try:
    dlm.servers[0].flushall()
    while True:
        # Re-acquire the lock each iteration so the loop makes progress
        my_lock = dlm.lock('LOCK', 1000)
        if isinstance(my_lock, bool):
            print('wait')
            time.sleep(0.5)
        else:
            print('dosomething')
            dlm.servers[0].lpush('urls', time.time())
            dlm.unlock(my_lock)
            time.sleep(0.5)
except MultipleRedlockException as e:
    raise e