def lock(name, validity, redis, retry_count=3, retry_delay=200, **kwargs):
    """Acquire the distributed lock `name` for `validity` ms.

    A negative retry_count selects blocking mode: keep trying forever.
    retry_delay is given in milliseconds (redlock-py wants seconds, hence
    the /1000.0 below).  Returns 0 on success, 1 on a failed attempt,
    3 on an unexpected error (non-blocking mode only).
    """
    blocking = retry_count < 0
    if blocking:
        retry_count = 0
    while True:
        err = None
        try:
            dlm = redlock.Redlock(
                redis,
                retry_count=retry_count + 1,
                retry_delay=retry_delay / 1000.0,
            )
            acquired = dlm.lock(name, validity)
            if acquired is False:
                log("failed")
                err = 1
            else:
                log("ok")
                print(acquired.key)
                return 0
        except Exception as e:
            log("error %s" % e)
            err = 3
        if not blocking:
            return err
        # blocking mode: redlock already slept for retry_delay, just retry
def __init__(self, **config):
    """Build a single-server redlock manager from keyword configuration.

    Expects 'server', 'retry_count' and 'retry_delay' keys in config.
    """
    # Keep the raw configuration around for later inspection.
    self.config = config
    self.dlm = redlock.Redlock(
        [config['server']],
        retry_count=config['retry_count'],
        retry_delay=config['retry_delay'],
    )
    # Currently held lock, if any.
    self.dlm_lock = None
def __init__(self):
    """Set up reducer state: lock manager, redis client and result keys."""
    self.lock_time = 5000  # lock TTL in milliseconds
    self.dlm = redlock.Redlock(config.redis_servers)
    self.resource = "reduce"  # name of the distributed lock resource
    self.lock = None  # currently held lock, if any
    self.queue = 'task_finished'
    self.redis_client = store.RedisStorage()
    self.redis_final_result_key = "result"
    # NOTE(review): magic number — presumably the expected part count; confirm.
    self.total = 31
    # (removed a redundant trailing `pass`)
def unlock(name, key, redis, **kwargs):
    """Release the lock `name` held with `key` on the given redis servers.

    Returns 0 on success, 3 on any error.
    """
    try:
        manager = redlock.Redlock(redis)
        # validity (first field) is irrelevant when unlocking.
        held = redlock.Lock(0, name, key)
        manager.unlock(held)
    except Exception as e:
        log("Error: %s" % e)
        return 3
    log("ok")
    return 0
def run_unlock(redis, name, key, **_):
    """CLI helper: release lock `name`/`key`; returns 0 on success, 3 on failure."""
    try:
        manager = redlock.Redlock(redis)
        # validity (first field) does not matter for an unlock.
        stale = redlock.Lock(0, name, key)
        manager.unlock(stale)
    except (redlock.CannotObtainLock, redlock.MultipleRedlockException) as e:
        log.error('Error: %s', e)
        return 3
    log.info("ok")
    return 0
def __init__(self, uri='https://skimdb.npmjs.com/registry/'):
    """Connect to local redis and prepare a redlock manager for the producer."""
    self.base_uri = uri
    self.logger = logging.getLogger('producer')
    self.redis = redis.Redis('localhost', 6379)
    self.redlock = redlock.Redlock(
        [{"host": "localhost", "port": 6379, "db": 0}],
    )
    self.logger.info("producer started")
def acquire_lock(self, expires=1000, is_blocking=True):
    """Acquire the distributed lock `self.name`.

    expires: lock TTL in milliseconds.
    is_blocking: when True, retry (with a capped delay) until acquired;
    when False, make a single attempt.
    Returns True once the lock is held; in non-blocking mode returns False
    if the single attempt fails.
    """
    servers = [{"host": cnt.REDIS_SERVER, "port": cnt.REDIS_PORT, "db": 0}]
    if not is_blocking:
        self.red_lock = redlock.Redlock(
            servers, retry_count=self.retry_count, retry_delay=self.retry_delay)
        self.lock = self.red_lock.lock(self.name, ttl=expires)
        return self.lock is not False

    curr_retry_delay = self.retry_delay
    while True:
        try:
            self.red_lock = redlock.Redlock(
                servers, retry_count=self.retry_count,
                retry_delay=curr_retry_delay)
            self.lock = self.red_lock.lock(self.name, ttl=expires)
            # BUG FIX: the old code did `assert self.lock != False` — asserts
            # are stripped under `python -O`, which would make this return
            # True even when the lock was NOT acquired. Check explicitly.
            if self.lock is not False:
                return True
            logger.error("failed to acquire lock %r, retrying", self.name)
        except Exception:
            logger.exception("error!!!")
        # NOTE(review): min() caps the delay at 0.2s, so it never grows —
        # if exponential backoff was intended this should likely be max()
        # or use a larger cap. Kept as-is; confirm intent.
        curr_retry_delay = min(0.2, 2 * curr_retry_delay)
def run_lock(redis, name, key, validity, retry_delay, timeout, force, **_):
    """CLI helper: acquire lock `name`, retrying until `timeout` ms elapse.

    Returns 0 once locked, 1 on timeout. A negative timeout retries forever.
    """
    lock_value = key or redlock.get_unique_id()
    manager = redlock.Redlock(redis)
    started = time_ms()
    while True:
        try:
            acquired = manager.lock(name, lock_value, validity, force=force)
        except redlock.MultipleRedlockException:
            # Give up only when a non-negative timeout has been exceeded.
            if timeout >= 0 and time_ms() >= (started + timeout):
                log.info('Lock timeout')
                return 1
            time.sleep(retry_delay / 1000.0)
        else:
            print('Locked name:%s, key:%s, validity:%s'
                  % (name, acquired.key, validity))
            return 0
def lock(self, name, validity, retry_count=-1, retry_delay=200):
    """Acquire lock `name` with a TTL of `validity` ms.

    A negative retry_count (the default) means block until acquired.
    Non-blocking mode returns the Lock on success or None on failure.
    retry_delay is in milliseconds (redlock-py expects seconds).
    """
    is_blocking = retry_count < 0
    if is_blocking:
        retry_count = 0
    # The manager is loop-invariant; build it once instead of per attempt.
    dlm = redlock.Redlock([self._redis],
                          retry_count=retry_count + 1,
                          retry_delay=retry_delay / 1000.0)
    while True:
        acquired = dlm.lock(name, validity)
        if acquired:
            return acquired
        if not is_blocking:
            # BUG FIX: the old code fell through and looped forever even in
            # non-blocking mode; report the failure to the caller instead.
            return None
        # blocking: redlock already slept for retry_delay, just retry
def main(unused_argv):
    """Worker loop: poll the task and train any unlocked, unfinished model.

    For each model directory of the current epoch, take a distributed lock,
    train the model, persist weights/results, then release the lock.
    """
    pool = redis.ConnectionPool(host=REDISHOST, port=6379)
    r = redis.Redis(connection_pool=pool)
    dlm = redlock.Redlock([{"host": REDISHOST, "port": 6379, "db": 0}])
    while True:
        time.sleep(1)
        ret = r.hmget(TASKNAME, 'epoch', 'status')
        t = task(TASKNAME, int(ret[0]), int(ret[1]))
        if t.status != GROWING:
            continue
        epochpath = DATAROOT + '/' + str(t.epoch)
        for d in os.listdir(epochpath):
            modelpath = os.path.join(epochpath, d)
            if not os.path.isdir(modelpath):
                continue
            mod_lock = dlm.lock(modelpath, 10800 * 1000)  # 3h TTL, in ms
            if not mod_lock:
                continue  # another worker holds this model
            try:
                # BUG FIX: previously this `continue`d while still holding
                # the lock, leaking it for the full 3-hour TTL. The
                # try/finally also guarantees release if training raises.
                if os.path.exists(modelpath + '/' + 'result'):
                    continue
                modelfilepath = modelpath + '/' + d
                print('training modelpath:', modelfilepath)
                msc = ModelString.load(modelfilepath)
                mod = msc.load_model()
                msc.load_weights(mod, by_name=True)
                # Renamed the third result from `np` — it shadowed the
                # conventional numpy alias.
                loss, acc, penalty = train(mod, msc, t.epoch + 1)
                # calculate fitness: accuracy, penalized by `penalty`.
                result = Modresult(acc, acc - penalty, modelpath)
                msc.save_weights(mod)
                msc.save()
                result.save()
            finally:
                dlm.unlock(mod_lock)
def __init__(
    self,
    actor_capacity,
    nb_actor,
    redis_servor,
    host_redis,
    port_redis,
    synchronize_actors_with_learner,
):
    """Track shared replay-memory bookkeeping across actors and the learner."""
    self.actor_capacity = actor_capacity
    self.nb_actor = nb_actor
    self.full_capacity = nb_actor * actor_capacity
    # Used to track if actor memory is full, only for each actor...
    self.actor_full = False
    # Used to track actual capacity, only for the learner...
    self.memory_full = False
    self.redis_servor = redis_servor
    self.synchronise_actors_with_learner = synchronize_actors_with_learner
    if self.synchronise_actors_with_learner:
        manager = redlock.Redlock(
            [{"host": host_redis, "port": port_redis, "db": 0}])
        # redlock-py exposes these as plain attributes, so they are
        # configured after construction.
        manager.retry_count = cst.RETRY_COUNT
        manager.retry_delay = cst.RETRY_DELAY
        self.redlock_manager = manager
def lock(self):
    """Make one attempt to take the configured lock; True on success."""
    self.dlm = redlock.Redlock(self.servers)
    self.r = self.dlm.lock(self.resource, self.ttl)
    # redlock returns False on failure, a Lock (truthy) on success.
    return bool(self.r)
def unlock(self, lock):
    """Release a previously acquired lock (identified by resource and key)."""
    manager = redlock.Redlock([self._redis])
    # Rebuild a minimal Lock; validity (first field) is irrelevant on
    # unlock. (Renamed the local — the original shadowed the parameter.)
    stale = redlock.Lock(0, lock.resource, lock.key)
    manager.unlock(stale)
def get_lock_manager():
    """Return a redlock manager pointed at the configured redis servers."""
    servers = [redis_uri_string]
    return redlock.Redlock(servers)
# https://medium.com/better-programming/introduction-to-apscheduler-86337f3bb4a6
import logging, time

from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.blocking import BlockingScheduler
#from apscheduler.schedulers.background import BackgroundScheduler
import redlock

from . import APP, jobs

# Persist jobs in redis so schedules survive process restarts.
JOBSTORES = {'default': RedisJobStore(host=APP.config.get('REDIS_HOST'),
                                      db=APP.config.get('REDIS_JOBSTORE_DB'))}
SCHED = BlockingScheduler(jobstores=JOBSTORES)
#SCHED = BackgroundScheduler(daemon=True, jobstores=JOBSTORES)
logging.getLogger('apscheduler').setLevel(logging.WARNING)

# https://github.com/SPSCommerce/redlock-py
DLM = redlock.Redlock(APP.config.get('REDLOCK_CONN'),
                      retry_count=3, retry_delay=0.2)  # in seconds


# @SCHED.scheduled_job('interval', hours=1)
def import_data():
    """Queue the import job, guarded by a distributed lock so that only
    one scheduler instance enqueues it per run."""
    try:
        my_lock = DLM.lock('import_data', 10000)  # in milliseconds
        if my_lock:
            jobs.import_data.queue()
            logging.info('import_data')
            time.sleep(1)
            DLM.unlock(my_lock)
    except redlock.MultipleRedlockException as exc:
        logging.exception(exc)


SCHED.add_job(import_data, 'cron', hour='*')
def __init__(self):
    """Prepare lock manager and redis client for the part-task consumer."""
    self.lock_time = 5000  # lock TTL in milliseconds
    self.dlm = redlock.Redlock(config.redis_servers)
    self.queue = 'part_task_finished'
    self.redis_client = store.RedisStorage()
    # Currently held lock, if any.
    self.lock = None
import redlock

# Three independent redis instances, as the redlock algorithm expects.
addrs = [
    {"host": "localhost", "port": 6379, "db": 0},
    {"host": "localhost", "port": 6380, "db": 0},
    {"host": "localhost", "port": 6382, "db": 0},
]

dlm = redlock.Redlock(addrs)
success = dlm.lock("user-lck-aaa", 5000)  # TTL in milliseconds
if success:
    print('lock success')
    # BUG FIX: unlock() takes the Lock object returned by lock(); the old
    # code passed an unrelated string ('user-lck-laoqian'), so the lock
    # held here was never actually released.
    dlm.unlock(success)
else:
    print('lock failed')
def run_command(redis, name, key, validity, retry_delay, cmd, termseq,
                restart_cmd, **_):
    """Run `cmd` as a subprocess under a distributed lock.

    The lock is re-acquired (extended) while the subprocess is alive; if the
    lock is lost the subprocess is terminated via `termseq` (a sequence of
    (signal, timeout-ms) pairs). With `restart_cmd` set, the command is
    restarted whenever it exits, until SIGINT/SIGTERM clears state['running'].
    Times are milliseconds throughout (time_ms()).
    """
    termseq = parse_termseq(termseq)
    dlm = redlock.Redlock(redis)
    # Stable lock value so re-acquisition extends our own lock.
    lock_value = key or redlock.get_unique_id()

    def get_lock():
        # Poll until the lock is acquired or shutdown is requested.
        # Returns the Lock, or None if state['running'] went False first.
        log.debug('Polling for lock name:%s, key:%s', name, lock_value)
        t0 = datetime.datetime.now()
        lock = None
        while not lock and state['running']:
            try:
                lock = dlm.lock(name, lock_value, validity)
                log.debug('Got lock name:%s, key:%s in %s', name, lock_value,
                          datetime.datetime.now() - t0)
                return lock
            except redlock.MultipleRedlockException:
                time.sleep(retry_delay / 1000.0)

    def run_proc_with_lock(proc, lock):
        """ Run proc, until finished or lock is lost """
        t0 = time_ms()
        while lock:
            # Sleep in small slices (<=100ms, <=10% of validity) until half
            # the lock validity has elapsed, polling the subprocess each time.
            while time_ms() < (t0 + lock.validity * 0.5):
                delta_t = (t0 + lock.validity * 0.5) - time_ms()
                sleep_time = min(delta_t, lock.validity * 0.1, 100.0)
                time.sleep(sleep_time / 1000.0)
                return_code = proc.poll()
                if isinstance(return_code, int):
                    # Subprocess finished: release the lock and report.
                    log.info('Process exited with exit code %s', return_code)
                    log.info('Release lock name:%s, key:%s', name, lock_value)
                    dlm.unlock(lock)
                    return return_code
            # subprocess still running, extend lock
            t0 = time_ms()
            try:
                log.debug('Extend lock name:%s, key:%s, validity:%s',
                          name, lock_value, validity)
                lock = dlm.lock(name, lock_value, validity)
            except redlock.MultipleRedlockException:
                lock = None
        # lost lock, kill subprocess
        log.info('Lost lock name:%s', name)
        return terminate_proc(proc, termseq)

    def terminate_proc(proc, termseq):
        # first check if the subprocess has already exited
        return_code = proc.poll()
        if isinstance(return_code, int):
            return return_code
        # run through termseq: escalate signal by signal, waiting `timeout`
        # ms (in 50ms steps) after each before moving to the next.
        for sig, timeout in termseq:
            t0 = time_ms()
            logging.debug('Send signal %s to pid %s, wait shutdown for %sms',
                          sig, proc.pid, int(timeout))
            proc.send_signal(sig)
            while (t0 + timeout) > time_ms():
                time.sleep(0.05)
                return_code = proc.poll()
                if isinstance(return_code, int):
                    return return_code

    def sighandler(signum, _, proc):
        # SIGINT/SIGTERM: stop the polling/restart loops and shut down the
        # child via the escalation sequence.
        if signum in (signal.SIGINT, signal.SIGTERM):
            state['running'] = False
            terminate_proc(proc, termseq)

    def __inner():
        # One acquire-run-release cycle.
        lock = get_lock()
        proc = subprocess.Popen(cmd)
        log.info('Run [%s] %s', proc.pid, ' '.join(cmd))
        handler = functools.partial(sighandler, proc=proc)
        signal.signal(signal.SIGINT, handler)
        signal.signal(signal.SIGTERM, handler)
        return run_proc_with_lock(proc, lock)

    if restart_cmd:
        # Keep restarting the command until shutdown is requested.
        while state['running']:
            __inner()
    else:
        return __inner()
import os import sys import optparse import redlock registry = [] from flask_redis import FlaskRedis redis_store = FlaskRedis() # 35 times x 200 ms = 7 seconds trying to acquire, 200 millis each time rlock = redlock.Redlock([ { "host": "localhost", "port": 6379, "db": 0 }, ], retry_count=35) from .application import app from .db import db, upgrader redis_store.init_app(app) assert db is not None # ignore pyflakes from flask import render_template @app.route("/")
# BUG FIX: `os.environ` is used in the non-localhost branch below but `os`
# was never imported, raising NameError in production.
import os
import time
from datetime import datetime
from urllib.parse import urlparse

import pytz
import redlock
from dateutil.parser import parse
from googleapiclient.http import MediaFileUpload, MediaInMemoryUpload

from berkeleytime.settings import IS_LOCALHOST
from forms.utils import get_config_dict, CACHED_SHEETS, DRIVE_SERVICE

if IS_LOCALHOST:
    # Local dev: single redis container on the default port.
    dlm = redlock.Redlock([
        {'host': 'redis', 'port': 6379},
    ])
else:
    # Production: connection details come from the REDIS_URL env variable.
    REDIS = urlparse(os.environ.get('REDIS_URL'))
    dlm = redlock.Redlock([
        {'host': REDIS.hostname, 'port': REDIS.port,
         'password': REDIS.password},
    ])


class ExpiredException(Exception):
    """Raised when a locked/cached resource is past its validity."""
    pass
# NOTE(review): `os.environ` and `redlock.Redlock` are used below but no
# imports for them were visible in this file — added here; harmless if they
# are also imported elsewhere.
import os

import redlock
from urllib.parse import urlparse

from rest_framework import serializers

from cccatalog import settings
from cccatalog.api.models import ShortenedLink

# Create a lock inside of Redis to ensure that multiple server workers don't
# try to create the same shortened URL.
__parsed_redis_url = urlparse(settings.CACHES['locks']['LOCATION'])
__host, __port = __parsed_redis_url.netloc.split(':')
__db_num = __parsed_redis_url.path[1] if __parsed_redis_url.path else None
__password = os.environ.get("REDIS_PASSWORD")
# NOTE(review): retry_count=1 means a single attempt with a 1 second delay;
# the previous comment claimed clients retry "infinitely" — confirm which
# behaviour is intended.
url_lock = redlock.Redlock(
    [{
        "host": __host,
        "port": __port,
        "db": __db_num,
        "password": __password,
    }],
    retry_count=1,
    retry_delay=1000,
)


class ShortenedLinkResponseSerializer(Serializer):
    shortened_url = URLField(
        help_text="A shortened link on the `shares.cc` domain.")


class ShortenedLinkSerializer(ModelSerializer):
    """
    A single shortened URL, mapping a shortened path at shares.cc to a full
    URL elsewhere on the CC Catalog platform.
    """