def wrapper(*args, **kwargs):
    """Run ``func`` under a per-instance redis lock.

    Resolves the job from the ``job_id`` argument, takes a non-blocking redis
    lock named after the job's instance, then either runs ``func`` (releasing
    the lock afterwards, with retries on the release) or reschedules the
    celery task when the lock is busy or redis is unreachable.
    """
    job_id = get_named_arg('job_id', func, args, kwargs)
    logging.debug('args: %s -- kwargs: %s', args, kwargs)
    job = models.Job.query.get(job_id)
    logger = get_instance_logger(job.instance, task_id=job_id)
    # the bound celery task is the 'self' positional argument of the wrapped func
    task = args[func.func_code.co_varnames.index('self')]
    # initialize so that a non-raising retry() below (e.g. a task configured
    # with throw=False) cannot leave 'locked' undefined -> NameError
    locked = False
    try:
        lock = redis.lock('tyr.lock|' + job.instance.name, timeout=self.timeout)
        locked = lock.acquire(blocking=False)
    except ConnectionError:
        logging.exception('Exception with redis while locking. Retrying in 10sec')
        task.retry(countdown=10, max_retries=10)
    if not locked:
        countdown = 300
        logger.info('lock on %s retry %s in %s sec', job.instance.name, func.__name__, countdown)
        task.retry(countdown=countdown, max_retries=10)
    else:
        try:
            logger.debug('lock acquired on %s for %s', job.instance.name, func.__name__)
            return func(*args, **kwargs)
        finally:
            logger.debug('release lock on %s for %s', job.instance.name, func.__name__)
            # sometimes we are disconnected from redis when we want to release the lock,
            # so we retry only the release
            try:
                retrying.Retrying(stop_max_attempt_number=5, wait_fixed=1000).call(
                    lock_release, lock, logger
                )
            except ValueError:  # LockError(ValueError) since redis 3.0
                logger.exception(
                    "impossible to release lock: continue but following task may be locked :("
                )
def wrapper(*args, **kwargs):
    """Run ``func`` under a per-instance redis lock, retrying the task if busy.

    Resolves the job from the ``job_id`` argument, takes a non-blocking redis
    lock named after the job's instance, runs ``func`` and always releases the
    lock; when the lock is already held, the celery task is rescheduled.
    """
    job_id = get_named_arg('job_id', func, args, kwargs)
    logging.debug('args: %s -- kwargs: %s', args, kwargs)
    job = models.Job.query.get(job_id)
    logger = get_instance_logger(job.instance)
    lock = redis.lock('tyr.lock|' + job.instance.name, timeout=self.timeout)
    if not lock.acquire(blocking=False):
        countdown = 60
        # log the actual retry delay: the previous message claimed '300sec'
        # while the task was in fact retried after 60 seconds
        logger.info('lock on %s retry %s in %s sec', job.instance.name, func.__name__, countdown)
        # the bound celery task is the 'self' positional argument of the wrapped func
        task = args[func.func_code.co_varnames.index('self')]
        task.retry(countdown=countdown, max_retries=10)
    else:
        try:
            logger.debug('lock acquired on %s for %s', job.instance.name, func.__name__)
            return func(*args, **kwargs)
        finally:
            logger.debug('release lock on %s for %s', job.instance.name, func.__name__)
            lock.release()