# Relies on module-level objects assumed to be imported elsewhere:
# `redis_connection`, `QueryTask`, `celery` (the Celery app), and `logger`.
def cleanup_tasks():
    # In case of a cold restart of the workers, there might be jobs that still
    # have their "lock" object but aren't really going to run. This job removes them.
    lock_keys = redis_connection.keys("query_hash_job:*")  # TODO: use a set instead of the KEYS command
    if not lock_keys:
        return

    query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]
    logger.info("Found %d locks", len(query_tasks))

    inspect = celery.control.inspect()
    active_tasks = inspect.active()
    if active_tasks is None:
        active_tasks = []
    else:
        active_tasks = active_tasks.values()

    # Flatten the per-worker task lists into one set of active task ids.
    all_tasks = set()
    for task_list in active_tasks:
        for task in task_list:
            all_tasks.add(task['id'])

    logger.info("Active jobs count: %d", len(all_tasks))

    for i, t in enumerate(query_tasks):
        if t.ready():
            # If the locked task is already ready (failed, finished, revoked),
            # we don't need the lock anymore.
            logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
            redis_connection.delete(lock_keys[i])
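
# --- Scheduling sketch (assumption, not from the original source) ---
# A cleanup job like this is typically run on a fixed interval. Below is a
# minimal sketch of wiring it into Celery's beat scheduler. The task name
# "redash.tasks.cleanup_tasks" and the 5-minute interval are illustrative
# assumptions; the function would also need to be registered as a Celery
# task (e.g. with @celery.task) for the beat entry to resolve. The config
# key shown is the Celery 3.x spelling (CELERYBEAT_SCHEDULE); Celery 4+
# uses `beat_schedule`.
from datetime import timedelta

celery.conf.CELERYBEAT_SCHEDULE = {
    'cleanup_tasks': {
        'task': 'redash.tasks.cleanup_tasks',  # assumed registered task name
        'schedule': timedelta(minutes=5),      # assumed cleanup interval
    },
}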