Example #1
def main():
    r = redis.Redis(host='redis')

    # Keep track of "how many tasks are in progress"
    conclusion_logger = ConclusionLogger(r)

    while True:
        time.sleep(randomize_sleep_time(CLEANER_SLEEPS_FOR))
        log(f"Waking up cleaner {name()}")
        conclusion_logger.update_todos_length()

        todos_len = get_todos_len(r)
        log(f'For {name()} still {todos_len} tasks to execute')

        # TODO - instead of retrieving all the keys, maybe use SSCAN
        # (see the sketch right after this function)
        in_progress_items = random_delay(lambda: r.smembers('in_progress'))

        # Some logging in case the workload appears to be done, for the
        # current batch at least
        log(f"Cleaner {name()} items in progress: {in_progress_items}")
        conclusion_logger.log_conclusions()

        if not in_progress_items:
            continue

        in_progress_lock_keys = [f'{int(num)}-lock' for num in in_progress_items]

        in_progress_responses = random_delay(lambda: r.mget(in_progress_lock_keys))

        keys_to_reset = [
            int(item)
            for resp, item in zip(in_progress_responses, in_progress_items)
            if resp is None
        ]

        if not ONLY_LOG_SUMMARY:
            reportable_keys = f"{keys_to_reset if len(keys_to_reset) < 200 else '>= too keys. Will not display them'}"
        else:
            reportable_keys = ""

        log(f"!!!~~~Cleaner {name()} will reset {len(keys_to_reset)} keys. "
            f"{reportable_keys}", force=True
            )
        pipeline = r.pipeline()

        for idx, (resp, lock_key, key) in enumerate(zip(in_progress_responses, in_progress_lock_keys, in_progress_items)):
            if resp is None:
                pipeline.delete(lock_key)
                pipeline.smove(IN_PROGRESS_QUEUE_NAME, TODOS_QUEUE_NANE, key)

        random_delay(lambda: pipeline.execute())
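

# A minimal sketch of the SSCAN idea from the TODO above; this is an assumption
# about how the cleaner could iterate 'in_progress' in chunks rather than pulling
# everything with one SMEMBERS call. sscan_iter() is redis-py's cursor-based
# wrapper around SSCAN; the chunk size of 100 is arbitrary.
def iter_in_progress_items(r, chunk_size=100):
    for member in r.sscan_iter('in_progress', count=chunk_size):
        yield int(member)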
def get_redis_set_len(r, set_name):
    """
    :param redis.Redis r:
    :param set_name: name of the set to measure
    :return: the set's cardinality, or -1 if it could not be read
    """
    try:
        return int(random_delay(lambda: r.scard(set_name)))
    except Exception:
        return -1


def get_redis_set_members(r, set_name):
    """
    :param redis.Redis r:
    :param set_name: name of the set to read
    :return: the members as ints, or an empty list if the set could not be read
    """
    try:
        return [int(i) for i in random_delay(lambda: r.smembers(set_name))]
    except Exception:
        return []


def get_redis_list_members(r, set_name, begin=0, end=-1):
    """
    :param redis.Redis r:
    :param set_name: name of the list to read
    :param begin: start index passed to LRANGE
    :param end: end index passed to LRANGE (-1 means the last element)
    :return: the elements as ints, or an empty list if the list could not be read
    """
    try:
        return [int(i) for i in random_delay(lambda: r.lrange(set_name, begin, end))]
    except Exception:
        return []
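

# A small usage sketch, not part of the original examples: it assumes the
# TODOS_QUEUE_NANE and IN_PROGRESS_QUEUE_NAME constants and the log() helper
# used elsewhere in these snippets, and simply reports both queue sizes at once.
def log_queue_sizes(r):
    log(f"todos: {get_redis_set_len(r, TODOS_QUEUE_NANE)}, "
        f"in progress: {get_redis_set_len(r, IN_PROGRESS_QUEUE_NAME)}")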
Example #5
def _request_work(r):
    """
    This just retries to get work, until redis is up and running
    :param r:
    :return:
    """
    while True:
        try:
            return random_delay(lambda: r.srandmember(TODOS_QUEUE_NANE))
        except Exception as err:
            log(f"Worker {name()} - exception: {err}", force=True)
            pass  # redis not ready
Example #6
def pull_work(r):
    workload = _request_work(r)
    # workload = random_delay(r.srandmember, TODOS_QUEUE_NANE)

    while workload is not None:
        workload = int(workload)
        # problem here: maybe after moving the task, the cleaner
        # comes and moves it right back... we'll do at-least-once processing then
        # did_move = r.smove(TODOS_QUEUE_NANE, IN_PROGRESS_QUEUE_NAME, workload)
        did_move = random_delay(lambda: r.smove(TODOS_QUEUE_NANE, IN_PROGRESS_QUEUE_NAME, workload))

        if did_move:
            pipeline = random_delay(lambda: r.pipeline())
            random_delay(lambda: pipeline.set(f"{workload}-lock", LOCK_VALUE))
            random_delay(lambda: pipeline.pexpire(f"{workload}-lock", int(TIMEOUT_WORK * 1000)))
            random_delay(lambda: pipeline.execute())
            yield workload

        workload = _request_work(r)
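

# A hedged alternative, not in the original example: redis-py's set() accepts px=,
# so the lock value and its expiry used in pull_work() could be written atomically
# with a single command instead of a SET + PEXPIRE pipeline.
def set_work_lock(r, workload):
    r.set(f"{workload}-lock", LOCK_VALUE, px=int(TIMEOUT_WORK * 1000))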
Example #7
def acknowledge_completion(workload, result, r):
    """
    :param workload:
    :param result:
    :param redis.Redis r:
    :return: success status
    """
    ttl = r.ttl(f"{workload}-lock")
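    # Redis returns -2 only when the key no longer exists, so anything >= -1
    # means the lock is still there and the work finished before its timeout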
    if ttl >= -1:
        pipeline = random_delay(lambda: r.pipeline())
        random_delay(lambda: pipeline.smove(IN_PROGRESS_QUEUE_NAME, DONE_QUEUE_NAME, workload))

        # now... it can happen that the task is moved back
        # to "todos" by the cleaner at this point, so it will
        # be processed twice. We're going for at-least-once
        # processing (vs at most once processing)
        random_delay(lambda: pipeline.delete(f"{workload}-lock"))
        random_delay(lambda: pipeline.execute())
        log(f"Worker {name()} executed workload: {workload} to get {result}")
        return True
    else:
        log(f"Worker {name()} - :( sadly our work was rejected for workload {workload} even though we gor {result}")
        return False
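

# A sketch, not part of the original examples, of how pull_work() and
# acknowledge_completion() fit together in a worker; do_work() is a hypothetical
# placeholder for whatever the task actually computes.
def worker_loop(r):
    for workload in pull_work(r):
        result = do_work(workload)
        acknowledge_completion(workload, result, r)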