Example #1
# Imports assumed by this snippet; `util` is a project-local module
# providing the Redis connection helper.
import cPickle

import util


def poll_for_jobs(registered_functions, queue_name='default', rv_ttl=60):
    """
    Poll the given queue for jobs, run each job whose name matches one of
    the registered functions, and store the function's return value under
    the result key (given as part of the job).

    Parameters
    ----------
    registered_functions: dict
        Mapping from the name received as part of the job to the
        actual function to call.
    queue_name: string ['default']
        Name of the queue to wait on.
    rv_ttl: float [60]
        Result's time to live, in seconds.
    """
    redis_conn = util.get_redis_conn()
    print("poll_for_jobs: now listening on {}".format(queue_name))
    while True:
        # Block until a job arrives, then unpickle its payload.
        msg = redis_conn.blpop(queue_name)
        function_name, kwargs, key = cPickle.loads(msg[1])
        try:
            assert function_name in registered_functions
            rv = registered_functions[function_name](**kwargs)
        except Exception as e:
            # Store the exception itself so the submitter can see the failure.
            rv = e
        if rv is not None:
            # Publish the pickled result under the job's key, with a TTL.
            redis_conn.set(key, cPickle.dumps(rv))
            redis_conn.expire(key, rv_ttl)
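
A minimal usage sketch: a worker process that exposes a single function under the (hypothetical) name 'add':

def add(a, b):
    return a + b

# Blocks forever, serving jobs from the 'default' queue.
poll_for_jobs({'add': add}, queue_name='default', rv_ttl=120)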
Example #2
# Imports assumed by this snippet; `util` and `DelayedResult` are
# project-local (see the sketch after this example for DelayedResult).
import cPickle
import uuid

import util


def submit_job(function_name, kwargs, queue_name='default'):
    """
    Parameters
    ----------
    function_name: string
        Name of the function that will be expected
        by the waiting process.
    kwargs: dict
        Keyword arguments to pass to the function.
    queue_name: string ['default']
        Name of the queue to push the job onto.
    """
    redis_conn = util.get_redis_conn()
    # Unique key under which the worker will store the pickled result.
    key = '%s:result:%s' % (queue_name, str(uuid.uuid4()))
    redis_conn.rpush(
        queue_name, cPickle.dumps((function_name, kwargs, key)))
    return DelayedResult(key, redis_conn)
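
`DelayedResult` is not defined in these snippets; a minimal sketch consistent with how `poll_for_jobs` stores results might look like this (the `return_value` property name is an assumption):

class DelayedResult(object):
    """Handle for a result that a worker will store under `key`."""

    def __init__(self, key, redis_conn):
        self.key = key
        self.redis_conn = redis_conn
        self._rv = None

    @property
    def return_value(self):
        # Poll Redis once; None until the worker has stored a result.
        if self._rv is None:
            rv = self.redis_conn.get(self.key)
            if rv is not None:
                self._rv = cPickle.loads(rv)
        return self._rv

Typical use: res = submit_job('add', {'a': 1, 'b': 2}); once a worker has run the job, res.return_value holds the unpickled result.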
Example #3
        Number of CPUs that each worker will use.
    mem: int [1500]
        Maximum amount of memory that each worker will use, in MB.
    max_time: string ['2:0:0']
        Maximum amount of time a worker may live, as 'hours:minutes:seconds'.
    result_ttl: int [43200]
        Time that the result of a job lives in Redis for reading, in seconds.
    timeout: int [320]
        Time that the worker has to respond once it starts a job, in seconds.
    async: boolean [True]
        If False, jobs run synchronously in the same thread (useful for
        debugging).
    """
    assert chunk_size > 0

    # Establish connection to Redis queue.
    redis_conn = util.get_redis_conn()
    fq = rq.Queue('failed', connection=redis_conn)
    # NOTE: `async` is the rq 0.x keyword argument; later rq versions
    # rename it to `is_async` (async became a reserved word in Python 3.7).
    q = rq.Queue(name, connection=redis_conn, async=async)

    # Check the failed queue for old jobs and get rid of them.
    failed_jobs = [j for j in fq.get_jobs() if j.origin == name]
    for j in failed_jobs:
        j.cancel()
    print("Canceled {} failed jobs left over from the previous run.".format(
        len(failed_jobs)))

    # Empty the queue and fill it with jobs (result_ttl is the caching
    # time of results, in seconds).
    print("Queueing jobs...")
    q.empty()
    t = time.time()
    if chunk_size > 1:
        chunked_args_list = [
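
The snippet is truncated here. A typical way to build chunked_args_list from a flat args_list (both names assumed from context, not confirmed by the original) is:

# Hypothetical reconstruction: split args_list into chunks of chunk_size.
chunked_args_list = [
    args_list[i:i + chunk_size]
    for i in range(0, len(args_list), chunk_size)
]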