Example #1
import json
import logging

from bottle import route, run, template  # route/run/template match Bottle's API

import jobs


def ids_to_json(id_list):
    # Serialize the snapshot of every listed job id as a JSON document.
    return json.dumps({'jobs': [job_queue.fetch_snapshot(job_id)._asdict()
                       for job_id in id_list]})


@route('/api/all')
def all_jobs():
    return ids_to_json(job_queue.debug_fetch_all_ids())


@route('/api/inprogress')
def inprogress_jobs():
    return ids_to_json(job_queue.debug_fetch_inprogress_ids())


@route('/job/<job_id:path>')
def job_snapshot(job_id):
    try:
        job = job_queue.fetch_snapshot(job_id)
    except Exception as e:
        # Log the failed lookup and return an empty response.
        logging.exception(e)
        return

    return template(TEMPLATE_JOB_VIEW, job=job)


if __name__ == '__main__':
    job_queue = jobs.connect_to_queue()
    # Bottle's built-in runner on the gevent server backend, with auto-reload
    # and debug tracebacks enabled.
    run(reloader=True, debug=True, server='gevent')
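For illustration only, a minimal client-side check of the JSON API above, assuming the app is served on Bottle's default endpoint (http://127.0.0.1:8080) and that the requests package is installed:

import requests

# /api/all returns {'jobs': [...]} with one snapshot dict per known job id.
response = requests.get('http://127.0.0.1:8080/api/all')
for snapshot in response.json()['jobs']:
    print(snapshot)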
Example #2
import argparse
import sys

import jobs

DEFAULT_REDIS = 'localhost:6379'


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    _arg = parser.add_argument
    _arg('--redis', metavar='PATH|HOST:PORT', type=str, default=DEFAULT_REDIS,
         action='store', help='Redis endpoint specified as host:port - '
         'default: %s' % DEFAULT_REDIS)
    _arg('--flushdb', action='store_const', default=False, const=True)
    _arg('jobs', metavar='JOBTYPE_USERDATA_:ARGS', type=str, nargs='*')
    args = parser.parse_args()

    # A 'host:port' value selects a TCP connection; a value without a colon is
    # treated as the path of a Redis unix domain socket.
    endpoint = args.redis.split(':', 1)
    if len(endpoint) == 2:
        job_queue = jobs.connect_to_queue(endpoint[0], int(endpoint[1]))
    else:
        job_queue = jobs.connect_to_unix_socket_queue(endpoint[0])

    if args.flushdb:
        job_queue.redis_connection.flushall()

    if len(args.jobs) == 0:
        sys.exit(0)

    def job_from_def(job_def):
        count, job_type, userdata, args = job_def
        count = int(count or '1')
        assert count >= 1
        job_ids = []
        for i_job in xrange(count):
Example #3
         'would run the command /bin/parser up to a maximum of 4 '
         'times in parallel with the job type identified as \'parse\', using '
         'automatic heartbeats. COMMAND can also contain fixed arguments')
    args = parser.parse_args()
    if (args.redis_unix_socket != '') and (args.redis_tcp != ''):
        print('error: both TCP and unix socket endpoints specified, please'
              ' choose one or the other')
        sys.exit(1)

    try:
        if args.redis_unix_socket:
            job_queue = jobs.connect_to_unix_socket_queue(
                args.redis_unix_socket)
        else:
            endpoint = args.redis_tcp or DEFAULT_REDIS
            job_queue = jobs.connect_to_queue(**parse_hostport(endpoint))
    except redis.exceptions.ConnectionError as error:
        print('error: Could not connect to job queue: %s' % error)
        sys.exit(1)

    runner_args = parse_mappings(job_queue, args.mapping)
    greenlets = [gevent.spawn(runner_greenlet, *argtuple)
                 for argtuple in runner_args]
    greenlets.append(gevent.spawn(monitor_greenlet, job_queue,
                                  args.monitor_interval,
                                  args.orphaned_timeout))

    gevent.signal(signal.SIGINT, lambda: signal_handler(job_queue, greenlets))

    log.info('main: Spawned %d greenlets. Waiting on jobs...', len(greenlets))
    gevent.joinall(greenlets)
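The parse_hostport helper called above is not part of this excerpt; the sketch below is a hypothetical stand-in, assuming jobs.connect_to_queue accepts host and port keyword arguments (mirroring the positional call in Example #2):

def parse_hostport(endpoint, default_port=6379):
    # Split a 'host:port' string into keyword arguments for connect_to_queue();
    # the port falls back to Redis' standard 6379 when omitted.
    host, _, port = endpoint.partition(':')
    return {'host': host, 'port': int(port) if port else default_port}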