Exemple #1
0
def list_workers_api():
    """Return a JSON-serializable summary of all registered RQ workers.

    For each worker: its name, the queues it listens on, its state (reported
    as "suspended" when the RQ connection is suspended), the resolved host IP
    (best effort), the current job id, and the failed-job count.

    Returns:
        dict: ``{'data': [<worker summary dict>, ...]}``
    """
    rq_workers = []
    for worker in Worker.all():
        # Best-effort DNS lookup; fall back to "N/A" when the hostname cannot
        # be resolved (e.g. container hostnames absent from DNS).
        try:
            host_ip = socket.gethostbyname(worker.hostname)
        except socket.gaierror:
            host_ip = "N/A"

        rq_workers.append({
            'worker_name': worker.name,
            'listening_on': ', '.join(queue.name for queue in worker.queues),
            'status': (worker.get_state()
                       if not is_suspended(get_current_connection())
                       else "suspended"),
            'host_ip': host_ip,
            'current_job_id': worker.get_current_job_id(),
            'failed_jobs': worker.failed_job_count,
        })
    return {
        'data': rq_workers,
    }
Exemple #2
0
def worker(cli_config, burst, logging_level, name, results_ttl, worker_ttl,
           job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, queues, rate_limit, **options):
    """Starts an RQ worker."""
    # CLI flags win; fall back to the config file for queues / Sentry DSN.
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    # Write our PID to the requested pidfile so supervisors can track us.
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(h) for h in exception_handler]

        # Refuse to start while RQ is suspended; exit non-zero for scripts.
        if is_suspended(cli_config.connection):
            click.secho(
                'RQ is currently suspended, to resume job execution run "rq resume"',
                fg='red')
            sys.exit(1)

        queues = [
            cli_config.queue_class(queue,
                                   connection=cli_config.connection,
                                   job_class=cli_config.job_class)
            for queue in queues
        ]
        worker = cli_config.worker_class(
            queues,
            name=name,
            connection=cli_config.connection,
            default_worker_ttl=worker_ttl,
            default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class,
            queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            limit=rate_limit)

        # Should we configure Sentry?
        if sentry_dsn:
            # Lazy imports: raven is only required when Sentry is enabled.
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, worker)

        worker.work(burst=burst, logging_level=logging_level)
    except ConnectionError as e:
        # Redis unreachable: report the error and exit non-zero.
        print(e)
        sys.exit(1)
Exemple #3
0
def worker(url, config, burst, name, worker_class, job_class, queue_class,
           connection_class, path, results_ttl, worker_ttl, verbose, quiet,
           sentry_dsn, exception_handler, pid, queues):
    """Starts an RQ worker."""

    # Prepend extra module search paths (colon-separated) so job callables
    # importable only from those paths can be resolved.
    if path:
        sys.path = path.split(':') + sys.path

    # CLI flags win; fall back to the config file for queues / Sentry DSN.
    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    # Write our PID to the requested pidfile so supervisors can track us.
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    # Resolve dotted-path class names into actual classes.
    connection_class = import_attribute(connection_class)
    conn = connect(url, config, connection_class)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)
    # Resolve dotted-path handler names into callables.
    exception_handlers = [import_attribute(h) for h in exception_handler]

    # Refuse to start while RQ is suspended; exit non-zero for scripts.
    if is_suspended(conn):
        click.secho(
            'RQ is currently suspended, to resume job execution run "rq resume"',
            fg='red')
        sys.exit(1)

    try:
        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class,
                         queue_class=queue_class,
                         exception_handlers=exception_handlers or None)

        # Should we configure Sentry?
        if sentry_dsn:
            # Lazy imports: raven is only required when Sentry is enabled.
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        # Redis unreachable: report the error and exit non-zero.
        print(e)
        sys.exit(1)
Exemple #4
0
def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, disable_job_desc_logging, verbose, quiet, sentry_dsn,
           exception_handler, pid, disable_default_exception_handler, max_jobs, with_scheduler,
           queues, log_format, date_format, **options):
    """Starts an RQ worker."""
    # CLI flags win; fall back to the config file for queues / DSN / name.
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    # Write our PID to the requested pidfile so supervisors can track us.
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(h) for h in exception_handler]

        # Refuse to start while RQ is suspended; exit non-zero for scripts.
        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler,
            log_job_description=not disable_job_desc_logging
        )

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        # if --verbose or --quiet, override --logging_level
        if verbose or quiet:
            logging_level = None

        worker.work(burst=burst, logging_level=logging_level,
                    date_format=date_format, log_format=log_format,
                    max_jobs=max_jobs, with_scheduler=with_scheduler)
    except ConnectionError as e:
        # Redis unreachable: report the error and exit non-zero.
        print(e)
        sys.exit(1)
Exemple #5
0
def worker(url, config, burst, name, worker_class, job_class, queue_class, path, results_ttl, worker_ttl,
           verbose, quiet, sentry_dsn, exception_handler, pid, queues):
    """Starts an RQ worker."""

    # Prepend extra module search paths (colon-separated) so job callables
    # importable only from those paths can be resolved.
    if path:
        sys.path = path.split(':') + sys.path

    # CLI flags win; fall back to the config file for queues / Sentry DSN.
    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    # Write our PID to the requested pidfile so supervisors can track us.
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    conn = connect(url, config)
    cleanup_ghosts(conn)
    # Resolve dotted-path class names into actual classes.
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)
    # Resolve dotted-path handler names into callables.
    exception_handlers = [import_attribute(h) for h in exception_handler]

    # Refuse to start while RQ is suspended; exit non-zero for scripts.
    if is_suspended(conn):
        click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
        sys.exit(1)

    try:
        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class,
                         queue_class=queue_class,
                         exception_handlers=exception_handlers or None)

        # Should we configure Sentry?
        if sentry_dsn:
            # Lazy imports: raven is only required when Sentry is enabled.
            from raven import Client
            from rq.contrib.sentry import register_sentry
            from raven.transport.http import HTTPTransport
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        # Redis unreachable: report the error and exit non-zero.
        print(e)
        sys.exit(1)
Exemple #6
0
def manage_workers():
    """Boot the core service and keep queues staffed.

    Resumes the RQ connection if it is suspended, requeues terminated failed
    jobs, starts one long-lived worker per core queue, then loops spawning
    burst workers for any queue that needs more capacity until the
    connection is suspended again.
    """
    log.info(f"Starting core service in {CONFIG_ENV} env")
    log.debug(f"Using Redis connection {REDIS_HOST}:{REDIS_PORT}")

    remove_zombie_workers()
    # remove_stale_workers()
    # start main worker
    with Connection(CONN):
        # A suspended connection would block all workers; resume it first.
        if is_suspended(CONN):
            log.info("Resuming connection for startup")
            resume(CONN)

        requeue_terminated_fail_jobs()

        # One long-lived worker per core queue.
        initial_workers = (
            ("Starting worker for BACKTEST queue", "backtest"),
            ("Starting worker for PAPER queues", "paper"),
            ("Starting worker for LIVE queues", "live"),
            ("Starting worker for TA queue", "ta"),
        )
        log.info("Starting initial workers")
        for debug_message, queue_name in initial_workers:
            log.debug(debug_message)
            spawn_worker(queue_name)

        # create paper/live queues when needed
        while not is_suspended(CONN):
            for queue_name in QUEUE_NAMES:
                for _ in range(workers_required(queue_name)):
                    spawn_worker(queue_name, burst=True)
                    time.sleep(5)

            time.sleep(2)
        # Loop only exits when the connection is suspended (no break above).
        log.warning("Instance is shutting down")
Exemple #7
0
File: cli.py — Project: nvie/rq
def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, disable_default_exception_handler, queues,
           log_format, date_format, **options):
    """Starts an RQ worker."""
    # CLI flags win; fall back to the config file for queues / DSN / name.
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    # Write our PID to the requested pidfile so supervisors can track us.
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(h) for h in exception_handler]

        # Refuse to start while RQ is suspended; exit non-zero for scripts.
        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler
        )

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        worker.work(burst=burst, logging_level=logging_level, date_format=date_format, log_format=log_format)
    except ConnectionError as e:
        # Redis unreachable: report the error and exit non-zero.
        print(e)
        sys.exit(1)
Exemple #8
0
def list_workers_api():
    """Return a JSON-serializable summary of all registered RQ workers.

    Each entry carries the worker's name, listened queues, state ("suspended"
    while the RQ connection is suspended), current job id, and success/failure
    counters, wrapped as ``{'data': [...]}``.
    """
    rq_workers = [
        {
            'worker_name': w.name,
            'listening_on': ', '.join(q.name for q in w.queues),
            'status': (w.get_state()
                       if not is_suspended(get_current_connection())
                       else "suspended"),
            'current_job_id': w.get_current_job_id(),
            'success_jobs': w.successful_job_count,
            'failed_jobs': w.failed_job_count,
        }
        for w in Worker.all()
    ]

    return {
        'data': rq_workers,
    }
Exemple #9
0
def get_workers_dashboard():
    """Render the workers dashboard page with the current suspension flag."""
    suspended = is_suspended(connection=get_current_connection())
    return render_template('rqmonitor/workers.html', is_suspended=suspended)