Example #1
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Other default arguments
    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN', None)

    setup_loghandlers(args)
    setup_redis(args)
    try:
        queues = map(Queue, args.queues)
        w = Worker(queues, name=args.name)
        w.push_exc_handler(do_job_failure_handler_have_a_rest)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
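            # the raven-based register_sentry(client, worker) pushes an exception
            # handler onto the worker so failed jobs are reported to Sentry via the client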
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #2
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        register_sentry(client, self)

        self.logger = logbook.Logger(f"WORKER:{os.getpid()}")
        logger_group.add_logger(self.logger)
        setup_logging()
Example #3
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Other default arguments
    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN', None)

    setup_loghandlers(args)
    setup_redis(args)
    try:
        queues = map(Queue, args.queues)
        w = Worker(queues, name=args.name)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #4
    def start_worker(self, node=HOSTNAME, logging_level="INFO"):
        if manager.debug:
            logger.warning("running in debug mode")
            return

        name = "{0}:{1}".format(node, uuid.uuid4().hex[:8])
        q_names = self.queues
        cleanup_ghosts(manager.connection)

        rqs = [
            Queue(queue, connection=manager.connection) for queue in q_names
        ]

        worker = FlaskWorker(
            rqs,
            name=name,
            connection=manager.connection,
            default_worker_ttl=manager.worker_ttl,
            default_result_ttl=manager.result_ttl,
            job_class=Job,
            queue_class=Queue,
            exception_handlers=None,
        )

        if manager.sentry_dsn:
            client = Client(manager.sentry_dsn)
            register_sentry(client, worker)

        worker.work(burst=False, logging_level=logging_level)
Example #5
def manage_workers():
    # import before starting the worker to avoid loading during the worker process
    # from kryptos.strategy import Strategy
    # from app.extensions import jsonrpc
    # from kryptos.utils.outputs import in_docker

    # start main worker
    with Connection(CONN):
        log.info("Starting initial ML worker")

        backtest_worker = Worker(["ml"])
        register_sentry(client, backtest_worker)
        multiprocessing.Process(target=backtest_worker.work,
                                kwargs={
                                    "logging_level": "ERROR"
                                }).start()

        while True:
            q = Queue("ml", connection=CONN)
            required = len(q)
            # log.info(f"{required} workers required for {q.name}")
            for i in range(required):
                log.info(f"Creating {q.name} worker")
                worker = Worker([q.name])
                register_sentry(client, worker)
                multiprocessing.Process(target=worker.work,
                                        kwargs={
                                            "burst": True,
                                            "logging_level": "ERROR"
                                        }).start()

            time.sleep(5)
Example #6
File: cli.py Project: momofarm/rq
def worker(cli_config, burst, logging_level, name, results_ttl, worker_ttl,
           job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, queues, rate_limit, **options):
    """Starts an RQ worker."""

    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    try:

        cleanup_ghosts(cli_config.connection)
        exception_handlers = []
        for h in exception_handler:
            exception_handlers.append(import_attribute(h))

        if is_suspended(cli_config.connection):
            click.secho(
                'RQ is currently suspended, to resume job execution run "rq resume"',
                fg='red')
            sys.exit(1)

        queues = [
            cli_config.queue_class(queue,
                                   connection=cli_config.connection,
                                   job_class=cli_config.job_class)
            for queue in queues
        ]
        worker = cli_config.worker_class(
            queues,
            name=name,
            connection=cli_config.connection,
            default_worker_ttl=worker_ttl,
            default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class,
            queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            limit=rate_limit)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, worker)

        worker.work(burst=burst, logging_level=logging_level)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #7
def worker():
    """ run a worker process """
    worker = get_worker()
    sentry = flask.current_app.extensions.get('sentry')
    if sentry is not None:
        from rq.contrib.sentry import register_sentry
        register_sentry(sentry.client, worker)
    worker.work()
Example #8
File: cli.py Project: valohai/rq
def worker(url, config, burst, name, worker_class, job_class, queue_class,
           connection_class, path, results_ttl, worker_ttl, verbose, quiet,
           sentry_dsn, exception_handler, pid, queues):
    """Starts an RQ worker."""

    if path:
        sys.path = path.split(':') + sys.path

    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    connection_class = import_attribute(connection_class)
    conn = connect(url, config, connection_class)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)
    exception_handlers = []
    for h in exception_handler:
        exception_handlers.append(import_attribute(h))

    if is_suspended(conn):
        click.secho(
            'RQ is currently suspended, to resume job execution run "rq resume"',
            fg='red')
        sys.exit(1)

    try:

        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class,
                         queue_class=queue_class,
                         exception_handlers=exception_handlers or None)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #9
def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, disable_job_desc_logging, verbose, quiet, sentry_dsn,
           exception_handler, pid, disable_default_exception_handler, max_jobs, with_scheduler,
           queues, log_format, date_format, **options):
    """Starts an RQ worker."""
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:

        cleanup_ghosts(cli_config.connection)
        exception_handlers = []
        for h in exception_handler:
            exception_handlers.append(import_attribute(h))

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler,
            log_job_description=not disable_job_desc_logging
        )

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
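            # note that register_sentry here takes only the DSN: newer RQ versions back
            # this helper with sentry-sdk rather than a raven Client tied to one worker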
            register_sentry(sentry_dsn)

        # if --verbose or --quiet, override --logging_level
        if verbose or quiet:
            logging_level = None

        worker.work(burst=burst, logging_level=logging_level,
                    date_format=date_format, log_format=log_format,
                    max_jobs=max_jobs, with_scheduler=with_scheduler)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #10
def main():
    from seplis.api.connections import database
    with Connection(connection=database.queue_redis):
        w = Worker(database.queue)
        if config['sentry_dsn']:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client('sync+' + config['sentry_dsn'])
            register_sentry(client, w)
        w.work()
Example #11
def main():
    from seplis.api.connections import database     
    with Connection(connection=database.queue_redis):
        w = Worker(database.queue)
        if config['sentry_dsn']:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client('sync+'+config['sentry_dsn'])
            register_sentry(client, w)    
        w.work()
Example #12
    def test_failure_capture(self):
        """Test failure is captured by Sentry SDK"""
        from sentry_sdk import Hub
        hub = Hub.current
        self.assertIsNone(hub.last_event_id())
        queue = Queue(connection=self.testconn)
        queue.enqueue(div_by_zero)
        worker = SimpleWorker(queues=[queue], connection=self.testconn)
        register_sentry('https://[email protected]/123')
        worker.work(burst=True)
        self.assertIsNotNone(hub.last_event_id())
Example #13
    def test_failure_capture(self):
        """Test failure is captured by Sentry SDK"""
        from sentry_sdk import Hub
        hub = Hub.current
        self.assertIsNone(hub.last_event_id())
        queue = Queue(connection=self.testconn)
        queue.enqueue(div_by_zero)
        worker = SimpleWorker(queues=[queue], connection=self.testconn)
        register_sentry('https://[email protected]/123')
        worker.work(burst=True)
        self.assertIsNotNone(hub.last_event_id())
Example #14
File: cli.py Project: AlexLSB/rq
def worker(url, config, burst, name, worker_class, job_class, queue_class, path, results_ttl, worker_ttl,
           verbose, quiet, sentry_dsn, exception_handler, pid, queues):
    """Starts an RQ worker."""

    if path:
        sys.path = path.split(':') + sys.path

    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    conn = connect(url, config)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)
    exception_handlers = []
    for h in exception_handler:
        exception_handlers.append(import_attribute(h))

    if is_suspended(conn):
        click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
        sys.exit(1)

    try:

        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class,
                         queue_class=queue_class,
                         exception_handlers=exception_handlers or None)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            from raven.transport.http import HTTPTransport
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #15
def workers(ctx, queues):
    app = ctx.obj['app']
    with Connection(redis_store._redis_client), app.test_request_context():
        qs = map(Queue, queues) or [Queue()]
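        # note: on Python 3, map() returns an iterator that is always truthy, so the
        # [Queue()] fallback only takes effect on Python 2, where map() returns a list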
        worker = Worker(qs)
        g.is_rq_process = True

        sentry = app.extensions.get('sentry')
        if sentry is not None:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry.client, worker)
        worker.work()
Example #16
def workers(ctx, queues):
    app = ctx.obj['app']
    with Connection(redis_store._redis_client), app.test_request_context():
        qs = map(Queue, queues) or [Queue()]
        worker = Worker(qs)
        g.is_rq_process = True

        sentry = app.extensions.get('sentry')
        if sentry is not None:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry.client, worker)
        worker.work()
Example #17
    def handle(self, *args, **options):
        pid = options.get('pid')
        if pid:
            with open(os.path.expanduser(pid), "w") as fp:
                fp.write(str(os.getpid()))
        sentry_dsn = options.get('sentry-dsn')
        if sentry_dsn is None:
            sentry_dsn = getattr(settings, 'SENTRY_DSN', None)

        # Verbosity is defined by default in BaseCommand for all commands
        verbosity = options.get('verbosity')
        if verbosity >= 2:
            level = 'DEBUG'
        elif verbosity == 0:
            level = 'WARNING'
        else:
            level = 'INFO'
        setup_loghandlers(level)

        sentry_dsn = options.get('sentry-dsn') or getattr(
            settings, 'SENTRY_DSN', None)
        try:
            # Instantiate a worker
            worker_kwargs = {
                'worker_class': options['worker_class'],
                'queue_class': options['queue_class'],
                'job_class': options['job_class'],
                'name': options['name'],
                'default_worker_ttl': options['worker_ttl'],
            }
            w = get_worker(*args, **worker_kwargs)

            # Call use_connection to push the redis connection into LocalStack
            # without this, jobs using RQ's get_current_job() will fail
            use_connection(w.connection)
            # Close any opened DB connection before any fork
            reset_db_connections()

            if sentry_dsn:
                try:
                    from rq.contrib.sentry import register_sentry
                    register_sentry(sentry_dsn)
                except ImportError:
                    self.stdout.write(
                        self.style.ERROR(
                            "Please install sentry-sdk using `pip install sentry-sdk`"
                        ))
                    sys.exit(1)

            w.work(burst=options.get('burst', False))
        except ConnectionError as e:
            print(e)
            sys.exit(1)
Example #18
def main():  # pragma: no cover
    utils.setup_logging()
    config.log()
    gh_pr.monkeypatch_github()
    r = utils.get_redis_for_rq()
    if config.FLUSH_REDIS_ON_STARTUP:
        r.flushall()
    with rq.Connection(r):
        worker = rq.Worker(['default'])
        if config.SENTRY_URL:
            client = raven.Client(config.SENTRY_URL, transport=HTTPTransport)
            register_sentry(client, worker)
        worker.work()
Example #19
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.pid:
        with open(os.path.expanduser(args.pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(args)
    setup_redis(args)

    cleanup_ghosts()
    worker_class = import_attribute(args.worker_class)
    queue_class = import_attribute(args.queue_class)

    try:
        queues = list(map(queue_class, args.queues))
        w = worker_class(queues,
                         name=args.name,
                         default_worker_ttl=args.worker_ttl,
                         default_result_ttl=args.results_ttl,
                         job_class=args.job_class)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #20
def worker(url, config, burst, name, worker_class, job_class, queue_class,
           path, results_ttl, worker_ttl, verbose, quiet, sentry_dsn, pid,
           queues):
    """Starts an RQ worker."""

    if path:
        sys.path = path.split(':') + sys.path

    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    url = url or settings.get('REDIS_URL')
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    conn = connect(url)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)

    if worker_class.paused():
        click.secho("The worker has been paused, run reset_paused", fg='red')
        sys.exit(1)

    try:

        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #21
    def __init__(self, fqdn, worker_id):
        basename = '%s-%003d' % (fqdn, worker_id)

        self._redis = redis.StrictRedis.from_url(utils.get_redis_url())

        super(MergifyWorker, self).__init__(
            ["%s-high" % basename, "%s-low" % basename],
            connection=self._redis)

        self.push_exc_handler(self._retry_handler)

        if config.SENTRY_URL:
            client = raven.Client(config.SENTRY_URL, transport=HTTPTransport)
            register_sentry(client, self)
Example #22
File: cli.py Project: nvie/rq
def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, disable_default_exception_handler, queues,
           log_format, date_format, **options):
    """Starts an RQ worker."""
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:

        cleanup_ghosts(cli_config.connection)
        exception_handlers = []
        for h in exception_handler:
            exception_handlers.append(import_attribute(h))

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler
        )

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        worker.work(burst=burst, logging_level=logging_level, date_format=date_format, log_format=log_format)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #23
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.pid:
        with open(os.path.expanduser(args.pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(args)
    setup_redis(args)

    cleanup_ghosts()
    worker_class = import_attribute(args.worker_class)

    try:
        queues = list(map(Queue, args.queues))
        w = worker_class(queues,
                         name=args.name,
                         default_worker_ttl=args.worker_ttl,
                         default_result_ttl=args.results_ttl,
                         job_class=args.job_class)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #24
File: cli.py Project: jtushman/rq
def worker(url, config, burst, name, worker_class, job_class, queue_class, path, results_ttl, worker_ttl,
           verbose, quiet, sentry_dsn, pid, queues):
    """Starts an RQ worker."""

    if path:
        sys.path = path.split(':') + sys.path

    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    url = url or settings.get('REDIS_URL')
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    conn = connect(url)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)

    if worker_class.paused():
        click.secho("The worker has been paused, run reset_paused", fg='red')
        sys.exit(1)

    try:

        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #25
    def handle(self, *args, **options):
        pid = options.get('pid')
        if pid:
            with open(os.path.expanduser(pid), "w") as fp:
                fp.write(str(os.getpid()))
        sentry_dsn = options.get('sentry-dsn')
        if sentry_dsn is None:
            sentry_dsn = getattr(settings, 'SENTRY_DSN', None)

        # Verbosity is defined by default in BaseCommand for all commands
        verbosity = options.get('verbosity')
        if verbosity >= 2:
            level = 'DEBUG'
        elif verbosity == 0:
            level = 'WARNING'
        else:
            level = 'INFO'
        setup_loghandlers(level)

        try:
            # Instantiate a worker
            worker_kwargs = {
                'worker_class': options['worker_class'],
                'queue_class': options['queue_class'],
                'job_class': options['job_class'],
                'name': options['name'],
                'default_worker_ttl': options['worker_ttl'],
            }
            w = get_worker(*args, **worker_kwargs)

            # Call use_connection to push the redis connection into LocalStack
            # without this, jobs using RQ's get_current_job() will fail
            use_connection(w.connection)
            # Close any opened DB connection before any fork
            reset_db_connections()

            if sentry_dsn:
                try:
                    from rq.contrib.sentry import register_sentry
                    register_sentry(sentry_dsn)
                except ImportError:
                    self.stdout.write(self.style.ERROR("Please install sentry-sdk using `pip install sentry-sdk`"))
                    sys.exit(1)

            w.work(burst=options.get('burst', False))
        except ConnectionError as e:
            print(e)
            sys.exit(1)
Example #26
def main():  # pragma: no cover
    parser = argparse.ArgumentParser(description='Mergify RQ Worker.')
    parser.add_argument('--fqdn',
                        help='FQDN of the node',
                        default=utils.get_fqdn())
    parser.add_argument("worker_id", type=int, help='Worker ID')
    args = parser.parse_args()

    sentry_client = utils.prepare_service()

    worker = MergifyWorker(args.fqdn, args.worker_id)

    if sentry_client:
        register_sentry(sentry_client, worker)

    worker.work()
Example #27
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Other default arguments
    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.verbose and args.quiet:
        raise RuntimeError(
            "Flags --verbose and --quiet are mutually exclusive.")

    if args.verbose:
        level = 'DEBUG'
    elif args.quiet:
        level = 'WARNING'
    else:
        level = 'INFO'
    setup_loghandlers(level)
    setup_redis(args)

    cleanup_ghosts()

    try:
        queues = map(Queue, args.queues)
        w = Worker(queues, name=args.name)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #28
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Other default arguments
    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.verbose and args.quiet:
        raise RuntimeError("Flags --verbose and --quiet are mutually exclusive.")

    if args.verbose:
        level = 'DEBUG'
    elif args.quiet:
        level = 'WARNING'
    else:
        level = 'INFO'
    setup_loghandlers(level)
    setup_redis(args)

    cleanup_ghosts()

    try:
        queues = map(Queue, args.queues)
        w = Worker(queues, name=args.name)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #29
    def test_work_fails(self):
        """Non importable jobs should be put on the failed queue even with sentry"""
        q = Queue()
        failed_q = get_failed_queue()

        # Action
        q.enqueue('_non.importable.job')
        self.assertEqual(q.count, 1)

        w = Worker([q])
        register_sentry(FakeSentry(), w)

        w.work(burst=True)

        # Postconditions
        self.assertEqual(failed_q.count, 1)
        self.assertEqual(q.count, 0)
Example #30
    def test_work_fails(self):
        """Non importable jobs should be put on the failed queue even with sentry"""
        q = Queue()
        failed_q = get_failed_queue()

        # Action
        q.enqueue('_non.importable.job')
        self.assertEquals(q.count, 1)

        w = Worker([q])
        register_sentry(FakeSentry(), w)

        w.work(burst=True)

        # Postconditions
        self.assertEquals(failed_q.count, 1)
        self.assertEquals(q.count, 0)
Example #31
    def handle(self, *args, **options):

        logger = logging.getLogger(__name__)
        queue_name = args[0] if args else 'link_indexing'

        with Connection(redis.Redis(**settings.RQ_DATABASE)):
            queue = Queue(queue_name)
            peon = Worker(queue)

            if HAS_SENTRY:
                sclient = Client(settings.SENTRY_DSN)
                register_sentry(sclient, peon)

            try:
                peon.work()
            except redis.exceptions.ConnectionError:
                raise CommandError('Redis did not respond')
Example #32
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.pid:
        with open(os.path.expanduser(args.pid), "w") as fp:
            fp.write(str(os.getpid()))  # write the pid to a file under the home directory

    setup_loghandlers_from_args(args)
    setup_redis(args)  # set up redis: create the connection and push it onto the stack so the Queue and Worker below can pick it up

    cleanup_ghosts()  # clean up ghost workers
    worker_class = import_attribute(args.worker_class)

    try:
        queues = list(map(Queue, args.queues))  # instantiate the queues; defaults to the 'default' queue
        w = worker_class(queues, name=args.name)  # instantiate the worker

        # Should we configure Sentry?
        # raven is the Python client for Sentry (which notifies maintainers when the code raises exceptions)
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)  # start the worker
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #33
    def handle(self, *args, **options):
        pid = options.get('pid')
        if pid:
            with open(os.path.expanduser(pid), "w") as fp:
                fp.write(str(os.getpid()))
        sentry_dsn = options.get('sentry-dsn')
        try:
            # Instantiate a worker
            worker_class = import_attribute(options['worker_class'])
            queues = get_queues(*args,
                                queue_class=import_attribute(
                                    options['queue_class']))
            w = worker_class(queues,
                             connection=queues[0].connection,
                             name=options['name'],
                             exception_handlers=get_exception_handlers()
                             or None,
                             default_worker_ttl=options['worker_ttl'])

            # Call use_connection to push the redis connection into LocalStack
            # without this, jobs using RQ's get_current_job() will fail
            use_connection(w.connection)
            # Close any opened DB connection before any fork
            reset_db_connections()

            if sentry_dsn:
                try:
                    from raven import Client
                    from raven.transport.http import HTTPTransport
                    from rq.contrib.sentry import register_sentry
                    client = Client(sentry_dsn, transport=HTTPTransport)
                    register_sentry(client, w)
                except ImportError:
                    self.stdout.write(
                        self.style.ERROR(
                            "Please install sentry. For example `pip install raven`"
                        ))
                    sys.exit(1)

            w.work(burst=options.get('burst', False))
        except ConnectionError as e:
            print(e)
            sys.exit(1)
Example #34
File: rqworker.py Project: pib/rq
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    setup_loghandlers_from_args(args)
    setup_redis(args)

    cleanup_ghosts()

    try:
        queues = map(Queue, args.queues)
        w = Worker(queues, name=args.name)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #35
    def handle(self, *args, **options):
        pid = options.get('pid')
        if pid:
            with open(os.path.expanduser(pid), "w") as fp:
                fp.write(str(os.getpid()))
        sentry_dsn = options.get('sentry-dsn') or getattr(settings, 'SENTRY_DSN', None)
        try:
            # Instantiate a worker
            worker_kwargs = {
                'worker_class': options['worker_class'],
                'queue_class': options['queue_class'],
                'job_class': options['job_class'],
                'name': options['name'],
                'default_worker_ttl': options['worker_ttl'],
            }
            w = get_worker(*args, **worker_kwargs)

            # Call use_connection to push the redis connection into LocalStack
            # without this, jobs using RQ's get_current_job() will fail
            use_connection(w.connection)
            # Close any opened DB connection before any fork
            reset_db_connections()

            if sentry_dsn:
                try:
                    from raven import Client
                    from raven.transport.http import HTTPTransport
                    from rq.contrib.sentry import register_sentry
                    client = Client(sentry_dsn, transport=HTTPTransport)
                    register_sentry(client, w)
                except ImportError:
                    self.stdout.write(self.style.ERROR("Please install sentry. For example `pip install raven`"))
                    sys.exit(1)

            w.work(burst=options.get('burst', False))
        except ConnectionError as e:
            print(e)
            sys.exit(1)
Example #36
    def __init__(self, *args, **kwargs):
        from django.conf import settings

        super().__init__(*args, **kwargs)
        client = Client(settings.SENTRY_DSN, transport=HTTPTransport)
        register_sentry(client, self)
Example #37
def work():
    with Connection(conn):
        worker = Worker(map(Queue, listen), exc_handler=my_handler)
        register_sentry(client, worker)
        worker.work()
Example #38
    def __init__(self, *args, **kwargs):
        super(ManabiWorker, self).__init__(*args, **kwargs)

        dsn = settings.RAVEN_CONFIG['dsn']
        client = Client(dsn, transport=HTTPTransport)
        register_sentry(client, self)
Example #39
def work():
    with Connection(conn):
        worker = Worker(map(Queue, listen), name=str(uuid.uuid1()))
        register_sentry(client, worker)
        worker.work()
Example #40
#!/usr/bin/env python3
import os

from flask_rq import get_worker
from raven import Client
from raven.transport.http import HTTPTransport
from rq.contrib.sentry import register_sentry

from server import create_app

if __name__ == '__main__':
    # default to dev config
    env = os.environ.get('OK_ENV', 'dev')
    app = create_app('settings/{0!s}.py'.format(env))
    with app.app_context():
        worker = get_worker()
        sentry_dsn = os.getenv('SENTRY_DSN')
        if sentry_dsn:
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, worker)
        worker.work()
Example #41
    def __init__(self, *args, **kwargs):
        super(SentryAwareWorker, self).__init__(*args, **kwargs)
        dsn = os.environ.get('SENTRY_DSN')
        client = Client(dsn, transport=HTTPTransport)
        register_sentry(client, self)
Example #42
import sys
import os
from rq import Queue, Worker, Connection
from rq.contrib.sentry import register_sentry
from rq.logutils import setup_loghandlers

current_dir = os.path.dirname(os.path.abspath(__file__))
source_dir = os.path.dirname(current_dir)

sys.path.insert(0, source_dir)

if __name__ == '__main__':
    # Tell rq what Redis connection to use
    from pyhackers.app import start_app
    start_app(soft=True)
    from pyhackers.sentry import sentry_client

    setup_loghandlers("DEBUG")

    with Connection():
        q = Queue()
        w = Worker(q)
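        # Worker also accepts a single Queue; RQ normalizes it to a one-element queue list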

        register_sentry(sentry_client, w)
        w.work()
Example #43
    def handle(self, *args, **options):
        pid = options.get('pid')
        if pid:
            with open(os.path.expanduser(pid), "w") as fp:
                fp.write(str(os.getpid()))
        sentry_dsn = options.get('sentry-dsn')
        if sentry_dsn is None:
            sentry_dsn = getattr(settings, 'SENTRY_DSN', None)

        # Verbosity is defined by default in BaseCommand for all commands
        verbosity = options.get('verbosity')
        if verbosity >= 2:
            level = 'DEBUG'
        elif verbosity == 0:
            level = 'WARNING'
        else:
            level = 'INFO'
        setup_loghandlers(level)

        sentry_dsn = options.get('sentry-dsn') or getattr(
            settings, 'SENTRY_DSN', None)
        try:
            # Instantiate a worker
            worker_kwargs = {
                'worker_class': options['worker_class'],
                'queue_class': options['queue_class'],
                'job_class': options['job_class'],
                'name': options['name'],
                'default_worker_ttl': options['worker_ttl'],
            }
            w = get_worker(*args, **worker_kwargs)

            # Call use_connection to push the redis connection into LocalStack
            # without this, jobs using RQ's get_current_job() will fail
            use_connection(w.connection)
            # Close any opened DB connection before any fork
            reset_db_connections()

            if sentry_dsn:
                try:
                    from raven import Client
                    from raven.transport.http import HTTPTransport
                    from rq.contrib.sentry import register_sentry

                    from raven.exceptions import InvalidDsn
                    try:
                        client = Client(sentry_dsn, transport=HTTPTransport)
                        register_sentry(client, w)
                    except InvalidDsn:
                        self.stdout.write(
                            self.style.ERROR(
                                "Invalid DSN. If you use `sentry-sdk` package you have to disable the django-rq sentry plugin by setting `--sentry-dsn=\"\"`."
                            ))
                        sys.exit(1)
                except ImportError:
                    self.stdout.write(
                        self.style.ERROR(
                            "Please install sentry. For example `pip install raven`"
                        ))
                    sys.exit(1)

            w.work(burst=options.get('burst', False))
        except ConnectionError as e:
            print(e)
            sys.exit(1)
Example #44
def work():
    with Connection(conn):
        worker = Worker(map(Queue, listen), name=str(uuid.uuid1()))
        register_sentry(client, worker)
        worker.work()
Example #45
from rq import Worker, Queue, Connection
from extensions import redis_conn
from {{cookiecutter.app_name}}.app import create_app
import sys
from raven import Client
from raven.transport.http import HTTPTransport
from rq.contrib.sentry import register_sentry

app = create_app()
client = Client(app.config.get('SENTRY_DSN'), transport=HTTPTransport)

listen = ['default']
if len(sys.argv) > 1:
    listen = [queue_name.strip() for queue_name in sys.argv[1:]]

if __name__ == '__main__':
    with Connection(redis_conn):
        with app.app_context():
            worker = Worker(map(Queue, listen))
            register_sentry(client, worker)
            worker.work()
Example #46
if args.pid:
    with open(os.path.expanduser(args.pid), "w") as fp:
        fp.write(str(os.getpid()))

setup_loghandlers_from_args(args)
setup_redis(args)

cleanup_ghosts()
worker_class = import_attribute(args.worker_class)

try:
    queues = list(map(Queue, args.queues))
    w = worker_class(queues,
                     name=args.name,
                     default_worker_ttl=args.worker_ttl,
                     default_result_ttl=args.results_ttl)

    # Should we configure Sentry?
    if args.sentry_dsn:
        from raven import Client
        from rq.contrib.sentry import register_sentry
        client = Client(args.sentry_dsn)
        register_sentry(client, w)

    w.push_exc_handler(retry_handler)
    w.work(burst=args.burst)
except ConnectionError as e:
    print(e)
    sys.exit(1)