Example #1
0
def worker(cli_config, burst, logging_level, name, results_ttl, worker_ttl,
           job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, queues, rate_limit, **options):
    """Starts an RQ worker."""
    settings = {}
    if cli_config.config:
        settings = read_config_file(cli_config.config)

    # Config-file values act as fallbacks for worker-specific arguments.
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        # Record this process id so external tools can manage the worker.
        with open(os.path.expanduser(pid), "w") as pid_file:
            pid_file.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(path) for path in exception_handler]

        if is_suspended(cli_config.connection):
            click.secho(
                'RQ is currently suspended, to resume job execution run "rq resume"',
                fg='red')
            sys.exit(1)

        queue_instances = []
        for queue_name in queues:
            queue_instances.append(
                cli_config.queue_class(queue_name,
                                       connection=cli_config.connection,
                                       job_class=cli_config.job_class))

        worker = cli_config.worker_class(
            queue_instances,
            name=name,
            connection=cli_config.connection,
            default_worker_ttl=worker_ttl,
            default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class,
            queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            limit=rate_limit)

        # Attach a Sentry client when a DSN was supplied (CLI or config file).
        if sentry_dsn:
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            register_sentry(Client(sentry_dsn, transport=HTTPTransport), worker)

        worker.work(burst=burst, logging_level=logging_level)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #2
0
def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, disable_job_desc_logging, verbose, quiet, sentry_dsn,
           exception_handler, pid, disable_default_exception_handler, max_jobs, with_scheduler,
           queues, log_format, date_format, **options):
    """Starts an RQ worker."""
    settings = {}
    if cli_config.config:
        settings = read_config_file(cli_config.config)

    # Config-file values act as fallbacks for worker-specific arguments.
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    if pid:
        # Record the worker's pid for external process management.
        with open(os.path.expanduser(pid), "w") as pid_file:
            pid_file.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(path) for path in exception_handler]

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queue_instances = [
            cli_config.queue_class(queue_name,
                                   connection=cli_config.connection,
                                   job_class=cli_config.job_class)
            for queue_name in queues
        ]
        worker = cli_config.worker_class(
            queue_instances,
            name=name,
            connection=cli_config.connection,
            default_worker_ttl=worker_ttl,
            default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class,
            queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler,
            log_job_description=not disable_job_desc_logging,
        )

        # Wire up Sentry error reporting when a DSN is available.
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        # Explicit -v/-q flags take precedence over --logging_level.
        if verbose or quiet:
            logging_level = None

        worker.work(burst=burst, logging_level=logging_level,
                    date_format=date_format, log_format=log_format,
                    max_jobs=max_jobs, with_scheduler=with_scheduler)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #3
0
File: cli.py Project: selwin/rq
def worker(cli_config, burst, name, results_ttl,
           worker_ttl, verbose, quiet, sentry_dsn, exception_handler,
           pid, queues, **options):
    """Starts an RQ worker."""
    settings = {}
    if cli_config.config:
        settings = read_config_file(cli_config.config)

    # Config-file values act as fallbacks for worker-specific arguments.
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        # Record this process id so external tools can manage the worker.
        with open(os.path.expanduser(pid), "w") as pid_file:
            pid_file.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(path) for path in exception_handler]

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queue_instances = [
            cli_config.queue_class(queue_name,
                                   connection=cli_config.connection,
                                   job_class=cli_config.job_class)
            for queue_name in queues
        ]
        worker = cli_config.worker_class(
            queue_instances,
            name=name,
            connection=cli_config.connection,
            default_worker_ttl=worker_ttl,
            default_result_ttl=results_ttl,
            job_class=cli_config.job_class,
            queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None)

        # Attach a Sentry client when a DSN was supplied (CLI or config file).
        if sentry_dsn:
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            register_sentry(Client(sentry_dsn, transport=HTTPTransport), worker)

        worker.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
Example #4
0
 def test_config_file(self):
     """read_config_file should expose settings from a module path."""
     loaded = read_config_file("tests.dummy_settings")
     self.assertIn("REDIS_HOST", loaded)
     self.assertEqual(loaded["REDIS_HOST"], "testhost.example.com")
Example #5
0
 def test_config_file(self):
     """Settings read from the dummy module include the Redis host."""
     result = read_config_file("tests.dummy_settings")
     self.assertIn("REDIS_HOST", result)
     self.assertEqual(result["REDIS_HOST"], "testhost.example.com")
Example #6
0
 def test_config_file(self):
     """read_config_file should expose settings from a module path."""
     loaded = read_config_file('tests.config_files.dummy')
     self.assertIn('REDIS_HOST', loaded)
     self.assertEqual(loaded['REDIS_HOST'], 'testhost.example.com')
Example #7
0
 def test_config_file(self):
     """Settings read from the dummy config module include the Redis host."""
     result = read_config_file('tests.config_files.dummy')
     self.assertIn('REDIS_HOST', result)
     self.assertEqual(result['REDIS_HOST'], 'testhost.example.com')
Example #8
0
 def test_reading_dsn_from_file(self):
     """A Sentry DSN defined in a config module should be picked up."""
     loaded = read_config_file('tests.config_files.sentry')
     self.assertIn('SENTRY_DSN', loaded)
     self.assertEqual(loaded['SENTRY_DSN'], 'https://[email protected]/123')
Example #9
0
 def test_reading_dsn_from_file(self):
     """The SENTRY_DSN setting is read from the sentry config module."""
     result = read_config_file('tests.config_files.sentry')
     self.assertIn('SENTRY_DSN', result)
     self.assertEqual(result['SENTRY_DSN'], 'https://[email protected]/123')
Example #10
0
def main():
    """Run the rq scheduler.

    Redis connection settings are resolved in three passes, each pass
    overriding the previous one: environment variables, an optional rq
    config file (-c), and finally command-line arguments.
    """
    # Pass 0: a minimal parser that only understands -c/--config, so the
    # config file can be loaded before the full option set is declared.
    base_parser = argparse.ArgumentParser(
        add_help=False  # help is provided by the full parser defined below
    )
    base_parser.add_argument('-c', "--config", help='Use an rq config file')
    args, remaining_argv = base_parser.parse_known_args()

    # Pass 1: defaults from environment variables.
    config = {
        KEY_HOST: os.environ.get('RQ_REDIS_HOST', 'localhost'),
        KEY_PORT: int(os.environ.get('RQ_REDIS_PORT', 6379)),
        KEY_DB: int(os.environ.get('RQ_REDIS_DB', 0)),
        KEY_PASSWORD: os.environ.get('RQ_REDIS_PASSWORD'),
        KEY_URL: os.environ.get('RQ_REDIS_URL'),
    }

    # Pass 2: overrides from the config file, when one was supplied.
    if args.config:
        # Hack: make the config file's directory importable so that
        # helpers.read_config_file can load it as a module.
        sys.path.insert(0, os.path.dirname(os.path.realpath(args.config)))
        rq_config = helpers.read_config_file(args.config)
        # Map rq's setting names onto our own config keys.
        for own_key, rq_key in ((KEY_URL, "REDIS_URL"),
                                (KEY_HOST, "REDIS_HOST"),
                                (KEY_PORT, "REDIS_PORT"),
                                (KEY_DB, "REDIS_DB"),
                                (KEY_PASSWORD, "REDIS_PASSWORD")):
            config[own_key] = rq_config.get(rq_key, config[own_key])

    # Pass 3: command-line arguments override everything else.
    parser = argparse.ArgumentParser(
        parents=[base_parser]  # inherit -c/--config from the first pass
    )
    parser.add_argument('-H', '--host', default=config[KEY_HOST], help="Redis host")
    parser.add_argument('-p', '--port', default=config[KEY_PORT], type=int, help="Redis port number")
    parser.add_argument('-d', '--db', default=config[KEY_DB], type=int, help="Redis database")
    parser.add_argument('-P', '--password', default=config[KEY_PASSWORD], help="Redis password")
    parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show more output')
    parser.add_argument('--url', '-u', default=config[KEY_URL]
        , help='URL describing Redis connection details. \
            Overrides other connection arguments if supplied.')
    parser.add_argument('-i', '--interval', default=60.0, type=float
        , help="How often the scheduler checks for new jobs to add to the \
            queue (in seconds, can be floating-point for more precision).")
    parser.add_argument('--path', default='.', help='Specify the import path.')
    parser.add_argument('--pid', help='A filename to use for the PID file.', metavar='FILE')

    args = parser.parse_args(remaining_argv)

    if args.path:
        sys.path = args.path.split(':') + sys.path

    if args.pid:
        # Record our pid so external process managers can find us.
        with open(args.pid, 'w') as pid_file:
            pid_file.write(str(os.getpid()))

    # A full connection URL wins over the individual host/port/db settings.
    if args.url is not None:
        connection = Redis.from_url(args.url)
    else:
        connection = Redis(args.host, args.port, args.db, args.password)

    setup_loghandlers('DEBUG' if args.verbose else 'INFO')

    scheduler = Scheduler(connection=connection, interval=args.interval)
    scheduler.run()