def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, queues, rate_limit, **options):
    """Starts an RQ worker.

    Reads defaults from the config file named by ``cli_config.config`` (CLI
    arguments win), optionally writes a pidfile, builds the queue and worker
    objects from the classes configured on ``cli_config``, wires up Sentry via
    raven when a DSN is given, and blocks in ``worker.work()`` until the
    worker exits. Exits with status 1 if RQ is suspended or the Redis
    connection fails.
    """
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments: CLI values take precedence over the
    # config file; queues fall back to the conventional 'default' queue.
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        # Record our PID so external process managers can signal us.
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(h) for h in exception_handler]

        if is_suspended(cli_config.connection):
            click.secho(
                'RQ is currently suspended, to resume job execution run "rq resume"',
                fg='red')
            sys.exit(1)

        queues = [
            cli_config.queue_class(queue,
                                   connection=cli_config.connection,
                                   job_class=cli_config.job_class)
            for queue in queues
        ]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            # An empty handler list means "use the worker's defaults".
            exception_handlers=exception_handlers or None,
            limit=rate_limit)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, worker)

        worker.work(burst=burst, logging_level=logging_level)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def worker(cli_config, burst, logging_level, name, results_ttl,
           worker_ttl, job_monitoring_interval, disable_job_desc_logging,
           verbose, quiet, sentry_dsn, exception_handler,
           pid, disable_default_exception_handler, max_jobs, with_scheduler,
           queues, log_format, date_format, **options):
    """Starts an RQ worker.

    Merges CLI arguments with values from the config file named by
    ``cli_config.config`` (CLI wins), optionally writes a pidfile, builds the
    queue and worker objects from the classes configured on ``cli_config``,
    registers Sentry when a DSN is given, and blocks in ``worker.work()``
    until the worker exits (optionally after ``max_jobs`` jobs). Exits with
    status 1 if RQ is suspended or the Redis connection fails.
    """
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments: CLI values take precedence over the
    # config file; queues fall back to the conventional 'default' queue.
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    if pid:
        # Record our PID so external process managers can signal us.
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(h) for h in exception_handler]

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            # An empty handler list means "use the worker's defaults".
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler,
            log_job_description=not disable_job_desc_logging
        )

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        # if --verbose or --quiet, override --logging_level
        if verbose or quiet:
            logging_level = None

        worker.work(burst=burst, logging_level=logging_level,
                    date_format=date_format, log_format=log_format,
                    max_jobs=max_jobs, with_scheduler=with_scheduler)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def worker(cli_config, burst, name, results_ttl, worker_ttl,
           verbose, quiet, sentry_dsn, exception_handler,
           pid, queues, **options):
    """Starts an RQ worker.

    Reads defaults from the config file named by ``cli_config.config`` (CLI
    arguments win), optionally writes a pidfile, builds the queue and worker
    objects from the classes configured on ``cli_config``, wires up Sentry via
    raven when a DSN is given, and blocks in ``worker.work()`` until the
    worker exits. Exits with status 1 if RQ is suspended or the Redis
    connection fails.
    """
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments: CLI values take precedence over the
    # config file; queues fall back to the conventional 'default' queue.
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        # Record our PID so external process managers can signal us.
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    try:
        cleanup_ghosts(cli_config.connection)
        # Resolve dotted-path handler names into callables.
        exception_handlers = [import_attribute(h) for h in exception_handler]

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(queues,
                                         name=name,
                                         connection=cli_config.connection,
                                         default_worker_ttl=worker_ttl,
                                         default_result_ttl=results_ttl,
                                         job_class=cli_config.job_class,
                                         queue_class=cli_config.queue_class,
                                         # Empty list means "use defaults".
                                         exception_handlers=exception_handlers or None)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from raven.transport.http import HTTPTransport
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, worker)

        worker.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def run(name: str, queue: str, connection_host: str, connection_port: int = 6379,
        connection_password: str = None, connection_over_tls: bool = False,
        connection_db: int = 0, verbose: bool = False):
    """Run a single RQ worker named *name* on *queue*.

    Connects to Redis with the given connection parameters, configures ISO-ish
    timestamped logging (DEBUG when *verbose*), and blocks in ``work()`` with
    fixed worker/result TTLs and monitoring interval. Exits with status 1 when
    the Redis connection fails.
    """
    date_format = "%Y-%m-%dT%H:%M:%S"
    log_format = "%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s"
    setup_loghandlers_from_args(verbose, False, date_format, log_format)

    # Collect the Redis connection keyword arguments in one place.
    conn_kwargs = dict(
        ssl=connection_over_tls,
        password=connection_password,
        host=connection_host,
        port=connection_port,
        db=connection_db,
    )

    try:
        connection = Redis(**conn_kwargs)
        cleanup_ghosts(connection)
        rq_worker = Worker(
            [queue],
            name=name,
            connection=connection,
            default_worker_ttl=420,
            default_result_ttl=500,
            job_monitoring_interval=30,
            job_class=Job,
            queue_class=Queue,
            exception_handlers=None,
        )
        rq_worker.work(
            burst=False,
            logging_level="DEBUG" if verbose else "INFO",
            date_format=date_format,
            log_format=log_format,
        )
    except ConnectionError as e:
        print(e)
        sys.exit(1)