def __init__(self, url=None, config=None, worker_class=DEFAULT_WORKER_CLASS,
             job_class=DEFAULT_JOB_CLASS, queue_class=DEFAULT_QUEUE_CLASS,
             connection_class=DEFAULT_CONNECTION_CLASS, path=None,
             *args, **kwargs):
    self._connection = None
    self.url = url
    self.config = config

    if path:
        for pth in path:
            sys.path.append(pth)

    try:
        self.worker_class = import_attribute(worker_class)
    except (ImportError, AttributeError) as exc:
        raise click.BadParameter(str(exc), param_hint='--worker-class')
    try:
        self.job_class = import_attribute(job_class)
    except (ImportError, AttributeError) as exc:
        raise click.BadParameter(str(exc), param_hint='--job-class')
    try:
        self.queue_class = import_attribute(queue_class)
    except (ImportError, AttributeError) as exc:
        raise click.BadParameter(str(exc), param_hint='--queue-class')
    try:
        self.connection_class = import_attribute(connection_class)
    except (ImportError, AttributeError) as exc:
        raise click.BadParameter(str(exc), param_hint='--connection-class')
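# Illustrative CLI invocation exercising the constructor above (the dotted
# paths are hypothetical; the flags are the ones the param_hint values refer
# to). A dotted path that fails to import is reported as a click.BadParameter
# error attached to the offending flag:
#
#     $ rq worker --worker-class myapp.workers.CustomWorker \
#                 --job-class myapp.jobs.CustomJob \
#                 --queue-class myapp.queues.CustomQueue \
#                 --path /srv/myapp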
def worker(url, config, burst, name, worker_class, job_class, queue_class,
           path, results_ttl, worker_ttl, verbose, quiet, sentry_dsn,
           exception_handler, pid, queues):
    """Starts an RQ worker."""

    if path:
        sys.path = path.split(':') + sys.path

    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    conn = connect(url, config)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)
    exception_handlers = []
    for h in exception_handler:
        exception_handlers.append(import_attribute(h))

    if is_suspended(conn):
        click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
        sys.exit(1)

    try:
        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class,
                         queue_class=queue_class,
                         exception_handlers=exception_handlers or None)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            from raven.transport.http import HTTPTransport
            client = Client(sentry_dsn, transport=HTTPTransport)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.pid:
        with open(os.path.expanduser(args.pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(args)
    setup_redis(args)

    cleanup_ghosts()
    worker_class = import_attribute(args.worker_class)
    queue_class = import_attribute(args.queue_class)

    try:
        queues = list(map(queue_class, args.queues))
        w = worker_class(queues,
                         name=args.name,
                         default_worker_ttl=args.worker_ttl,
                         default_result_ttl=args.results_ttl,
                         job_class=args.job_class)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def worker(url, config, burst, name, worker_class, job_class, queue_class,
           path, results_ttl, worker_ttl, verbose, quiet, sentry_dsn, pid,
           queues):
    """Starts an RQ worker."""

    if path:
        sys.path = path.split(':') + sys.path

    settings = read_config_file(config) if config else {}
    # Worker specific default arguments
    url = url or settings.get('REDIS_URL')
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet)

    conn = connect(url)
    cleanup_ghosts(conn)
    worker_class = import_attribute(worker_class)
    queue_class = import_attribute(queue_class)

    if worker_class.paused():
        click.secho("The worker has been paused, run reset_paused", fg='red')
        sys.exit(1)

    try:
        queues = [queue_class(queue, connection=conn) for queue in queues]
        w = worker_class(queues,
                         name=name,
                         connection=conn,
                         default_worker_ttl=worker_ttl,
                         default_result_ttl=results_ttl,
                         job_class=job_class)

        # Should we configure Sentry?
        if sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(sentry_dsn)
            register_sentry(client, w)

        w.work(burst=burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def get_queue(self, name=None):
    """
    Returns an RQ queue instance with the given name, e.g.::

        default_queue = rq.get_queue()
        low_queue = rq.get_queue('low')

    :param name: Name of the queue to return, defaults to
                 :attr:`~flask_rq2.RQ.default_queue`.
    :type name: str
    :return: An RQ queue instance.
    :rtype: ``rq.queue.Queue``
    """
    if not name:
        name = self.default_queue
    queue = self._queue_instances.get(name)
    if queue is None:
        queue_cls = import_attribute(self.queue_class)
        queue = queue_cls(
            name=name,
            default_timeout=self.default_timeout,
            is_async=self._is_async,
            connection=self.connection,
            job_class=self.job_class,
        )
        self._queue_instances[name] = queue
    return queue
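# Minimal usage sketch for get_queue() (illustrative): `rq` is assumed to be
# a configured flask_rq2.RQ instance and 'myapp.tasks.add' a hypothetical
# task import path.
def example_get_queue_usage(rq):
    queue = rq.get_queue('low')
    # Instances are cached per name in _queue_instances:
    assert queue is rq.get_queue('low')
    # The returned object is an rq.Queue subclass, so the normal enqueue
    # API applies:
    return queue.enqueue('myapp.tasks.add', 1, 2)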
def __init__(self, app=None, default_timeout=None, is_async=None, **kwargs):
    """
    Initialize the RQ interface.

    :param app: Flask application
    :type app: :class:`flask.Flask`
    :param default_timeout: The default timeout in seconds to use for jobs,
                            defaults to RQ's default of 180 seconds per job
    :type default_timeout: int
    :param is_async: Whether to run jobs asynchronously or in-process,
                     defaults to ``True``
    :type is_async: bool
    """
    if default_timeout is not None:
        self.default_timeout = default_timeout
    self._is_async = is_async
    if 'async' in kwargs:
        self._is_async = kwargs['async']
        warnings.warn('The `async` keyword is deprecated. '
                      'Use `is_async` instead', DeprecationWarning)
    self._jobs = []
    self._exception_handlers = []
    self._queue_instances = {}
    self._functions_cls = import_attribute(self.functions_class)
    self._ready_to_connect = False
    self._connection = None
    if app is not None:
        self.init_app(app)
def get_scheduler(self, interval=None, queue=None):
    """
    When installed returns a ``rq_scheduler.Scheduler`` instance to
    schedule job execution, e.g.::

        scheduler = rq.get_scheduler(interval=10)

    :param interval: Time in seconds of the periodic check for scheduled jobs.
    :type interval: int
    :param queue: Name of the queue to enqueue in, defaults to
                  :attr:`~flask_rq2.RQ.scheduler_queue`.
    :type queue: str
    """
    if interval is None:
        interval = self.scheduler_interval
    if not queue:
        queue = self.scheduler_queue
    scheduler_cls = import_attribute(self.scheduler_class)
    scheduler = scheduler_cls(
        queue_name=queue,
        interval=interval,
        connection=self.connection,
    )
    return scheduler
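# Usage sketch for get_scheduler() (illustrative, assumes rq-scheduler is
# installed; 'myapp.tasks.ping' is a hypothetical task path). The returned
# object is a plain rq_scheduler.Scheduler, so its scheduling API applies:
def example_scheduler_usage(rq):
    from datetime import timedelta
    scheduler = rq.get_scheduler(interval=10)
    # Run the job five minutes from now:
    return scheduler.enqueue_in(timedelta(minutes=5), 'myapp.tasks.ping')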
def get_exception_handlers():
    """
    Custom exception handlers can be defined in settings.py::

        RQ = {
            'EXCEPTION_HANDLERS': ['path.to.handler'],
        }
    """
    return [import_attribute(path) for path in EXCEPTION_HANDLERS]
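# A minimal example of what 'path.to.handler' might point at (the handler
# below is hypothetical). RQ calls each handler with the failed job and the
# exception triple; returning a falsy value stops the handler chain.
def my_exception_handler(job, exc_type, exc_value, traceback):
    job.meta['failed'] = True  # annotate the job for later inspection
    job.save()
    return True  # continue with the next handler in the chain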
def test_get_scheduler(rq):
    scheduler = rq.get_scheduler()
    assert isinstance(scheduler, Scheduler)
    assert isinstance(scheduler, import_attribute(rq.scheduler_class))
    assert scheduler.queue_name == rq.scheduler_queue
    assert scheduler._interval == rq.scheduler_interval
    assert scheduler.connection == rq.connection
def test_get_queue(rq):
    assert rq._queue_instances == {}
    queue = rq.get_queue()
    assert rq._queue_instances != {}
    assert queue in rq._queue_instances.values()
    assert isinstance(queue, Queue)
    assert isinstance(queue, import_attribute(rq.queue_class))
    assert queue.name == rq.default_queue
    assert queue._default_timeout == rq.default_timeout
    assert queue._is_async == rq._is_async
    assert queue.connection == rq.connection
def get_exception_handlers():
    """
    Custom exception handlers can be defined in settings.py::

        RQ = {
            'EXCEPTION_HANDLERS': ['path.to.handler'],
        }
    """
    RQ = getattr(settings, 'RQ', {})
    exception_handlers = []
    for path in RQ.get('EXCEPTION_HANDLERS', []):
        handler = import_attribute(path)
        exception_handlers.append(handler)
    return exception_handlers
def worker(cli_config, burst, logging_level, name, results_ttl, worker_ttl,
           job_monitoring_interval, verbose, quiet, sentry_dsn,
           exception_handler, pid, disable_default_exception_handler, queues,
           log_format, date_format, **options):
    """Starts an RQ worker."""
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:
        cleanup_ghosts(cli_config.connection)
        exception_handlers = []
        for h in exception_handler:
            exception_handlers.append(import_attribute(h))

        if is_suspended(cli_config.connection):
            click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red')
            sys.exit(1)

        queues = [cli_config.queue_class(queue,
                                         connection=cli_config.connection,
                                         job_class=cli_config.job_class)
                  for queue in queues]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler
        )

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        worker.work(burst=burst, logging_level=logging_level,
                    date_format=date_format, log_format=log_format)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def get_queue_class(config):
    """
    Return queue class from config or from RQ settings, otherwise return
    DjangoRQ.
    """
    RQ = getattr(settings, 'RQ', {})
    queue_class = DjangoRQ

    if 'QUEUE_CLASS' in config:
        queue_class = config.get('QUEUE_CLASS')
    elif 'QUEUE_CLASS' in RQ:
        queue_class = RQ.get('QUEUE_CLASS')

    if isinstance(queue_class, six.string_types):
        queue_class = import_attribute(queue_class)
    return queue_class
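# Illustrative resolution order (the dotted paths are hypothetical): the
# per-queue config entry wins over the global RQ setting, and dotted-path
# strings are resolved lazily via import_attribute.
#
#     # settings.py: RQ = {'QUEUE_CLASS': 'myapp.queues.GlobalQueue'}
#     get_queue_class({'QUEUE_CLASS': 'myapp.queues.SpecialQueue'})
#     # -> myapp.queues.SpecialQueue (config overrides the RQ setting)
#     get_queue_class({})
#     # -> myapp.queues.GlobalQueue (falls back to the RQ setting)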
def get_job_class(job_class=None):
    """
    Return job class from RQ settings, otherwise return Job.

    If `job_class` is not None, it is used as an override (it can be a
    Python import path given as a string).
    """
    RQ = getattr(settings, 'RQ', {})

    if job_class is None:
        job_class = RQ.get('JOB_CLASS', Job)

    if isinstance(job_class, six.string_types):
        job_class = import_attribute(job_class)
    return job_class
def get_worker(self, *queues):
    """
    Returns an RQ worker instance for the given queue names, e.g.::

        configured_worker = rq.get_worker()
        default_worker = rq.get_worker('default')
        default_low_worker = rq.get_worker('default', 'low')

    :param \\*queues: Names of queues the worker should act on, falls back
                      to the configured queues.
    """
    if not queues:
        queues = self.queues
    queues = [self.get_queue(name) for name in queues]
    worker_cls = import_attribute(self.worker_class)
    worker = worker_cls(
        queues,
        connection=self.connection,
        job_class=self.job_class,
        queue_class=self.queue_class,
    )
    for exception_handler in self._exception_handlers:
        worker.push_exc_handler(import_attribute(exception_handler))
    return worker
def init_backends(self, app):
    """
    Initialize the RQ backends with a closure so the RQ job class is
    aware of the Flask app context.
    """
    BaseJob = import_attribute(self.job_path)
    BaseQueue = import_attribute(self.queue_path)
    BaseWorker = import_attribute(self.worker_path)

    class AppJob(BaseJob):
        def perform(self):
            with app.app_context():
                return super(AppJob, self).perform()

    class AppQueue(BaseQueue):
        job_class = AppJob

    class AppWorker(BaseWorker):
        queue_class = AppQueue
        job_class = AppJob

    self.job_cls = AppJob
    self.queue_cls = AppQueue
    self.worker_cls = AppWorker
    self.scheduler_cls = Scheduler

    self.module_path = 'flask_rq2.backend_%s' % app.name
    self.module = types.ModuleType(self.module_path)
    self.module.__path__ = []
    sys.modules[self.module_path] = self.module

    for backend_type in ['job', 'queue', 'worker']:
        backend_cls = getattr(self, '%s_cls' % backend_type)
        setattr(self.module, backend_cls.__name__, backend_cls)
        setattr(self, 'app_%s_path' % backend_type,
                '%s.%s' % (self.module_path, backend_cls.__name__))
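# Why the synthetic module: RQ loads job/queue/worker classes by dotted path,
# so the closure-created AppJob/AppQueue/AppWorker must live under an
# importable name registered in sys.modules. A sketch of the effect, assuming
# init_backends() ran against a Flask app named 'myapp':
#
#     rq.app_job_path                                  # 'flask_rq2.backend_myapp.AppJob'
#     import_attribute(rq.app_job_path) is rq.job_cls  # True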
def get_worker_class(worker_class=None):
    """
    Return worker class from RQ settings, otherwise return Worker.

    If `worker_class` is not None, it is used as an override (it can be a
    Python import path given as a string).
    """
    RQ = getattr(settings, 'RQ', {})

    if worker_class is None:
        worker_class = Worker
        if 'WORKER_CLASS' in RQ:
            worker_class = RQ.get('WORKER_CLASS')

    if isinstance(worker_class, six.string_types):
        worker_class = import_attribute(worker_class)
    return worker_class
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.pid:
        # Write the pid file (a leading ~ is expanded to the home directory)
        with open(os.path.expanduser(args.pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(args)
    # Set up Redis: create the connection and push it onto the connection
    # stack; the Queue and Worker created below will pick it up from there
    setup_redis(args)

    # Clean up ghost workers
    cleanup_ghosts()
    worker_class = import_attribute(args.worker_class)

    try:
        # Instantiate the queues (the 'default' queue if none were given)
        queues = list(map(Queue, args.queues))
        # Instantiate the worker
        w = worker_class(queues, name=args.name)

        # Should we configure Sentry?
        # raven is the Python client for Sentry (which notifies maintainers
        # when the code raises an exception)
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        # Start the worker
        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def test_queue_job(app):
    rq = RQ(app, is_async=True)
    rq.connection.flushdb()
    rq.job(add)

    job1 = add.queue(1, 2)
    assert isinstance(job1, import_attribute(rq.job_class))
    assert job1.args == (1, 2)
    assert job1.kwargs == {}
    assert job1.timeout == add.helper.timeout == rq.default_timeout

    job2 = add.queue(3, 4, description='job 2')
    assert job2.description == 'job 2'

    job3_id = uuid.uuid4().hex
    job3 = add.queue(5, 6, job_id=job3_id)
    assert job3.id == job3_id

    job4 = add.queue(7, 8, depends_on=job3)
    assert job4.dependency.id == job3.id

    other_queue = 'other_queue'
    job5 = add.queue(9, 10, queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job5.origin == other_queue

    job6 = add.queue(11, 12)
    result = job6.perform()
    assert result == 23

    queue = rq.get_queue()
    assert job1 in queue.jobs
    assert job2 in queue.jobs
    assert job3 in queue.jobs
    # job4 depends on job3, so it is not queued yet
    assert job4 not in queue.jobs
    assert job3.result is None
    assert job4.result is None

    response = rq.get_worker('default').work(True)
    assert response
    assert job4.dependency.result == 11
    assert job4.result == 15
    assert len(queue.jobs) == 0
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    # Worker specific default arguments
    if not args.queues:
        args.queues = settings.get('QUEUES', ['default'])

    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN',
                                       os.environ.get('SENTRY_DSN', None))

    if args.pid:
        with open(os.path.expanduser(args.pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(args)
    setup_redis(args)

    cleanup_ghosts()
    worker_class = import_attribute(args.worker_class)

    try:
        queues = list(map(Queue, args.queues))
        w = worker_class(queues, name=args.name)

        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)

        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def get_worker(self, *queues):
    """
    Returns an RQ worker instance for the given queue names, e.g.::

        configured_worker = rq.get_worker()
        default_worker = rq.get_worker('default')
        default_low_worker = rq.get_worker('default', 'low')

    :param \\*queues: Names of queues the worker should act on, falls back
                      to the configured queues.
    """
    if not queues:
        queues = self.queues
    queues = [self.get_queue(name) for name in queues]
    worker = self.worker_cls(queues, connection=self.connection)
    for exception_handler in self._exception_handlers:
        worker.push_exc_handler(import_attribute(exception_handler))
    return worker
def __init__(self, queues, name=None, default_result_ttl=None, connection=None,
             exception_handlers=None, default_worker_ttl=None, job_class=None):
    self.connection = connection

    # TODO: assert against empty queues.
    # TODO: test worker creation without global connection.
    queues = [self.queue_class(name=q, connection=connection)
              if isinstance(q, text_type) else q
              for q in ensure_list(queues)]
    self._name = name
    self.queues = queues
    self.validate_queues()
    self._exc_handlers = []

    if default_result_ttl is None:
        default_result_ttl = DEFAULT_RESULT_TTL
    self.default_result_ttl = default_result_ttl

    if default_worker_ttl is None:
        default_worker_ttl = DEFAULT_WORKER_TTL
    self.default_worker_ttl = default_worker_ttl

    self._state = 'starting'
    self._stop_requested = False
    self.failed_queue = get_failed_queue(connection=self.connection)
    self.last_cleaned_at = None

    # By default, push the "move-to-failed-queue" exception handler onto
    # the stack
    if exception_handlers is None:
        self.push_exc_handler(self.move_to_failed_queue)
    elif isinstance(exception_handlers, list):
        for h in exception_handlers:
            self.push_exc_handler(h)
    else:
        self.push_exc_handler(exception_handlers)

    if job_class is not None:
        if isinstance(job_class, string_types):
            job_class = import_attribute(job_class)
        self.job_class = job_class
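# Hedged construction sketch for the __init__ above: string queue names are
# coerced to queue instances, and job_class accepts a dotted import path
# ('myapp.jobs.AppJob' is hypothetical). Passing explicit exception_handlers
# would skip the default move-to-failed-queue handler.
def example_build_worker(worker_class, redis_conn):
    return worker_class(['default', 'low'],
                        connection=redis_conn,
                        job_class='myapp.jobs.AppJob')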
def get_queue_class(config=None, queue_class=None):
    """
    Return queue class from config or from RQ settings, otherwise return
    DjangoRQ.

    If ``queue_class`` is provided, it takes priority.

    The full priority list for queue class sources:

    1. ``queue_class`` argument
    2. ``QUEUE_CLASS`` in ``config`` argument
    3. ``QUEUE_CLASS`` in base settings (``RQ``)
    """
    RQ = getattr(settings, 'RQ', {})
    if queue_class is None:
        queue_class = RQ.get('QUEUE_CLASS', DjangoRQ)
        if config:
            queue_class = config.get('QUEUE_CLASS', queue_class)

    if isinstance(queue_class, six.string_types):
        queue_class = import_attribute(queue_class)
    return queue_class
def get_scheduler(name='default', queue=None, interval=60):
    """
    Returns an RQ Scheduler instance using parameters defined in
    ``RQ_QUEUES``.
    """
    RQ = getattr(settings, 'RQ', {})
    scheduler_class = RQ.get('SCHEDULER_CLASS', DjangoScheduler)

    if isinstance(scheduler_class, six.string_types):
        scheduler_class = import_attribute(scheduler_class)

    if queue is None:
        queue = get_queue(name)

    return scheduler_class(queue_name=name, interval=interval, queue=queue,
                           job_class=queue.job_class,
                           connection=get_connection(name))
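# Usage sketch (illustrative): the returned rq-scheduler instance supports
# cron-style repetition; 'myapp.tasks.nightly_report' is a hypothetical path.
def example_schedule_cron():
    scheduler = get_scheduler('default')
    # Register a job that runs every day at 03:00:
    return scheduler.cron('0 3 * * *', func='myapp.tasks.nightly_report')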
if not args.queues:
    args.queues = settings.get('QUEUES', ['default'])

if args.sentry_dsn is None:
    args.sentry_dsn = settings.get('SENTRY_DSN',
                                   os.environ.get('SENTRY_DSN', None))

if args.pid:
    with open(os.path.expanduser(args.pid), "w") as fp:
        fp.write(str(os.getpid()))

setup_loghandlers_from_args(args)
setup_redis(args)

cleanup_ghosts()
worker_class = import_attribute(args.worker_class)

try:
    queues = list(map(Queue, args.queues))
    w = worker_class(queues, name=args.name,
                     default_worker_ttl=args.worker_ttl,
                     default_result_ttl=args.results_ttl)

    # Should we configure Sentry?
    if args.sentry_dsn:
        from raven import Client
        from rq.contrib.sentry import register_sentry
        client = Client(args.sentry_dsn)
        register_sentry(client, w)

    w.work(burst=args.burst)
except ConnectionError as e:
    print(e)
    sys.exit(1)
    :param app: Flask application
    :type app: :class:`flask.Flask`
    :param default_timeout: The default timeout in seconds to use for jobs,
                            defaults to RQ's default of 180 seconds per job
    :type default_timeout: int
    :param async: Whether to run jobs asynchronously or in-process,
                  defaults to ``True``
    :type async: bool
    """
    if default_timeout is not None:
        self.default_timeout = default_timeout
    # Note: `async` became a reserved word in Python 3.7; later versions of
    # this code rename the parameter to `is_async`.
    self._async = async
    self._jobs = []
    self._exception_handlers = []
    self._queue_instances = {}
    self._functions_cls = import_attribute(self.functions_path)
    if app is not None:
        self.init_app(app)

@property
def connection(self):
    ctx = stack.top
    if ctx is not None:
        if not hasattr(ctx, 'rq_redis'):
            ctx.rq_redis = self._connect()
        return ctx.rq_redis

def _connect(self):
    return redis.from_url(self.url)
def _connect(self):
    connection_class = import_attribute(self.connection_class)
    return connection_class.from_url(self.redis_url)
def worker(cli_config, burst, logging_level, name, results_ttl, worker_ttl,
           job_monitoring_interval, disable_job_desc_logging, verbose, quiet,
           sentry_dsn, exception_handler, pid,
           disable_default_exception_handler, max_jobs, queues, log_format,
           date_format, **options):
    """Starts an RQ worker."""
    settings = read_config_file(cli_config.config) if cli_config.config else {}
    # Worker specific default arguments
    queues = queues or settings.get('QUEUES', ['default'])
    sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN')
    name = name or settings.get('NAME')

    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    setup_loghandlers_from_args(verbose, quiet, date_format, log_format)

    try:
        cleanup_ghosts(cli_config.connection)
        exception_handlers = []
        for h in exception_handler:
            exception_handlers.append(import_attribute(h))

        if is_suspended(cli_config.connection):
            click.secho(
                'RQ is currently suspended, to resume job execution run "rq resume"',
                fg='red')
            sys.exit(1)

        queues = [
            cli_config.queue_class(queue,
                                   connection=cli_config.connection,
                                   job_class=cli_config.job_class)
            for queue in queues
        ]
        worker = cli_config.worker_class(
            queues, name=name, connection=cli_config.connection,
            default_worker_ttl=worker_ttl, default_result_ttl=results_ttl,
            job_monitoring_interval=job_monitoring_interval,
            job_class=cli_config.job_class, queue_class=cli_config.queue_class,
            exception_handlers=exception_handlers or None,
            disable_default_exception_handler=disable_default_exception_handler,
            log_job_description=not disable_job_desc_logging)

        # Should we configure Sentry?
        if sentry_dsn:
            from rq.contrib.sentry import register_sentry
            register_sentry(sentry_dsn)

        # If --verbose or --quiet was given, it overrides --logging_level
        if verbose or quiet:
            logging_level = None

        worker.work(burst=burst, logging_level=logging_level,
                    date_format=date_format, log_format=log_format,
                    max_jobs=max_jobs)
    except ConnectionError as e:
        print(e)
        sys.exit(1)