def maybe_patch_concurrency(argv=None, short_opts=None,
                            long_opts=None, patches=None):
    """Apply eventlet/gevent monkeypatches.

    With short and long opt alternatives that specify the command line
    option to set the pool, this makes sure that anything that needs
    to be patched is completed as early as possible.
    (e.g., eventlet/gevent monkey patches).
    """
    argv = argv if argv else sys.argv
    short_opts = short_opts if short_opts else ['-P']
    long_opts = long_opts if long_opts else ['--pool']
    patches = patches if patches else {'eventlet': _patch_eventlet,
                                       'gevent': _patch_gevent}
    try:
        pool = _find_option_with_arg(argv, short_opts, long_opts)
    except KeyError:
        pass
    else:
        try:
            patcher = patches[pool]
        except KeyError:
            pass
        else:
            patcher()

        # set up eventlet/gevent environments ASAP
        from celery import concurrency
        concurrency.get_implementation(pool)
def maybe_patch_concurrency(
    argv=sys.argv,
    short_opts=["-P"],
    long_opts=["--pool"],
    patches={"eventlet": _patch_eventlet, "gevent": _patch_gevent},
):
    """With short and long opt alternatives that specify the command line
    option to set the pool, this makes sure that anything that needs to be
    patched is completed as early as possible.
    (e.g. eventlet/gevent monkey patches)."""
    try:
        pool = _find_option_with_arg(argv, short_opts, long_opts)
    except KeyError:
        pass
    else:
        try:
            patcher = patches[pool]
        except KeyError:
            pass
        else:
            patcher()

        # set up eventlet/gevent environments ASAP.
        from celery import concurrency
        concurrency.get_implementation(pool)
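For context, the two variants above are meant to run before anything else imports blocking libraries, so the monkey patches land first. A minimal sketch of an entry point that calls it early, modeled on how Celery wires its own `celery` command (the `celery.bin.celery` import path is an assumption about the surrounding project layout):

import sys

from celery import maybe_patch_concurrency


def main():
    # Patch as early as possible: if '-P eventlet' or '-P gevent' is on
    # the command line, the monkey patches must be applied before any
    # other module imports socket/threading-heavy libraries.
    if 'multi' not in sys.argv:
        maybe_patch_concurrency()
    # Only import the rest of the CLI after patching (assumed layout).
    from celery.bin.celery import main as _main
    sys.exit(_main())


if __name__ == '__main__':
    main()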
def maybe_patch_concurrency(self, argv, short_opts=None, long_opts=None):
    try:
        pool = self._find_option_with_arg(argv, short_opts, long_opts)
    except KeyError:
        pass
    else:
        from celery import concurrency
        # set up eventlet/gevent environments ASAP.
        concurrency.get_implementation(pool)
def execute_from_commandline(self, argv=None):
    if argv is None:
        argv = list(sys.argv)
    try:
        pool = argv[argv.index('-P') + 1]
    except ValueError:
        pass
    else:
        # set up eventlet/gevent environments ASAP.
        concurrency.get_implementation(pool)
    return super(WorkerCommand, self).execute_from_commandline(argv)
def setup_instance(self, queues=None, ready_callback=None, pidfile=None,
                   include=None, **kwargs):
    self.pidfile = pidfile
    self.app.loader.init_worker()
    self.setup_defaults(kwargs, namespace='celeryd')
    self.setup_queues(queues)
    self.setup_includes(include)

    # Set default concurrency
    if not self.concurrency:
        try:
            self.concurrency = cpu_count()
        except NotImplementedError:
            self.concurrency = 2

    # Options
    self.loglevel = mlevel(self.loglevel)
    self.ready_callback = ready_callback or self.on_consumer_ready
    self.use_eventloop = self.should_use_eventloop()

    signals.worker_init.send(sender=self)

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)
def setup_instance(
    self, queues=None, ready_callback=None, pidfile=None,
    include=None, use_eventloop=None, **kwargs
):
    self.pidfile = pidfile
    self.setup_queues(queues)
    self.setup_includes(include)

    # Set default concurrency
    if not self.concurrency:
        try:
            self.concurrency = cpu_count()
        except NotImplementedError:
            self.concurrency = 2

    # Options
    self.loglevel = mlevel(self.loglevel)
    self.ready_callback = ready_callback or self.on_consumer_ready
    # this connection is not established, only used for params
    self._conninfo = self.app.connection()
    self.use_eventloop = self.should_use_eventloop() if use_eventloop is None else use_eventloop
    self.options = kwargs

    signals.worker_init.send(sender=self)

    # Initialize bootsteps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.steps = []
    self.on_init_namespace()
    self.namespace = self.Namespace(
        app=self.app, on_start=self.on_start,
        on_close=self.on_close, on_stopped=self.on_stopped
    )
    self.namespace.apply(self, **kwargs)
def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, pidfile=None, use_eventloop=None,
             **kwargs):
    self.app = app_or_default(app or self.app)
    self._shutdown_complete = Event()
    self.setup_defaults(kwargs, namespace='celeryd')
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.pidfile = pidfile
    self.pidlock = None
    # this connection is not established, only used for params
    self._conninfo = self.app.connection()
    self.use_eventloop = (
        self.should_use_eventloop() if use_eventloop is None
        else use_eventloop
    )

    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    task_modules = set(task.__class__.__module__
                       for task in self.app.tasks.itervalues())
    self.app.conf.CELERY_INCLUDE = tuple(
        set(self.app.conf.CELERY_INCLUDE) | task_modules,
    )

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)
def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
        loglevel=None, logfile=None, pidfile=None, statedb=None, **kwargs):
    maybe_drop_privileges(uid=uid, gid=gid)
    # Pools like eventlet/gevent needs to patch libs as early
    # as possible.
    pool_cls = (concurrency.get_implementation(pool_cls) or
                self.app.conf.worker_pool)
    if self.app.IS_WINDOWS and kwargs.get('beat'):
        self.die('-B option does not work on Windows. '
                 'Please run celery beat as a separate service.')
    hostname = self.host_format(default_nodename(hostname))
    if loglevel:
        try:
            loglevel = mlevel(loglevel)
        except KeyError:  # pragma: no cover
            self.die('Unknown level {0!r}. Please use one of {1}.'.format(
                loglevel, '|'.join(
                    l for l in LOG_LEVELS if isinstance(l, string_t))))

    worker = self.app.Worker(
        hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
        logfile=logfile,  # node format handled by celery.app.log.setup
        pidfile=self.node_format(pidfile, hostname),
        statedb=self.node_format(statedb, hostname),
        **kwargs
    )
    worker.start()
    return worker.exitcode
def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, pidfile=None, **kwargs):
    self.app = app_or_default(app or self.app)

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app. So for the worker
    # we set this to always return our app. This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    set_default_app(self.app)

    self._shutdown_complete = Event()
    self.setup_defaults(kwargs, namespace="celeryd")
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.pidfile = pidfile
    self.pidlock = None
    self.use_eventloop = (detect_environment() == "default" and
                          self.app.broker_connection().is_evented)

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)
def run(self, *args, **kwargs): kwargs.pop("app", None) # Pools like eventlet/gevent needs to patch libs as early # as possible. from celery import concurrency kwargs["pool"] = concurrency.get_implementation( kwargs.get("pool") or self.app.conf.CELERYD_POOL) return self.app.Worker(**kwargs).run()
def run(self):
    freeze_support()
    kwargs = {}
    if self.options.daemon:
        kwargs['logfile'] = zenPath('log', 'zenjobs.log')
        kwargs['loglevel'] = self.options.logseverity
    kwargs["pool_cls"] = concurrency.get_implementation(
        kwargs.get("pool_cls") or self.celery.conf.CELERYD_POOL)
    return self.celery.Worker(**kwargs).run()
def run(self):
    self.log.info('Daemon %s starting up', type(self).__name__)
    freeze_support()
    kwargs = {}
    if self.options.daemon:
        kwargs['logfile'] = zenPath('log', 'zenjobs.log')
        kwargs['loglevel'] = self.options.logseverity
    kwargs["pool_cls"] = concurrency.get_implementation(
        kwargs.get("pool_cls") or current_app.conf.CELERYD_POOL)
    self.worker = CeleryZenWorker(**kwargs)
    self.worker.run()  # blocking call
    self.log.info('Daemon %s has shut down', type(self).__name__)
def run(self, *args, **kwargs): kwargs.pop("app", None) # Pools like eventlet/gevent needs to patch libs as early # as possible. kwargs["pool_cls"] = concurrency.get_implementation( kwargs.get("pool_cls") or self.app.conf.CELERYD_POOL) if self.app.IS_WINDOWS and kwargs.get("beat"): self.die("-B option does not work on Windows. " "Please run celerybeat as a separate service.") loglevel = kwargs.get("loglevel") if loglevel: try: kwargs["loglevel"] = mlevel(loglevel) except KeyError: # pragma: no cover self.die("Unknown level %r. Please use one of %s." % ( loglevel, "|".join(l for l in LOG_LEVELS.keys() if isinstance(l, basestring)))) return self.app.Worker(**kwargs).run()
def run(self, hostname=None, pool_cls=None, loglevel=None, app=None, **kwargs):
    # Pools like eventlet/gevent needs to patch libs as early
    # as possible.
    pool_cls = concurrency.get_implementation(pool_cls) or self.app.conf.CELERYD_POOL
    if self.app.IS_WINDOWS and kwargs.get("beat"):
        self.die("-B option does not work on Windows. "
                 "Please run celery beat as a separate service.")
    hostname = self.simple_format(hostname)
    if loglevel:
        try:
            loglevel = mlevel(loglevel)
        except KeyError:  # pragma: no cover
            self.die(
                "Unknown level {0!r}. Please use one of {1}.".format(
                    loglevel,
                    "|".join(l for l in LOG_LEVELS if isinstance(l, string_t)),
                )
            )
    return self.app.Worker(hostname=hostname, pool_cls=pool_cls,
                           loglevel=loglevel, **kwargs).start()
def run(self, *args, **kwargs):
    kwargs.pop('app', None)
    # Pools like eventlet/gevent needs to patch libs as early
    # as possible.
    kwargs['pool_cls'] = concurrency.get_implementation(
        kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL)
    if self.app.IS_WINDOWS and kwargs.get('beat'):
        self.die('-B option does not work on Windows. '
                 'Please run celerybeat as a separate service.')
    loglevel = kwargs.get('loglevel')
    if loglevel:
        try:
            kwargs['loglevel'] = mlevel(loglevel)
        except KeyError:  # pragma: no cover
            self.die('Unknown level {0!r}. Please use one of {1}.'.format(
                loglevel, '|'.join(l for l in LOG_LEVELS.keys()
                                   if isinstance(l, basestring))))
    return self.app.Worker(**kwargs).run()
def setup_instance(self, queues=None, ready_callback=None, pidfile=None,
                   include=None, use_eventloop=None, exclude_queues=None,
                   **kwargs):
    self.pidfile = pidfile
    self.setup_queues(queues, exclude_queues)
    self.setup_includes(str_to_list(include))

    # Set default concurrency
    if not self.concurrency:
        try:
            self.concurrency = cpu_count()
        except NotImplementedError:
            self.concurrency = 2

    # Options
    self.loglevel = mlevel(self.loglevel)
    self.ready_callback = ready_callback or self.on_consumer_ready
    # this connection is not established, only used for params
    self._conninfo = self.app.connection()
    self.use_eventloop = (self.should_use_eventloop()
                          if use_eventloop is None else use_eventloop)
    self.options = kwargs

    signals.worker_init.send(sender=self)

    # Initialize bootsteps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.steps = []
    self.on_init_blueprint()
    self.blueprint = self.Blueprint(app=self.app,
                                    on_start=self.on_start,
                                    on_close=self.on_close,
                                    on_stopped=self.on_stopped)
    self.blueprint.apply(self, **kwargs)
def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, pidfile=None, **kwargs):
    self.app = app_or_default(app or self.app)

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app. So for the worker
    # we set this to always return our app. This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    set_default_app(self.app)
    self.app.finalize()
    trace._tasks = self.app._tasks

    self._shutdown_complete = Event()
    self.setup_defaults(kwargs, namespace='celeryd')
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.pidfile = pidfile
    self.pidlock = None
    self.use_eventloop = (detect_environment() == 'default' and
                          self.app.broker_connection().is_evented and
                          not self.app.IS_WINDOWS)

    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    task_modules = set(task.__class__.__module__
                       for task in self.app.tasks.itervalues())
    self.app.conf.CELERY_INCLUDE = tuple(
        set(self.app.conf.CELERY_INCLUDE) | task_modules,
    )

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)
def setup_instance(self, queues=None, ready_callback=None, pidfile=None,
                   include=None, use_eventloop=None, exclude_queues=None,
                   **kwargs):
    self.pidfile = pidfile
    self.setup_queues(queues, exclude_queues)
    self.setup_includes(str_to_list(include))

    # Set default concurrency
    if not self.concurrency:
        try:
            self.concurrency = cpu_count()
        except NotImplementedError:
            self.concurrency = 2

    # Options
    self.loglevel = mlevel(self.loglevel)
    self.ready_callback = ready_callback or self.on_consumer_ready
    # this connection won't establish, only used for params
    self._conninfo = self.app.connection_for_read()
    self.use_eventloop = (
        self.should_use_eventloop() if use_eventloop is None
        else use_eventloop
    )
    self.options = kwargs

    signals.worker_init.send(sender=self)

    # Initialize bootsteps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.steps = []
    self.on_init_blueprint()
    self.blueprint = self.Blueprint(
        steps=self.app.steps['worker'],
        on_start=self.on_start,
        on_close=self.on_close,
        on_stopped=self.on_stopped,
    )
    self.blueprint.apply(self, **kwargs)
def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
        loglevel=None, logfile=None, pidfile=None, statedb=None, **kwargs):
    maybe_drop_privileges(uid=uid, gid=gid)
    # Pools like eventlet/gevent needs to patch libs as early
    # as possible.
    pool_cls = concurrency.get_implementation(
        pool_cls) or self.app.conf.worker_pool
    if self.app.IS_WINDOWS and kwargs.get("beat"):
        self.die("-B option does not work on Windows. "
                 "Please run celery beat as a separate service.")
    hostname = self.host_format(default_nodename(hostname))
    if loglevel:
        try:
            loglevel = mlevel(loglevel)
        except KeyError:  # pragma: no cover
            self.die("Unknown level {0!r}. Please use one of {1}.".format(
                loglevel,
                "|".join(l for l in LOG_LEVELS if isinstance(l, string_t)),
            ))

    worker = self.app.Worker(
        hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
        logfile=logfile,  # node format handled by celery.app.log.setup
        pidfile=self.node_format(pidfile, hostname),
        statedb=self.node_format(statedb, hostname),
        **kwargs)
    worker.start()
    return worker.exitcode
def create_pool(self, queue, pool_cls=None, loglevel=None, logfile=None,
                pidfile=None, state_db=None):
    kwargs = {
        'autoscale': '200,1',
        'queues': queue,
    }
    pool_cls = (concurrency.get_implementation(pool_cls) or
                self.app.conf.CELERYD_POOL)
    hostname = '%s::%s' % (self.hostname, queue)

    pool = self.app.Worker(
        hostname=hostname,
        pool_cls=pool_cls,
        loglevel=loglevel,
        logfile=logfile,
        pidfile=node_format(pidfile, hostname),
        state_db=node_format(state_db, hostname),
        without_mingle=False,
        **kwargs
    )
    start_daemon_thread(pool.start)
    if self.check_pool_start(hostname):
        return {'name': hostname, 'pool': pool}
    else:
        raise CouldNotStartException('Work Pool')
def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, pidfile=None, **kwargs):
    self.app = app_or_default(app or self.app)

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app. So for the worker
    # we set this to always return our app. This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    set_default_app(self.app)

    self._shutdown_complete = Event()
    self.setup_defaults(kwargs, namespace="celeryd")
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.pidfile = pidfile
    self.pidlock = None
    self.use_eventloop = (detect_environment() == "default" and
                          self.app.broker_connection().is_evented)

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)
def __init__(self, concurrency=None, logfile=None, loglevel=None,
             send_events=None, hostname=None, ready_callback=noop,
             embed_clockservice=False, pool_cls=None, consumer_cls=None,
             mediator_cls=None, eta_scheduler_cls=None,
             schedule_filename=None, task_time_limit=None,
             task_soft_time_limit=None, max_tasks_per_child=None,
             pool_putlocks=None, db=None, prefetch_multiplier=None,
             eta_scheduler_precision=None, disable_rate_limits=None,
             autoscale=None, autoscaler_cls=None, scheduler_cls=None,
             app=None):
    self.app = app_or_default(app)
    conf = self.app.conf

    # Options
    self.loglevel = loglevel or self.loglevel
    self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
    self.logfile = logfile or conf.CELERYD_LOG_FILE
    self.logger = self.app.log.get_default_logger()
    if send_events is None:
        send_events = conf.CELERY_SEND_EVENTS
    self.send_events = send_events
    self.pool_cls = _concurrency.get_implementation(
        pool_cls or conf.CELERYD_POOL)
    self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
    self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
    self.eta_scheduler_cls = eta_scheduler_cls or \
        conf.CELERYD_ETA_SCHEDULER
    self.autoscaler_cls = autoscaler_cls or \
        conf.CELERYD_AUTOSCALER
    self.schedule_filename = schedule_filename or \
        conf.CELERYBEAT_SCHEDULE_FILENAME
    self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
    self.hostname = hostname or socket.gethostname()
    self.embed_clockservice = embed_clockservice
    self.ready_callback = ready_callback
    self.task_time_limit = task_time_limit or \
        conf.CELERYD_TASK_TIME_LIMIT
    self.task_soft_time_limit = task_soft_time_limit or \
        conf.CELERYD_TASK_SOFT_TIME_LIMIT
    self.max_tasks_per_child = max_tasks_per_child or \
        conf.CELERYD_MAX_TASKS_PER_CHILD
    self.pool_putlocks = pool_putlocks or \
        conf.CELERYD_POOL_PUTLOCKS
    self.eta_scheduler_precision = eta_scheduler_precision or \
        conf.CELERYD_ETA_SCHEDULER_PRECISION
    self.prefetch_multiplier = prefetch_multiplier or \
        conf.CELERYD_PREFETCH_MULTIPLIER
    self.timer_debug = SilenceRepeated(self.logger.debug,
                                       max_iterations=10)
    self.db = db or conf.CELERYD_STATE_DB
    self.disable_rate_limits = disable_rate_limits or \
        conf.CELERY_DISABLE_RATE_LIMITS

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._finalize_db = None

    if self.db:
        persistence = state.Persistent(self.db)
        self._finalize_db = Finalize(persistence, persistence.save,
                                     exitpriority=5)

    # Queues
    if self.disable_rate_limits:
        self.ready_queue = FastQueue()
        self.ready_queue.put = self.process_task
    else:
        self.ready_queue = TaskBucket(task_registry=registry.tasks)

    self.logger.debug("Instantiating thread components...")

    # Threads + Pool + Consumer
    self.autoscaler = None
    max_concurrency = None
    min_concurrency = concurrency
    if autoscale:
        max_concurrency, min_concurrency = autoscale

    self.pool = instantiate(self.pool_cls, min_concurrency,
                            logger=self.logger,
                            initializer=process_initializer,
                            initargs=(self.app, self.hostname),
                            maxtasksperchild=self.max_tasks_per_child,
                            timeout=self.task_time_limit,
                            soft_timeout=self.task_soft_time_limit,
                            putlocks=self.pool_putlocks)
    self.priority_timer = instantiate(self.pool.Timer)

    if not self.eta_scheduler_cls:
        # Default Timer is set by the pool, as e.g. eventlet
        # needs a custom implementation.
        self.eta_scheduler_cls = self.pool.Timer

    if autoscale:
        self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
                                      max_concurrency=max_concurrency,
                                      min_concurrency=min_concurrency,
                                      logger=self.logger)

    self.mediator = None
    if not self.disable_rate_limits:
        self.mediator = instantiate(self.mediator_cls, self.ready_queue,
                                    app=self.app,
                                    callback=self.process_task,
                                    logger=self.logger)
    self.scheduler = instantiate(self.eta_scheduler_cls,
                                 precision=eta_scheduler_precision,
                                 on_error=self.on_timer_error,
                                 on_tick=self.on_timer_tick)

    self.beat = None
    if self.embed_clockservice:
        self.beat = beat.EmbeddedService(
            app=self.app,
            logger=self.logger,
            schedule_filename=self.schedule_filename,
            scheduler_cls=self.scheduler_cls)

    prefetch_count = self.concurrency * self.prefetch_multiplier
    self.consumer = instantiate(self.consumer_cls,
                                self.ready_queue,
                                self.scheduler,
                                logger=self.logger,
                                hostname=self.hostname,
                                send_events=self.send_events,
                                init_callback=self.ready_callback,
                                initial_prefetch_count=prefetch_count,
                                pool=self.pool,
                                priority_timer=self.priority_timer,
                                app=self.app)

    # The order is important here;
    # the first in the list is the first to start,
    # and they must be stopped in reverse order.
    self.components = filter(None, (self.pool,
                                    self.mediator,
                                    self.scheduler,
                                    self.beat,
                                    self.autoscaler,
                                    self.consumer))
def __init__(
    self, concurrency=None, logfile=None, loglevel=None, send_events=None,
    hostname=None, ready_callback=noop, embed_clockservice=False,
    pool_cls=None, consumer_cls=None, mediator_cls=None,
    eta_scheduler_cls=None, schedule_filename=None, task_time_limit=None,
    task_soft_time_limit=None, max_tasks_per_child=None, pool_putlocks=None,
    db=None, prefetch_multiplier=None, eta_scheduler_precision=None,
    queues=None, disable_rate_limits=None, autoscale=None,
    autoscaler_cls=None, scheduler_cls=None, app=None,
):
    self.app = app_or_default(app)
    conf = self.app.conf

    # Options
    self.loglevel = loglevel or self.loglevel
    self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
    self.logfile = logfile or conf.CELERYD_LOG_FILE
    self.logger = self.app.log.get_default_logger()
    if send_events is None:
        send_events = conf.CELERY_SEND_EVENTS
    self.send_events = send_events
    self.pool_cls = _concurrency.get_implementation(pool_cls or conf.CELERYD_POOL)
    self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
    self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
    self.eta_scheduler_cls = eta_scheduler_cls or conf.CELERYD_ETA_SCHEDULER
    self.autoscaler_cls = autoscaler_cls or conf.CELERYD_AUTOSCALER
    self.schedule_filename = schedule_filename or conf.CELERYBEAT_SCHEDULE_FILENAME
    self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
    self.hostname = hostname or socket.gethostname()
    self.embed_clockservice = embed_clockservice
    self.ready_callback = ready_callback
    self.task_time_limit = task_time_limit or conf.CELERYD_TASK_TIME_LIMIT
    self.task_soft_time_limit = task_soft_time_limit or conf.CELERYD_TASK_SOFT_TIME_LIMIT
    self.max_tasks_per_child = max_tasks_per_child or conf.CELERYD_MAX_TASKS_PER_CHILD
    self.pool_putlocks = pool_putlocks or conf.CELERYD_POOL_PUTLOCKS
    self.eta_scheduler_precision = eta_scheduler_precision or conf.CELERYD_ETA_SCHEDULER_PRECISION
    self.prefetch_multiplier = prefetch_multiplier or conf.CELERYD_PREFETCH_MULTIPLIER
    self.timer_debug = SilenceRepeated(self.logger.debug, max_iterations=10)
    self.db = db or conf.CELERYD_STATE_DB
    self.disable_rate_limits = disable_rate_limits or conf.CELERY_DISABLE_RATE_LIMITS

    # FIXME
    # For some reason disable rate limits does not work currently,
    # needs to be fixed for v2.2.0.
    self.disable_rate_limits = False

    self.queues = queues

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._finalize_db = None

    if self.db:
        persistence = state.Persistent(self.db)
        self._finalize_db = Finalize(persistence, persistence.save, exitpriority=5)

    # Queues
    if self.disable_rate_limits:
        self.ready_queue = FastQueue()
        self.ready_queue.put = self.process_task
    else:
        self.ready_queue = TaskBucket(task_registry=registry.tasks)

    self.logger.debug("Instantiating thread components...")

    # Threads + Pool + Consumer
    self.autoscaler = None
    max_concurrency = None
    min_concurrency = concurrency
    if autoscale:
        max_concurrency, min_concurrency = autoscale

    self.pool = instantiate(
        self.pool_cls,
        min_concurrency,
        logger=self.logger,
        initializer=process_initializer,
        initargs=(self.app, self.hostname),
        maxtasksperchild=self.max_tasks_per_child,
        timeout=self.task_time_limit,
        soft_timeout=self.task_soft_time_limit,
        putlocks=self.pool_putlocks,
    )

    if not self.eta_scheduler_cls:
        # Default Timer is set by the pool, as e.g. eventlet
        # needs a custom implementation.
        self.eta_scheduler_cls = self.pool.Timer

    if autoscale:
        self.autoscaler = instantiate(
            self.autoscaler_cls,
            self.pool,
            max_concurrency=max_concurrency,
            min_concurrency=min_concurrency,
            logger=self.logger,
        )

    self.mediator = None
    if not self.disable_rate_limits:
        self.mediator = instantiate(
            self.mediator_cls, self.ready_queue, app=self.app,
            callback=self.process_task, logger=self.logger,
        )
    self.scheduler = instantiate(
        self.eta_scheduler_cls,
        precision=eta_scheduler_precision,
        on_error=self.on_timer_error,
        on_tick=self.on_timer_tick,
    )

    self.beat = None
    if self.embed_clockservice:
        self.beat = beat.EmbeddedService(
            app=self.app,
            logger=self.logger,
            schedule_filename=self.schedule_filename,
            scheduler_cls=self.scheduler_cls,
        )

    prefetch_count = self.concurrency * self.prefetch_multiplier
    self.consumer = instantiate(
        self.consumer_cls,
        self.ready_queue,
        self.scheduler,
        logger=self.logger,
        hostname=self.hostname,
        send_events=self.send_events,
        init_callback=self.ready_callback,
        initial_prefetch_count=prefetch_count,
        pool=self.pool,
        queues=self.queues,
        app=self.app,
    )

    # The order is important here;
    # the first in the list is the first to start,
    # and they must be stopped in reverse order.
    self.components = filter(
        None,
        (self.pool, self.mediator, self.scheduler, self.beat,
         self.autoscaler, self.consumer),
    )
def convert(self, value, param, ctx):
    # Pools like eventlet/gevent needs to patch libs as early
    # as possible.
    return concurrency.get_implementation(
        value) or ctx.obj.app.conf.worker_pool
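This last variant is the body of a click `ParamType.convert` hook, so the pool alias is resolved while the command line is still being parsed. A minimal, self-contained sketch of how such a converter could be attached to a -P/--pool option (the `PoolOption` class name and the option wiring are assumptions for illustration, not Celery's actual CLI):

import click

from celery import concurrency


class PoolOption(click.ParamType):
    """Hypothetical param type resolving a pool alias to its implementation."""

    name = 'pool'

    def convert(self, value, param, ctx):
        # Resolve e.g. 'eventlet' or 'gevent' to the pool class as early
        # as possible, so patching can happen before the worker starts.
        return concurrency.get_implementation(value)


@click.command()
@click.option('-P', '--pool', type=PoolOption(), default='prefork',
              help='Worker pool implementation.')
def worker(pool):
    click.echo('using pool: {0!r}'.format(pool))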