def __init__(self, *args, **kwargs):
    self._dirty = set()
    self._finalize = Finalize(self, self.sync, exitpriority=5)
    Scheduler.__init__(self, *args, **kwargs)
    self.max_interval = (kwargs.get('max_interval') or
                         self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or
                         DEFAULT_MAX_INTERVAL)

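# A minimal standalone sketch (class and names assumed, not taken from
# the snippets here) of the multiprocessing.util.Finalize pattern used
# throughout this collection: the callback is registered once, keyed to
# the object, and callbacks carrying an exitpriority run at interpreter
# exit, highest priority first, so a sync registered at exitpriority=5
# runs before a stop registered at exitpriority=1.
from multiprocessing.util import Finalize


class Store(object):

    def __init__(self):
        self._dirty = set()
        # Runs self.sync at most once, at interpreter exit; the bound
        # method keeps the object alive until then.
        self._finalize = Finalize(self, self.sync, exitpriority=5)

    def sync(self):
        print('syncing %d dirty entries' % len(self._dirty))
        self._dirty.clear()


store = Store()
store._dirty.add('entry')
# No explicit cleanup needed: sync() fires automatically at exit.
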
def __init__(self, loglevel=None, hostname=None, logger=None,
             ready_callback=noop, queues=None, app=None, **kwargs):
    self.app = app_or_default(app)
    self._shutdown_complete = threading.Event()
    self.setup_defaults(kwargs, namespace="celeryd")
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.logger = self.app.log.get_default_logger()
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self.timer_debug = SilenceRepeated(self.logger.debug,
                                       max_iterations=10)
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._finalize_db = None

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app,
                               logger=self.logger).apply(self, **kwargs)

def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, pidfile=None, **kwargs):
    self.app = app_or_default(app or self.app)
    self._shutdown_complete = Event()
    self.setup_defaults(kwargs, namespace='celeryd')
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.pidfile = pidfile
    self.pidlock = None
    self.use_eventloop = self.should_use_eventloop()

    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    task_modules = set(task.__class__.__module__
                       for task in self.app.tasks.itervalues())
    self.app.conf.CELERY_INCLUDE = tuple(
        set(self.app.conf.CELERY_INCLUDE) | task_modules,
    )

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)

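# A hedged reconstruction of the should_use_eventloop() predicate called
# above, based on the inline expression that appears in a later variant
# of this constructor; only the method packaging is assumed.  The event
# loop is used only in the 'default' execution environment (no
# eventlet/gevent monkey-patching), with a transport that supports
# asynchronous I/O, and never on Windows.
def should_use_eventloop(self):
    return (detect_environment() == 'default' and
            self.app.broker_connection().is_evented and
            not self.app.IS_WINDOWS)
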
def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, **kwargs):
    self.app = app_or_default(app or self.app)
    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app.  So for the worker
    # we set this to always return our app.  This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    set_default_app(self.app)
    self._shutdown_complete = threading.Event()
    self.setup_defaults(kwargs, namespace="celeryd")
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._finalize_db = None

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)

def __init__(self, app=None, hostname=None, **kwargs):
    self.app = app_or_default(app or self.app)
    self.hostname = hostname or socket.gethostname()
    self.app.loader.init_worker()
    self.on_before_init(**kwargs)

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.setup_instance(**self.prepare_args(**kwargs))

def __init__(self, channel, prefetch_count=0):
    self.channel = channel
    self.prefetch_count = prefetch_count or 0

    self._delivered = OrderedDict()
    self._delivered.restored = False
    self._on_collect = Finalize(self, self.restore_unacked_once,
                                exitpriority=1)

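# A hedged sketch of the restore-once pattern behind the QoS object
# above: since the Finalize callback may be invoked from more than one
# shutdown path, restore_unacked_once() guards itself with a flag on
# the _delivered mapping so unacknowledged messages are requeued at
# most once.  restore_unacked() and its requeue step are illustrative
# stand-ins, not kombu's actual implementation.
from collections import OrderedDict


class Restorer(object):

    def __init__(self):
        self._delivered = OrderedDict()
        self._delivered.restored = False  # OrderedDict allows attributes

    def restore_unacked(self):
        # Requeue everything still marked delivered-but-unacknowledged.
        while self._delivered:
            tag, message = self._delivered.popitem()
            print('requeueing %r' % (tag, ))

    def restore_unacked_once(self):
        # Idempotent wrapper, safe as a Finalize callback.
        if not self._delivered.restored:
            try:
                self.restore_unacked()
            finally:
                self._delivered.restored = True
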
def __init__(self, app=None, hostname=None, **kwargs):
    self.app = app_or_default(app or self.app)
    setup_worker_optimizations(self.app)
    self.hostname = hostname or socket.gethostname()
    self.on_before_init(**kwargs)

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._shutdown_complete = Event()
    self.setup_instance(**self.prepare_args(**kwargs))

def __init__(self, drain_events):
    self.drain_events = drain_events
    self.inbound = _Queue()
    self.mutex = Lock()
    self.poll_request = Condition(self.mutex)
    self.shutdown = Event()
    self.stopped = Event()
    self._on_collect = Finalize(self, self.close)
    Thread.__init__(self)
    self.setDaemon(False)
    self.started = False

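# A minimal runnable sketch (class name and loop body assumed) of the
# coordination pattern above: a Condition built on a shared Lock lets
# callers request a poll, while separate shutdown/stopped Events let
# stop() signal the loop and then wait for it to actually finish.
import threading


class Poller(threading.Thread):

    def __init__(self, drain_events):
        threading.Thread.__init__(self)
        self.drain_events = drain_events
        self.mutex = threading.Lock()
        self.poll_request = threading.Condition(self.mutex)
        self.shutdown = threading.Event()
        self.stopped = threading.Event()
        self.daemon = False

    def run(self):
        while not self.shutdown.is_set():
            with self.mutex:
                # Wake on request_poll(), or poll once per second.
                self.poll_request.wait(timeout=1.0)
            self.drain_events()
        self.stopped.set()

    def request_poll(self):
        with self.mutex:
            self.poll_request.notify()

    def stop(self):
        self.shutdown.set()
        self.request_poll()   # unblock the wait
        self.stopped.wait()   # block until run() has exited


poller = Poller(lambda: None)
poller.start()
poller.request_poll()
poller.stop()
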
def __init__(self, app=None, hostname=None, **kwargs):
    self.app = app_or_default(app or self.app)
    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app.  So for the worker
    # we set this to always return our app.  This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    set_default_app(self.app)
    self.app.finalize()
    trace._tasks = self.app._tasks  # optimization
    self.hostname = hostname or socket.gethostname()
    self.on_before_init(**kwargs)

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._shutdown_complete = Event()
    self.setup_instance(**self.prepare_args(**kwargs))

def __init__(self, loglevel=None, hostname=None, ready_callback=noop,
             queues=None, app=None, pidfile=None, **kwargs):
    self.app = app_or_default(app or self.app)
    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app.  So for the worker
    # we set this to always return our app.  This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    set_default_app(self.app)
    self.app.finalize()
    trace._tasks = self.app._tasks
    self._shutdown_complete = Event()
    self.setup_defaults(kwargs, namespace='celeryd')
    self.app.select_queues(queues)  # select queues subset.

    # Options
    self.loglevel = loglevel or self.loglevel
    self.hostname = hostname or socket.gethostname()
    self.ready_callback = ready_callback
    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self.pidfile = pidfile
    self.pidlock = None
    self.use_eventloop = (detect_environment() == 'default' and
                          self.app.broker_connection().is_evented and
                          not self.app.IS_WINDOWS)

    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    task_modules = set(task.__class__.__module__
                       for task in self.app.tasks.itervalues())
    self.app.conf.CELERY_INCLUDE = tuple(
        set(self.app.conf.CELERY_INCLUDE) | task_modules,
    )

    # Initialize boot steps
    self.pool_cls = _concurrency.get_implementation(self.pool_cls)
    self.components = []
    self.namespace = Namespace(app=self.app).apply(self, **kwargs)

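# A standalone sketch (toy registry, not Celery's app.tasks) of the
# CELERY_INCLUDE update above: record the defining module of every
# registered task class so that a re-exec'd child process imports those
# modules again and re-registers the tasks.
class AddTask(object):
    pass


tasks = {'tasks.add': AddTask()}             # stand-in for app.tasks

CELERY_INCLUDE = ('celery.app.builtins', )   # existing setting
task_modules = set(task.__class__.__module__ for task in tasks.values())
CELERY_INCLUDE = tuple(set(CELERY_INCLUDE) | task_modules)
print(CELERY_INCLUDE)   # now also names the module defining AddTask
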
def __init__(self, concurrency=None, logfile=None, loglevel=None,
             send_events=None, hostname=None, ready_callback=noop,
             embed_clockservice=False, pool_cls=None, consumer_cls=None,
             mediator_cls=None, eta_scheduler_cls=None,
             schedule_filename=None, task_time_limit=None,
             task_soft_time_limit=None, max_tasks_per_child=None,
             pool_putlocks=None, db=None, prefetch_multiplier=None,
             eta_scheduler_precision=None, disable_rate_limits=None,
             autoscale=None, autoscaler_cls=None, scheduler_cls=None,
             app=None):
    self.app = app_or_default(app)
    conf = self.app.conf

    # Options
    self.loglevel = loglevel or self.loglevel
    self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
    self.logfile = logfile or conf.CELERYD_LOG_FILE
    self.logger = self.app.log.get_default_logger()
    if send_events is None:
        send_events = conf.CELERY_SEND_EVENTS
    self.send_events = send_events
    self.pool_cls = _concurrency.get_implementation(
                        pool_cls or conf.CELERYD_POOL)
    self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
    self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
    self.eta_scheduler_cls = eta_scheduler_cls or \
                                conf.CELERYD_ETA_SCHEDULER
    self.autoscaler_cls = autoscaler_cls or \
                                conf.CELERYD_AUTOSCALER
    self.schedule_filename = schedule_filename or \
                                conf.CELERYBEAT_SCHEDULE_FILENAME
    self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
    self.hostname = hostname or socket.gethostname()
    self.embed_clockservice = embed_clockservice
    self.ready_callback = ready_callback
    self.task_time_limit = task_time_limit or \
                                conf.CELERYD_TASK_TIME_LIMIT
    self.task_soft_time_limit = task_soft_time_limit or \
                                conf.CELERYD_TASK_SOFT_TIME_LIMIT
    self.max_tasks_per_child = max_tasks_per_child or \
                                conf.CELERYD_MAX_TASKS_PER_CHILD
    self.pool_putlocks = pool_putlocks or \
                                conf.CELERYD_POOL_PUTLOCKS
    self.eta_scheduler_precision = eta_scheduler_precision or \
                                conf.CELERYD_ETA_SCHEDULER_PRECISION
    self.prefetch_multiplier = prefetch_multiplier or \
                                conf.CELERYD_PREFETCH_MULTIPLIER
    self.timer_debug = SilenceRepeated(self.logger.debug,
                                       max_iterations=10)
    self.db = db or conf.CELERYD_STATE_DB
    self.disable_rate_limits = disable_rate_limits or \
                                conf.CELERY_DISABLE_RATE_LIMITS

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._finalize_db = None

    if self.db:
        persistence = state.Persistent(self.db)
        self._finalize_db = Finalize(persistence, persistence.save,
                                     exitpriority=5)

    # Queues
    if self.disable_rate_limits:
        self.ready_queue = FastQueue()
        self.ready_queue.put = self.process_task
    else:
        self.ready_queue = TaskBucket(task_registry=registry.tasks)

    self.logger.debug("Instantiating thread components...")

    # Threads + Pool + Consumer
    self.autoscaler = None
    max_concurrency = None
    min_concurrency = concurrency
    if autoscale:
        max_concurrency, min_concurrency = autoscale

    self.pool = instantiate(self.pool_cls, min_concurrency,
                            logger=self.logger,
                            initializer=process_initializer,
                            initargs=(self.app, self.hostname),
                            maxtasksperchild=self.max_tasks_per_child,
                            timeout=self.task_time_limit,
                            soft_timeout=self.task_soft_time_limit,
                            putlocks=self.pool_putlocks)
    self.priority_timer = instantiate(self.pool.Timer)

    if not self.eta_scheduler_cls:
        # Default Timer is set by the pool, as e.g. eventlet
        # needs a custom implementation.
        self.eta_scheduler_cls = self.pool.Timer

    if autoscale:
        self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
                                      max_concurrency=max_concurrency,
                                      min_concurrency=min_concurrency,
                                      logger=self.logger)

    self.mediator = None
    if not self.disable_rate_limits:
        self.mediator = instantiate(self.mediator_cls, self.ready_queue,
                                    app=self.app,
                                    callback=self.process_task,
                                    logger=self.logger)

    self.scheduler = instantiate(self.eta_scheduler_cls,
                                 precision=eta_scheduler_precision,
                                 on_error=self.on_timer_error,
                                 on_tick=self.on_timer_tick)

    self.beat = None
    if self.embed_clockservice:
        self.beat = beat.EmbeddedService(
                        app=self.app,
                        logger=self.logger,
                        schedule_filename=self.schedule_filename,
                        scheduler_cls=self.scheduler_cls)

    prefetch_count = self.concurrency * self.prefetch_multiplier
    self.consumer = instantiate(self.consumer_cls,
                                self.ready_queue,
                                self.scheduler,
                                logger=self.logger,
                                hostname=self.hostname,
                                send_events=self.send_events,
                                init_callback=self.ready_callback,
                                initial_prefetch_count=prefetch_count,
                                pool=self.pool,
                                priority_timer=self.priority_timer,
                                app=self.app)

    # The order is important here;
    # the first in the list is the first to start,
    # and they must be stopped in reverse order.
    self.components = filter(None, (self.pool,
                                    self.mediator,
                                    self.scheduler,
                                    self.beat,
                                    self.autoscaler,
                                    self.consumer))

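# A minimal sketch (assumed component interface) of the ordering
# contract stated in the comment above: components start in list order
# and stop in reverse, so the pool is started first and shut down last.
class Component(object):

    def __init__(self, name):
        self.name = name

    def start(self):
        print('starting %s' % self.name)

    def stop(self):
        print('stopping %s' % self.name)


components = [Component('pool'), Component('mediator'),
              Component('consumer')]

for component in components:
    component.start()

for component in reversed(components):
    component.stop()
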
def __init__(self, *args, **kwargs):
    self._dirty = set()
    self._finalize = Finalize(self, self.sync, exitpriority=5)
    Scheduler.__init__(self, *args, **kwargs)
    self.max_interval = 5