def test_start_stop_process(self):
    try:
        import _multiprocessing  # noqa
    except ImportError:
        raise SkipTest('multiprocessing not available')
    from billiard.process import Process

    s = beat.EmbeddedService(self.app)
    self.assertIsInstance(s, Process)
    self.assertIsInstance(s.service, beat.Service)
    s.service = MockService()

    class _Popen(object):
        terminated = False

        def terminate(self):
            self.terminated = True

    with patch('celery.platforms.close_open_fds'):
        s.run()
    self.assertTrue(s.service.started)

    s._popen = _Popen()
    s.stop()
    self.assertTrue(s.service.stopped)
    self.assertTrue(s._popen.terminated)
def test_start_stop_threaded(self):
    s = beat.EmbeddedService(self.app, thread=True)
    from threading import Thread
    assert isinstance(s, Thread)
    assert isinstance(s.service, beat.Service)
    s.service = MockService()

    s.run()
    assert s.service.started

    s.stop()
    assert s.service.stopped
def test_start_stop_threaded(self):
    s = beat.EmbeddedService(thread=True)
    from threading import Thread
    self.assertIsInstance(s, Thread)
    self.assertIsInstance(s.service, beat.Service)
    s.service = MockService()

    s.run()
    self.assertTrue(s.service.started)

    s.stop()
    self.assertTrue(s.service.stopped)
def test_start_stop_process(self):
    s = beat.EmbeddedService()
    from multiprocessing import Process
    self.assertIsInstance(s, Process)
    self.assertIsInstance(s.service, beat.Service)
    s.service = MockService()

    class _Popen(object):
        terminated = False

        def terminate(self):
            self.terminated = True

    s.run()
    self.assertTrue(s.service.started)

    s._popen = _Popen()
    s.stop()
    self.assertTrue(s.service.stopped)
    self.assertTrue(s._popen.terminated)
def xxx_start_stop_process(self):
    from billiard.process import Process

    s = beat.EmbeddedService(self.app)
    assert isinstance(s, Process)
    assert isinstance(s.service, beat.Service)
    s.service = MockService()

    class _Popen(object):
        terminated = False

        def terminate(self):
            self.terminated = True

    with patch('celery.platforms.close_open_fds'):
        s.run()
    assert s.service.started

    s._popen = _Popen()
    s.stop()
    assert s.service.stopped
    assert s._popen.terminated
def test_start_stop_process(self):
    try:
        from multiprocessing import Process
    except ImportError:
        raise SkipTest("multiprocessing not available")

    s = beat.EmbeddedService()
    self.assertIsInstance(s, Process)
    self.assertIsInstance(s.service, beat.Service)
    s.service = MockService()

    class _Popen(object):
        terminated = False

        def terminate(self):
            self.terminated = True

    s.run()
    self.assertTrue(s.service.started)

    s._popen = _Popen()
    s.stop()
    self.assertTrue(s.service.stopped)
    self.assertTrue(s._popen.terminated)
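# Minimal sketch of the test double the methods above rely on. The real test
# module defines its own MockService (plus the SkipTest/patch imports), so the
# names and signatures below are assumptions for illustration only. All the
# tests need is an object that records that start()/stop() were called, since
# they assert on .started/.stopped after s.run()/s.stop().
from celery import beat


class MockService(beat.Service):
    started = False
    stopped = False

    def __init__(self, *args, **kwargs):
        # Skip beat.Service.__init__ so no real scheduler or app is needed.
        pass

    def start(self, **kwargs):
        self.started = True

    def stop(self, **kwargs):
        self.stopped = True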
def __init__(self, concurrency=None, logfile=None, loglevel=None,
        send_events=None, hostname=None, ready_callback=noop,
        embed_clockservice=False, pool_cls=None, consumer_cls=None,
        mediator_cls=None, eta_scheduler_cls=None,
        schedule_filename=None, task_time_limit=None,
        task_soft_time_limit=None, max_tasks_per_child=None,
        pool_putlocks=None, db=None, prefetch_multiplier=None,
        eta_scheduler_precision=None, disable_rate_limits=None,
        autoscale=None, autoscaler_cls=None, scheduler_cls=None,
        app=None):
    self.app = app_or_default(app)
    conf = self.app.conf

    # Options
    self.loglevel = loglevel or self.loglevel
    self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
    self.logfile = logfile or conf.CELERYD_LOG_FILE
    self.logger = self.app.log.get_default_logger()
    if send_events is None:
        send_events = conf.CELERY_SEND_EVENTS
    self.send_events = send_events
    self.pool_cls = _concurrency.get_implementation(
                        pool_cls or conf.CELERYD_POOL)
    self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
    self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
    self.eta_scheduler_cls = eta_scheduler_cls or \
                                conf.CELERYD_ETA_SCHEDULER
    self.autoscaler_cls = autoscaler_cls or \
                                conf.CELERYD_AUTOSCALER
    self.schedule_filename = schedule_filename or \
                                conf.CELERYBEAT_SCHEDULE_FILENAME
    self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
    self.hostname = hostname or socket.gethostname()
    self.embed_clockservice = embed_clockservice
    self.ready_callback = ready_callback
    self.task_time_limit = task_time_limit or \
                                conf.CELERYD_TASK_TIME_LIMIT
    self.task_soft_time_limit = task_soft_time_limit or \
                                conf.CELERYD_TASK_SOFT_TIME_LIMIT
    self.max_tasks_per_child = max_tasks_per_child or \
                                conf.CELERYD_MAX_TASKS_PER_CHILD
    self.pool_putlocks = pool_putlocks or \
                                conf.CELERYD_POOL_PUTLOCKS
    self.eta_scheduler_precision = eta_scheduler_precision or \
                                conf.CELERYD_ETA_SCHEDULER_PRECISION
    self.prefetch_multiplier = prefetch_multiplier or \
                                conf.CELERYD_PREFETCH_MULTIPLIER
    self.timer_debug = SilenceRepeated(self.logger.debug,
                                       max_iterations=10)
    self.db = db or conf.CELERYD_STATE_DB
    self.disable_rate_limits = disable_rate_limits or \
                                conf.CELERY_DISABLE_RATE_LIMITS

    self._finalize = Finalize(self, self.stop, exitpriority=1)
    self._finalize_db = None

    if self.db:
        persistence = state.Persistent(self.db)
        self._finalize_db = Finalize(persistence, persistence.save,
                                     exitpriority=5)

    # Queues
    if self.disable_rate_limits:
        self.ready_queue = FastQueue()
        self.ready_queue.put = self.process_task
    else:
        self.ready_queue = TaskBucket(task_registry=registry.tasks)

    self.logger.debug("Instantiating thread components...")

    # Threads + Pool + Consumer
    self.autoscaler = None
    max_concurrency = None
    min_concurrency = concurrency
    if autoscale:
        max_concurrency, min_concurrency = autoscale

    self.pool = instantiate(self.pool_cls, min_concurrency,
                            logger=self.logger,
                            initializer=process_initializer,
                            initargs=(self.app, self.hostname),
                            maxtasksperchild=self.max_tasks_per_child,
                            timeout=self.task_time_limit,
                            soft_timeout=self.task_soft_time_limit,
                            putlocks=self.pool_putlocks)
    self.priority_timer = instantiate(self.pool.Timer)

    if not self.eta_scheduler_cls:
        # Default Timer is set by the pool, as e.g. eventlet
        # needs a custom implementation.
        self.eta_scheduler_cls = self.pool.Timer

    if autoscale:
        self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
                                      max_concurrency=max_concurrency,
                                      min_concurrency=min_concurrency,
                                      logger=self.logger)

    self.mediator = None
    if not self.disable_rate_limits:
        self.mediator = instantiate(self.mediator_cls, self.ready_queue,
                                    app=self.app,
                                    callback=self.process_task,
                                    logger=self.logger)

    self.scheduler = instantiate(self.eta_scheduler_cls,
                                 precision=eta_scheduler_precision,
                                 on_error=self.on_timer_error,
                                 on_tick=self.on_timer_tick)

    self.beat = None
    if self.embed_clockservice:
        self.beat = beat.EmbeddedService(
                        app=self.app,
                        logger=self.logger,
                        schedule_filename=self.schedule_filename,
                        scheduler_cls=self.scheduler_cls)

    prefetch_count = self.concurrency * self.prefetch_multiplier
    self.consumer = instantiate(self.consumer_cls,
                                self.ready_queue,
                                self.scheduler,
                                logger=self.logger,
                                hostname=self.hostname,
                                send_events=self.send_events,
                                init_callback=self.ready_callback,
                                initial_prefetch_count=prefetch_count,
                                pool=self.pool,
                                priority_timer=self.priority_timer,
                                app=self.app)

    # The order is important here;
    # the first in the list is the first to start,
    # and they must be stopped in reverse order.
    self.components = filter(None, (self.pool,
                                    self.mediator,
                                    self.scheduler,
                                    self.beat,
                                    self.autoscaler,
                                    self.consumer))
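# Illustrative sketch only (not part of the original source): how the ordering
# contract documented for self.components is typically honoured. The component
# objects and their start()/stop() methods are assumptions here; the worker's
# real start/stop logic lives in its own start()/stop() methods elsewhere.
def _run_components(components):
    started = []
    try:
        for component in components:
            component.start()       # first in the list starts first
            started.append(component)
    finally:
        for component in reversed(started):
            component.stop()        # stopped in reverse order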
def __init__(self, concurrency=None, logfile=None, loglevel=None,
        send_events=conf.SEND_EVENTS, hostname=None,
        ready_callback=noop, embed_clockservice=False,
        pool_cls=conf.CELERYD_POOL,
        listener_cls=conf.CELERYD_LISTENER,
        mediator_cls=conf.CELERYD_MEDIATOR,
        eta_scheduler_cls=conf.CELERYD_ETA_SCHEDULER,
        schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME,
        task_time_limit=conf.CELERYD_TASK_TIME_LIMIT,
        task_soft_time_limit=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
        max_tasks_per_child=conf.CELERYD_MAX_TASKS_PER_CHILD,
        pool_putlocks=conf.CELERYD_POOL_PUTLOCKS,
        disable_rate_limits=conf.DISABLE_RATE_LIMITS,
        db=conf.CELERYD_STATE_DB,
        scheduler_cls=conf.CELERYBEAT_SCHEDULER):

    # Options
    self.loglevel = loglevel or self.loglevel
    self.concurrency = concurrency or self.concurrency
    self.logfile = logfile or self.logfile
    self.logger = log.get_default_logger()
    self.hostname = hostname or socket.gethostname()
    self.embed_clockservice = embed_clockservice
    self.ready_callback = ready_callback
    self.send_events = send_events
    self.task_time_limit = task_time_limit
    self.task_soft_time_limit = task_soft_time_limit
    self.max_tasks_per_child = max_tasks_per_child
    self.pool_putlocks = pool_putlocks
    self.timer_debug = log.SilenceRepeated(self.logger.debug,
                                           max_iterations=10)
    self.db = db
    self._finalize = Finalize(self, self.stop, exitpriority=1)

    if self.db:
        persistence = state.Persistent(self.db)
        Finalize(persistence, persistence.save, exitpriority=5)

    # Queues
    if disable_rate_limits:
        self.ready_queue = FastQueue()
        self.ready_queue.put = self.process_task
    else:
        self.ready_queue = TaskBucket(task_registry=registry.tasks)

    self.logger.debug("Instantiating thread components...")

    # Threads + Pool + Consumer
    self.pool = instantiate(pool_cls, self.concurrency,
                            logger=self.logger,
                            initializer=process_initializer,
                            initargs=(self.hostname, ),
                            maxtasksperchild=self.max_tasks_per_child,
                            timeout=self.task_time_limit,
                            soft_timeout=self.task_soft_time_limit,
                            putlocks=self.pool_putlocks)

    self.mediator = None
    if not disable_rate_limits:
        self.mediator = instantiate(mediator_cls, self.ready_queue,
                                    callback=self.process_task,
                                    logger=self.logger)

    self.scheduler = instantiate(
                        eta_scheduler_cls,
                        precision=conf.CELERYD_ETA_SCHEDULER_PRECISION,
                        on_error=self.on_timer_error,
                        on_tick=self.on_timer_tick)

    self.beat = None
    if self.embed_clockservice:
        self.beat = beat.EmbeddedService(
                        logger=self.logger,
                        schedule_filename=schedule_filename,
                        scheduler_cls=scheduler_cls)

    prefetch_count = self.concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
    self.listener = instantiate(listener_cls,
                                self.ready_queue,
                                self.scheduler,
                                logger=self.logger,
                                hostname=self.hostname,
                                send_events=self.send_events,
                                init_callback=self.ready_callback,
                                initial_prefetch_count=prefetch_count,
                                pool=self.pool)

    # The order is important here;
    # the first in the list is the first to start,
    # and they must be stopped in reverse order.
    self.components = filter(None, (self.pool,
                                    self.mediator,
                                    self.scheduler,
                                    self.beat,
                                    self.listener))