def on_worker_init(self):
    """Worker start-up hook.

    Runs :func:`celery.discovery.autodiscover` so that any ``tasks.py``
    module inside an application listed in ``INSTALLED_APPS`` is found
    and its tasks registered before the worker begins consuming.
    """
    from celery import discovery
    discovery.autodiscover()
def main(concurrency=DAEMON_CONCURRENCY, daemon=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, queue_wakeup_after=QUEUE_WAKEUP_AFTER):
    """Run the celery daemon.

    :param concurrency: number of worker processes (forced to ``1`` on
        sqlite3, which does not support concurrent writers).
    :param daemon: detach and run in the background, writing *pidfile*.
    :param loglevel: logging level for the worker.
    :param logfile: log file path; ignored (set to ``None`` → stderr)
        when not running as a daemon.
    :param discard: erase all waiting messages from the queue before
        starting.
    :param pidfile: pid file used (and removed at exit) when daemonizing.
    :param queue_wakeup_after: passed through to :class:`WorkController`.
    """
    # sqlite3 can't handle concurrent writers, so force a single process.
    if settings.DATABASE_ENGINE == "sqlite3" and concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                      "concurrency. We'll be using a single process only.",
                      UserWarning)
        concurrency = 1

    if discard:
        discarded_count = discard_all()
        # "1 message", but "0 messages" / "2 messages" — the old
        # ``> 1`` test mis-pluralized the zero case.
        what = "message" if discarded_count == 1 else "messages"
        sys.stderr.write("Discard: Erased %d %s from the queue.\n" % (
            discarded_count, what))

    if daemon:
        sys.stderr.write("Launching celeryd in the background...\n")
        # Refuse to start if another instance already holds the pid file.
        pidfile_handler = PIDFile(pidfile)
        pidfile_handler.check()
        daemonize(pidfile=pidfile_handler)
        atexit.register(remove_pidfile, pidfile)
    else:
        logfile = None  # log to stderr when not running as daemon.

    discovery.autodiscover()
    celeryd = WorkController(concurrency=concurrency,
                             loglevel=loglevel,
                             logfile=logfile,
                             queue_wakeup_after=queue_wakeup_after,
                             is_detached=daemon)
    try:
        celeryd.run()
    except Exception as e:
        # ``except ... as ...`` is valid on Python 2.6+ and Python 3,
        # unlike the old comma form.
        emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
            e.__class__, e, traceback.format_exc()))
def autodiscover(self):
    """Autodiscovers tasks using :func:`celery.discovery.autodiscover`.

    Registers any tasks found in the installed applications'
    ``tasks.py`` modules; the discovery call's return value is
    discarded.
    """
    discovery.autodiscover()
def assertDiscovery(self):
    """Assert that task auto-discovery finds the test app's task.

    Checks that :func:`autodiscover` returns a non-empty list of apps,
    and that ``c.unittest.SomeAppTask`` ends up registered in ``tasks``
    with a ``run()`` result of ``42``.
    """
    apps = autodiscover()
    self.assertTrue(apps)
    tasks.autodiscover()
    self.assertTrue("c.unittest.SomeAppTask" in tasks)
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(tasks["c.unittest.SomeAppTask"].run(), 42)
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
        supervised=False, working_directory=None, chroot=None,
        statistics=None, **kwargs):
    """Starts the celery worker server.

    :param concurrency: worker process count; falls back to the CPU
        count when falsy, and is forced to ``1`` on sqlite3.
    :param detach: run in the background via ``daemon.DaemonContext``.
    :param loglevel: numeric log level or a level name looked up in
        ``LOG_LEVELS``.
    :param logfile: log file path; set to ``None`` (stderr) when not
        detaching.
    :param discard: erase all waiting messages from the queue first.
    :param pidfile: pid-lock file acquired when detaching.
    :param umask: file-creation mask for the detached process.
    :param uid: uid to run as when detaching; defaults to the current
        effective uid.
    :param gid: gid to run as when detaching; defaults to the current
        effective gid.
    :param supervised: NOTE(review): not referenced in this excerpt —
        presumably consumed by code past the end of this view; confirm.
    :param working_directory: directory the daemon changes to; defaults
        to the current working directory.
    :param chroot: optional chroot jail directory for the daemon.
    :param statistics: when true, stored into
        ``settings.CELERY_STATISTICS``.
    """
    print("Celery %s is starting." % __version__)

    if statistics:
        settings.CELERY_STATISTICS = statistics

    # Default to one worker process per CPU core.
    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    # sqlite3 can't handle concurrent writers, so force a single process.
    if settings.DATABASE_ENGINE == "sqlite3" and concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                      "concurrency. We'll be using a single process only.",
                      UserWarning)
        concurrency = 1

    # Setup logging: accept either a numeric level or a level name.
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None  # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
            discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users sends e-mails.
    print(STARTUP_INFO_FMT % {
        "vhost": settings.AMQP_VHOST,
        "host": settings.AMQP_SERVER,
        "port": settings.AMQP_PORT,
        "exchange": conf.AMQP_EXCHANGE,
        "exchange_type": conf.AMQP_EXCHANGE_TYPE,
        "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
        "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
        "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
        "concurrency": concurrency,
        "loglevel": loglevel,
        "pidfile": pidfile,
        "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                    "This operating system doesn't support detach. ")
        from daemon import DaemonContext

        # Since without stderr any errors will be silently suppressed,
        # we need to know that we have access to the logfile
        if logfile:
            open(logfile, "a").close()

        pidlock = acquire_pidlock(pidfile)
        if not umask:
            # NOTE(review): no-op guard — a falsy umask is reset to 0.
            umask = 0
        # Default to the current effective uid/gid when none given;
        # string values are coerced to int.
        uid = uid and int(uid) or os.geteuid()
        gid = gid and int(gid) or os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()

    discovery.autodiscover()

    def run_worker():
        # Inner runner (shadows the outer name) closing over the
        # options computed above; presumably invoked — directly or via
        # a supervisor — by code past the end of this excerpt.
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)
        try:
            worker.start()
        except Exception, e:
            emergency_error(logfile,
                    "celeryd raised exception %s: %s\n%s" % (
                        e.__class__, e, traceback.format_exc()))