Example #1
0
 def startup_info(self):
     """Return the startup banner text for this service.

     Interpolates the broker connection info, the log file
     (``@stderr`` when logging to standard error), the
     ``conf.LOG_LEVELS`` mapping of ``self.loglevel`` and the
     schedule filename into ``STARTUP_INFO_FMT``.
     """
     return STARTUP_INFO_FMT % {
         "conninfo": info.format_broker_info(),
         "logfile": self.logfile or "@stderr",
         "loglevel": conf.LOG_LEVELS[self.loglevel],
         "schedule": self.schedule,
     }
Example #2
0
def run_monitor(loglevel=conf.CELERYMON_LOG_LEVEL,
        logfile=conf.CELERYMON_LOG_FILE, http_port=8989, **kwargs):
    """Starts the celery monitor."""

    print("celerymon %s is starting." % celery.__version__)

    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]

    # Dump configuration to screen so we have some basic information
    # when users sends e-mails.
    print(STARTUP_INFO_FMT % {
            "http_port": http_port,
            "conninfo": info.format_broker_info(),
    })

    from celery.log import setup_logger, redirect_stdouts_to_logger
    print("celerymon has started.")
    arg_start = "manage" in sys.argv[0] and 2 or 1
    platform.set_process_title("celerymon",
                               info=" ".join(sys.argv[arg_start:]))

    def _run_monitor():
        logger = setup_logger(loglevel, logfile)
        monitor = MonitorService(logger=logger, http_port=http_port)
        try:
            monitor.start()
        except Exception, e:
            emergency_error(logfile,
                    "celerymon raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
        while(True):
            pass
 def startup_info(self):
     """Format and return the startup banner for display."""
     banner_context = {
         "conninfo": info.format_broker_info(),
         "logfile": self.logfile or "@stderr",
         "loglevel": conf.LOG_LEVELS[self.loglevel],
         "schedule": self.schedule,
     }
     return STARTUP_INFO_FMT % banner_context
Example #4
0
 def connect(self, conn=None):
     """Open and return a fresh broker connection.

     If *conn* is given (and truthy) it is closed first; progress is
     announced through ``self.say``.
     """
     if conn:
         conn.close()
     self.say("-> connecting to %s." % info.format_broker_info())
     fresh = establish_connection()
     fresh.connect()
     self.say("-> connected.")
     return fresh
Example #5
0
 def startup_info(self, beat):
     """Render the startup banner for the *beat* scheduler service."""
     scheduler = beat.scheduler
     context = {
         "conninfo": info.format_broker_info(),
         "logfile": self.logfile or "[stderr]",
         "loglevel": LOG_LEVELS[self.loglevel],
         "loader": get_full_cls_name(self.loader.__class__),
         "scheduler": get_full_cls_name(scheduler.__class__),
         "scheduler_info": scheduler.info,
         "hmax_interval": humanize_seconds(beat.max_interval),
         "max_interval": beat.max_interval,
     }
     return STARTUP_INFO_FMT % context
Example #6
0
    def startup_info(self):
        """Return the formatted startup banner for this worker."""
        # Task list is only shown at INFO and below; builtin tasks
        # are only included at DEBUG and below.
        tasklist = ""
        if self.loglevel <= logging.INFO:
            tasklist = self.tasklist(
                include_builtins=self.loglevel <= logging.DEBUG)

        banner = {
            "conninfo": info.format_broker_info(),
            "queues": info.format_routing_table(indent=8),
            "concurrency": self.concurrency,
            "loglevel": conf.LOG_LEVELS[self.loglevel],
            "logfile": self.logfile or "[stderr]",
            "celerybeat": "ON" if self.run_clockservice else "OFF",
            "events": "ON" if self.events else "OFF",
            "tasks": tasklist,
            "loader": get_full_cls_name(self.loader.__class__),
        }
        return STARTUP_INFO_FMT % banner
Example #7
0
def run_clockservice(loglevel=conf.CELERYBEAT_LOG_LEVEL,
        logfile=conf.CELERYBEAT_LOG_FILE,
        schedule=conf.CELERYBEAT_SCHEDULE_FILENAME, **kwargs):
    """Starts the celerybeat clock server.

    :keyword loglevel: Logging level; either an int or a level name,
        names are resolved through ``conf.LOG_LEVELS``.
    :keyword logfile: Path to the log file (banner shows ``@stderr``
        when no file is configured).
    :keyword schedule: Filename for the persistent schedule.

    """

    print("celerybeat %s is starting." % celery.__version__)

    # Setup logging: accept either a numeric level or a level name.
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    from celery.loaders import current_loader
    current_loader().init_worker()


    # Dump configuration to screen so we have some basic information
    # when users sends e-mails.

    print(STARTUP_INFO_FMT % {
            "conninfo": info.format_broker_info(),
            "logfile": logfile or "@stderr",
            "loglevel": conf.LOG_LEVELS[loglevel],
            "schedule": schedule,
    })

    print("celerybeat has started.")
    # When launched via ``manage.py`` the subcommand occupies argv[1],
    # so the arguments to show in the process title start at index 2.
    arg_start = "manage" in sys.argv[0] and 2 or 1
    platform.set_process_title("celerybeat",
                               info=" ".join(sys.argv[arg_start:]))

    # NOTE(review): ``_run_clock`` is defined but not invoked within
    # this excerpt -- presumably called by code past this point
    # (e.g. after daemonization); confirm against the full file.
    def _run_clock():
        from celery.log import setup_logger
        logger = setup_logger(loglevel, logfile)
        clockservice = ClockService(logger=logger, schedule_filename=schedule)

        try:
            install_sync_handler(clockservice)
            clockservice.start()
        except Exception, e:
            # Startup failed: write the traceback somewhere visible
            # even if logging itself is broken.
            emergency_error(logfile,
                    "celerybeat raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example #8
0
    def startup_info(self):
        """Build and return the worker's startup banner string."""
        tasklist = ""
        if self.loglevel <= logging.INFO:
            # Builtin (celery.*) tasks only appear at DEBUG and below.
            tasklist = self.tasklist(
                include_builtins=self.loglevel <= logging.DEBUG)

        queues = self.defaults.get_queues()

        banner = {
            "conninfo": info.format_broker_info(),
            "queues": info.format_queues(queues, indent=8),
            "concurrency": self.concurrency,
            "loglevel": LOG_LEVELS[self.loglevel],
            "logfile": self.logfile or "[stderr]",
            "celerybeat": "ON" if self.run_clockservice else "OFF",
            "events": "ON" if self.events else "OFF",
            "tasks": tasklist,
            "loader": get_full_cls_name(self.loader.__class__),
        }
        return STARTUP_INFO_FMT % banner
Example #9
0
def run_worker(concurrency=conf.CELERYD_CONCURRENCY,
        loglevel=conf.CELERYD_LOG_LEVEL, logfile=conf.CELERYD_LOG_FILE,
        hostname=None,
        discard=False, run_clockservice=False, events=False, **kwargs):
    """Starts the celery worker server.

    :keyword concurrency: Number of worker processes; defaults to the
        machine's CPU count when falsy.
    :keyword loglevel: Logging level; either an int or a level name,
        names are resolved through ``conf.LOG_LEVELS``.
    :keyword logfile: Path to the log file.
    :keyword hostname: Node name; defaults to ``socket.gethostname()``.
    :keyword discard: If true, erase all waiting messages before start.
    :keyword run_clockservice: If true, embed a celerybeat clock
        service in this worker.
    :keyword events: If true, the worker sends monitoring events.

    """

    hostname = hostname or socket.gethostname()

    print("celery@%s v%s is starting." % (hostname, celery.__version__))

    from celery.loaders import current_loader, load_settings
    loader = current_loader()
    settings = load_settings()

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    # sqlite3 cannot handle concurrent writers, so force a single
    # process when it backs the database result backend.
    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Setup logging: accept either a numeric level or a level name.
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    loader.on_worker_init()

    # Dump configuration to screen so we have some basic information
    # when users sends e-mails.

    # Task list is only shown at INFO and below; celery.* builtins are
    # filtered out unless the level is DEBUG or below.
    tasklist = ""
    if loglevel <= logging.INFO:
        from celery.registry import tasks
        tasklist = tasks.keys()
        if not loglevel <= logging.DEBUG:
            tasklist = filter(lambda s: not s.startswith("celery."), tasklist)
        tasklist = TASK_LIST_FMT % "\n".join("        . %s" % task
                                                for task in sorted(tasklist))

    print(STARTUP_INFO_FMT % {
            "conninfo": info.format_broker_info(),
            "queues": info.format_routing_table(indent=8),
            "concurrency": concurrency,
            "loglevel": conf.LOG_LEVELS[loglevel],
            "logfile": logfile or "[stderr]",
            "celerybeat": run_clockservice and "ON" or "OFF",
            "events": events and "ON" or "OFF",
            "tasks": tasklist,
            "loader": loader.__class__.__module__,
    })

    print("Celery has started.")
    set_process_status("Running...")

    # NOTE(review): this inner function shadows the outer name and is
    # not invoked within this excerpt -- presumably called by code past
    # this point (e.g. after daemonization); confirm against the full
    # file.
    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                hostname=hostname,
                                embed_clockservice=run_clockservice,
                                send_events=events)

        # Install signal handler so SIGHUP restarts the worker.
        install_worker_restart_handler(worker)

        from celery import signals
        signals.worker_init.send(sender=worker)

        try:
            worker.start()
        except Exception, e:
            # Startup failed: write the traceback somewhere visible
            # even if logging itself is broken.
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example #10
0
 def test_broker_info(self):
     # Smoke test: formatting the broker info must not raise.
     # (Return value is intentionally ignored.)
     info.format_broker_info()