Example No. 1
def run_clockservice(detach=False, loglevel=conf.CELERYBEAT_LOG_LEVEL,
        logfile=conf.CELERYBEAT_LOG_FILE, pidfile=conf.CELERYBEAT_PID_FILE,
        umask=0, uid=None, gid=None, working_directory=None, chroot=None,
        schedule=conf.CELERYBEAT_SCHEDULE_FILENAME, **kwargs):
    """Starts the celerybeat clock server."""

    print("celerybeat %s is starting." % __version__)

    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.

    print(STARTUP_INFO_FMT % {
            "conninfo": get_connection_info(),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "schedule": schedule,
    })

    print("celerybeat has started.")
    arg_start = 2 if "manage" in sys.argv[0] else 1
    platform.set_process_title("celerybeat",
                               info=" ".join(sys.argv[arg_start:]))
    from celery.log import setup_logger, redirect_stdouts_to_logger
    on_stop = noop
    if detach:
        context, on_stop = platform.create_daemon_context(logfile, pidfile,
                                        chroot_directory=chroot,
                                        working_directory=working_directory,
                                        umask=umask)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)
        platform.set_effective_user(uid, gid)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def _run_clock():
        logger = setup_logger(loglevel, logfile)
        clockservice = ClockService(logger=logger, is_detached=detach,
                                    schedule_filename=schedule)

        try:
            clockservice.start()
        except Exception as e:
            emergency_error(logfile,
                    "celerybeat raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
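
All of these examples rely on the same underlying trick: sys.stdout and sys.stderr are swapped for a file-like object that forwards every write to a logger. The sketch below illustrates that mechanism only; the _LoggerProxy class and its details are illustrative, not Celery's actual LoggingProxy implementation.

import logging
import sys

class _LoggerProxy(object):
    """Minimal file-like stand-in: forwards writes to a logger.

    Illustrative only -- not Celery's actual LoggingProxy.
    """

    def __init__(self, logger, loglevel=logging.WARNING):
        self.logger = logger
        self.loglevel = loglevel

    def write(self, data):
        data = data.strip()
        if data:  # skip the bare newlines that print() emits
            self.logger.log(self.loglevel, data)

    def flush(self):
        pass  # nothing is buffered, so there is nothing to flush

logger = logging.getLogger("proxy-demo")
logging.basicConfig(filename="demo.log", level=logging.WARNING)
sys.stdout = sys.stderr = _LoggerProxy(logger)
print("this line now lands in demo.log instead of the console")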
Example No. 2
def redirect_stdouts_to_logger(self):
    from celery import log
    handled = log.setup_logging_subsystem(loglevel=self.loglevel,
                                          logfile=self.logfile)
    # Redirect stdout/stderr to our logger.
    if not handled:
        logger = log.get_default_logger()
        log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
Example No. 3
    def setup_logging(self):
        from celery import log

        handled = log.setup_logging_subsystem(loglevel=self.loglevel, logfile=self.logfile)
        logger = log.get_default_logger(name="celery.beat")
        if self.redirect_stdouts and not handled:
            log.redirect_stdouts_to_logger(logger, loglevel=self.redirect_stdouts_level)
        return logger
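
The handled flag in Example No. 3 is the interesting part: setup_logging_subsystem returns a truthy value when logging was already configured elsewhere (for instance by a user-supplied setup-logging handler), and in that case the caller leaves stdout/stderr alone. A minimal caller honoring the same contract could look like the following sketch; it assumes the same old-style celery.log API used throughout these examples.

import logging
from celery import log

def start_logging(loglevel=logging.INFO, logfile=None):
    # Truthy when the user already configured logging themselves.
    handled = log.setup_logging_subsystem(loglevel=loglevel, logfile=logfile)
    logger = log.get_default_logger()
    if not handled:
        # Safe to take over stdout/stderr: nobody else set up logging.
        log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
    return logger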
Example No. 4
def test_redirect_stdouts(self):
    logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
                               root=False)
    try:
        with wrap_logger(logger) as sio:
            redirect_stdouts_to_logger(logger, loglevel=logging.ERROR)
            logger.error("foo")
            self.assertIn("foo", sio.getvalue())
    finally:
        sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
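
Two details of this test are worth noting. The finally block undoes the redirection by restoring the originals that Python keeps in sys.__stdout__ and sys.__stderr__. And wrap_logger is a helper from Celery's test suite; a plausible equivalent is a context manager that temporarily attaches a StreamHandler writing into a StringIO, as sketched below (the name capture_logger marks it as ours, not the suite's).

import logging
from contextlib import contextmanager
from io import StringIO

@contextmanager
def capture_logger(logger):
    # Temporarily capture everything the logger emits in a StringIO.
    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logger.addHandler(handler)
    try:
        yield sio
    finally:
        logger.removeHandler(handler)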
Example No. 5
def redirect_stdouts_to_logger(self):
    from celery import log
    handled = log.setup_logging_subsystem(loglevel=self.loglevel,
                                          logfile=self.logfile)
    # Redirect stdout/stderr to our logger.
    if not handled:
        logger = log.get_default_logger()
        if self.redirect_stdouts:
            log.redirect_stdouts_to_logger(
                logger, loglevel=self.redirect_stdouts_level)
Example No. 6
def setup_log(**args):
    logbook.SyslogHandler().push_application()
    logbook.StreamHandler(sys.stderr, bubble=True).push_application()
    redirect_stdouts_to_logger(args['logger'])  # logs to local syslog
    if os.path.exists('/dev/log'):
        h = logging.handlers.SysLogHandler('/dev/log')
    else:
        h = logging.handlers.SysLogHandler()
    h.setLevel(args['loglevel'])
    formatter = logging.Formatter(logging.BASIC_FORMAT)
    h.setFormatter(formatter)
    args['logger'].addHandler(h)
Example No. 7
def setup_log(**args):
    # redirect stdout and stderr to logger
    redirect_stdouts_to_logger(args['logger'])
    # logs to local syslog
    hl = SysLogHandler('/dev/log',
                       facility=SysLogHandler.facility_names['syslog'])
    # setting log level
    hl.setLevel(args['loglevel'])
    # setting log format
    formatter = Formatter(frontend_config.log.prefix + BASIC_FORMAT)
    hl.setFormatter(formatter)
    # add new handler to logger
    args['logger'].addHandler(hl)
Example No. 8
def setup_log(**args):
    # redirect stdout and stderr to logger
    redirect_stdouts_to_logger(args['logger'])
    # logs to local syslog
    syslog = SysLogHandler(address=settings.SYSLOG_FILE,
                           facility=logging.handlers.SysLogHandler.LOG_LOCAL3)
    # setting log level
    syslog.setLevel(args['loglevel'])
    # setting log format
    formatter = logging.Formatter('dbaas: #celery %(name)s %(message)s')
    syslog.setFormatter(formatter)
    # add new handler to logger
    args['logger'].addHandler(syslog)
Example No. 9
def setup_log(**args):
    # redirect stdout and stderr to logger
    redirect_stdouts_to_logger(args['logger'])
    # logs to local syslog
    hl = SysLogHandler('/dev/log',
                       facility=SysLogHandler.facility_names['syslog'])
    # setting log level
    hl.setLevel(args['loglevel'])
    # setting log format
    formatter = Formatter(probe_config.log.prefix + BASIC_FORMAT)
    hl.setFormatter(formatter)
    # add new handler to logger
    args['logger'].addHandler(hl)
Example No. 10
def init_logs_stdout(level):
    """Load logging config, and set log levels based on flags"""

    logging_level = LEVELS.get(level, 'warn')

    # Add the logging handler to the root logger.  This will be a file or
    # stdout depending on the presence of the logfile parameter.
    #
    # Note that what we are doing here is just a simplified version of what the
    # standard logging.basicConfig is doing.  An important difference is that
    # we add our handler every time init_logs() is called, whereas basicConfig
    # does nothing if there is at least one handler (any handler) present.
    # This allows us to call init_logs multiple times during the unittest, to
    # reinstall our handler after nose (actually its logcapture plugin) throws
    # it away.
    found = False
    for hdlr in LOG.handlers:
        if isinstance(hdlr, (logging.FileHandler, logging.StreamHandler)):
            found = True

    if not found:
        filename = FLAGS.get('logfile', '')
        if filename:
            hdlr = logging.FileHandler(filename, 'a')
        else:
            hdlr = logging.StreamHandler()

        hdlr.setFormatter(
            logging.Formatter(LOGGING_STDOUT_FORMAT, None))
        LOG.addHandler(hdlr)

    LOG.setLevel(logging_level)
    RISK_LOG.setLevel(logging_level)
    HAZARD_LOG.setLevel(logging_level)

    # Capture Java logging (this is what celeryd does with the workers; we use
    # exactly the same system for bin/openquakes and the like).
    if not isinstance(sys.stdout, LoggingProxy):
        redirect_stdouts_to_logger(LOG)
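
The final isinstance check is the detail worth copying: redirecting twice would wrap one proxy in another, so the concrete type of sys.stdout doubles as a marker that the streams were already swapped. A reusable guard along the same lines, assuming LoggingProxy lives in celery.log as it does in the Celery version these snippets target:

import sys
from celery.log import LoggingProxy, redirect_stdouts_to_logger

def redirect_once(logger):
    # No-op when stdout was already replaced by a LoggingProxy.
    if not isinstance(sys.stdout, LoggingProxy):
        redirect_stdouts_to_logger(logger)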
Example No. 11
def redirect_stdouts_to_logger(self):
    from celery import log
    # Redirect stdout/stderr to our logger.
    logger = log.setup_logger(loglevel=self.loglevel, logfile=self.logfile)
    log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
Example No. 12
def run_worker(concurrency=conf.DAEMON_CONCURRENCY, detach=False,
        loglevel=conf.DAEMON_LOG_LEVEL, logfile=conf.DAEMON_LOG_FILE,
        discard=False, pidfile=conf.DAEMON_PID_FILE, umask=0,
        uid=None, gid=None, working_directory=None,
        chroot=None, statistics=None, run_clockservice=False, **kwargs):
    """Starts the celery worker server."""

    print("Celery %s is starting." % __version__)

    if statistics is not None:
        settings.CELERY_STATISTICS = statistics

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = "message" if discarded_count == 1 else "messages"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.

    print(STARTUP_INFO_FMT % {
            "conninfo": get_connection_info(),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "concurrency": concurrency,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
            "celerybeat": run_clockservice and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        from celery.log import setup_logger, redirect_stdouts_to_logger
        context = platform.create_daemon_context(logfile, pidfile,
                                        chroot_directory=chroot,
                                        working_directory=working_directory,
                                        umask=umask,
                                        uid=uid,
                                        gid=gid)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                embed_clockservice=run_clockservice,
                                is_detached=detach)

        # Install signal handler that restarts celeryd on SIGHUP,
        # (only on POSIX systems)
        install_worker_restart_handler(worker)

        try:
            worker.start()
        except Exception as e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example No. 13
def with_wrap_logger(sio):
    # Nested test callback: ``logger`` and ``self`` come from the
    # enclosing test case's scope.
    redirect_stdouts_to_logger(logger, loglevel=logging.ERROR)
    logger.error("foo")
    self.assertIn("foo", sio.getvalue())
Example No. 14
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
        supervised=False, working_directory=None, chroot=None,
        statistics=None, **kwargs):
    """Starts the celery worker server."""

    # Set SIGCLD back to the default SIG_DFL (python-daemon overrides it);
    # this lets the parent wait() for the terminated child process and
    # avoids the 'OSError: [Errno 10] No child processes' problem.

    if hasattr(signal, "SIGCLD"): # Make sure the platform supports signals.
        signal.signal(signal.SIGCLD, signal.SIG_DFL)

    print("Celery %s is starting." % __version__)

    if statistics is not None:
        settings.CELERY_STATISTICS = statistics

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = "message" if discarded_count == 1 else "messages"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.
    print(STARTUP_INFO_FMT % {
            "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
            "host": getattr(settings, "AMQP_SERVER", "(default)"),
            "port": getattr(settings, "AMQP_PORT", "(default)"),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "concurrency": concurrency,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                    "This operating system doesn't support detach.")
        from daemon import DaemonContext
        from celery.log import setup_logger, redirect_stdouts_to_logger

        # Without stderr, any errors would be silently suppressed; verify
        # up front that the logfile is writable.
        if logfile:
            open(logfile, "a").close()

        pidlock = acquire_pidlock(pidfile)
        if umask is None:
            umask = 0
        if uid is None:
            uid = os.geteuid()
        if gid is None:
            gid = os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)

        # Install signal handler that restarts celeryd on SIGHUP,
        # (only on POSIX systems)
        install_restart_signal_handler(worker)

        try:
            worker.start()
        except Exception as e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
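
Both detach paths above follow the same ordering, and it matters: DaemonContext.open() forks and closes the inherited standard streams, so the logger must be created and stdout/stderr redirected only after the context is open, or the redirection would target file descriptors the daemon is about to lose. A stripped-down sketch of that sequence, assuming the python-daemon package (paths are illustrative):

import logging
from daemon import DaemonContext

context = DaemonContext(working_directory="/", umask=0o022)
context.open()  # fork into the background; inherited stdio is closed here

# Only now build the logger and point the std streams at it.
logger = logging.getLogger("worker")
logger.addHandler(logging.FileHandler("/tmp/worker.log"))
# e.g. sys.stdout = sys.stderr = _LoggerProxy(logger) from the sketch above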