Example #1
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                                colorize=None, hostname=None, **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        self.already_setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # Then set up the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
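
Both variants of this method are idempotent thanks to the already_setup guard, so it is safe to call early and often. A minimal usage sketch, assuming an app named 'proj' and an INFO level (both illustrative, not taken from the example above):

import logging

from celery import Celery

app = Celery('proj')  # hypothetical app; any Celery app object works
# Repeated calls are no-ops because of the already_setup guard above.
app.log.setup_logging_subsystem(loglevel=logging.INFO, logfile='worker.log')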
Example #2
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                                colorize=None, hostname=None, **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        self.already_setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.worker_hijack_root_logger:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # Then set up the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
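
The `if not receivers:` branch is the extension point in both variants: connecting anything to the setup_logging signal makes send() return a non-empty receiver list, and Celery then leaves logging configuration entirely to you. A minimal sketch of such an override (the basicConfig call is a stand-in for real handler setup):

import logging

from celery import signals

@signals.setup_logging.connect
def configure_logging(loglevel=None, logfile=None, format=None,
                      colorize=None, **kwargs):
    # Receiving this signal suppresses the default configuration above;
    # set up whatever handlers the application actually needs here.
    logging.basicConfig(level=loglevel or logging.INFO)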
Example #3
def detach(path,
           argv,
           logfile=None,
           pidfile=None,
           uid=None,
           gid=None,
           umask=None,
           working_directory=None,
           fake=False,
           app=None,
           executable=None,
           hostname=None):
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile,
                  pidfile,
                  uid,
                  gid,
                  umask,
                  working_directory,
                  fake,
                  after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem('ERROR',
                                            logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r",
                            ' '.join([path] + argv),
                            exc_info=True)
        return EX_FAILURE
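
A hedged sketch of how a caller might hand off to this function; the interpreter path and argument list are placeholders, not taken from the example:

import sys

# os.execv replaces the process image, so on success detach() never
# returns; EX_FAILURE only comes back when the exec itself fails.
detach(
    path=sys.executable,
    argv=['-m', 'celery', 'worker'],   # hypothetical command line
    logfile='%n.log',                  # node_format expands %n above
    pidfile='%n.pid',
)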
Example #4
def detach(path, argv, logfile=None, pidfile=None, uid=None,
           gid=None, umask=None, working_directory=None, fake=False, app=None,
           executable=None, hostname=None):
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake,
                  after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem(
                'ERROR', logfile, hostname=hostname)
            logger.critical("Can't exec %r", ' '.join([path] + argv),
                            exc_info=True)
        return EX_FAILURE
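
One detail shared by both variants: `fake = 1 if C_FAKEFORK else fake` hooks in Celery's C_FAKEFORK environment variable, which forces a fake (non-forking) detach so that startup errors stay visible on the current terminal instead of dying with the daemonized child. A minimal sketch, assuming the variable only needs to be non-empty:

import os

# Debugging aid: with C_FAKEFORK set, detached() above skips the real
# fork, so a "Can't exec" message is printed where you can see it.
os.environ['C_FAKEFORK'] = '1'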
Example #5
    def create_pool(self, queue, pool_cls=None, loglevel=None,
                    logfile=None, pidfile=None, state_db=None):
        kwargs = {
            'autoscale': '200,1',
            'queues': queue,
        }
        # Resolve the pool implementation, falling back to the app's default.
        pool_cls = (concurrency.get_implementation(pool_cls) or
                    self.app.conf.CELERYD_POOL)
        # One dedicated worker per queue, named "<hostname>::<queue>".
        hostname = '%s::%s' % (self.hostname, queue)
        pool = self.app.Worker(
            hostname=hostname,
            pool_cls=pool_cls,
            loglevel=loglevel,
            logfile=logfile,
            pidfile=node_format(pidfile, hostname),
            state_db=node_format(state_db, hostname),
            without_mingle=False,
            **kwargs
        )
        # Start the worker on a daemon thread and wait until it reports in.
        start_daemon_thread(pool.start)
        if self.check_pool_start(hostname):
            return {'name': hostname, 'pool': pool}
        raise CouldNotStartException('Work Pool')
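
The fallback on the pool_cls line relies on get_implementation passing non-string values through unchanged, so a missing alias falls back to the app's configured default pool. A small runnable sketch (the 'prefork' alias is illustrative):

from celery import concurrency

# A known alias resolves to the pool class...
print(concurrency.get_implementation('prefork'))
# ...while a falsy value comes back unchanged, triggering the
# `or self.app.conf.CELERYD_POOL` fallback in the method above.
print(concurrency.get_implementation(None))  # -> None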
Example #6
    def node_format(self, s, nodename, **extra):
        # Delegate to the module-level node_format helper.
        return node_format(s, nodename, **extra)
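
This wrapper simply delegates to Celery's node_format helper, which expands node-name placeholders in strings (%h for the full nodename, %n for the name part, %d for the domain). A small sketch, assuming the import path used by recent Celery releases:

from celery.utils.nodenames import node_format

print(node_format('/var/log/%n.log', 'worker1@example.com'))
# -> /var/log/worker1.log
print(node_format('%h.pid', 'worker1@example.com'))
# -> worker1@example.com.pid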