Example #1
    def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
            loglevel=None, logfile=None, pidfile=None, statedb=None,
            **kwargs):
        maybe_drop_privileges(uid=uid, gid=gid)
        # Pools like eventlet/gevent need to patch libs as early
        # as possible.
        pool_cls = (concurrency.get_implementation(pool_cls) or
                    self.app.conf.worker_pool)
        if self.app.IS_WINDOWS and kwargs.get('beat'):
            self.die('-B option does not work on Windows.  '
                     'Please run celery beat as a separate service.')
        hostname = self.host_format(default_nodename(hostname))
        if loglevel:
            try:
                loglevel = mlevel(loglevel)
            except KeyError:  # pragma: no cover
                self.die('Unknown level {0!r}.  Please use one of {1}.'.format(
                    loglevel, '|'.join(
                        l for l in LOG_LEVELS if isinstance(l, string_t))))

        worker = self.app.Worker(
            hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
            logfile=logfile,  # node format handled by celery.app.log.setup
            pidfile=self.node_format(pidfile, hostname),
            statedb=self.node_format(statedb, hostname), **kwargs
        )
        worker.start()
        return worker.exitcode
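Every variant above funnels the user-supplied level through mlevel(), which accepts either a numeric level or a case-insensitive level name and raises KeyError for unknown names; that is why the calls are wrapped in try/except. A minimal sketch of the normalization, assuming mlevel and LOG_LEVELS are importable from celery.utils.log as in Celery 4.x:

    from celery.utils.log import LOG_LEVELS, mlevel

    def normalize_loglevel(loglevel):
        # mlevel() maps names such as 'info' or 'INFO' to the numeric
        # logging level (20) and passes integers through unchanged.
        try:
            return mlevel(loglevel)
        except KeyError:
            raise SystemExit('Unknown level {0!r}.  Please use one of {1}.'.format(
                loglevel, '|'.join(l for l in LOG_LEVELS if isinstance(l, str))))

    assert normalize_loglevel('info') == normalize_loglevel(20) == 20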
Example #2
    def setup_instance(self, queues=None, ready_callback=None,
            pidfile=None, include=None, **kwargs):
        self.pidfile = pidfile
        self.app.loader.init_worker()
        self.setup_defaults(kwargs, namespace='celeryd')
        self.setup_queues(queues)
        self.setup_includes(include)

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready
        self.use_eventloop = self.should_use_eventloop()

        signals.worker_init.send(sender=self)

        # Initialize boot steps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.components = []
        self.namespace = Namespace(app=self.app).apply(self, **kwargs)
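The concurrency default here recurs in nearly every setup_instance variant: try the platform CPU count and fall back to a fixed pool size where it cannot be determined. A standalone sketch of the same fallback, using the stdlib multiprocessing.cpu_count (the examples' cpu_count may be imported from elsewhere in Celery):

    from multiprocessing import cpu_count

    def default_concurrency(configured=None, fallback=2):
        # Respect an explicit setting; otherwise size the pool to the
        # host, falling back when cpu_count() is unsupported.
        if configured:
            return configured
        try:
            return cpu_count()
        except NotImplementedError:
            return fallback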
Example #3
    def setup_instance(
        self, queues=None, ready_callback=None, pidfile=None, include=None, use_eventloop=None, **kwargs
    ):
        self.pidfile = pidfile
        self.setup_queues(queues)
        self.setup_includes(include)

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready

        # this connection is not established, only used for params
        self._conninfo = self.app.connection()
        self.use_eventloop = self.should_use_eventloop() if use_eventloop is None else use_eventloop
        self.options = kwargs

        signals.worker_init.send(sender=self)

        # Initialize bootsteps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.steps = []
        self.on_init_namespace()
        self.namespace = self.Namespace(
            app=self.app, on_start=self.on_start, on_close=self.on_close, on_stopped=self.on_stopped
        )
        self.namespace.apply(self, **kwargs)
Example #4
    def setup_task_loggers(self, loglevel=None, logfile=None, format=None,
                           colorize=None, propagate=False, **kwargs):
        """Setup the task logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Returns logger object.

        """
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.task_format
        colorize = self.supports_color(colorize, logfile)

        logger = self.setup_handlers(
            get_logger('celery.task'),
            logfile, format, colorize,
            formatter=TaskFormatter, **kwargs
        )
        logger.setLevel(loglevel)
        logger.propagate = int(propagate)    # this is an int for some reason.
                                             # better to not question why.
        signals.after_setup_task_logger.send(
            sender=None, logger=logger,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )
        return logger
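Because setup_task_loggers() ends by firing after_setup_task_logger, applications can adjust the freshly configured 'celery.task' logger from a signal receiver instead of patching Celery. A minimal sketch, assuming the standard celery.signals module; the log file name is illustrative:

    import logging

    from celery import signals

    @signals.after_setup_task_logger.connect
    def add_task_file_handler(logger=None, loglevel=None, **kwargs):
        # Runs after Celery has configured the task logger, so handlers
        # added here are not wiped by the default setup.
        handler = logging.FileHandler('tasks.log')  # illustrative path
        handler.setLevel(loglevel or logging.INFO)
        logger.addHandler(handler)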
Example #5
File: log.py Project: n1ywb/celery
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        Logging._setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        if not is_py3k:
            ensure_process_aware_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize
        )
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            for logger in root, get_multiprocessing_logger():
                if logger is not None:
                    self.setup_handlers(logger, logfile, format, colorize, **kwargs)
                    if loglevel:
                        logger.setLevel(loglevel)
                    signals.after_setup_logger.send(
                        sender=None, logger=logger, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize
                    )
            # then set up the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, basestring) else ""
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), _MP_FORK_LOGFILE_=logfile_name, _MP_FORK_LOGFORMAT_=format)
        return receivers
Example #6
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                                colorize=None, hostname=None, **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        Logging._setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.worker_hijack_root_logger:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # then set up the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
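Both versions skip their default configuration when signals.setup_logging has connected receivers (the `if not receivers:` branch); that signal is the supported hook for taking over logging entirely. A minimal receiver sketch, assuming the standard celery.signals module:

    import logging

    from celery import signals

    @signals.setup_logging.connect
    def configure_logging(loglevel=None, logfile=None, **kwargs):
        # Any connected receiver makes `receivers` truthy, so Celery
        # leaves handlers, levels, and redirection entirely to us.
        logging.basicConfig(level=loglevel or logging.INFO, filename=logfile)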
Example #7
File: log.py Project: axiak/celery
    def get_default_logger(self, loglevel=None, name="celery"):
        """Get default logger instance.

        :keyword loglevel: Initial log level.

        """
        logger = logging.getLogger(name)
        if loglevel is not None:
            logger.setLevel(mlevel(loglevel))
        return logger
Example #8
    def __init__(
        self,
        hostname=None,
        purge=False,
        beat=False,
        queues=None,
        include=None,
        app=None,
        pidfile=None,
        autoscale=None,
        autoreload=False,
        no_execv=False,
        no_color=None,
        **kwargs
    ):
        self.app = app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(sender=self.hostname, instance=self, conf=self.app.conf)

        self.setup_defaults(kwargs, namespace="celeryd")
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2
        self.purge = purge
        self.beat = beat
        self.use_queues = [] if queues is None else queues
        self.queues = None
        self.include = include
        self.pidfile = pidfile
        self.autoscale = None
        self.autoreload = autoreload
        self.no_color = no_color
        self.no_execv = no_execv
        if autoscale:
            max_c, _, min_c = autoscale.partition(",")
            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
        self._isatty = isatty(sys.stdout)

        self.colored = app.log.colored(self.logfile, enabled=not no_color if no_color is not None else no_color)

        if isinstance(self.use_queues, basestring):
            self.use_queues = self.use_queues.split(",")
        if self.include:
            if isinstance(self.include, basestring):
                self.include = self.include.split(",")
            app.conf.CELERY_INCLUDE = tuple(app.conf.CELERY_INCLUDE) + tuple(self.include)
        self.loglevel = mlevel(self.loglevel)
Example #9
    def setup_logging_subsystem(self, loglevel=None, logfile=None,
                                format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        Logging._setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        if not PY3:
            ensure_process_aware_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # then set up the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
Example #10
 def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
           redirect_level='WARNING', colorize=None, hostname=None):
     loglevel = mlevel(loglevel)
     handled = self.setup_logging_subsystem(
         loglevel, logfile, colorize=colorize, hostname=hostname,
     )
     if not handled:
         if redirect_stdouts:
             self.redirect_stdouts(redirect_level)
     os.environ.update(
         CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
         CELERY_LOG_FILE=str(logfile) if logfile else '',
     )
     return handled
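setup() exports the effective settings to the environment so that processes spawned later can recover them; an empty string stands in for 'unset'. Reading them back is symmetric, as a sketch:

    import os

    # '' means the option was never given, so fall back to None.
    loglevel = os.environ.get('CELERY_LOG_LEVEL') or None
    logfile = os.environ.get('CELERY_LOG_FILE') or None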
Example #11
 def run(self, *args, **kwargs):
     kwargs.pop("app", None)
     # Pools like eventlet/gevent need to patch libs as early
     # as possible.
     kwargs["pool_cls"] = concurrency.get_implementation(
                 kwargs.get("pool_cls") or self.app.conf.CELERYD_POOL)
     if self.app.IS_WINDOWS and kwargs.get("beat"):
         self.die("-B option does not work on Windows.  "
                  "Please run celerybeat as a separate service.")
     loglevel = kwargs.get("loglevel")
     if loglevel:
         try:
             kwargs["loglevel"] = mlevel(loglevel)
         except KeyError:  # pragma: no cover
             self.die("Unknown level %r. Please use one of %s." % (
                 loglevel, "|".join(l for l in LOG_LEVELS.keys()
                   if isinstance(l, basestring))))
     return self.app.Worker(**kwargs).run()
Example #12
    def run(self, hostname=None, pool_cls=None, loglevel=None, app=None, **kwargs):
        # Pools like eventlet/gevent need to patch libs as early
        # as possible.
        pool_cls = concurrency.get_implementation(pool_cls) or self.app.conf.CELERYD_POOL
        if self.app.IS_WINDOWS and kwargs.get("beat"):
            self.die("-B option does not work on Windows.  " "Please run celery beat as a separate service.")
        hostname = self.simple_format(hostname)
        if loglevel:
            try:
                loglevel = mlevel(loglevel)
            except KeyError:  # pragma: no cover
                self.die(
                    "Unknown level {0!r}. Please use one of {1}.".format(
                        loglevel, "|".join(l for l in LOG_LEVELS if isinstance(l, string_t))
                    )
                )

        return self.app.Worker(hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, **kwargs).start()
Example #13
 def run(self, *args, **kwargs):
     kwargs.pop('app', None)
     # Pools like eventlet/gevent need to patch libs as early
     # as possible.
     kwargs['pool_cls'] = concurrency.get_implementation(
                 kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL)
     if self.app.IS_WINDOWS and kwargs.get('beat'):
         self.die('-B option does not work on Windows.  '
                  'Please run celerybeat as a separate service.')
     loglevel = kwargs.get('loglevel')
     if loglevel:
         try:
             kwargs['loglevel'] = mlevel(loglevel)
         except KeyError:  # pragma: no cover
             self.die('Unknown level {0!r}. Please use one of {1}.'.format(
                 loglevel, '|'.join(l for l in LOG_LEVELS.keys()
                   if isinstance(l, basestring))))
     return self.app.Worker(**kwargs).run()
Example #14
    def __init__(self, hostname=None, discard=False, embed_clockservice=False,
            queues=None, include=None, app=None, pidfile=None,
            autoscale=None, autoreload=False, **kwargs):
        self.app = app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(sender=self.hostname, instance=self,
                                  conf=self.app.conf)

        self.setup_defaults(kwargs, namespace="celeryd")
        if not self.concurrency:
            self.concurrency = cpu_count()
        self.discard = discard
        self.embed_clockservice = embed_clockservice
        if self.app.IS_WINDOWS and self.embed_clockservice:
            self.die("-B option does not work on Windows.  "
                     "Please run celerybeat as a separate service.")
        self.use_queues = [] if queues is None else queues
        self.queues = None
        self.include = [] if include is None else include
        self.pidfile = pidfile
        self.autoscale = None
        self.autoreload = autoreload
        if autoscale:
            max_c, _, min_c = autoscale.partition(",")
            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
        self._isatty = isatty(sys.stdout)

        self.colored = app.log.colored(self.logfile)

        if isinstance(self.use_queues, basestring):
            self.use_queues = self.use_queues.split(",")
        if isinstance(self.include, basestring):
            self.include = self.include.split(",")

        try:
            self.loglevel = mlevel(self.loglevel)
        except KeyError:
            self.die("Unknown level %r. Please use one of %s." % (
                        self.loglevel,
                        "|".join(l for l in LOG_LEVELS.keys()
                                    if isinstance(l, basestring))))
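The autoscale string is parsed with str.partition, so both 'max,min' and a bare 'max' are accepted, with the minimum defaulting to 0 when omitted. A quick illustration of that parsing:

    def parse_autoscale(value):
        # Mirrors the partition-based parsing above:
        # '10,3' -> [10, 3]; '10' -> [10, 0].
        max_c, _, min_c = value.partition(',')
        return [int(max_c), min_c and int(min_c) or 0]

    assert parse_autoscale('10,3') == [10, 3]
    assert parse_autoscale('10') == [10, 0]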
Example #15
File: log.py Project: axiak/celery
    def setup_logger(self, loglevel=None, logfile=None,
            format=None, colorize=None, name="celery", root=True,
            app=None, **kwargs):
        """Setup the :mod:`multiprocessing` logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Returns logger object.

        """
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if not root or self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            return self._setup_logger(self.get_default_logger(loglevel, name),
                                      logfile, format, colorize, **kwargs)
        self.setup_logging_subsystem(loglevel, logfile,
                                     format, colorize, **kwargs)
        return self.get_default_logger(name=name)
Example #16
 def setup(self,
           loglevel=None,
           logfile=None,
           redirect_stdouts=False,
           redirect_level='WARNING',
           colorize=None,
           hostname=None):
     loglevel = mlevel(loglevel)
     handled = self.setup_logging_subsystem(
         loglevel,
         logfile,
         colorize=colorize,
         hostname=hostname,
     )
     if not handled:
         if redirect_stdouts:
             self.redirect_stdouts(redirect_level)
     os.environ.update(
         CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
         CELERY_LOG_FILE=str(logfile) if logfile else '',
     )
     return handled
Example #17
    def setup_instance(self, queues=None, ready_callback=None, pidfile=None,
                       include=None, use_eventloop=None, exclude_queues=None,
                       **kwargs):
        self.pidfile = pidfile
        self.setup_queues(queues, exclude_queues)
        self.setup_includes(str_to_list(include))

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready

        # this connection won't establish, only used for params
        self._conninfo = self.app.connection_for_read()
        self.use_eventloop = (
            self.should_use_eventloop() if use_eventloop is None
            else use_eventloop
        )
        self.options = kwargs

        signals.worker_init.send(sender=self)

        # Initialize bootsteps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.steps = []
        self.on_init_blueprint()
        self.blueprint = self.Blueprint(
            steps=self.app.steps['worker'],
            on_start=self.on_start,
            on_close=self.on_close,
            on_stopped=self.on_stopped,
        )
        self.blueprint.apply(self, **kwargs)
Example #18
    def setup_instance(self, queues=None, ready_callback=None, pidfile=None,
                       include=None, use_eventloop=None, exclude_queues=None,
                       **kwargs):
        self.pidfile = pidfile
        self.setup_queues(queues, exclude_queues)
        self.setup_includes(str_to_list(include))

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready

        # this connection won't establish, only used for params
        self._conninfo = self.app.connection_for_read()
        self.use_eventloop = (
            self.should_use_eventloop() if use_eventloop is None
            else use_eventloop
        )
        self.options = kwargs

        signals.worker_init.send(sender=self)

        # Initialize bootsteps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.steps = []
        self.on_init_blueprint()
        self.blueprint = self.Blueprint(
            steps=self.app.steps['worker'],
            on_start=self.on_start,
            on_close=self.on_close,
            on_stopped=self.on_stopped,
        )
        self.blueprint.apply(self, **kwargs)
Example #19
    def run(self,
            hostname=None,
            pool_cls=None,
            app=None,
            uid=None,
            gid=None,
            loglevel=None,
            logfile=None,
            pidfile=None,
            statedb=None,
            **kwargs):
        maybe_drop_privileges(uid=uid, gid=gid)
        # Pools like eventlet/gevent need to patch libs as early
        # as possible.
        pool_cls = concurrency.get_implementation(
            pool_cls) or self.app.conf.worker_pool
        if self.app.IS_WINDOWS and kwargs.get("beat"):
            self.die("-B option does not work on Windows.  "
                     "Please run celery beat as a separate service.")
        hostname = self.host_format(default_nodename(hostname))
        if loglevel:
            try:
                loglevel = mlevel(loglevel)
            except KeyError:  # pragma: no cover
                self.die("Unknown level {0!r}.  Please use one of {1}.".format(
                    loglevel,
                    "|".join(l for l in LOG_LEVELS if isinstance(l, string_t)),
                ))

        worker = self.app.Worker(
            hostname=hostname,
            pool_cls=pool_cls,
            loglevel=loglevel,
            logfile=logfile,  # node format handled by celery.app.log.setup
            pidfile=self.node_format(pidfile, hostname),
            statedb=self.node_format(statedb, hostname),
            **kwargs)
        worker.start()
        return worker.exitcode
Example #20
0
    def setup_task_loggers(self,
                           loglevel=None,
                           logfile=None,
                           format=None,
                           colorize=None,
                           propagate=False,
                           **kwargs):
        """Setup the task logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Will return the base task logger object.

        """
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.task_format
        colorize = self.supports_color(colorize, logfile)

        logger = self.setup_handlers(get_logger('celery.task'),
                                     logfile,
                                     format,
                                     colorize,
                                     formatter=TaskFormatter,
                                     **kwargs)
        logger.setLevel(loglevel)
        # this is an int for some reason, better to not question why.
        logger.propagate = int(propagate)
        signals.after_setup_task_logger.send(
            sender=None,
            logger=logger,
            loglevel=loglevel,
            logfile=logfile,
            format=format,
            colorize=colorize,
        )
        return logger
Example #21
File: log.py Project: axiak/celery
    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)
        reset_multiprocessing_logger()
        if not is_py3k:
            ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            for logger in filter(None, (root, get_multiprocessing_logger())):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                if loglevel:
                    logger.setLevel(loglevel)
                signals.after_setup_logger.send(sender=None, logger=logger,
                                            loglevel=loglevel, logfile=logfile,
                                            format=format, colorize=colorize)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile or "",
                          _MP_FORK_LOGFORMAT_=format)
        Logging._setup = True

        return receivers
Example #22
    def setup_instance(self, queues=None, ready_callback=None, pidfile=None,
                       include=None, use_eventloop=None, **kwargs):
        self.pidfile = pidfile
        self.setup_defaults(kwargs, namespace='celeryd')
        self.setup_queues(queues)
        self.setup_includes(include)

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready
        # this connection is not established, only used for params
        self._conninfo = self.app.connection()
        self.use_eventloop = (
            self.should_use_eventloop() if use_eventloop is None
            else use_eventloop
        )
        self.options = kwargs

        signals.worker_init.send(sender=self)

        # Initialize bootsteps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.steps = []
        self.on_init_namespace()
        self.namespace = self.Namespace(app=self.app,
                                        on_start=self.on_start,
                                        on_close=self.on_close,
                                        on_stopped=self.on_stopped)
        self.namespace.apply(self, **kwargs)
Example #23
File: log.py Project: axiak/celery
    def setup_task_logger(self, loglevel=None, logfile=None, format=None,
            colorize=None, task_name=None, task_id=None, propagate=False,
            app=None, **kwargs):
        """Setup the task logger.

        If `logfile` is not specified, then `sys.stderr` is used.

        Returns logger object.

        """
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.task_format
        if colorize is None:
            colorize = self.supports_color(logfile)

        logger = self._setup_logger(self.get_task_logger(loglevel, task_name),
                                    logfile, format, colorize, **kwargs)
        logger.propagate = int(propagate)    # this is an int for some reason.
                                             # better to not question why.
        signals.after_setup_task_logger.send(sender=None, logger=logger,
                                     loglevel=loglevel, logfile=logfile,
                                     format=format, colorize=colorize)
        return LoggerAdapter(logger, {"task_id": task_id,
                                      "task_name": task_name})
Example #24
File: log.py Project: mahak/celery
 def setup(self,
           loglevel=None,
           logfile=None,
           redirect_stdouts=False,
           redirect_level='WARNING',
           colorize=None,
           hostname=None):
     loglevel = mlevel(loglevel)
     handled = self.setup_logging_subsystem(
         loglevel,
         logfile,
         colorize=colorize,
         hostname=hostname,
     )
     if not handled and redirect_stdouts:
         self.redirect_stdouts(redirect_level)
     os.environ.update(
         CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
         CELERY_LOG_FILE=str(logfile) if logfile else '',
     )
     warnings.filterwarnings('always', category=CDeprecationWarning)
     warnings.filterwarnings('always', category=CPendingDeprecationWarning)
     logging.captureWarnings(True)
     return handled
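This variant also forces Celery's deprecation warnings to always display and calls logging.captureWarnings(True), a stdlib feature that reroutes warnings.warn() output from stderr into the 'py.warnings' logger. A small stdlib-only demonstration of the rerouting:

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logging.captureWarnings(True)
    # Emitted through the 'py.warnings' logger instead of stderr:
    warnings.warn('this warning is handled by logging')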
Example #25
0
    def setup_logging_subsystem(self,
                                loglevel=None,
                                logfile=None,
                                format=None,
                                colorize=None,
                                hostname=None,
                                **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        self.already_setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None,
            loglevel=loglevel,
            logfile=logfile,
            format=format,
            colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.worker_hijack_root_logger:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(root, logfile, loglevel, format, colorize,
                                   **kwargs)

            # Configure the multiprocessing logger
            self._configure_logger(get_multiprocessing_logger(), logfile,
                                   loglevel if MP_LOG else logging.ERROR,
                                   format, colorize, **kwargs)

            signals.after_setup_logger.send(
                sender=None,
                logger=root,
                loglevel=loglevel,
                logfile=logfile,
                format=format,
                colorize=colorize,
            )

            # then set up the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
Example #26
 def __init__(self, app):
     self.app = app
     self.loglevel = mlevel(logging.WARN)
     self.format = self.app.conf.worker_log_format
     self.task_format = self.app.conf.worker_task_log_format
     self.colorize = self.app.conf.worker_log_color
Example #27
 def __init__(self, app):
     self.app = app
     self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL)
     self.format = self.app.conf.CELERYD_LOG_FORMAT
     self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
     self.colorize = self.app.conf.CELERYD_LOG_COLOR
Example #28
File: log.py Project: axiak/celery
 def get_task_logger(self, loglevel=None, name=None):
     logger = logging.getLogger(name or "celery.task.default")
     if loglevel is not None:
         logger.setLevel(mlevel(loglevel))
     return logger
Example #29
 def __init__(self, app):
     self.app = app
     self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL)
     self.format = self.app.conf.CELERYD_LOG_FORMAT
     self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
     self.colorize = self.app.conf.CELERYD_LOG_COLOR
Example #30
 def __init__(self, app):
     self.app = app
     self.loglevel = mlevel(logging.WARN)
     self.format = self.app.conf.worker_log_format
     self.task_format = self.app.conf.worker_task_log_format
     self.colorize = self.app.conf.worker_log_color
Example #31
 def convert(self, value, param, ctx):
     value = value.upper()
     value = super().convert(value, param, ctx)
     return mlevel(value)
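This final example comes from the Click-based CLI of newer Celery releases: the parameter type upper-cases the raw value, lets the parent Choice validate it, then maps the name to a numeric level with mlevel(). A hedged reconstruction of the surrounding type (the class name and choice list are illustrative, not necessarily Celery's own):

    import click

    from celery.utils.log import mlevel

    class LogLevelChoice(click.Choice):  # illustrative name
        def __init__(self):
            super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'))

        def convert(self, value, param, ctx):
            value = value.upper()                       # accept lowercase input
            value = super().convert(value, param, ctx)  # validate the choice
            return mlevel(value)                        # name -> numeric level

    @click.command()
    @click.option('--loglevel', type=LogLevelChoice(), default='WARNING')
    def main(loglevel):
        click.echo(loglevel)  # e.g. 30 for WARNING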