def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, hostname=None, **kwargs):
    """Set up the worker logging subsystem (idempotent).

    Fires the ``setup_logging`` signal first; only when no receiver
    handles it does this method configure the root logger, the
    multiprocessing logger and the task loggers itself.
    """
    if self.already_setup:
        return
    if logfile and hostname:
        # Expand node-name placeholders (e.g. %h, %n) in the path.
        logfile = node_format(logfile, hostname)
    self.already_setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    handled_by = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not handled_by:
        root_logger = logging.getLogger()
        if self.app.conf.worker_hijack_root_logger:
            # Drop any handlers installed before we got here.
            root_logger.handlers = []
            for name in ('celery', 'celery.task', 'celery.redirected'):
                get_logger(name).handlers = []
        # Root logger gets the requested level; the multiprocessing
        # logger is silenced to ERROR unless MP_LOG is enabled.
        self._configure_logger(
            root_logger, logfile, loglevel, format, colorize, **kwargs)
        mp_level = loglevel if MP_LOG else logging.ERROR
        self._configure_logger(
            get_multiprocessing_logger(),
            logfile, mp_level, format, colorize, **kwargs)
        signals.after_setup_logger.send(
            sender=None, logger=root_logger,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )
        # Then set up the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
    try:
        stream = logging.getLogger().handlers[0].stream
    except (AttributeError, IndexError):
        pass
    else:
        set_default_encoding_file(stream)
    # Hack for multiprocessing's fork+exec: export the settings to the
    # environment so logging works before Process.run is entered.
    os.environ.update(
        _MP_FORK_LOGLEVEL_=str(loglevel),
        _MP_FORK_LOGFILE_=logfile if isinstance(logfile, string_t) else '',
        _MP_FORK_LOGFORMAT_=format)
    return handled_by
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, hostname=None, **kwargs):
    """Set up the worker logging subsystem, at most once.

    Sends the ``setup_logging`` signal first; when no receiver handles
    it, configures the root logger, the multiprocessing logger, and the
    task loggers directly.

    Returns the list of ``setup_logging`` signal receiver responses.
    """
    if self.already_setup:
        return
    if logfile and hostname:
        # Expand node-name placeholders in the logfile path.
        logfile = node_format(logfile, hostname)
    # NOTE(review): the guard above reads ``self.already_setup`` but the
    # flag written here is the class attribute ``Logging._setup`` —
    # presumably ``already_setup`` proxies ``_setup``; confirm against
    # the class definition, otherwise the guard never trips.
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    receivers = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not receivers:
        root = logging.getLogger()
        if self.app.conf.worker_hijack_root_logger:
            # Discard any handlers installed before this point.
            root.handlers = []
            get_logger('celery').handlers = []
            get_logger('celery.task').handlers = []
            get_logger('celery.redirected').handlers = []
        # Configure root logger at the requested level.
        self._configure_logger(
            root, logfile, loglevel, format, colorize, **kwargs
        )
        # Configure the multiprocessing logger; silenced to ERROR
        # unless MP_LOG is enabled.
        self._configure_logger(
            get_multiprocessing_logger(),
            logfile, loglevel if MP_LOG else logging.ERROR,
            format, colorize, **kwargs
        )
        signals.after_setup_logger.send(
            sender=None, logger=root,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )
        # Then set up the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
    try:
        # Root logger may have no handlers or a handler with no stream.
        stream = logging.getLogger().handlers[0].stream
    except (AttributeError, IndexError):
        pass
    else:
        set_default_encoding_file(stream)
    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.
    logfile_name = logfile if isinstance(logfile, string_t) else ''
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=logfile_name,
                      _MP_FORK_LOGFORMAT_=format)
    return receivers
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, **kwargs):
    """Configure the Celery logging subsystem exactly once.

    Emits the ``setup_logging`` signal first; when no receiver claims
    it, installs handlers on the root and multiprocessing loggers and
    then sets up the task loggers.
    """
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    if not is_py3k:
        # Python 2 needs the process-aware logging patch.
        ensure_process_aware_logger()
    responses = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize)
    if not responses:
        root_logger = logging.getLogger()
        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root_logger.handlers = []
        for target in (root_logger, get_multiprocessing_logger()):
            if target is None:
                continue
            self.setup_handlers(target, logfile, format, colorize, **kwargs)
            if loglevel:
                target.setLevel(loglevel)
            signals.after_setup_logger.send(
                sender=None, logger=target, loglevel=loglevel,
                logfile=logfile, format=format, colorize=colorize)
        # Then set up the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
    # Hack for multiprocessing's fork+exec: export the settings so
    # logging works before Process.run is entered.
    os.environ.update(
        _MP_FORK_LOGLEVEL_=str(loglevel),
        _MP_FORK_LOGFILE_=logfile if isinstance(logfile, basestring) else "",
        _MP_FORK_LOGFORMAT_=format)
    return responses
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, **kwargs):
    """Set up logging for the app, at most once.

    Dispatches the ``setup_logging`` signal; falls back to configuring
    the root, multiprocessing and task loggers when no signal receiver
    takes over.
    """
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    if not PY3:
        # Python 2 needs the process-aware logging patch.
        ensure_process_aware_logger()
    handled = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not handled:
        root = logging.getLogger()
        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            # Discard handlers installed before this point.
            root.handlers = []
        # Root logger gets the requested level; the multiprocessing
        # logger is silenced to ERROR unless MP_LOG is enabled.
        self._configure_logger(
            root, logfile, loglevel, format, colorize, **kwargs)
        mp_loglevel = loglevel if MP_LOG else logging.ERROR
        self._configure_logger(
            get_multiprocessing_logger(), logfile, mp_loglevel,
            format, colorize, **kwargs)
        signals.after_setup_logger.send(
            sender=None, logger=root, loglevel=loglevel,
            logfile=logfile, format=format, colorize=colorize,
        )
        # Then set up the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
    # Hack for multiprocessing's fork+exec, so that logging works
    # before Process.run is entered.
    env_logfile = logfile if isinstance(logfile, string_t) else ''
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=env_logfile,
                      _MP_FORK_LOGFORMAT_=format)
    return handled
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, **kwargs):
    """Initialize the logging subsystem; subsequent calls are no-ops.

    Returns the responses of any ``setup_logging`` signal receivers;
    when there are none, performs the default logger configuration.
    """
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    if not PY3:
        # Python 2 needs the process-aware logging patch.
        ensure_process_aware_logger()
    receivers = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not receivers:
        root = logging.getLogger()
        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []
        # (logger, level) pairs: the multiprocessing logger only honours
        # the requested level when MP_LOG is set, otherwise ERROR.
        targets = (
            (root, loglevel),
            (get_multiprocessing_logger(),
             loglevel if MP_LOG else logging.ERROR),
        )
        for target, level in targets:
            self._configure_logger(
                target, logfile, level, format, colorize, **kwargs)
        signals.after_setup_logger.send(
            sender=None, logger=root, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )
        # Then set up the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
    # Hack for multiprocessing's fork+exec, so that logging works
    # before Process.run is entered.
    os.environ.update(
        _MP_FORK_LOGLEVEL_=str(loglevel),
        _MP_FORK_LOGFILE_=logfile if isinstance(logfile, string_t) else '',
        _MP_FORK_LOGFORMAT_=format)
    return receivers
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, **kwargs):
    """Configure logging for the app, at most once per process.

    A ``setup_logging`` signal receiver may take over completely;
    otherwise handlers are installed on the root and multiprocessing
    loggers, followed by the task loggers.
    """
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    if colorize is None:
        # Auto-detect colour support from the log destination.
        colorize = self.supports_color(logfile)
    reset_multiprocessing_logger()
    if not is_py3k:
        # Python 2 needs the process-aware logging patch.
        ensure_process_aware_logger()
    responses = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize)
    if not responses:
        root_logger = logging.getLogger()
        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root_logger.handlers = []
        for target in (root_logger, get_multiprocessing_logger()):
            if not target:
                continue
            self.setup_handlers(target, logfile, format, colorize, **kwargs)
            if loglevel:
                target.setLevel(loglevel)
            signals.after_setup_logger.send(
                sender=None, logger=target, loglevel=loglevel,
                logfile=logfile, format=format, colorize=colorize)
        # Then set up the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)
    # Hack for multiprocessing's fork+exec, so that logging works
    # before Process.run is entered.
    os.environ.update(
        _MP_FORK_LOGLEVEL_=str(loglevel),
        _MP_FORK_LOGFILE_=logfile if isinstance(logfile, basestring) else "",
        _MP_FORK_LOGFORMAT_=format)
    return responses
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, **kwargs):
    """Set up the logging subsystem, at most once per process.

    Sends the ``setup_logging`` signal first; when no receiver handles
    it, installs handlers on the root and multiprocessing loggers.

    Returns the list of ``setup_logging`` signal receiver responses.
    """
    if Logging._setup:
        return
    # Set the guard *before* doing any work: a setup_logging signal
    # receiver (or logging emitted while handlers are being installed)
    # may re-enter this method, and flagging only at the end lets the
    # guard miss that re-entry, risking recursion and duplicate
    # handler installation.
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    if colorize is None:
        # Auto-detect colour support from the log destination.
        colorize = self.supports_color(logfile)
    reset_multiprocessing_logger()
    if not is_py3k:
        # Python 2 needs the process-aware logging patch.
        ensure_process_aware_logger()
    receivers = signals.setup_logging.send(sender=None,
                                           loglevel=loglevel,
                                           logfile=logfile,
                                           format=format,
                                           colorize=colorize)
    if not receivers:
        root = logging.getLogger()
        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []
        for logger in filter(None, (root, get_multiprocessing_logger())):
            self._setup_logger(logger, logfile, format, colorize, **kwargs)
            if loglevel:
                logger.setLevel(loglevel)
            signals.after_setup_logger.send(sender=None, logger=logger,
                                            loglevel=loglevel,
                                            logfile=logfile,
                                            format=format,
                                            colorize=colorize)
    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.  ``logfile`` may be an
    # already-open stream object, which os.environ.update would
    # reject: only export it when it is a real path string.
    os.environ.update(
        _MP_FORK_LOGLEVEL_=str(loglevel),
        _MP_FORK_LOGFILE_=logfile if isinstance(logfile, basestring) else "",
        _MP_FORK_LOGFORMAT_=format)
    return receivers