def setup_logging_subsystem(self, loglevel=None, logfile=None,
                            format=None, colorize=None, **kwargs):
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    if not is_py3k:
        ensure_process_aware_logger()
    receivers = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not receivers:
        root = logging.getLogger()

        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []

        for logger in root, get_multiprocessing_logger():
            if logger is not None:
                self.setup_handlers(logger, logfile, format,
                                    colorize, **kwargs)
                if loglevel:
                    logger.setLevel(loglevel)
                signals.after_setup_logger.send(
                    sender=None, logger=logger,
                    loglevel=loglevel, logfile=logfile,
                    format=format, colorize=colorize,
                )

        # then setup the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)

    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.
    logfile_name = logfile if isinstance(logfile, basestring) else ""
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=logfile_name,
                      _MP_FORK_LOGFORMAT_=format)
    return receivers
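A note on the `receivers` check: `signals.setup_logging.send()` returns the list of connected receivers, so connecting anything to the `setup_logging` signal makes the whole `if not receivers:` block a no-op and hands logging configuration to the application. A minimal sketch of that documented hook (the app name, broker URL and dictConfig contents are placeholders, not taken from the snippets above):

import logging.config

from celery import Celery
from celery.signals import setup_logging

app = Celery('proj', broker='memory://')  # placeholder app/broker


@setup_logging.connect
def configure_logging(loglevel=None, logfile=None, format=None,
                      colorize=None, **kwargs):
    # Because this receiver is connected, setup_logging_subsystem() gets a
    # non-empty receiver list back and skips its own root/handler setup.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {'console': {'class': 'logging.StreamHandler'}},
        'root': {'handlers': ['console'], 'level': 'INFO'},
    })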
def autodiscover_tasks(self, packages, related_name='tasks'):
    if self.conf.CELERY_FORCE_BILLIARD_LOGGING:
        # we'll use billiard's processName instead of
        # multiprocessing's one in all the loggers
        # created after this call
        ensure_process_aware_logger()
    self.loader.autodiscover_tasks(packages, related_name)
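The guard above reads a plain boolean from the app configuration. A small sketch of turning it on, assuming the old upper-case setting namespace used in these snippets (the package names are placeholders):

from celery import Celery

app = Celery('proj')
# Make loggers created from here on use billiard's processName.
app.conf.CELERY_FORCE_BILLIARD_LOGGING = True

# autodiscover_tasks() will now call ensure_process_aware_logger()
# before asking the loader to scan these packages for task modules.
app.autodiscover_tasks(['proj.app_a', 'proj.app_b'])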
def setup_logging_subsystem(self, loglevel=None, logfile=None,
                            format=None, colorize=None, **kwargs):
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    if not PY3:
        ensure_process_aware_logger()
    receivers = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not receivers:
        root = logging.getLogger()

        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []

        # Configure root logger
        self._configure_logger(
            root, logfile, loglevel, format, colorize, **kwargs
        )

        # Configure the multiprocessing logger
        self._configure_logger(
            get_multiprocessing_logger(),
            logfile, loglevel if MP_LOG else logging.ERROR,
            format, colorize, **kwargs
        )

        signals.after_setup_logger.send(
            sender=None, logger=root,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        # then setup the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)

    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.
    logfile_name = logfile if isinstance(logfile, string_t) else ''
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=logfile_name,
                      _MP_FORK_LOGFORMAT_=format)
    return receivers
def _autodiscover_tasks(self, packages, related_name='tasks', **kwargs):
    # argument may be lazy
    packages = packages() if isinstance(packages, Callable) else packages
    if self.conf.CELERY_FORCE_BILLIARD_LOGGING:
        # we'll use billiard's processName instead of
        # multiprocessing's one in all the loggers
        # created after this call
        ensure_process_aware_logger()
    self.loader.autodiscover_tasks(packages, related_name)
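The `isinstance(packages, Callable)` check is what lets callers pass the package list lazily, which matches the documented Django integration pattern: the callable is only evaluated once autodiscovery actually runs. A sketch of that usage (project and settings names are placeholders):

from celery import Celery
from django.conf import settings

app = Celery('proj')
app.config_from_object('django.conf:settings')

# Passing a callable defers reading INSTALLED_APPS until autodiscovery runs,
# at which point _autodiscover_tasks() calls it and scans each package for a
# `tasks` module.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)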
def setup_logging_subsystem(self, loglevel=None, logfile=None,
                            format=None, colorize=None, **kwargs):
    if Logging._setup:
        return
    Logging._setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    if colorize is None:
        colorize = self.supports_color(logfile)
    reset_multiprocessing_logger()
    if not is_py3k:
        ensure_process_aware_logger()
    receivers = signals.setup_logging.send(sender=None,
                                           loglevel=loglevel, logfile=logfile,
                                           format=format, colorize=colorize)
    if not receivers:
        root = logging.getLogger()

        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []

        for logger in filter(None, (root, get_multiprocessing_logger())):
            self.setup_handlers(logger, logfile, format, colorize, **kwargs)
            if loglevel:
                logger.setLevel(loglevel)
            signals.after_setup_logger.send(sender=None, logger=logger,
                                            loglevel=loglevel, logfile=logfile,
                                            format=format, colorize=colorize)

        # then setup the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)

    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.
    logfile_name = logfile if isinstance(logfile, basestring) else ""
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=logfile_name,
                      _MP_FORK_LOGFORMAT_=format)
    return receivers
def setup_logging_subsystem(self, loglevel=None, logfile=None,
                            format=None, colorize=None, **kwargs):
    if Logging._setup:
        return
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    if colorize is None:
        colorize = self.supports_color(logfile)
    reset_multiprocessing_logger()
    if not is_py3k:
        ensure_process_aware_logger()
    receivers = signals.setup_logging.send(sender=None,
                                           loglevel=loglevel, logfile=logfile,
                                           format=format, colorize=colorize)
    if not receivers:
        root = logging.getLogger()

        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []

        for logger in filter(None, (root, get_multiprocessing_logger())):
            self._setup_logger(logger, logfile, format, colorize, **kwargs)
            if loglevel:
                logger.setLevel(loglevel)
            signals.after_setup_logger.send(sender=None, logger=logger,
                                            loglevel=loglevel, logfile=logfile,
                                            format=format, colorize=colorize)

    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=logfile or "",
                      _MP_FORK_LOGFORMAT_=format)
    Logging._setup = True
    return receivers
    ImproperlyConfigured, WorkerShutdown, WorkerTerminate,
)
from celery.utils.log import ensure_process_aware_logger
from celery.worker import state
from celery.tests.case import (
    AppCase, Mock, SkipTest, WhateverIO, patch,
    skip_if_pypy, skip_if_jython,
)

ensure_process_aware_logger()


class WorkerAppCase(AppCase):

    def tearDown(self):
        super(WorkerAppCase, self).tearDown()
        trace.reset_worker_optimizations()


def disable_stdouts(fun):

    @wraps(fun)
    def disable(*args, **kwargs):
        prev_out, prev_err = sys.stdout, sys.stderr
        prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
from celery import signals
from celery import current_app
from celery.apps import worker as cd
from celery.bin.celeryd import WorkerCommand, main as celeryd_main
from celery.exceptions import ImproperlyConfigured, SystemTerminate
from celery.utils.log import ensure_process_aware_logger
from celery.worker import state
from celery.tests.utils import (
    AppCase, WhateverIO, skip_if_pypy, skip_if_jython,
)

ensure_process_aware_logger()


def disable_stdouts(fun):

    @wraps(fun)
    def disable(*args, **kwargs):
        prev_out, prev_err = sys.stdout, sys.stderr
        prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
        sys.stdout = sys.__stdout__ = WhateverIO()
        sys.stderr = sys.__stderr__ = WhateverIO()
        try:
            return fun(*args, **kwargs)
        finally:
            sys.stdout = prev_out
            sys.stderr = prev_err
def test_patches(self):
    ensure_process_aware_logger()
    with in_sighandler():
        logging.getLoggerClass().log(get_logger('test'))
def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                            colorize=None, hostname=None, **kwargs):
    if self.already_setup:
        return
    if logfile and hostname:
        logfile = node_format(logfile, hostname)
    self.already_setup = True
    loglevel = mlevel(loglevel or self.loglevel)
    format = format or self.format
    colorize = self.supports_color(colorize, logfile)
    reset_multiprocessing_logger()
    ensure_process_aware_logger()
    receivers = signals.setup_logging.send(
        sender=None, loglevel=loglevel, logfile=logfile,
        format=format, colorize=colorize,
    )
    if not receivers:
        root = logging.getLogger()

        if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
            root.handlers = []
            get_logger('celery').handlers = []
            get_logger('celery.task').handlers = []
            get_logger('celery.redirected').handlers = []

        # Configure root logger
        self._configure_logger(
            root, logfile, loglevel, format, colorize, **kwargs
        )

        # Configure the multiprocessing logger
        self._configure_logger(
            get_multiprocessing_logger(),
            logfile, loglevel if MP_LOG else logging.ERROR,
            format, colorize, **kwargs
        )

        signals.after_setup_logger.send(
            sender=None, logger=root,
            loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        # then setup the root task logger.
        self.setup_task_loggers(loglevel, logfile, colorize=colorize)

    try:
        stream = logging.getLogger().handlers[0].stream
    except (AttributeError, IndexError):
        pass
    else:
        set_default_encoding_file(stream)

    # This is a hack for multiprocessing's fork+exec, so that
    # logging before Process.run works.
    logfile_name = logfile if isinstance(logfile, string_t) else ''
    os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                      _MP_FORK_LOGFILE_=logfile_name,
                      _MP_FORK_LOGFORMAT_=format)
    return receivers
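In this later revision the new `hostname` argument drives `node_format()`, which is what expands the per-node placeholders accepted by `--logfile`. A small sketch of the expansion, assuming the Celery 4.x module path for the helper (the nodename and path are illustrative):

from celery.utils.nodenames import node_format  # lives in celery.utils in older releases

# %n expands to the name part of the nodename, %h to its host part.
print(node_format('/var/log/celery/%n.log', 'worker1@example.com'))
# -> /var/log/celery/worker1.log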