def setUp(self):
    """Attach two throwaway handlers to the root logger before each test.

    The parent class's ``setUp()`` is expected to strip the handlers
    added here; the surrounding test class verifies that removal.
    """
    self.logger = logging.getLogger()
    # Register a pair of dummy handlers for the teardown logic to remove.
    for _ in range(2):
        self.logger.addHandler(logging.Handler())
    # `TestCase.setUp()` removes the handlers just added.
    super(TestRemoveLoggingHandlers, self).setUp()
def test_get_handler(self):
    """Handlers registered under a name are retrievable by that name."""
    first = logging.Handler()
    second = logging.Handler()
    self.m_obj.add_handler("h1", first)
    self.m_obj.add_handler("h2", second)
    # Each registered name maps back to exactly the handler stored under it.
    self.assertEqual(first, self.m_obj.get_handler("h1"))
    self.assertEqual(second, self.m_obj.get_handler("h2"))
    self.assertNotEqual(self.m_obj.get_handler("h2"),
                        self.m_obj.get_handler("h1"))
    # Looking up an unregistered name raises the project-level error.
    with self.assertRaises(error.XtdError):
        self.m_obj.get_handler("unknown")
def test_logging(wandb_init):
    """While a wandb run is active, root-logger records must not leak into
    wandb's loggers and wandb's own records must not reach the root logger.

    Capture is done by replacing each handler's ``emit`` with a closure
    that appends the raw message to a per-logger list.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel("DEBUG")
    root_logs = []
    root_handler = logging.Handler()
    root_handler.emit = lambda x: root_logs.append(x.msg)
    root_logger.addHandler(root_handler)

    wandb_logger = logging.getLogger("wandb")
    wandb_handler = logging.Handler()
    wandb_logs = []
    wandb_handler.emit = lambda x: wandb_logs.append(x.msg)
    wandb_logger.addHandler(wandb_handler)

    wandb_child_logger = logging.getLogger("wandb.x.y.z")
    wandb_child_handler = logging.Handler()
    wandb_child_logs = []
    wandb_child_handler.emit = lambda x: wandb_child_logs.append(x.msg)
    wandb_child_logger.addHandler(wandb_child_handler)

    root_logger.info("info1")
    # Bug fix: Logger.warn is a deprecated alias — use warning().
    root_logger.warning("warn1")
    run = wandb_init()
    root_logger.info("info2")
    root_logger.warning("warn2")
    wandb_logger.info("info3")
    wandb_logger.warning("warn3")
    wandb_child_logger.info("info4")
    # Bug fix: "warn4" was emitted via info(); warning() matches the intent
    # and the naming of the other messages.
    wandb_child_logger.warning("warn4")
    run.finish()
    root_logger.info("info5")
    root_logger.warning("warn5")

    # Work around unknown test flake WB-6348
    try:
        root_logs.remove("git repository is invalid")
    except ValueError:
        pass

    assert root_logs == ["info1", "warn1", "info2", "warn2", "info5", "warn5"]
    assert not any([msg in wandb_logs for msg in root_logs])
    assert all(
        [msg in wandb_logs for msg in ["info3", "warn3", "info4", "warn4"]])
    assert wandb_child_logs == ["info4", "warn4"]
def test_add_handler_to_logger_does_as_expected():
    """The helper attaches the handler to the logger and wires the formatter."""
    target_logger = logging.getLogger("TEST_CODE42_CLI")
    fmt = logging.Formatter()
    new_handler = logging.Handler()
    add_handler_to_logger(target_logger, new_handler, fmt)
    assert new_handler in target_logger.handlers
    assert new_handler.formatter == fmt
def test_fix_logging_module(self): return # Test that it issues the already imported warning. warnings.filterwarnings('error', '.*module already imported.*') self.assertRaises(UserWarning, atfork.stdlib_fixer.fix_logging_module) # Now let it run, ignoring rather than raising the warning. warnings.filterwarnings('ignore', '.*module already imported.*') atfork.stdlib_fixer.fix_logging_module() self.assertTrue(logging.fixed_for_atfork) # Test that the fixup is never installed twice. old_acquire_lock = logging._acquireLock try: logging._acquireLock = lambda: self.fail('fixup ran a second time') finally: logging._acquireLock = old_acquire_lock atfork.stdlib_fixer.fix_logging_module() orig_atfork = atfork.atfork logging_handler_atfork_calls = [] def fake_atfork(prepare, parent, child): raise Exception("test") print("fake_atfork begin") logging_handler_atfork_calls.append((prepare, parent, child)) atfork.atfork = fake_atfork try: handler = logging.Handler(level=logging.DEBUG) finally: atfork.atfork = orig_atfork self.assertEqual([ (handler.lock.acquire, handler.lock.release, handler.lock.release) ], logging_handler_atfork_calls)
def __init__(self):
    """Initialise the scene model: empty data slots, a Qt-side forwarder
    for root-logger records, and a dedicated worker thread."""
    QtCore.QObject.__init__(self)
    # Scene data slots, populated later by the loading code.
    self.scene = None
    self.frame = None
    self.quadtree = None
    self.covariance = None
    self.log = SceneLogModel(self)
    # Rate-limit quadtree-change notifications to 5 Hz.
    # NOTE(review): the slot emits the *builtin* ``object`` as payload —
    # this looks like a placeholder/bug; confirm the intended argument.
    self._ = SignalProxy(self._sigQuadtreeChanged, rateLimit=5, delay=0,
                         slot=lambda: self.sigQuadtreeChanged.emit(object))
    # Forward every root-logger record (DEBUG and above) to the
    # ``sigLogRecord`` Qt signal by hijacking the handler's emit.
    self._log_handler = logging.Handler()
    self._log_handler.setLevel(logging.DEBUG)
    self._log_handler.emit = self.sigLogRecord.emit
    logging.root.addHandler(self._log_handler)
    self.qtproxy = QSceneQuadtreeProxy(self)
    # Run this model on its own worker thread.
    self.worker_thread = QtCore.QThread()
    self.moveToThread(self.worker_thread)
    self.worker_thread.start()
def configure_logger(logger, debug=False, stdout=None, stderr=None):
    """Configure logger to output logging modules to console via stdout
    and stderr.

    Parameters
    ----------
    logger : logging.Logger
        Logger instance to configure.
    debug : bool, optional
        If True, DEBUG level messages output to stdout. (Default: False)
    stdout : file handle, optional
        Stream to which to write DEBUG and INFO messages.
        (Default: sys.stdout)
    stderr : file handle, optional
        Stream to which to write WARNING, ERROR, and CRITICAL messages.
        (Default: sys.stderr)
    """
    # Set the log level of logger (either to DEBUG or INFO).
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    # Get rid of any extant logging handlers that are installed.
    # Bug fix: this previously popped from ``top_level_logger`` — an
    # undefined name that raised NameError; pop from ``logger`` itself.
    while logger.handlers:
        logger.handlers.pop()
    # Install custom-configured handler and formatter.
    # (A dead assignment of a bare logging.Handler was removed; it was
    # immediately overwritten and the base class cannot emit anyway.)
    fmt = CustomFormatter()
    handler = CustomStreamHandler(stdout=stdout, stderr=stderr, formatter=fmt)
    logger.addHandler(handler)
def __init__(self):
    """Register this object as a filter on the 'pytest.fluent' logger so
    it can observe every record passing through it."""
    # Most recently observed LogRecord (populated by the filter logic).
    self.record = None
    # NOTE(review): this handler is created and levelled but never
    # attached to any logger — it appears to be dead code; confirm
    # before removing (Handler() does register in logging's shutdown list).
    handler = logging.Handler()
    handler.setLevel(logging.DEBUG)
    self.logger = logging.getLogger('pytest.fluent')
    self.logger.setLevel(logging.DEBUG)
    # The object itself acts as the filter (must provide ``filter()``).
    self.logger.addFilter(self)
def create_logger(self):
    """Create and configure a logger from the instance's settings.

    Returns
    -------
    logging.Logger
        Logger at level ``self.LEVEL`` formatted with ``self.FORMAT``,
        writing to ``self.FILENAME`` under ``self.log_dirs`` when a
        filename is configured, otherwise to a stream handler.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(self.LEVEL)
    logformater = logging.Formatter(self.FORMAT)
    if self.FILENAME:
        loghandler = logging.FileHandler(
            os.path.join(self.log_dirs, self.FILENAME))
    else:
        # Bug fix: the base logging.Handler cannot emit records (its
        # emit() raises NotImplementedError); use a StreamHandler so
        # console output actually works.
        loghandler = logging.StreamHandler()
    loghandler.setFormatter(logformater)
    logger.addHandler(loghandler)
    # Only accept records originating from this module's logger namespace.
    # (Renamed local to avoid shadowing the builtin ``filter``.)
    name_filter = logging.Filter(__name__)
    logger.addFilter(name_filter)
    return logger
def config_logger(name=__name__, level=logging.DEBUG, tango_logging=False): def tango_handler_emit(logger_handler, record): try: msg = logger_handler.format(record) if logger_handler.level >= logging.CRITICAL: tango.server.Device.fatal_stream(msg) elif logger_handler.level >= logging.ERROR: tango.server.Device.error_stream(msg) elif logger_handler.level >= logging.WARNING: tango.server.Device.warn_stream(msg) elif logger_handler.level >= logging.INFO: tango.server.Device.info_stream(msg) elif logger_handler.level >= logging.DEBUG: tango.server.Device.debug_stream(msg) except Exception: logger_handler.handleError(record) logger = logging.getLogger(name) #logger.setLevel(level) if not logger.hasHandlers(): logger.propagate = False f_str = '%(asctime)s,%(msecs)3d %(levelname)-7s %(filename)s %(funcName)s(%(lineno)s) %(message)s' log_formatter = logging.Formatter(f_str, datefmt='%H:%M:%S') console_handler = logging.StreamHandler() console_handler.setFormatter(log_formatter) logger.addHandler(console_handler) logger.setLevel(level) # add tango logger if tango_logging: tango_handler = logging.Handler() tango_handler.setFormatter(log_formatter) tango_handler.emit = tango_handler_emit logger.addHandler(tango_handler) return logger
def __init__(self, spool):
    """Initialise the spool-backed scene model: empty data slots, a Qt
    forwarder for root-logger records, optional download progress hook,
    and a dedicated worker thread.

    Parameters
    ----------
    spool : object
        Owning spool application object.
    """
    QtCore.QObject.__init__(self)
    self.spool = spool
    # Scene data slots, populated later by the loading code.
    self.scene = None
    self.frame = None
    self.quadtree = None
    self.covariance = None
    self.aps = None
    self.log = SceneLogModel(self)
    # Rate-limit quadtree-change notifications to 10 Hz.
    self._ = SignalProxy(self._sigQuadtreeChanged, rateLimit=10, delay=0,
                         slot=lambda: self.sigQuadtreeChanged.emit())
    # Forward every root-logger record (DEBUG and above) to the
    # ``sigLogRecord`` Qt signal by hijacking the handler's emit.
    self._log_handler = logging.Handler()
    self._log_handler.setLevel(logging.DEBUG)
    self._log_handler.emit = self.sigLogRecord.emit
    logging.root.addHandler(self._log_handler)
    self._download_status = None
    # NOTE(review): ``pyrocko_download_callback`` is a module-level name
    # not visible here — presumably a registration hook; confirm.
    if pyrocko_download_callback:
        pyrocko_download_callback(self.download_progress)
    self.qtproxy = QSceneQuadtreeProxy(self)
    # Run this model on its own worker thread.
    self.worker_thread = QtCore.QThread()
    self.moveToThread(self.worker_thread)
    self.worker_thread.start()
def init_logger(cls, path, debug=False, more=None, fmt=None):
    """Install root logging: a record-rewriting hook plus a file handler.

    Parameters
    ----------
    cls : type
        Class providing ``traceback_str``, ``LogHandler`` and
        ``init_logger_format``.
    path : str
        Destination path handed to ``cls.LogHandler``.
    debug : bool, optional
        When True, additionally attach a ``DebugStreamHandler``.
    more : callable, optional
        Extra-text provider appended to each record's ``exc_text``.
    fmt : logging.Formatter, optional
        Formatter for the file handler (default: ``cls.init_logger_format()``).

    Returns
    -------
    The installed ``cls.LogHandler`` instance.
    """
    def emit(rec):
        # Rewrite the record in place: fold exception/stack info into the
        # message and ``exc_text`` before any real handler formats it.
        msg = rec.getMessage()
        ei = rec.exc_info
        tb = ''
        if ei:
            em = f'{ei[0].__name__}: {ei[1]}'
            msg = em if msg == '' else ': '.join((msg, em))
            tb = ei[2]
        elif rec.stack_info:
            tb = rec.stack_info
            # Strip the fixed "Stack (most recent call last):\n" prefix.
            if tb.startswith("Stack (most recent call last):\n"):
                tb = tb[31:]
        rec.msg = msg.rstrip()
        # NOTE(review): the replace() arguments appear whitespace-mangled
        # in this copy of the source — confirm the intended indentation
        # rewrite against version control.
        tb = rec.exc_text = tb and cls.traceback_str(tb).strip()\
            .replace("\n ", "\n ") or ''
        if more:
            rec.exc_text += (tb and "\n") + (try_(more) or '')
        # Message is already fully rendered; drop args/stack to prevent
        # downstream handlers from re-formatting.
        rec.args = rec.stack_info = None
    logging.captureWarnings(True)
    lgr = logging.getLogger()
    # Bare Handler used purely as a hook: its emit() rewrites records.
    h = logging.Handler()
    h.emit = emit
    lgr.addHandler(h)
    lgh = cls.LogHandler(path)
    lgh.setFormatter(fmt or cls.init_logger_format())
    lgr.addHandler(lgh)
    debug and lgr.addHandler(DebugStreamHandler())
    return lgh
def test__get_existing_logger_no_logger_present(mock_logger):
    """Without a ProviderLogHandler among the handlers, lookup yields None."""
    mock_logger.handlers = [logging.Handler()]
    target = "cloudformation_cli_python_lib.log_delivery.logging.getLogger"
    with patch(target, return_value=mock_logger):
        found = ProviderLogHandler._get_existing_logger()
    assert found is None
def get_tags_for_path(path):
    """Yield candidate-resolved beets ImportTask objects for *path*."""
    session = ImportSession(None, logging.Handler(), path, None)
    session.set_config(BEETS_CONFIG["import"])
    factory = ImportTaskFactory(path, session)
    for task in factory.tasks():
        # Only plain ImportTask instances (exact type, not subclasses).
        if type(task) is beets.importer.ImportTask:
            task.lookup_candidates()
            yield task
def test__get_existing_logger_logger_present(mock_logger, mock_session):
    """A ProviderLogHandler in the handler list is found and returned."""
    expected = ProviderLogHandler("g", "s", mock_session)
    mock_logger.handlers = [logging.Handler(), expected]
    target = "cloudformation_cli_python_lib.log_delivery.logging.getLogger"
    with patch(target, return_value=mock_logger):
        found = ProviderLogHandler._get_existing_logger()
    assert found == expected
def test_logging(wandb_init):
    """While a wandb run is active, root-logger records must not leak into
    wandb's loggers and wandb's own records must not reach the root logger.

    Capture is done by replacing each handler's ``emit`` with a closure
    that appends the raw message to a per-logger list.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel("DEBUG")
    root_logs = []
    root_handler = logging.Handler()
    root_handler.emit = lambda x: root_logs.append(x.msg)
    root_logger.addHandler(root_handler)

    wandb_logger = logging.getLogger("wandb")
    wandb_handler = logging.Handler()
    wandb_logs = []
    wandb_handler.emit = lambda x: wandb_logs.append(x.msg)
    wandb_logger.addHandler(wandb_handler)

    wandb_child_logger = logging.getLogger("wandb.x.y.z")
    wandb_child_handler = logging.Handler()
    wandb_child_logs = []
    wandb_child_handler.emit = lambda x: wandb_child_logs.append(x.msg)
    wandb_child_logger.addHandler(wandb_child_handler)

    root_logger.info("info1")
    # Bug fix: Logger.warn is a deprecated alias — use warning().
    root_logger.warning("warn1")
    run = wandb_init()
    root_logger.info("info2")
    root_logger.warning("warn2")
    wandb_logger.info("info3")
    wandb_logger.warning("warn3")
    wandb_child_logger.info("info4")
    # Bug fix: "warn4" was emitted via info(); warning() matches the intent
    # and the naming of the other messages.
    wandb_child_logger.warning("warn4")
    run.finish()
    root_logger.info("info5")
    root_logger.warning("warn5")

    assert root_logs == ["info1", "warn1", "info2", "warn2", "info5", "warn5"]
    assert not any([msg in wandb_logs for msg in root_logs])
    assert all(
        [msg in wandb_logs for msg in ["info3", "warn3", "info4", "warn4"]])
    assert wandb_child_logs == ["info4", "warn4"]
def setup_logging():
    '''
    Sets up and returns the stats logger with a formatted stream handler.
    '''
    stat_logger = logging.getLogger('stats')
    # Bug fixes: (1) the base logging.Handler cannot emit records (its
    # emit() raises NotImplementedError) — use a StreamHandler;
    # (2) setFormatter() requires a logging.Formatter instance, not a
    # bare format string.
    stats_log_handler = logging.StreamHandler()
    stats_log_handler.setLevel('INFO')
    log_format = '%(asctime)s::%(levelname)s::[%(module)s:%(lineno)d]::[%(threadName)s] %(message)s'
    stats_log_handler.setFormatter(logging.Formatter(log_format))
    stat_logger.addHandler(stats_log_handler)
    return stat_logger
def __init__(self, levelColor=None, parent=None):
    """Initialise the log display widget and its capture handler.

    Parameters
    ----------
    levelColor : mapping, optional
        Level-to-colour mapping; falls back to ``self.defaultLevelColors``.
    parent : QObject, optional
        Qt parent passed to the superclass.
    """
    super().__init__(parent)
    if levelColor is None:
        self.levelColors = self.defaultLevelColors
    else:
        self.levelColors = levelColor
    # Route log records into this object: the handler's emit is replaced
    # by ``self.emit`` (the widget's own record sink / Qt signal).
    self.handler = logging.Handler()
    self.handler.emit = self.emit
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    self.handler.setFormatter(formatter)
def random_log_record(extra=None):
    """Emit one record at a random level and return the captured LogRecord.

    Parameters
    ----------
    extra : dict, optional
        Extra fields forwarded to the logging call.
        (Bug fix: was a mutable default argument ``{}``; ``None`` is
        accepted directly by the logging methods.)

    Returns
    -------
    logging.LogRecord
        The record captured via the mocked handler emit.
    """
    handler = logging.Handler()
    handler.setLevel(logging.DEBUG)
    handler.emit = mock.MagicMock()
    # A unique logger name avoids interference with other tests.
    log = logging.getLogger(uuid.uuid4().hex)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    # Pick one of DEBUG/INFO/WARNING/ERROR/CRITICAL at random.
    rand_level = logging.getLevelName(random.choice([10, 20, 30, 40, 50]))
    log_func = getattr(log, rand_level.lower())
    log_func(uuid.uuid4().hex, extra=extra)
    log_record = handler.emit.call_args[0][0]
    log.removeHandler(handler)
    return log_record
def __init__(self, job_id, job_pid, timeout=1):
    """Supervisor-side consumer of a job's log messages.

    Parameters
    ----------
    job_id : int
        Identifier of the supervised job.
    job_pid : int
        PID of the supervised job process.
    timeout : int, optional
        Consumer timeout forwarded to the superclass. (Default: 1)
    """
    self.selflogger = logging.getLogger('oq.job.%s.supervisor' % job_id)
    self.selflogger.info('Entering supervisor for job %s', job_id)
    logger_name = 'oq.job.%s' % job_id
    # Routing key: all messages under this job's logger namespace.
    key = '%s.#' % logger_name
    super(SupervisorLogMessageConsumer, self).__init__(timeout=timeout,
                                                       routing_key=key)
    self.job_id = job_id
    self.job_pid = job_pid
    self.joblogger = logging.getLogger(logger_name)
    # Capture ERROR-and-above job records via ``log_callback``.
    self.jobhandler = logging.Handler(logging.ERROR)
    self.jobhandler.emit = self.log_callback
    self.joblogger.addHandler(self.jobhandler)
    # Failure counter check delay value
    self.fcc_delay_value = 0
def initUI(self):
    """Build the main window and route INFO+ log records into the
    read-only log text widget."""
    self.logger.info('.initUI() entered')
    self.setWindowTitle(self.title)
    self.setGeometry(self.left, self.top, self.width, self.height)
    self.logger_edit.setReadOnly(True)
    self.logger_edit.show()
    # Hijack a bare Handler: its emit appends each formatted record to
    # the text widget instead of writing to a stream.
    log_handler = logging.Handler()
    log_handler_formatter = logging.Formatter(
        '# [%(asctime)s] %(levelname)-s: %(message)s\n',
        datefmt='%Y-%m-%d %H:%M:%S')
    log_handler.emit = lambda record: self.logger_edit.insertPlainText(
        log_handler_formatter.format(record))
    log_handler.setLevel(logging.INFO)
    self.logger.addHandler(log_handler)
def __init__(self, *args, **kw):
    """Output that mirrors root-logger records into a bounded ring buffer."""
    _BaseOutput.__init__(self, *args, **kw)
    self._start = time.time()
    # log msg ring buffer
    self._records = deque(maxlen=100)
    self._lock = threading.Lock()
    self._stats = {}
    # highjack the root handler, remove existing and replace with one
    # that feeds our ring buffer
    # NOTE(review): assumes the root logger already has at least one
    # handler (``handlers[0]``) — raises IndexError otherwise; confirm
    # callers always configure root logging first.
    h = logging.Handler()
    root = logging.getLogger('')
    h.formatter = root.handlers[0].formatter
    h.emit = self._emit
    # Replacing the whole handler list drops all previous handlers.
    root.handlers = (h, )
    self._handler = h
def __init__(self, scene_model, *args, **kwargs):
    """Initialise the proxy around *scene_model* on its own worker thread.

    Parameters
    ----------
    scene_model : object
        Scene model handed to ``setModel`` after setup.
    """
    QtCore.QObject.__init__(self, *args, **kwargs)
    # Run this object on a dedicated thread.
    self.worker_thread = QtCore.QThread()
    self.moveToThread(self.worker_thread)
    self.worker_thread.start()
    self.model = None
    self.log = SceneLogModel(self)
    self.sources = SourceModel(self)
    # Forward log records to the ``sigLogRecord`` Qt signal.
    self._log_handler = logging.Handler()
    self._log_handler.emit = self.sigLogRecord.emit
    self.cursor_tracker = CursorTracker()
    self.setModel(scene_model)
def config_log(args=None):
    """Configure root logging: console output at DEBUG plus a per-model
    stats file handler (or a no-op handler when *args* is None).

    Parameters
    ----------
    args : argparse.Namespace, optional
        When given, must provide ``checkdir``, ``model`` and ``size``;
        INFO-level stats are then written to
        ``<checkdir>/<model><size>_stats.txt``.
    """
    bare_frmtr = logging.Formatter('%(message)s')
    if args is None:
        # Bug fix: the base logging.Handler raises NotImplementedError
        # when a record reaches its emit(); NullHandler is the correct
        # "discard" stand-in.
        handler = logging.NullHandler()
    else:
        handler = logging.FileHandler(
            os.path.join(args.checkdir, f'{args.model}{args.size}_stats.txt'),
            'w')
    handler.setLevel(logging.INFO)
    handler.setFormatter(bare_frmtr)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(bare_frmtr)
    rootlogger = logging.getLogger()
    rootlogger.setLevel(logging.DEBUG)
    rootlogger.addHandler(console)
    rootlogger.addHandler(handler)
    # Silence matplotlib's noisy font-manager logger.
    logging.getLogger('matplotlib.font_manager').disabled = True
def log_level_n(self):
    """Resolve ``self.log_level`` to a numeric logging level.

    Accepts an int, a digit string, or a level name in any case (also
    'logging.DEBUG'-style). Validation is delegated to
    ``Handler.setLevel``, which understands both numbers and upper-case
    names.
    """
    probe = logging.Handler()
    try:
        probe.setLevel(int(self.log_level))
        return probe.level
    except ValueError:
        pass
    try:
        # logging.DEBUG, debug, DEBUG, etc — normalise to the bare
        # upper-case name that setLevel understands.
        bare_name = self.log_level.upper().strip().split('.')[-1]
        probe.setLevel(bare_name)
        return probe.level
    except ValueError:
        raise ValueError(
            f"log_level='{self.log_level}'"
            " is not understood (even with help) by python logging")
def test_autotest_logging_handle_error(self):
    """The error hook reports record details to stderr and switches off
    logging.raiseExceptions."""
    record = logging.LogRecord('test', logging.DEBUG, __file__, 0,
                               'MESSAGE', 'ARGS', None)
    try:
        raise RuntimeError('Exception context needed for the test.')
    except RuntimeError:
        setup_modules._autotest_logging_handle_error(
            logging.Handler(), record)
    else:
        self.fail()
    self.assert_autotest_logging_handle_error_called()
    stderr_repr = repr(self.stderr_str)
    # The stderr report must mention the message, args, the exception and
    # the source of the failure, plus the "disabled." notice.
    for needle in ('MESSAGE', 'ARGS', 'Exception',
                   'setup_modules_unittest.py', 'disabled.\n'):
        self.assertTrue(needle in self.stderr_str, stderr_repr)
    # Make sure this was turned off by our handle_error.
    self.assertFalse(logging.raiseExceptions)
def __init__(self, sandbox_model=None, *args, **kwargs):
    """Initialise the sandbox proxy: log forwarding, a worker thread, and
    (optionally) an initial model.

    Parameters
    ----------
    sandbox_model : object, optional
        Model handed to ``setModel`` once the thread is running.
    """
    QtCore.QObject.__init__(self)
    self.model = sandbox_model
    self.log = SceneLogModel(self)
    self.sources = SourceModel(self)
    self.cursor_tracker = CursorTracker(self)
    # Forward every root-logger record (DEBUG and above) to the
    # ``sigLogRecord`` Qt signal by hijacking the handler's emit.
    self._log_handler = logging.Handler()
    self._log_handler.setLevel(logging.DEBUG)
    self._log_handler.emit = self.sigLogRecord.emit
    logging.root.setLevel(logging.DEBUG)
    logging.root.addHandler(self._log_handler)
    # Run this object on its own worker thread.
    self.worker_thread = QtCore.QThread()
    self.moveToThread(self.worker_thread)
    self.worker_thread.start()
    if self.model:
        self.setModel(self.model)
def initlog(log_level=LOG_LEVEL):
    """Build and return the 'oss' logger with a formatted stream handler.

    Parameters
    ----------
    log_level : str
        Level name, case-insensitive (e.g. 'debug', 'INFO'); any
        unrecognised value falls back to ERROR, as before.

    Returns
    -------
    logging.Logger
    """
    import logging
    FORMAT = \
        "%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
    # Bug fix: the base logging.Handler cannot emit records (its emit()
    # raises NotImplementedError); use a StreamHandler so output works.
    hdlr = logging.StreamHandler()
    formatter = logging.Formatter(FORMAT)
    hdlr.setFormatter(formatter)
    logger = logging.getLogger("oss")
    logger.addHandler(hdlr)
    # Name -> numeric level lookup replaces the long if/elif chain;
    # unknown names keep the original ERROR fallback.
    levels = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    logger.setLevel(levels.get(log_level.upper(), logging.ERROR))
    return logger
def __init__(self, job_id, job_pid, timeout=1):
    """Supervisor-side consumer of a calculation's log messages.

    Resolves the job's calculation domain (hazard vs risk) and wires an
    ERROR-level capture handler onto the calculation's logger.

    Parameters
    ----------
    job_id : int
        Identifier of the supervised job.
    job_pid : int
        PID of the supervised job process.
    timeout : int, optional
        Consumer timeout forwarded to the superclass. (Default: 1)
    """
    self.job_id = job_id
    job = OqJob.objects.get(id=job_id)
    self.calc_id = job.calculation.id
    # A job belongs to exactly one domain: hazard if set, else risk.
    if job.hazard_calculation is not None:
        self.calc_domain = 'hazard'
    else:
        self.calc_domain = 'risk'
    self.selflogger = logging.getLogger('oq.%s.%s.supervisor' %
                                        (self.calc_domain, self.calc_id))
    self.selflogger.debug('Entering supervisor for %s calc %s' %
                          (self.calc_domain, self.calc_id))
    logger_name = 'oq.%s.%s' % (self.calc_domain, self.calc_id)
    # Routing key: all messages under this calculation's logger namespace.
    key = '%s.#' % logger_name
    super(SupervisorLogMessageConsumer, self).__init__(timeout=timeout,
                                                       routing_key=key)
    self.job_pid = job_pid
    self.joblogger = logging.getLogger(logger_name)
    # Capture ERROR-and-above records via ``log_callback``.
    self.jobhandler = logging.Handler(logging.ERROR)
    self.jobhandler.emit = self.log_callback
    self.joblogger.addHandler(self.jobhandler)
    # Failure counter check delay value
    self.fcc_delay_value = 0
def __init__(self, logname):
    """Configure file + console logging and load the recipient list.

    Parameters
    ----------
    logname : str
        Path of the log file (opened in write mode, truncating).
    """
    log_name = logname
    # File logging at DEBUG via basicConfig ...
    logging.basicConfig(
        level=logging.DEBUG,
        format=
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=log_name,
        filemode='w')
    # ... plus a console handler limited to INFO and above.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    # Load recipient addresses from receivers.csv in the working
    # directory and flatten every cell into a ';'-terminated string.
    # NOTE(review): assumes receivers.csv exists relative to the CWD and
    # every cell is a string — confirm.
    __content__ = pd.read_csv('receivers.csv')
    __receiver_list__ = [j for i in __content__.values for j in i]
    self.receivers = ''
    for i in __receiver_list__:
        self.receivers += i
        self.receivers += ';'
    # NOTE(review): this bare base Handler is stored but never attached
    # to a logger; its purpose is unclear — confirm before removing.
    self.handler = logging.Handler()