def set_logger(self, logger_name):
    if self._mylogger is not None:
        raise err.LoggerException(
            'WARNING!!! The logger {0} has been initialized already.'.format(
                self._mylogger))
    logging.root = logging.RootLogger(logging.WARNING)
    logger = logging.getLogger(logger_name)
    self._mylogger = logger
    self._mylogger.handlers = []
    self._mylogger.setLevel(logging.DEBUG)
@contextmanager  # assumed: the bare ``yield`` pattern implies a restored contextlib.contextmanager decorator
def logging_sandbox():
    # Monkeypatch a replacement root logger, so that our changes to logging
    # configuration don't persist outside of the test.
    root_logger = logging.RootLogger(logging.WARNING)
    with mock.patch.object(logging, "root", root_logger):
        with mock.patch.object(logging.Logger, "root", root_logger):
            with mock.patch.object(logging.Logger, "manager",
                                   logging.Manager(root_logger)):
                yield
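# Hedged usage sketch (test name and body are illustrative, not from the
# source): handler changes made inside the sandbox do not leak out of it.
def test_handlers_do_not_leak():
    with logging_sandbox():
        logging.root.addHandler(logging.NullHandler())
        assert logging.root.handlers
    # Outside the sandbox, the process-wide root logger is untouched.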
def test_prepare_logging(self):
    root = logging.RootLogger(logging.DEBUG)
    filepath = os.path.join(self.tmp, 'test.log')
    logging_utils.prepare_logging(filepath, root)
    root.debug('foo')
    with open(filepath, 'rb') as f:
        result = f.read()
    # It'd be nice to figure out a way to ensure it's properly in UTC but it's
    # tricky to do reliably.
    expected = _LOG_HEADER + ' D: foo\n$'
    self.assertTrue(re.match(expected, result), (expected, result))
def patch_logging_for_tracking():
    import logging

    import six

    if six.PY2:
        log_fn = _log_py2
    else:
        log_fn = _log

    setattr(logging.RootLogger, "_log", log_fn)
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.manager = logging.Manager(logging.root)
    setattr(logging.Logger, "_log", log_fn)
def reset():
    loguru.logger.remove()
    loguru.logger.__init__({}, None, False, False, False, False, 0)
    loguru._logger.Logger._levels = default_levels.copy()
    loguru._logger.Logger._min_level = float("inf")
    loguru._logger.Logger._extra_class = {}
    loguru._logger.Logger._handlers = {}
    loguru._logger.Logger._handlers_count = itertools.count()
    loguru._logger.Logger._enabled = {}
    loguru._logger.Logger._activation_list = []
    logging.Logger.manager.loggerDict.clear()
    logging.root = logging.RootLogger(logging.WARNING)
def __init__(self, log_type="default"):
    self.logType = log_type
    self.log = logging.RootLogger(logging.DEBUG)
    self.logHandler = logging.StreamHandler()
    self.log.addHandler(self.logHandler)
    self.logFmt = logging.Formatter(
        f"[%(asctime)s {self.logType} %(levelname)s]:%(message)s")
    self.logHandler.setFormatter(self.logFmt)
    self.info = self.log.info
    self.warning = self.log.warning
    self.debug = self.log.debug
    self.error = self.log.error
def __init__(self, config):
    self.logger = logging.RootLogger(logging.DEBUG)
    self.logger.propagate = False
    self.format = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - '
        '%(lineno)s - %(message)s')
    self.config = config
    for level in self.config.LOG_HANDLER:
        level_name = logging._levelToName.get(level)
        self.add_handler(self.logger, level, level_name)
        self.add_console(self.logger, level)
    if self.config.DEBUG:
        self.add_console(self.logger, logging.DEBUG)
def __init__(self, loglevel):
    # Initialize a new manager with an instantiation of a custom Logger
    # instance class.
    class _IsolatedLogger(logging.Logger):
        pass

    self.logger = logging.RootLogger(loglevel)
    _IsolatedLogger.root = self.logger
    _IsolatedLogger.manager = logging.Manager(self.logger)
    self.handler = QueueForIteratorHandler()
    self.handler.setFormatter(JSONFormatter())
    self.handler.setLevel(loglevel)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(loglevel)
def test_prepare_logging(self):
    root = logging.RootLogger(logging.DEBUG)
    tmp_dir = tempfile.mkdtemp(prefix='logging_utils_test')
    try:
        filepath = os.path.join(tmp_dir, 'test.log')
        logging_utils.prepare_logging(filepath, root)
        root.debug('foo')
        with open(filepath, 'rb') as f:
            result = f.read()
    finally:
        shutil.rmtree(tmp_dir)
    # It'd be nice to figure out a way to ensure it's properly in UTC but it's
    # tricky to do reliably.
    self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result)
def test_rotating_log_config_is_loaded___logger_is_set(self, level):
    log_file_path = os.path.abspath(os.path.join("tmp", "log_file.txt"))

    with patch('logging.root', logging.RootLogger(logging.NOTSET)):
        set_rotating_logger(log_file_path=log_file_path,
                            log_level=level,
                            max_file_size=100,
                            max_backups=10)

        logger = logging.getLogger()

        self.assertEqual(level, logger.level)
        self.assertEqual(1, len(logger.handlers))

        handler = logger.handlers[0]
        self.assertIsInstance(handler, RotatingFileHandler)
        self.assertEqual(log_file_path, handler.baseFilename)
        self.assertEqual(100, handler.maxBytes)
        self.assertEqual(10, handler.backupCount)
def test_logger_decorator(self):
    name = 'name'
    with patch.object(logging, 'getLogger',
                      return_value=logging.RootLogger(logging.DEBUG)) as log1:
        with patch.object(logging.Logger, 'info', return_value=None) as log2:

            @logger(name)
            def fun():
                return 3.1415

            fun()
    log1.assert_called_once_with(name)
    log2.assert_called_once_with('3.14%')
def test_basic_output():
    stream = io.StringIO()
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-8s %(processName)-5s %(threadName)-5s "
        "%(name)-12s %(message)s")
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(formatter)
    root_logger = logging.RootLogger(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    manager = logging.Manager(root_logger)
    logger = manager.getLogger("the-logger-1")
    logger.info("I am the log message n=%d m=%s", 123, "qwerty")
    # %(levelname)-8s pads "INFO" to eight characters, hence the run of spaces.
    assert ("INFO     MainProcess MainThread the-logger-1 "
            "I am the log message n=123 m=qwerty\n") in stream.getvalue()
def resetLogging():
    """
    Reset the handlers and loggers so that we can rerun the tests
    starting from a blank slate.
    """
    __pragma__("skip")
    logging._handlerList = []
    import weakref
    logging._handlers = weakref.WeakValueDictionary()
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.root)
    logging.root.manager = logging.Logger.manager
    __pragma__("noskip")
    if __envir__.executor_name == __envir__.transpiler_name:
        logging._resetLogging()
def get_root_logger():
    # type: () -> Logger
    """
    :return:
    """
    if Logger._root_logger is None:
        logging.setLoggerClass(LazyLogger)
        root_logger = logging.RootLogger(Logger.DEBUG)
        # Note: the next line rebinds root_logger to the global root,
        # discarding the RootLogger created above (preserved as in the source).
        root_logger = logging.root
        root_logger.addHandler(MyHandler())
        logging_level = CriticalSettings.get_logging_level()
        xbmc.log('get_root_logger logging_level: ' + str(logging_level),
                 xbmc.LOGDEBUG)
        root_logger.setLevel(logging_level)
        Trace.enable_all()
        root_logger.addFilter(Trace())
        Logger._root_logger = root_logger
    return Logger._root_logger
def test_log_config_is_loaded___logger_is_updated(self, level):
    with patch('logging.root', logging.RootLogger(logging.NOTSET)):
        read_log_config({
            'LOG_FILE': '/tmp/log_file.txt',
            'LOG_LEVEL': level,
            'LOG_MAX_SIZE_IN_BYTES': 100,
            'LOG_BACKUP_COUNT': 10,
        })

        logger = logging.getLogger()

        self.assertEqual(level, logger.level)
        self.assertEqual(1, len(logger.handlers))

        handler = logger.handlers[0]
        self.assertIsInstance(handler, RotatingFileHandler)
        self.assertEqual('/tmp/log_file.txt', handler.baseFilename)
        self.assertEqual(100, handler.maxBytes)
        self.assertEqual(10, handler.backupCount)
def test_my_json_output():
    stream = io.StringIO()
    formatter = MyJsonFormatter()
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(formatter)
    root_logger = logging.RootLogger(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    manager = logging.Manager(root_logger)
    logger = manager.getLogger("the-logger-1")
    logger.info("I am the log message n=%d m=%s", 123, "qwerty",
                extra={'someExtra': 'hi there!'})
    obj = json.loads(stream.getvalue())
    assert obj['timestamp'] != ''
    assert obj['name'] == 'the-logger-1'
    assert obj['level'] == 'INFO'
    assert obj['message'] == 'I am the log message n=123 m=qwerty'
    assert obj['someExtra'] == 'hi there!'
def test_json_logging(decider_mock, tmp_path, coloredlogs_module, capsys):
    """Ensure JSON logging configuration is set up correctly."""
    # Setup environment
    root_logger = lg.RootLogger("WARNING")
    root_logger_patch = mock.patch.object(lg, "root", root_logger)

    # Run function
    parser = seddy_main.build_parser()
    args = parser.parse_args(
        ["-J", "decider", str(tmp_path / "workflows.json"), "spam", "eggs"])
    with root_logger_patch:
        seddy_main.run_app(args)

    # Check logging configuration
    root_logger.warning("spam %s", "eggs")
    assert json.loads(capsys.readouterr().err) == {
        "levelname": "WARNING",
        "name": "root",
        "timestamp": mock.ANY,
        "message": "spam eggs",
    }
def init_logger():
    # logger = logging.getLogger(PROG)
    logger = logging.RootLogger(logging.DEBUG)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    if settings["CONF_DRY_RUN"] or settings["CONF_VERBOSE_RUN"]:
        console_handler.setLevel(logging.INFO)
    if settings["CONF_DEBUG_RUN"]:
        console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(CustomFormatter("%(message)s"))
    logger.addHandler(console_handler)

    file_handler = logging.FileHandler(settings["CONF_LOG_PATH"])
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.addHandler(file_handler)

    if settings["MAIL_ENABLE_REPORT"]:
        mail_handler = logging.FileHandler(mailTmpFile)
        mail_handler.setLevel(logging.INFO)
        mail_handler.setFormatter(
            logging.Formatter("%(levelname)s\t\t%(message)s"))
        logger.addHandler(mail_handler)

    return logger
def test_ensure_basic_logging_output():
    with mock.patch('logging.root',
                    new=logging.RootLogger(level=logging.INFO)) as root:
        # Root handlers must be empty for basicConfig to do anything
        assert logging.root.handlers == []

        buffer = StringIO()
        fmt = "%(levelname)s:%(name)s:%(message)s"

        @epsel.ensure_basic_logging(level=logging.INFO, format=fmt,
                                    stream=buffer)
        def process(x):
            root.info("Got {x!r}".format(x=x))
            return x * 2

        assert buffer.getvalue() == ""
        process(1)
        assert buffer.getvalue() == "INFO:root:Got 1\n"
        process(2)
        assert buffer.getvalue() == "INFO:root:Got 1\nINFO:root:Got 2\n"
        process(3)
        assert buffer.getvalue() == ("INFO:root:Got 1\nINFO:root:Got 2\n"
                                     "INFO:root:Got 3\n")
def _config(props, category=None):
    logging.shutdown()
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
    try:
        repoWideThresh = props["log4j.threshold"].strip()
        logging.getLogger().setLevel(_LEVEL_TRANS[repoWideThresh.strip().upper()])
    except KeyError:
        logging.getLogger().setLevel(logging.NOTSET)

    # First load up all loggers and set their levels
    loggers = {}
    rootLoggerCfg = props["log4j.rootLogger"].split(",")
    if rootLoggerCfg[0].strip().upper() in _LEVEL_TRANS.keys():
        logging.getLogger().setLevel(_LEVEL_TRANS[rootLoggerCfg[0].strip().upper()])
        del rootLoggerCfg[0]
    else:
        logging.getLogger().setLevel(logging.NOTSET)
    loggers[logging.getLogger()] = [x.strip() for x in rootLoggerCfg]

    configuredLoggers = filter(lambda x: x.startswith("log4j.logger."), props.keys())
    for logger in configuredLoggers:
        loggerCfg = props[logger].split(",")
        pyname = logger[len("log4j.logger."):]
        if loggerCfg[0].strip() in _LEVEL_TRANS.keys():
            logging.getLogger(pyname).setLevel(_LEVEL_TRANS[loggerCfg[0].strip().upper()])
        else:
            logging.getLogger(pyname).setLevel(logging.NOTSET)
        loggers[logging.getLogger(pyname)] = [x.strip() for x in loggerCfg[1:]]

    # Process category tags for selected appending
    if category is not None:
        categories = filter(lambda x: x.startswith("log4j.category."), props.keys())
        for cat in categories:
            pyname = cat[len("log4j.category."):]
            if pyname == category:
                categoryCfg = props[cat].split(",")
                for i in range(1, len(categoryCfg)):
                    layout = None
                    appenderKey = "log4j.appender." + str(categoryCfg[i].strip())
                    appenderClass = props[appenderKey]
                    klass = _import_handler(appenderClass)
                    handler = klass()
                    setattr(handler, "threshold", _LEVEL_TRANS[categoryCfg[0].strip()])
                    # Deal with appender options
                    appenderOptions = filter(lambda x: x.startswith(appenderKey + "."),
                                             props.keys())
                    for appenderOption in appenderOptions:
                        opt = appenderOption[len(appenderKey + "."):]
                        value = props[appenderOption].strip()
                        if opt.lower().endswith("layout"):
                            layoutClass = value
                            klass = _import_layout(layoutClass)
                            layout = klass()
                            layoutOptions = filter(
                                lambda x: x.startswith(appenderKey + ".layout."),
                                props.keys())
                            for layoutOption in layoutOptions:
                                opt = layoutOption[len(appenderKey + ".layout."):]
                                value = props[layoutOption].strip()
                                setattr(layout, opt, value)
                        elif opt.lower().endswith("filter"):
                            pass
                        elif opt.lower().endswith("errorhandler"):
                            pass
                        elif opt.lower().endswith("threshold"):
                            setattr(handler, opt.lower(), _LEVEL_TRANS[value])
                        else:
                            setattr(handler, opt, value)
                    handler.activateOptions()
                    logging.getLogger(str(pyname)).addHandler(handler)
                    if layout:
                        handler.setFormatter(layout)

    # Check additivity tags to avoid additive logging to the root loggers
    additivities = filter(lambda x: x.startswith("log4j.additivity."), props.keys())
    for additive in additivities:
        pyname = additive[len("log4j.additivity."):]
        if pyname == category:
            if str(props[additive]).strip().upper() == "FALSE":
                return

    # Now deal with root logging appenders
    for logger, appenders in loggers.items():
        for appender in appenders:
            layout = None
            appenderKey = "log4j.appender." + str(appender)
            appenderClass = props[appenderKey]
            klass = _import_handler(appenderClass)
            handler = klass()
            # Deal with appender options
            appenderOptions = filter(lambda x: x.startswith(appenderKey + "."),
                                     props.keys())
            for appenderOption in appenderOptions:
                opt = appenderOption[len(appenderKey + "."):]
                value = props[appenderOption].strip()
                if opt.lower().endswith("layout"):
                    layoutClass = value
                    klass = _import_layout(layoutClass)
                    layout = klass()
                    layoutOptions = filter(
                        lambda x: x.startswith(appenderKey + ".layout."),
                        props.keys())
                    for layoutOption in layoutOptions:
                        opt = layoutOption[len(appenderKey + ".layout."):]
                        value = props[layoutOption].strip()
                        setattr(layout, opt, value)
                elif opt.lower().endswith("filter"):
                    pass
                elif opt.lower().endswith("errorhandler"):
                    pass
                elif opt.lower().endswith("threshold"):
                    setattr(handler, opt.lower(), _LEVEL_TRANS[value])
                else:
                    setattr(handler, opt, value)
            handler.activateOptions()
            logger.addHandler(handler)
            if layout:
                handler.setFormatter(layout)
def _reset_logging():
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
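# Illustrative sketch (class and test names are assumptions, not from the
# source): calling the reset in setUp gives every test a pristine root logger.
import logging
import unittest


class LoggingResetTestCase(unittest.TestCase):
    def setUp(self):
        _reset_logging()

    def test_root_is_fresh(self):
        self.assertEqual(logging.root.level, logging.WARNING)
        self.assertEqual(logging.root.handlers, [])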
class MockLogger(logging.Logger):
    root = logging.RootLogger(logging.WARNING)
    manager = logging.Manager(root)
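# A minimal usage sketch (assumed, not from the source): instances of
# MockLogger resolve ``root`` and ``manager`` through the class attributes,
# so they live in an isolated hierarchy detached from logging.root.
mock_logger = MockLogger("test.child")
assert mock_logger.manager is MockLogger.manager
assert mock_logger.root.level == logging.WARNING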
def setUp(self):
    self._orig_root_logger = logging.root
    logging.root = logging.RootLogger(logging.WARNING)
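# The matching tearDown is not shown above; a likely counterpart (an
# assumption, not from the source) restores the saved root logger:
def tearDown(self):
    logging.root = self._orig_root_logger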
def startupASLogger(addrOfStarter, logEndpoint, logDefs,
                    transportClass, aggregatorAddress):
    # Dirty trick here to completely re-initialize logging in this
    # process... something the standard Python logging interface does
    # not allow via the API.  We also do not want to run
    # logging.shutdown() because (a) that does not do enough to reset,
    # and (b) it shuts down handlers, but we want to leave the
    # parent's handlers alone.
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
    if logDefs:
        dictConfig(logDefs)
    else:
        logging.basicConfig()
    # Disable thesplog from within the logging process (by setting the
    # logfile size to zero) to try to avoid recursive logging loops.
    thesplog_control(logging.WARNING, False, 0)
    #logging.info('ActorSystem Logging Initialized')
    transport = transportClass(logEndpoint)
    setProcName('logger', transport.myAddress)
    transport.scheduleTransmit(
        None, TransmitIntent(addrOfStarter, LoggerConnected()))
    fdup = None
    last_exception_time = None
    exception_count = 0
    while True:
        try:
            r = transport.run(None)
            if isinstance(r, Thespian__UpdateWork):
                transport.scheduleTransmit(
                    TransmitIntent(transport.myAddress, r))
                continue
            logrecord = r.message
            if isinstance(logrecord, LoggerExitRequest):
                logging.info('ActorSystem Logging Shutdown')
                return
            elif isinstance(logrecord, LoggerFileDup):
                fdup = getattr(logrecord, 'fname', None)
            elif isinstance(logrecord, LogAggregator):
                aggregatorAddress = logrecord.aggregatorAddress
            elif isinstance(logrecord, logging.LogRecord):
                logging.getLogger(logrecord.name).handle(logrecord)
                if fdup:
                    with open(fdup, 'a') as ldf:
                        ldf.write('%s\n' % str(logrecord))
                if aggregatorAddress and \
                   logrecord.levelno >= logging.WARNING:
                    transport.scheduleTransmit(
                        None, TransmitIntent(aggregatorAddress, logrecord))
            else:
                logging.warn('Unknown message rcvd by logger: %s'
                             % str(logrecord))
        except Exception as ex:
            thesplog('Thespian Logger aborting (#%d) with error %s',
                     exception_count, ex, exc_info=True)
            if last_exception_time is None or \
               last_exception_time.view().expired():
                last_exception_time = ExpirationTimer(timedelta(seconds=1))
                exception_count = 0
            else:
                exception_count += 1
                if exception_count >= MAX_LOGGING_EXCEPTIONS_PER_SECOND:
                    thesplog(
                        'Too many Thespian Logger exceptions (#%d in %s); exiting!',
                        exception_count,
                        timedelta(seconds=1) - last_exception_time.view().remaining())
                    return
def parse(argv):
  '''Parse configuration from flags and/or configuration file.'''
  # Load flags defined in other modules (is there a better way to do this?)
  import spartan.expr.operator.local
  import spartan.expr.operator.optimize
  import spartan.cluster
  import spartan.worker
  import spartan.util

  if FLAGS._parsed:
    return
  FLAGS._parsed = True

  if hasattr(appdirs, 'user_config_dir'):
    # user_config_dir is new to v1.3.0
    config_file = appdirs.user_config_dir('spartan') + '/spartan.ini'
  else:
    config_file = appdirs.user_data_dir('spartan') + '/spartan.ini'

  config_dir = os.path.dirname(config_file)
  if not os.path.exists(config_dir):
    try:
      os.makedirs(config_dir, mode=0755)
    except:
      print >> sys.stderr, 'Failed to create config directory.'

  if os.path.exists(config_file):
    print >> sys.stderr, 'Loading configuration from %s' % config_file
    # Prepend configuration options to the flags array so that they
    # are overridden by user flags.
    try:
      config = ConfigParser.ConfigParser()
      config.read(config_file)
      if config.has_section('flags'):
        for name, value in config.items('flags'):
          argv.insert(0, '--%s=%s' % (name, value))
    except:
      print >> sys.stderr, 'Failed to parse config file: %s' % config_file
      sys.exit(1)

  parser = argparse.ArgumentParser(formatter_class=SortingHelpFormatter)
  for name, flag in FLAGS:
    parser.add_argument('--' + name, type=str, help=flag.help)

  if os.getenv('SPARTAN_OPTS'):
    argv += os.getenv('SPARTAN_OPTS').split(' ')

  parsed_flags, rest = parser.parse_known_args(argv)
  for name, flag in FLAGS:
    if getattr(parsed_flags, name) is not None:
      #print >>sys.stderr, 'Parsing: %s : %s' % (name, getattr(parsed_flags, name))
      flag.parse(getattr(parsed_flags, name))

  # Reset loggers so that basicConfig works
  logging.root = logging.RootLogger(logging.WARNING)
  logging.basicConfig(
      format='%(created)f %(hostname)s:%(pid)s %(filename)s:%(lineno)s '
             '[%(funcName)s] %(message)s',
      level=FLAGS.log_level,
      stream=sys.stderr)

  for f in rest:
    if f.startswith('-'):
      print >> sys.stderr, '>>> Unknown flag: %s (ignored)' % f

  if FLAGS.print_options:
    print 'Configuration status:'
    for name, flag in sorted(FLAGS):
      print ' >> ', name, '\t', flag.val

  return rest
import logging

log = logging.RootLogger(logging.INFO)

import redis
from operator import itemgetter
import time
import pytz
from datetime import datetime
from eventmanager import services

import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mydjango.settings'
import django
django.setup()

import requests

from graphqlendpoint.models import Agent

'''
users = []
url = 'http://ot-ws:5000/api/ot/objects/'
payload = {"objectclass": "Agent", "filter": "", "variables": []}
req = requests.post(url, payload)
print(requests)

agents = Agent.objects.all()

for item in req:
def reset():
    loguru.logger.remove()
    loguru.logger.__init__(loguru._logger.Core(), None, 0, False, False,
                           False, False, None, {})
    logging.Logger.manager.loggerDict.clear()
    logging.root = logging.RootLogger(logging.WARNING)
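# Hedged sketch of wiring the reset above into pytest (fixture name is
# hypothetical): running it around every test keeps both loguru and stdlib
# logging state from bleeding between tests.
import pytest


@pytest.fixture(autouse=True)
def clean_logging_state():
    reset()
    yield
    reset()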
the only thing in this module to care about is the base.
'''
import logging, warnings, array, bisect


class DuplicateSymbol(Warning):
    pass


class UninitializedSymbol(Warning):
    pass


logging.root = logging.RootLogger(logging.DEBUG)


# symbol scopes
class Scope(object):
    pass


class LocalScope(Scope):
    '''A symbol that is local to a symboltable'''


class GlobalScope(Scope):
    '''A symbol that is global and available to other symboltables'''
""" Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging import sys EXAMPLE_LOGGER_NAME = "example" root = logging.RootLogger(logging.WARNING) manager = logging.Manager(root) logger = manager.getLogger(EXAMPLE_LOGGER_NAME) logger.setLevel(logging.INFO) hdl = logging.StreamHandler(stream=sys.stdout) hdl.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s")) hdl.setLevel(logging.INFO) logger.addHandler(hdl)
def reset_logging():
    """Resets the logging module to its initial state so that we can
    re-register all kinds of logging logic for unit testing purposes."""
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)