def severityLevel(self):
    for pluginXbrlMethod in pluginClassMethods("ModelTestcaseVariation.ExpectedSeverity"):
        severityLevelName = pluginXbrlMethod(self)
        if severityLevelName:  # ignore plug in if not a plug-in-recognized test case
            return logging._checkLevel(severityLevelName)
    # default behavior without plugins
    # SEC error cases have <assert severity={err|wrn}>...
    if XmlUtil.descendant(self, None, "assert", attrName="severity", attrValue="wrn") is not None:
        return logging._checkLevel("WARNING")
    return logging._checkLevel("INCONSISTENCY")

def test_default_settings():
    settings.clear_all()
    assert settings.DEBUG.get() is False
    assert settings.PROFILE.get() is False
    assert settings.QUIET.get() is False
    assert settings.LOGGING_LEVEL.get() == logging._checkLevel('INFO')
    with patch.dict(environ, {'DEBUG': 't'}):
        settings.clear_all()
        assert settings.LOGGING_LEVEL.get() == logging._checkLevel('DEBUG')
    settings.clear_all()

def get_logconfig(level=None):
    """
    Get logging configuration from environment.

    Returns: level, stdout_loglevel, files

    >>> get_logconfig()
    (20, 20, {})
    >>> get_logconfig(20)
    (20, 20, {})
    >>> get_logconfig('20')
    (20, 20, {})
    >>> get_logconfig('INFO')
    (20, 20, {})
    >>> import os
    >>> os.environ['STDOUT_LOGLEVEL'] = 'INFO'
    >>> get_logconfig()
    (20, 20, {})
    """
    if level is None:
        level = os.environ.get('LOGLEVEL')
    if level is None:
        level = logging.INFO
    elif isinstance(level, (int, float)):
        level = int(level)
    elif level == str(level) and level.isdigit():
        level = int(level)
    else:
        level = logging._checkLevel(level.upper())
    files = {}
    for levelname in levelnames:
        if levelname == str(levelname):
            env_varname = levelname.upper() + '_LOGFILE'
            if env_varname in os.environ:
                files[levelname] = os.environ[env_varname]
    if 'STDOUT_LOGLEVEL' in os.environ:
        stdout_loglevel = logging._checkLevel(
            os.environ['STDOUT_LOGLEVEL'].upper()
        )
    else:
        stdout_loglevel = level
    # color_stdout = 'COLOR_STDOUT' in os.environ
    return level, stdout_loglevel, files

def __init__(self, cntlr, options=None, filesource=None, entrypointfiles=None,
             sourceZipStream=None, responseZipStream=None, errorCaptureLevel=None):
    self.cntlr = cntlr
    self.options = options
    self.filesource = filesource
    self.entrypointfiles = entrypointfiles
    self.sourceZipStream = sourceZipStream
    self.responseZipStream = responseZipStream
    self.submissionType = None
    self.reports = []
    self.renderedFiles = set()  # filing-level rendered files
    self.reportZip = None
    if responseZipStream:
        self.setReportZipStreamMode('w')
    else:
        try:  # zipOutputFile only present with EdgarRenderer plugin options
            if options and options.zipOutputFile:
                if not os.path.isabs(options.zipOutputFile):
                    zipOutDir = os.path.dirname(filesource.basefile)
                    zipOutFile = os.path.join(zipOutDir, options.zipOutputFile)
                else:
                    zipOutFile = options.zipOutputFile
                self.reportZip = zipfile.ZipFile(zipOutFile, 'w', zipfile.ZIP_DEFLATED, True)
        except AttributeError:
            self.reportZip = None
    self.errorCaptureLevel = errorCaptureLevel or logging._checkLevel("INCONSISTENCY")
    self.errors = []
    self.arelleUnitTests = {}  # copied from each instance loaded

def __init__(self, logger, level):
    self.records = []
    self.level = logging._checkLevel(level)
    if isinstance(logger, basestring):
        self.logger = logging.getLogger(logger)
    else:
        self.logger = logger

def _level(level):
    """
    Converts the provided logging level value into the best representation
    of it, so that it may be used to update a logger's level of
    representation.

    This method takes into account the current interpreter version so that
    no problems occur.

    :type level: String/int
    :param level: The level value that is meant to be converted into the
    best representation possible.
    :rtype: int
    :return: The best representation of the level so that it may be used
    freely for the setting of logging levels under the current running
    interpreter.
    """

    level_t = type(level)
    if level_t == int:
        return level
    if level is None:
        return level
    if level == "SILENT":
        return log.SILENT
    if hasattr(logging, "_checkLevel"):
        return logging._checkLevel(level)
    return logging.getLevelName(level)

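# A minimal, self-contained sketch of the same idea as the helper above (an
# assumption, not the project's code): prefer the private logging._checkLevel
# when the running interpreter provides it, otherwise fall back to
# logging.getLevelName. The project-specific "SILENT" level is omitted here.
import logging

def to_level(level):
    # ints and None pass through unchanged, names are resolved to ints
    if isinstance(level, int) or level is None:
        return level
    if hasattr(logging, "_checkLevel"):
        return logging._checkLevel(level)
    return logging.getLevelName(level)

assert to_level("WARNING") == logging.WARNING
assert to_level(10) == 10
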
def addToLog(self, message, messageCode="", messageArgs=None, file="", refs=None, level=logging.INFO):
    """Add a simple info message to the default logger

    :param message: Text of message to add to log.
    :type message: str
    :param messageArgs: optional dict of message format-string key-value pairs
    :type messageArgs: dict
    :param messageCode: Message code (e.g., a prefix:id of a standard error)
    :type messageCode: str
    :param file: File name (and optional line numbers) pertaining to message
    :type file: str
    """
    if self.logger is not None:
        if messageArgs:
            args = (message, messageArgs)
        else:
            args = (message,)  # pass no args if none provided
        if refs is None:
            refs = []
        if isinstance(file, (tuple, list, set)):
            for _file in file:
                refs.append({"href": _file})
        elif isinstance(file, _STR_BASE):
            refs.append({"href": file})
        if isinstance(level, _STR_BASE):
            level = logging._checkLevel(level)
        self.logger.log(level, *args, extra={"messageCode": messageCode, "refs": refs})
    else:
        try:
            print(message)
        except UnicodeEncodeError:
            # extra parentheses in print to allow for 3-to-2 conversion
            print((message
                   .encode(sys.stdout.encoding, 'backslashreplace')
                   .decode(sys.stdout.encoding, 'strict')))

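# Hedged illustration of the logging call shape used in addToLog above: a
# string level is normalized with logging._checkLevel and extra attributes
# (messageCode, refs) ride along on the log record. The logger name, message
# code and file name below are made-up stand-ins, not Arelle's values.
import logging

logging.basicConfig(level=logging.INFO)
demo_logger = logging.getLogger("example.addToLog")
demo_level = logging._checkLevel("WARNING")  # "WARNING" -> 30
demo_logger.log(demo_level, "loaded %(file)s", {"file": "report.xml"},
                extra={"messageCode": "demo:code", "refs": [{"href": "report.xml"}]})
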
def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    config_copy = dict(config)  # for restoring in case of error
    formatter = config.pop('formatter', None)
    if formatter:
        try:
            formatter = self.config['formatters'][formatter]
        except Exception as e:
            raise ValueError('Unable to set formatter '
                             '%r: %s' % (formatter, e))
    level = config.pop('level', None)
    filters = config.pop('filters', None)
    if '()' in config:
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        factory = c
    else:
        cname = config.pop('class')
        klass = self.resolve(cname)
        #Special case for handler which refers to another handler
        if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
            try:
                th = self.config['handlers'][config['target']]
                if not isinstance(th, logging.Handler):
                    config.update(config_copy)  # restore for deferred cfg
                    raise TypeError('target not configured yet')
                config['target'] = th
            except Exception as e:
                raise ValueError('Unable to set target handler '
                                 '%r: %s' % (config['target'], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
            config['mailhost'] = self.as_tuple(config['mailhost'])
        elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
            config['address'] = self.as_tuple(config['address'])
        factory = klass
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        #The argument name changed from strm to stream
        #Retry with old name.
        #This is so that code can be used with older Python versions
        #(e.g. by Django)
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    config_copy = dict(config)  # for restoring in case of error
    formatter = config.pop("formatter", None)
    if formatter:
        try:
            formatter = self.config["formatters"][formatter]
        except Exception as e:
            raise ValueError("Unable to set formatter "
                             "%r: %s" % (formatter, e))
    level = config.pop("level", None)
    filters = config.pop("filters", None)
    if "()" in config:
        c = config.pop("()")
        if not callable(c):
            c = self.resolve(c)
        factory = c
    else:
        cname = config.pop("class")
        klass = self.resolve(cname)
        # Special case for handler which refers to another handler
        if issubclass(klass, logging.handlers.MemoryHandler) and "target" in config:
            try:
                th = self.config["handlers"][config["target"]]
                if not isinstance(th, logging.Handler):
                    config.update(config_copy)  # restore for deferred cfg
                    raise TypeError("target not configured yet")
                config["target"] = th
            except Exception as e:
                raise ValueError("Unable to set target handler "
                                 "%r: %s" % (config["target"], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and "mailhost" in config:
            config["mailhost"] = self.as_tuple(config["mailhost"])
        elif issubclass(klass, logging.handlers.SysLogHandler) and "address" in config:
            config["address"] = self.as_tuple(config["address"])
        factory = klass
    props = config.pop(".", None)
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        # The argument name changed from strm to stream
        # Retry with old name.
        # This is so that code can be used with older Python versions
        # (e.g. by Django)
        kwargs["strm"] = kwargs.pop("stream")
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    if props:
        for name, value in props.items():
            setattr(result, name, value)
    return result

def log_pretty(self, level, prefixtext, data, *args, **kwargs):
    try:
        import json
        str = json.dumps(data, indent=8, encoding="utf-8")
    except:
        str = pprint.pformat(data, indent=1, depth=4, width=20)
    if prefixtext:
        prefixtext = prefixtext + "\n"
    self.log(logging._checkLevel(level), prefixtext + " " + str, *args, **kwargs)

def initialize():
    # Load the settings file
    settings.load(os.path.join(HOME_DIR, ARGS.settings))
    loglevel = logging._checkLevel(ARGS.loglevel)
    # Logging configuration
    log_format = '[%(asctime)-15s %(levelname)s:%(name)s:%(module)s] %(message)s'
    logging.basicConfig(level=loglevel, format=log_format)

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    config_copy = dict(config)  # for restoring in case of error
    formatter = self.convert(config.pop('formatter', None))
    if formatter:
        try:
            formatter = self.config['formatters'][formatter]
        except Exception as e:
            raise ValueError('Unable to set formatter {}'.format(str(formatter))) from e
    level = self.convert(config.pop('level', None))
    filters = self.convert(config.pop('filters', None))
    if '()' in config.keys():
        c = self.convert(config.pop('()'))
        if not callable(c):
            c = self.resolve(c)
        factory = c
    else:
        cname = self.convert(config.pop('class'))
        klass = self.resolve(cname)
        # @note - issubclass does not seem to be implemented yet
        # which makes it difficult to use this.
        #Special case for handler which refers to another handler
        # if issubclass(klass, logging.handlers.MemoryHandler) and ('target' in config):
        #     try:
        #         th = self.config['handlers'][config['target']]
        #         if not isinstance(th, logging.Handler):
        #             config.update(config_copy)  # restore for deferred cfg
        #             raise TypeError('target not configured yet')
        #         config['target'] = th
        #     except Exception as e:
        #         raise ValueError('Unable to set target handler {}'.format(config['target'])) from e
        factory = klass
    props = self.convert(config.pop('.', None))
    data = [(k, self.convert(config[k])) for k in config.keys() if valid_ident(k)]
    kwargs = dict(data)
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise te
        #The argument name changed from strm to stream
        #Retry with old name.
        #This is so that code can be used with older Python versions
        #(e.g. by Django)
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    if props:
        for name, value in props.items():
            setattr(result, name, value)
    return result

def _level_check(value):
    try:
        value = int(value)
    except ValueError:
        pass
    try:
        value = logging._checkLevel(value)
    except:
        raise validate.VdtTypeError(value)
    return value

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    formatter = config.pop('formatter', None)
    if formatter:
        try:
            formatter = self.config['formatters'][formatter]
        except StandardError as e:
            raise ValueError('Unable to set formatter '
                             '%r: %s' % (formatter, e))
    level = config.pop('level', None)
    filters = config.pop('filters', None)
    if '()' in config:
        c = config.pop('()')
        if not hasattr(c, '__call__') and \
           hasattr(types, 'ClassType') and \
           isinstance(c, types.ClassType):
            c = self.resolve(c)
        factory = c
    else:
        klass = self.resolve(config.pop('class'))
        # Special case for handler which refers to another handler
        if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
            try:
                config['target'] = \
                    self.config['handlers'][config['target']]
            except StandardError as e:
                raise ValueError('Unable to set target handler '
                                 '%r: %s' % (config['target'], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
            config['mailhost'] = self.as_tuple(config['mailhost'])
        elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
            config['address'] = self.as_tuple(config['address'])
        factory = klass
    kwargs = dict((k, config[k]) for k in config if valid_ident(k))
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        # The argument name changed from strm to stream
        # Retry with old name.
        # This is so that code can be used with older Python versions
        # (e.g. by Django)
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(_checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result

def __init__(self, logger_name="", level=logging.DEBUG):
    self.logger_name = logger_name
    if isinstance(level, basestring):
        if is_py2:
            self.level = logging._checkLevel(level.upper())
        else:
            self.level = logging._nameToLevel[level.upper()]
    else:
        self.level = level

def __init__(self, level):
    """Setup the object with a logger and a loglevel and start the thread
    """
    super(LogPipe, self).__init__(name='LogPipe')
    self.daemon = False
    self.level = logging._checkLevel(level)
    self.fdRead, self.fdWrite = os.pipe()
    self.pipeReader = os.fdopen(self.fdRead)
    self._finished = threading.Event()
    self.start()

def get_level(level):
    try:
        return int(level)
    except TypeError:
        return logging.NOTSET
    except ValueError:
        lv = str(level).upper()
        try:
            return logging._checkLevel(lv)
        except ValueError:
            return logging.NOTSET

def init_logging(config: LogConfigDict, workflow_id: str, banner=True):
    global _LOGGING_CONFIGURED
    if _LOGGING_CONFIGURED:
        import warnings
        warnings.warn(UserWarning("bigflow.log is already configured - skip"))
        return
    _LOGGING_CONFIGURED = True

    gcp_project_id = config['gcp_project_id']
    log_name = config.get('log_name', workflow_id)
    log_level = config.get('log_level', 'INFO')
    run_uuid = str(uuid.uuid4())
    labels = {
        'workflow_id': workflow_id,
        'run_uuid': run_uuid,
    }

    root = logging.getLogger()
    if not root.handlers:
        # logs are not configured yet - print to stderr
        logging.basicConfig(level=log_level)
    elif log_level:
        root.setLevel(min(root.level, logging._checkLevel(log_level)))

    full_log_name = f"projects/{gcp_project_id}/logs/{log_name}"
    infrastructure_logs = get_infrastructure_bigflow_project_logs(gcp_project_id)
    workflow_logs_link = prepare_gcp_logs_link(
        _generate_cl_log_view_query({'logName=': full_log_name, 'labels.workflow_id=': workflow_id}))
    this_execution_logs_link = prepare_gcp_logs_link(
        _generate_cl_log_view_query({'logName=': full_log_name, 'labels.run_uuid=': run_uuid}))

    if banner:
        logger.info(dedent(f"""
            *************************LOGS LINK*************************
            Infrastructure logs:{infrastructure_logs}
            Workflow logs (all runs): {workflow_logs_link}
            Only this run logs: {this_execution_logs_link}
            ***********************************************************"""))

    gcp_logger_handler = create_gcp_log_handler(gcp_project_id, log_name, labels)
    gcp_logger_handler.setLevel(log_level or logging.INFO)

    # Disable logs from 'google.cloud.logging'
    gclogging_logger = logging.getLogger("google.cloud.logging")
    gclogging_logger.setLevel(logging.WARNING)
    gclogging_logger.propagate = False
    gclogging_logger.addHandler(logging.StreamHandler())  # TODO: add formatter?

    root.addHandler(gcp_logger_handler)
    sys.excepthook = _uncaught_exception_handler(logging.getLogger('uncaught_exception'))

def set_logging(logging_level=logging.INFO):
    format = "%(asctime)s (%(threadName)-9s) " \
             "(%(funcName)-8s) [%(levelname)s]: %(message)s"
    # log_lvl = logging.INFO  # logging.INFO / logging.DEBUG / logging.WARNING
    try:
        log_lvl = logging._checkLevel(logging_level)
    except Exception:
        log_lvl = logging.INFO
    logging.basicConfig(format=format, level=log_lvl, datefmt="%d/%m/%Y %H:%M:%S")

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    formatter = config.pop('formatter', None)
    if formatter:
        try:
            formatter = self.config['formatters'][formatter]
        except Exception as e:
            raise ValueError('Unable to set formatter '
                             '%r: %s' % (formatter, e))
    level = config.pop('level', None)
    filters = config.pop('filters', None)
    if '()' in config:
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        factory = c
    else:
        klass = self.resolve(config.pop('class'))
        #Special case for handler which refers to another handler
        if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
            try:
                config['target'] = self.config['handlers'][config['target']]
            except Exception as e:
                raise ValueError('Unable to set target handler '
                                 '%r: %s' % (config['target'], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
            config['mailhost'] = self.as_tuple(config['mailhost'])
        elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
            config['address'] = self.as_tuple(config['address'])
        factory = klass
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        #The argument name changed from strm to stream
        #Retry with old name.
        #This is so that code can be used with older Python versions
        #(e.g. by Django)
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result

def set_level(self, level):
    try:
        logging.root.setLevel(level)
        for key, value in logger.manager.loggerDict.items():
            try:
                level = logging._checkLevel(level)
                value.level = level
            except Exception as e:
                print key, "Error %s" % e
                pass
    except Exception as e:
        logger.error("%(exception)s", {'exception': e})

def __init__(self, level=logging.NOTSET):
    """
    Initializes the instance - basically setting the formatter to None
    and the filter list to empty.
    """
    logging.Filterer.__init__(self)
    self._name = None
    self.level = logging._checkLevel(level)
    self.formatter = None
    # Add the handler to the global _handlerList (for cleanup on shutdown)
    _addMultiProcHandlerRef(self)
    self.createLock()

def configured_logger(name, config=None, level=None, handlers=None):
    '''Configured logger.
    '''
    with process_global('lock'):
        logconfig = original = process_global('_config_logging')
        # if the logger was not configured, do so.
        if not logconfig:
            logconfig = deepcopy(LOGGING_CONFIG)
            if config:
                update_config(logconfig, config)
            original = logconfig
            process_global('_config_logging', logconfig, True)
        else:
            loggers = logconfig.get('loggers')
            if loggers and name in loggers:
                return logging.getLogger(name)
            logconfig = deepcopy(logconfig)
            logconfig['disable_existing_loggers'] = False
            logconfig.pop('loggers', None)
            logconfig.pop('root', None)
        if level is None:
            level = logging.NOTSET
        else:
            try:
                level = int(level)
            except ValueError:
                lv = str(level).upper()
                try:
                    level = logging._checkLevel(lv)
                except ValueError:
                    level = logging.NOTSET
        # No loggers configured. This means no logconfig setting
        # parameter was used. Set up the root logger with default
        # loggers
        if level == logging.NOTSET:
            handlers = ['silent']
        else:
            handlers = handlers or ['console']
        level = logging.getLevelName(level)
        if 'loggers' not in logconfig:
            logconfig['loggers'] = {}
        l = {'level': level, 'handlers': handlers, 'propagate': False}
        original['loggers'][name] = l
        logconfig['loggers'][name] = l
        #
        if not original.get('root'):
            logconfig['root'] = {'handlers': handlers, 'level': level}
        if logconfig:
            dictConfig(logconfig)
        return logging.getLogger(name)

def get_logger(self, name):
    if name in self.loggers:
        return logging.getLogger(name)

    dict_config = self.config.logging.to_dict()
    ns_hierarchy = name.split(".")
    config_name = name
    for i in range(len(name) - 1, -1, -1):
        config_name = ".".join(ns_hierarchy[:i])
        if config_name in dict_config['loggers']:
            break
    assert config_name in dict_config[
        'loggers'], 'Undefined logger: %s' % name

    self.loggers.add(name)
    logger_conf = dict_config['loggers'][config_name]
    logger = logging.getLogger(name)
    logger.propagate = logger_conf['propagate']
    # noinspection PyProtectedMember
    logger.setLevel(logging._checkLevel(logger_conf['level']))

    dict_configurator = logging.config.DictConfigurator(dict_config)
    formatters = dict_configurator.config.get('formatters', {})
    for fname in formatters:
        try:
            formatters[fname] = dict_configurator.configure_formatter(
                formatters[fname])
        except Exception as e:
            raise ValueError('Unable to configure formatter %r: %s' % (fname, e))

    for handler_name in logger_conf['handlers']:
        if handler_name not in self.handlers:
            # important to use configuration passed to dict_configurator instead of dict_config
            # because it has been processed to change file
            handler = dict_configurator.configure_handler(
                dict_configurator.config['handlers'][handler_name])
            self.handlers[handler_name] = handler
        handler = self.handlers[handler_name]
        logger.addHandler(handler)

    if 'filters' in logger_conf:
        raise NotImplementedError('Not support filters')
    return logger

def __init__(self, name, app, logtype):
    logging_cfg = app.config["LOGGING"]
    logger = logging.getLogger(name)
    logger.setLevel(logging._checkLevel(logging_cfg[logtype]["level"]))
    httpsHandler = GelfHTTPHandler(host=app.config["GRAYLOG_HOST"],
                                   port=app.config["GRAYLOG_PORT"],
                                   path=app.config["GRAYLOG_PATH"],
                                   localname=app.config["GRAYLOG_SOURCE"])
    logger.addHandler(httpsHandler)
    self.logger = logger
    self.extra = {**logging_cfg["extra"], **logging_cfg[logtype]["extra"]}

def create_logger(stream=None):
    level = logging.INFO
    if config.log_level:
        level = logging._checkLevel(config.log_level.upper())
    handler = logging.StreamHandler(stream or stdout)
    handler.setFormatter(logging.Formatter('[%(name)s] %(asctime)s %(message)s'))
    handler.setLevel(level)
    logger = logging.getLogger('mlrun')
    if not len(logger.handlers):
        logger.addHandler(handler)
    logger.setLevel(level)
    logger.propagate = False
    return logger

def test_set_valid_logger_level(self):
    logging_conf = [('subscription_manager.managercli', "ERROR"),
                    ('rhsm', "WARNING"),
                    ('rhsm-app', "CRITICAL"),
                    ('rhsm-app.rhsmd', "DEBUG")]
    for logger_name, log_level in logging_conf:
        self.rhsm_config.set('logging', logger_name, log_level)

    logutil.init_logger()
    for logger_name, log_level in logging_conf:
        real_log_level = logging.getLogger(logger_name).getEffectiveLevel()
        self.assertEqual(real_log_level, logging._checkLevel(log_level))

def log(level, msg, *args, **kwargs):
    """Log message at the given level.

    Parameters
    ----------
    level : Union[int, str]
        The logging level value.
    msg : str
        The message.
    """
    level = _logging._checkLevel(level)
    get_logger().log(level, _detailed_msg(msg), *args, **kwargs)

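# A short usage sketch (assumed, not from the source module): both the numeric
# and the string spelling of a level resolve to the same value through
# logging._checkLevel, so a wrapper like log() above can accept either form.
import logging

logging.basicConfig(level=logging.DEBUG)
for lvl in (logging.WARNING, "WARNING"):
    logging.getLogger("example").log(logging._checkLevel(lvl), "disk usage at %d%%", 91)
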
def _callback(self, _binding, qualname, args, _kwargs):
    _, name = qualname.rsplit('.', 1)
    if name == 'log':
        level, args = _checkLevel(args[0]), args[1:]
    elif name == 'exception':
        level = logging.ERROR
    else:
        level = _checkLevel(name.upper())
    if len(args) > 1:
        message, args = args[0], args[1:]
    else:
        message, args = args[0], ()
    if level >= self._level:
        self._calls.append((
            message % args if args else message,
            message,
            args,
            getLevelName(level)
        ))

def is_valid_log_verbosity(verbosity):
    """
    >>> is_valid_log_verbosity('WARNING')
    True
    >>> is_valid_log_verbosity('INFO')
    True
    >>> is_valid_log_verbosity('DEBUG')
    True
    >>> is_valid_log_verbosity('SOMETHINGELSE')
    False

    :type verbosity: str|int
    :rtype: bool
    """
    try:
        logging._checkLevel(verbosity)
        return True
    except (ValueError, TypeError):
        return False

def setup_logging(log_level, log_destination):
    log_level = log_level.upper()
    try:
        log_level = LOG_LEVELS[log_level]
    except KeyError:
        raise RuntimeError('Invalid logging level {!r}'.format(log_level))

    if log_level == 'SILENT':
        logger = logging.getLogger()
        logger.disabled = True
        logger.setLevel(logging.CRITICAL)
        return

    if log_destination == 'syslog':
        fmt = logging.Formatter(
            '{processName}[{process}]: {name}: {message}', style='{')
        handler = logging.handlers.SysLogHandler(
            '/dev/log',
            facility=logging.handlers.SysLogHandler.LOG_DAEMON)
        handler.setFormatter(fmt)
    elif log_destination == 'stderr':
        handler = EdgeDBLogHandler()
    else:
        fmt = logging.Formatter(
            '{levelname} {process} {asctime} {name}: {message}', style='{')
        handler = logging.FileHandler(log_destination)
        handler.setFormatter(fmt)

    log_level = logging._checkLevel(log_level)
    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(handler)

    # Channel warnings into logging system
    logging.captureWarnings(True)
    # Show DeprecationWarnings by default ...
    warnings.simplefilter('default', category=DeprecationWarning)
    # ... except for some third-party modules.
    for ignored_module in IGNORE_DEPRECATIONS_IN:
        warnings.filterwarnings('ignore', category=DeprecationWarning,
                                module=ignored_module)

    if not debug.flags.log_metrics:
        log_metrics = logging.getLogger('edb.server.metrics')
        log_metrics.setLevel(logging.ERROR)

def main(print_func=None):
    parser = argparse.ArgumentParser(description='Start the visdom server.')
    parser.add_argument('-port', metavar='port', type=int, default=DEFAULT_PORT,
                        help='port to run the server on.')
    parser.add_argument('-env_path', metavar='env_path', type=str,
                        default=DEFAULT_ENV_PATH,
                        help='path to serialized session to reload.')
    parser.add_argument('-logging_level', metavar='logger_level', default='INFO',
                        help='logging level (default = INFO). Can take logging '
                             'level name or int (example: 20)')
    parser.add_argument('-readonly', help='start in readonly mode',
                        action='store_true')
    parser.add_argument('-enable_login', default=False, action='store_true',
                        help='start the server with authentication')
    parser.add_argument('-force_new_cookie', default=False, action='store_true',
                        help='start the server with the new cookie, '
                             'available when -enable_login provided')
    FLAGS = parser.parse_args()

    try:
        logging_level = int(FLAGS.logging_level)
    except (ValueError,):
        try:
            logging_level = logging._checkLevel(FLAGS.logging_level)
        except ValueError:
            raise KeyError(
                "Invalid logging level : {0}".format(FLAGS.logging_level)
            )

    logging.getLogger().setLevel(logging_level)

    if FLAGS.enable_login:
        # the credential prompts were partially redacted in the source;
        # the prompt lines below are a reconstruction from the surviving
        # fragments (the original may read the password via getpass)
        username = input("Please input your username: ")
        password = input("Please input your password: ")
        user_credential = {
            "username": username,
            "password": hash_password(hash_password(password))
        }

        if not os.path.isfile(DEFAULT_ENV_PATH + "COOKIE_SECRET"):
            set_cookie()
        elif FLAGS.force_new_cookie:
            set_cookie()
    else:
        user_credential = None

    start_server(port=FLAGS.port, env_path=FLAGS.env_path,
                 readonly=FLAGS.readonly, print_func=print_func,
                 user_credential=user_credential)

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    formatter = config.pop("formatter", None)
    if formatter:
        try:
            formatter = self.config["formatters"][formatter]
        except StandardError as e:
            raise ValueError("Unable to set formatter %r: %s" % (formatter, e))
    level = config.pop("level", None)
    filters = config.pop("filters", None)
    if "()" in config:
        c = config.pop("()")
        if not hasattr(c, "__call__") and hasattr(types, "ClassType") and type(c) != types.ClassType:
            c = self.resolve(c)
        factory = c
    else:
        cname = config.pop("class")
        klass = self.resolve(cname)
        if issubclass(klass, logging.handlers.MemoryHandler) and "target" in config:
            try:
                th = self.config["handlers"][config["target"]]
                if not isinstance(th, logging.Handler):
                    config["class"] = cname
                    raise StandardError("target not configured yet")
                config["target"] = th
            except StandardError as e:
                raise ValueError("Unable to set target handler %r: %s" % (config["target"], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and "mailhost" in config:
            config["mailhost"] = self.as_tuple(config["mailhost"])
        elif issubclass(klass, logging.handlers.SysLogHandler) and "address" in config:
            config["address"] = self.as_tuple(config["address"])
        factory = klass
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        kwargs["strm"] = kwargs.pop("stream")
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result

def common_logger_config(self, logger, config, incremental=False):
    level = config.get('level', None)
    if level is not None:
        logger.setLevel(logging._checkLevel(level))
    if not incremental:
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get('handlers', None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get('filters', None)
        if filters:
            self.add_filters(logger, filters)

def common_logger_config(logger_config, logger, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers.
    """
    level = logger_config.get('level', None)
    if level is not None:
        logger.setLevel(logging._checkLevel(level))
    if not incremental:
        # Remove any existing handlers
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = logger_config.get('handlers', None)
        if handlers:
            add_handlers(logger, handlers)

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    formatter = config.pop('formatter', None)
    if formatter:
        try:
            formatter = self.config['formatters'][formatter]
        except StandardError as e:
            raise ValueError('Unable to set formatter %r: %s' % (formatter, e))
    level = config.pop('level', None)
    filters = config.pop('filters', None)
    if '()' in config:
        c = config.pop('()')
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        factory = c
    else:
        cname = config.pop('class')
        klass = self.resolve(cname)
        if issubclass(klass, logging.handlers.MemoryHandler) and 'target' in config:
            try:
                th = self.config['handlers'][config['target']]
                if not isinstance(th, logging.Handler):
                    config['class'] = cname
                    raise StandardError('target not configured yet')
                config['target'] = th
            except StandardError as e:
                raise ValueError('Unable to set target handler %r: %s' % (config['target'], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and 'mailhost' in config:
            config['mailhost'] = self.as_tuple(config['mailhost'])
        elif issubclass(klass, logging.handlers.SysLogHandler) and 'address' in config:
            config['address'] = self.as_tuple(config['address'])
        factory = klass
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result

def check_log_level(log_level_name, default_value):
    # type: (str, int) -> int
    """
    Verifies a logging level string - returns the requested log level
    if the supplied log level name is valid, default_value otherwise.

    >>> check_log_level("warning", logging.ERROR) == logging.WARNING
    True
    >>> check_log_level("invalid_level", logging.ERROR) == logging.ERROR
    True
    """
    try:
        return logging._checkLevel(log_level_name.upper())
    except ValueError:
        return default_value

def saliencyCalc(trained_result_path, eval_mode=True, logger=None, loglevel=logging.WARN):
    if logger is None:
        rv = logging._checkLevel(loglevel)
        logger = getLogger(__name__)
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(rv)
        stream_handler.setFormatter(
            logging.Formatter("[%(levelname)s]:%(message)s"))
        logger.addHandler(stream_handler)
        logger.setLevel(rv)
    SaliencyCalc(trained_result_path, eval_mode, logger=logger)()

def value_to_python_log_level(config_val, evar):
    """
    Convert an evar value into a Python logging level constant.

    :param str config_val: The env var value.
    :param EnvironmentVariable evar: The EVar object we are validating
        a value for.
    :return: A validated string.
    :raises: ValueError if the log level is invalid.
    """
    if not config_val:
        config_val = evar.default_val
    config_val = config_val.upper()
    # noinspection PyProtectedMember
    return logging._checkLevel(config_val)

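# Hedged sketch of the behaviour this validator relies on: logging._checkLevel
# maps known level names to their integer constants and raises ValueError for
# unknown names (the EVar object from the docstring is not reproduced here).
import logging

assert logging._checkLevel("DEBUG") == logging.DEBUG
try:
    logging._checkLevel("NOT_A_LEVEL")
except ValueError:
    print("invalid level name rejected")
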
def write_log(self, log):
    log_data = self.manager.pop(log['pool'], log['uuid'])
    if not log_data:
        self.expired_logs_count += 1
        self.scribe_log('Got expired log for pool {}'.format(log['pool']))
        return

    log_data = json.loads(log_data)
    log_data['asctime'] = datetime.datetime.fromtimestamp(
        log_data['created'])

    # Format the original log message
    message = self.OUR_FORMATTER % log_data
    if 'exc_text' in log_data:
        message = u'{}\n{}'.format(message, log_data['exc_text'])

    logger = self.get_pool_logger(name=log['pool'])
    level = logging._checkLevel(log_data['levelname'])
    logger.log(level, message)

def test_set_valid_logger_level(self):
    logging_conf = [
        ('subscription_manager.managercli', "ERROR"),
        ('rhsm', "WARNING"),
        ('rhsm-app', "CRITICAL"),
        ('rhsm-app.rhsmd', "DEBUG")
    ]
    for logger_name, log_level in logging_conf:
        self.rhsm_config.set('logging', logger_name, log_level)

    logutil.init_logger()
    for logger_name, log_level in logging_conf:
        real_log_level = logging.getLogger(logger_name).getEffectiveLevel()
        self.assertEqual(real_log_level, logging._checkLevel(log_level))

def __init__(self, name, level=logging.DEBUG):
    """
    Initialize the logger with a name and an optional level.

    :param name:
    :param level:
    """
    logging.Filterer.__init__(self)
    self.name = name
    self.level = logging._checkLevel(level)
    self.parent = None
    self.propagate = 1
    self.handlers = []
    self.disabled = 0
    self._cache = {}
    colorama.init(autoreset=True)

def common_logger_config(self, logger, config, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers.
    """
    level = config.get('level', None)
    if level is not None:
        logger.setLevel(logging._checkLevel(level))
    if not incremental:
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get('handlers', None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get('filters', None)
        if filters:
            self.add_filters(logger, filters)

def configure_handler(self, config):
    """Configure a handler from a dictionary."""
    formatter = config.pop("formatter", None)
    if formatter:
        try:
            formatter = self.config["formatters"][formatter]
        except StandardError as e:
            raise ValueError("Unable to set formatter "
                             "%r: %s" % (formatter, e))
    level = config.pop("level", None)
    filters = config.pop("filters", None)
    if "()" in config:
        c = config.pop("()")
        if not hasattr(c, "__call__") and hasattr(types, "ClassType") and type(c) != types.ClassType:
            c = self.resolve(c)
        factory = c
    else:
        klass = self.resolve(config.pop("class"))
        # Special case for handler which refers to another handler
        if issubclass(klass, logging.handlers.MemoryHandler) and "target" in config:
            try:
                config["target"] = self.config["handlers"][config["target"]]
            except StandardError as e:
                raise ValueError("Unable to set target handler "
                                 "%r: %s" % (config["target"], e))
        elif issubclass(klass, logging.handlers.SMTPHandler) and "mailhost" in config:
            config["mailhost"] = self.as_tuple(config["mailhost"])
        elif issubclass(klass, logging.handlers.SysLogHandler) and "address" in config:
            config["address"] = self.as_tuple(config["address"])
        factory = klass
    kwargs = dict((k, config[k]) for k in config if valid_ident(k))
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if "'stream'" not in str(te):
            raise
        # The argument name changed from strm to stream
        # Retry with old name.
        # This is so that code can be used with older Python versions
        # (e.g. by Django)
        kwargs["strm"] = kwargs.pop("stream")
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if level is not None:
        result.setLevel(_checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result

def info(message, also_console=True, newline=True, time_prefix=True):
    """ Log an informational message """
    msg = _process_msg(message)
    logger.info(msg)
    try:
        level = _logging._checkLevel(BuiltIn().get_variable_value(
            "${LOG LEVEL}", _logging.INFO))
        if time_prefix is True:
            msg = "{0} {1}".format(datetime.now().strftime("%H:%M:%S.%f")[:-3], msg)
        if level <= _logging.INFO and also_console is True:
            logger.console("[INFO] {0}".format(msg), newline)
    except RobotNotRunningError:
        # if we're not in a Robot context (i.e. unit test), default the level to INFO
        level = _logging.INFO

def inspect(self, log_level=None):
    """
    If log_level is not None, log combined inspection report at this level.
    """
    citekeys = self.unique_citekeys_by("dealiased_id")
    reports = []
    for citekey in citekeys:
        report = citekey.inspect()
        if not report:
            continue
        reports.append(f"{citekey.dealiased_id} -- {report}")
    report = "\n".join(reports)
    if reports and log_level is not None:
        log_level = logging._checkLevel(log_level)
        msg = f"Inspection of dealiased citekeys revealed potential problems:\n{report}"
        logging.log(log_level, msg)
    return report

def common_logger_config(self, logger, config, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers.
    """
    level = config.get("level", None)
    if level is not None:
        logger.setLevel(_checkLevel(level))
    if not incremental:
        # Remove any existing handlers
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get("handlers", None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get("filters", None)
        if filters:
            self.add_filters(logger, filters)

def configureLogging(logPath, logfile, loglevel=logging.INFO):
    try:
        loglevel = logging._checkLevel(loglevel)
    except (ValueError, TypeError):
        loglevel = logging.INFO

    logToFile = True if logfile else False
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
    formatter1 = logging.Formatter(
        '%(asctime)s - %(levelname)-8s - %(message)s', datefmt='%Y-%m-%d %H:%M')

    # prepare the logger
    logging.setLoggerClass(MyLogger)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.TRACE)

    # prepare stream handler
    strmhandler = logging.StreamHandler()
    strmhandler.setLevel(loglevel)
    strmhandler.setFormatter(formatter1)

    if logToFile:
        # create the log file path if needed
        if not os.path.exists(logPath):
            os.makedirs(logPath)
        logfile = os.path.join(logPath, logfile)
        rfhandler = logging.handlers.RotatingFileHandler(
            logfile, 'a', 1000000, 5)  # 5 x 1M files
        rfhandler.setLevel(loglevel)
        rfhandler.setFormatter(formatter)
        strmhandler.setLevel(
            logging.INFO
        )  # if we log to a file limit the console traces to INFO level
        logger.addHandler(rfhandler)

    logger.addHandler(strmhandler)
    logger.propagate = False  # prevent propagation to default (console) logger
    return logger

def addToLog(self, message, messageCode="", messageArgs=None, file="", refs=None, level=logging.INFO):
    """Add a simple info message to the default logger

    :param message: Text of message to add to log.
    :type message: str
    :param messageArgs: optional dict of message format-string key-value pairs
    :type messageArgs: dict
    :param messageCode: Message code (e.g., a prefix:id of a standard error)
    :type messageCode: str
    :param file: File name (and optional line numbers) pertaining to message
    :type file: str
    """
    if self.logger is not None:
        if messageArgs:
            args = (message, messageArgs)
        else:
            args = (message,)  # pass no args if none provided
        if refs is None:
            refs = []
        if isinstance(file, (tuple, list, set)):
            for _file in file:
                refs.append({"href": _file})
        elif isinstance(file, _STR_BASE):
            refs.append({"href": file})
        if isinstance(level, _STR_BASE):
            level = logging._checkLevel(level)
        self.logger.log(level, *args, extra={
            "messageCode": messageCode,
            "refs": refs
        })
    else:
        try:
            print(message % (messageArgs or {}))
        except UnicodeEncodeError:
            # extra parentheses in print to allow for 3-to-2 conversion
            print((message.encode(sys.stdout.encoding, 'backslashreplace').decode(
                sys.stdout.encoding, 'strict')))

def __init__(self, name, level=logging.NOTSET):
    """
    Initialize the logger with a name and an optional level.
    """
    logging.Filterer.__init__(self)
    self.name = name
    self.level = logging._checkLevel(level)
    self.parent = None
    self.propagate = True
    self.handlers = []
    self.disabled = False
    self.enable = _Handles(self)

    # registration to Manage
    if self.name not in self.manager.loggerDict.keys():
        self.manager.loggerDict[self.name] = self

def configure(self):
    """Do the configuration."""
    config = self.config
    if 'version' not in config:
        raise ValueError("dictionary doesn't specify a version")
    if config['version'] != 1:
        raise ValueError("Unsupported version: %s" % config['version'])
    incremental = config.pop('incremental', False)
    EMPTY_DICT = {}
    logging._acquireLock()
    try:
        if incremental:
            handlers = config.get('handlers', EMPTY_DICT)
            # incremental handler config only if handler name
            # ties in to logging._handlers (Python 2.7)
            if sys.version_info[:2] == (2, 7):
                for name in handlers:
                    if name not in logging._handlers:
                        raise ValueError('No handler found with '
                                         'name %r' % name)
                    else:
                        try:
                            handler = logging._handlers[name]
                            handler_config = handlers[name]
                            level = handler_config.get('level', None)
                            if level:
                                handler.setLevel(_checkLevel(level))
                        except StandardError, e:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
            loggers = config.get('loggers', EMPTY_DICT)
            for name in loggers:
                try:
                    self.configure_logger(name, loggers[name], True)
                except StandardError, e:
                    raise ValueError('Unable to configure logger '
                                     '%r: %s' % (name, e))
            root = config.get('root', None)
            if root:
                try:
                    self.configure_root(root, True)
                except StandardError, e:
                    raise ValueError('Unable to configure root '
                                     'logger: %s' % e)

def __init__(self, test_case, logger_name, level):
    super(_AssertLogsContext, self).__init__(test_case)
    self.logger_name = logger_name
    if level:
        if isinstance(level, basestring):
            if is_py2:
                self.level = logging._checkLevel(String(level).upper())
            else:
                self.level = logging._nameToLevel[String(level).upper()]
        else:
            self.level = level
    else:
        self.level = logging.INFO
    self.msg = None

def __init__(
        self,
        name=None,
        logger=None,
        log_start='Timer {timer.name!r} started at {timer.time_start}',
        log_stop='Timer {timer.name!r} stopped at {timer.time_stop}. Duration is {timer.duration}s',
        log_level=logging.DEBUG,
        log_name=None,
        laps_store=0,
        stat_template=None,
        **kw):
    super(Timer, self).__init__(name=name, **kw)
    if stat_template is not None:
        self.stat_template = stat_template
    self.laps_store = laps_store
    self.log_level = log_level and logging._checkLevel(log_level) or logging.NOTSET

    _stream = None
    if logger is None or isinstance(logger, logging.Logger):
        self.logger = logger
    elif isinstance(logger, basestring) and logger in {'stderr', 'stdout'}:
        _stream = getattr(sys, logger)
    elif isinstance(getattr(logger, 'write', None), Callable):
        _stream = logger
    else:
        raise ValueError(
            "Logger specification is wrong. {!r} given, but 'stderr', 'stdout' or Logger instance required."
            .format(logger))
    if _stream:
        _handler = logging.StreamHandler(_stream)
        self.logger = logging.Logger(name=log_name, level=self.log_level)
        self.logger.addHandler(_handler)

    self.log_start = log_start
    self.log_stop = log_stop

    self.duration_sum_last = 0
    self.duration_sum = 0
    self.duration_min = None
    self.duration_max = None
    self.lap_count = 0
    self.lap_timer = None
    self.laps = []

    self.__dict__.update(kw)

def _process_commands(msg):
    """ Processes logger commands """

    def get(key):
        r = msg.get(key)
        if r is not None:
            if not isinstance(r, dict):
                r = {None: r}
        else:
            return {}
        return r

    lowerLevels = get("lowerLevels")  # less verbose
    raiseLevels = get("raiseLevels")  # more verbose
    setLevels = get("setLevels")

    for k, v in lowerLevels.items():
        logger = core.getLogger(k)
        level = logging._checkLevel(v)
        if not l.isEnabledFor(level + 1):
            logger.setLevel(v)
    for k, v in raiseLevels.items():
        logger = core.getLogger(k)
        if not l.isEnabledFor(v):
            logger.setLevel(v)
    for k, v in setLevels.items():
        logger = core.getLogger(k)
        logger.setLevel(v)

    message = msg.get("message", None)
    if message:
        level = msg.get("level", "DEBUG")
        if isinstance(level, str):
            import logging
            if not level.isalpha():
                level = logging.DEBUG
            else:
                level = level.upper()
                level = getattr(logging, level, logging.DEBUG)
        sub = msg.get("subsystem", "<external>")
        logging.getLogger(sub).log(level, message)

def main():
    parser = argparse.ArgumentParser(
        description='Start the ParlAI-MTurk task managing server.')
    parser.add_argument('--port', metavar='port', type=int,
                        default=DEFAULT_PORT,
                        help='port to run the server on.')
    parser.add_argument('--hostname', metavar='hostname', type=str,
                        default=DEFAULT_HOSTNAME,
                        help='host to run the server on.')
    parser.add_argument('--sandbox', dest='sandbox', action='store_true',
                        default=False,
                        help='Run the server using sandbox data')
    parser.add_argument('--db_file', metavar='db_file', type=str,
                        default=DEFAULT_DB_FILE,
                        help='name of database to use (in core/run_data)')
    parser.add_argument('--logging_level', metavar='logger_level',
                        default='INFO',
                        help='logging level (default = INFO). Can take logging'
                             ' level name or int (example: 20)')
    FLAGS = parser.parse_args()

    if FLAGS.sandbox:
        if FLAGS.db_file == DEFAULT_DB_FILE:
            FLAGS.db_file = DEFAULT_SB_DB_FILE

    logging_level = logging._checkLevel(FLAGS.logging_level)
    logging.getLogger().setLevel(logging_level)

    rebuild_source()

    start_server(port=FLAGS.port, hostname=FLAGS.hostname,
                 db_file=FLAGS.db_file, is_sandbox=FLAGS.sandbox)