def set_log_level(logger: logging.Logger, log_level: Union[int, bool]) -> None:
    """Set *logger*'s level from a flexible ``log_level`` value.

    :param logger: logger to configure.
    :param log_level: ``True`` means ``logging.DEBUG``; an ``int`` is used
        as the level directly; ``None`` or ``False`` leaves the logger
        untouched.
    :raises ValueError: if ``log_level`` is neither a boolean, an integer
        nor None.
    """
    if log_level is None or log_level is False:
        return  # explicit "leave the logger alone" values
    if isinstance(log_level, bool):
        # Only True can reach this point; treat it as "full debug output".
        log_level = logging.DEBUG
    elif not isinstance(log_level, int):
        raise ValueError('log_level must be a boolean, integer or None')
    if logger.getEffectiveLevel() != log_level:
        # Lazy %-args: the message is only formatted if DEBUG is enabled.
        logger.debug('Changing log_level from %d to %d',
                     logger.getEffectiveLevel(), log_level)
        logger.setLevel(log_level)
def has_level_handler(logger: logging.Logger) -> bool:
    """Check if there is a handler in the logging chain that will handle the
    given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`.

    Walks from *logger* up through its ancestors and stops at the first
    logger whose ``propagate`` flag is False (mirroring how the logging
    module dispatches records).

    :param logger: logger whose handler chain should be inspected.
    :return: True if some handler's level is low enough to handle records
        emitted at the logger's effective level.
    """
    # Fix: the original had the same docstring twice; the second copy was a
    # dead string-literal statement and has been removed.
    level = logger.getEffectiveLevel()
    current = logger
    while current:
        if any(handler.level <= level for handler in current.handlers):
            return True
        if not current.propagate:
            break
        current = current.parent
    return False
async def _download_from_asyncgen(
    items: AsyncGenerator,
    params: DownloadParams,
    tcp_connections: int = 64,
    nb_workers: int = 64,
    batch_size: int = 16,
    retries: int = 1,
    logger: logging.Logger = None,
):
    """Asynchronous downloader that consumes an async generator of items.

    Args:
        items (AsyncGenerator): async generator that yields a standardized
            dict of urls.
        params (DownloadParams): Download parameter dict.
        tcp_connections (int, optional): Maximum number of concurrent TCP
            connections. Defaults to 64.
        nb_workers (int, optional): Maximum number of workers. Defaults to 64.
        batch_size (int, optional): Maximum queue batch size. Defaults to 16.
        retries (int, optional): Maximum number of attempts. Defaults to 1.
        logger (logging.Logger, optional): Logger object. Defaults to None.

    Returns:
        dict: counters for "failed", "skipped" and "success" downloads.
    """
    queue = asyncio.Queue(nb_workers)
    # Fix: `logger` may be None (the documented default); the original called
    # logger.getEffectiveLevel() unconditionally -> AttributeError.
    hide_progress = logger is not None and logger.getEffectiveLevel() > logging.INFO
    progressbar = tqdm(smoothing=0, unit=" Downloads", disable=hide_progress)
    stats = {"failed": 0, "skipped": 0, "success": 0}
    retry_options = ExponentialRetry(attempts=retries)
    async with RetryClient(
        connector=aiohttp.TCPConnector(limit=tcp_connections),
        raise_for_status=True,
        retry_options=retry_options,
        trust_env=True,
    ) as session:
        # get_running_loop() is the correct (non-deprecated) call from
        # inside a coroutine.
        loop = asyncio.get_running_loop()
        workers = [
            loop.create_task(
                _download_queue(queue, session, stats, params=params,
                                progressbar=progressbar, logger=logger))
            for _ in range(nb_workers)
        ]
        # Get chunks from the async generator and feed them to the queue.
        async with aiostream.stream.chunks(items, batch_size).stream() as chnk:
            async for batch in chnk:
                await queue.put(batch)
        await queue.join()
        for w in workers:
            w.cancel()
        return stats
def _updateLogger(self, logger: logging.Logger) -> None:
    """Update the logger group to reflect the currently selected logger.

    If there is no current logger (``logger`` is None), then the logger
    group is cleared and disabled.

    :param logger: the currently selected logger, or None.
    """
    if logger is None or not logger.level:
        # No explicit level set on the logger: clear any checked level
        # button. The group is temporarily made non-exclusive first —
        # presumably because an exclusive QButtonGroup cannot end up with
        # no checked button (TODO confirm against Qt docs).
        checked = self._levelButtonGroup.checkedButton()
        if checked is not None:
            self._levelButtonGroup.setExclusive(False)
            checked.setChecked(False)
            self._levelButtonGroup.setExclusive(True)
    # Buttons are only checkable while a logger is actually selected.
    self._checkLoggerEnabled.setCheckable(logger is not None)
    for button in self._levelButtonGroup.buttons():
        button.setCheckable(logger is not None)
    if logger is None:
        self._effectiveLevel.setText("")
    else:
        # `disabled` is the stdlib Logger attribute; checked == enabled.
        self._checkLoggerEnabled.setChecked(not logger.disabled)
        self._effectiveLevel.setText(str(logger.getEffectiveLevel()))
        if logger.level:
            # An explicit level is set: reflect it in the level buttons.
            button = self._buttonForForLogLevel(logger.level)
            if button is not None:
                button.setChecked(True)
def set_logger_verbosity(log: logging.Logger, quieter: int, louder: int) -> None:
    """Set the appropriate logging level from -q and -v counts.

    Each -q raises the threshold by 10, each -v lowers it by 10, clamped
    to the [DEBUG, CRITICAL] range.
    """
    # The effective level is reset everytime `main()` is called.
    adjusted = log.getEffectiveLevel() + 10 * (quieter - louder)
    log.setLevel(clamp(adjusted, low=logging.DEBUG, high=logging.CRITICAL))
def __init__(self, base_logger: logging.Logger):
    """Set up the multiprocess-logging state for *base_logger*.

    :param base_logger: logger whose child will be used for this
        object's own messages and whose effective level is recorded.
    """
    self.logger = base_logger.getChild("MPLogger")
    # Unbounded queue (-1 == no size limit) for log records.
    self.__queue = multiprocessing.Queue(-1)
    self.__logger_level = base_logger.getEffectiveLevel()
    # NOTE: `target=self.__listener` resolves to the *method* named
    # __listener (defined elsewhere in the class, not visible here)
    # because the instance attribute does not exist yet; this assignment
    # then rebinds the name to the Thread object. The thread is created
    # but not started here.
    self.__listener = threading.Thread(name="MPLoggerListener", target=self.__listener)
    self.__listener_shutdown = threading.Event()
    self.__listener_exc_info = None
def get_current_verbosity_level(logger: logging.Logger) -> int:
    """Get the current verbosity level based on logging level.

    Levels below DEBUG map to 2, exactly DEBUG maps to 1, and anything
    above DEBUG maps to 0.
    """
    effective = logger.getEffectiveLevel()
    if effective > logging.DEBUG:
        return 0
    return 1 if effective == logging.DEBUG else 2
def install_coloredlogs(logger: logging.Logger) -> None:
    """Install coloredlogs on *logger* at its current effective level.

    Fix: ``dict(coloredlogs.DEFAULT_LEVEL_STYLES)`` is a *shallow* copy,
    so the original's in-place write ``level_styles["debug"]["color"]``
    mutated the nested dict shared with the library-wide defaults,
    changing debug colors for every other user in the process. The entry
    is now replaced with a fresh dict instead.
    """
    level_styles = dict(coloredlogs.DEFAULT_LEVEL_STYLES)
    level_styles["debug"] = {**level_styles.get("debug", {}), "color": 242}
    coloredlogs.install(
        level=logger.getEffectiveLevel(),
        logger=logger,
        level_styles=level_styles,
        fmt=LOGGING_FORMAT,
    )
def dump_analysis(self, logger: logging.Logger, ep_name: str):
    '''
    Analyze the partitioning information and log the analysis
    :param logger: Logger to use
    :param ep_name: Execution provider name to use in the log messages
    '''
    num_nodes = self.num_nodes + self.num_nodes_in_subgraphs
    logger.info(f'{self.num_partitions} partitions with a total of {self.num_supported_nodes}/{num_nodes} '
                f'nodes can be handled by the {ep_name} EP.')

    if self.num_nodes_in_subgraphs:
        logger.info(f'{self.num_nodes_in_subgraphs} nodes are in subgraphs, which are currently not handled.')

    if self.supported_groups:
        logger.info(f'Partition sizes: [{", ".join([str(len(partition)) for partition in self.supported_groups])}]')

    logger.info(f'Unsupported nodes due to operator={self.nodes_unsupported_due_to_op}')
    if self.nodes_unsupported_due_to_dynamic_input:
        logger.info('Unsupported nodes due to input having a dynamic shape=%d',
                    self.nodes_unsupported_due_to_dynamic_input)

    if logger.getEffectiveLevel() <= logging.DEBUG:
        # Enable this manually if you need to look at specific partitions.
        # for group in supported_groups:
        #     logger.debug(f'Nodes in group: {",".join([f"{node.name}:{node.op_type}" for node in group])}')
        if self.unsupported_ops:
            logger.info(f'Unsupported ops: {",".join(sorted(self.unsupported_ops))}')

        caveats = self.supported_ops_checker.get_caveats()
        if caveats:
            indent = ' ' * 5
            logger.debug('Caveats that have not been checked and may result in a node not being supported: '
                         f'{"".join([os.linesep + indent + caveat for caveat in caveats])}')

    # Fix: guard against division by zero for a model with no nodes.
    pct_nodes_using_ep = (self.num_supported_nodes / num_nodes * 100) if num_nodes else 0.0
    if self.num_partitions == 0:
        logger.info(f"{ep_name} cannot run any nodes in this model.")
    elif self.num_partitions == 1:
        if pct_nodes_using_ep > 75:
            logger.info(f"{ep_name} should work well for this model as there is one partition "
                        f"covering {pct_nodes_using_ep:.1f}% of the nodes in the model.")
        elif pct_nodes_using_ep > 50:
            logger.info(
                f"{ep_name} may work well for this model, however only {pct_nodes_using_ep:.1f}% of nodes "
                "will use it. Performance testing is required to validate.")
        else:
            # Fix: message typo "not work will" -> "not work well"; use .1f
            # like every other percentage message for consistency.
            logger.info(
                f"{ep_name} will probably not work well for this model as only {pct_nodes_using_ep:.1f}% "
                "of nodes will use it.")
    elif self.num_partitions == 2 and pct_nodes_using_ep > 75:
        logger.info(f"{ep_name} can be considered for this model as there are two partitions "
                    f"covering {pct_nodes_using_ep:.1f}% of the nodes. "
                    "Performance testing is required to validate.")
    else:
        logger.info(f"{ep_name} is not recommended with this model as there are {self.num_partitions} partitions "
                    f"covering {pct_nodes_using_ep:.1f}% of the nodes in the model. "
                    "This will most likely result in worse performance than just using the CPU EP.")
def capture_logs(
    logger: logging.Logger, level: logging._Level
) -> Generator[List[logging.LogRecord], None, None]:
    """Temporarily capture records emitted on *logger* at *level*.

    Yields the live list of captured records.

    Fix: cleanup now runs in a ``finally`` block, so the capture handler
    is removed and the original level restored even if the guarded body
    raises (the original skipped both on exception, leaking the handler
    and leaving the logger at the overridden level).
    """
    old_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    handler = CaptureHandler(level=level)
    logger.addHandler(handler)
    try:
        yield handler.records
    finally:
        logger.removeHandler(handler)
        logger.setLevel(old_level)
def _decorateLoggerItem(self, item: QListWidgetItem, logger: logging.Logger) -> None:
    """Decorate an entry in the logger list reflecting the properties of
    the logger: text color follows the effective level, bold marks an
    explicitly-set level, and a gray background marks a disabled logger.
    """
    effective = logger.getEffectiveLevel()
    item.setForeground(self._colorForLogLevel(effective))
    item_font = item.font()
    item_font.setBold(bool(logger.level))
    item.setFont(item_font)
    background = Qt.lightGray if logger.disabled else Qt.white
    item.setBackground(background)
def checker(model_path, logger: logging.Logger):
    """Analyze an ONNX model's suitability for the NNAPI and CoreML
    execution providers and log the results.

    :param model_path: path to the ONNX model file to analyze.
    :param logger: logger used for all analysis output.
    :return: True if at least one of NNAPI/CoreML is not ruled out
        (i.e. its suitability is not ``TryWithEP.NO``).
    """
    model = onnx.load(model_path)
    model_with_shape_info = onnx.shape_inference.infer_shapes(model)

    # create lookup map for efficiency
    value_to_shape = {}
    for v in model_with_shape_info.graph.input:
        value_to_shape[v.name] = v
    for v in model_with_shape_info.graph.output:
        value_to_shape[v.name] = v
    for v in model_with_shape_info.graph.value_info:
        value_to_shape[v.name] = v

    # num_dynamic_values is not used below; only the dynamic_inputs flag is.
    dynamic_inputs, num_dynamic_values = check_shapes(model_with_shape_info.graph)

    def check_ep(ep_name, checker_func):
        # Run one EP-specific partition check and return its suitability.
        logger.info(f"Checking {ep_name}")

        # check with shape info first so supported nodes takes into account values with dynamic shapes
        partition_info = checker_func(model_with_shape_info, value_to_shape)
        if logger.getEffectiveLevel() <= logging.DEBUG:
            partition_info.dump_analysis(logger, ep_name)

        suitability = partition_info.suitability()
        logger.info(f"Model should perform well with {ep_name} as is: {suitability.name}")

        if suitability != PartitioningInfo.TryWithEP.YES and dynamic_inputs:
            logger.info("Checking if model will perform better if the dynamic shapes are fixed...")
            # NOTE(review): this call omits the value_to_shape argument the
            # first call passed — presumably checker_func treats a missing
            # shape map as "assume fixed shapes"; confirm against
            # checker_func's definition.
            partition_info_with_fixed_shapes = checker_func(model_with_shape_info)
            if logger.getEffectiveLevel() <= logging.DEBUG:
                # analyze and log detailed info
                logger.info('Partition information if the model was updated to make the shapes fixed:')
                partition_info_with_fixed_shapes.dump_analysis(logger, ep_name)

            fixed_shape_suitability = partition_info_with_fixed_shapes.suitability()
            logger.info(f"Model should perform well with {ep_name} if modified to have fixed input shapes: "
                        f"{fixed_shape_suitability.name}")
            if fixed_shape_suitability != PartitioningInfo.TryWithEP.NO:
                logger.info('Shapes can be altered using python -m onnxruntime.tools.make_dynamic_shape_fixed')

            # Report the better of the as-is and fixed-shape outcomes.
            if fixed_shape_suitability.value > suitability.value:
                suitability = fixed_shape_suitability

        return suitability

    nnapi_suitability = check_ep("NNAPI", check_nnapi_partitions)
    coreml_suitability = check_ep("CoreML", check_coreml_partitions)

    if (nnapi_suitability != PartitioningInfo.TryWithEP.YES
            or coreml_suitability != PartitioningInfo.TryWithEP.YES) \
            and logger.getEffectiveLevel() > logging.DEBUG:
        logger.info('Re-run with log level of DEBUG for more details on the NNAPI/CoreML issues.')

    logger.info('---------------')
    return nnapi_suitability != PartitioningInfo.TryWithEP.NO or coreml_suitability != PartitioningInfo.TryWithEP.NO
def __init__(self, logger: Logger): super().__init__() # https://waymoot.org/home/python_string/ self.text = deque(maxlen=10000) self.propagate_level = logger.getEffectiveLevel() # random url because without authorization!!! DebugView.url = f"/api/{DOMAIN}/{uuid.uuid4()}" logger.addHandler(self) logger.setLevel(logging.DEBUG)
def force_debug_messages(logger: logging.Logger) -> Iterator[None]:
    """Force emitting debug messages for a code section.

    Keyword arguments:
    logger: The logger to manipulate.
    """
    previous = logger.getEffectiveLevel()
    logger.setLevel(logging.DEBUG)
    try:
        yield
    finally:
        # Restore whatever threshold was in effect before entering.
        logger.setLevel(previous)
def install_coloredlogs(
    logger: logging.Logger, force: bool = False
) -> Iterator[Callable[[str], None]]:
    """
    contextmanager to set up our logger customizations; yields a function to set the status line
    at the bottom of the screen (if stderr isatty, else it does nothing)

    Fix: ``dict(coloredlogs.DEFAULT_LEVEL_STYLES)`` is a *shallow* copy, so
    the original's in-place writes (``["debug"]["color"]``,
    ``["error"]["bold"]``, ``["warning"]["bold"]``) mutated the nested
    dicts shared with the library-wide defaults. Nested entries are now
    replaced with fresh dicts instead.
    """
    level_styles = {}
    field_styles = {}
    fmt = LOGGING_FORMAT
    enable = force or (sys.stderr.isatty() and "NO_COLOR" not in os.environ)
    if enable:
        level_styles = dict(coloredlogs.DEFAULT_LEVEL_STYLES)
        level_styles["debug"] = {**level_styles.get("debug", {}), "color": 242}
        level_styles["notice"] = {"color": "green", "bold": True}
        level_styles["error"] = {**level_styles.get("error", {}), "bold": True}
        level_styles["warning"] = {**level_styles.get("warning", {}), "bold": True}
        level_styles["info"] = {}
        field_styles = dict(coloredlogs.DEFAULT_FIELD_STYLES)
        field_styles["asctime"] = {"color": "blue"}
        field_styles["name"] = {"color": "magenta"}
        fmt = LOGGING_FORMAT_STDERR
        # monkey-patch _StatusLineStandardErrorHandler over coloredlogs.StandardErrorHandler for
        # coloredlogs.install() to instantiate
        coloredlogs.StandardErrorHandler = _StatusLineStandardErrorHandler
        sys.stderr.write(ANSI.HIDE_CURSOR)  # hide cursor
    try:
        coloredlogs.install(
            level=logger.getEffectiveLevel(),
            logger=logger,
            level_styles=level_styles,
            field_styles=field_styles,
            fmt=fmt,
        )
        yield (
            lambda status: _StatusLineStandardErrorHandler._singleton.set_status(  # pyre-fixme
                status
            )
            if _StatusLineStandardErrorHandler._singleton
            else None
        )
    finally:
        if enable:
            sys.stderr.write(ANSI.CLEAR)  # wipe the status line
            sys.stderr.write(ANSI.SHOW_CURSOR)  # un-hide cursor
def _has_level_handler(logger: logging.Logger) -> bool: """Check if there is a handler in the logging chain that will handle the given logger's effective level. """ level = logger.getEffectiveLevel() current = logger while current: if any(handler.level <= level for handler in current.handlers): return True if not current.propagate: break current = current.parent # type: ignore return False
def install_coloredlogs(logger: logging.Logger) -> None:
    """Install coloredlogs on *logger*, enabling colors only when stderr
    is a TTY and NO_COLOR is not set.

    Fix: ``dict(coloredlogs.DEFAULT_LEVEL_STYLES)`` is a *shallow* copy,
    so the original's in-place write ``level_styles["debug"]["color"]``
    mutated the nested dict shared with the library-wide defaults. The
    entry is now replaced with a fresh dict instead.
    """
    level_styles = {}
    field_styles = {}
    if sys.stderr.isatty() and "NO_COLOR" not in os.environ:
        level_styles = dict(coloredlogs.DEFAULT_LEVEL_STYLES)
        level_styles["debug"] = {**level_styles.get("debug", {}), "color": 242}
        level_styles["notice"] = {"color": "magenta"}
        level_styles["info"] = {}
        # None tells coloredlogs.install() to use its own field defaults.
        field_styles = None
    coloredlogs.install(
        level=logger.getEffectiveLevel(),
        logger=logger,
        level_styles=level_styles,
        field_styles=field_styles,
        fmt=LOGGING_FORMAT,
    )
def new_log_file(logger: logging.Logger, suffix: str, file_type: str = "tcl") -> logging.Logger:
    """Create new logger and log file from existing logger.

    The new log file will be created in the same directory as the existing
    logger file and will be named as the existing log file with the
    requested suffix.

    :param logger: existing logger
    :param suffix: string to add to the existing log file name to create the new log file name.
    :param file_type: logger file type (tcl, txt, etc.)
    :return: the newly configured logger.
    """
    # Use the last FileHandler attached to the source logger (if any).
    file_handler = None
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            file_handler = handler
    new_logger = logging.getLogger(file_type + suffix)
    if file_handler:
        logger_file_name = path.splitext(file_handler.baseFilename)[0]
        tcl_logger_file_name = logger_file_name + "-" + suffix + "." + file_type
        new_logger.addHandler(logging.FileHandler(tcl_logger_file_name, "w"))
    # Fix: always mirror the source logger's effective level, even when it
    # has no FileHandler (otherwise the new logger kept the default level).
    new_logger.setLevel(logger.getEffectiveLevel())
    return new_logger
def install_coloredlogs(logger: logging.Logger) -> None:
    """Install coloredlogs on *logger*, enabling colors only when stderr
    is a TTY and NO_COLOR is not set.

    Fix: ``dict(...)`` produces a *shallow* copy of the coloredlogs
    default style maps, so the original's in-place writes
    (``["debug"]["color"]``, ``["error"]["bold"]``, ``["warning"]["bold"]``)
    mutated the nested dicts shared with the library-wide defaults.
    Nested entries are now replaced with fresh dicts instead.
    """
    level_styles = {}
    field_styles = {}
    if sys.stderr.isatty() and "NO_COLOR" not in os.environ:
        level_styles = dict(coloredlogs.DEFAULT_LEVEL_STYLES)
        level_styles["debug"] = {**level_styles.get("debug", {}), "color": 242}
        level_styles["notice"] = {"color": "green", "bold": True}
        level_styles["error"] = {**level_styles.get("error", {}), "bold": True}
        level_styles["warning"] = {**level_styles.get("warning", {}), "bold": True}
        level_styles["info"] = {}
        field_styles = dict(coloredlogs.DEFAULT_FIELD_STYLES)
        field_styles["asctime"] = {"color": "blue"}
        field_styles["name"] = {"color": "magenta"}
    coloredlogs.install(
        level=logger.getEffectiveLevel(),
        logger=logger,
        level_styles=level_styles,
        field_styles=field_styles,
        fmt=LOGGING_FORMAT,
    )
def add_logger(self, logger: logging.Logger):
    """Add *logger* to this group, synchronizing it with the group's
    shared level, propagation flag and handlers.

    The first logger added seeds the group configuration from its own
    effective level and propagate flag; every later logger is forced to
    the group's existing configuration.

    :param logger: the logger to add.
    :raises RuntimeError: if called on the root group.
    :raises ValueError: if the logger was already added to this group.
    """
    if self._is_root:
        raise RuntimeError(
            f"Adding loggers to the root group is not allowed")
    validate_logger_object(logger)
    with self._lock:
        if logger in self:
            raise ValueError(
                f"Logger '{logger}' already added to this group")
        if self._loggers:
            # Group already configured: impose its settings on the newcomer.
            logger.setLevel(self._level)
            logger.propagate = self._propagate
            for handler in self._handlers:
                logger.addHandler(handler)
        else:
            # This is the first logger in the group, will update initial group configuration
            self._level = logger.getEffectiveLevel()
            logger.setLevel(self._level)
            self._propagate = bool(logger.propagate)
        self._loggers.add(logger)
def getEffectiveLevel(x):
    """Report DEBUG while the app runs in debug mode; otherwise defer to
    the stock Logger implementation.
    """
    if app.debug:
        return DEBUG
    return Logger.getEffectiveLevel(x)
class ProgressAndLog(object):
    """
    Subclass of Logger, this class combines 2 functionnalities:
    -print messages
    -report computing progress

    Logger API: http://docs.python.org/library/logging.html#logger-objects

    traditional logging functions still work
    ========================================
    #boilerplate initialization
    >>> from logging import DEBUG, INFO, WARNING
    >>> verbose_logger = get_logger("test.logging.ver", verbosity_offset=-10)
    >>> standard_logger = get_logger("test.logging.std")
    >>> laconic_logger = get_logger("test.logging.lac", verbosity_offset=+10)

    >>> verbose_logger.debug("Message must be displayed")
    [test.logging.ver] Message must be displayed
    >>> standard_logger.debug("Message mustn't be displayed")
    >>> laconic_logger.debug("Message mustn't be displayed")

    >>> verbose_logger.info("Message must be displayed")
    [test.logging.ver] Message must be displayed
    >>> standard_logger.info("Message must be displayed")
    [test.logging.std] Message must be displayed
    >>> laconic_logger.info("Message mustn't be displayed")

    >>> verbose_logger.warning("Message must be displayed")
    [test.logging.ver] Message must be displayed
    >>> standard_logger.warning("Message must be displayed")
    [test.logging.std] Message must be displayed
    >>> laconic_logger.warning("Message must be displayed")
    [test.logging.lac] Message must be displayed

    corner cases
    ============

    mixing of progress messages and dots
    ------------------------------------
    >>> logger = get_logger("test.mix_progress_dots")
    >>> logger.progress_every(1)
    >>> logger.set_dot_string('x')
    >>> logger.progress_step()
    x
    [test.mix_progress_dots] Iteration 1 done
    >>> logger.progress_reset()
    >>> for count in range(4):
    ...     logger.progress_step()
    x
    [test.mix_progress_dots] Iteration 1 done
    x
    [test.mix_progress_dots] Iteration 2 done
    x
    [test.mix_progress_dots] Iteration 3 done
    x
    [test.mix_progress_dots] Iteration 4 done
    >>> logger.progress_reset()
    >>> logger.set_offset(+0)
    >>> logger.dot_every(100)
    >>> logger.progress_every(1000)
    >>> for count in range(2000):
    ...     logger.progress_step()
    xxxxxxxxxx
    [test.mix_progress_dots] Iteration 1000 done
    xxxxxxxxxx
    [test.mix_progress_dots] Iteration 2000 done
    """

    def __init__(self, name, verbosity_offset, logfile=None, timestamp=False):
        """
        Parameters
        ----------
        name: string
            is likely to end up between brackets at the beggining of each
            message
        verbosity_offset: integer
            see add_to_offset and the like
        logfile:
            see ProgressAndLog.add_logfile
        timestamp: boolean, defaults to False
            defines:
            - whether the first logfile (or stdout) will contain timestamps
            - default value for future calls to add_logfile
        """
        self.logger = Logger(name)
        # overwritten by set_offset
        # this is an emulation of the Logger level for dots
        self._offset = verbosity_offset
        self._iterations = 0
        self._progress_every = 0
        self._dot_every = 1
        self._percent_print_every = 0
        self._next_percent_print = _NEVER_PERCENT_VALUE
        self._percent_target = _NEVER_PERCENT_VALUE
        self._logfiles = []
        self._dot_logfiles = []
        self._timestamp = timestamp
        self.add_logfile(logfile, timestamp=timestamp)
        self.set_offset(verbosity_offset)
        self._dot_string = DEFAULT_DOT_CHAR
        # Expose the plain Logger text methods through the dot/text
        # bookkeeping wrapper. NOTE: the loop variable deliberately
        # reuses (and shadows) the `name` parameter, which is no longer
        # needed at this point.
        for name in 'debug info warning critical log'.split():
            setattr(self, name,
                    _textlogger_factory(self, getattr(self.logger, name)))

    def add_logfile(self, logfile, dots=True, timestamp=None):
        """
        Parameters
        ----------
        logfile: string or open file, optional, default: sys.stdout
            we'll log messages and progress there.
            if a string is supplied, it is assumed to be the path where to
            log. The file will be created or appended to.
        dots: boolean
            do you want dots printed in this logfile?
        timestamp: boolean, defaults to None
            do you want logs to be prefixed by a timestamp?
            if unset (None), the value set at object initialization
            (in __init__) is reused
        """
        if logfile is None:
            logfile = sys.stdout
        elif isinstance(logfile, basestring):
            # NOTE: `basestring` (and 'ab' append-binary mode) indicate
            # this module targets Python 2.
            logfile = open(logfile, 'ab')
        if timestamp is None:
            timestamp = self._timestamp
        if timestamp:
            log_format = "[%(asctime)s][%(name)s] %(message)s"
        else:
            log_format = "[%(name)s] %(message)s"
        formatter = Formatter(fmt=log_format)
        handler = StreamHandler(logfile)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self._logfiles.append(logfile)
        if dots:
            self._dot_logfiles.append(logfile)

    def msg(self, message, verbosity=None, msgvars=()):
        """
        Prints out an msg. Conditionnal to verbosity settings

        Parameters
        ----------
        message: text string
        verbosity: optional; boolean or integer.
            if False, the message is only displayed when the logger is as
            verbose as DEBUG or more
        msgvars: tuple or anything: mapping (dict), string, lists...
            as suitable for text suitable for substitution.
            message will be logged as ``message % msgvars``
            tuple is handled specifically, all the rest is used in a
            single placeholder

            msgvars allows for late evaluation of string formatting,
            therefore the formatting is not performed if the message
            should not be displayed at all

        Always print if verbosity not specified
        ---------------------------------------
        #boilerplate initialization
        >>> from logging import DEBUG, INFO, WARNING
        >>> verbose_logger = get_logger("test.msg.ver", verbosity_offset=-10)
        >>> standard_logger = get_logger("test.msg.std")
        >>> laconic_logger = get_logger("test.msg.lac", verbosity_offset=+10)

        >>> verbose_logger.msg("Message must be displayed")
        [test.msg.ver] Message must be displayed
        >>> standard_logger.msg("Message must be displayed")
        [test.msg.std] Message must be displayed
        >>> laconic_logger.msg("Message must be displayed")
        [test.msg.lac] Message must be displayed

        Print according to verbosity
        ----------------------------
        - verbosity=False => DEBUG
        - verbosity=True => always print
        - verbosity=int => use int as verb level
        Maybe this makes the case of ``verbosity in (0, 1)`` counter
        intuitive?

        False
        >>> verbose_logger.msg("Message must be displayed", verbosity=False)
        [test.msg.ver] Message must be displayed
        >>> standard_logger.msg("Message must'nt be displayed",
        ...     verbosity=False)
        >>> laconic_logger.msg("Message must'nt be displayed", verbosity=False)

        True
        >>> verbose_logger.msg("Message must be displayed", verbosity=True)
        [test.msg.ver] Message must be displayed
        >>> standard_logger.msg("Message must be displayed", verbosity=True)
        [test.msg.std] Message must be displayed
        >>> laconic_logger.msg("Message must be displayed", verbosity=True)
        [test.msg.lac] Message must be displayed

        0 (same for 1)
        >>> verbose_logger.msg("Message mustn' be displayed", verbosity=0)
        >>> standard_logger.msg("Message must'nt be displayed", verbosity=0)
        >>> laconic_logger.msg("Message must'nt be displayed", verbosity=0)

        DEBUG (10)
        >>> verbose_logger.msg("Message must be displayed", verbosity=DEBUG)
        [test.msg.ver] Message must be displayed
        >>> standard_logger.msg("Message must'nt be displayed",
        ...     verbosity=DEBUG)
        >>> laconic_logger.msg("Message must'nt be displayed", verbosity=DEBUG)

        INFO (20)
        >>> verbose_logger.msg("Message must be displayed", verbosity=INFO)
        [test.msg.ver] Message must be displayed
        >>> standard_logger.msg("Message must be displayed", verbosity=INFO)
        [test.msg.std] Message must be displayed
        >>> laconic_logger.msg("Message must'nt be displayed", verbosity=INFO)

        WARN (30)
        >>> verbose_logger.msg("Message must be displayed", verbosity=WARNING)
        [test.msg.ver] Message must be displayed
        >>> standard_logger.msg("Message must be displayed", verbosity=WARNING)
        [test.msg.std] Message must be displayed
        >>> laconic_logger.msg("Message must be displayed", verbosity=WARNING)
        [test.msg.lac] Message must be displayed

        Play with verbosity offset
        --------------------------
        For absolute verbosity, use setLevel (see below).
        >>> laconic_logger.msg("Message must'nt be displayed", verbosity=False)
        >>> laconic_logger.set_offset(-10) # relative to general level
        ...                                # which is INFO
        >>> laconic_logger.msg("Message must be displayed", verbosity=False)
        [test.msg.lac] Message must be displayed
        >>> laconic_logger.add_to_offset(20)
        ... #add_to_offset will get us back to the initial +10 value
        >>> laconic_logger.msg("Message must'nt be displayed", verbosity=False)

        Add some formatting
        -------------------
        >>> logger = get_logger("test.msg_placeholder")
        >>> logger.msg("Message with 1 placeholder [[%s]]",
        ...     msgvars="placed_data")
        [test.msg_placeholder] Message with 1 placeholder [[placed_data]]
        >>> logger.msg("Message with 2 placeholders [[%s]] [[%s]]",
        ...     msgvars=("data 1", "data 2")) #doctest: +NORMALIZE_WHITESPACE
        [test.msg_placeholder] Message with 2 placeholders [[data 1]]
        [[data 2]]
        >>> logger.msg("Message with dict formatting [[%(value 1)s, "
        ...     "%(value 2)s]]", msgvars={"value 1": "aaa", "value 2": "bbb"},)
        [test.msg_placeholder] Message with dict formatting [[aaa, bbb]]
        """
        # Map the boolean verbosity shortcuts onto real logging levels.
        if verbosity in (True, None):
            verbosity = CRITICAL
        elif verbosity is False:
            verbosity = DEBUG
        self._set_out_type(TEXT)
        if isinstance(msgvars, tuple):
            # Logger.log wants tuples to be given as *args
            self.logger.log(verbosity, message, *msgvars)
        else:
            self.logger.log(verbosity, message, msgvars)

    def dot(self, verbosity=None, dot_string=None):
        """
        Spits out a dot. Conditionnal to verbosity settings

        Parameters
        ----------
        verbosity: optional, see ProgressAndLog.msg
        dot_string: optional, string
            this string, likely "." or "x"... will be used this time only
            This allows to tell information about the computation going
            on. Examples::
                [5][3][2][3][2][8][2][3]
            or
                ....X........X...............

        #boilerplate initialization
        >>> from logging import DEBUG, INFO, WARNING
        >>> verbose_logger = get_logger("ver", verbosity_offset=-10)
        >>> standard_logger = get_logger("std")
        >>> laconic_logger = get_logger("lac", verbosity_offset=+10)

        dot overriding
        >>> for time in range(2):
        ...     verbose_logger.dot(dot_string="[more than a dot]")
        [more than a dot][more than a dot]

        no verbosity: always output
        >>> verbose_logger.dot() # doctest: +NORMALIZE_WHITESPACE
        .
        >>> standard_logger.dot() # doctest: +NORMALIZE_WHITESPACE
        .
        >>> laconic_logger.dot() # doctest: +NORMALIZE_WHITESPACE
        .

        verbosity=False -> DEBUG
        >>> verbose_logger.dot(verbosity=False)
        .
        >>> standard_logger.dot(verbosity=False)
        >>> laconic_logger.dot(verbosity=False)

        verbosity=True -> always print
        >>> verbose_logger.dot(verbosity=True)
        .
        >>> standard_logger.dot(verbosity=True)
        .
        >>> laconic_logger.dot(verbosity=True)
        .

        verbosity=PROGRESS for instance
        >>> verbose_logger.dot(verbosity=PROGRESS)
        .
        >>> standard_logger.dot(verbosity=PROGRESS)
        .
        >>> laconic_logger.dot(verbosity=PROGRESS)
        """
        output = False
        if verbosity in (True, None):
            # True: explicitely asked to spit the dot.
            # None, default value: always spit a dot.
            output = True
        elif verbosity is False:
            # Only spitting the dot if we're a very verbose logger
            output = self._offset < 0
        else:
            # Not None or a bool? expecting an int
            output = self._offset <= REFERENCE_LEVEL - verbosity
        if output:
            if dot_string is None:
                dot_string = self._dot_string
            self._set_out_type(DOT)
            # Dots bypass the Logger machinery and go straight to the
            # registered dot-capable files.
            for logfile in self._dot_logfiles:
                logfile.write(dot_string)

    def offset(self):
        """
        Returns
        -------
        integer: Current verbosity offset

        See also
        --------
        :func:`monologue.core.set_offset`
        :func:`monologue.core.add_to_offset`
        """
        return self._offset

    def set_offset(self, offset):
        """
        Sets verbosity offset above/below standard verbosity level.

        Parameters
        ----------
        offset: integer
            makes sense between -5 and +35

        See also
        --------
        :func:`monologue.core.offset`
        :func:`monologue.core.add_to_offset`

        >>> logger = get_logger("test.offset")
        >>> logger.set_offset(+10)
        >>> logger.offset()
        10
        >>> logger.getEffectiveLevel()
        25
        """
        self._offset = offset
        # Keep the wrapped Logger level in sync with the offset.
        self.logger.setLevel(offset + REFERENCE_LEVEL)

    def add_to_offset(self, value):
        """
        add an integer value to current verbosity offset

        Parameters
        ----------
        value: integer

        See also
        --------
        :func:`monologue.core.offset`
        :func:`monologue.core.set_offset`
        """
        self.set_offset(self._offset + value)

    def setLevel(self, level):
        """
        Gently overrides Logger.setLevel.
        This method is renamed at runtime (when building the class), and
        the docstring is replaced.
        """
        # Keep the dot-offset emulation consistent with the new level.
        self._offset = level - REFERENCE_LEVEL
        self.logger.setLevel(level)

    def progress_every(self, value):
        """
        parameters
        ----------
        value: int
            Configures ProgressAndLog.progress_step() to spit out an
            informative line
            - if <value> is < 1: never
            - else: once every for every <value> times

        #boilerplate initialization
        >>> logger = get_logger("test.progress_every")
        >>> logger.setLevel(PROGRESS)
        >>> logger.progress_reset()

        #testing progress alone: prevent dots
        >>> logger.dot_every(0)
        >>> logger.progress_every(1)
        >>> for count in range(3):
        ...     logger.progress_step()
        [test.progress_every] Iteration 1 done
        [test.progress_every] Iteration 2 done
        [test.progress_every] Iteration 3 done
        >>> logger.progress_reset()
        >>> logger.progress_every(1000)
        >>> for count in range(2000):
        ...     logger.progress_step()
        [test.progress_every] Iteration 1000 done
        [test.progress_every] Iteration 2000 done
        """
        self._progress_every = value

    def dot_every(self, value):
        """
        parameters
        ----------
        value: int
            Configures ProgressAndLog.progress_step() to spit out a dot
            - if <value> is < 1: never
            - else: once every for every <value> times

        #boilerplate initialization
        >>> logger = get_logger("test.dot_every")
        >>> logger.setLevel(PROGRESS)
        >>> logger.set_dot_string('x')

        #ensure indicators are blank
        >>> logger.progress_reset()
        >>> logger.dot_every(10)
        >>> for count in range(9):
        ...     logger.progress_step()
        >>> logger.progress_step()
        x
        >>> for count in range(90):
        ...     logger.progress_step()
        xxxxxxxxx
        """
        self._dot_every = value

    def set_dot_string(self, dot_string):
        """
        Set the string to be used to mark progression

        Parameters
        ----------
        dot_string: optional, string
            this string, likely "." or "x"... will be used this time only
        """
        self._dot_string = dot_string

    def progress_reset(self):
        """
        Call this to reset the number of iterations performed.
        Subsequent iterations will be numbered 1, 2 etc
        """
        self._iterations = 0
        # NOTE(review): this assigns the bound *method*
        # `percent_print_every`, not the `_percent_print_every` value —
        # looks like a bug (subsequent `<` comparisons against it would
        # misbehave); confirm intent before changing.
        self._next_percent_print = self.percent_print_every

    def _maybe_dot(self):
        """
        Puts a progress related dot if conditions are met.
        Method is related to `step()`
        """
        if self._dot_every > 0 \
                and self.logger.getEffectiveLevel() <= PROGRESS \
                and not self._iterations % self._dot_every:
            self.dot()

    def getEffectiveLevel(self):
        """
        FIXME: let's setattr this method instead of wrapping it
        """
        return self.logger.getEffectiveLevel()

    def _maybe_iteration_msg(self):
        """
        Outputs a progress related message if conditions are met.
        Method is related to `step()`
        """
        if self._progress_every < 1:
            return
        if not self._iterations % self._progress_every:
            message = "Iteration %d done" % self._iterations
            self.msg(message, verbosity=PROGRESS)

    def _maybe_percentage_msg(self):
        """
        Outputs progress percentage if conditions are met.
        Resets counters until next time.
        Method is related to `step()`
        """
        if self._percent_target <= 0:
            return
        current_percentage = 100 * (self._iterations / self._percent_target)
        if current_percentage < self._next_percent_print:
            return
        self.msg("%d%%" % self._next_percent_print)
        self._next_percent_print += self._percent_print_every

    def progress_step(self):
        """
        Call this every time you perform a loop.
        If a message or a dot needs to be spit every 1000 iterations,
        this function will take care.

        #boilerplate initialization
        >>> logger = get_logger("test.progress_step")
        >>> logger.setLevel(PROGRESS)
        >>> logger.set_dot_string('x')

        Testing dots alone
        ~~~~~~~~~~~~~~~~~~
        >>> for count in range(10):
        ...     logger.progress_step()
        xxxxxxxxxx

        #Keep next to previous test (dots alone)
        >>> logger.info('eat newline after xxxxxxxx')
        <BLANKLINE>
        [test.progress_step] eat newline after xxxxxxxx

        Testing progress alone
        ~~~~~~~~~~~~~~~~~~~~~~
        >>> logger.dot_every(0)
        >>> for count in range(90):
        ...     logger.progress_step()
        """
        self._iterations += 1
        # keep dot first, it's prettier.
        self._maybe_dot()
        self._maybe_iteration_msg()
        self._maybe_percentage_msg()

    def percent_target(self, value):
        """
        Call this to set the number of expected iterations
        Also call percent_print_every()

        >>> logger = get_logger("test.percent")
        >>> logger.percent_print_every(10)
        ...     # every 10 percent, requires a target
        >>> logger.percent_target(1000) # the scale. rename function?
        >>> logger.dot_every(0)
        >>> logger.progress_every(0)
        >>> for count in range(2000):
        ...     logger.progress_step()
        [test.percent] 0%
        [test.percent] 10%
        [test.percent] 20%
        [test.percent] 30%
        [test.percent] 40%
        [test.percent] 50%
        [test.percent] 60%
        [test.percent] 70%
        [test.percent] 80%
        [test.percent] 90%
        [test.percent] 100%
        [test.percent] 110%
        [test.percent] 120%
        [test.percent] 130%
        [test.percent] 140%
        [test.percent] 150%
        [test.percent] 160%
        [test.percent] 170%
        [test.percent] 180%
        [test.percent] 190%
        [test.percent] 200%
        """
        self._percent_target = value

    def progress_complete(self, verbosity=None):
        """
        Call this upon completion to print out a message with the number
        of performed iterations. Iterations counting will be reset

        verbosity has the same meaning as in ProgressAndLog.dot and
        ProgressAndLog.msg

        >>> logger = get_logger("test.progress_complete")

        # inhibit dots
        >>> logger.set_offset(+10)
        >>> for count in range(2000):
        ...     logger.progress_step()
        >>> logger.progress_complete()
        [test.progress_complete] Successfully completed 2000 iterations
        """
        self.msg("Successfully completed %d iterations" % self._iterations,
                 verbosity=verbosity)
        # Reset all progress counters for the next run.
        self._iterations = 0
        self._next_percent_print = _NEVER_PERCENT_VALUE
        self._percent_target = _NEVER_PERCENT_VALUE

    def percent_print_every(self, value):
        """
        We'll print some progress information every that percent
        """
        self._percent_print_every = value

    def _set_out_type(self, new):
        """
        As we don't want to mix progress dots and text on the same line,
        we insert a linebreak whenever the output type changes.

        Parameters
        ----------
        new: DOT or TEXT
        """
        # _OUT_TYPES is a module-level map: file -> last output kind.
        for logfile in self._dot_logfiles:
            if logfile not in _OUT_TYPES:
                _OUT_TYPES[logfile] = TEXT
                last_out = TEXT
            else:
                last_out = _OUT_TYPES[logfile]
            if new == last_out:
                continue
            elif new == TEXT:
                # Switching dots -> text: terminate the dot line.
                logfile.write(os.linesep)
            _OUT_TYPES[logfile] = new
def debug_exc_log(lg: logging.Logger, exc: Exception, msg: str = "Exception in RSS"):
    """Log *exc* with its traceback, but only when *lg* is effectively at
    DEBUG level or below.
    """
    if lg.getEffectiveLevel() > logging.DEBUG:
        return
    lg.exception(msg, exc_info=exc)
def show_log_level(logger: logging.Logger):
    """Helper to show threshold log level of a logger."""
    threshold = logger.getEffectiveLevel()
    message = 'Logger {n!r} level: {t}'.format(
        n=logger.name, t=logging.getLevelName(threshold))
    # Emit at the logger's own threshold so the message always passes.
    logger.log(threshold, message)
def ignore_warnings(logger: logging.Logger) -> int:
    """Ignore logging warnings.

    Temporarily raises *logger*'s threshold to ERROR and yields the
    previous effective level.

    Fix: the restore now runs in a ``finally`` block, so the original
    level is reinstated even when the guarded section raises (the
    original left the logger stuck at ERROR on exception).
    """
    log_lvl_buff = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
    try:
        yield log_lvl_buff
    finally:
        logger.setLevel(log_lvl_buff)
def debug_exc_log(lg: logging.Logger, exc: Exception, msg: str = None) -> None:
    """Logs an exception if logging is set to DEBUG level"""
    if lg.getEffectiveLevel() > logging.DEBUG:
        return
    # Default the message to the exception's own text.
    lg.exception(f"{exc}" if msg is None else msg, exc_info=exc)
def getEffectiveLevel(x):
    """Pretend to be at DEBUG while the enclosing object's debug flag is
    set; otherwise fall back to the stock Logger behaviour.
    """
    if self.debug:
        return DEBUG
    return Logger.getEffectiveLevel(x)
def getEffectiveLevel(self):
    """Report DEBUG whenever the application is in debug mode, deferring
    to the base Logger implementation otherwise.
    """
    return logging.DEBUG if app.debug else Logger.getEffectiveLevel(self)