Example #1
    def get_crystals(self):
        log_filter = KyberContextFilter()
        handler = RichHandler()
        handler.setFormatter(
            logging.Formatter(
                "[{name}] Agent: {agent} User: {agent_username} => {message}",
                style="{"))

        for root, _, files in os.walk(self.crystal_location):
            for crystal in files:
                crystal_file = self.crystal_location / root / crystal
                if (crystal_file.suffix == ".py"
                        and not crystal_file.stem == "example"
                        and not crystal_file.stem.startswith("__")
                        and crystal_file.name != "__init__.py"):
                    try:
                        c = self.load(crystal_file)
                        c.deathstar = self.deathstar
                        c.log = logging.getLogger(
                            f"deathstar.kybercrystals.{crystal_file.stem}")
                        c.log.propagate = False
                        c.log.addHandler(handler)
                        c.log.addFilter(log_filter)

                        self.loaded.append(c)
                    except Exception as e:
                        log.error(f'Failed loading "{crystal_file}": {e}')

        log.debug(f"Loaded {len(self.loaded)} kyber crystal(s)")
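The "{agent}" and "{agent_username}" fields in the formatter above only resolve if the filter attaches those attributes to every record. A minimal sketch of what KyberContextFilter might look like (the fallback values are illustrative, not the project's actual implementation):

import logging

class KyberContextFilter(logging.Filter):
    """Hypothetical sketch: attach agent context so '{agent}' and
    '{agent_username}' in the formatter always resolve."""

    def filter(self, record: logging.LogRecord) -> bool:
        record.agent = getattr(record, "agent", "unknown")
        record.agent_username = getattr(record, "agent_username", "unknown")
        return True  # never drop the record, only enrich it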
Example #2
def test_exception():
    console = Console(
        file=io.StringIO(),
        force_terminal=True,
        width=140,
        color_system=None,
        _environ={},
    )
    handler_with_tracebacks = RichHandler(console=console,
                                          enable_link_path=False,
                                          rich_tracebacks=True)
    formatter = logging.Formatter("FORMATTER %(message)s %(asctime)s")
    handler_with_tracebacks.setFormatter(formatter)
    log.addHandler(handler_with_tracebacks)
    log.error("foo")
    try:
        1 / 0
    except ZeroDivisionError:
        log.exception("message")

    render = handler_with_tracebacks.console.file.getvalue()
    print(render)

    assert "FORMATTER foo" in render
    assert "ZeroDivisionError" in render
    assert "message" in render
    assert "division by zero" in render
Example #3
    def verbosity(self, level: Union[str, int]):
        """
        Sets logging configuration for scvi based on chosen level of verbosity.

        Sets "scvi" logging level to `level`.
        If "scvi" logger has no StreamHandler, add one.
        Else, set its level to `level`.
        """
        self._verbosity = level
        scvi_logger.setLevel(level)
        has_streamhandler = False
        for handler in scvi_logger.handlers:
            if isinstance(handler, RichHandler):
                handler.setLevel(level)
                logger.info(
                    "'scvi' logger already has a StreamHandler, set its level to {}."
                    .format(level))
                has_streamhandler = True
        if not has_streamhandler:
            console = Console(force_terminal=True)
            if console.is_jupyter is True:
                console.is_jupyter = False
            ch = RichHandler(show_path=False, console=console, show_time=False)
            formatter = logging.Formatter("%(message)s")
            ch.setFormatter(formatter)
            scvi_logger.addHandler(ch)
            logger.debug(
                "Added StreamHandler with custom formatter to 'scvi' logger.")
Example #4
def cli(gom, dry_run, debug):
    """gom is a command line tool for interacting with your github
    organizations.
    """
    if debug:
        logging.basicConfig(level=logging.DEBUG,
                            format="%(message)s",
                            datefmt="[%X] ",
                            handlers=[RichHandler()])
        # Uncomment this if you want to see the raw requests and responses by the PyGithub API
        # Warning: it produces a TON of debug output
        # from github import enable_console_debug_logging
        # enable_console_debug_logging()
    else:
        logging.basicConfig(level=logging.INFO,
                            format="%(message)s",
                            datefmt="[%X] ",
                            handlers=[RichHandler()])
    if os.environ.get('GOM_GITHUB_TOKEN') is None:
        click.secho(
            "Error: missing required environment variable: GOM_GITHUB_TOKEN",
            fg='red')
        exit(1)

    # Create a GOM object and remember it as the context object. From this
    # point onwards other commands can refer to it by using the
    # @pass_gom decorator.
    gom.obj = GOM(dry_run)
Example #5
def configure_logging(config):
    """Configure the root logger based on user config."""
    rootlogger = logging.getLogger()
    while rootlogger.handlers:
        rootlogger.handlers.pop()

    try:
        if config["path"]:
            logfile_path = os.path.expanduser(config["path"])
        else:
            logfile_path = config["path"]
    except KeyError:
        logfile_path = DEFAULT_LOG_FILENAME

    if logfile_path:
        logdir = os.path.dirname(os.path.realpath(logfile_path))
        if not os.path.isdir(logdir):
            os.makedirs(logdir)

    log_level = get_logging_level(config.get("level", "info"))
    rootlogger.setLevel(log_level)
    formatter_str = set_formatter_string(config)
    formatter = logging.Formatter(formatter_str)
    handler = None

    if config.get("rich") is not False:
        handler = RichHandler(
            rich_tracebacks=True,
            show_time=config.get("timestamp", True),
            show_path=config.get("extended", True),
        )

    if logfile_path:
        file_handler = RotatingFileHandler(
            logfile_path, maxBytes=config.get("file-size", 50e6)
        )
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        rootlogger.addHandler(file_handler)

    if config.get("console"):
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

    # If we still don't have a handler, assume the user wants logging
    # switched off and log only critical errors.
    if not handler:
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        log_level = get_logging_level("critical")

    if config.get("filter") and handler:
        handler.addFilter(ParsingFilter(config, config["filter"]))
    if handler:
        handler.setLevel(log_level)
        rootlogger.addHandler(handler)

    _LOGGER.info("=" * 40)
    _LOGGER.info(_("Started opsdroid %s."), __version__)
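All configuration keys read above (path, level, rich, timestamp, extended, file-size, console, filter) come from the user config. A minimal sketch of a config dict that exercises the Rich console branch and the rotating-file branch; the values are illustrative only:

config = {
    "level": "debug",                   # passed to get_logging_level()
    "path": "~/.opsdroid/output.log",   # target of the RotatingFileHandler
    "rich": True,                       # keep the RichHandler console output
    "timestamp": True,                  # show_time on the RichHandler
    "extended": False,                  # hide the path column
    "console": False,                   # don't swap in a plain StreamHandler
}
configure_logging(config)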
Example #6
def test_stderr_and_stdout_are_none(monkeypatch):
    # This test is specifically to handle cases when using pythonw on
    # windows and stderr and stdout are set to None.
    # See https://bugs.python.org/issue13807

    monkeypatch.setattr("sys.stdout", None)
    monkeypatch.setattr("sys.stderr", None)

    console = Console(_environ={})
    target_handler = RichHandler(console=console)
    actual_record: Optional[logging.LogRecord] = None

    def mock_handle_error(record):
        nonlocal actual_record
        actual_record = record

    target_handler.handleError = mock_handle_error
    log.addHandler(target_handler)

    try:
        1 / 0
    except ZeroDivisionError:
        log.exception("message")

    finally:
        log.removeHandler(target_handler)

    assert actual_record is not None
    assert "message" in actual_record.msg
Example #7
    def verbosity(self, level: Union[str, int]):
        """
        Sets logging configuration for scvi based on chosen level of verbosity.

        If "scvi" logger has no StreamHandler, add one.
        Else, set its level to `level`.

        Parameters
        ----------
        level
            Sets "scvi" logging level to `level`
        force_terminal
            Rich logging option, set to False if piping to file output.
        """
        self._verbosity = level
        scvi_logger.setLevel(level)
        if len(scvi_logger.handlers) == 0:
            console = Console(force_terminal=True)
            if console.is_jupyter is True:
                console.is_jupyter = False
            ch = RichHandler(show_path=False, console=console, show_time=False)
            formatter = logging.Formatter("%(message)s")
            ch.setFormatter(formatter)
            scvi_logger.addHandler(ch)
        else:
            scvi_logger.setLevel(level)
Example #8
def setup():
    sys.setrecursionlimit(2000)  # Default is ~900

    rich_handler = RichHandler()
    rich_handler.setFormatter(Formatter("%(message)s"))

    basicConfig(level="DEBUG", datefmt="[%X]", handlers=[rich_handler])
Example #9
def set_rich_logger(config_logger, verbosity):
    """Will set the RichHandler of the logger.

    Parameter
    ----------
    config_logger :class:
        Config object of the logger.
    """
    theme = _parse_theme(config_logger)
    global console
    console = Console(theme=theme)
    # These keywords are highlighted specially.
    RichHandler.KEYWORDS = [
        "Played",
        "animations",
        "scene",
        "Reading",
        "Writing",
        "script",
        "arguments",
        "Invalid",
        "Aborting",
        "module",
        "File",
        "Rendering",
        "Rendered",
    ]
    rich_handler = RichHandler(
        console=console, show_time=config_logger.getboolean("log_timestamps"))
    global logger
    rich_handler.setLevel(verbosity)
    logger.addHandler(rich_handler)
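Note that KEYWORDS is assigned on the RichHandler class itself, so the highlight list above applies to every RichHandler created afterwards. Recent versions of rich also accept a per-instance keywords argument; a sketch of that alternative (word list abbreviated):

rich_handler = RichHandler(
    console=console,
    show_time=config_logger.getboolean("log_timestamps"),
    keywords=["Played", "Rendering", "Rendered"],  # highlights only this handler
)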
Example #10
def app(ctx, strategy, verbose):
    """
    Automate and maintain a cryptocurrency-based portfolio tracking the market index.

    STRATEGY: path to the .toml strategy file - see README for more info

    Run the script without any commands to start an interactive shell.
    """

    # configure logging for the application
    log = logging.getLogger()
    log.setLevel(logging.INFO if not verbose else logging.DEBUG)
    rich_handler = RichHandler()
    rich_handler.setFormatter(
        logging.Formatter(fmt="%(message)s", datefmt="[%X]"))
    log.addHandler(rich_handler)
    log.propagate = False

    # initialise application
    data = toml.load(strategy)
    if not validate_strategy(data):
        sys.exit()

    currency = data["currency"]
    portfolio = Portfolio(data["portfolio"], currency)
    exchange_platform = data["exchange"]["platform"]
    exchange = EXCHANGES[exchange_platform](data["exchange"]["key"],
                                            data["exchange"]["secret"])
    with console.status("[bold green]Connecting to exchange..."):
        portfolio.connect(exchange)
    ctx.obj = State(portfolio, exchange, currency)
Example #11
File: logging.py Project: ggirelli/ifpd2
def add_log_file_handler(path: str, logger_name: Optional[str] = None) -> None:
    if os.path.isdir(path):
        raise AssertionError
    log_dir = os.path.dirname(path)
    if not (os.path.isdir(log_dir) or log_dir == ""):
        raise AssertionError
    fh = RichHandler(console=Console(file=open(path, mode="w+")), markup=True)
    fh.setLevel(logging.INFO)
    logging.getLogger(logger_name).addHandler(fh)
    logging.getLogger(logger_name).info(f"[green]Log to[/]: '{path}'")
Example #12
def setup_logging(level=logging.INFO):
    logger = logging.getLogger()

    logger.setLevel(level)
    formatter = logging.Formatter("%(message)s")

    stream_handler = RichHandler(show_time=False, rich_tracebacks=True)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    return logger
Example #13
    def reset_logging_handler(self):
        """
        Resets "scvi" log handler to a basic RichHandler().

        This is useful if piping outputs to a file.
        """
        scvi_logger.removeHandler(scvi_logger.handlers[0])
        ch = RichHandler(show_path=False, show_time=False)
        formatter = logging.Formatter("%(message)s")
        ch.setFormatter(formatter)
        scvi_logger.addHandler(ch)
Example #14
def get_logger(module_name):
    logger = logging.getLogger(module_name)
    handler = RichHandler(rich_tracebacks=True,
                          console=console,
                          tracebacks_show_locals=True)
    handler.setFormatter(
        logging.Formatter(
            "[ %(threadName)s:%(funcName)s:%(lineno)d ] - %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
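The handler above writes to a module-level console defined elsewhere in the project; a minimal sketch of such an object (sending output to stderr is an assumption, not necessarily the original choice):

from rich.console import Console

# Shared console for all handlers; stderr keeps stdout free for program output.
console = Console(stderr=True)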
Example #15
def rich_formatter(date_format: str = None,
                   stream: IO[str] = None,
                   rich_tracebacks: bool = False,
                   **_: Any) -> logging.Handler:
    handler = RichHandler(
        console=Console(file=stream or sys.stderr),
        log_time_format=date_format or DateFormat.rich.value,
        rich_tracebacks=rich_tracebacks,
    )
    formatter = logging.Formatter("%(message)s")
    handler.setFormatter(formatter)
    return handler
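A hedged usage sketch for the factory above; the logger name, level, and date format are illustrative:

import logging
import sys

log = logging.getLogger("myapp")
log.setLevel(logging.DEBUG)
log.addHandler(rich_formatter(date_format="[%X]", stream=sys.stdout,
                              rich_tracebacks=True))
log.debug("rich handler attached")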
Example #16
def setup_custom_logger(name):
    formatter = logging.Formatter(fmt="{message:s}", style="{")
    handler = RichHandler(show_time=False,
                          markup=True,
                          rich_tracebacks=True,
                          show_path=False)
    handler.setFormatter(formatter)

    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.addFilter(DuplicateFilter())
    return logger
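Because the handler is created with markup=True, messages can carry Rich markup tags. A small usage sketch (logger name and message are illustrative):

log = setup_custom_logger("my_tool")
log.info("[bold green]setup complete[/bold green]")  # tags rendered thanks to markup=True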
Example #17
def setup_logger(level="INFO", logfile=None):
    """Setup a logger that uses RichHandler to write the same message both in stdout
    and in a log file called logfile. Level of information can be customized and
    dumping a logfile is optional.
    """
    logger = logging.getLogger()

    # Set up level of information
    possible_levels = ["INFO", "DEBUG"]
    if level not in possible_levels:
        raise ValueError(
            "Passed wrong level for the logger. Allowed levels are: {}".format(
                ', '.join(possible_levels)))
    logger.setLevel(getattr(logging, level))

    formatter = logging.Formatter("%(message)s")

    # Set up stream handler (for stdout)
    stream_handler = RichHandler(show_time=False, rich_tracebacks=True)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    # Set up file handler (for logfile)
    if logfile:
        file_handler = RichHandler(show_time=False,
                                   rich_tracebacks=True,
                                   console=Console(file=open(logfile, "wt")))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger
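A short usage sketch; the log file name is illustrative:

logger = setup_logger(level="DEBUG", logfile="analysis.log")
logger.info("this message goes to stdout and to analysis.log")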
Example #18
def main():
    LOG_FILENAME = "chortest.log"
    log_file_handler = FileHandler(LOG_FILENAME)
    log_file_handler.setFormatter(
        Formatter("[%(asctime)s] - %(levelname)s - %(message)s"))
    rich_handler = RichHandler()
    rich_handler.setFormatter(Formatter("%(message)s"))

    basicConfig(level="DEBUG",
                datefmt="[%X]",
                handlers=[rich_handler, log_file_handler])

    # try:
    app(prog_name="chortest")
Example #19
def test_exception_with_extra_lines():
    console = Console(
        file=io.StringIO(),
        force_terminal=True,
        width=140,
        color_system=None,
        _environ={},
    )
    handler_extra_lines = RichHandler(
        console=console,
        enable_link_path=False,
        markup=True,
        rich_tracebacks=True,
        tracebacks_extra_lines=5,
    )
    log.addHandler(handler_extra_lines)

    try:
        1 / 0
    except ZeroDivisionError:
        log.exception("message")

    render = handler_extra_lines.console.file.getvalue()
    print(render)

    assert "ZeroDivisionError" in render
    assert "message" in render
    assert "division by zero" in render
Example #20
def setup_custom_logger(filename: str = "", debug: bool = False):
    """Creates logger and sets the levels."""

    logger = logging.getLogger(__name__)

    # Config file logger
    if filename != "":
        try:
            original_umask = os.umask(0)  # User file-creation mode mask
            file_handler = logging.FileHandler(filename=filename)
            fh_formatter = logging.Formatter(
                "%(asctime)s::%(levelname)s::" +
                "%(name)s::%(lineno)d::%(message)s")
            file_handler.setFormatter(fh_formatter)
            file_handler.setLevel(logging.DEBUG)
            logger.addHandler(file_handler)
        except OSError as ose:
            sys.exit(f"Logging to file failed: {ose}")
        finally:
            os.umask(original_umask)

    # Config stream logger
    # if debug:
    try:
        richhandler = RichHandler(
            rich_tracebacks=True,
            log_time_format="[%Y-%m-%d %H:%M:%S]",
            level=logging.DEBUG if debug else logging.WARNING,
        )
        logger.addHandler(richhandler)

    except OSError as ose:
        sys.exit(f"Logging to console failed: {ose}")

    return logger
Example #21
def main():
    logging.basicConfig(level="INFO",
                        format="%(message)s",
                        datefmt="[%X]",
                        handlers=[RichHandler()])

    parser = ArgumentParser()
    parser.add_argument(
        "--delete",
        "-d",
        help="When a file is compressed, delete the original file",
        action="store_true",
    )
    parser.add_argument(
        "--threshold",
        "-t",
        type=float,
        default=DEFAULT_THRESHOLD,
        help=
        "Fraction of file size under which the file will be compressed (default: {})"
        .format(DEFAULT_THRESHOLD),
    )
    parser.add_argument("file_list", nargs="+")
    args = parser.parse_args()

    filenames = [Path(x) for x in args.file_list]

    for cur_filename in filenames:
        process_file(cur_filename,
                     threshold=args.threshold,
                     delete=args.delete)
Example #22
def main():
    ''' Command line entry point
    '''

    # handle user interrupt
    signal.signal(signal.SIGTERM, handle_interrupt)
    signal.signal(signal.SIGINT, handle_interrupt)

    args = parse_commands()

    if args.loud or LOUD:
        level = logging.INFO
    else:
        level = logging.WARNING

    logging.basicConfig(
        level=level,
        format='[dim]%(name)s[/dim]\t%(message)s',
        handlers=[RichHandler(
            markup=True,
            show_path=SHOW_PATH,
        )])

    logger.info('Verbosity turned on')

    call_func(args)
Example #23
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        epilog=get_command_list(),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("command", type=str, nargs=1, choices=COMMANDS.keys())
    parser.add_argument("--loglevel", type=str, default="INFO")
    args, extra = parser.parse_known_args()

    # Set up a console logger using Rich. Note that only a single basicConfig
    # call should run here: basicConfig is a no-op once the root logger already
    # has handlers, so a plain call before this one would prevent the
    # RichHandler from being installed.
    logging.basicConfig(
        level=args.loglevel,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler()],
    )

    try:
        if args.command[0] in COMMANDS:
            COMMANDS[args.command[0]]["function"](args, *extra)
    except Exception as e:
        logger.exception(e)
        raise
Example #24
def main(
        newick_tree_input: Path,
        lineage_report: Path,
        aa_mutation_matrix: Path = typer.Option(None),
        leaflist: Path = typer.Option(Path('leaflist')),
        metadata_output: Path = typer.Option(Path('metadata.merged.tsv')),
):
    from rich.traceback import install
    install(show_locals=True, width=120, word_wrap=True)
    logging.basicConfig(
        format="%(message)s",
        datefmt="[%Y-%m-%d %X]",
        level=logging.INFO,
        handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
    )
    df_lineage_report = pd.read_csv(lineage_report, index_col=0)
    if aa_mutation_matrix:
        df_aa_change = pd.read_table(aa_mutation_matrix, index_col=0)
        df_out = pd.concat([df_lineage_report, df_aa_change], axis=1)
    else:
        df_out = df_lineage_report
    df_out.to_csv(metadata_output, sep='\t', index=True)

    tree = Phylo.read(newick_tree_input, 'newick')
    with open(leaflist, 'w') as fout:
        for node in tree.get_terminals():
            fout.write(f'{node.name}\n')
Example #25
def get_logger(level: str = "INFO"):
    fmt = "%(message)s"
    logging.basicConfig(
        level=level, format=fmt, datefmt="[%X]", handlers=[RichHandler(markup=True)]
    )
    logger = logging.getLogger("rich")
    return logger
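A usage sketch for the helper above; since the handler is created with markup=True, the message may contain Rich markup (level and message are illustrative):

log = get_logger("DEBUG")
log.info("[bold cyan]starting run[/bold cyan]")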
Example #26
def main(
    sequences: Path,
    fasta_output: Path = typer.Option(Path('input_sequences.correctedID.fasta'),
                                          help='FASTA Sequences with correct ID for Pangolin Analysis.'),
):
    from rich.traceback import install
    install(show_locals=True, width=200, word_wrap=True)
    logging.basicConfig(
        format="%(message)s",
        datefmt="[%Y-%m-%d %X]",
        level=logging.INFO,
        handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True, locals_max_string=200)],
    )
    with open(fasta_output, 'w') as fout:
        if '.gz' in Path(sequences).suffixes:
            logging.info(f'Input Sequences {sequences} in gz format provided')
            with gzip.open(sequences, 'rt') as fin:
                for name, seq in SimpleFastaParser(fin):
                    header = re.sub(r'[\|\s].*$', "", name)
                    fout.write(f'>{header}\n{seq}\n')
        else:
            logging.info(f'Input Sequences {sequences} in unzip format provided')
            with open(sequences, 'rt') as fin:
                for name, seq in SimpleFastaParser(fin):
                    header = re.sub(r'[\|\s].*$', "", name)
                    fout.write(f'>{header}\n{seq}\n')
Example #27
def main():
    options = getargparser().parse_args()
    console_handler = RichHandler(show_time=False)
    if options.log:
        logging.basicConfig(level=logging.DEBUG,
                            format="%(funcName)s:%(levelname)s:%(message)s",
                            filename=options.log,
                            filemode="w")
        console = console_handler
        console.setLevel(logging.WARNING)
        console.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(console)
    else:
        logging.basicConfig(level=logging.INFO,
                            format="%(message)s",
                            handlers=[console_handler])
    rules = download_config(options.archives)
    if logger.isEnabledFor(logging.INFO):
        logger.info('Rules:\n%s', pformat(rules))
    page_data = per_documents_data(options.document_metadata)
    for page in track(page_data, description='Analyzing images ...'):
        page.update(
            find_allowed_facsimile(options.image_root, page["img"],
                                   rules.get(page["repo"], {}))._asdict())
    if options.output:
        write_json(page_data, options.output)

    writer = csv.DictWriter(options.csv, fieldnames=list(page_data[0]))
    writer.writeheader()
    writer.writerows(page_data)
    options.csv.close()
Example #28
def setup_logging(level='info'):
    """Initialize logging settings."""
    from logging import basicConfig, NOTSET, DEBUG, INFO
    from logging import WARNING, ERROR, CRITICAL
    from rich.console import Console
    from rich.logging import RichHandler
    import os

    console = Console(width=160)

    levels = {
        'notset': NOTSET,
        'debug': DEBUG,
        'info': INFO,
        'warning': WARNING,
        'error': ERROR,
        'critical': CRITICAL,
    }

    if level.lower() in ['debug', 'error']:
        os.environ['GRPC_VERBOSITY'] = level.upper()

    level = levels.get(level.lower(), levels['notset'])

    basicConfig(level=level,
                format='%(message)s',
                datefmt='[%X]',
                handlers=[RichHandler(console=console)])
Example #29
def _configure_logger(logger, show_level=True):
    """ Provide the given logger with the most basic configuration possible to be used.

    Args:
        logger (logging.Logger): Logger to be configured
        show_level (bool): Whether to display the logging level in the record. Defaults to True
    """
    state = get_current_context().obj

    level = state.verbosity
    logger.setLevel(level)

    logger.propagate = False

    handler = RichHandler(level=level,
                          console=console,
                          show_level=show_level,
                          show_path=False,
                          enable_link_path=False,
                          markup=True,
                          rich_tracebacks=True,
                          tracebacks_show_locals=True,
                          log_time_format=_TIME_FORMAT)

    logger.handlers = []
    logger.addHandler(handler)
Example #30
def main():
    parser = ArgumentParser(prog="build_hdf5_database.py")
    parser.add_argument(
        "--database-name",
        "-d",
        type=str,
        default=DEFAULT_DATABASE_NAME,
        help="""Name of the file that will contain the database.
        The default is {default}""".format(default=DEFAULT_DATABASE_NAME),
    )
    parser.add_argument(
        "--start-from-scratch",
        action="store_true",
        default=False,
        help="""If true, any existing database will be removed and a new one will
        be created from scratch. CAUTION: this might take a lot of time!""",
    )
    parser.add_argument(
        "--update-hdf5",
        default=False,
        action="store_true",
        help=
        """If specified, MJD timestamps that are not found in HDF5 files will
        be saved back in the files. This requires write-access to the HDF5 files.""",
    )
    parser.add_argument("path",
                        type=str,
                        help="Path where the HDF5 files are stored")
    args = parser.parse_args()

    path = Path(args.path)

    log.basicConfig(level="INFO",
                    format="%(message)s",
                    handlers=[RichHandler()])

    log.info(
        f'looking for a database in "{path}" with name "{args.database_name}"')

    db_path = path / args.database_name
    if db_path.is_file():
        log.info(f'an existing database has been found in "{path}"')

        if args.start_from_scratch:
            log.info(
                '"--start-from-scratch" was specified, so I am removing the database'
            )
            db_path.unlink()
            log.info(f'database "{db_path}" was removed from disk')

    log.info(f"going to scan {path} for HDF5 files…")
    ds = DataStorage(
        path,
        database_name=args.database_name,
        update_database=True,
        update_hdf5=args.update_hdf5,
    )
    log.info(
        "the database has been updated and now contains {} entries".format(
            len(ds.get_list_of_files())))