Example #1
def test_keep_extra(writer):
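    # extra values set via configure() are kept when logging through opt()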
    logger.configure(extra=dict(test=123))
    logger.add(writer, format="{extra[test]}")
    logger.opt().debug("")
    logger.opt().log(50, "")

    assert writer.read() == "123\n123\n"
Example #2
def get_app() -> FastAPI:
    # Logger configuration
    logger.configure(handlers=[{
        "sink": sys.stdout,
        "level": envs.LOG_LEVEL,
        "format": envs.LOGURU_FORMAT
    }])

    # Level creation
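    # custom severities 37-39 sit between WARNING (30) and ERROR (40)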
    logger.level('REQUEST RECEIVED', no=37, color="<yellow>")
    logger.level('REQUEST DONE', no=38, color="<yellow>")
    logger.level('LOG ROUTE', no=39, color="<light-green>")

    async def startup_event():
        logger.info("Starting API")

    async def shutdown_event():
        logger.info("API shutdown")

    if envs.RUNNING_ENV == 'dev':
        # Logger output file
        logger.add("./logs/test.log",
                   level=0,
                   format=envs.LOGURU_FORMAT,
                   rotation='500 MB')
        logger.add("./logs/test_error.log",
                   level=40,
                   format=envs.LOGURU_FORMAT,
                   rotation='500 MB')

    # API Instance
    app = FastAPI(
        title='Plug and Play API',
        description="Project plug and play architecture.",
        version=__version__,
        root_path=envs.FASTAPI_ROOT_PATH,
        on_startup=[startup_event],
        on_shutdown=[shutdown_event],
    )

    # CORS
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Router
    app.include_router(example.router,
                       prefix='/example',
                       tags=['Route example'],
                       responses={**DEFAULT_RESPONSES_JSON})

    # API Modules
    Middleware(app)
    ExceptionHandler(app)

    return app
Example #3
def set_logger(filename="unnamed"):
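    """Configure loguru to write DEBUG and above to '<__logs_dir__>/<filename>.log'."""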
    log_dir = Path(__logs_dir__)
    if not log_dir.exists():
        log_dir.mkdir(exist_ok=True)

    log_format = (
        "<red>[{extra[base]}]</red>"
        "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
        "<level>{level: <8}</level> | "
        "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan>:<level>{message}</level>"
    )
    config = {
        "handlers": [
            # dict(sink=sys.stdout, format=log_format, level="DEBUG"),
            dict(
                sink=f"{log_dir.joinpath(filename)}.log",
                format=log_format,
                level="DEBUG",
            ),
        ],
        "extra": {
            "base": "unknown"
        },
    }
    logger.configure(**config)
    global __BASE_LOGGER
    __BASE_LOGGER = logger
Example #4
    def init_logger(cls, verbosity=None, logfile=None, prefix=''):
        if verbosity:
            cls.set_verbosity(verbosity)

        if logfile is None:
            logfile = os.path.join(__flow__, '.runtest.log')

        _logger.configure(
            handlers=[
                dict(sink=sys.stdout,
                     format='<level>{extra[prefix]}{message}</level>'),
                dict(sink=logfile),
            ],
            levels=[
                dict(name=LogLevels.NORMAL, no=60, color=''),
                dict(name=LogLevels.IMPORTANT, no=60, color='<b>'),
                dict(name=LogLevels.SUCCESS, no=60, color='<b><g>'),
                dict(name=LogLevels.FAILED, no=60, color='<b><r>'),
                dict(name=LogLevels.WARN, no=60, color='<b><y>'),
            ],
        )
        cls.logger = _logger.bind(timer=GlobalTimer(), prefix=prefix)
        # increase depth for methods out and _write
        cls.logger._depth = 2
        return cls.logger
Example #5
def setup_logging(config: Configuration) -> None:
    logging.basicConfig(handlers=[InterceptLoguruHandler()], level=0)

    if config.debug:
        logger.configure(
            handlers=[
                {
                    "sink": sys.stdout,
                    "level": "INFO",
                    "format": (
                        "<green>{time:YYYY-MM-DDTHH:mm:ss.SSS}</green> | <level>{level: <8}</level> | "
                        " <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level> | "
                        "<level>{extra!s}</level>"
                    ),
                    "enqueue": True,
                },
            ],
            patcher=exception_patcher,
        )
    else:
        logger.configure(
            handlers=[{
                "sink": sys.stdout,
                "serialize": True,
                "colorize": False,
                "level": "INFO",
                "enqueue": True,
                "backtrace": False,
            }],
            patcher=exception_patcher,
        )
Example #6
def init_logger(path,
                level='INFO',
                max_bytes=DEFAULT_MAX_BYTES,
                backup_count=5):
    # use a multiprocess-safe handler as the loguru sink
    sink = ConcurrentTimeRotatingFileHandler(path,
                                             maxBytes=max_bytes,
                                             backupCount=backup_count)
    log_config = {
        "handlers": [
            {
                "sink": sys.stdout,
                "format": LOG_FORMAT,
                "level": level,
                "colorize": False
            },
            # keep enqueue set to False: enqueue starts an internal thread that writes
            # records through a queue for multiprocess safety, which seems to conflict
            # with web servers such as gunicorn; investigate the exact cause later  --by lt
            {
                "sink": sink,
                "format": LOG_FORMAT,
                "level": level,
                "enqueue": False,
                "colorize": False
            },
        ]
    }
    logger.configure(**log_config)
Example #7
    def update_log_level(self, verbosity: Optional[int]):
        if verbosity is None:
            return
        self.verbosity = verbosity
        quiet_list = [
            "ERROR",
            "CRITICAL",
        ]
        loud_list = ["DEBUG", "TRACE"]
        verbosity_name: str
        if verbosity == 0:
            verbosity_name = "WARNING"
        elif verbosity > 0:
            verbosity_name = loud_list[min(len(loud_list), verbosity) - 1]
        else:
            verbosity_name = quiet_list[min(len(quiet_list), -verbosity) - 1]

        from loguru import logger
        import sys

        logger.remove()
        if self.verbosity is None:
            return
        logger.configure()
        if self.verbosity > 0:
            # colorize only when stderr is an interactive terminal
            logger.add(sink=sys.stderr,
                       level=verbosity_name,
                       colorize=sys.stderr.isatty())
        else:
            logger.add(sink=sys.stderr,
                       level=verbosity_name,
                       colorize=False,
                       format="{message}")
        logger.debug(f"Verbosity set to level {verbosity} ({verbosity_name})")
Example #8
def configure_logger(verbosity):
    """Configure the scaffoldgraph cli logger to use tqdm handler.

    Parameters
    ----------
    verbosity : int
        Select the output verbosity. 0 is the lowest verbosity
        'CRITICAL' and 4 is the highest verbosity 'DEBUG'. If
        < 0 or > 4 the maximum verbosity is selected.

    """
    config = {'handlers': []}
    logger.enable('scaffoldgraph')

    if verbosity == 0:
        tqdm_handler['sink'].level = logging.CRITICAL
        tqdm_handler['level'] = 'CRITICAL'
    elif verbosity == 1:
        tqdm_handler['sink'].level = logging.ERROR
        tqdm_handler['level'] = 'ERROR'
    elif verbosity == 2:
        tqdm_handler['sink'].level = logging.WARNING
        tqdm_handler['level'] = 'WARNING'
    elif verbosity == 3:
        tqdm_handler['sink'].level = logging.INFO
        tqdm_handler['level'] = 'INFO'
    elif verbosity == 4:
        tqdm_handler['sink'].level = logging.DEBUG
        tqdm_handler['level'] = 'DEBUG'
    else:  # if < 0 or > 4 is supplied set logger to max level (DEBUG)
        tqdm_handler['sink'].level = logging.DEBUG
        tqdm_handler['level'] = 'DEBUG'

    config["handlers"].append(tqdm_handler)
    logger.configure(**config)
Example #9
def test_add_using_bound(writer):
    logger.configure(extra={"a": -1})
    logger_bound = logger.bind(a=0)
    logger_bound.add(writer, format="{extra[a]} {message}")
    logger.debug("A")
    logger_bound.debug("B")

    assert writer.read() == "-1 A\n0 B\n"
Example #10
def test_reset_previous_handlers(writer):
    logger.add(writer, format="{message}")

    logger.configure(handlers=[])
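    # configure(handlers=...) removes all previously added handlers first, so the
    # empty list above detaches the writer and nothing gets logged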

    logger.debug("Test")

    assert writer.read() == ""
Example #11
def test_reset_previous_extra(writer):
    logger.configure(extra={"a": 123})
    logger.add(writer, format="{extra[a]}", catch=False)

    logger.configure(extra={})

    with pytest.raises(KeyError):
        logger.debug("Nope")
Example #12
def test_reset_previous_patcher(writer):
    logger.configure(patcher=lambda r: r["extra"].update(a=123))
    logger.add(writer, format="{extra[a]}", catch=False)

    logger.configure(patcher=lambda r: None)

    with pytest.raises(KeyError):
        logger.debug("Nope")
Example #13
def test_add_using_patched(writer):
    logger.configure(patcher=lambda r: r["extra"].update(a=-1))
    logger_patched = logger.patch(lambda r: r["extra"].update(a=0))
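    # the patcher attached with patch() takes precedence for logger_patched, while
    # the plain logger still gets the value set by the configure() patcher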
    logger_patched.add(writer, format="{extra[a]} {message}")
    logger.debug("A")
    logger_patched.debug("B")

    assert writer.read() == "-1 A\n0 B\n"
Example #14
def initial(level='DEBUG', logfile=None, stdout=True):
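    """Configure loguru with an optional stderr handler and/or a fresh UTF-8 log file."""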
    handlers = []
    if stdout:
        handlers.append(dict(sink=sys.stderr, level=level))
    if logfile:
        if os.path.isfile(logfile):
            os.remove(logfile)
        handlers.append(dict(sink=logfile, encoding='UTF-8', level=level))
    logger.configure(handlers=handlers)
Example #15
def test_override_configured(writer):
    logger.configure(patcher=lambda r: r["extra"].update(a=123, b=678))
    logger2 = logger.patch(lambda r: r["extra"].update(a=456))

    logger2.add(writer, format="{extra[a]} {extra[b]} {message}")

    logger2.debug("!")

    assert writer.read() == "456 678 !\n"
Example #16
def test_extra(writer):
    extra = {"a": 1, "b": 9}

    logger.add(writer, format="{extra[a]} {extra[b]}")
    logger.configure(extra=extra)

    logger.debug("")

    assert writer.read() == "1 9\n"
Example #17
def test_activation(writer):
    activation = [("tests", False), ("tests.test_configure", True)]

    logger.add(writer, format="{message}")
    logger.configure(activation=activation)

    logger.debug("Logging")

    assert writer.read() == "Logging\n"
Example #18
def test_override_configured(writer):
    logger.configure(extra={"a": 1})
    logger2 = logger.bind(a=2)

    logger2.add(writer, format="{extra[a]} {message}")

    logger2.debug("?")

    assert writer.read() == "2 ?\n"
Example #19
def base_logger():
    """Initialize hyperglass logging instance."""
    _loguru_logger.remove()
    _loguru_logger.add(_get_rich(),
                       format=_FMT_BASIC,
                       level="INFO",
                       enqueue=True)
    _loguru_logger.configure(levels=_LOG_LEVELS)
    return _loguru_logger
Example #20
def init_log():

    if SYSTEM == 'Windows':
        rotation = None
    elif SYSTEM == 'Linux':
        rotation = '1 day'
    else:
        rotation = None

    _format = '{time:YY-MM-DD HH:mm:ss.SSS} - {process.name} - {thread.name} - {function} - {line} - {level} - {message}'
    logger.remove()
    handlers = [
        {
            # 'sink': 'log/error-{time:YYMMDD}.log',
            'sink': 'log/error.log',
            # 'sink': write,
            'format': _format,
            'level': 'ERROR',
            'rotation': rotation,
            'enqueue': True,
            'encoding': 'utf-8',
            'backtrace': True
        },
        {
            'sink': 'log/log.log',
            'format': _format,
            'level': 'INFO',
            'rotation': rotation,
            'enqueue': True,
            'encoding': 'utf-8',
            'backtrace': True
        }
    ]

    if DEBUG:
        handlers.append({
            'sink': 'log/debug.log',
            'format': _format,
            'level': 'DEBUG',
            'rotation': rotation,
            'enqueue': True,
            'encoding': 'utf-8',
            'backtrace': True
        })

    handlers.append({
        'sink': logging.StreamHandler(),
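        # a standard logging.Handler instance is also accepted as a loguru sink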
        'format': _format,
        'level': 'DEBUG',
        'enqueue': True,
        'backtrace': True
    })

    logger.configure(handlers=handlers)

    return logger
Example #21
def setup_app_logging(config: Settings) -> None:
    """Prepare custom logging for our application."""
    LOGGERS = ("uvicorn.asgi", "uvicorn.access")
    for logger_name in LOGGERS:
        logging_logger = logging.getLogger(logger_name)
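        # NOTE: the fetched logger is left untouched here; typically its handlers
        # would be replaced (e.g. with an intercept handler) so uvicorn records
        # are routed through loguru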

    logger.configure(handlers=[{
        "sink": sys.stderr,
        "level": config.logging.LOGGING_LEVEL
    }])
Example #22
def test_dont_reset_previous_levels(writer):
    logger.level("abc", no=30)

    logger.configure(levels=[])
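    # configure(levels=...) only adds or updates levels; previously registered
    # levels such as "abc" are never removed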

    logger.add(writer, format="{level} {message}")

    logger.log("abc", "Test")

    assert writer.read() == "abc Test\n"
Example #23
def configure_logging(log_level):
    if log_level not in ALLOWED_LOG_LEVELS:
        raise ValueError(f"Invalid LOG_LEVEL {log_level}")
    logger.configure(handlers=[
        dict(
            sink=sys.stderr,
            level=log_level,
        ),
    ])
    logging.basicConfig(level=log_level, handlers=[InterceptHandler()])
Example #24
def init(project_id, curr_date):
    env = os.getenv("AIBEE_IDC", "")
    log_level = "DEBUG"
    if env == "dev":
        log_level = "DEBUG"
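    # NOTE: both branches leave log_level at "DEBUG", so the environment check
    # currently has no effect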
    logger.configure(handlers=[{
        "sink": sys.stdout,
        "level": log_level,
    }])
    logger.add("{}/{}_{}.log".format(const.LOG_DIR, project_id, curr_date))
Example #25
def test_dont_reset_by_default(writer):
    logger.configure(extra={"a": 1}, patcher=lambda r: r["extra"].update(b=2))
    logger.level("b", no=30)
    logger.add(writer, format="{level} {extra[a]} {extra[b]} {message}")

    logger.configure()
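    # configure() without arguments changes nothing: the handler, custom level,
    # extra dict and patcher set above all remain in place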

    logger.log("b", "Test")

    assert writer.read() == "b 1 2 Test\n"
Example #26
def test_contextualize_after_configure(writer):
    logger.add(writer, format="{message} {extra[foobar]}")

    with logger.contextualize(foobar="baz"):
        logger.configure(extra={"foobar": "baz_2"})
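        # values bound via contextualize() take precedence over extra set by
        # configure(), so "A" is still rendered with "baz"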
        logger.info("A")

    logger.info("B")

    assert writer.read() == "A baz\nB baz_2\n"
Example #27
def test_dont_reset_by_default(writer):
    logger.configure(extra={"a": 1})
    logger.level("b", no=30)
    logger.add(writer, format="{level} {extra[a]} {message}")

    logger.configure()

    logger.log("b", "Test")

    assert writer.read() == "b 1 Test\n"
Example #28
def clear_log():
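    """Recreate the log file, reinstall both handlers and redirect to the log view."""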
    Env.log_file.unlink()
    Env.log_file.touch()

    logger.configure(handlers=[
        dict(sink=sys.stdout),
        dict(sink=Env.log_file, colorize=True)
    ])

    logger.info('--- log file cleared ---')
    return redirect('/log')
Example #29
def main():
    """ main activity of code"""
    args = parse_cmd_arguments()
    console_level, log_file_level = set_the_level(args.debug)
    if console_level:
        console_handler = dict(sink=sys.stderr, format=FORMATTER, level=console_level)
        file_handler = dict(sink=LOG_FILE, format=FORMATTER, backtrace=True, level=log_file_level)
        logger.configure(handlers=[console_handler, file_handler])
    data = load_rentals_file(args.input)
    new_data = calculate_additional_fields(data)
    save_to_json(args.output, new_data)
Example #30
def setup_logger() -> None:
    """Configure loguru logger."""
    L_LEVEL = logging.DEBUG if settings.LOGS.LEVEL == "debug" else logging.INFO
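    # both the stdlib handler and the loguru sinks below accept numeric level values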
    logging.basicConfig(handlers=[InterceptHandler(level=L_LEVEL)],
                        level=L_LEVEL)
    logger.configure(handlers=[{
        "sink": sys.stderr,
        "level": L_LEVEL
    }, {
        "sink": settings.LOGS.FILE
    }])