Esempio n. 1
0
def main():
    """Initialize logging, reset the database, ingest all CSV data sets,
    then display available products and the rentals for product 'prd002'.

    Ensures the application creates an empty customers.db database if one
    doesn't exist when the app is first run.
    """
    # Logging setup: warnings and up to stdout, info and up to a log file.
    logger.add(stdout, level='WARNING')
    logger.add("logfile_{time}.txt", level='INFO')
    logger.enable(__name__)

    # Start from a clean slate: drop any existing collections.
    with Connection():
        util_drop_all()

    # Run each ingest routine against its CSV file.
    for ingest, filename in (
        (ingest_customer_csv, CUST_CSV_FILENAME),
        (ingest_product_csv, PROD_CSV_FILENAME),
        (ingest_rental_csv, RNTL_CSV_FILENAME),
    ):
        ingest(CSV_PATH_DBG + filename)

    # Show the resulting state of the database.
    print(show_available_products())
    print(show_rentals('prd002'))
Esempio n. 2
0
def main_callback(
    ctx: typer.Context,
    # Target a single light by index (default: the first light, id 0).
    light_id: int = typer.Option(
        0,
        "--light-id",
        "-l",
        show_default=True,
        help="Which light to operate on, see list output.",
    ),
    # Flag: operate on every attached light instead of just one.
    all_lights: bool = typer.Option(False,
                                    "--all",
                                    "-a",
                                    is_flag=True,
                                    help="Operate on all lights."),
    # Flag: turn on loguru output for the "busylight" package.
    debug: bool = typer.Option(
        False,
        "--debug",
        "-D",
        is_flag=True,
        help="Enable logging",
    ),
):
    """Control USB attached LED lights like a Human™

    Make a USB attached LED light turn on, off and blink; all from the
    comfort of your very own command-line. If your platform supports
    HIDAPI (Linux, MacOS, Windows and probably others), then you can use
    busylight with supported lights!
    """

    # Stash the selection on the context for subcommands: the ALL_LIGHTS
    # sentinel when --all was given, otherwise the chosen light id.
    ctx.obj = ALL_LIGHTS if all_lights else light_id

    if debug:
        logger.enable("busylight")
Esempio n. 3
0
def config_server_cli(context, host='localhost', port=6563, verbose=False):
    """Store config-server connection details on the CLI context.

    Saves ``host`` and ``port`` into ``context.obj`` for downstream
    subcommands and, when ``verbose`` is set, enables 'panoptes' logging.
    """
    context.ensure_object(dict)
    context.obj.update(host=host, port=port)

    if verbose:
        logger.enable('panoptes')
Esempio n. 4
0
def run(config, debug, no_cache):
    """Configure logging, load config and environment, then start the bot.

    :param config: path/identifier handed to ``config_dict`` to load settings.
    :param debug: when truthy, enable asyncio and asyncio-logger debug output.
    :param no_cache: when truthy, disable guild subscriptions and offline
        member fetching on the bot.
    """
    # Route stdlib logging records through loguru via InterceptHandler.
    # noinspection PyArgumentList
    logging.basicConfig(handlers=[InterceptHandler()], level=0)

    logger.remove()
    logger.enable("discord_chan")
    logger.add(sys.stderr, level="INFO", filter="discord_chan")
    logger.add(sys.stderr, level="ERROR", filter="discord")

    config = ConfigBox(config_dict(config))

    # NOTE(review): "enviroment" is misspelled but must match the key used
    # in the config files, so it is deliberately kept as-is.
    if not config.enviroment.bool("disable"):
        # Dict comprehension instead of dict([...]) over key/value tuples.
        load_environ(
            **{key: value
               for key, value in config.enviroment.items()
               if key != "disable"}
        )

    if debug:
        asyncio.get_event_loop().set_debug(True)
        logging.getLogger("asyncio").setLevel(logging.DEBUG)

    kwargs = {}
    if no_cache:
        kwargs["guild_subscriptions"] = False
        kwargs["fetch_offline_members"] = False

    bot = discord_chan.DiscordChan(config, **kwargs)

    # Todo: make sure to remove this debug call
    # bot.dispatch('ready')

    # Run the bot inside the aiomonitor-style console monitor.
    loop = asyncio.get_event_loop()
    with start_monitor(
        loop, monitor=discord_chan.DiscordChanMonitor, locals={"bot": bot}
    ):
        bot.run()
Esempio n. 5
0
def common_fix_or_check(context, verbose: int, files,
                        check_only: bool) -> None:
    """Common CLI code for both "fix" and "check" commands."""
    if verbose:
        # One -v means INFO, anything more means DEBUG.
        chosen_level = logging.INFO if verbose == 1 else logging.DEBUG

        # https://loguru.readthedocs.io/en/stable/resources/recipes.html#changing-the-level-of-an-existing-handler
        # https://github.com/Delgan/loguru/issues/138#issuecomment-525594566
        logger.remove()
        logger.add(sys.stderr, level=logging.getLevelName(chosen_level))
        logger.enable(PROJECT_NAME)

    nit = get_nitpick(context)
    try:
        # Report each fuss found while running (auto-fixing unless checking).
        for fuss in nit.run(*files, autofix=not check_only):
            nit.echo(fuss.pretty)
    except QuitComplainingError as err:
        for violation in err.violations:
            click.echo(violation.pretty)
        raise Exit(2) from err

    # Summarize and exit non-zero if anything needed (or got) fixing.
    click.secho(Reporter.get_counts())
    if Reporter.manual or Reporter.fixed:
        raise Exit(1)
Esempio n. 6
0
def configure_logger(verbosity):
    """Configure the scaffoldgraph cli logger to use tqdm handler.

    ``verbosity`` selects the log level: 0 (CRITICAL, quietest) through
    4 (DEBUG, loudest). Any value outside 0-4 selects DEBUG.
    """
    # Mapping lookup replaces the previous six-branch if/elif chain;
    # out-of-range values fall back to maximum verbosity (DEBUG).
    level_names = {0: 'CRITICAL', 1: 'ERROR', 2: 'WARNING', 3: 'INFO', 4: 'DEBUG'}
    level_name = level_names.get(verbosity, 'DEBUG')

    logger.enable('scaffoldgraph')

    # Keep the handler's sink level and declared level in sync.
    tqdm_handler['sink'].level = getattr(logging, level_name)
    tqdm_handler['level'] = level_name

    logger.configure(handlers=[tqdm_handler])
Esempio n. 7
0
def db(
    ctx: click.Context,
    config: Path,
    verbose: int,
) -> None:
    """DB migration tool for asyncpg.
    """

    # verbose == 0 silences the package logger entirely; higher counts
    # enable it and attach a stderr handler with increasing verbosity.
    if verbose == 0:
        logger.disable('asyncpg-migrate')
    else:
        logger.enable('asyncpg-migrate')
        # Map the -v count to a loguru level; above 3 falls back to TRACE.
        verbosity = {
            1: 'INFO',
            2: 'DEBUG',
            3: 'TRACE',
        }
        logger.add(
            sys.stderr,
            format='{time} {message}',
            filter='asyncpg-migrate',
            level=verbosity.get(verbose, 'TRACE'),
        )

    logger.debug(
        'Flags are config={config}, verbose={verbose}',
        config=config,
        verbose=verbose,
    )

    # Expose the configuration file path to subcommands via the context.
    ctx.ensure_object(dict)
    ctx.obj['configuration_file_path'] = config
Esempio n. 8
0
def configure_logger(verbosity):
    """Configure the scaffoldgraph cli logger to use tqdm handler.

    Parameters
    ----------
    verbosity : int
        Select the output verbosity. 0 is the lowest verbosity
        'CRITICAL' and 4 is the highest verbosity 'DEBUG'. If
        < 0 or > 4 the maximum verbosity is selected.

    """
    # Mapping lookup replaces the previous six-branch if/elif chain;
    # out-of-range values fall back to maximum verbosity (DEBUG).
    level_names = {0: 'CRITICAL', 1: 'ERROR', 2: 'WARNING', 3: 'INFO', 4: 'DEBUG'}
    level_name = level_names.get(verbosity, 'DEBUG')

    logger.enable('scaffoldgraph')

    # Keep the handler's sink level and declared level in sync.
    tqdm_handler['sink'].level = getattr(logging, level_name)
    tqdm_handler['level'] = level_name

    logger.configure(handlers=[tqdm_handler])
Esempio n. 9
0
def main():
    """Exercise ExampleSearchHandler: insert a record and run three finds,
    logging each result set at a different level for visual comparison.
    """
    logger.enable(__name__)
    processor = Jamboree()
    example_handler = ExampleSearchHandler()
    example_handler.processor = processor
    # Populate the searchable fields on the handler (dict-style access).
    example_handler["name"] = "Boi Gurl"
    example_handler["category"] = "marketsx"
    example_handler["sample_tags"] = ["four", "five", "six"]
    # TODO: Figure out subcategory update
    example_handler["subcategories"] = {
        "hello": "world",
        "country": "US",
    }
    example_handler["secondsub"] = {"my": "jac"}
    # presumably `replacement` stages an updated value for "secondsub" —
    # verify against the handler implementation.
    example_handler.replacement["secondsub"] = {"my": "jack"}
    example_handler["live"] = False

    # example_handler.remove()
    example_handler.insert(allow_duplicates=True)
    records1 = example_handler.find()

    records2 = example_handler.find()

    records3 = example_handler.find()

    # Different log levels only to make the three result sets easy to
    # tell apart in the console output; the finds are identical queries.
    logger.warning((records1, len(records1)))
    logger.success((records2, len(records2)))
    logger.error((records3, len(records3)))
Esempio n. 10
0
def main(folder=DEFAULT_DATA_FOLDER,
         keep_going=True,
         narrow_field=False,
         wide_field=False,
         verbose=False):
    """Download all PANOPTES data files into ``folder``.

    :param folder: destination data folder (created by the downloader if missing).
    :param keep_going: continue past individual download failures.
    :param narrow_field: include narrow-field data.
    :param wide_field: include wide-field data.
    :param verbose: enable 'panoptes' logging; otherwise silence loguru.
    :return: True if every file downloaded successfully.
    """
    if verbose:
        logger.enable('panoptes')
    else:
        # Drop loguru's default handler (id 0) to silence output.
        # NOTE(review): raises ValueError if handler 0 was already removed —
        # confirm this is only called once per process.
        logger.remove(0)

    if not os.path.exists(folder):
        logger.warning(f"Warning, data folder {folder} does not exist, will create.")

    # --no_narrow_field is the default, so the the args list below ignores args.no_narrow_field.
    dl = Downloader(
        data_folder=folder,
        keep_going=keep_going,
        narrow_field=narrow_field,
        wide_field=wide_field,
        verbose=verbose)

    success = dl.download_all_files()
    if success:
        # Plain string: the original f-prefix had no placeholders.
        logger.success('Downloaded all files')

    return success
Esempio n. 11
0
def run_script(add_to_parser, run_from_args, argc=None):
    """
    Run script from command line

    :param add_to_parser: function that takes an argument parser and adds information to it
    :param run_from_args: function that runs the script based on the arguments of the parser
    :param argc: command line arguments
    """
    # Both callbacks must come from the same module; its docstring becomes
    # the parser description.
    owning_module = add_to_parser.__module__
    if owning_module != run_from_args.__module__:
        raise ValueError(
            "run_script expected add_to_parser and run_from_args to be defined in the same module"
        )
    description = sys.modules[owning_module].__doc__

    logger.enable('mcot.core')
    # depth=1 attributes log records to the caller, not this wrapper.
    script_logger = logger.opt(depth=1)

    parser = argparse.ArgumentParser(description)
    add_to_parser(parser)
    parsed = parser.parse_args(argc)

    setup_log()

    script_logger.info('starting script')
    try:
        run_from_args(parsed)
    except Exception:
        # Record the traceback before letting the exception propagate.
        script_logger.exception('failed script')
        raise
    script_logger.info('finished script')
    def test_stderr(self, capsys, command, ignore_stderr_regexp, out_regexp,
                    expected):
        """Run a failing command and check whether its stderr is logged.

        ``expected`` says whether ``out_regexp`` should match the captured
        stderr stream given the ``ignore_stderr_regexp`` filter.
        """
        from loguru import logger

        import subprocrunner

        # Fresh DEBUG-level handler on stderr so log output is capturable.
        logger.remove()
        logger.add(sys.stderr, level="DEBUG")
        logger.enable("test")
        subprocrunner.set_logger(True)

        runner = SubprocessRunner(command,
                                  ignore_stderr_regexp=ignore_stderr_regexp)
        runner.run()

        # The command is expected to fail: no stdout, some stderr.
        assert is_null_string(runner.stdout.strip())
        assert is_not_null_string(runner.stderr.strip())
        assert runner.returncode != 0

        out, err = capsys.readouterr()
        print("[sys stdout]\n{}\n".format(out))
        print("[sys stderr]\n{}\n".format(err))
        print("[proc stdout]\n{}\n".format(runner.stdout))
        print("[proc stderr]\n{}\n".format(runner.stderr))

        # Whether the log line reached stderr depends on the ignore filter.
        actual = out_regexp.search(err) is not None
        assert actual == expected
Esempio n. 13
0
def write_logs(request):
    """Route loguru output for one test into its own log file.

    Files land under tests/logs/<module>/<class>/<test name>.log, written
    at TRACE level, with logging enabled for "my_package".
    """
    # put logs in tests/logs
    target = Path("tests") / "logs"

    # tidy logs in subdirectories based on test module and class names
    if request.module:
        target /= request.module.__name__.replace("tests.", "")
    if request.cls:
        target /= request.cls.__name__

    target.mkdir(parents=True, exist_ok=True)

    # append last part of the name
    target /= request.node.name + ".log"

    # enable the logger
    logger.remove()
    logger.configure(
        handlers=[{"sink": target, "level": "TRACE", "mode": "w"}])
    logger.enable("my_package")
Esempio n. 14
0
    def transform(self, training_data: pd.DataFrame) -> pd.DataFrame:
        """
        Transform training data.

        Runs entity extraction over each row's input column and stores the
        resulting entities in the output column (appending when the row
        already holds entities). Returns the input unchanged when
        ``self.use_transform`` is falsy.

        :param training_data: Training data.
        :type training_data: pd.DataFrame
        :return: Transformed training data.
        :rtype: pd.DataFrame
        """
        if not self.use_transform:
            return training_data

        logger.debug(f"Transforming dataset via {self.__class__.__name__}")
        # Work on a copy so the caller's frame is never mutated.
        training_data = training_data.copy()
        if self.output_column not in training_data.columns:
            training_data[self.output_column] = None

        # Silence "dialogy" logging during the (potentially long) tqdm loop.
        logger.disable("dialogy")
        for i, row in tqdm(training_data.iterrows(), total=len(training_data)):
            transcripts = self.make_transform_values(row[self.input_column])
            entities = self.get_entities(transcripts)
            # NOTE(review): truth-testing `Series.isnull()` raises ValueError
            # for multi-element Series — this seems to assume the cell holds
            # at most a one-element Series. Confirm with callers.
            is_empty_series = isinstance(
                row[self.output_column],
                pd.Series) and (row[self.output_column].isnull())
            is_row_nonetype = row[self.output_column] is None
            # NOTE(review): pd.isna on a list-like returns an array, which is
            # also ambiguous in `if`; presumably the cell is scalar/None here.
            is_row_nan = pd.isna(row[self.output_column])
            if is_empty_series or is_row_nonetype or is_row_nan:
                training_data.at[i, self.output_column] = entities
            else:
                # Append new entities to the ones already present.
                training_data.at[i, self.output_column] = (
                    row[self.output_column] + entities)
        logger.enable("dialogy")
        return training_data
Esempio n. 15
0
def main():
    """CLI entry point: parse arguments, configure logging, and run lDDT."""
    options = vars(parse_args())

    # Single stderr sink; LogFilter consumes the "verbose" option so it is
    # not forwarded to run_lddt.
    logger.enable("graphqa.data.lddt")
    logger.remove()
    logger.add(sys.stderr, filter=LogFilter(options.pop("verbose")), level=0)

    run_lddt(**options)
Esempio n. 16
0
def set_logger(is_enable):
    """Toggle logging for this module and its dependency packages."""
    # Pick enable/disable once, then apply it to this module's logger.
    toggle = logger.enable if is_enable else logger.disable
    toggle(MODULE_NAME)

    # Keep dependency loggers in the same state.
    simplesqlite.set_logger(is_enable)
    subprocrunner.set_logger(is_enable)
Esempio n. 17
0
        def run_func(*args, **kwargs):
            """Call ``func``, silencing loguru logging when disabled on self.

            Returns whatever ``func`` returns; logging for this module is
            re-enabled afterwards even if ``func`` raises.
            """
            if not self.enable_log:
                logger.disable(__name__)
                print(f"Logging Disabled for Function: {func.__name__}")

            try:
                # BUG FIX: forward **kwargs — they were accepted by the
                # wrapper but silently dropped on the call before.
                result = func(*args, **kwargs)
            finally:
                # Re-enable even on exception so logging is never left off.
                logger.enable(__name__)
            return result
Esempio n. 18
0
def test_log_before_disable(writer):
    """Records logged before disable() are kept; later ones are dropped."""
    logger.add(writer, format="{message}")
    logger.enable("")
    logger.debug("yes")
    # Disabling "tests" suppresses records emitted from this test module.
    logger.disable("tests")
    logger.debug("nope")
    result = writer.read()
    # Only the pre-disable message should have been written.
    assert result == "yes\n"
Esempio n. 19
0
def memory_db():
    """Create and return a fresh mycartable test database.

    Logging is disabled globally during initialisation to keep test
    output quiet, then re-enabled before returning.
    """
    from mycartable.database import init_database

    logger.disable("")
    # presumably Database() defaults to an in-memory store — verify caller.
    db = init_database(Database())
    add_database_to_types(db)

    logger.enable("")
    return db
Esempio n. 20
0
def test_log_before_disable_f_globals_name_absent(writer,
                                                  f_globals_name_absent):
    """Same as test_log_before_disable, but with ``__name__`` absent from
    the caller's globals (via the f_globals_name_absent fixture); enabling
    and disabling with None must still work.
    """
    logger.add(writer, format="{message}")
    logger.enable(None)
    logger.debug("yes")
    logger.disable(None)
    logger.debug("nope")
    result = writer.read()
    # Only the message logged while enabled should appear.
    assert result == "yes\n"
Esempio n. 21
0
def set_logger(is_enable: bool, propagation_depth: int = 1) -> None:
    """Enable or disable this package's logger and propagate the setting.

    Args:
        is_enable: True to enable logging for MODULE_NAME, False to disable.
        propagation_depth: How many dependency levels to forward the setting
            to; values <= 0 stop propagation.
    """
    if is_enable:
        logger.enable(MODULE_NAME)
    else:
        logger.disable(MODULE_NAME)

    if propagation_depth <= 0:
        return

    # Forward to the dependency with one less level of propagation.
    dataproperty.set_logger(is_enable, propagation_depth - 1)
Esempio n. 22
0
def caplog(_caplog):
    """Pytest fixture: bridge loguru records into std logging for caplog."""
    # NOTE: class name is misspelled ("Propogate") but is local-only.
    class PropogateHandler(logging.Handler):
        # Hand each loguru record to the std-logging logger of the same
        # name, which pytest's caplog fixture captures.
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    logger.enable('panoptes')
    handler_id = logger.add(PropogateHandler(), format="{message}")
    yield _caplog
    # The handler may already have been removed if the logger was reset.
    with suppress(ValueError):
        logger.remove(handler_id)
Esempio n. 23
0
    def configure_logging(self, level, sink=sys.stderr):
        """ Sets up logging verbosity.

        Args:
            level (str):
                ERROR, WARNING, INFO, DEBUG, TRACE
            sink: stream or file for logs
        """
        log.enable('libvis')
        # BUG FIX: ``sink`` was previously ignored and ``log.level(level)``
        # only looks up/creates a level object in loguru — it does not change
        # what gets emitted. Install a handler on the requested sink instead.
        log.remove()
        log.add(sink, level=level)
Esempio n. 24
0
def set_logger(is_enable: bool, propagation_depth: int = 1) -> None:
    """Enable or disable this package's logger and propagate the setting.

    ``propagation_depth`` controls how many dependency levels the setting
    is forwarded to; values <= 0 stop propagation here.
    """
    # Pick the toggle once and apply it to this package's logger.
    toggle = logger.enable if is_enable else logger.disable
    toggle(MODULE_NAME)

    if propagation_depth > 0:
        # Forward with one less level of propagation remaining.
        subprocrunner.set_logger(is_enable, propagation_depth - 1)
Esempio n. 25
0
def main():
    """
    Ensure your application will create an empty database if one doesn’t exist
    when the app is first run. Call it customers.db
    """

    # Standalone function to initialize logging:
    # info and up to stdout, debug and up to a timestamped log file.
    logger.add(stdout, level='INFO')
    logger.add("logfile_{time}.txt", level='DEBUG')
    logger.enable(__name__)
Esempio n. 26
0
def test_enable(writer, name, should_log):
    """After a global disable, enabling ``name`` controls record flow.

    ``should_log`` says whether re-enabling that name should let the
    debug record through to the writer sink.
    """
    logger.add(writer, format="{message}")
    logger.disable("")
    logger.enable(name)
    logger.debug("message")

    expected = "message\n" if should_log else ""
    assert writer.read() == expected
Esempio n. 27
0
def caplog(caplog: _logging.LogCaptureFixture) -> _logging.LogCaptureFixture:
    """Pytest fixture: bridge loguru records into std logging for caplog."""
    class LoguruHandler(logging.Handler):
        # Hand each loguru record to the std-logging logger of the same
        # name, which pytest's caplog fixture captures.
        def emit(self, record: logging.LogRecord) -> None:
            logging.getLogger(record.name).handle(record)

    logger.enable('axion')
    handler_id = logger.add(
        LoguruHandler(),
        format='{message}',
    )
    yield caplog
    # Teardown: detach the bridge and silence 'axion' again.
    logger.remove(handler_id)
    logger.disable('axion')
Esempio n. 28
0
def main():
    """
    Ensure your application will create an empty database if one doesn’t exist
    when the app is first run. Call it customers.db
    """

    # Standalone function to initialize logging:
    # warnings and up to stdout, info and up to a timestamped log file.
    logger.add(stdout, level='WARNING')
    logger.add("logfile_{time}.txt", level='INFO')
    logger.enable(__name__)

    # TODO: load the CSV file using generator and doing profiling
    ingest_csv()
Esempio n. 29
0
def enable_logger(sink=sys.stderr, level="WARNING"):
    """
    Enable the logging of messages.

    Resets loguru's handlers, installs a single handler on ``sink`` at
    ``level``, and turns logging back on for the "aria2p" package.

    Args:
        sink (file): An opened file pointer, or stream handler. Default to standard error.
        level (str): The log level to use. Possible values are TRACE, DEBUG, INFO, WARNING, ERROR, CRITICAL.
            Default to WARNING.
    """
    handler = {"sink": sink, "level": level}
    logger.remove()
    logger.configure(handlers=[handler])
    logger.enable("aria2p")
Esempio n. 30
0
def configure_logging(modifier=0,
                      *,
                      username=None,
                      debug=False,
                      log_to_file=False):
    """Configure loguru sinks for the CLI.

    :param modifier: offset applied to the default verbosity of 4; the
        result is clamped to the valid 0-7 range of VERBOSITY_LOG_LEVELS.
    :param username: used to locate the per-user log directory when
        ``log_to_file`` is set.
    :param debug: also enable logging for the related google-music packages.
    :param log_to_file: additionally write logs to a timestamped file.
    """
    logger.remove()

    # Clamp verbosity into 0..7 (replaces the manual if/elif clamp).
    verbosity = max(0, min(7, 4 + modifier))

    log_level = VERBOSITY_LOG_LEVELS[verbosity]
    logger.add(sys.stdout, level=log_level, format=LOG_FORMAT, backtrace=False)

    if debug:
        # Enable every related package in one pass.
        for package in ('audio_metadata', 'google_music',
                        'google_music-proto', 'google_music_utils'):
            logger.enable(package)

    if log_to_file:
        log_dir = ensure_log_dir(username=username)
        log_file = (log_dir /
                    time.strftime('%Y-%m-%d_%H-%M-%S')).with_suffix('.log')

        logger.success("Logging to file: {}", log_file)

        logger.add(log_file,
                   level=log_level,
                   format=LOG_FORMAT,
                   backtrace=False)