Example no. 1
def output(
    print_json: bool = Option(False, "--json", help=_json_help),
    organization: str = Option("", "-o", "--organization", help=_organization_help),
    environment: str = Option("", "-e", "--environment", help=_environment_help),
    graph: Path = Option("", "-g", "--graph-path", help=_graph_help),
    store_name: str = Argument(..., exists=True, help=_store_name_help),
):
    """List data sent to an output port of a from the most recent run of a node"""
    ids = IdLookup(
        environment_name=environment,
        organization_name=organization,
        explicit_graph_path=graph,
    )

    with abort_on_error("Could not get node data"):
        data = list(
            paginated_output_data(ids.environment_id, ids.graph_id, store_name)
        )
    _print_objects(data, print_json)
Example no. 2
def branch(
    repository: Path = Argument(..., help="The path to the bare repository."),
    local: bool = True,
    remote: bool = False,
) -> None:
    """Get the branches of a repository."""
    from pygit2 import Repository  # pylint: disable=import-outside-toplevel
    from typer import echo  # pylint: disable=import-outside-toplevel

    repo = Repository(repository)
    branches: Iterable[str]
    if local and remote:
        branches = repo.branches
    elif local:
        branches = repo.branches.local
    elif remote:
        branches = repo.branches.remote
    else:
        branches = []
    for branch_name in branches:
        echo(branch_name)
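All of these examples rely on Typer turning function parameters into CLI arguments and options once the function is registered on an app. Below is a minimal, self-contained sketch of that wiring (not taken from any of the projects in this list); note how the plain `local: bool = True` / `remote: bool = False` defaults become `--local/--no-local` and `--remote/--no-remote` flags automatically:

import typer
from typer import Argument

app = typer.Typer()


@app.command()
def branch(
    repository: str = Argument(..., help="The path to the repository."),
    local: bool = True,
    remote: bool = False,
) -> None:
    """Toy stand-in for the command above: echo which branch kinds were requested."""
    if local:
        typer.echo("local branches requested")
    if remote:
        typer.echo("remote branches requested")


if __name__ == "__main__":
    app()  # e.g.: python branches.py /path/to/repo --no-local --remote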
Example no. 3
def raster_tile_cache(
        dataset: str = Option(..., help="Dataset name."),
        version: str = Option(..., help="Version number."),
        zoom_level: int = Option(..., help="Zoom level."),
        implementation: str = Option(
            ..., help="Implementation name/pixel meaning."),
        target_bucket: str = Option(..., help="Target bucket."),
        skip_empty_tiles: bool = Option(
            False,
            "--skip_empty_tiles",
            help="Do not write empty tiles to tile cache."),
        bit_depth: int = Option(8, help="Number of bits per channel to use."),
        tile_set_prefix: str = Argument(..., help="Tile prefix."),
):
    LOGGER.info(f"Raster tile set asset prefix: {tile_set_prefix}")

    tiles: List[Tuple[str, str]] = get_input_tiles(tile_set_prefix)

    # If there are no files, what can we do? Just exit I guess!
    if not tiles:
        LOGGER.info("No input files! I guess we're good then?")
        return

    sub_processes = max(math.floor(NUM_PROCESSES / len(tiles)), 1)
    LOGGER.info(f"Using {sub_processes} sub-processes per process")

    args = ((
        tile,
        dataset,
        version,
        target_bucket,
        implementation,
        zoom_level,
        skip_empty_tiles,
        sub_processes,
        bit_depth,
    ) for tile in tiles)

    # Cannot use normal pool here, since we run sub-processes
    # https://stackoverflow.com/a/61470465/1410317
    with ProcessPoolExecutor(max_workers=NUM_PROCESSES) as executor:
        for tile in executor.map(create_tiles, args):
            LOGGER.info(f"Finished processing tile {tile}")
Example no. 4
def doi_process(
    ids: List[str] = Argument(...,
                              help="Valid DOI/arXivID(s) or file (*.bib, *.txt)"),
    dir: Path = Option(getcwd, '--dir', '-d', help="Directory to download"),
    scihub: str = Option(config.__scihub__, '--scihub',
                         '-s', help="Valid Sci-Hub URL")
):
    global operator
    try:
        assert not scihub.startswith(
            "http"), 'Argument Error: Invalid URL, example: sci-hub.tf'
        scihub = "https://" + scihub
        assert path.exists(dir), 'Argument Error: Invalid path.'
        if ids[0].lower().endswith('.bib'):
            assert path.exists(ids[0]), 'Argument Error: Invalid file path.'
            ids_list = operator.parseBibTex(ids[0])
        elif ids[0].lower().endswith('.txt'):
            assert path.exists(ids[0]), 'Argument Error: Invalid file path.'
            ids_list = operator.parseTxt(ids[0])
        else:
            ids_list = ids
    except AssertionError as e:
        echo(e.args[0], err=True)
        raise Exit()
    if not ids_list:
        echo("There is no valid DOI.", err=True)
        raise Exit()
    articles = list()
    for d in ids_list:
        if d.startswith('arXiv'):
            articles.append({
                "article_url": urljoin('https://arxiv.org/abs', d.split(':')[1]),
                "file_name": f"{d.replace(':', '-')}.pdf",
                "warning_str": d})
        else:
            articles.append({
                "article_url": urljoin(scihub, d),
                "file_name": f"{d.replace('/', '-')}.pdf",
                "warning_str": d})
    task = "ID"
    missing, log_file = operator.download(task, articles, dir)
    echo(f" {' '*len(task)} | {missing} missing: {log_file}")
Example no. 5
def webhook(
        explicit_graph: Path = Option(None,
                                      "--graph",
                                      "-g",
                                      exists=True,
                                      help=_graph_help),
        name: str = Argument(..., help=_webhook_name_help),
):
    """Add a new webhook node to a graph"""
    ids = IdLookup(explicit_graph_path=explicit_graph)

    with abort_on_error("Adding webhook failed"):
        editor = GraphConfigEditor(ids.graph_file_path)
        editor.add_webhook(name, id=random_node_id())
        editor.write()

    sprint(f"\n[success]Created webhook [b]{name}")
    sprint(
        f"\n[info]Once you've deployed the graph, use "
        f"[code]patterns list webhooks[/code] to get the url of the webhook")
Example no. 6
def generate_keys(
        domain: str = Argument(
            ..., help="Domain to generate DKIM keys for"),  # noqa: B008
) -> None:
    client = from_env()
    build_kolombo_image(client, "dkim-gen")

    step(f"Generating DKIM keys for domain: {domain}")
    client.containers.run(
        "kolombo-dkim-gen",
        domain,
        stderr=True,
        auto_remove=True,
        volumes=["/etc/kolombo/dkim_keys:/etc/opendkim/keys"],
    )

    dkim_txt = read_dkim_txt_record(domain)
    info(
        f"[b]TXT[/] record for [b u]mail._domainkey.{domain}[/] is: {dkim_txt}"
    )
Example no. 7
def delete(
    graph_id: str = Option(""),
    force: bool = Option(False, "-f", "--force", help=_force_help),
    graph: Path = Argument(None, exists=True, help=_graph_help),
):
    """Delete a graph from the Patterns studio.

    This will not delete any files locally.
    """
    ids = IdLookup(
        explicit_graph_path=graph,
        explicit_graph_id=graph_id,
    )

    with abort_on_error("Deleting graph failed"):
        if not force and not Confirm.ask(f"Delete graph {ids.graph_name}?"):
            return
        delete_graph(ids.graph_id)

    sprint(f"[success]Graph deleted from Patterns studio.")
Example no. 8
def in_(
        when: str = Argument(None, help="Set a specific time to clock in"),
        out: str = Option(None, help="Set time to clock out"),
        m: str = Option(None, help="Journal Message to add to record"),
):
    """Clock into a job, or add a job day"""
    if when is not None and len(when.split("-")) > 3:
        when, out = parse_date_time_junction(when)
    else:
        if out is not None:
            out = parse_date_and_time(out)
        if when is not None:
            when = parse_date_and_time(when)

    if when is not None and out is not None:
        print(
            f"Creating entry for {when:%Y-%m-%d}: {when:%H:%M:%S} to {out:%H:%M:%S}"
        )
        c = Clok(
            time_in=when,
            time_out=out,
            date_key=get_date_key(when),
            month_key=get_month(when),
            week_key=get_week(when),
        )
        c.update_span()
        c.save()
        if m is not None:
            c.add_journal(m)
    elif when is not None and out is None:
        Clok.clock_in_when(when, verbose=True)
        if m is not None:
            Clok.get_last_record().add_journal(m)
        State.set_clok(Clok.get_last_record())
    else:
        Clok.clock_in(verbose=True)
        if m is not None:
            Clok.get_last_record().add_journal(m)
        State.set_clok(Clok.get_last_record())
Example no. 9
def main(
        db_path: Path = Argument(Path("pypms.sqlite"),
                                 help="sensor messages DB"),
        samples: int = Option(4, "--samples", "-n"),
        interval: int = Option(20, "--interval", "-i"),
):
    """
    Read raw messages from 2 different sensors
    (PMSx003 on /dev/ttyUSB0 and MCU680 on /dev/ttyUSB1)
    and store them on a sqlite DB.

    After reading the sensor, decode all messages on DB and print them.
    """

    # get DB context manager
    message_db = pypms_db(db_path)

    reader = dict(
        pms=SensorReader("PMSx003", "/dev/ttyUSB0", interval, samples),
        bme=SensorReader("MCU680", "/dev/ttyUSB1", interval, samples),
    )

    # read from each sensor and write to DB
    with message_db() as db, reader["pms"] as pms, reader["bme"] as bme:
        # read one obs from each sensor at a time
        with progressbar(zip(pms(raw=True), bme(raw=True)),
                         length=samples,
                         label="reading sensors") as progress:
            for pms_obs, env_obs in progress:
                write_message(db, pms.sensor, pms_obs)
                write_message(db, bme.sensor, env_obs)

    # read and decode all messages on the DB
    with message_db() as db:
        # extract obs from one sensor at a time
        for sensor in [r.sensor for r in reader.values()]:
            print(sensor)
            for obs in read_obs(db, sensor):
                print(obs)
Example no. 10
def cheapest(
        search_term: str = Argument(..., help="Search term for product."),
        n: int = Option(default=5,
                        help="Max. number of cheapest findings to show."),
        username: str = Option(None, help="Username of Picnic account."),
        password: str = Option(None, help="Password of Picnic account."),
        country_code: str = Option(default="DE", help="Country code of shop."),
):
    # connect to picnic api
    if (not username) or (not password):
        print("loading credentials from config file...")
        username, password, country_code = credentials_from_file(
            path=CREDENTIALS_PATH)
    picnic = PicnicAPI(username=username,
                       password=password,
                       country_code=country_code)
    # search items, filter cheapest
    items = search_items(picnic_api=picnic, term=search_term)
    cheapest_items = filter_cheapest_items(items=items, max_n=n)
    # format results
    for item in cheapest_items:
        print(f"* {item.name}: {item.unit_quantity_sub}")
Example no. 11
def heat(
    top: float = Argument(
        ..., help='The heat diffusion coefficient for the top bars.'),
    fv: bool = FV,
    grid: int = GRID,
    nt: int = NT,
    rect: bool = RECT,
):
    problem = InstationaryProblem(
        StationaryProblem(
            domain=RectDomain(top='dirichlet', bottom='neumann'),
            diffusion=LincombFunction(
                [
                    ConstantFunction(1., dim_domain=2),
                    ExpressionFunction(
                        '(x[..., 0] > 0.45) * (x[..., 0] < 0.55) * (x[..., 1] < 0.7) * 1.',
                        dim_domain=2),
                    ExpressionFunction(
                        '(x[..., 0] > 0.35) * (x[..., 0] < 0.40) * (x[..., 1] > 0.3) * 1. + '
                        '(x[..., 0] > 0.60) * (x[..., 0] < 0.65) * (x[..., 1] > 0.3) * 1.',
                        dim_domain=2)
                ], [
                    1.,
                    100. - 1.,
                    ExpressionParameterFunctional('top - 1.', {'top': 1})
                ]),
            rhs=ConstantFunction(value=0., dim_domain=2),
            dirichlet_data=ConstantFunction(value=0., dim_domain=2),
            neumann_data=ExpressionFunction(
                '(x[..., 0] > 0.45) * (x[..., 0] < 0.55) * -1000.',
                dim_domain=2),
        ),
        T=1.,
        initial_data=ExpressionFunction(
            '(x[..., 0] > 0.45) * (x[..., 0] < 0.55) * (x[..., 1] < 0.7) * 10.',
            dim_domain=2))
    mu = {'top': top}
    solve(problem, mu, fv, rect, grid, nt)
Example no. 12
async def add_user(
        email: str = Argument(..., help="Email for new user"),  # noqa: B008
) -> None:
    from kolombo.models import Domain, User

    if "@" not in email:
        error(f"Email '{email}' does not contain '@'!")
        exit(1)

    domain = email.split("@", maxsplit=1)[1].strip()
    if not domain:
        error("Domain part MUST NOT be empty string!")
        exit(1)
    elif not await Domain.objects.filter(active=True, actual=domain).exists():
        error(f"Domain '{domain}' is not added (or inactive)!")
        warning(
            f"You can add it via [code]kolombo domain add {domain} mx.{domain}[/code]"
        )
        exit(1)
    elif await User.objects.filter(email=email).exists():
        error(f"User with email '{email}' already exists!")
        exit(1)

    started(f"Adding [code]{email}[/] user")

    password = prompt(f"{email} password",
                      hide_input=True,
                      confirmation_prompt=True)
    step("Saving to database")
    await _save_user(email, password, domain)

    step("Updating virtual files (addresses and mailbox map)")
    active_users = await User.all_active()
    update_virtual_files(active_users)

    warning("Run command [code]kolombo run[/] to reload Kolombo")

    finished(f"User '{email}' added!")
Example no. 13
def add(
    name: str = Argument(..., help="Name of the peer."),
    config_path: Path = common.OPTION_CONFIG_PATH,
    port: Optional[int] = common.OPTION_PORT,
    ipv4_address: Optional[str] = common.OPTION_IPV4_ADDRESS,
    ipv6_address: Optional[str] = common.OPTION_IPV6_ADDRESS,
    site: Optional[str] = None,
):
    """
    Add a new peer.
    """
    config = MainConfig.load(config_path)
    try:
        config.add_peer(
            name,
            IPv4Address(ipv4_address) if ipv4_address else None,
            IPv6Address(ipv6_address) if ipv6_address else None,
            port,
        )
    except DuplicatePeerError as e:
        echo(str(e), err=True)

    config.save(config_path)
Example no. 14
def install(ext_path: str = Argument(
    Path(), help="The path of the extension")) -> None:
    """Build and install an extension"""

    if not GLOBAL_EXTENSIONS_DIR.exists():
        os.mkdir(GLOBAL_EXTENSIONS_DIR)

    extension_path = Path(ext_path).resolve()
    assert extension_path.joinpath("package.json").exists()

    _build_extension(extension_path, True, False)

    module, metadata = _get_extensions_metadata(extension_path)
    src = Path(extension_path).joinpath(module.__name__, metadata[0]["src"])
    dest = GLOBAL_EXTENSIONS_DIR.joinpath(metadata[0]["dest"])

    clean_dir(dest)

    shutil.copytree(src, dest, symlinks=True)
    print(f"""
    Extension installed:
        Path:  {dest}
    """)
Example no. 15
def main(
    network_type: NetworkType = Argument(..., help="type of the VAE network"),
    bottleneck_dim: int = Option(16,
                                 "--bottleneck_dim",
                                 "-n",
                                 help="size of the VAE bottleneck"),
    lr: float = Option(0.001, "--lr", "-r", help="learning rate for training"),
    batch_size: int = Option(...,
                             "--batch_size",
                             "-b",
                             help="batch size for training"),
    epochs: int = Option(..., "--epochs", "-e", help="epochs to train"),
    device: str = Option("cpu",
                         "--device",
                         "-d",
                         help='device to train on, e.g. "cuda:0"'),
    logdir: str = Option(
        "./results",
        "--logdir",
        "-l",
        help="directory to log the models and event file to",
    ),
):
    """Run the training for a VAE.
    """

    mnist_data = dataset.MyMNIST()

    if network_type == NetworkType.mlp:
        net = model.MLPVAE((1, 32, 32), bottleneck_dim)
    else:
        net = model.CNNVAE((1, 32, 32), bottleneck_dim)

    optim = torch.optim.Adam(net.parameters(), lr)
    vae_trainer = trainer.Trainer(net, mnist_data, optim, batch_size, device,
                                  logdir)
    vae_trainer.train(epochs)
Example no. 16
def assemble(
    filepth: Path = Argument(
        ...,
        exists=True,
        file_okay=True,
        dir_okay=True,
        readable=True,
        resolve_path=True,
    ),
    out: Path = Option(None),
):
    if filepth.suffix != ".asm":
        typer.echo("The file name must end with `.asm`")
        raise typer.Exit(code=1)

    if out is None:
        out = filepth.parent.joinpath(f"{filepth.stem}.hack")

    try:
        parser = Parser(filepth.read_text())
    except ValueError as err:
        typer.echo(err)
        raise typer.Exit(1)

    assembler = Assembler(parser)

    try:
        assembly = assembler.assemble()
    except (InvalidCommandException, InvalidMnemonicError, AddressOutOfRange) as err:
        typer.echo(err)
        raise typer.Exit(code=1)

    typer.echo(f"Writing to {out}")
    with out.open("w") as f:
        f.writelines([x + "\n" for x in assembly])

    typer.echo("Done")
Example no. 17
def start(service: Service = Argument(...,
                                      help="Which service to start")) -> None:
    """
    Start one of the memezer services.
    """
    if service == Service.wsgi:
        import sys
        import time
        from signal import SIGINT, SIGTERM, signal
        from subprocess import Popen

        run_args = [
            "gunicorn",
            "-k",
            "uvicorn.workers.UvicornWorker",
            "--config",
            "gunicorn.conf.py",
            "memezer.app:wsgi",
        ]
        gunicorn_master_proc = Popen(run_args)

        def kill_proc(signum: Any, frame: Any) -> None:
            gunicorn_master_proc.terminate()
            gunicorn_master_proc.wait()
            sys.exit(0)

        signal(SIGINT, kill_proc)
        signal(SIGTERM, kill_proc)

        while True:
            time.sleep(1)

    if service == Service.worker:
        from .queue import queue

        with queue.open():
            queue.run_worker()  # type: ignore
Example no. 18
def install_component(component_file: str = Argument(
    ..., help="The path to the component file containing the data")):
    """Function to install a component:

    - Takes as input a YAML file
    - Parses it as a dictionary through the Pydantic model
        TunableComponentsModel
    - Sends it as a POST request to /components
    """
    api_client = Client(
        base_url=f"http://{api_settings.api_host}:{api_settings.api_port}",
        proxies={},
    )
    component = TunableComponentsModel.from_yaml(component_file)
    logger.debug(f"Sending component data {component.dict()}"
                 "to endpoint"
                 f"http: // {api_settings.api_host}: {api_settings.api_port}"
                 f"{api_settings.component_endpoint}")
    request = api_client.post(api_settings.component_endpoint,
                              json=component.dict())
    logger.info("Successfully registered components.")
    if not 200 <= request.status_code < 400:
        raise Exception("Could not create component with status code"
                        f"{request.status_code}")
Example no. 19
def main(
    db_path: Path = Argument(Path("pypms.sqlite"), help="sensor measurements DB"),
    samples: int = Option(4, "--samples", "-n"),
    interval: int = Option(20, "--interval", "-i"),
):
    """
    Read measurements from 2 different sensors
    (PMSx003 on /dev/ttyUSB0 and MCU680 on /dev/ttyUSB1)
    and store them on a sqlite DB as a "tall table" with a "wide table" view for each sensor.

    After reading the sensors, get all measurements from the DB and print them by sensor.
    """

    # get DB context manager
    measurements_db = pypms_db(db_path)

    reader = dict(
        pms=SensorReader("PMSx003", "/dev/ttyUSB0", interval, samples),
        bme=SensorReader("MCU680", "/dev/ttyUSB1", interval, samples),
    )

    # read from each sensor and write to DB
    with measurements_db() as db, reader["pms"] as pms, reader["bme"] as bme:
        # read one obs from each sensor at a time
        with progressbar(zip(pms(), bme()), length=samples, label="reading sensors") as progress:
            for pms_obs, env_obs in progress:
                write_measurements(db, pms.sensor, pms_obs)
                write_measurements(db, bme.sensor, env_obs)

    # read all measurements on the DB and reconstruct sensor.Data objects
    with measurements_db() as db:
        # extract obs from one sensor at a time
        for sensor in [r.sensor for r in reader.values()]:
            print(sensor)
            for obs in read_obs(db, sensor):
                print(obs)
Example no. 20
def import_(file_path: str = Argument(
    None,
    help="the path of the file to import. Only json files are supported at this "
    "time.",
)):
    """Import an exported json file to the database."""
    if os.path.isfile(file_path):
        with open(file_path) as f:
            dump_obj = json.loads(f.read())
        time_clok_jobs = dump_obj["time_clok_jobs"]
        time_clok = dump_obj["time_clok"]
        time_clok_state = dump_obj["time_clok_state"]
        time_clok_journal = dump_obj["time_clok_journal"]

        jobs = []
        for job in time_clok_jobs:
            jobs.append(Job(**job))
        add_items_to_database(jobs)

        cloks = []
        for clok in time_clok:
            cloks.append(Clok(**clok))
        add_items_to_database(cloks)

        journals = []
        for journal in time_clok_journal:
            journals.append(Journal(**journal))
        add_items_to_database(journals)
        try:
            s = State(**time_clok_state[0])
            s.save()
        except IntegrityError:
            pass

    else:
        raise FileNotFoundError(f"'{file_path}' does not exist.")
Example no. 21
def grant_read_access(
    hw_title: str = Argument(default=...,
                             metavar="📝hw",
                             help="target homework to grant access to"),
    dry: bool = Opt.DRY,
    yes: bool = Opt.ACCEPT_ALL,
):
    """Make TAs being able to read all homework repos

    "team-slug": the field auto generated by github.

    For example: the team-slug of "2019 Teaching-team" would be "2019_teaching-team".
    """
    DoCheck(gh_config_valid=True)

    org, team = settings.github.org, settings.grant.reader_team

    if not (yes or typer.confirm(
            f"Grant read access of {org}/{hw_title} to {org}/{team}?")):
        raise typer.Abort()

    # additional checks
    print("check reader team exists")
    print("check have repo with hw")
Example no. 22
def build(
    ctx: Context,
    name: Optional[str] = Argument(None, help="Name of images to build"),
    builder_file: Optional[str] = Option(
        None,
        "--builder-file",
        "-b",
        help="builder.yml file to use. By default files are searched accross repo",
    ),
    *,
    context: Optional[Path] = Option(
        None, "--context", "-c", help="Docker context to consider when performing build"
    ),
    file: Optional[Path] = Option(
        None,
        "--dockerfile",
        "--file",
        "-f",
        help="Custom Dockerfile to use when building image",
    ),
    tags: Optional[List[str]] = Option(
        None, "-t", "--tags", help="Custom tags to give to produced image"
    ),
    labels: Optional[List[str]] = Option(
        None, "-l", "--label", help="Additional labels to give to the produced image"
    ),
    platforms: Optional[List[str]] = Option(
        None, "-p", "--platform", help="Platforms to build the image for"
    ),
    add_hosts: Optional[List[str]] = Option(
        None,
        "-h",
        "--add-host",
        help="Add known hosts entries into the generated docker image",
    ),
    push: bool = Option(False, "--push", help="Push the image to registry after build"),
    load: bool = Option(
        False, "--load", help="Load the image into local docker engine after build"
    ),
    dump: bool = Option(
        False,
        "--dump",
        help="Dump the image filesystem into the current directory after build",
    ),
    build_args: Optional[List[str]] = Option(
        None, "--build-args", help="Additional build arguments"
    ),
    builder: Optional[str] = Option(
        None,
        "--builder",
        help="Custom builder to use (see --builder option for 'docker buildx build' command)",
    ),
    cache: bool = Option(
        True, "--cache", help="Cache generated layers to speed up build"
    ),
    cache_from: Optional[str] = Option(
        None,
        "--cache-from",
        help="Reuse cache from given location. Can be a remote docker image",
    ),
    cache_to: Optional[str] = Option(
        None,
        "--cache-to",
        help="Store intermediate layer and produced cache into given destination",
    ),
    network: Optional[str] = Option(
        None, "--network", help="Use a specific network mode during build"
    ),
    output: Optional[str] = Option(
        None, "--output", help="Custom output for 'docker buildx build' command"
    ),
    progress: str = Option("auto", "--progress", help="Progress display mode"),
    pull: bool = Option(False, "--pull", help="Always pull images before build"),
    secrets: Optional[List[str]] = Option(
        None, "--secret", help="Secrets to mount during build"
    ),
    # Don't know what those two options are for
    allow: Optional[List[str]] = None,
    ssh: Optional[str] = None,
    target: Optional[str] = None,
) -> None:
    """Build a docker image."""
    if builder_file:
        try:
            images = [Image.from_file(builder_file)]
        except ValidationError as err:
            console.print(err, style="red")
            exit(1)
    elif name:
        catalog = Catalog.from_directory(Path.cwd())
        try:
            images = [next(image for image in catalog.images if image.name == name)]
        except StopIteration:
            console.print(
                f"Build config for image {style_str(name, 'bold')} does not exist",
                style="red",
            )
            exit(1)
    else:
        try:
            images = Catalog.from_directory(Path.cwd()).images
        except ValidationError as err:
            console.print(err, style="red")
            exit(1)

    _add_hosts = map_string_to_dict(add_hosts)
    _labels = map_string_to_dict(labels or [])
    names = [tag for tag in tags] if tags else []

    is_key = True
    kwargs = {}
    for extra_arg in ctx.args:
        if is_key:
            key = extra_arg
            if key.startswith("--"):
                key = key[2:]
            elif key.startswith("-"):
                key = key[1:]
            if "=" in extra_arg:
                key, value = key.split("=")
                key = key.replace("-", "_").upper()
                kwargs[key] = value
                is_key = True
                continue
            else:
                is_key = False
                continue
        key = key.replace("-", "_").upper()
        kwargs[key] = extra_arg
        is_key = True
    _user_build_args = map_string_to_dict(build_args or [])
    _user_build_args = {**_user_build_args, **kwargs}

    if dump:
        _output = {"type": "local", "dest": "."}
    elif output:
        _output = map_string_to_dict(output)
    else:
        _output = {}

    if progress.lower() in ("0", "false", "no", "n"):
        _progress: Union[bool, str] = False
    else:
        _progress = progress

    # TODO: Is there some order ?
    for image in images:
        _build_args = {
            arg.name.upper(): arg.default
            for arg in image.build.build_args
            if arg.default
        }

        _build_args.update(
            {key.upper(): value for key, value in _user_build_args.items()}
        )
        _names = names or [image.get_name()]

        logger.debug(f"Building image {_names[0]} with build arguments: {_build_args}")
        if len(_names) > 1:
            for name in _names[1:]:
                logger.debug(f"Using additional tag: {name}")

        build_context = BuildContext(
            context_path=context or image.build.context,
            add_hosts=_add_hosts or image.build.add_hosts,
            allow=list(allow or []),
            build_args=_build_args,
            builder=builder,
            cache=cache,
            cache_from=cache_from,
            cache_to=cache_to,
            file=file or image.build.file,
            labels={**image.labels, **_labels},
            load=load,
            network=network,
            output=_output,
            platforms=list(platforms or []) or image.platforms,
            progress=_progress,
            pull=pull,
            push=push,
            secrets=secrets,
            ssh=ssh,
            tags=_names,
            target=target,
        )
        docker.buildx.build(**build_context.dict())
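The loop over `ctx.args` in build() above turns unknown `--key value` / `--key=value` pairs into upper-cased build-arg overrides (receiving those extra arguments in Typer normally requires `context_settings={"allow_extra_args": True, "ignore_unknown_options": True}` on the command). Here is a minimal sketch that isolates just that parsing logic with a hypothetical input; it is not the project's code:

from typing import Dict, List


def parse_extra_args(extra_args: List[str]) -> Dict[str, str]:
    """Mimic the ctx.args handling above: strip leading dashes, split on '=',
    upper-case the key and replace '-' with '_'."""
    kwargs: Dict[str, str] = {}
    is_key = True
    key = ""
    for extra_arg in extra_args:
        if is_key:
            key = extra_arg.lstrip("-")
            if "=" in extra_arg:
                key, value = key.split("=", 1)
                kwargs[key.replace("-", "_").upper()] = value
                continue
            is_key = False
            continue
        kwargs[key.replace("-", "_").upper()] = extra_arg
        is_key = True
    return kwargs


# parse_extra_args(["--base-image=alpine", "--py", "3.11"])
# -> {"BASE_IMAGE": "alpine", "PY": "3.11"}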
Example no. 23
def main(
        exp_min: float = Argument(..., help='Minimal exponent'),
        exp_max: float = Argument(..., help='Maximal exponent'),
        ei_snapshots: int = Argument(
            ..., help='Number of snapshots for empirical interpolation.'),
        ei_size: int = Argument(..., help='Number of interpolation DOFs.'),
        snapshots: int = Argument(
            ..., help='Number of snapshots for basis generation.'),
        rb_size: int = Argument(..., help='Size of the reduced basis.'),
        cache_region: Choices('none memory disk persistent') = Option(
            'disk',
            help='Name of cache region to use for caching solution snapshots.'
        ),
        ei_alg: Choices('ei_greedy deim') = Option(
            'ei_greedy', help='Interpolation algorithm to use.'),
        grid: int = Option(60, help='Use grid with (2*NI)*NI elements.'),
        grid_type: Choices('rect tria') = Option('rect',
                                                 help='Type of grid to use.'),
        initial_data: Choices('sin bump') = Option(
            'sin', help='Select the initial data (sin, bump).'),
        ipython_engines: int = Option(
            0,
            help='If positive, the number of IPython cluster engines to use for parallel greedy search. '
                 'If zero, no parallelization is performed.'),
        ipython_profile: str = Option(
            None, help='IPython profile to use for parallelization.'),
        lxf_lambda: float = Option(
            1., help='Parameter lambda in Lax-Friedrichs flux.'),
        periodic: bool = Option(
            True,
            help='If not, solve with dirichlet boundary conditions on left and bottom boundary.'),
        nt: int = Option(100, help='Number of time steps.'),
        num_flux: Choices('lax_friedrichs engquist_osher') = Option(
            'engquist_osher', help='Numerical flux to use.'),
        plot_err: bool = Option(False, help='Plot error.'),
        plot_ei_err: bool = Option(False,
                                   help='Plot empirical interpolation error.'),
        plot_error_landscape: bool = Option(
            False,
            help='Calculate and show plot of reduction error vs. basis sizes.'
        ),
        plot_error_landscape_M: int = Option(
            10, help='Number of collateral basis sizes to test.'),
        plot_error_landscape_N: int = Option(
            10, help='Number of basis sizes to test.'),
        plot_solutions: bool = Option(False,
                                      help='Plot some example solutions.'),
        test: int = Option(
            10,
            help='Number of snapshots to use for stochastic error estimation.'
        ),
        vx: float = Option(1., help='Speed in x-direction.'),
        vy: float = Option(1., help='Speed in y-direction.'),
):
    """Model order reduction of a two-dimensional Burgers-type equation
    (see pymor.analyticalproblems.burgers) using the reduced basis method
    with empirical operator interpolation.
    """
    print('Setup Problem ...')
    problem = burgers_problem_2d(vx=vx,
                                 vy=vy,
                                 initial_data_type=initial_data.value,
                                 parameter_range=(exp_min, exp_max),
                                 torus=periodic)

    print('Discretize ...')
    if grid_type == 'rect':
        grid *= 1. / math.sqrt(2)
    fom, _ = discretize_instationary_fv(
        problem,
        diameter=1. / grid,
        grid_type=RectGrid if grid_type == 'rect' else TriaGrid,
        num_flux=num_flux.value,
        lxf_lambda=lxf_lambda,
        nt=nt)

    if cache_region != 'none':
        # building a cache_id is only needed for persistent CacheRegions
        cache_id = (
            f"pymordemos.burgers_ei {vx} {vy} {initial_data}"
            f"{periodic} {grid} {grid_type} {num_flux} {lxf_lambda} {nt}")
        fom.enable_caching(cache_region.value, cache_id)

    print(fom.operator.grid)

    print(f'The parameters are {fom.parameters}')

    if plot_solutions:
        print('Showing some solutions')
        Us = ()
        legend = ()
        for mu in problem.parameter_space.sample_uniformly(4):
            print(f"Solving for exponent = {mu['exponent']} ... ")
            sys.stdout.flush()
            Us = Us + (fom.solve(mu), )
            legend = legend + (f"exponent: {mu['exponent']}", )
        fom.visualize(Us,
                      legend=legend,
                      title='Detailed Solutions',
                      block=True)

    pool = new_parallel_pool(ipython_num_engines=ipython_engines,
                             ipython_profile=ipython_profile)
    eim, ei_data = interpolate_operators(
        fom, ['operator'],
        problem.parameter_space.sample_uniformly(ei_snapshots),
        error_norm=fom.l2_norm,
        product=fom.l2_product,
        max_interpolation_dofs=ei_size,
        alg=ei_alg.value,
        pool=pool)

    if plot_ei_err:
        print('Showing some EI errors')
        ERRs = ()
        legend = ()
        for mu in problem.parameter_space.sample_randomly(2):
            print(f"Solving for exponent = \n{mu['exponent']} ... ")
            sys.stdout.flush()
            U = fom.solve(mu)
            U_EI = eim.solve(mu)
            ERR = U - U_EI
            ERRs = ERRs + (ERR, )
            legend = legend + (f"exponent: {mu['exponent']}", )
            print(f'Error: {np.max(fom.l2_norm(ERR))}')
        fom.visualize(ERRs,
                      legend=legend,
                      title='EI Errors',
                      separate_colorbars=True)

        print('Showing interpolation DOFs ...')
        U = np.zeros(U.dim)
        dofs = eim.operator.interpolation_dofs
        U[dofs] = np.arange(1, len(dofs) + 1)
        U[eim.operator.source_dofs] += int(len(dofs) / 2)
        fom.visualize(fom.solution_space.make_array(U),
                      title='Interpolation DOFs')

    print('RB generation ...')

    reductor = InstationaryRBReductor(eim)

    greedy_data = rb_greedy(
        fom,
        reductor,
        problem.parameter_space.sample_uniformly(snapshots),
        use_error_estimator=False,
        error_norm=lambda U: np.max(fom.l2_norm(U)),
        extension_params={'method': 'pod'},
        max_extensions=rb_size,
        pool=pool)

    rom = greedy_data['rom']

    print('\nSearching for maximum error on random snapshots ...')

    tic = time.perf_counter()

    mus = problem.parameter_space.sample_randomly(test)

    def error_analysis(N, M):
        print(f'N = {N}, M = {M}: ', end='')
        rom = reductor.reduce(N)
        rom = rom.with_(operator=rom.operator.with_cb_dim(M))
        l2_err_max = -1
        mumax = None
        for mu in mus:
            print('.', end='')
            sys.stdout.flush()
            u = rom.solve(mu)
            URB = reductor.reconstruct(u)
            U = fom.solve(mu)
            l2_err = np.max(fom.l2_norm(U - URB))
            l2_err = np.inf if not np.isfinite(l2_err) else l2_err
            if l2_err > l2_err_max:
                l2_err_max = l2_err
                mumax = mu
        print()
        return l2_err_max, mumax

    error_analysis = np.frompyfunc(error_analysis, 2, 2)

    real_rb_size = len(reductor.bases['RB'])
    real_cb_size = len(ei_data['basis'])
    if plot_error_landscape:
        N_count = min(real_rb_size - 1, plot_error_landscape_N)
        M_count = min(real_cb_size - 1, plot_error_landscape_M)
        Ns = np.linspace(1, real_rb_size, N_count).astype(int)
        Ms = np.linspace(1, real_cb_size, M_count).astype(int)
    else:
        Ns = np.array([real_rb_size])
        Ms = np.array([real_cb_size])

    N_grid, M_grid = np.meshgrid(Ns, Ms)

    errs, err_mus = error_analysis(N_grid, M_grid)
    errs = errs.astype(float)

    l2_err_max = errs[-1, -1]
    mumax = err_mus[-1, -1]
    toc = time.perf_counter()
    t_est = toc - tic

    print('''
    *** RESULTS ***

    Problem:
       parameter range:                    ({exp_min}, {exp_max})
       h:                                  sqrt(2)/{grid}
       grid-type:                          {grid_type}
       initial-data:                       {initial_data}
       lxf-lambda:                         {lxf_lambda}
       nt:                                 {nt}
       not-periodic:                       {periodic}
       num-flux:                           {num_flux}
       (vx, vy):                           ({vx}, {vy})

    Greedy basis generation:
       number of ei-snapshots:             {ei_snapshots}
       prescribed collateral basis size:   {ei_size}
       actual collateral basis size:       {real_cb_size}
       number of snapshots:                {snapshots}
       prescribed basis size:              {rb_size}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {greedy_data[time]}

    Stochastic error estimation:
       number of samples:                  {test}
       maximal L2-error:                   {l2_err_max}  (mu = {mumax})
       elapsed time:                       {t_est}
    '''.format(**locals()))

    sys.stdout.flush()
    if plot_error_landscape:
        import matplotlib.pyplot as plt
        import mpl_toolkits.mplot3d  # NOQA
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # rescale the errors since matplotlib does not support logarithmic scales on 3d plots
        # https://github.com/matplotlib/matplotlib/issues/209
        surf = ax.plot_surface(M_grid,
                               N_grid,
                               np.log(np.minimum(errs, 1)) / np.log(10),
                               rstride=1,
                               cstride=1,
                               cmap='jet')
        plt.show()
    if plot_err:
        U = fom.solve(mumax)
        URB = reductor.reconstruct(rom.solve(mumax))
        fom.visualize(
            (U, URB, U - URB),
            legend=('Detailed Solution', 'Reduced Solution', 'Error'),
            title='Maximum Error Solution',
            separate_colorbars=True)

    global test_results
    test_results = (ei_data, greedy_data)
Example no. 24
def main(
    backend: Choices('pymor fenics') = Argument(..., help='Discretization toolkit to use.'),
    alg: Choices('greedy adaptive_greedy pod') = Argument(..., help='The model reduction algorithm to use.'),
    snapshots: int = Argument(
        ...,
        help='greedy/pod: number of training set parameters\n\n'
             'adaptive_greedy: size of validation set.'
    ),
    rbsize: int = Argument(..., help='Size of the reduced basis.'),
    test: int = Argument(..., help='Number of test parameters for reduction error estimation.'),
):
    """Reduced basis approximation of the heat equation."""
    # discretize
    ############
    if backend == 'pymor':
        fom = discretize_pymor()
    elif backend == 'fenics':
        fom = discretize_fenics()
    else:
        raise NotImplementedError
    parameter_space = fom.parameters.space(1, 100)

    # select reduction algorithm with error estimator
    #################################################
    coercivity_estimator = ExpressionParameterFunctional('1.', fom.parameters)
    reductor = ParabolicRBReductor(fom, product=fom.h1_0_semi_product, coercivity_estimator=coercivity_estimator)

    # generate reduced model
    ########################
    if alg == 'greedy':
        rom = reduce_greedy(fom, reductor, parameter_space, snapshots, rbsize)
    elif alg == 'adaptive_greedy':
        rom = reduce_adaptive_greedy(fom, reductor, parameter_space, snapshots, rbsize)
    elif alg == 'pod':
        rom = reduce_pod(fom, reductor, parameter_space, snapshots, rbsize)
    else:
        raise NotImplementedError

    # evaluate the reduction error
    ##############################
    results = reduction_error_analysis(
        rom, fom=fom, reductor=reductor, error_estimator=True,
        error_norms=[lambda U: DT * np.sqrt(np.sum(fom.h1_0_semi_norm(U)[1:]**2))],
        error_norm_names=['l^2-h^1'],
        condition=False, test_mus=parameter_space.sample_randomly(test, seed=999), plot=True
    )

    # show results
    ##############
    print(results['summary'])
    import matplotlib.pyplot as plt
    plt.show()

    # write results to disk
    #######################
    from pymor.core.pickle import dump
    dump(rom, open('reduced_model.out', 'wb'))
    results.pop('figure')  # matplotlib figures cannot be serialized
    dump(results, open('results.out', 'wb'))

    # visualize reduction error for worst-approximated mu
    #####################################################
    mumax = results['max_error_mus'][0, -1]
    U = fom.solve(mumax)
    U_RB = reductor.reconstruct(rom.solve(mumax))
    if backend == 'fenics':  # right now the fenics visualizer does not support time trajectories
        U = U[len(U) - 1].copy()
        U_RB = U_RB[len(U_RB) - 1].copy()
    fom.visualize((U, U_RB, U - U_RB), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                  separate_colorbars=True)

    return results
Example no. 25
def check_env(cmd: str = Argument(
    metavar="cmd",
    default=...,
    help="command name to check",
)):
    """Check the environemnt is okay"""
Example no. 26
def main(n: int = Argument(..., min=0, help="The input n of fact(n)")) -> None:
    """Compute factorial of a given input."""
    colorama.init(autoreset=True, strip=False)

    print(f"fact({colorama.Fore.CYAN}{n}{colorama.Fore.RESET}) = "
          f"{colorama.Fore.GREEN}{factorial(n)}{colorama.Fore.RESET}")
Example no. 27
def main(grid_intervals: int = Argument(..., help='Grid interval count.'),
         training_samples: int = Argument(
             ...,
             help='Number of samples used for training the reduced basis.')):
    """Example script for solving linear PDE-constrained parameter optimization problems"""

    fom, mu_bar = create_fom(grid_intervals)

    parameter_space = fom.parameters.space(0, np.pi)
    ranges = parameter_space.ranges['diffusion']

    initial_guess = fom.parameters.parse([0.25, 0.5])

    def fom_objective_functional(mu):
        return fom.output(mu)

    def fom_gradient_of_functional(mu):
        return fom.output_d_mu(fom.parameters.parse(mu),
                               return_array=True,
                               use_adjoint=True)

    from functools import partial
    from scipy.optimize import minimize
    from time import perf_counter

    opt_fom_minimization_data = {
        'num_evals': 0,
        'evaluations': [],
        'evaluation_points': [],
        'time': np.inf
    }
    tic = perf_counter()
    opt_fom_result = minimize(partial(record_results, fom_objective_functional,
                                      fom.parameters.parse,
                                      opt_fom_minimization_data),
                              initial_guess.to_numpy(),
                              method='L-BFGS-B',
                              jac=fom_gradient_of_functional,
                              bounds=(ranges, ranges),
                              options={'ftol': 1e-15})
    opt_fom_minimization_data['time'] = perf_counter() - tic

    reference_mu = opt_fom_result.x

    from pymor.algorithms.greedy import rb_greedy
    from pymor.reductors.coercive import CoerciveRBReductor
    from pymor.parameters.functionals import MinThetaParameterFunctional

    coercivity_estimator = MinThetaParameterFunctional(
        fom.operator.coefficients, mu_bar)

    training_set = parameter_space.sample_uniformly(training_samples)
    training_set_simple = [mu['diffusion'] for mu in training_set]

    RB_reductor = CoerciveRBReductor(fom,
                                     product=fom.energy_product,
                                     coercivity_estimator=coercivity_estimator)
    RB_greedy_data = rb_greedy(fom, RB_reductor, training_set, atol=1e-2)
    rom = RB_greedy_data['rom']

    def rom_objective_functional(mu):
        return rom.output(mu)

    def rom_gradient_of_functional(mu):
        return rom.output_d_mu(fom.parameters.parse(mu),
                               return_array=True,
                               use_adjoint=True)

    opt_rom_minimization_data = {
        'num_evals': 0,
        'evaluations': [],
        'evaluation_points': [],
        'time': np.inf,
        'offline_time': RB_greedy_data['time']
    }

    tic = perf_counter()
    opt_rom_result = minimize(partial(record_results, rom_objective_functional,
                                      fom.parameters.parse,
                                      opt_rom_minimization_data),
                              initial_guess.to_numpy(),
                              method='L-BFGS-B',
                              jac=rom_gradient_of_functional,
                              bounds=(ranges, ranges),
                              options={'ftol': 1e-15})
    opt_rom_minimization_data['time'] = perf_counter() - tic

    print("\nResult of optimization with FOM model and adjoint gradient")
    report(opt_fom_result, fom.parameters.parse, opt_fom_minimization_data,
           reference_mu)
    print("Result of optimization with ROM model and adjoint gradient")
    report(opt_rom_result, fom.parameters.parse, opt_rom_minimization_data,
           reference_mu)
Example no. 28
def cli(
    repo_root: str = Argument(..., help="the fullpath to the git repository"),
    tags_regex: str = Option(
        "^v[0-9]", help="regex to select tags to show on changelog"
    ),
    starting_rev: str = Option(
        None,
        help="starting revision (if not set latest tag starting with "
        "ghtc_changelog_start if exists, else first git commit)",
    ),
    remove_duplicates_entries: bool = Option(
        True, help="if True, remove duplicate entries"
    ),
    unreleased: bool = Option(
        True, help="if True, add a section about unreleased changes"
    ),
    override_file: str = Option(
        ".ghtc_overrides.ini", help="the path/name of the 'commit overrides' file"
    ),
    include_type: List[str] = Option(
        [],
        help="include (only) given conventional types in changelog (can be used "
        "multiple times, all types by default), available types: %s" % ALL_TYPES,
    ),
    title: str = "CHANGELOG",
    unreleased_title: str = "[Unreleased]",
    debug: bool = Option(False, help="add debug values for each changelog entry"),
):
    overrides = Overrides(override_file)
    overrides.parse()
    repo = Repo(repo_root)
    previous_tag = starting_rev
    context: Dict[str, Any] = {
        "TITLE": title,
        "UNRELEASED_TAG_TIMESTAMP": UNRELEASED_TAG_TIMESTAMP,
        "TAGS": [],
    }
    tags = get_tags(repo, tags_regex)
    if len(include_type) == 0:
        # if include_type is empty, we consider we want all types
        included_cats = [x.name.lower() for x in list(ConventionalCommitType)]
    else:
        included_cats = [x.strip().lower() for x in include_type]
    if unreleased:
        tags.append(None)
    for tag in tags:
        if tag is None:
            tag_name = unreleased_title
            tag_date = UNRELEASED_TAG_TIMESTAMP
            rev = None
        else:
            tag_name = tag.name
            tag_date = tag.object.authored_date
            rev = tag_name
        reverted_commits = []
        for commit in get_commits_between(repo, previous_tag, rev):
            reverted_commit = get_reverted_commit(commit)
            if reverted_commit is not None:
                reverted_commits.append(reverted_commit)
        lines: Dict[ConventionalCommitType, List[ChangelogLine]] = {}
        for commit in get_commits_between(repo, previous_tag, rev):
            if commit.hexsha in reverted_commits:
                continue
            msg: Optional[ConventionalCommitMessage] = None
            if commit.hexsha in overrides.commits:
                msg = overrides.commits[commit.hexsha]
                if msg is None:
                    # ignored message
                    continue
            else:
                msg = parse(commit.message)
            if msg is None:
                continue
            cat = msg.type
            if cat.name.lower() not in included_cats:
                continue
            cline = ChangelogLine(msg, commit.hexsha, commit.committed_date)
            if cat not in lines:
                lines[cat] = []
            if remove_duplicates_entries and cline in lines[cat]:
                continue
            lines[cat].insert(0, cline)
        entry = ChangelogEntryForATag(tag_name, tag_date, lines)
        if tag is not None or len(lines) > 0:
            context["TAGS"].append(entry)
        context["DEBUG"] = debug
        previous_tag = tag
    print(render_template(context))
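The changelog loop above assumes a helper like `get_commits_between(repo, previous_tag, rev)` that yields the commits reachable from `rev` but not from `previous_tag`. A hedged guess at what such a helper could look like with GitPython; ghtc's actual implementation may differ:

from typing import Iterator, Optional

from git import Commit, Repo


def get_commits_between(repo: Repo, previous_rev: Optional[str], rev: Optional[str]) -> Iterator[Commit]:
    """Yield commits reachable from `rev` (HEAD if None) but not from `previous_rev`."""
    target = rev if rev is not None else "HEAD"
    rev_range = f"{previous_rev}..{target}" if previous_rev else target
    return repo.iter_commits(rev_range)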
Example no. 29
from typer import Argument, Option
from .. import settings

# Source

source = Argument(
    default=None,
    help="Data source [default: stdin]",
)

type = Option(
    default=None,
    help='Specify type e.g. "package"',
)

# File

path = Option(
    default=None,
    help="Specify the data path explicitly (e.g. you need to use it if your data is JSON)",
)

scheme = Option(
    default=None,
    help="Specify scheme  [default: inferred]",
)

format = Option(
    default=None,
    help="Specify format  [default: inferred]",
)
Example no. 30
def main(
        subreddit: str = Argument(..., help=HelpMessages.subreddit),
        output_dir: str = Option("./data/", help=HelpMessages.output_dir),
        batch_size: int = Option(10, help=HelpMessages.batch_size),
        laps: int = Option(3, help=HelpMessages.laps),
        reddit_id: str = Option(..., help=HelpMessages.reddit_id),
        reddit_secret: str = Option(..., help=HelpMessages.reddit_secret),
        reddit_username: str = Option(..., help=HelpMessages.reddit_username),
        utc_after: Optional[str] = Option(None, help=HelpMessages.utc_after),
        utc_before: Optional[str] = Option(None, help=HelpMessages.utc_before),
        debug: bool = Option(False, help=HelpMessages.debug),
):
    """
    Download all the submissions and relative comments from a subreddit.
    """

    # Init
    direction, out_manager = init_locals(debug,
                                         output_dir,
                                         subreddit,
                                         utc_after,
                                         utc_before,
                                         run_args=locals())
    pushshift_api, reddit_api = init_clients(reddit_id, reddit_secret,
                                             reddit_username)
    logger.info(f"Start download: "
                f"UTC range: [{utc_before}, {utc_after}], "
                f"direction: `{direction}`, "
                f"batch size: {batch_size}, "
                f"total submissions to fetch: {batch_size * laps}")

    # Start the gathering
    for lap in range(laps):
        lap_message = f"Lap {lap}/{laps} completed in ""{minutes:.1f}m | " \
                      f"[new/tot]: {len(out_manager.comments_list)}/{out_manager.total_comments_counter}"
        with Timer(text=lap_message, logger=logger.info):

            # Reset the data already stored
            out_manager.reset_lists()

            # Fetch data in the `direction` way
            submissions_generator = pushshift_api.search_submissions(
                subreddit=subreddit,
                limit=batch_size,
                sort='desc',
                sort_type='created_utc',
                after=utc_after if direction == "after" else None,
                before=utc_before if direction == "before" else None,
            )

            for sub in submissions_generator:
                # Fetch the submission data
                submission_fetcher(sub, out_manager)

                # Fetch the submission's comments
                comments_fetcher(sub, out_manager, reddit_api)

                # Calculate the UTC seen range
                utc_after, utc_before = utc_range_calculator(
                    sub.created_utc, utc_after, utc_before)

            # Store data (submission and comments)
            out_manager.store(lap)

        logger.debug(f"utc_after: {utc_after} , utc_before: {utc_before}")
    out_manager.store_utc_params(utc_newer=utc_after, utc_older=utc_before)

    assert utc_before < utc_after, f"utc_before '{utc_before}' should be less than utc_after '{utc_after}'"
    logger.info(
        f"Stop download: lap {laps}/{laps} [total]: {out_manager.total_comments_counter}"
    )