Example #1
def backup_directory(ftp_obj, local_dir, remote_dir):
    os.chdir(local_dir)
    try:
        ftp_obj.cwd(remote_dir)
        files, directories = get_files_directories(ftp_obj)

        with typer.progressbar(files, label="Downloading files") as progress:
            for f in progress:
                try:
                    with open(f, 'wb') as local_file:
                        ftp_obj.retrbinary('RETR ' + f, local_file.write)
                except ftplib.error_perm:
                    typer.echo("Skipped " +
                               typer.style(f, fg=typer.colors.RED) +
                               " due to permissions")

        with typer.progressbar(directories,
                               label="Downloading folders") as progress:
            for d in progress:
                newremote = remote_dir + d + '/'
                newlocal = local_dir + '/' + d + '/'
                os.mkdir(newlocal)
                backup_directory(ftp_obj, newlocal, newremote)
    except ftplib.error_perm:
        typer.secho("Skipped remote dir due to permissions",
                    fg=typer.colors.RED)
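
The get_files_directories helper is not shown in this example. A minimal sketch of what it could look like, assuming the FTP server supports the MLSD command (the helper's name and return shape are inferred from the call site above, not from the example's source):

def get_files_directories(ftp_obj):
    # Split the current remote directory listing into plain files and
    # subdirectories, using MLSD facts (not supported by every server)
    files, directories = [], []
    for name, facts in ftp_obj.mlsd():
        if facts.get("type") == "file":
            files.append(name)
        elif facts.get("type") == "dir":
            directories.append(name)
    return files, directories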
Example #2
def generate_gif(
    directory: Path = typer.Argument(
        ..., help="The directory to scan for ascii art"),
    output: Path = typer.Argument(
        ..., help="The name of the file the gif should be written to"),
    frame_length: int = typer.Option(default=150,
                                     help="The length of a frame, in ms"),
):
    """Generate an animated GIF from a series of ascii art files

    The files in DIRECTORY will be alphabetized, turned into images, and smashed together to create an animated GIF

    The resulting gif will be written to OUTPUT
    """
    file_names = []
    typer.secho("Loading Image Files...")
    with os.scandir(directory) as it, typer.progressbar(it) as progress:
        for entry in progress:
            if entry.is_file() and entry.name.endswith('.ascii'):
                file_names.append(entry.path)

    file_names.sort()

    typer.secho("Generating Frames...")
    frames = []
    with typer.progressbar(file_names) as progress:
        for fname in progress:
            with open(fname, 'r') as f:
                frames.append(create_frame(f.readlines()))

    typer.secho("Writing GIF...")
    frames_to_gif(output, frames, frame_length)

    typer.secho("done", bold=True, fg="green")
Example #3
def liftBed(
    fin: Path,
    fout: Path,
    chainfile: Path,
    liftOverPath: Path,
) -> Tuple[Set[str], Set[str], bool]:
    console.print(f"Lifting [green]BED[/] file [blue]{fin.name}[/]...")
    params = {
        "LIFTOVER_BIN": liftOverPath.resolve(),
        "OLD": fin,
        "CHAIN": chainfile,
        "NEW": fout,
        "UNLIFTED": f"{fout}.unlifted",
    }

    # invoke UCSC liftOver positionally: liftOver OLD CHAIN NEW UNLIFTED
    check_output(params.values())
    # record lifted/unlifted rs IDs
    unlifted_lines = Path(params["UNLIFTED"]).read_text().split("\n")
    console.print(f"Processing [red]unlifted[/] {fout.name}.unlifted")
    with progressbar(unlifted_lines) as unlifted:
        unlifted_set = {
            ln.strip().split()[-1]
            for ln in unlifted if len(ln) > 0 and ln[0] != "#"
        }

    console.print(f"Processing [red]new[/] {fout.name}")
    new_bed_lines = Path(params["NEW"]).read_text().split("\n")
    with progressbar(new_bed_lines) as new_bed:
        lifted_set = {
            ln.strip().split()[-1]
            for ln in new_bed if len(ln) != 0 and ln[0] != "#"
        }

    return lifted_set, unlifted_set, True
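
Note that check_output(params.values()) relies on dict insertion order (guaranteed since Python 3.7) to pass the liftOver arguments positionally. An equivalent, more explicit call under the same assumptions:

from subprocess import check_output

# liftOver's positional interface is: liftOver oldFile map.chain newFile unMapped
check_output([
    str(liftOverPath.resolve()),
    str(fin),
    str(chainfile),
    str(fout),
    f"{fout}.unlifted",
])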
Example #4
def cli_pull(folder_struct_list, upload_file_name, upload_file_abs_path):
    """
    Replace the local ipynb with the remote ipynb from Google Colab
    :param folder_struct_list:
    :param upload_file_name:
    :param upload_file_abs_path:
    """
    AUTH_USER_ID = check_all_config()

    total = 100
    # progress.update() advances the bar by the given amount, so the
    # increments below sum to the total of 100
    with typer.progressbar(length=total) as progress:
        drive = drive_auth()
        progress.update(15)

        COLAB_NB_FOLD_ID = get_colab_folder_id(drive)
        progress.update(15)

        final_folder_id = fold_struct_gen(drive, COLAB_NB_FOLD_ID,
                                          folder_struct_list)
        progress.update(15)

        download_file(drive, upload_file_name, upload_file_abs_path,
                      final_folder_id)
        progress.update(55)

        message = f"\n local {upload_file_name} updated with the one in  google drive"
        message = typer.style(message, fg=typer.colors.GREEN, bold=True)
        typer.echo(message)
Example #5
def mesures_thematiques(
    mesures_dir=paths.mesures_markdown_dir,
    thematiques_file=paths.thematique_markdown_file,
) -> None:
    """
    Regenerate (overwrite) markdown files using thematiques
    """
    markdown = load_md(thematiques_file)
    thematiques = build_thematiques(markdown)
    thematiques_lookup = {name: key for key, name in thematiques.items()}

    mesures_files = glob.glob(os.path.join(mesures_dir, "*.md"))

    with typer.progressbar(mesures_files) as progress:
        for filename in progress:
            md = load_md(filename)
            mesure = build_mesure(md)
            # -- extract this for future regen function on mesures --
            climat_pratic = mesure["climat_pratic"]
            mesure["climat_pratic_id"] = thematiques_lookup[climat_pratic]
            # ---
            md = mesure_to_markdown(mesure)
            write(filename, md)

    typer.echo(f"All {len(mesures_files)} 'mesures' were regenerated.")
Example #6
def translate(
    input_folder: Path,
    source_lang: str,
    target_lang: str,
    auth_key: str,
    input_glob: str,
    output_folder: t.Optional[Path] = None,
    output_format: ag.GraphFormat = ag.GraphFormat.ARGUEBUF,
    clean: bool = False,
    overwrite: bool = False,
    parallel: bool = True,
    start: int = 1,
) -> None:
    if not output_folder:
        output_folder = input_folder

    if clean:
        shutil.rmtree(output_folder)
        output_folder.mkdir()

    path_pairs = model.PathPair.create(input_folder, output_folder, input_glob, ".json")
    translator = graph_translator.Translator(auth_key, source_lang, target_lang)
    bar: t.Iterable[model.PathPair]

    with typer.progressbar(
        path_pairs[start - 1 :],
        item_show_func=model.PathPair.label,
        show_pos=True,
    ) as bar:
        for path_pair in bar:
            if overwrite or not path_pair.target.exists():
                graph = ag.Graph.from_file(path_pair.source)
                translator.translate_graph(graph, parallel)
                graph.to_file(path_pair.target, output_format)
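
For reference, item_show_func receives the item currently being processed (or None before the first and after the last item) and returns the text shown beside the bar. A minimal self-contained sketch of the same mechanism, with a stand-in label function:

import time
import typing as t

import typer

def show_label(item: t.Optional[str]) -> str:
    # Called by the progress bar with the current item, or None between items
    return item or ""

with typer.progressbar(["a.json", "b.json", "c.json"],
                       item_show_func=show_label,
                       show_pos=True) as bar:
    for name in bar:
        time.sleep(0.1)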
Example #7
def inject(dat: typer.FileText = typer.Option(..., "-dat", "-d"), config: typer.FileTextWrite = typer.Option(..., "-config", "-c", mode="a")):
    """
    Injects a comment/code block at the end of a file
    """
    try:
        typer.secho("Initializing Injection...",
                    fg=typer.colors.BRIGHT_CYAN)
        time.sleep(0.8)
        typer.secho("Reading files...", fg=typer.colors.CYAN)
        time.sleep(1.0)
        num = 0
        for line in dat:
            n = config.write(line)
            num += n
            time.sleep(0.6)
        config.write("\n\n")

        with typer.progressbar(range(100), label="Processing") as progress:
            for value in progress:
                time.sleep(0.01)
        typer.echo(f"Processed {num} letters.")
        typer.secho("Injection Status: SUCCESS",
                    fg=typer.colors.BRIGHT_GREEN, bold=True)
    except Exception:
        typer.secho("Injection Status: FAILED",
                    fg=typer.colors.BRIGHT_RED, bold=True, err=True)
        typer.echo("Some unknown error occurred.")
        raise typer.Abort()
Example #8
def export(
    input: str = typer.Option(
        ...,
        "--input",
        "-i",
        help="The URI specifying how to access the input preserve database.",
    ),
    output: str = typer.Option(
        ...,
        "--output",
        "-o",
        help="The URI specifying how to access the input preserve database.",
    ),
):
    assert input != output

    with Halo(text=f"Opening input preserve: {input}.", spinner="dots") as sp:
        in_preserve_db = preserve.from_uri(input)
        sp.succeed()

    with Halo(text=f"Opening output preserve: {output}.",
              spinner="dots") as sp:
        out_preserve_db = preserve.from_uri(output)
        sp.succeed()

    with typer.progressbar(list(in_preserve_db),
                           label="Exporting data") as progress:
        for idx in progress:
            out_preserve_db[idx] = in_preserve_db[idx]

    in_preserve_db.close()
    out_preserve_db.close()
Example #9
def main(
        start_date: str = typer.Argument(..., help=start_date_help),
        end_date: Optional[str] = typer.Argument(None, help=end_date_help),
):
    month_from, year_from = start_date.split("/")
    month_to, year_to = (end_date.split("/") if end_date else (
        month_from,
        year_from,
    ))

    typer.echo(
        f"Period: between {month_from}-{year_from} and {month_to}-{year_to}")
    typer.echo("Fetching released albums for the period on Metal Archives...")
    albums = get_released_albums(month_from=month_from,
                                 month_to=month_to,
                                 year_from=year_from,
                                 year_to=year_to)
    typer.echo(f"Found [{len(albums)}] albums on Metal Archives")

    typer.echo("Setting up Spotify API...")
    load_dotenv()
    spotify_api = setup_spotify()

    typer.echo("Fetching album data on Spotify...")
    with typer.progressbar(albums) as progress:
        for album in progress:
            spotify_album = search_album(album, spotify_api)
            if spotify_album:
                album["spotify_album"] = spotify_album
                album["date"] = spotify_album.release_date

    markdown_table = Table(albums)
    markdown_table.print_markdown()
Example #10
def main(
    model: Supported = Argument(Supported.default, help="sensor model"),
    port: str = Argument("/dev/ttyUSB0", help="serial port"),
    db_path: Path = Argument(Path("pypms.sqlite"), help="sensor messages DB"),
    samples: int = Option(4, "--samples", "-n"),
    interval: int = Option(20, "--interval", "-i"),
):
    """
    Read raw messages from a supported sensor and store them on a sqlite DB.
    After reading the sensor, decode all messages on DB and print them.
    """

    # get DB context manager
    message_db = pypms_db(db_path)
    sensor = Sensor[model]

    # read from sensor and write to DB
    with message_db() as db, SensorReader(sensor, port, interval, samples) as reader:
        # read one observation from the sensor at a time
        with progressbar(reader(raw=True), length=samples, label=f"reading {sensor}") as progress:
            for obs in progress:
                write_message(db, sensor, obs)

    # read and decode all messages on the DB
    with message_db() as db:
        # extract observations from one sensor at a time
        print(sensor)
        for obs in read_obs(db, sensor):
            print(obs)
Example #11
def cli_open(folder_struct_list, upload_file_name, upload_file_abs_path):

    AUTH_USER_ID = check_all_config()

    total = 100
    # progress.update() advances the bar by the given amount, so the
    # increments below sum to the total of 100
    with typer.progressbar(length=total) as progress:
        drive = drive_auth()
        progress.update(15)
        COLAB_NB_FOLD_ID = get_colab_folder_id(drive)
        progress.update(15)
        final_folder_id = fold_struct_gen(drive, COLAB_NB_FOLD_ID,
                                          folder_struct_list)
        progress.update(15)
        new_file_metadata = get_file_meta(upload_file_name, final_folder_id)
        progress.update(15)
        new_file_id = create_new_file(drive, new_file_metadata,
                                      upload_file_abs_path, upload_file_name,
                                      final_folder_id)
        progress.update(15)
        colab_url = f'https://colab.research.google.com/drive/{new_file_id}?authuser={AUTH_USER_ID}'
        drive_folder_url = f'https://drive.google.com/drive/u/{AUTH_USER_ID}/folders/{final_folder_id}'
        progress.update(15)
        webbrowser.open(url=colab_url)
        progress.update(10)

        message = f"\n {upload_file_name} added to drive folder"
        message = typer.style(message, fg=typer.colors.GREEN, bold=True)
        typer.echo(message)
        message = f"\n drive folder url: {drive_folder_url}"
        message = typer.style(message, fg=typer.colors.CYAN, bold=True)
        typer.echo(message)

        message = f"\n colab file url: {colab_url}"
        message = typer.style(message, fg=typer.colors.BRIGHT_CYAN, bold=True)
        typer.echo(message)
Example #12
def translate(
    folder_in: Path,
    folder_out: Path,
    source_lang: str,
    target_lang: str,
    auth_key: str,
    input_glob: str,
    output_suffix: str,
    clean: bool = False,
    overwrite: bool = False,
    start: int = 1,
) -> None:
    if clean:
        shutil.rmtree(folder_out)
        folder_out.mkdir()

    paths = model.PathPair.create(folder_in, folder_out, input_glob,
                                  output_suffix)
    translator = dl.Translator(auth_key, dl.Language(source_lang),
                               dl.Language(target_lang))

    with typer.progressbar(
            paths[start - 1:],
            item_show_func=model.PathPair.label,
            show_pos=True,
    ) as bar:
        for path_pair in bar:
            if overwrite or not path_pair.target.exists():
                with path_pair.source.open("r") as file:
                    source_text = file.read()

                target_text = translator.translate_text(source_text)

                with path_pair.target.open("w") as file:
                    file.write(target_text)
Example #13
def shared(
    markdown_dir: str = paths.shared_markdown_dir,
    typescript: bool = True,
    client_output_dir: str = paths.shared_client_models_dir,
    python: bool = True,
    api_output_dir: str = paths.shared_api_models_dir,
) -> None:  # pragma: no cover
    """
    Generate shared definitions.
    """
    files = glob.glob(os.path.join(markdown_dir, "*.md"))
    with typer.progressbar(files) as progress:
        for filename in progress:
            if filename.endswith("poc.md"):
                continue

            typer.echo(f"Processing {filename}...")
            md = load_md(filename)

            if typescript:
                outputs = render_markdown_as_typescript(md)
                for name, content in outputs:
                    write(os.path.join(client_output_dir, name), content)

            if python:
                outputs = render_markdown_as_python(md)
                for name, content in outputs:
                    write(os.path.join(api_output_dir, name), content)

    typer.echo(f"Processed {len(files)} shared definitions.")
Example #14
def mesures(
    doc_file: str = typer.Option("../referentiels/sources/citergie.docx",
                                 "--docx", "-d"),
    correspondance_xlsx: str = typer.Option(
        "../referentiels/sources/correspondance_citergie_climat_pratique.xlsx",
        "--correspondance",
        "-c",
    ),
    output_dir: str = typer.Option("../referentiels/markdown/mesures_citergie",
                                   "--output", "-o"),
) -> None:
    """
    Convert source docx file to 'mesures' markdown files.
    """
    typer.echo(f"Loading docx file: '{doc_file}'...")
    document = load_docx(doc_file)
    typer.echo(f"Reading citergie document...")
    mesures = docx_to_mesures(document)
    typer.echo(f"Found {len(mesures)} 'mesures'!")
    typer.echo(f"Reading correspondance table...")
    add_climat_pratic(mesures, correspondance_xlsx)

    with typer.progressbar(mesures) as progress:
        for mesure in progress:
            filename = os.path.join(output_dir, f"{mesure['id']}.md")
            md = mesure_to_markdown_legacy(mesure)
            write(filename, md)

    typer.echo(
        f"All {len(mesures)} 'mesures' were exported in '{output_dir}' as markdown files."
    )
Example #15
def indicateurs_citergie(
    indicateurs_xlsx: str = typer.Option(
        "../referentiels/sources/indicateurs_citergie.xlsx", "--indicateurs",
        "-i"),
    correspondance_xlsx: str = typer.Option(
        "../referentiels/sources/correspondance_citergie_climat_pratique.xlsx",
        "--correspondance",
        "-c",
    ),
    output_dir: str = typer.Option(
        "../referentiels/markdown/indicateurs_citergie", "--output", "-o"),
) -> None:
    """
    Convert source xlsx files to 'indicateurs' markdown files.
    """
    typer.echo(f"Parsing files...")
    indicators = parse_indicators_xlsx(indicateurs=indicateurs_xlsx,
                                       correspondance=correspondance_xlsx)
    mds = indicators_to_markdowns_legacy(indicators)

    with typer.progressbar(mds.items()) as progress:
        for number, md in progress:
            filename = os.path.join(output_dir, f"{number}.md")
            write(filename, md)

    typer.echo(
        f"All {len(mds)} 'indicateurs' were exported in '{output_dir}' as markdown files."
    )
Example #16
def save_data(data: List[cv2.VideoCapture], label: str,
              model: tf.keras.applications.MobileNetV2):
    convt_imgs = []
    with typer.progressbar(data) as progress:
        for img in progress:
            convt_imgs.append(process_image(img))

    # Get predictions
    images4d = np.asarray(convt_imgs)
    predictions = model.predict(images4d)
    probs = tf.nn.softmax(predictions).numpy()
    typer.echo(f"Predictions completed for {len(convt_imgs)} images")
    label_dict = load_label_dict()
    label_num = set_label_number(label, label_dict)
    labels = [label_num] * len(convt_imgs)

    # Load in previous model data and append:
    probs, labels = load_image_data(probs, labels)

    # Update model
    classifier = load_classifier()
    classifier.fit(probs, labels)

    # Save model, classifier and data
    save_label_dict(label_dict)
    save_classifier(classifier)
    save_probabilities(probs, labels)
Example #17
def main():
    total = 1000
    with typer.progressbar(length=total) as progress:
        for batch in range(4):
            # Fake processing time
            time.sleep(1)
            progress.update(250)
    typer.echo(f"Processed {total} things in batches.")
Example #18
def main():
    total = 0
    with typer.progressbar(iterate_user_ids(), length=100) as progress:
        for value in progress:
            # Fake processing time
            time.sleep(0.01)
            total += 1
    print(f"Processed {total} user IDs.")
Example #19
def main():
    total = 0
    with typer.progressbar(range(100)) as progress:
        for value in progress:
            # Fake processing time
            time.sleep(0.01)
            total += 1
    print(f"Processed {total} things.")
Example #20
def main(
    output_filepath: str,
    min_length: Optional[int] = None,
    lowercase: bool = True,
    pretrained_model_name_or_path: Optional[str] = None,
) -> None:
    """Downloads and lightly preprocesses WikiText-103. If `min_length is not None`, only documents
    with at least this many tokens are retained. If `pretrained_model_name_or_path` is not None, the
    tokenizer will be loaded as `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)`
    using the HuggingFace Transformers library. Otherwise `str.split()` is used. This argument has
    no effect if `min_length is None`.
    """
    # Setup the pre-trained tokenizer, if specified
    if min_length is not None:
        if pretrained_model_name_or_path is not None:
            # Import transformers here to prevent an ImportError if the
            # user doesn't want to use it.
            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained(
                pretrained_model_name_or_path).tokenize
        else:
            tokenizer = lambda x: x.split()  # noqa
    else:
        tokenizer = None

    # Download WikiText-103
    r = requests.get(WIKITEXT_103_URL, stream=True)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    partition_filenames = z.namelist()[1:]
    typer.secho(f"{DOWNLOAD} Downloaded WikiText-103", bold=True)

    preprocessed_documents = []
    for filename in partition_filenames:
        text = z.open(filename).read().decode("utf-8")

        # Strip out subtitles and split the text into documents
        no_subtitles = re.sub(r"(=\s){2,5}.*(=\s){2,5}", "", text)
        documents = re.split(r"=\s.*\s=", no_subtitles)

        with typer.progressbar(documents,
                               label=typer.style("Preprocessing text",
                                                 bold=True)) as progress:
            for doc in progress:
                doc = _sanitize(doc, lowercase=lowercase)
                if not doc:
                    continue

                # Retain only documents whose token count is equal to or
                # greater than the minimum specified length
                if tokenizer is not None:
                    num_tokens = len(tokenizer(doc))
                    if num_tokens < min_length:
                        continue

                preprocessed_documents.append(doc)

    _write_output_to_disk(preprocessed_documents, output_filepath)
Example #21
def translate_loc(path: Path):
    popath = CURRENT_DIR / "MTGA" / "trans" / "pl"
    orig_path = path / "orig"

    mtga_files = (i for i in os.listdir(path) if i.endswith(".mtga"))
    pofiles = [i for i in os.listdir(popath) if i.endswith(".po")]

    os.makedirs(orig_path, exist_ok=True)
    with typer.progressbar(mtga_files,
                           label="Translating Game Binary") as progress:
        for filename in progress:
            filepath = path / filename
            bak_filepath = orig_path / filename
            bundle_name = filename.split("_", 1)[1].rsplit("_", 1)[0]
            poname = f"MTGA_{bundle_name}.po"

            if poname not in pofiles:
                continue
            if not bak_filepath.exists():
                shutil.move(filepath, bak_filepath)
                # shutil.move(f"{filepath}.dat", f"{bak_filepath}.dat")

            with open(bak_filepath) as source, open(filepath, "w") as outfile:
                po = polib.pofile(popath / poname)
                data = json.load(source)
                for obj in data:
                    key = obj["key"]

                    po_entry = po.find(key, by="msgctxt")
                    trans_list = obj["translations"]

                    main_lang_key = f"MainNav/Settings/LanguageNative_{_main_lang}"
                    debug_lang_key = f"MainNav/Settings/LanguageNative_{_debug_lang}"

                    if key.startswith(main_lang_key):
                        for trans_obj in obj["translations"]:
                            trans_obj["translation"] = "Polski"

                    elif key.startswith(debug_lang_key):
                        for trans_obj in obj["translations"]:
                            trans_obj["translation"] = "Polski (DEBUG)"

                    elif po_entry:
                        trans_obj = find_loc_trans_obj(trans_list,
                                                       lang=SUBSTITUTE_LANG)
                        trans_obj["translation"] = po_entry.msgstr or po_entry.msgid

                        debug_trans_obj = find_loc_trans_obj(
                            trans_list, lang=SUBSTITUTE_LANG_DEBUG)
                        debug_trans_obj["translation"] = po_entry.msgstr or key


                json.dump(fp=outfile, obj=data, ensure_ascii=False, indent=2)
            create_datfile(filepath)
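
find_loc_trans_obj is not shown here. A sketch of what it might do, inferred from the call sites above (the helper body and the "isoCode" key are assumptions about the MTGA localization format, not confirmed by the example):

def find_loc_trans_obj(trans_list, lang):
    # Hypothetical helper: return the translation entry matching `lang`
    # ("isoCode" is an assumed key name in each translation entry)
    for trans_obj in trans_list:
        if trans_obj.get("isoCode") == lang:
            return trans_obj
    return None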
Example #22
def _get_media_parts(plex: PlexServer,
                     show_title: str) -> Generator[MediaPart, None, None]:
    show: Show = plex.library.section('TV Shows').get(show_title)
    with typer.progressbar(show.episodes(),
                           label='Updating episodes',
                           show_eta=False) as progress:
        for episode in progress:
            episode = episode.reload()
            yield from episode.iterParts()
Example #23
def copy_dynamo_items(
    src_table: str,
    src_region: str,
    src_profile: str,
    dst_table: str,
    dst_region: str,
    dst_profile: str,
):
    src_table_msg = typer.style(src_table, fg=typer.colors.GREEN, bold=True)
    dst_table_msg = typer.style(dst_table, fg=typer.colors.GREEN, bold=True)

    typer.echo(f"DynamoDB: copy items from {src_table_msg} to {dst_table_msg}")

    # create client
    src_client = boto3.Session(profile_name=src_profile).client('dynamodb', region_name=src_region)
    dst_client = src_client
    if dst_region is not None:
        if dst_profile is not None:
            dst_client = boto3.Session(profile_name=dst_profile).client('dynamodb', region_name=dst_region)
        else:
            dst_client = boto3.client('dynamodb', region_name=dst_region)

    # scan
    dynamo_items = []
    api_response = src_client.scan(TableName=src_table, Select='ALL_ATTRIBUTES')
    dynamo_items.extend(api_response['Items'])

    items_len_msg = typer.style(str(len(dynamo_items)), fg=typer.colors.GREEN, bold=True)

    typer.echo(f"Collected {items_len_msg} items from source table {src_table_msg}")

    while 'LastEvaluatedKey' in api_response:
        api_response = src_client.scan(
            TableName=src_table,
            Select='ALL_ATTRIBUTES',
            ExclusiveStartKey=api_response['LastEvaluatedKey']
        )
        dynamo_items.extend(api_response['Items'])
        print("Collected total {0} items from table {1}".format(len(dynamo_items), src_table))

    # split all items into chunks; not optimal, as memory allocation is doubled,
    # but this script is not intended for unattended execution, so it should be fine
    chunk_size = 25
    current_chunk = []
    chunks = [current_chunk]
    for item in dynamo_items:
        current_chunk.append(item)
        if len(current_chunk) == chunk_size:
            current_chunk = []
            chunks.append(current_chunk)

    with typer.progressbar(chunks, label="Copying") as progress:
        for chunk in progress:
            if len(chunk) > 0:
                write_request = {dst_table: list(map(lambda x: {'PutRequest': {'Item': x}}, chunk))}
                dst_client.batch_write_item(RequestItems=write_request)
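
The hand-rolled chunking above can be written more compactly. A sketch using itertools.islice with the same behavior, minus the possible trailing empty chunk (25 matches DynamoDB's limit of 25 put requests per batch_write_item call):

from itertools import islice

def chunked(items, size=25):
    # Yield successive lists of at most `size` items
    it = iter(items)
    while chunk := list(islice(it, size)):
        yield chunk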
Example #24
def check(
    source: pathlib.Path = typer.Option(..., help="The path to the source ipynb file"),
    submitted: str = typer.Option(
        ..., help="The path pattern to the submitted ipynb file(s)"
    ),
    feedback_suffix: str = typer.Option(
        "-feedback.md", help="The suffix to add to the file name for the feedback"
    ),
    output: pathlib.Path = typer.Option(
        "output.csv", help="The path to output comma separated value file"
    ),
):
    """
    This checks a given submission against a source.
    """

    source_nb_node = nbchkr.utils.read(source)
    with open(f"{output}", "w") as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(
            ["Submission filepath", "Score", "Maximum score", "Tags match"]
        )

        with typer.progressbar(sorted(glob.iglob(submitted))) as bar:
            for path in bar:
                nb_node = nbchkr.utils.read(path)
                if nb_node != {}:
                    tags_match = nbchkr.utils.check_tags_match(
                        source_nb_node=source_nb_node, nb_node=nb_node
                    )

                    nb_node = nbchkr.utils.add_checks(
                        nb_node=nb_node, source_nb_node=source_nb_node
                    )
                    score, maximum_score, feedback_md = nbchkr.utils.check(
                        nb_node=nb_node
                    )
                else:
                    score, maximum_score, feedback_md = (
                        None,
                        None,
                        "Your notebook file was not in the correct format and could not be read",
                    )
                    tags_match = False

                with open(f"{path}{feedback_suffix}", "w") as f:
                    f.write(feedback_md)

                csv_writer.writerow([path, score, maximum_score, tags_match])
                typer.echo(
                    f"{path} checked against {source}. Feedback written to {path}{feedback_suffix} and output written to {output}."
                )
                if tags_match is False:
                    typer.echo(
                        f"WARNING: {path} has tags that do not match the source."
                    )
Example #25
def ping_ip_addresses(ip_addresses, count):
    reachable = []
    unreachable = []
    with typer.progressbar(ip_addresses, label="Pinging addresses") as bar:
        for ip in bar:
            if ping_ip(ip, count):
                reachable.append(ip)
            else:
                unreachable.append(ip)
    return reachable, unreachable
Example #26
def send_show_to_devices(devices, command, max_threads):
    host_output_dict = {}
    with ThreadPoolExecutor(max_workers=max_threads) as executor:
        results = executor.map(send_show, devices, repeat(command))
        with typer.progressbar(results,
                               length=len(devices),
                               label="Connecting") as bar:
            for data in bar:
                host_output_dict.update(data)
    return host_output_dict
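
executor.map returns a lazy, unsized iterator, so the explicit length=len(devices) is what allows the bar to render; it then advances as each worker's result arrives. A self-contained sketch of the same pattern with a stand-in for the network call:

import time
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat

import typer

def fake_send_show(device, command):
    time.sleep(0.5)  # stand-in for connecting and running the command
    return {device: f"output of {command}"}

devices = ["r1", "r2", "r3", "r4"]
host_output_dict = {}
with ThreadPoolExecutor(max_workers=4) as executor:
    results = executor.map(fake_send_show, devices, repeat("show version"))
    with typer.progressbar(results, length=len(devices), label="Connecting") as bar:
        for data in bar:
            host_output_dict.update(data)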
Example #27
def predict_directory(
    directory: Path = typer.Argument(
        ...,
        readable=True,
        resolve_path=True,
        help="Directory to start searching for images from",
    ),
    csv_save_dir: Path = typer.Argument(
        ...,
        writable=True,
        resolve_path=True,
        help="Directory used to store the csv report",
    ),
    pattern: str = typer.Option("fs", help="Pattern used to filter image filenames"),
    bs: int = typer.Option(16, help="Batch Size"),
    image_format: str = typer.Option(
        ".tif",
        help="Image format for flyswot to use for predictions, defaults to `*.tif`",
    ),
    model_name: str = typer.Option(
        "latest", help="Which model flyswot should use for making predictions"
    ),
    model_path: str = None,
):
    """Predicts against all images stored under DIRECTORY which match PATTERN in the filename.

    By default searches for filenames containing 'fs'.

    Creates a CSV report saved to `csv_save_dir`
    """
    start_time = time.perf_counter()
    model_dir = models.ensure_model_dir()
    model = models.ensure_model(model_dir)
    # if model_name != "latest" and not model_path:
    #     model_parts = models._get_model_parts(Path(model_dir / Path(model_name)))
    # if model_name != "latest" and model_path:
    #     model_parts = models._get_model_parts(Path(model_path / Path(model_name)))
    onnxinference = OnnxInferenceSession(model.model, model.vocab)
    files = sorted(core.get_image_files_from_pattern(directory, pattern, image_format))
    check_files(files, pattern, directory)
    typer.echo(f"Found {len(files)} files matching {pattern} in {directory}")
    csv_fname = create_csv_fname(csv_save_dir)
    with typer.progressbar(length=len(files)) as progress:
        images_checked = 0
        for i, batch in enumerate(itertoolz.partition_all(bs, files)):
            batch_predictions = onnxinference.predict_batch(batch, bs)
            if i == 0:  # pragma: no cover
                create_csv_header(batch_predictions, csv_fname)
            write_batch_preds_to_csv(batch_predictions, csv_fname)
            progress.update(len(batch))
            images_checked += len(batch)
    delta = timedelta(seconds=time.perf_counter() - start_time)
    print_inference_summary(
        str(delta), pattern, directory, csv_fname, image_format, images_checked
    )
Example #28
def download(pdf, file_out):
    """function that takes a pdf object and converts it to an audio file"""

    with progressbar(length=pdf.getNumPages(),
                     label='Downloading') as progress:
        with open(f'{file_out}.mp3', 'wb') as f:
            for page in progress:
                text = pdf.getPage(page).extractText()
                if text:
                    tts = gTTS(text)
                    tts.write_to_fp(f)
Example #29
def pickle_data_types(force: bool = False) -> List[MetaData]:
    meta_datas: List[MetaData] = []
    all_json_files = list(json_data_dir.iterdir())
    with typer.progressbar(all_json_files) as progress:
        for json_data in progress:
            meta_data = parse_json_filepath(json_data)
            meta_datas.append(meta_data)
            if not meta_data.pkl_path.exists() or force:
                data = parse_json(meta_data)
                pickle_data(data, meta_data)
    return meta_datas
Example #30
def sliceDis_fold(fold_root, save_csv_path='slice_dist_check.csv'):
    """
    Generates a csv file with DICOM slice distance information.

    Arguments:
        - fold_root: root folder to scan
        - save_csv_path: location and name of the CSV output
    Output csv columns:
        - sess: session identifier
        - single_folder: 1 if the session contains a single instance folder, 0 otherwise
        - distance_check: output of the `dcm_slicedistance` function for each session
    """
    subj_list = [x.stem for x in Path(fold_root).iterdir() if x.is_dir()]
    sess, single_folder, diff = [], [], []
    with typer.progressbar(range(len(subj_list)),
                           label="Subjects") as subj_prog:
        for i in subj_prog:
            # if i > 3: break
            subj_path = Path(fold_root) / subj_list[i]
            sess_list = [
                x.stem for x in Path(subj_path).iterdir() if x.is_dir()
            ]
            for j in range(len(sess_list)):
                sess.append(sess_list[j])
                #print("(i, j): ", i, j, sess_list[j])
                sess_path = subj_path / sess_list[j]
                instance_list = [
                    x.stem for x in Path(sess_path).iterdir() if x.is_dir()
                ]
                if len(instance_list) == 1:
                    single_folder.append(1)
                else:
                    single_folder.append(0)
                try:
                    # the DICOM files live under `new_max`, not `new_max/DICOM`
                    same = dcm_slicedistance(sess_path / "new_max")
                    diff.append(same)
                except Exception:
                    try:
                        same = dcm_slicedistance(sess_path / "file0/DICOM")
                        diff.append(same)
                    except Exception:
                        diff.append("")
                        print("dicom error")
    data = pd.DataFrame()
    data["sess"] = sess
    data["single_folder"] = single_folder
    data["distance_check"] = diff
    data.to_csv(save_csv_path, index=False)
    typer.secho(
        f"Slice distance check complete! Please review output in file: {str(save_csv_path)}.",
        fg=typer.colors.GREEN)