Example #1
def add(
    values: List[str],
    unique: bool = typer.Option(False, help="Delete duplicates of provided values if found")
):
    values = [v.rstrip("/") for v in values]
    print(":".join(values + [v for v in paths() if v not in values or not unique]))
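A hedged usage sketch: paths() is a helper defined elsewhere in the source module (assumed here to return the existing PATH-like entries), and the entry-point wiring is not shown in the excerpt:

# Hypothetical entry point for the command above; paths() is assumed
# to be defined in the same module.
if __name__ == "__main__":
    typer.run(add)

# $ python add_path.py /opt/tools/ --unique
# /opt/tools:/usr/local/bin:/usr/bin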
Example #2
def main(
        github_access_token: str = typer.Option(...,
                                                envvar="GITHUB_ACCESS_TOKEN",
                                                prompt=True),
        github_org: str = typer.Option(...),
        bitbucket_username: str = typer.Option(...,
                                               envvar="BITBUCKET_USERNAME",
                                               prompt=True),
        bitbucket_password: str = typer.Option(...,
                                               envvar="BITBUCKET_PASSWORD",
                                               prompt=True),
        bitbucket_team: str = typer.Option(...),
        user_prefix: List[str] = typer.Option(
            None,
            help="Prefix to remove from user names to attempt matching. "
            "For example, you can remove your company name from users login.",
        ),
        user_suffix: List[str] = typer.Option(
            None, help="Suffix to remove from user names to attempt matching"),
        jira_url: str = typer.Option(
            None,
            help="Your Jira instance root url, i.e. https://yourcompany.atlassian.net/. "
            "Use Jira to fetch more information about users",
        ),
        jira_username: str = typer.Option(None),
        jira_password: str = typer.Option(None),
):

    github = Github(github_access_token, timeout=30, retry=3, per_page=100)

    print(f"Getting GitHub org {github_org} members")
    gh_org_members = github.get_organization(github_org).get_members()

    bitbucket = BitbucketExport(team_name=bitbucket_team,
                                username=bitbucket_username,
                                app_password=bitbucket_password)
    print(f"Getting Bitbucket team {bitbucket_team} members")
    bb_users_raw = bitbucket.get_team_users()
    print(f"Got {len(bb_users_raw)} Bitbucket users")

    bb_users: List[BitbucketUser] = []
    for bb_user_raw in bb_users_raw:
        names: List[UserName] = []
        if nickname := bb_user_raw.get("nickname"):
            names.append(
                UserName("Nickname", nickname,
                         clean_up_name(nickname, user_prefix, user_suffix)))
            names.append(
                UserName(
                    "Nickname", nickname,
                    clean_up_name(nickname,
                                  user_prefix,
                                  user_suffix,
                                  remove_spaces=True)))
        if display_name := bb_user_raw.get("display_name"):
            names.append(
                UserName("Display Name", display_name,
                         clean_up_name(display_name, user_prefix,
                                       user_suffix)))
            names.append(
                UserName(
                    "Display Name",
                    display_name,
                    clean_up_name(display_name,
                                  user_prefix,
                                  user_suffix,
                                  remove_spaces=True),
                ))
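clean_up_name is not shown in the excerpt; a plausible sketch of what it might do, inferred from how it is called above (a hypothetical reimplementation, not the project's code):

# Hypothetical sketch of clean_up_name; the real helper lives elsewhere
# in the source project.
from typing import List, Optional


def clean_up_name(
    name: str,
    prefixes: Optional[List[str]] = None,
    suffixes: Optional[List[str]] = None,
    remove_spaces: bool = False,
) -> str:
    # Normalize, then strip the given prefixes/suffixes to ease matching.
    cleaned = name.strip().lower()
    for prefix in prefixes or []:
        if prefix and cleaned.startswith(prefix.lower()):
            cleaned = cleaned[len(prefix):]
    for suffix in suffixes or []:
        if suffix and cleaned.endswith(suffix.lower()):
            cleaned = cleaned[:-len(suffix)]
    if remove_spaces:
        cleaned = cleaned.replace(" ", "")
    return cleaned.strip()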
Example #3
def packages_inspector(
    path: Path = typer.Argument(".", help="Path of the codebase to inspect"),
    verbose: bool = typer.Option(False,
                                 "--verbose",
                                 "-v",
                                 help="Also print the debug logs"),
    context_file: Path = typer.Option(Path(".packages-inspector.yaml"),
                                      help="Path to the yaml context file"),
    update_context_file: bool = typer.Option(
        True, help="Update the context file based on the current run"),
    pipfile: Optional[Path] = typer.Option(
        None, help="Specify a Pipfile as a reference"),
    requirements: Optional[Path] = typer.Option(
        None, help="Specify a requirements as a reference"),
    error_on_diff: bool = typer.Option(
        True,
        help="With a requirements file specified, exit with an error "
        "if missing or unused packages are found"
    ),
    extra_module: List[str] = typer.Option(None,
                                           "--extra-module",
                                           "-e",
                                           help="Extra module to consider"),
    extra_package: List[str] = typer.Option(None,
                                            "--extra-package",
                                            help="Extra package to add"),
    ignore_module: List[str] = typer.Option(None,
                                            "--ignore-module",
                                            "-i",
                                            help="Module to ignore"),
    mapping: List[str] = typer.Option(
        None,
        "--mapping",
        "-m",
        help="Explicit mapping in the form module:package"),
    keep_package: List[str] = typer.Option(
        None, help="Add a package that is considered required anyhow"),
    interaction: bool = typer.Option(True,
                                     help="Allow or disallow interactions"),
    pypi_calls: bool = typer.Option(
        True,
        help=
        "Enable or disable the calls to pypi to search for a package, or to search if a package exists"
    ),
    apply: bool = typer.Option(
        False, help="Apply the changes to the Pipfile or requirements file"),
) -> None:
    """
    Find and validate the list of required packages.

    A module is a module in the Python context.
    A package is a Python package installable via pip.
    A module can be part of multiple packages.
    Modules and packages don't necessarily have the same name
    (for example, the module "yaml" is provided by the package "PyYAML").
    Modules can be defined both on PyPI (in N packages) and/or locally.
    """
    handler = TyperHandler()
    handler.setFormatter(ColorFormatter())
    logger.addHandler(handler)

    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    if requirements:
        packages_in_requirements = _keep_only_names(
            parse_requirements(requirements))
    elif pipfile:
        config = configparser.ConfigParser()
        config.read(pipfile)
        packages_in_requirements = set(config["packages"])
    else:
        packages_in_requirements = set()
    logger.debug(f"{packages_in_requirements}")

    context = _load_context(context_file)
    logger.debug(f"{context=}")

    # Some of the mapping decisions might have already been made in a previous run
    # but didn't end up in the context file. That's why this recorder is here.
    recorder = FileRecorder()

    logger.info("Discovering all the modules of the codebase...")
    only_imported_modules, imported_and_defined_modules = get_all_imports(
        path.as_posix())
    logger.info(
        f"Found {len(only_imported_modules)} imported only modules, "
        f"and {len(imported_and_defined_modules)} both imported and defined modules."
    )

    module_to_package_mapping = {
        **recorder.records,
        **context.get("mapping", {}),
        **{
            arg_split[0]: arg_split[1]
            for m in mapping if len(arg_split := m.split(":")) == 2
        },
    }
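    # Illustrative example (not from the source): passing
    #   --mapping yaml:PyYAML --mapping cv2:opencv-python
    # yields {"yaml": "PyYAML", "cv2": "opencv-python"}; entries without
    # exactly one ":" fail the len(...) == 2 guard and are skipped.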

    interaction_hook = interactive_package_validation if interaction else automatic_package_validation

    try:
        potential_missing_packages, unused_packages, context = _inspect(
            only_imported_modules,
            imported_and_defined_modules,
            interaction_hook,
            recorder,
            context,
            packages_in_requirements,
            extra_module,
            extra_package,
            ignore_module,
            module_to_package_mapping,
            keep_package,
            pypi_calls,
        )
    except UnableToFindMapping as exc:
        critical_message = f"Unable to find the mapping for the python module {exc._module}."
        critical_message += f"\nWe couldn't find any explicit mapping in {context_file}."
        if requirements or pipfile:
            critical_message += (
                f"\nAnd we couldn't find any package with the same name in {requirements if requirements else pipfile}."
            )
        critical_message += "\n\nTry to run packages_inspector manually to update your context file."
        logger.critical(critical_message)
        raise typer.Exit(code=1)

    if update_context_file:
        _save_context(context_file, context)

    recorder.clear()

    _output_results(potential_missing_packages, unused_packages, requirements,
                    pipfile)

    if apply:
        _apply_results(potential_missing_packages, unused_packages,
                       requirements, pipfile)

    if potential_missing_packages or unused_packages:
        raise typer.Exit(code=1)
    else:
        typer.secho("\nAll good", fg="green")
Example #4
def password(
    service: SupportedServices = typer.Argument(None, help="Service name"),
    show: bool = typer.Option(
        False,
        "--show",
        help="Show the current password(s)",
        show_default=False,
    ),
    random: bool = typer.Option(
        False,
        "--random",
        help="Generate a random password",
        show_default=False,
    ),
    new_password: str = typer.Option(
        None,
        "--password",
        help="Force the given password",
        show_default=False,
    ),
) -> None:

    Application.print_command(
        Application.serialize_parameter("--show", show, IF=show),
        Application.serialize_parameter("--random", random, IF=random),
        Application.serialize_parameter("--password",
                                        new_password,
                                        IF=new_password),
        Application.serialize_parameter("", service),
    )

    Application.get_controller().controller_init()

    # No service specified, only a summary will be reported
    if not service:

        if random:
            print_and_exit("--random flag is not supported without a service")

        if new_password:
            print_and_exit(
                "--password option is not supported without a service")

        MIN_PASSWORD_SCORE = int(
            Application.env.get("MIN_PASSWORD_SCORE", 2)  # type: ignore
        )

        last_updates = parse_projectrc()
        now = datetime.now()

        table: List[List[str]] = []
        for s in PASSWORD_MODULES:
            # This should never happen and can't be (easily) tested
            if s not in Application.data.base_services:  # pragma: no cover
                print_and_exit("Command misconfiguration, unknown {} service",
                               s)

            if s != REGISTRY and s not in Application.data.active_services:
                continue

            if s == REGISTRY and not Configuration.swarm_mode:
                continue

            module = PASSWORD_MODULES.get(s)

            if not module:  # pragma: no cover
                print_and_exit(f"{s} misconfiguration, module not found")

            for variable in module.PASSWORD_VARIABLES:

                password = Application.env.get(variable)

                if password == PLACEHOLDER:
                    score = None
                else:
                    result = zxcvbn(password)
                    score = result["score"]

                if variable in last_updates:
                    change_date = last_updates.get(variable,
                                                   datetime.fromtimestamp(0))
                    expiration_date = change_date + timedelta(
                        days=PASSWORD_EXPIRATION)
                    expired = now > expiration_date
                    last_change = change_date.strftime("%Y-%m-%d")
                else:
                    expired = True
                    last_change = "N/A"

                pass_line: List[str] = []

                pass_line.append(s)
                pass_line.append(variable)

                if expired:
                    pass_line.append(RED(last_change))
                else:
                    pass_line.append(GREEN(last_change))

                if score is None:
                    pass_line.append(RED("NOT SET"))
                elif score < MIN_PASSWORD_SCORE:
                    pass_line.append(RED(score))
                else:
                    pass_line.append(GREEN(score))

                if show:
                    pass_line.append(str(password))

                table.append(pass_line)

        headers = ["SERVICE", "VARIABLE", "LAST CHANGE", "STRENGTH"]
        if show:
            headers.append("PASSWORD")

        print("")
        print(tabulate(
            table,
            tablefmt=TABLE_FORMAT,
            headers=headers,
        ))

    # In this case a service is asked to be updated
    else:

        module = PASSWORD_MODULES.get(service.value)

        if not module:  # pragma: no cover
            print_and_exit(
                f"{service.value} misconfiguration, module not found")

        if random:
            new_password = get_strong_password()
        elif not new_password:
            print_and_exit(
                "Please specify one between --random and --password options")

        docker = Docker()

        variables = module.PASSWORD_VARIABLES
        old_password = Application.env.get(variables[0])
        new_variables = {variable: new_password for variable in variables}

        # Some services can only be updated if already running,
        # others can be updated even if offline,
        # but in every case if the stack is running it has to be restarted

        if service.value == REGISTRY:
            is_running = docker.registry.ping(do_exit=False)
            container: Optional[Tuple[str, str]] = ("registry", "")
        else:
            container = docker.get_container(service.value)
            is_running = container is not None

        is_running_needed = module.IS_RUNNING_NEEDED

        log.info("Changing password for {}...", service.value)

        if is_running_needed and (not is_running or not container):
            print_and_exit(
                "Can't update {} because it is not running. Please start your stack",
                service.value,
            )

        update_projectrc(new_variables)

        if container:
            module.password(container, old_password, new_password)

        if is_running:
            log.info("{} was running, restarting services...", service.value)

            Application.get_controller().check_placeholders_and_passwords(
                Application.data.compose_config, Application.data.services)
            if service.value == REGISTRY:
                port = cast(int, Application.env["REGISTRY_PORT"])

                docker.client.container.remove(REGISTRY, force=True)

                docker.compose.create_volatile_container(REGISTRY,
                                                         detach=True,
                                                         publish=[(port, port)
                                                                  ])
            elif Configuration.swarm_mode:

                docker.compose.dump_config(Application.data.services)
                docker.swarm.deploy()

            else:
                docker.compose.start_containers(Application.data.services)
        else:
            log.info("{} was not running, restart is not needed",
                     service.value)

        log.info(
            "The password of {} has been changed. "
            "Please find the new password into your .projectrc file as {} variable",
            service.value,
            variables[0],
        )
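The strength column above comes from the zxcvbn library, which scores passwords from 0 (too guessable) to 4 (very unguessable). A minimal standalone sketch of the scoring call used in the loop:

# Minimal sketch of the zxcvbn scoring used above.
from zxcvbn import zxcvbn

result = zxcvbn("correct horse battery staple")
print(result["score"])  # integer from 0 to 4, compared against MIN_PASSWORD_SCORE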
Example #5
def main(name: List[str] = typer.Option(["World"],
                                        help="The name to say hi to.",
                                        autocompletion=complete_name)):
    for n in name:
        print(f"Hello {n}")
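A hedged invocation sketch using Typer's test runner (the app object registering main() is assumed; the excerpt does not show it):

# Hypothetical invocation sketch; app is assumed to register main().
from typer.testing import CliRunner

runner = CliRunner()
result = runner.invoke(app, ["--name", "Camila", "--name", "Sebastián"])
print(result.output)
# Hello Camila
# Hello Sebastián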
Example #6
def validate(
    tracerepository_path: Path = TRACEREPOSITORY_PATH_OPTION,
    database_name: str = DATABASE_OPTION,
    area_filter: List[str] = DATA_FILTER,
    thematic_filter: List[str] = DATA_FILTER,
    traces_filter: List[str] = DATA_FILTER,
    scale_filter: List[str] = DATA_FILTER,
    report: bool = typer.Option(False),
    # report_directory: Optional[Path] = typer.Option(rules.PathNames.REPORTS.value),
    report_directory: Optional[Path] = typer.Option(
        None,
        help=("Defaults to file in tracerepository_path directory "
              f"with name: {rules.PathNames.REPORTS.value}."),
    ),
    metadata_json: Optional[Path] = typer.Option(
        None,
        help=("Defaults to file in tracerepository_path directory "
              f"with name: {rules.PathNames.METADATA.value}."),
        # rules.PathNames.METADATA.value, exists=True, dir_okay=False
    ),
):
    """
    Validate trace datasets.

    Only validates if the dataset has been marked as invalid in database.csv.
    """
    console = Console()
    database = tracerepository_path / database_name
    # Initialize Organizer
    organizer = Organizer(
        tracerepository_path=tracerepository_path,
        database=repo.read_database_csv(path=database),
    )

    # Resolve metadata_json_path
    metadata_json_path = (metadata_json if metadata_json is not None else
                          tracerepository_path /
                          rules.PathNames.METADATA.value)

    # Load metadata of traces column restrictions
    metadata = load_metadata_from_json(metadata_json_path=metadata_json_path)

    # Query for invalid traces
    invalids = organizer.query(
        area=area_filter,
        thematic=thematic_filter,
        traces=traces_filter,
        scale=scale_filter,
        validity=[
            rules.ValidationResults.INVALID,
            rules.ValidationResults.EMPTY,
            rules.ValidationResults.UNFIT,
        ],
    )

    # Only validate a single trace dataset once
    # Means you might have to validate for each area dataset
    # that uses the traces.
    # However only the area boundary validation is required for
    # the rest of the areas.
    unique_invalids_only = spatial.unique_invalids(invalids=invalids)

    # Report which data are validated
    if report:
        console.print(utils.create_validation_table(unique_invalids_only))

    # Validate the invalids
    update_tuples = spatial.validate_invalids(invalids=unique_invalids_only)

    # Exit with error code 1 if there's errors in updating the database.csv
    database_error, write_error = False, False

    assert len(update_tuples) == len(unique_invalids_only)
    # Iterate over results

    for update_tuple, invalid in zip(update_tuples, unique_invalids_only):

        # Validate and gather pandera reporting
        pandera_update_values, pandera_report = utils.pandera_reporting(
            update_tuple=update_tuple,
            metadata=metadata,
        )

        # If the geodataset is otherwise valid but fails pandera checks it will
        # be marked as unfit
        if len(pandera_update_values) > 0:
            update_tuple.update_values = pandera_update_values
        try:
            # Update Organizer database.csv
            organizer.update(
                area_name=invalid.area_path.stem,
                update_values=update_tuple.update_values,
            )

            # Write the database.csv
            repo.write_database_csv(path=database, database=organizer.database)

        except Exception:

            # Error in updating or writing database.csv
            database_error = True

            # Log exception
            logging.error(
                f"Error when updating or writing Organizer database.csv.\n"
                f"update_tuple: {update_tuple}\n"
                f"invalid: {invalid}\n",
                exc_info=True,
            )

        if not pandera_report.empty and report:
            report_directory = (tracerepository_path /
                                Path(rules.PathNames.REPORTS.value) if
                                report_directory is None else report_directory)
            str_report = utils.report_pandera_errors(
                pandera_report=pandera_report,
                report_directory=report_directory,
                area_name=update_tuple.area_name,
            )
            console.print(str_report)

    # Report validation results with a rich.table.Table
    if report:
        console.print(
            utils.create_validation_results_table(
                invalids=unique_invalids_only, update_tuples=update_tuples))

    if database_error or write_error:

        # Exit with error code 1 (not successful)
        raise typer.Exit(code=1)
Example #7
def load_data(data_folder: pathlib.Path = typer.Option(
    DATA_FOLDER,
    file_okay=False,
    help='Path to the folder where the dataset will be saved')):
    download_nfl6_dataset(data_folder)
    prepare_nfl6_dataset(data_folder)
Example #8
import platform
from pathlib import Path

import typer

from .path import get_default_osu_path

app = typer.Typer(name='osucli', help='Make osu! great again in terminal')

osu_path_option = typer.Option(
    get_default_osu_path(),
    '--osu_path',
    '-P',
    show_default=False,
    help='osu root path, use default path if not specified')


@app.callback()
def check_os():
    if platform.system() != 'Windows':
        typer.echo('osucli only supports Windows')
        raise typer.Exit()


@app.command('archive')
def archive_usr_data(osu_path: Path = osu_path_option,
                     output: Path = typer.Option(Path('.') / 'output.zip',
                                                 '--output',
                                                 '-o',
                                                 help='output path')):
    """
Example #9
def execute(
    directory: Path = typer.Option(...),
    benchmarks: Path = typer.Option(...),
    submit_options: Path = typer.Option(...),
    database_path: Optional[Path] = None,
    resubmit: Optional[bool] = False,
):
    identifiers = deserialize_identifiers(benchmarks)
    options = deserialize_submit_options(submit_options)

    database_file = (directory / "result.json").resolve()
    if (database_path and database_path.is_file()
            and database_file != database_path.resolve()):
        database_file = directory / database_path.name
        shutil.copyfile(database_path, database_file)

    database = Database(database_file)
    runner = BenchmarkRunner(database,
                             workdir=directory,
                             materialize_fn=materialize_benchmark)
    benchmark_count = len(identifiers)

    def run():
        for (identifier, benchmark,
             result) in tqdm(runner.compute(identifiers),
                             total=benchmark_count):
            logging.info(f"Finished benchmark {identifier}: {result}")

    max_runtime = options.walltime
    if max_runtime.total_seconds() > 60:
        max_runtime = max_runtime - timedelta(minutes=1)

    logging.info(
        f"Starting to benchmark {benchmark_count} benchmarks, max time is {max_runtime}"
    )

    start = time.time()
    try:
        with_timeout(run, timeout_s=max_runtime.total_seconds())
        runner.save()

        duration = time.time() - start
        logging.info(f"Benchmark finished in {duration}s")

        # Store a symlink to the final resubmitted directory into the root directory
        if resubmit:
            root_dir = directory.parent.parent
            os.symlink(directory,
                       root_dir / "final-run",
                       target_is_directory=True)
    except TimeoutException:
        runner.save()

        if resubmit:
            root_dir = directory.parent
        else:
            root_dir = directory / "resubmits"
        directory = generate_job_dir(root_dir)

        remaining = [
            identifier for identifier in identifiers
            if not database.has_record_for(identifier)
        ]

        logging.warning(
            f"Benchmark didn't finish in {max_runtime}, computed {benchmark_count - len(remaining)}"
            f"/{benchmark_count}, resubmitting at {directory}")
        submit(
            remaining,
            workdir=directory,
            options=options,
            database_path=database_file,
            resubmit=True,
        )
    except BaseException:
        runner.save()
        logging.error(
            f"Error occurred while benchmarking: {traceback.format_exc()}")
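with_timeout and TimeoutException come from elsewhere in the project; a minimal sketch of one way such a helper could work (an assumption, not the project's actual implementation):

# Hypothetical thread-based timeout helper; the project's real
# with_timeout/TimeoutException may differ.
import threading


class TimeoutException(Exception):
    pass


def with_timeout(fn, timeout_s: float):
    done = threading.Event()

    def target():
        try:
            fn()
        finally:
            done.set()

    thread = threading.Thread(target=target, daemon=True)
    thread.start()
    if not done.wait(timeout=timeout_s):
        # The worker keeps running as a daemon thread; we only signal the caller.
        raise TimeoutException(f"did not finish within {timeout_s}s")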
Example #10
File: run.py Project: efonte/ESRGAN
def video(
    model: str = typer.Argument(...),
    input: Path = typer.Option(Path("input/video.mp4"),
                               "--input",
                               "-i",
                               help="Input video"),
    output: Path = typer.Option(Path("output/video.mp4"),
                                "--output",
                                "-o",
                                help="Output video"),
    seamless: SeamlessOptions = typer.Option(
        None,
        "--seamless",
        "-s",
        case_sensitive=False,
        help=
        "Helps seamlessly upscale an image. tile = repeating along edges. mirror = reflected along edges. replicate = extended pixels along edges. alpha_pad = extended alpha border.",
    ),
    # cpu: bool = typer.Option(False, "--cpu", "-c", help="Use CPU instead of CUDA"),
    fp16: bool = typer.Option(
        False,
        "--floating-point-16",
        "-fp16",
        help="Use FloatingPoint16/Halftensor type for images.",
    ),
    device_id: int = typer.Option(
        0,
        "--device-id",
        "-did",
        help="The numerical ID of the GPU you want to use."),
    multi_gpu: bool = typer.Option(False,
                                   "--multi-gpu",
                                   "-mg",
                                   help="Multi GPU"),
    scenes_per_gpu: int = typer.Option(
        1,
        "--scenes-per-gpu",
        "-spg",
        help=
        "Number of scenes to be upscaled at the same time using the same GPU. 0 for automatic mode",
    ),
    cache_max_split_depth: bool = typer.Option(
        False,
        "--cache-max-split-depth",
        "-cmsd",
        help=
        "Caches the maximum recursion depth used by the split/merge function. Useful only when upscaling images of the same size.",
    ),
    ssim: bool = typer.Option(
        False,
        "--ssim",
        "-ssim",
        help=
        "True to enable duplication frame removal using ssim. False to use np.all().",
    ),
    min_ssim: float = typer.Option(0.9987,
                                   "--min-ssim",
                                   "-ms",
                                   help="Min SSIM value."),
    diff_mode: bool = typer.Option(False,
                                   "--diff",
                                   "-d",
                                   help="Enable diff mode (beta)."),
    chunk_size: int = typer.Option(
        16,
        "--chunk-size",
        "-cs",
        help=
        "Only used with diff mode. Chunk size to be able to generate the frame difference (beta).",
    ),
    padding_size: int = typer.Option(
        2,
        "--padding-size",
        "-ps",
        help=
        "Only used with diff mode. Padding size between each chunk (beta).",
    ),
    quality: float = typer.Option(
        10,
        "--quality",
        "-q",
        min=0,
        max=10,
        help="Video quality.",
    ),
    ffmpeg_params: str = typer.Option(
        None,
        "--ffmpeg-params",
        "--ffmpeg",
        help=
        'FFmpeg parameters to save the scenes. If -crf is present, the quality parameter will be ignored. Example: "-c:v libx265 -crf 5 -pix_fmt yuv444p10le -preset medium -x265-params pools=none -threads 8".',
    ),
    # deduplication: bool = typer.Option(
    #     False,
    #     "--deduplication",
    #     "-d",
    #     help="True to enable duplication frame removal",
    # ),
    deinterpaint: DeinterpaintOptions = typer.Option(
        None,
        "--deinterpaint",
        "-dp",
        case_sensitive=False,
        help=
        "De-interlacing by in-painting. Fills odd or even rows with green (#00FF00). Useful for models like Joey's 1x_DeInterPaint.",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Verbose mode",
    ),
):
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.WARNING,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler(markup=True)],
    )
    log = logging.getLogger()

    input = input.resolve()
    output = output.resolve()
    if not input.exists():
        log.error(f'Video "{input}" does not exist.')
        sys.exit(1)
    elif input.is_dir():
        log.error(f'Video "{input}" is a directory.')
        sys.exit(1)
    elif output.is_dir():
        log.error(f'Video "{output}" is a directory.')
        sys.exit(1)
    # elif not output.exists():
    #     output=input.with_name(f"{input.stem}_ai.mp4")

    upscale = Upscale(
        model=model,
        seamless=seamless,
        # cpu=cpu,
        fp16=fp16,
        device_id=device_id,
        cache_max_split_depth=cache_max_split_depth,
        alpha_mode=AlphaOptions.no_alpha,
        multi_gpu=multi_gpu,
    )
    if len(upscale.model_chain) > 1 and deinterpaint is not None:
        log.error(
            "Model Chain and DeInterPaint cannot be used at the same time.")
        exit(1)

    project_path = output.parent.joinpath(f"{output.stem}").absolute()
    ai_upscaled_path = project_path.joinpath("scenes")
    scenes_ini = project_path.joinpath("scenes.ini")
    frames_todo: List[Tuple[int, int]] = []
    frames_upscaled: List[Tuple[int, int]] = []
    config = configparser.ConfigParser()
    if project_path.is_dir():
        resume_mode = True
        log.info(f'Resuming project "{project_path}"')
        config.read(scenes_ini)
        for scene in config.sections():
            start_frame, end_frame = scene.split("_")
            start_frame = int(start_frame)
            end_frame = int(end_frame)
            if config.getboolean(scene, "upscaled"):
                frames_upscaled.append((start_frame, end_frame))
            else:
                frames_todo.append((start_frame, end_frame))
    else:
        resume_mode = False
        with get_console().status("Detecting scenes..."):
            scenes = find_scenes(str(input.absolute()))
        log.info(
            f"Detected {len(scenes)} scene{'' if len(scenes)==1 else 's'}")

        ai_upscaled_path.mkdir(parents=True, exist_ok=True)
        num_frames = scenes[-1][1].get_frames()
        for scene in scenes:
            start_frame = str(scene[0].get_frames() + 1).zfill(
                len(str(num_frames)))
            end_frame = str(scene[1].get_frames()).zfill(len(str(num_frames)))
            config[f"{start_frame}_{end_frame}"] = {
                "upscaled": "False",
                "duplicated_frames": "None",
                "average_fps": "None",
            }
            frames_todo.append((int(start_frame), int(end_frame)))

        with open(scenes_ini, "w") as configfile:
            config.write(configfile)

    video_reader: FfmpegFormat.Reader = imageio.get_reader(
        str(input.absolute()))
    fps = video_reader.get_meta_data()["fps"]
    num_frames = video_reader.count_frames()
    scale = 1

    if diff_mode:
        with get_console().status("Detecting the model's scale..."):
            img = video_reader.get_data(0)
            h, w, c = img.shape
            img = np.resize(img,
                            (h // 4, w // 4, c))  # resize for fast upscaling
            height, width, channels = img.shape
            height_ai, width_ai, channels_ai = upscale.image(img).shape
            scale = int(width_ai / width)
        log.info(f"Model's scale: x{scale}")

    if scenes_per_gpu < 1:
        with get_console().status(
                f"Detecting how many scenes can be upscaled at the same time..."
        ):
            img = video_reader.get_data(0)
            upscale.image(img)
        reserved = torch.cuda.memory_reserved(device_id)
        if multi_gpu:
            for i in range(torch.cuda.device_count()):
                device = torch.device(f"cuda:{i}")
                device_name = torch.cuda.get_device_name(i)
                total = torch.cuda.get_device_properties(i).total_memory
                # TODO upscale using the device i
                num_scenes_same_time = 0
                reserved_temp = 0
                while reserved_temp < total:
                    reserved_temp += reserved
                    num_scenes_same_time += 1
                if reserved_temp >= total:
                    num_scenes_same_time -= 1
                log.info(
                    f'Number of scenes to upscale at the same time on "{device_name}": {num_scenes_same_time}'
                )
                upscale.devices[device] = [
                    Lock() for _ in range(num_scenes_same_time)
                ]
        else:
            device = torch.device(f"cuda:{device_id}")
            device_name = torch.cuda.get_device_name(device_id)
            total = torch.cuda.get_device_properties(device_id).total_memory
            num_scenes_same_time = 0
            reserved_temp = 0
            while reserved_temp < total:
                reserved_temp += reserved
                num_scenes_same_time += 1
            if reserved_temp >= total:
                num_scenes_same_time -= 1
            log.info(
                f'Number of scenes to upscale at the same time on "{device_name}": {num_scenes_same_time}'
            )
            upscale.devices[device] = [
                Lock() for _ in range(num_scenes_same_time)
            ]
            if num_scenes_same_time > 1:
                multi_gpu = True
                upscale.multi_gpu = True
    else:
        for device in upscale.devices.keys():
            upscale.devices[device] = [Lock() for _ in range(scenes_per_gpu)]
        if scenes_per_gpu > 1:
            multi_gpu = True
            upscale.multi_gpu = True

    with Progress(
            # SpinnerColumn(),
            "[progress.description]{task.description}",
            "[progress.percentage]{task.percentage:>3.0f}%",
            BarColumn(),
            TimeRemainingColumn(),
            FpsSpeedColumn(),
    ) as progress:
        num_frames_upscaled = 0
        for start_frame, end_frame in frames_upscaled:
            num_frames_upscaled += end_frame - start_frame + 1
        task_upscaled_id = progress.add_task(
            f'Upscaling [green]"{input.name}"[/]', total=num_frames)
        if num_frames_upscaled > 0:
            log.info(f"Skipped {num_frames_upscaled} frames already upscaled")
            progress.update(task_upscaled_id,
                            completed=num_frames_upscaled,
                            refresh=True)

        if len(upscale.model_chain) > 1:
            # Warm up the model chain (the models may not be loaded yet if the threads start at the same time)
            upscale.image(255 * np.zeros([10, 10, 3], dtype=np.uint8))

        threads = []
        for start_frame, end_frame in frames_todo:
            num_lock = 0
            if multi_gpu:
                device, num_lock = upscale.get_available_device(
                    first_lock=False)
            else:
                device = list(upscale.devices.keys())[0]
            video_thread_func_args = {
                "device": device,
                "num_lock": num_lock,
                "multi_gpu": multi_gpu,
                "input": input,
                "start_frame": start_frame,
                "end_frame": end_frame,
                "num_frames": num_frames,
                "progress": progress,
                "task_upscaled_id": task_upscaled_id,
                "ai_upscaled_path": ai_upscaled_path,
                "fps": fps,
                "quality": quality,
                "ffmpeg_params": ffmpeg_params,
                "deinterpaint": deinterpaint,
                "diff_mode": diff_mode,
                "ssim": ssim,
                "min_ssim": min_ssim,
                "chunk_size": chunk_size,
                "padding_size": padding_size,
                "scale": scale,
                "upscale": upscale,
                "config": config,
                "scenes_ini": scenes_ini,
            }
            if multi_gpu:
                x = Thread(target=video_thread_func,
                           kwargs=video_thread_func_args)
                threads.append(x)
                x.daemon = True
                x.start()
            else:
                video_thread_func(**video_thread_func_args)
        for thread in threads:
            thread.join()

    with open(project_path.joinpath("ffmpeg_list.txt"), "w",
              encoding="utf-8") as outfile:
        for mp4_path in ai_upscaled_path.glob("*.mp4"):
            outfile.write(
                f"file '{mp4_path.relative_to(project_path).as_posix()}'\n")
    total_duplicated_frames = 0
    total_average_fps = 0
    for section in config.sections():
        total_duplicated_frames += config.getint(section, "duplicated_frames")
        total_average_fps += config.getfloat(section, "average_fps")
    total_average_fps = total_average_fps / len(config.sections())
    if not resume_mode:
        task_upscaled = next(task for task in progress.tasks
                             if task.id == task_upscaled_id)
        total_average_fps = task_upscaled.finished_speed or task_upscaled.speed or 0.01
    if total_duplicated_frames > 0:
        seconds_saved = (
            ((1 / total_average_fps * num_frames)
             - (total_duplicated_frames * 0.04))  # 0.04 seconds per duplicate frame
            / (num_frames - total_duplicated_frames)
            * total_duplicated_frames
        )
        log.info(
            f"Total number of duplicated frames: {total_duplicated_frames} (saved ≈ {precisedelta(dt.timedelta(seconds=seconds_saved))})"
        )
    log.info(f"Total FPS: {total_average_fps:.2f}")
    print("\nUpscale completed!\n")

    bad_scenes = []
    with get_console().status(
            "Checking the correct number of frames of the mp4 files..."):
        for mp4_path in ai_upscaled_path.glob("*.mp4"):
            start_frame, end_frame = mp4_path.stem.split("_")
            num_frames = int(end_frame) - int(start_frame) + 1
            with imageio.get_reader(str(mp4_path.absolute())) as video_reader:
                frames_mp4 = video_reader.count_frames()
            if num_frames != frames_mp4:
                bad_scenes.append(f"{mp4_path.stem}")

    if len(bad_scenes) > 0:
        for scene in bad_scenes:
            config.set(scene, "upscaled", "False")
        with open(scenes_ini, "w") as configfile:
            config.write(configfile)
        if len(bad_scenes) == 1:
            bad_scenes_str = f"[green]{bad_scenes[0]}[/]"
        else:
            bad_scenes_str = f'[green]{"[/], [green]".join(bad_scenes[:-1])}[/] and [green]{bad_scenes[-1]}[/]'
        print(
            f"The following scenes were incorrectly upscaled: {bad_scenes_str}."
        )
        print("Please re-run the script to finish upscaling them.")
    else:
        print(
            f'Go to the "{project_path}" directory and run the following command to concatenate the scenes.'
        )
        print(
            Markdown(
                f"`ffmpeg -f concat -safe 0 -i ffmpeg_list.txt -i {input.absolute()} -map 0:v -map 1:a -c copy {output.name}`"
            ))
Example #11
File: run.py Project: efonte/ESRGAN
def image(
    models: List[str] = typer.Argument(...),
    input: Path = typer.Option(Path("input"),
                               "--input",
                               "-i",
                               help="Input folder"),
    reverse: bool = typer.Option(False,
                                 "--reverse",
                                 "-r",
                                 help="Reverse Order"),
    output: Path = typer.Option(Path("output"),
                                "--output",
                                "-o",
                                help="Output folder"),
    skip_existing: bool = typer.Option(
        False,
        "--skip-existing",
        "-se",
        help="Skip existing output files",
    ),
    delete_input: bool = typer.Option(
        False,
        "--delete-input",
        "-di",
        help="Delete input files after upscaling",
    ),
    seamless: SeamlessOptions = typer.Option(
        None,
        "--seamless",
        "-s",
        case_sensitive=False,
        help=
        "Helps seamlessly upscale an image. tile = repeating along edges. mirror = reflected along edges. replicate = extended pixels along edges. alpha_pad = extended alpha border.",
    ),
    cpu: bool = typer.Option(False,
                             "--cpu",
                             "-c",
                             help="Use CPU instead of CUDA"),
    fp16: bool = typer.Option(
        False,
        "--floating-point-16",
        "-fp16",
        help="Use FloatingPoint16/Halftensor type for images.",
    ),
    device_id: int = typer.Option(
        0,
        "--device-id",
        "-did",
        help="The numerical ID of the GPU you want to use."),
    multi_gpu: bool = typer.Option(False,
                                   "--multi-gpu",
                                   "-mg",
                                   help="Multi GPU"),
    cache_max_split_depth: bool = typer.Option(
        False,
        "--cache-max-split-depth",
        "-cmsd",
        help=
        "Caches the maximum recursion depth used by the split/merge function. Useful only when upscaling images of the same size.",
    ),
    binary_alpha: bool = typer.Option(
        False,
        "--binary-alpha",
        "-ba",
        help=
        "Whether to use a 1 bit alpha transparency channel, Useful for PSX upscaling",
    ),
    ternary_alpha: bool = typer.Option(
        False,
        "--ternary-alpha",
        "-ta",
        help=
        "Whether to use a 2 bit alpha transparency channel, Useful for PSX upscaling",
    ),
    alpha_threshold: float = typer.Option(
        0.5,
        "--alpha-threshold",
        "-at",
        help=
        "Only used when binary_alpha is supplied. Defines the alpha threshold for binary transparency",
    ),
    alpha_boundary_offset: float = typer.Option(
        0.2,
        "--alpha-boundary-offset",
        "-abo",
        help=
        "Only used when binary_alpha is supplied. Determines the offset boundary from the alpha threshold for half transparency.",
    ),
    alpha_mode: AlphaOptions = typer.Option(
        "alpha_separately",
        "--alpha-mode",
        "-am",
        help=
        "Type of alpha processing to use. no_alpha = is no alpha processing. bas = is BA's difference method. alpha_separately = is upscaling the alpha channel separately (like IEU). swapping = is swapping an existing channel with the alpha channel.",
    ),
    imagemagick: bool = typer.Option(
        False,
        "--imagemagick",
        "-im",
        help=
        "Use ImageMagick to save the upscaled image (higher quality but slower). Disabled when using multi_gpu mode.",
    ),
    jpg: bool = typer.Option(
        False,
        "--jpg",
        "-j",
        help="Convert the image to jpg",
    ),
    resize: int = typer.Option(
        100,
        "--resize",
        "-r",
        help="Resize percent",
    ),
    zip: bool = typer.Option(
        False,
        "--zip",
        "-z",
        help="Compress the output to zip file",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Verbose mode",
    ),
):
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.WARNING,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler(markup=True)],
    )
    start_time = time.process_time()
    for model in models:
        upscale = Upscale(
            model=model,
            seamless=seamless,
            cpu=cpu,
            fp16=fp16,
            device_id=device_id,
            multi_gpu=multi_gpu,
            cache_max_split_depth=cache_max_split_depth,
            binary_alpha=binary_alpha,
            ternary_alpha=ternary_alpha,
            alpha_threshold=alpha_threshold,
            alpha_boundary_offset=alpha_boundary_offset,
            alpha_mode=alpha_mode,
            imagemagick=imagemagick,
            jpg=jpg,
            resize=resize,
            zip=zip,
        )
        models_str = model.split("+") if "+" in model else model.split(">")
        upscale.folder(
            input=input,
            output=output if len(models) == 1 or zip else output.joinpath(
                "_".join([Path(x).stem for x in models_str])),
            skip_existing=skip_existing,
            reverse=reverse,
            delete_input=delete_input,
        )
    log = logging.getLogger()
    log.info(
        f"Images upscaled in {precisedelta(dt.timedelta(seconds=time.process_time() - start_time))}"
    )
Example #12
def hello(
    name: str,
    lastname: bool = typer.Option(False, '--lastname', '-n', help="your lastname"),
    format: bool = False,
):
    if format:
        typer.echo("format is true")
    if lastname:
        print(f"lastname is {lastname}")
    typer.echo(f"Hello {name} {lastname}")
Example #13
def main(number: List[float] = typer.Option([])):
    print(f"The sum is {sum(number)}")
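Each repeated --number appends to the list and the command prints the total; a hedged invocation sketch (assuming typer.run wiring, which the excerpt does not show):

# Hypothetical entry point for the command above.
# $ python sum.py --number 2 --number 3 --number 4.5
# The sum is 9.5
if __name__ == "__main__":
    typer.run(main)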
Example #14
def create(
    path: str = typer.Argument(
        None,
        help=(
            "The directory in which the deployment will be created "
            "(will be created if does not exist)"
        ),
    ),
    config_file_name: str = typer.Option(
        "config.toml", help="The configuration file name expected in the provided path"
    ),
    copy_conf: str = typer.Option(
        None, help="The configuration to copy from (e.g. dev_config.toml)"
    ),
    create_conf: bool = typer.Option(
        False,
        help="Enable/disable creation of a default configuration file",
    ),
    dev: bool = typer.Option(
        False,
        help=(
            "Enable/disable dev mode "
            "(fills the database with test data and allows http access)"
        ),
    ),
):
    """Create a new Quetz deployment."""

    logger.info(f"creating new deployment in path {path}")

    abs_path = os.path.abspath(path)
    config_file = os.path.join(path, config_file_name)
    deployments = _get_deployments()

    if os.path.exists(path) and abs_path in deployments:
        delete_ = typer.confirm(f'Quetz deployment exists at {path}.\nOverwrite it?')
        if delete_:
            delete(path, force=True)
            del deployments[abs_path]
        else:
            typer.echo('Use the start command to start a deployment.', err=True)
            raise typer.Abort()

    Path(path).mkdir(parents=True)

    # only authorize path with a config file to avoid deletion of unexpected files
    # when deleting Quetz instance
    if not all(f == config_file_name for f in os.listdir(path)):
        typer.echo(
            f'Quetz deployment not allowed at {path}.\n'
            'The path should not contain more than the configuration file.',
            err=True,
        )
        raise typer.Abort()

    if not os.path.exists(config_file) and not (create_conf or copy_conf):
        typer.echo(
            'No configuration file provided.\n'
            'Use --create-conf or --copy-conf to produce a config file.',
            err=True,
        )
        raise typer.Abort()

    if copy_conf:
        if not os.path.exists(copy_conf):
            typer.echo(f'Config file to copy does not exist {copy_conf}.', err=True)
            raise typer.Abort()

        typer.echo(f"Copying config file from {copy_conf} to {config_file}")
        shutil.copyfile(copy_conf, config_file)

    if not os.path.exists(config_file) and create_conf:
        if dev:
            https = 'false'
        else:
            https = 'true'
        conf = create_config(https=https)
        with open(config_file, 'w') as f:
            f.write(conf)

    os.environ[_env_prefix + _env_config_file] = config_file
    config = Config(config_file)

    os.chdir(path)
    Path('channels').mkdir()
    db = get_session(config.sqlalchemy_database_url)

    _run_migrations(config.sqlalchemy_database_url)
    _init_db(db, config)

    if dev:
        _fill_test_database(db)

    _store_deployment(abs_path, config_file_name)
Example #15
def main(
    model: str = typer.Argument(...),
    input: Path = typer.Option(Path("input"), "--input", "-i", help="Input folder"),
    output: Path = typer.Option(Path("output"), "--output", "-o", help="Output folder"),
    reverse: bool = typer.Option(False, "--reverse", "-r", help="Reverse Order"),
    skip_existing: bool = typer.Option(
        False,
        "--skip-existing",
        "-se",
        help="Skip existing output files",
    ),
    delete_input: bool = typer.Option(
        False,
        "--delete-input",
        "-di",
        help="Delete input files after upscaling",
    ),
    seamless: SeamlessOptions = typer.Option(
        None,
        "--seamless",
        "-s",
        case_sensitive=False,
        help="Helps seamlessly upscale an image. tile = repeating along edges. mirror = reflected along edges. replicate = extended pixels along edges. alpha_pad = extended alpha border.",
    ),
    cpu: bool = typer.Option(False, "--cpu", "-c", help="Use CPU instead of CUDA"),
    fp16: bool = typer.Option(
        False,
        "--floating-point-16",
        "-fp16",
        help="Use FloatingPoint16/Halftensor type for images.",
    ),
    device_id: int = typer.Option(
        0, "--device-id", "-did", help="The numerical ID of the GPU you want to use."
    ),
    cache_max_split_depth: bool = typer.Option(
        False,
        "--cache-max-split-depth",
        "-cmsd",
        help="Caches the maximum recursion depth used by the split/merge function. Useful only when upscaling images of the same size.",
    ),
    binary_alpha: bool = typer.Option(
        False,
        "--binary-alpha",
        "-ba",
        help="Whether to use a 1 bit alpha transparency channel, Useful for PSX upscaling",
    ),
    ternary_alpha: bool = typer.Option(
        False,
        "--ternary-alpha",
        "-ta",
        help="Whether to use a 2 bit alpha transparency channel, Useful for PSX upscaling",
    ),
    alpha_threshold: float = typer.Option(
        0.5,
        "--alpha-threshold",
        "-at",
        help="Only used when binary_alpha is supplied. Defines the alpha threshold for binary transparency",
    ),
    alpha_boundary_offset: float = typer.Option(
        0.2,
        "--alpha-boundary-offset",
        "-abo",
        help="Only used when binary_alpha is supplied. Determines the offset boundary from the alpha threshold for half transparency.",
    ),
    alpha_mode: AlphaOptions = typer.Option(
        None,
        "--alpha-mode",
        "-am",
        help="Type of alpha processing to use. no_alpha = is no alpha processing. bas = is BA's difference method. alpha_separately = is upscaling the alpha channel separately (like IEU). swapping = is swapping an existing channel with the alpha channel.",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Verbose mode",
    ),
):

    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.WARNING,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler(markup=True)],
        # handlers=[RichHandler(markup=True, rich_tracebacks=True)],
    )

    upscale = Upscale(
        model=model,
        input=input,
        output=output,
        reverse=reverse,
        skip_existing=skip_existing,
        delete_input=delete_input,
        seamless=seamless,
        cpu=cpu,
        fp16=fp16,
        device_id=device_id,
        cache_max_split_depth=cache_max_split_depth,
        binary_alpha=binary_alpha,
        ternary_alpha=ternary_alpha,
        alpha_threshold=alpha_threshold,
        alpha_boundary_offset=alpha_boundary_offset,
        alpha_mode=alpha_mode,
    )
    upscale.run()
Example #16
def main(name: str, lastname: str = typer.Option(...)):
    typer.echo(f"Hello {name} {lastname}")
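With "..." as the default, --lastname becomes a required option; a hedged sketch of the resulting behavior (the typer.run wiring is assumed, not shown in the excerpt):

# Hypothetical entry point for the command above.
# $ python hello.py Camila --lastname Gutiérrez
# Hello Camila Gutiérrez
# $ python hello.py Camila        -> Error: Missing option '--lastname'.
if __name__ == "__main__":
    typer.run(main)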
Example #17
from shutil import rmtree
from typing import List, Optional, Sequence

import typer
from json5 import loads
from nialog.logger import setup_module_logging
from rich.console import Console
from rich.text import Text

from tracerepo import repo, rules, spatial, utils
from tracerepo.organize import Organizer

app = typer.Typer()
console = Console()

DATABASE_OPTION = typer.Option(rules.DATABASE_CSV)

TRACEREPOSITORY_PATH_OPTION = typer.Option(
    ".",
    exists=True,
    file_okay=False,
    dir_okay=True,
)

DATA_FILTER = typer.Option(default=())


def logging_level(level: str):
    """
    Make logging level string.
    """
Example #18
def main(
        inbam1: str = typer.Option(..., help="BAM from ref1"),
        inbam2: str = typer.Option(..., help="BAM from ref2"),
        outbam1: str = typer.Option(..., help="filtered BAM from ref1"),
        outbam2: str = typer.Option(..., help="filtered BAM from ref2"),
        ref1: str = typer.Option(..., help="path of ref1"),
        ref2: str = typer.Option(..., help="path of ref2"),
        sitesfile1: str = typer.Option(
            ..., help="het sites file for ref1. chrom\tpos, gzipped, can be vcf.gz"),
        sitesfile2: str = typer.Option(
            ..., help="het sites file for ref2. chrom\tpos, gzipped, can be vcf.gz"),
        saveinfo: str = typer.Option(
            None, help="save reads infor into this file (.tsv.gz)")):
    """
    bam1 and bam2 contain the same set of reads mapped to two different reference
    genomes. Using the known divergent sites between the two haploid genomes
    (sitesfile1 and sitesfile2), the script decides which genome each read belongs
    to and then filters the BAM files. Reads that look the same in both BAM files
    are kept in both; only reads whose alignments differ are assigned to one side.
    Only suitable for second-generation (short) reads mapped with bwa.
    """
    parent_conn1, child_conn1 = Pipe(duplex=False)
    parent_conn2, child_conn2 = Pipe(duplex=False)
    p1 = Process(target=stats_reads_AS_NM_pipe,
                 args=(inbam1, ref1, child_conn1))
    p2 = Process(target=stats_reads_AS_NM_pipe,
                 args=(inbam2, ref2, child_conn2))
    p1.start()
    p2.start()
    df1 = parent_conn1.recv()
    df2 = parent_conn2.recv()
    p1.join()
    p2.join()
    mdf = pd.merge(df1,
                   df2,
                   left_index=True,
                   right_index=True,
                   how='outer',
                   suffixes=['_bam1', '_bam2'])
    del df1
    del df2
    print(mdf.dtypes)

    p1 = Process(target=stat_query_mismatch_pipe,
                 args=(inbam1, ref1, sitesfile1, child_conn1))
    p2 = Process(target=stat_query_mismatch_pipe,
                 args=(inbam2, ref2, sitesfile2, child_conn2))
    p1.start()
    p2.start()
    reads2nQuery1, reads2QMis1 = parent_conn1.recv()
    reads2nQuery2, reads2QMis2 = parent_conn2.recv()
    p1.join()
    p2.join()

    mdf['QMis_bam1'] = mdf.index.map(lambda x: reads2QMis1.get(x, 0))
    del reads2QMis1
    mdf['QMis_bam2'] = mdf.index.map(lambda x: reads2QMis2.get(x, 0))
    del reads2QMis2
    mdf['nQuery_bam1'] = mdf.index.map(lambda x: reads2nQuery1.get(x, 0))
    del reads2nQuery1
    mdf['nQuery_bam2'] = mdf.index.map(lambda x: reads2nQuery2.get(x, 0))
    del reads2nQuery2

    keepped_reads_bam1, keepped_reads_bam2 = selection_reads(mdf)

    p1 = Process(target=filter_bam,
                 args=(inbam1, outbam1, keepped_reads_bam1, ref1))
    p2 = Process(target=filter_bam,
                 args=(inbam2, outbam2, keepped_reads_bam2, ref2))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

    if saveinfo:
        print(f'save reads info to {saveinfo}')
        mdf.to_csv(saveinfo, sep='\t')
Example #19
def main(name: str = typer.Option(..., callback=name_callback)):
    print(f"Hello {name}")
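name_callback is not shown in the excerpt; a plausible sketch of such a Typer callback (hypothetical validation logic):

# Hypothetical callback sketch; the real name_callback may differ.
import typer


def name_callback(value: str) -> str:
    if value != value.title():
        raise typer.BadParameter("Name must be in title case")
    return value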
Example #20
def install(build_system: BuildSystem = typer.Option(
    get_default_env_build_system)):
    sync(build_system=build_system)
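When the default is a callable, Click (which Typer builds on) invokes it lazily to compute the default value. A hypothetical sketch of what such a factory might look like (the real get_default_env_build_system lives elsewhere in the project):

# Hypothetical factory sketch; the real helper may differ.
import os


def get_default_env_build_system() -> str:
    # Assumption: prefer an environment variable, fall back to "pip".
    return os.environ.get("BUILD_SYSTEM", "pip")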