def register_subcommand(parser: ArgumentParser):
    """Register the auth, s3 and upload subcommands on *parser*.

    Each subcommand's `func` default is a lambda building the matching
    command object lazily, so the command classes are only resolved when
    the user actually invokes the subcommand.
    """
    # Authentication commands.
    cmd = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
    cmd.set_defaults(func=lambda args: LoginCommand(args))
    cmd = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
    cmd.set_defaults(func=lambda args: WhoamiCommand(args))
    cmd = parser.add_parser("logout", help="Log out")
    cmd.set_defaults(func=lambda args: LogoutCommand(args))

    # S3 object management: `s3 ls` and `s3 rm`.
    s3_root = parser.add_parser("s3", help="{ls, rm} Commands to interact with the files you upload on S3.")
    s3_sub = s3_root.add_subparsers(help="s3 related commands")
    s3_ls = s3_sub.add_parser("ls")
    s3_ls.add_argument("--organization", type=str, help="Optional: organization namespace.")
    s3_ls.set_defaults(func=lambda args: ListObjsCommand(args))
    s3_rm = s3_sub.add_parser("rm")
    s3_rm.add_argument("filename", type=str, help="individual object filename to delete from S3.")
    s3_rm.add_argument("--organization", type=str, help="Optional: organization namespace.")
    s3_rm.set_defaults(func=lambda args: DeleteObjCommand(args))

    # Dataset upload.
    up = parser.add_parser("upload", help="Upload a dataset to S3.")
    up.add_argument("path", type=str, help="Local path of the dataset folder or individual file to upload.")
    up.add_argument("--organization", type=str, help="Optional: organization namespace.")
    up.add_argument("--filename", type=str, default=None, help="Optional: override individual object filename on S3.")
    up.set_defaults(func=lambda args: UploadCommand(args))
Example #2
0
    def register_subcommand(parser: ArgumentParser):
        """Register the auth commands plus the git-based `repo` command group."""
        # Authentication.
        login = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
        login.set_defaults(func=lambda args: LoginCommand(args))
        whoami = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
        whoami.set_defaults(func=lambda args: WhoamiCommand(args))
        logout = parser.add_parser("logout", help="Log out")
        logout.set_defaults(func=lambda args: LogoutCommand(args))

        # New system: git-based repo management on huggingface.co.
        repo = parser.add_parser(
            "repo",
            help="{create, ls-files} Commands to interact with your huggingface.co repos.",
        )
        repo_sub = repo.add_subparsers(help="huggingface.co repos related commands")

        repo_ls = repo_sub.add_parser("ls-files", help="List all your files on huggingface.co")
        repo_ls.add_argument("--organization", type=str, help="Optional: organization namespace.")
        repo_ls.set_defaults(func=lambda args: ListReposObjsCommand(args))

        repo_create = repo_sub.add_parser("create", help="Create a new repo on huggingface.co")
        repo_create.add_argument(
            "name",
            type=str,
            help="Name for your repo. Will be namespaced under your username to build the repo id.",
        )
        repo_create.add_argument(
            "--type",
            type=str,
            help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
        )
        repo_create.add_argument("--organization", type=str, help="Optional: organization namespace.")
        repo_create.add_argument(
            "--space_sdk",
            type=str,
            help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
            choices=SPACES_SDK_TYPES,
        )
        repo_create.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
        repo_create.set_defaults(func=lambda args: RepoCreateCommand(args))
Example #3
0
def generate_diagnostic_parsers(subparsers: ArgumentParser) -> None:
    """All CLI arg parsers generally used diagnostically are generated here.

    Registers the `status` and `info` subcommands on *subparsers*, wiring
    their `func` defaults to the `query_status` / `print_info` handlers.

    :param [ArgumentParser] `subparsers`: the subparsers needed for every CLI command that handles diagnostics for a
        Merlin job.
    """
    # merlin status
    # NOTE: the backslash continuations below are *inside* the string
    # literals, so the runs of leading spaces on the continued lines are
    # part of the runtime help text.
    status: ArgumentParser = subparsers.add_parser(
        "status",
        help="List server stats (name, number of tasks to do, \
                              number of connected workers) for a workflow spec.",
    )
    status.set_defaults(func=query_status)
    status.add_argument("specification",
                        type=str,
                        help="Path to a Merlin YAML spec file")
    # Defaults to querying every step unless specific step names are given.
    status.add_argument(
        "--steps",
        nargs="+",
        type=str,
        dest="steps",
        default=["all"],
        help="The specific steps in the YAML file you want to query",
    )
    status.add_argument(
        "--task_server",
        type=str,
        default="celery",
        help="Task server type.\
                            Default: %(default)s",
    )
    # Space-delimited KEY=VALUE overrides, collected into `args.variables`.
    status.add_argument(
        "--vars",
        action="store",
        dest="variables",
        type=str,
        nargs="+",
        default=None,
        help=
        "Specify desired Merlin variable values to override those found in the specification. Space-delimited. "
        "Example: '--vars LEARN=path/to/new_learn.py EPOCHS=3'",
    )
    # Optional CSV export of the status report.
    status.add_argument("--csv",
                        type=str,
                        help="csv file to dump status report to",
                        default=None)

    # merlin info
    info: ArgumentParser = subparsers.add_parser(
        "info",
        help=
        "display info about the merlin configuration and the python configuration. Useful for debugging.",
    )
    info.set_defaults(func=print_info)
Example #4
0
 def register_subcommand(parser: ArgumentParser):
     """Register the auth subcommands and the start of the s3 command group."""
     # Authentication commands.
     signin = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
     signin.set_defaults(func=lambda args: LoginCommand(args))
     identity = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
     identity.set_defaults(func=lambda args: WhoamiCommand(args))
     signout = parser.add_parser("logout", help="Log out")
     signout.set_defaults(func=lambda args: LogoutCommand(args))
     # S3 command group (only `ls` is declared in this view).
     s3_root = parser.add_parser("s3", help="{ls, rm} Commands to interact with the files you upload on S3.")
     s3_sub = s3_root.add_subparsers(help="s3 related commands")
     s3_ls = s3_sub.add_parser("ls")
Example #5
0
    def register_subcommand(parser: ArgumentParser):
        """Register the two git-lfs helper subcommands."""
        # One-time repository configuration enabling very large uploads.
        enable = parser.add_parser(
            "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB."
        )
        enable.add_argument("path", type=str, help="Local path to repository you want to configure.")
        enable.set_defaults(func=lambda args: LfsEnableCommand(args))

        # Transfer-agent entry point; invoked by git-lfs, not by users.
        multipart = parser.add_parser(
            LFS_MULTIPART_UPLOAD_COMMAND, help="Command will get called by git-lfs, do not call it directly."
        )
        multipart.set_defaults(func=lambda args: LfsUploadCommand(args))
Example #6
0
def add_response_arguments_to_parser(
        subparsers: argparse.ArgumentParser) -> None:
    """Given an argument parser subparser, build a response specific parser."""
    # `sensor-query` (alias `sq`): run a sensor query.
    sq = subparsers.add_parser(
        "sensor-query",
        aliases=["sq"],
        help="Execute a sensor query (Response). Valid search fields: 'ip', 'hostname', and 'groupid'",
    )
    sq.add_argument("sensor_query", help="the sensor query you'd like to execute")
    sq.add_argument(
        "-nw", "--no-warnings",
        action="store_true",
        default=False,
        help="Don't warn before printing large query results",
    )
    sq.add_argument(
        "-ad", "--all-details",
        action="store_true",
        default=False,
        help="Print all available process info (all fields).",
    )

    # `response_watchlist` (alias `rwl`): manage response watchlists.
    rwl = subparsers.add_parser(
        "response_watchlist",
        aliases=["rwl"],
        help="Work with response watchlists.")
    rwl.add_argument("-l", "--list-watchlists",
                     action="store_true",
                     help="Print all watchlists.")
    rwl.add_argument("-q", "--query-watchlists",
                     action="store",
                     help="filter watchlists by watchlist query")
    rwl.add_argument(
        "-json", "--watchlists-to-json",
        action="store_true",
        help="Convert watchlists to json and print to stdout.",
    )
    rwl.add_argument(
        "--watchlist-names-from-stdin",
        action="store_true",
        help="read a list of watchlist names from stdin to load.")
Example #7
0
    def _register_files_parser(self, parent: ArgumentParser) -> None:
        """
        List files cached for a model (and, optionally, a sampler).

        Registers the `files` subcommand. Its handler loads the model (and
        sampler, if given) JSON specifications and prints every file found
        in the corresponding caches, one absolute path per line.

        NOTE(review): the previous docstring described importing OpenCL
        files into the kernel database, which does not match what `_main`
        below actually does.
        """

        def _main(model_file: TextIO, sampler_file: TextIO) -> None:
            # Build the model from its JSON specification.
            model_json = jsonutil.loads(model_file.read())
            model = clgen.Model.from_json(model_json)

            # Both the corpus cache and the model cache are listed.
            caches = [model.corpus.cache, model.cache]

            # sampler_file is optional (nargs="?" below); add its cache too.
            if sampler_file:
                sampler_json = jsonutil.loads(sampler_file.read())
                sampler = clgen.Sampler.from_json(sampler_json)
                caches.append(sampler.cache(model))

            # Flatten the per-cache recursive listings into one sorted list.
            files = sorted(
                types.flatten(c.ls(abspaths=True, recursive=True) for c in caches))
            print('\n'.join(files))

        # `description` comes from the enclosing *class* docstring
        # (inspect.getdoc(self) reads the instance's class __doc__).
        parser = parent.add_parser("files", help="list cached files",
                                   description=inspect.getdoc(self),
                                   epilog=__help_epilog__)
        parser.set_defaults(dispatch_func=_main)
        parser.add_argument("model_file", metavar="<model>",
                            type=FileType("r"),
                            help="path to model specification file")
        parser.add_argument("sampler_file", metavar="<sampler>", nargs="?",
                            type=FileType("r"),
                            help="path to sampler specification file")
Example #8
0
def _set_cargo_command(parser: argparse.ArgumentParser) -> None:
    """Register the ``cargo`` command group with ``add`` and ``find`` subcommands.

    Args:
        parser: Subparsers action to attach the ``cargo`` command group to.
    """
    cargo_ = cargo.Cargo()
    cargo_cmd = parser.add_parser("cargo", help="see `cargo -h`")
    cargo_sub = cargo_cmd.add_subparsers()

    # `cargo add`: register cargo described by a JSON file.
    # Fixed help typo: "sed `add -h`" -> "see `add -h`".
    add = cargo_sub.add_parser("add", help="see `add -h`", usage=textwrap.dedent('''
        cargo add [-h] JSON_FILE_PATH

        JSON_FILE_PATH format:
            {
              "foo": "bar"
            }
    ''').strip())
    add.set_defaults(func=cargo_.add)
    add.add_argument(
        "input",
        metavar="JSON_FILE_PATH",
        help="登録対象の貨物JSONファイルパス",  # Path of the cargo JSON file to register.
    )

    # `cargo find`: look up cargo by ID.
    # Fixed copy-paste defects: help and usage referred to `add` instead of `find`.
    find = cargo_sub.add_parser("find", help="see `find -h`", usage="cargo find [-h] CARGO_ID")
    find.set_defaults(func=cargo_.find)
    find.add_argument(
        "cargo_id",
        metavar="CARGO_ID",
        help="検索対象の貨物ID",  # ID of the cargo to search for.
    )
Example #9
0
 def register_subcommand(parser: ArgumentParser):
     """Register the `run` subcommand that drives a pipeline from the CLI."""
     rp = parser.add_parser("run", help="Run a pipeline through the CLI")
     # What to run and where its data comes from / goes to.
     rp.add_argument("--task", choices=SUPPORTED_TASKS.keys(), help="Task to run")
     rp.add_argument("--input", type=str, help="Path to the file to use for inference")
     rp.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
     # Model / tokenizer selection.
     rp.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
     rp.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
     rp.add_argument("--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)")
     rp.add_argument(
         "--column",
         type=str,
         help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
     )
     rp.add_argument(
         "--format",
         type=str,
         default="infer",
         choices=PipelineDataFormat.SUPPORTED_FORMATS,
         help="Input format to read from",
     )
     rp.add_argument(
         "--device",
         type=int,
         default=-1,
         help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
     )
     rp.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
     rp.set_defaults(func=run_command_factory)
Example #10
0
def _build_delete_job_parser(subparsers: argparse.ArgumentParser):
    delete_job_parser = subparsers.add_parser("delete-job", help="delete job")
    delete_job_parser.set_defaults(func="delete_job")
    delete_job_parser.add_argument(
        "job_id", metavar="job-id", help="job ID [base64 encoded]", type=b64decode
    )
    delete_job_parser.set_defaults(func="delete_job")
Example #11
0
 def register_subcommand(parser: ArgumentParser):
     """
     Register this command to argparse so it's available for the transformer-cli
     :param parser: Root parser to register command-specific arguments
     :return:
     """
     train_parser = parser.add_parser(
         "convert",
         help="CLI tool to run convert model from original "
         "author checkpoints to Transformers PyTorch checkpoints.",
     )
     train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
     train_parser.add_argument(
         "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
     )
     # Fixed user-facing help typo: "savd" -> "saved".
     train_parser.add_argument(
         "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
     )
     train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
     train_parser.add_argument(
         "--finetuning_task_name",
         type=str,
         default=None,
         help="Optional fine-tuning task name if the TF model was a finetuned model.",
     )
     train_parser.set_defaults(func=convert_command_factory)
Example #12
0
 def register_subcommand(parser: ArgumentParser):
     """Register the `pack` subcommand for preparing adapters for AdapterHub."""
     pack_parser = parser.add_parser(
         "pack",
         help="CLI tool to extract all adapters in a directory and to prepare them for upload to AdapterHub.",
     )
     # One or more input directories to scan.
     pack_parser.add_argument(
         "input_paths",
         metavar="PATH",
         type=str,
         nargs="+",
         help="Path to a directory pretrained models or pretrained adapters."
     )
     pack_parser.add_argument(
         "-o", "--output_path",
         type=str,
         help="Path to a directory where the packed adapters will be saved. Will save to .PACK_OUTPUT folder by default.",
     )
     pack_parser.add_argument(
         "--template",
         type=str,
         help="Path to a YAML file to be used as template for the adapter info cards."
     )
     pack_parser.add_argument(
         "--no_extract",
         action="store_true",
         help="Don't attempt to extract from models found in the input directory.",
     )
     pack_parser.set_defaults(func=adapter_pack_command_factory)
 def register_subcommand(parser: ArgumentParser):
     """Register the `rec.addmaskparam` subcommand."""
     cmd = parser.add_parser("rec.addmaskparam")
     cmd.add_argument("input", type=str, help="input rec")
     cmd.add_argument("output", type=str, help="output rec, with mask param")
     cmd.set_defaults(func=rec_add_mask_param_command_factory)
Example #14
0
def _register_atomize_parser(self, parent: ArgumentParser) -> None:
    """
    Extract and print corpus vocabulary.
    """
    # FIXME(polyglot):

    def _main(infile: TextIO, vocab: str, size: bool) -> None:
        """Atomize *infile* and log either the atoms or their count."""
        atoms = corpus.atomize(infile.read(), vocab=vocab)
        if size:
            log.info("size:", len(atoms))
        else:
            log.info('\n'.join(atoms))

    atomize_parser = parent.add_parser(
        "atomize",
        help="atomize files",
        description=inspect.getdoc(self),
        epilog=__help_epilog__)
    atomize_parser.set_defaults(dispatch_func=_main)
    atomize_parser.add_argument('infile', metavar="<path>", type=FileType("r"),
                                help='path to input text file')
    atomize_parser.add_argument('-t', '--type', type=str, dest="vocab",
                                default='char', help='vocabulary type')
    atomize_parser.add_argument('-s', '--size', action="store_true",
                                help="print vocabulary size")
Example #15
0
    def _register_migrate_parser(self, parent: ArgumentParser) -> None:
        """
        Refresh the cached model, corpus, and sampler IDs.

        Registers the `migrate` subcommand, whose handler re-derives each
        cached model's hash from its stored META specification and moves the
        cache directory when the current hashing scheme yields a new ID.
        Corpus and sampler refresh are not implemented yet.
        """

        def _main() -> None:
            # Root of the on-disk clgen cache.
            cache = clgen.cachepath()

            log.warning("Not Implemented: refresh corpuses")

            if fs.isdir(cache, "model"):
                # Each subdirectory of cache/model is named by the model's hash.
                cached_modeldirs = fs.ls(fs.path(cache, "model"), abspaths=True)
                for cached_modeldir in cached_modeldirs:
                    cached_model_id = fs.basename(cached_modeldir)
                    # META holds the JSON spec the hash was derived from.
                    cached_meta = jsonutil.read_file(fs.path(cached_modeldir, "META"))

                    model = clgen.Model.from_json(cached_meta)

                    # Hash changed under the current scheme: relocate the dir.
                    if cached_model_id != model.hash:
                        log.info(cached_model_id, '->', model.hash)

                        # Refuse to clobber an existing directory at the target.
                        if fs.isdir(model.cache.path):
                            log.fatal("cache conflict", file=sys.stderr)

                        fs.mv(cached_modeldir, model.cache.path)

            log.warning("Not Implemented: refresh samplers")

        parser = parent.add_parser("migrate",
                                   help="migrate the cache",
                                   description=inspect.getdoc(self),
                                   epilog=__help_epilog__)
        parser.set_defaults(dispatch_func=_main)
Example #16
0
def _build_get_job_parser(subparsers: argparse.ArgumentParser):
    get_job_parser = subparsers.add_parser("get-job", help="get job")
    get_job_parser.set_defaults(func="get_job")
    get_job_parser.add_argument(
        "job_id", metavar="job-id", help="job ID [base64 encoded]", type=b64decode
    )
    get_job_parser.set_defaults(func="get_job")
Example #17
0
    def _register_dump_parser(self, parent: ArgumentParser) -> None:
        """
        Dump kernel dataset to file(s).

        Registers the `dump` subcommand, whose handler forwards the parsed
        options to `dbutil.dump_db`.
        """

        def _main(db_file: BinaryIO, outpath: Path, dir: bool, eof: bool,
                  file_sep: bool, input_samples: bool, reverse: bool,
                  status: int) -> None:
            # NOTE(review): `reverse` and `status` are accepted here (and
            # parsed below) but are NOT forwarded to dump_db — confirm
            # whether that is intentional.
            dbutil.dump_db(db_file.name, outpath, dir=dir, eof=eof,
                           fileid=file_sep, input_samples=input_samples)

        # `description` is inspect.getdoc(_main); _main has no docstring, so
        # it is None. Adding one would change the generated --help output.
        parser = parent.add_parser("dump", help="export database contents",
                                   description=inspect.getdoc(_main),
                                   epilog=__help_epilog__)
        parser.set_defaults(dispatch_func=_main)
        parser.add_argument('db_file', metavar="<db>", type=FileType("rb"),
                            help='path to kernels database')
        parser.add_argument('outpath', metavar="<path>", type=Path,
                            help='path to output file or directory')
        parser.add_argument("-d", "--dir", action='store_true',
                            help='output to directory (overrides -i, --eof, -r)')
        parser.add_argument("-i", "--file-sep", action='store_true',
                            help='include file separators')
        parser.add_argument('--input-samples', action='store_true',
                            help='use input contents, not preprocessed')
        parser.add_argument('--eof', action='store_true',
                            help='print end of file')
        parser.add_argument('-r', '--reverse', action='store_true',
                            help='use reverse order')
        parser.add_argument('-s', '--status', type=int, default=0,
                            help='status code to use')
Example #18
0
def _register_sample_parser(self, parent: ArgumentParser) -> None:
    """
    Sample a model.
    """

    def _main(model_file: TextIO, sampler_file: TextIO) -> None:
        """Train the model described by *model_file*, then sample it."""
        model = clgen.Model.from_json(jsonutil.loads(model_file.read()))
        sampler = clgen.Sampler.from_json(jsonutil.loads(sampler_file.read()))
        model.train()
        sampler.sample(model)

    sample_parser = parent.add_parser("sample", aliases=["s", "sa"],
                                      help="train and sample models",
                                      description=inspect.getdoc(self),
                                      epilog=__help_epilog__)
    sample_parser.set_defaults(dispatch_func=_main)
    sample_parser.add_argument("model_file", metavar="<model>",
                               type=FileType("r"),
                               help="path to model specification file")
    sample_parser.add_argument("sampler_file", metavar="<sampler>",
                               type=FileType("r"),
                               help="path to sampler specification file")
Example #19
0
def _register_test_parser(self, parent: ArgumentParser) -> None:
    """
    Run the CLgen self-test suite.
    """

    def _main(cache_path: bool, coveragerc_path: bool,
              coverage_path: bool) -> None:
        """Print a requested path and exit, or run the full test suite."""
        import clgen.test

        # Each path flag short-circuits: print the path, exit successfully.
        if cache_path:
            print(clgen.test.test_cache_path())
            sys.exit(0)
        if coveragerc_path:
            print(clgen.test.coveragerc_path())
            sys.exit(0)
        if coverage_path:
            print(clgen.test.coverage_report_path())
            sys.exit(0)

        # No flag: the testsuite result becomes the exit status.
        sys.exit(clgen.test.testsuite())

    test_parser = parent.add_parser("test", help="run the testsuite",
                                    description=inspect.getdoc(self),
                                    epilog=__help_epilog__)
    test_parser.set_defaults(dispatch_func=_main)
    flags = test_parser.add_mutually_exclusive_group()
    flags.add_argument("--cache-path", action="store_true",
                       help="print path to test cache")
    flags.add_argument("--coveragerc-path", action="store_true",
                       help="print path to coveragerc file")
    flags.add_argument("--coverage-path", action="store_true",
                       help="print path to coverage file")
Example #20
0
def config_export_subprogram(subparsers: argparse.ArgumentParser) -> None:
    """Register the ``export`` subcommand for exporting Python projects as FMUs."""

    def _str_to_bool(value: str) -> bool:
        """Parse an explicit true/false command-line value.

        ``type=bool`` is a classic argparse pitfall: any non-empty string —
        including ``"False"`` — is truthy, so ``-bi False`` used to yield
        True. This helper accepts the usual spellings of both values and
        rejects anything else with a proper argparse error.
        """
        lowered = value.strip().lower()
        if lowered in ("1", "true", "yes", "y", "on"):
            return True
        if lowered in ("0", "false", "no", "n", "off"):
            return False
        raise argparse.ArgumentTypeError(f"expected a boolean value, got {value!r}")

    parser_export = subparsers.add_parser(
        'export',
        help="Export Python projects as FMUs",
    )

    parser_export.add_argument(
        "--project", "-p", required=True, help="path to Python project")

    parser_export.add_argument(
        "--output", '-o', required=True, help="output path of the exported archive")

    parser_export.add_argument(
        '--overwrite', '-ow', action='store_true', help='allow overwriting of existing files')

    group_bundle = parser_export.add_argument_group('bundle')

    help_bundle_interpreter = """Bundle a Python Interpreter in the FMU. 
    This will allow standalone execution on platforms where a suitable Python interpreter is not available, at the cost of an increase in archive size.
    Note that a interpreter is bundled for each supported platform.
    """
    group_bundle.add_argument(
        "-bi", type=_str_to_bool, default=False, help=help_bundle_interpreter)

    help_bundle_libs = """Bundle current Python environment Libraries in the FMU. 
    """
    group_bundle.add_argument(
        "-bl", type=_str_to_bool, default=True, help=help_bundle_libs)

    help_prune_libs = """ Decrease size of the included libraries by only including those used by the FMU.
    Note that this relies on analyzing static analysis of the Python files and may fail to discover all dependencies.
    """
    group_bundle.add_argument(
        "-bp", type=_str_to_bool, default=False, help=help_prune_libs)
Example #21
0
def _build_list_rate_limit_parser(subparsers: argparse.ArgumentParser):
    list_rate_limits_parser = subparsers.add_parser(
        "list-rate-limits", help="list rate limits"
    )
    list_rate_limits_parser.add_argument("--offset", help="results offset", type=int)
    list_rate_limits_parser.add_argument("--limit", help="results to return", type=int)
    list_rate_limits_parser.set_defaults(func="list_rate_limits")
Example #22
0
def _build_set_job_parser(subparsers: argparse.ArgumentParser):
    """Attach the ``set-job`` subcommand (create/update a crawl job)."""
    set_job_parser = subparsers.add_parser("set-job", help="set job")
    set_job_parser.add_argument(
        "--run-state",
        help=f"run state {starbelly_pb2.JobRunState.items()}",
        type=int,
        choices=starbelly_pb2.JobRunState.values(),
    )
    set_job_parser.add_argument(
        "--job-id", help="job ID [base64 encoded]", type=b64decode,
    )
    set_job_parser.add_argument(
        "--policy-id", help="policy ID [base64 encoded]", type=b64decode,
    )
    set_job_parser.add_argument(
        "--name", help="job name", type=str,
    )
    set_job_parser.add_argument(
        "--tags",
        help="job tags ('tag1,tag2,tag3')",
        # Split a comma-separated list, dropping empty/whitespace-only items.
        type=lambda s: [i.strip() for i in s.split(",") if i.strip()],
    )
    set_job_parser.add_argument("--seeds", help="job seed url(s)", nargs="+")
    # Registered once; the original called set_defaults twice with the same value.
    set_job_parser.set_defaults(func="set_job")
Example #23
0
 def register_subcommand(parser: ArgumentParser):
     """Register the `create_project` subcommand for AutoNLP."""
     cp = parser.add_parser(
         "create_project", description="✨ Creates a project in AutoNLP.")
     cp.add_argument("--name",
                     type=str,
                     default=None,
                     required=True,
                     help="The project's name")
     # Task must be one of the known AutoNLP task types.
     cp.add_argument(
         "--task",
         metavar="TASK",
         type=str,
         default=None,
         required=True,
         help=f"The project's task type, one of: {list(TASKS.keys())}",
         choices=list(TASKS.keys()),
     )
     cp.add_argument(
         "--language",
         type=str,
         default=None,
         required=True,
         metavar="LANGUAGE",
         help=f"The project's language, one of: {SUPPORTED_LANGUAGES}",
         choices=SUPPORTED_LANGUAGES,
     )
     cp.set_defaults(func=create_project_command_factory)
Example #24
0
def switchParser(parsers: ArgumentParser):
    """Register the `switch` subcommand for changing the active Kattis instance."""
    helpText = 'Switch Kattis instance.'
    descText = helpText + ' Useful if you have exercises on e.g. itu.kattis.com using problems from open.kattis.com, allowing you to use a single folder for both (or even more). It is (currently) required that the configs share folder with your problems.'
    switch_cmd = parsers.add_parser('switch', description=descText, help=helpText)
    # Optional positional: the target instance name.
    switch_cmd.add_argument('instance', nargs='?',
                            help='Name of instance to switch to')
Example #25
0
 def register_subcommand(parser: ArgumentParser):
     """Register the login/whoami/logout/ls/upload subcommands."""
     # Authentication commands.
     signin = parser.add_parser("login")
     signin.set_defaults(func=lambda args: LoginCommand(args))
     identity = parser.add_parser("whoami")
     identity.set_defaults(func=lambda args: WhoamiCommand(args))
     signout = parser.add_parser("logout")
     signout.set_defaults(func=lambda args: LogoutCommand(args))
     # Listing.
     listing = parser.add_parser("ls")
     listing.set_defaults(func=lambda args: ListObjsCommand(args))
     # Upload a folder or a single file.
     up = parser.add_parser("upload")
     up.add_argument("path", type=str, help="Local path of the folder or individual file to upload.")
     up.add_argument("--filename", type=str, default=None,
                     help="Optional: override individual object filename on S3.")
     up.set_defaults(func=lambda args: UploadCommand(args))
Example #26
0
    def register_subcommand(main_parser: ArgumentParser):
        """Register the `eval` subcommand for evaluating a model with TextAttack."""
        cmd = main_parser.add_parser(
            "eval",
            help="evaluate a model with TextAttack",
            formatter_class=ArgumentDefaultsHelpFormatter,
        )

        # Let the shared model/dataset argument groups contribute first.
        cmd = ModelArgs._add_parser_args(cmd)
        cmd = DatasetArgs._add_parser_args(cmd)

        cmd.add_argument("--random-seed", default=765, type=int)
        cmd.add_argument(
            "--batch-size",
            type=int,
            default=32,
            help="The batch size for evaluating the model.",
        )
        cmd.add_argument(
            "--num-examples", "-n",
            type=int,
            required=False,
            default=5,
            help="The number of examples to process, -1 for entire dataset",
        )
        cmd.add_argument(
            "--num-examples-offset", "-o",
            type=int,
            required=False,
            default=0,
            help="The offset to start at in the dataset.",
        )

        cmd.set_defaults(func=EvalModelCommand())
Example #27
0
 def register_subcommand(parser: ArgumentParser):
     """Register the `test` subcommand for exercising a dataset script."""
     tp = parser.add_parser("test")
     tp.add_argument("--name", type=str, default=None, help="Dataset processing name")
     tp.add_argument("--cache_dir",
                     type=str,
                     default=None,
                     help="Cache directory where the datasets are stored.")
     tp.add_argument("--data_dir",
                     type=str,
                     default=None,
                     help="Can be used to specify a manual directory to get the files from.")
     # Boolean switches controlling what the test run covers and keeps.
     tp.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
     tp.add_argument("--save_infos", action="store_true", help="Save the dataset infos file")
     tp.add_argument("--ignore_verifications",
                     action="store_true",
                     help="Run the test without checksums and splits checks")
     tp.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
     tp.add_argument("--clear_cache",
                     action="store_true",
                     help="Remove downloaded files and cached datasets after each config test")
     # Required positional: which dataset to test.
     tp.add_argument("dataset", type=str, help="Name of the dataset to download")
     tp.set_defaults(func=test_command_factory)
Example #28
0
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        sp = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        sp.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        # Server binding options.
        sp.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        sp.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        sp.add_argument("--workers", type=int, default=1, help="Number of http workers")
        # Model / tokenizer selection.
        sp.add_argument("--model", type=str, help="Model's name or path to stored model.")
        sp.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        sp.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        sp.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        sp.set_defaults(func=serve_command_factory)
Example #29
0
 def register_subcommand(main_parser: ArgumentParser):
     """Register the `benchmark-recipe` subcommand."""
     recipe_parser = main_parser.add_parser(
         "benchmark-recipe",
         help="benchmark a recipe",
         formatter_class=ArgumentDefaultsHelpFormatter,
     )
     recipe_parser.set_defaults(func=BenchmarkRecipeCommand())
Example #30
0
 def register_subcommand(parser: ArgumentParser):
     """Register the `run_beam` subcommand for Beam-based dataset processing."""
     rb = parser.add_parser("run_beam")
     rb.add_argument("--name", type=str, default=None, help="Dataset processing name")
     rb.add_argument("--cache_dir",
                     type=str,
                     default=None,
                     help="Cache directory where the datasets are stored.")
     rb.add_argument(
         "--beam_pipeline_options",
         type=str,
         default="",
         help="Beam pipeline options, separated by commas. Example: `--beam_pipeline_options=job_name=my-job,project=my-project`",
     )
     rb.add_argument("--data_dir",
                     type=str,
                     default=None,
                     help="Can be used to specify a manual directory to get the files from.")
     # Boolean switches mirroring the `test` subcommand.
     rb.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
     rb.add_argument("--save_infos", action="store_true", help="Save the dataset infos file")
     rb.add_argument("--ignore_verifications",
                     action="store_true",
                     help="Run the test without checksums and splits checks")
     rb.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
     # Required positional: which dataset to process.
     rb.add_argument("dataset", type=str, help="Name of the dataset to download")
     rb.set_defaults(func=run_beam_command_factory)
Example #31
0
def add_generate_exif_parser(subparsers: ArgumentParser):
    """Register the ``generate_exif`` subcommand and return *subparsers* for chaining."""
    exif_parser = subparsers.add_parser('generate_exif', formatter_class=RawTextHelpFormatter)
    exif_parser.set_defaults(func=exif_generation_command)
    # Required: directory containing the metadata file and the images.
    exif_parser.add_argument('-p',
                             '--path',
                             required=True,
                             help='Folder PATH with OSC metadata file and images')
    _add_logging_argument(exif_parser)

    return subparsers
Example #32
0
def command_argument_paraser(fn: Callable, parser: ArgumentParser) -> list:
    '''
    Build an argparse subcommand for *fn* by parsing its ``__doc__``.

    Each non-blank docstring line is classified: lines starting with ``@``
    or ``:`` describe one ``add_argument`` call; every other line
    contributes to the subcommand's help text.  The subcommand is named
    after ``fn.__name__`` and is returned after all documented arguments
    have been applied.

    NOTE(review): the ``-> list`` annotation looks inaccurate — the final
    ``reduce`` returns the sub-parser itself; confirm against callers.
    '''

    # NOTE(review): `ignore` is a project helper; `@partial(ignore, res='')`
    # presumably makes doc_parser return '' on failure — TODO confirm.
    @partial(ignore, res='')
    def doc_parser(doc: str) -> str:
        '''
        Classify a single docstring line.

        Accepted argument-line formats (marker may be ``@`` or ``:``):
            @parms: foo
            :parms: foo
            @argument: foo

        Returns a list (argument name followed by the remaining
        comma-separated tokens) for argument lines, or the stripped line
        itself for plain help text.
        '''
        kvs = doc.strip().split(', ')
        if kvs[0].startswith(('@', ':')):
            # e.g. "@parms: foo, default=1" -> ['foo', 'default=1']
            # (`add` concatenates the two lists — presumably operator.add)
            return add([kvs[0].split(' ')[1]], kvs[1:])
        else:
            return doc.strip()

    def parse_doc(params: Iterable) -> (tuple, dict):
        '''
        Split documented tokens into positional arguments (no ``=``) and
        keyword arguments (``key=value`` pairs) for ``add_argument``.
        Returns (generator of positionals, dict of keywords).
        '''
        args = (p for p in params if '=' not in p)
        kwargs = dict((tuple(p.split('=')) for p in params if '=' in p))
        return args, kwargs

    def add_params(parser: ArgumentParser, param: Iterable) -> ArgumentParser:
        '''
        Fold step: apply one documented argument to *parser* and return it.
        '''
        args, kwargs = parse_doc(param)
        parser.add_argument(*args, **kwargs)
        return parser

    # Parse every non-blank docstring line; lists are argument specs,
    # plain strings are help-text fragments.
    docs = tuple(map(doc_parser, filter(bool, fn.__doc__.split('\n'))))
    name = fn.__name__
    params = filter(lambda x: isinstance(x, list), docs)
    helps = filter(lambda x: isinstance(x, str), docs)
    # Subcommand help is the concatenation of all help fragments.
    command = parser.add_parser(name, help=reduce(add, helps))
    # Apply each argument spec in order to the new sub-parser.
    return reduce(add_params, params, command)
Example #33
0
def add_upload_parser(subparsers: ArgumentParser):
    """Adds upload parser"""
    upload_parser = subparsers.add_parser('upload', formatter_class=RawTextHelpFormatter)
    upload_parser.set_defaults(func=upload_command)
    upload_parser.add_argument('-p',
                               '--path',
                               required=True,
                               help='Full path directory that contains sequence(s) '
                                    'folder(s) to upload')
    upload_parser.add_argument('-w',
                               '--workers',
                               required=False,
                               type=int,
                               default=10,
                               choices=range(1, 21),
                               metavar="[1-20]",
                               help='Number of parallel workers used to upload files. '
                                    'Default number is 10.')
    _add_environment_argument(upload_parser)
    _add_logging_argument(upload_parser)