def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``predict`` command: run a trained model over a JSON-lines file."""
    description = '''Run the specified model against a JSON-lines input file.'''
    subparser = parser.add_parser(
            name, description=description, help='Use a trained model to make predictions.')

    # Positional arguments: the model archive and the data to predict on.
    subparser.add_argument('archive_file', type=str,
                           help='the archived model to make predictions with')
    subparser.add_argument('input_file', type=argparse.FileType('r'),
                           help='path to input file')

    subparser.add_argument('--output-file', type=argparse.FileType('w'),
                           help='path to output file')
    subparser.add_argument('--weights-file', type=str,
                           help='a path that overrides which weights file to use')

    # Groups kept mutually exclusive so future variants (e.g. per-device batch
    # sizes) can be added without clashing.
    batching = subparser.add_mutually_exclusive_group(required=False)
    batching.add_argument('--batch-size', type=int, default=1,
                          help='The batch size to use for processing')

    subparser.add_argument('--silent', action='store_true',
                           help='do not print output to stdout')

    device_group = subparser.add_mutually_exclusive_group(required=False)
    device_group.add_argument('--cuda-device', type=int, default=-1,
                              help='id of GPU to use (if any)')

    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a HOCON structure used to override the experiment configuration')
    subparser.add_argument('--predictor', type=str,
                           help='optionally specify a specific predictor to use')

    subparser.set_defaults(func=_predict)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``elmo`` command: write ELMo word vectors for an input file.

    Fix: the one-line ``help`` previously read 'Use a trained model to make
    predictions.' — copy-pasted from the predict command and contradicting the
    ELMo description shown on this command's own help page.
    """
    description = '''Create word vectors using ELMo.'''
    subparser = parser.add_parser(
            name, description=description,
            help='Create word vectors using a pretrained ELMo model.')

    subparser.add_argument('input_file', type=argparse.FileType('r'),
                           help='The path to the input file.')
    subparser.add_argument('output_file', type=str,
                           help='The path to the output file.')

    # Exactly one output mode must be chosen.
    group = subparser.add_mutually_exclusive_group(required=True)
    group.add_argument('--all', action='store_true', help='Output all three ELMo vectors.')
    group.add_argument('--top', action='store_true', help='Output the top ELMo vector.')
    group.add_argument('--average', action='store_true',
                       help='Output the average of the ELMo vectors.')

    subparser.add_argument('--vocab-path', type=str,
                           help='A path to a vocabulary file to generate.')
    subparser.add_argument('--options-file', type=str, default=DEFAULT_OPTIONS_FILE,
                           help='The path to the ELMo options file.')
    subparser.add_argument('--weight-file', type=str, default=DEFAULT_WEIGHT_FILE,
                           help='The path to the ELMo weight file.')
    subparser.add_argument('--batch-size', type=int, default=DEFAULT_BATCH_SIZE,
                           help='The batch size to use.')
    subparser.add_argument('--cuda-device', type=int, default=-1,
                           help='The cuda_device to run on.')

    subparser.set_defaults(func=elmo_command)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``train`` command and bind it to ``train_model_from_args``."""
    description = '''Train the specified model on the specified dataset.'''
    subparser = parser.add_parser(name, description=description, help='Train a model')

    subparser.add_argument('param_path', type=str,
                           help='path to parameter file describing the model to be trained')
    subparser.add_argument('-s', '--serialization-dir', required=True, type=str,
                           help='directory in which to save the model and its logs')
    subparser.add_argument('-r', '--recover', action='store_true', default=False,
                           help='recover training from the state in serialization_dir')
    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a HOCON structure used to override the experiment configuration')
    subparser.add_argument('--file-friendly-logging', action='store_true', default=False,
                           help='outputs tqdm status on separate lines and slows tqdm refresh rate')

    subparser.set_defaults(func=train_model_from_args)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``evaluate`` command: score a trained model on a dataset."""
    description = '''Evaluate the specified model + dataset'''
    subparser = parser.add_parser(
            name, description=description, help='Evaluate the specified model + dataset')

    subparser.add_argument('archive_file', type=str,
                           help='path to an archived trained model')
    subparser.add_argument('input_file', type=str,
                           help='path to the file containing the evaluation data')
    subparser.add_argument('--output-file', type=str, help='path to output file')
    subparser.add_argument('--weights-file', type=str,
                           help='a path that overrides which weights file to use')

    device_group = subparser.add_mutually_exclusive_group(required=False)
    device_group.add_argument('--cuda-device', type=int, default=-1,
                              help='id of GPU to use (if any)')

    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a JSON structure used to override the experiment configuration')
    subparser.add_argument('--batch-weight-key', type=str, default="",
                           help='If non-empty, name of metric used to weight the loss on a per-batch basis.')

    subparser.set_defaults(func=evaluate_from_args)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``configure`` command, which prints configuration stubs."""
    description = '''Generate a configuration stub for a specific class (or for config as a whole)'''
    subparser = parser.add_parser(
            name, description=description, help='Generate configuration stubs.')
    # 'cla55' avoids shadowing the reserved word 'class'; the metavar keeps the
    # user-visible name readable.
    subparser.add_argument('cla55', nargs='?', default='', metavar='class')
    subparser.set_defaults(func=_configure)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``serve`` command, which launches the HTTP API / web demo."""
    description = '''Run the web service, which provides an HTTP API as well as a web demo.'''
    subparser = parser.add_parser(
            name, description=description, help='Run the web service and demo.')
    subparser.add_argument('--port', type=int, default=8000)
    subparser.set_defaults(func=_serve)
    return subparser
def register_for_trigger(cls, subparser: _SubParsersAction, *args, **kwargs):
    """
    Register this plugin class as a runnable option for the trigger.

    The subcommand name is the lowercased class name, and an instance of the
    plugin is stored on the parsed namespace as ``main_plugin``.

    :param subparser: the subparser on which to register
    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: the parser created by registering, so subclasses can add their
        own options to it
    """
    command_name = cls.__name__.lower()
    parser = subparser.add_parser(command_name, help=cls.help)
    parser.set_defaults(main_plugin=cls())  # pylint: disable=abstract-class-instantiated
    return parser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``test-install`` command, which runs the unit tests."""
    description = '''Test that installation works by running the unit tests.'''
    subparser = parser.add_parser(
            name, description=description, help='Run the unit tests.')
    subparser.add_argument('--run-all', action="store_true",
                           help="By default, we skip tests that are slow "
                                "or download large files. This flag will run all tests.")
    subparser.set_defaults(func=_run_test)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``fine-tune`` command: continue training a saved model."""
    description = """Continues training a saved model on a new dataset."""
    subparser = parser.add_parser(name, description=description,
                                  help='Continue training a model on a new dataset')

    # Required inputs: the original archive, a config, and somewhere to write.
    subparser.add_argument('-m', '--model-archive', required=True, type=str,
                           help='path to the saved model archive from training on the original data')
    subparser.add_argument('-c', '--config-file', required=True, type=str,
                           help='configuration file to use for training. Format is the same as '
                                'for the "train" command, but the "model" section is ignored.')
    subparser.add_argument('-s', '--serialization-dir', required=True, type=str,
                           help='directory in which to save the fine-tuned model and its logs')

    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a JSON structure used to override the training configuration '
                                '(only affects the config_file, _not_ the model_archive)')
    subparser.add_argument('--extend-vocab', action='store_true', default=False,
                           help='if specified, we will use the instances in your new dataset to '
                                'extend your vocabulary. Currently expansion of embedding layers '
                                'is not implemented, so if your model has an embedding layer '
                                'this will probably make fine-tune crash.')
    subparser.add_argument('--file-friendly-logging', action='store_true', default=False,
                           help='outputs tqdm status on separate lines and slows tqdm refresh rate')
    subparser.add_argument('--batch-weight-key', type=str, default="",
                           help='If non-empty, name of metric used to weight the loss on a per-batch basis.')

    subparser.set_defaults(func=fine_tune_model_from_args)
    return subparser
def add_parser(self, subparsers: argparse._SubParsersAction) -> None:
    """Attach this command's parser to *subparsers* and bind its run handler."""
    kwargs: Dict[str, Any] = {"aliases": self.get_aliases()}
    # add_parser() treats a missing 'help' keyword differently from
    # help=None: only omitting it entirely hides the command from the
    # help output. So we only set it when get_help() returns something.
    help_text = self.get_help()
    if help_text is not None:
        kwargs["help"] = help_text
    parser = subparsers.add_parser(self.get_name(), **kwargs)
    parser.set_defaults(func=self.run)
    self.setup_parser(parser)
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the extended ``elmo`` command (HDF5 output, key-control flags)."""
    description = '''Create word vectors using ELMo.'''
    subparser = parser.add_parser(
            name, description=description,
            help='Create word vectors using a pretrained ELMo model.')

    subparser.add_argument('input_file', type=argparse.FileType('r'),
                           help='The path to the input file.')
    subparser.add_argument('output_file', type=str, help='The path to the output file.')

    # Exactly one output mode must be chosen.
    group = subparser.add_mutually_exclusive_group(required=True)
    group.add_argument('--all', action='store_true', help='Output all three ELMo vectors.')
    group.add_argument('--top', action='store_true', help='Output the top ELMo vector.')
    group.add_argument('--average', action='store_true',
                       help='Output the average of the ELMo vectors.')

    subparser.add_argument('--vocab-path', type=str,
                           help='A path to a vocabulary file to generate.')
    subparser.add_argument('--options-file', type=str, default=DEFAULT_OPTIONS_FILE,
                           help='The path to the ELMo options file.')
    subparser.add_argument('--weight-file', type=str, default=DEFAULT_WEIGHT_FILE,
                           help='The path to the ELMo weight file.')
    subparser.add_argument('--batch-size', type=int, default=DEFAULT_BATCH_SIZE,
                           help='The batch size to use.')
    subparser.add_argument('--file-friendly-logging', default=False, action='store_true',
                           help='outputs tqdm status on separate lines and slows tqdm refresh rate.')
    subparser.add_argument('--cuda-device', type=int, default=-1,
                           help='The cuda_device to run on.')

    # Flags controlling how sentences are keyed in the HDF5 output.
    subparser.add_argument('--forget-sentences', action='store_true',
                           help="If this flag is specified, and --use-sentence-keys is "
                                "not, remove the string serialized JSON dictionary "
                                "that associates sentences with their line number (its "
                                "HDF5 key) that is normally placed in the "
                                "\"sentence_to_index\" HDF5 key.")
    subparser.add_argument('--use-sentence-keys', action='store_true',
                           help="Normally a sentence's line number is used as the "
                                "HDF5 key for its embedding. If this flag is specified, "
                                "the sentence itself will be used as the key.")

    subparser.set_defaults(func=elmo_command)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``find-lr`` command: scan a learning-rate range."""
    description = '''Find a learning rate range where loss decreases quickly for the specified model and dataset.'''
    subparser = parser.add_parser(name, description=description,
                                  help='Find a learning rate range.')

    subparser.add_argument('param_path', type=str,
                           help='path to parameter file describing the model to be trained')
    subparser.add_argument('-s', '--serialization-dir', required=True, type=str,
                           help='The directory in which to save results.')
    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a JSON structure used to override the experiment configuration')

    # Search-range parameters.
    subparser.add_argument('--start-lr', type=float, default=1e-5,
                           help='learning rate to start the search')
    subparser.add_argument('--end-lr', type=float, default=10,
                           help='learning rate up to which search is done')
    subparser.add_argument('--num-batches', type=int, default=100,
                           help='number of mini-batches to run learning rate finder')
    subparser.add_argument('--stopping-factor', type=float, default=None,
                           help='stop the search when the current loss exceeds the best loss recorded by '
                                'multiple of stopping factor')
    subparser.add_argument('--linear', action='store_true',
                           help='increase learning rate linearly instead of exponential increase')
    subparser.add_argument('-f', '--force', action='store_true', required=False,
                           help='overwrite the output directory if it exists')

    subparser.set_defaults(func=find_learning_rate_from_args)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``make-vocab`` command."""
    description = '''Create a vocabulary from the specified dataset.'''
    subparser = parser.add_parser(
            name, description=description, help='Create a vocabulary')
    subparser.add_argument('param_path', type=str,
                           help='path to parameter file describing the model and its inputs')
    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a HOCON structure used to override the experiment configuration')
    subparser.set_defaults(func=make_vocab_from_args)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``dry-run`` command (vocab creation + dataset statistics)."""
    description = '''Create a vocabulary, compute dataset statistics and other training utilities.'''
    subparser = parser.add_parser(name, description=description,
                                  help='Create a vocabulary, compute dataset statistics '
                                       'and other training utilities.')
    subparser.add_argument('param_path', type=str,
                           help='path to parameter file describing the model and its inputs')
    subparser.add_argument('-s', '--serialization-dir', required=True, type=str,
                           help='directory in which to save the output of the dry run.')
    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a JSON structure used to override the experiment configuration')
    subparser.set_defaults(func=dry_run_from_args)
    return subparser
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``fine-tune`` command (basic variant, no vocab extension)."""
    description = """Continues training a saved model on a new dataset."""
    subparser = parser.add_parser(name, description=description,
                                  help='Continue training a model on a new dataset')

    subparser.add_argument('-m', '--model-archive', required=True, type=str,
                           help='path to the saved model archive from training on the original data')
    subparser.add_argument('-c', '--config-file', required=True, type=str,
                           help='configuration file to use for training. Format is the same as '
                                'for the "train" command, but the "model" section is ignored.')
    subparser.add_argument('-s', '--serialization-dir', required=True, type=str,
                           help='directory in which to save the fine-tuned model and its logs')
    subparser.add_argument('-o', '--overrides', type=str, default="",
                           help='a JSON structure used to override the training configuration '
                                '(only affects the config_file, _not_ the model_archive)')
    subparser.add_argument('--file-friendly-logging', action='store_true', default=False,
                           help='outputs tqdm status on separate lines and slows tqdm refresh rate')

    subparser.set_defaults(func=fine_tune_model_from_args)
    return subparser
def add_subparser(subparsers: _SubParsersAction, name: str, help: str) -> ArgumentParser:
    """Create a subcommand parser pre-populated with the common nixops options.

    Adds state-file/deployment selection, confirmation and debug flags, and
    the Nix options that nixops forwards to nix-build.
    """
    subparser = subparsers.add_parser(name, help=help)

    # Deployment selection.
    subparser.add_argument(
        "--state", "-s",
        dest="state_file",
        metavar="FILE",
        default=nixops.statefile.get_default_state_file(),
        help="path to state file",
    )
    subparser.add_argument(
        "--deployment", "-d",
        dest="deployment",
        metavar="UUID_OR_NAME",
        default=os.environ.get("NIXOPS_DEPLOYMENT", os.environ.get("CHARON_DEPLOYMENT", None)),
        help="UUID or symbolic name of the deployment",
    )

    subparser.add_argument("--debug", action="store_true", help="enable debug output")
    subparser.add_argument(
        "--confirm",
        action="store_true",
        help="confirm dangerous operations; do not ask",
    )

    # Nix options that we pass along.
    subparser.add_argument(
        "-I",
        nargs=1,
        action="append",
        dest="nix_path",
        metavar="PATH",
        help="append a directory to the Nix search path",
    )
    subparser.add_argument(
        "--max-jobs", "-j",
        type=int,
        metavar="N",
        help="set maximum number of concurrent Nix builds",
    )
    subparser.add_argument(
        "--cores",
        type=int,
        metavar="N",
        help="sets the value of the NIX_BUILD_CORES environment variable in the invocation of builders",
    )
    subparser.add_argument("--keep-going", action="store_true",
                           help="keep going after failed builds")
    subparser.add_argument(
        "--keep-failed", "-K",
        action="store_true",
        help="keep temporary directories of failed builds",
    )
    subparser.add_argument(
        "--show-trace",
        action="store_true",
        help="print a Nix stack trace if evaluation fails, or a python stack trace if nixops fails",
    )
    subparser.add_argument("--fallback", action="store_true",
                           help="fall back on installation from source")
    subparser.add_argument(
        "--no-build-output",
        action="store_true",
        help="suppress output written by builders",
    )
    subparser.add_argument(
        "--option",
        nargs=2,
        action="append",
        dest="nix_options",
        metavar=("NAME", "VALUE"),
        help="set a Nix option",
    )
    subparser.add_argument(
        "--read-only-mode",
        action="store_true",
        help="run Nix evaluations in read-only mode",
    )
    return subparser
def add_subparser(
        self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the ``evaluate`` command under ``self.name``."""
    description = """Evaluate the specified model + dataset"""
    subparser = parser.add_parser(
        self.name, description=description,
        help="Evaluate the specified model + dataset.")

    subparser.add_argument("archive_file", type=str,
                           help="path to an archived trained model")
    subparser.add_argument("input_file", type=str,
                           help="path to the file containing the evaluation data")
    subparser.add_argument("--output-file", type=str, help="path to output file")
    subparser.add_argument("--weights-file", type=str,
                           help="a path that overrides which weights file to use")

    device_group = subparser.add_mutually_exclusive_group(required=False)
    device_group.add_argument("--cuda-device", type=int, default=-1,
                              help="id of GPU to use (if any)")

    subparser.add_argument(
        "-o", "--overrides",
        type=str,
        default="",
        help="a JSON structure used to override the experiment configuration",
    )
    subparser.add_argument(
        "--batch-size",
        type=int,
        help="If non-empty, the batch size to use during evaluation.")
    subparser.add_argument(
        "--batch-weight-key",
        type=str,
        default="",
        help="If non-empty, name of metric used to weight the loss on a per-batch basis.",
    )
    subparser.add_argument(
        "--extend-vocab",
        action="store_true",
        default=False,
        help="if specified, we will use the instances in your new dataset to "
             "extend your vocabulary. If pretrained-file was used to initialize "
             "embedding layers, you may also need to pass --embedding-sources-mapping.",
    )
    subparser.add_argument(
        "--embedding-sources-mapping",
        type=str,
        default="",
        help="a JSON dict defining mapping from embedding module path to embedding "
             "pretrained-file used during training. If not passed, and embedding needs to be "
             "extended, we will try to use the original file paths used during training. If "
             "they are not available we will use random vectors for embedding extension.",
    )

    subparser.set_defaults(func=evaluate_from_args)
    return subparser
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
    """Register this command under ``cls.COMMAND`` and bind ``cls.execute``."""
    parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.")
    cls.add_arguments(parser)
    parser.set_defaults(func=cls.execute)
def add_subparser(
        self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the ``predict`` command under ``self.name``."""
    description = """Run the specified model against a JSON-lines input file."""
    subparser = parser.add_parser(
        self.name, description=description,
        help="Use a trained model to make predictions.")

    subparser.add_argument("archive_file", type=str,
                           help="the archived model to make predictions with")
    subparser.add_argument("input_file", type=str,
                           help="path to or url of the input file")
    subparser.add_argument("--output-file", type=str, help="path to output file")
    subparser.add_argument("--weights-file", type=str,
                           help="a path that overrides which weights file to use")

    batching = subparser.add_mutually_exclusive_group(required=False)
    batching.add_argument("--batch-size", type=int, default=1,
                          help="The batch size to use for processing")

    subparser.add_argument("--silent", action="store_true",
                           help="do not print output to stdout")

    device_group = subparser.add_mutually_exclusive_group(required=False)
    device_group.add_argument("--cuda-device", type=int, default=-1,
                              help="id of GPU to use (if any)")

    subparser.add_argument(
        "--use-dataset-reader",
        action="store_true",
        help="Whether to use the dataset reader of the original model to load Instances. "
             "The validation dataset reader will be used if it exists, otherwise it will "
             "fall back to the train dataset reader. This behavior can be overridden "
             "with the --dataset-reader-choice flag.",
    )
    subparser.add_argument(
        "--dataset-reader-choice",
        type=str,
        choices=["train", "validation"],
        default="validation",
        help="Indicates which model dataset reader to use if the --use-dataset-reader "
             "flag is set.",
    )
    subparser.add_argument(
        "-o", "--overrides",
        type=str,
        default="",
        help=("a json(net) structure used to override the experiment configuration, e.g., "
              "'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
              " with nested dictionaries or with dot syntax."),
    )
    subparser.add_argument(
        "--predictor",
        type=str,
        help="optionally specify a specific predictor to use")
    subparser.add_argument(
        "--predictor-args",
        type=str,
        default="",
        help=("an optional JSON structure used to provide additional parameters to the predictor"),
    )
    subparser.add_argument(
        "--file-friendly-logging",
        action="store_true",
        default=False,
        help="outputs tqdm status on separate lines and slows tqdm refresh rate",
    )

    subparser.set_defaults(func=_predict)
    return subparser
def add_render_parser(subparsers: argparse._SubParsersAction) -> None:
    """Add the ``render`` subcommand (triad keyboard -> SVG)."""
    parser = subparsers.add_parser(
        "render", help="Renders the triad keyboard onto an SVG"
    )
    parser.add_argument("--open", action="store_true")
    parser.set_defaults(func=render)
def make_gcp_parser(subparsers: argparse._SubParsersAction) -> None:
    """Register the ``gcp`` command group with its ``up``/``down`` subcommands."""
    gcp_parser = subparsers.add_parser("gcp", help="gcp help")
    gcp_subparsers = gcp_parser.add_subparsers(help="command", dest="command")
    make_up_subparser(gcp_subparsers)
    make_down_subparser(gcp_subparsers)
def make_up_subparser(subparsers: argparse._SubParsersAction) -> None:
    """Register ``gcp up``: all options for provisioning a GCP cluster.

    Arguments are split into a required and an optional named group so the
    help output makes the mandatory flags obvious.
    """
    up_parser = subparsers.add_parser("up", help="create gcp cluster")

    required_named = up_parser.add_argument_group("required named arguments")
    required_named.add_argument(
        "--cluster-id",
        type=str,
        default=None,
        required=True,
        help="unique identifier to name and tag resources",
    )
    required_named.add_argument(
        "--project-id",
        type=str,
        default=None,
        required=True,
        help="project ID to create the cluster in",
    )

    optional_named = up_parser.add_argument_group("optional named arguments")
    optional_named.add_argument(
        "--dry-run",
        action="store_true",
        help="return the infrastructure plan to be executed based on your arguments",
    )
    optional_named.add_argument(
        "--keypath",
        type=str,
        default=None,
        help="path to service account key if not using default credentials",
    )
    optional_named.add_argument(
        "--network",
        type=str,
        default="det-default",
        help="network name to create (the network should not already exist in the project)",
    )
    # Hidden (SUPPRESS) options are for internal/CI use.
    optional_named.add_argument(
        "--det-version",
        type=str,
        default=determined_deploy.__version__,
        help=argparse.SUPPRESS,
    )
    optional_named.add_argument(
        "--region",
        type=str,
        default=constants.defaults.REGION,
        help="region to create the cluster in (defaults to us-west1)",
    )
    optional_named.add_argument(
        "--zone",
        type=str,
        default=None,
        help="zone to create the cluster in (defaults to `region`-b)",
    )
    optional_named.add_argument(
        "--environment-image",
        type=str,
        default=constants.defaults.ENVIRONMENT_IMAGE,
        help=argparse.SUPPRESS,
    )
    optional_named.add_argument(
        "--local-state-path",
        type=str,
        default=os.getcwd(),
        help=argparse.SUPPRESS,
    )
    optional_named.add_argument(
        "--preemptible",
        type=str,
        default="false",
        help="whether to use preemptible instances for agents",
    )
    optional_named.add_argument(
        "--operation-timeout-period",
        type=str,
        default=constants.defaults.OPERATION_TIMEOUT_PERIOD,
        help="operation timeout before retrying, e.g. 5m for 5 minutes",
    )
    optional_named.add_argument(
        "--master-instance-type",
        type=str,
        default=constants.defaults.MASTER_INSTANCE_TYPE,
        help="instance type for master",
    )
    optional_named.add_argument(
        "--agent-instance-type",
        type=str,
        default=constants.defaults.AGENT_INSTANCE_TYPE,
        help="instance type for agent",
    )
    optional_named.add_argument(
        "--db-password",
        type=str,
        default=constants.defaults.DB_PASSWORD,
        help="password for master database",
    )
    optional_named.add_argument(
        "--max-idle-agent-period",
        type=str,
        default=constants.defaults.MAX_IDLE_AGENT_PERIOD,
        help="max agent idle time before it is shut down, e.g. 30m for 30 minutes",
    )
    optional_named.add_argument(
        "--max-agent-starting-period",
        type=str,
        default=constants.defaults.MAX_AGENT_STARTING_PERIOD,
        help="max agent starting time before retrying, e.g. 30m for 30 minutes",
    )
    optional_named.add_argument(
        "--port",
        type=int,
        default=constants.defaults.PORT,
        help="port to use for communication on master instance",
    )
    optional_named.add_argument(
        "--gpu-type",
        type=str,
        default=constants.defaults.GPU_TYPE,
        help="type of GPU to use on agents",
    )
    optional_named.add_argument(
        "--gpu-num",
        type=int,
        default=constants.defaults.GPU_NUM,
        help="number of GPUs per agent instance",
    )
    optional_named.add_argument(
        "--max-dynamic-agents",
        type=int,
        default=constants.defaults.MAX_DYNAMIC_AGENTS,
        help="maximum number of dynamic agent instances at one time",
    )
    optional_named.add_argument(
        "--static-agents",
        type=int,
        default=constants.defaults.STATIC_AGENTS,
        help=argparse.SUPPRESS,
    )
    optional_named.add_argument(
        "--min-cpu-platform-master",
        type=str,
        default=constants.defaults.MIN_CPU_PLATFORM_MASTER,
        help="minimum cpu platform for master instances",
    )
    optional_named.add_argument(
        "--min-cpu-platform-agent",
        type=str,
        default=constants.defaults.MIN_CPU_PLATFORM_AGENT,
        help="minimum cpu platform for agent instances",
    )
def add_subparser(
        self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``fine-tune`` command (with cross-validation fold options).

    Fix: the ``--embedding-sources-mapping`` help text was built from adjacent
    string literals 'embedding' 'pretrained-file' with no separating space,
    rendering as "embeddingpretrained-file"; a trailing space is added so the
    text matches the parallel wording used by the evaluate command.
    """
    description = """Continues training a saved model on a new dataset."""
    subparser = parser.add_parser(
        self.name,
        description=description,
        help='Continue training a model on a new dataset.')

    # Required inputs: the original archive, a config, and somewhere to write.
    subparser.add_argument(
        '-m', '--model-archive',
        required=True,
        type=str,
        help='path to the saved model archive from training on the original data')
    subparser.add_argument(
        '-c', '--config-file',
        required=True,
        type=str,
        help='configuration file to use for training. Format is the same as '
             'for the "train" command, but the "model" section is ignored.')
    subparser.add_argument(
        '-s', '--serialization-dir',
        required=True,
        type=str,
        help='directory in which to save the fine-tuned model and its logs')

    subparser.add_argument(
        '-o', '--overrides',
        type=str,
        default="",
        help='a JSON structure used to override the training configuration '
             '(only affects the config_file, _not_ the model_archive)')
    subparser.add_argument(
        '--extend-vocab',
        action='store_true',
        default=False,
        help='if specified, we will use the instances in your new dataset to '
             'extend your vocabulary. If pretrained-file was used to initialize '
             'embedding layers, you may also need to pass --embedding-sources-mapping.')
    subparser.add_argument(
        '--file-friendly-logging',
        action='store_true',
        default=False,
        help='outputs tqdm status on separate lines and slows tqdm refresh rate')
    subparser.add_argument(
        '--batch-weight-key',
        type=str,
        default="",
        help='If non-empty, name of metric used to weight the loss on a per-batch basis.')
    subparser.add_argument(
        '--embedding-sources-mapping',
        type=str,
        default="",
        help='a JSON dict defining mapping from embedding module path to embedding '
             'pretrained-file used during training. If not passed, and embedding needs to be '
             'extended, we will try to use the original file paths used during training. If '
             'they are not available we will use random vectors for embedding extension.')

    # Cross-validation / regularization knobs specific to this variant.
    subparser.add_argument("--fold", type=int, default=None)
    subparser.add_argument("--folds", type=int, default=None)
    subparser.add_argument("--l2", type=float, default=None)

    subparser.set_defaults(func=fine_tune_model_from_args)
    return subparser
def add_workspace_subparser(subparser: argparse._SubParsersAction,
                            name: str) -> argparse.ArgumentParser:
    """Create a subcommand named *name* that accepts a ``-w/--workspace`` path."""
    workspace_parser = subparser.add_parser(name)
    workspace_parser.add_argument("-w", "--workspace", dest="workspace_path")
    return workspace_parser
def setupSubCommand(cls, subparser: argparse._SubParsersAction):
    """Register the ``+exit`` subcommand and bind the ExitProcess handler."""
    parser = subparser.add_parser("+exit", help="BOTを終了します(開発時専用コマンドです)")
    parser.set_defaults(handler=ExitProcess)
def add_subparser(
        self, name: str,
        parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``predict`` command (dataset-reader-aware variant)."""
    description = '''Run the specified model against a JSON-lines input file.'''
    subparser = parser.add_parser(
        name, description=description,
        help='Use a trained model to make predictions.')

    subparser.add_argument(
        'archive_file', type=str,
        help='the archived model to make predictions with')
    subparser.add_argument('input_file', type=str,
                           help='path to or url of the input file')
    subparser.add_argument('--output-file', type=str, help='path to output file')
    subparser.add_argument(
        '--weights-file', type=str,
        help='a path that overrides which weights file to use')

    batching = subparser.add_mutually_exclusive_group(required=False)
    batching.add_argument('--batch-size', type=int, default=1,
                          help='The batch size to use for processing')

    subparser.add_argument('--silent', action='store_true',
                           help='do not print output to stdout')

    device_group = subparser.add_mutually_exclusive_group(required=False)
    device_group.add_argument('--cuda-device', type=int, default=-1,
                              help='id of GPU to use (if any)')

    subparser.add_argument(
        '--use-dataset-reader',
        action='store_true',
        help='Whether to use the dataset reader of the original model to load Instances. '
             'The validation dataset reader will be used if it exists, otherwise it will '
             'fall back to the train dataset reader. This behavior can be overridden '
             'with the --dataset-reader-choice flag.')
    subparser.add_argument(
        '--dataset-reader-choice',
        type=str,
        choices=['train', 'validation'],
        default='validation',
        help='Indicates which model dataset reader to use if the --use-dataset-reader '
             'flag is set.')
    subparser.add_argument(
        '-o', '--overrides',
        type=str,
        default="",
        help='a JSON structure used to override the experiment configuration')
    subparser.add_argument(
        '--predictor',
        type=str,
        help='optionally specify a specific predictor to use')

    subparser.set_defaults(func=_predict)
    return subparser
def add_subparser(
        self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the ``find-lr`` command under ``self.name``."""
    description = """Find a learning rate range where loss decreases quickly for the specified model and dataset."""
    subparser = parser.add_parser(self.name, description=description,
                                  help="Find a learning rate range.")

    subparser.add_argument(
        "param_path", type=str,
        help="path to parameter file describing the model to be trained")
    subparser.add_argument(
        "-s", "--serialization-dir",
        required=True,
        type=str,
        help="The directory in which to save results.",
    )
    subparser.add_argument(
        "-o", "--overrides",
        type=str,
        default="",
        help="a JSON structure used to override the experiment configuration",
    )

    # Search-range parameters.
    subparser.add_argument("--start-lr", type=float, default=1e-5,
                           help="learning rate to start the search")
    subparser.add_argument("--end-lr", type=float, default=10,
                           help="learning rate up to which search is done")
    subparser.add_argument(
        "--num-batches",
        type=int,
        default=100,
        help="number of mini-batches to run learning rate finder",
    )
    subparser.add_argument(
        "--stopping-factor",
        type=float,
        default=None,
        help="stop the search when the current loss exceeds the best loss recorded by "
             "multiple of stopping factor",
    )
    subparser.add_argument(
        "--linear",
        action="store_true",
        help="increase learning rate linearly instead of exponential increase",
    )
    subparser.add_argument(
        "-f", "--force",
        action="store_true",
        required=False,
        help="overwrite the output directory if it exists",
    )

    subparser.set_defaults(func=find_learning_rate_from_args)
    return subparser
def add_subparser(
        self, name: str,
        parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the Ray-Tune hyperparameter-search subcommand and return it.

    :param name: name under which the subcommand is registered.
    :param parser: the subparsers collection of the main CLI parser.
    """
    subparser = parser.add_parser(name,
                                  description="search with RayTune",
                                  help='Perform hyperparameter search')
    subparser.add_argument(
        "--experiment-name",
        type=str,
        required=True,
        help="a name for the experiment",
    )
    # Cluster-wide resources available to Ray.
    subparser.add_argument(
        "--num-cpus",
        type=int,
        default=1,
        help="number of CPUs available to the experiment",
    )
    subparser.add_argument(
        "--num-gpus",
        type=int,
        default=1,
        help="number of GPUs available to the experiment",
    )
    # Per-trial resource allocation.
    subparser.add_argument(
        "--cpus-per-trial",
        type=int,
        default=1,
        help="number of CPUs dedicated to a single trial",
    )
    subparser.add_argument(
        "--gpus-per-trial",
        type=int,
        default=1,
        help="number of GPUs dedicated to a single trial",
    )
    subparser.add_argument(
        "--log-dir",
        type=str,
        default="./logs",
        help="directory in which to store trial logs and results",
    )
    subparser.add_argument(
        "--with-server",
        action="store_true",
        default=False,
        help="start the Ray server",
    )
    subparser.add_argument(
        "--server-port",
        type=int,
        default=10000,
        # fixed: "to listens on" -> "to listen on"
        help="port for Ray server to listen on",
    )
    subparser.add_argument(
        "--search-strategy",
        type=str,
        default="variant-generation",
        help="hyperparameter search strategy used by Ray-Tune",
    )
    subparser.add_argument(
        "--search-space",
        "-e",
        type=os.path.abspath,
        required=True,
        help="name of dict describing the hyperparameter search space",
    )
    subparser.add_argument(
        "--num-samples",
        type=int,
        default=1,
        # fixed: "num_samples of times" -> "num_samples times"
        help="Number of times to sample from the hyperparameter space. "
             "If grid_search is provided as an argument, the grid will be "
             "repeated num_samples times.",
    )
    subparser.add_argument(
        "--base-config",
        dest='base_config',
        required=True,
        type=os.path.abspath,
        help="path to parameter file describing the model to be trained",
    )
    subparser.add_argument(
        "--include-package",
        type=str,
        action="append",
        default=[],
        help="additional packages to include",
    )
    subparser.add_argument(
        "-o",
        "--overrides",
        type=str,
        default="",
        help="a JSON structure used to override the experiment configuration",
    )
    subparser.set_defaults(func=search_from_args)
    return subparser
def add_subparser(
    subparsers: argparse._SubParsersAction,
    cmd: str,
    server: bool = False,
    format: bool = False,
    signon: bool = False,
    stmtend: bool = False,
    stmt: bool = False,
    acctinforq: bool = False,
    tax: bool = False,
    help: Optional[str] = None,
) -> argparse.ArgumentParser:
    """Build the parser for OFX request *cmd*, enabling option groups per flags.

    Each boolean flag requests an option group; higher-level groups pull in
    the lower-level groups they depend on (stmt -> stmtend -> signon -> ...).
    """
    sub = subparsers.add_parser(cmd, help=help, description=help)
    sub.set_defaults(request=cmd)
    sub.add_argument("server", nargs="?", help="OFX server nickname")
    sub.add_argument(
        "--verbose",
        "-v",
        action="count",
        default=0,
        help="Give more output (option can be repeated)",
    )

    # Higher-level configs (e.g. account #s)
    # imply lower-level configs (e.g. username/passwd)
    if stmt:
        stmtend = True
    if stmtend or tax:
        signon = True
        acctinforq = True  # Support internally generated ACCTINFORQ for --all accounts
    if signon:
        format = True
    if format:
        server = True

    if server:
        sub.add_argument("--url", help="OFX server URL")
        sub.add_argument(
            "--ofxhome", metavar="ID#", help="FI id# on http://www.ofxhome.com/"
        )
        sub.add_argument(
            "-w",
            "--write",
            action="store_true",
            default=None,
            help="Write working parameters to config file",
        )
        sub.add_argument(
            "--useragent",
            dest="useragent",
            help="Value to use in HTTP 'User-Agent' header (defaults to empty string)",
        )
    if format:
        sub.add_argument(
            "-n",
            "--dryrun",
            action="store_true",
            default=None,
            help="Display OFX request and exit without sending",
        )
        add_format_group(sub)
    if signon:
        sub.add_argument(
            "--savepass",
            action="store_true",
            default=None,
            help="Store password in system keyring (requires python-keyring)",
        )
        sub.add_argument(
            "--nokeyring",
            action="store_true",
            default=None,
            help="Don't use system keyring to store/retrieve passwords",
        )
        add_signon_group(sub)
    if stmtend:
        add_bank_acct_group(sub)
        stmt_group = add_stmt_group(sub)
        if stmt:
            add_stmt_args(stmt_group)
            add_inv_acct_group(sub)
            add_inv_stmt_group(sub)
    if acctinforq:
        add_acctinforq_group(sub)
    if tax:
        add_tax_group(sub)

    return sub
def sub_parser(parser: argparse._SubParsersAction) -> None:
    """Register the `list-watchers` subcommand.

    :param parser: the subparsers collection of the main CLI parser.
    """
    # fixed: was an f-string with no placeholders
    sub = parser.add_parser("list-watchers")
    sub.set_defaults(query=Watchers)
    sub.add_argument('--repository', help='The repository name', required=True)
def create_subparser(
    subparsers: _SubParsersAction,
    fn: Callable[..., None],
    *,
    with_jobs: bool = False,
    with_no_lksp: bool = False,
    with_gargs: bool = False,
    with_build_dir: bool = False,
    accept_unknown_args: bool = False,
) -> ArgumentParser:
    """
    Create a subparser whose name and help come from ``fn`` itself.

    The returned subparser dispatches to ``fn``; unless
    ``accept_unknown_args`` is set, leftover command-line arguments are
    rejected with an error.

    :param bool with_jobs: Whether to create the --jobs/-j option.
    :param bool with_no_lksp: Whether to create the --no-langkit-support
        option.
    :param bool with_gargs: Whether to create the --gargs option.
    :param bool with_build_dir: Whether to create the --build-dir option.
    """
    sub = subparsers.add_parser(name=fn.__name__.replace('_', '-'),
                                help=fn.__doc__,
                                add_help=not accept_unknown_args)

    sub.add_argument("--build-mode", "-b",
                     choices=("dev", "prod"),
                     default="dev",
                     help="Select a preset for build options.")
    LibraryTypes.add_option(sub)

    if with_jobs:
        sub.add_argument(
            "--jobs", "-j", type=int, default=get_cpu_count(),
            help="Number of parallel jobs to spawn in parallel (default: your"
                 " number of cpu).")
    if with_no_lksp:
        sub.add_argument(
            "--no-langkit-support",
            action="store_true",
            help="Assume that Langkit_Support is already built and installed."
                 " We rebuild it by default, for the convenience of"
                 " developers.")
    if with_gargs:
        sub.add_argument(
            '--gargs', action='append',
            help='Options appended to GPRbuild invocations.')
    if with_build_dir:
        sub.add_argument(
            '--build-dir',
            help='Use a non-default build directory. This allows out-of-tree'
                 ' builds.')

    def strict_entry_point(args: Namespace, rest: List[str]):
        # Refuse leftover arguments before delegating to fn.
        if len(rest) > 0:
            print("ERROR - unhandled command line arguments: {}".format(
                ", ".join(rest)))
            sys.exit(1)
        fn(args)

    sub.set_defaults(func=fn if accept_unknown_args else strict_entry_point)
    return sub
def add_subparser(self, parser: argparse._SubParsersAction):
    """Register the `simetrix` subcommand; parsed args dispatch to self.run."""
    simetrix_parser = parser.add_parser('simetrix',
                                        description='Setup the SIMetrix metric',
                                        help='Setup the SIMetrix metric')
    simetrix_parser.set_defaults(subfunc=self.run)
    self.parser = simetrix_parser
def make_aws_parser(subparsers: argparse._SubParsersAction):
    """Attach the `aws` command with its `up`/`down` subcommands."""
    aws_parser = subparsers.add_parser("aws", help="AWS help")
    commands = aws_parser.add_subparsers(help="command", dest="command")
    # Delegate per-command option setup to the dedicated builders.
    make_down_subparser(commands)
    make_up_subparser(commands)
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
    """Register the kill command; parsed args carry cls.from_arguments."""
    kill_parser = parser.add_parser(cls.NAME)
    kill_parser.set_defaults(command=cls.from_arguments)
    kill_parser.add_argument(
        "--with-fire",
        action="store_true",
        help="A no-op flag that adds emphasis.")
def add_subparser(self, parser: argparse._SubParsersAction):
    """Register the `moverscore` subcommand; parsed args dispatch to self.run."""
    moverscore_parser = parser.add_parser('moverscore',
                                          description='Setup the MoverScore metric',
                                          help='Setup the MoverScore metric')
    moverscore_parser.set_defaults(subfunc=self.run)
    self.parser = moverscore_parser
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
    """Register the stop command; the class itself is the dispatch target."""
    stop_parser = parser.add_parser(
        cls.NAME, epilog="Signals the Pyre server to stop.")
    stop_parser.set_defaults(command=cls)
def add_subparser(self, parser: argparse._SubParsersAction):
    """Register a bare `moverscore` subcommand that dispatches to self.run."""
    moverscore_parser = parser.add_parser('moverscore')
    moverscore_parser.set_defaults(subfunc=self.run)
    self.parser = moverscore_parser
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
    """Register cls.COMMAND; cls supplies its own arguments and handler."""
    viz_parser = subparsers.add_parser(cls.COMMAND,
                                       help="Visualize selected entries")
    cls.add_arguments(viz_parser)
    viz_parser.set_defaults(func=cls.execute)
def add_subparser(
        self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the evaluation subcommand and return it.

    :param parser: the subparsers collection of the main CLI parser.
    """
    description = """Evaluate the specified model + dataset(s)"""
    subparser = parser.add_parser(
        self.name,
        description=description,
        help="Evaluate the specified model + dataset(s).")

    subparser.add_argument("archive_file",
                           type=str,
                           help="path to an archived trained model")
    subparser.add_argument(
        "input_file",
        type=str,
        # fixed: "mutiple" -> "multiple"
        help=("path to the file containing the evaluation data"
              ' (for multiple files, put ":" between filenames e.g., input1.txt:input2.txt)'),
    )
    subparser.add_argument(
        "--output-file",
        type=str,
        help=("optional path to write the metrics to as JSON"
              ' (for multiple files, put ":" between filenames e.g., output1.txt:output2.txt)'),
    )
    subparser.add_argument(
        "--predictions-output-file",
        type=str,
        help=("optional path to write the predictions to as JSON lines"
              ' (for multiple files, put ":" between filenames e.g., output1.jsonl:output2.jsonl)'),
    )
    subparser.add_argument(
        "--weights-file",
        type=str,
        help="a path that overrides which weights file to use")

    cuda_device = subparser.add_mutually_exclusive_group(required=False)
    cuda_device.add_argument("--cuda-device",
                             type=int,
                             default=-1,
                             help="id of GPU to use (if any)")

    subparser.add_argument(
        "-o",
        "--overrides",
        type=str,
        default="",
        help=("a json(net) structure used to override the experiment configuration, e.g., "
              "'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
              " with nested dictionaries or with dot syntax."),
    )
    subparser.add_argument(
        "--batch-size",
        type=int,
        help="If non-empty, the batch size to use during evaluation.")
    subparser.add_argument(
        "--batch-weight-key",
        type=str,
        default="",
        help="If non-empty, name of metric used to weight the loss on a per-batch basis.",
    )
    subparser.add_argument(
        "--extend-vocab",
        action="store_true",
        default=False,
        help="if specified, we will use the instances in your new dataset to "
             "extend your vocabulary. If pretrained-file was used to initialize "
             "embedding layers, you may also need to pass --embedding-sources-mapping.",
    )
    subparser.add_argument(
        "--embedding-sources-mapping",
        type=str,
        default="",
        help="a JSON dict defining mapping from embedding module path to embedding "
             "pretrained-file used during training. If not passed, and embedding needs to be "
             "extended, we will try to use the original file paths used during training. If "
             "they are not available we will use random vectors for embedding extension.",
    )
    subparser.add_argument(
        "--file-friendly-logging",
        action="store_true",
        default=False,
        help="outputs tqdm status on separate lines and slows tqdm refresh rate",
    )

    subparser.set_defaults(func=evaluate_from_args)
    return subparser
def add_subparser(
        self, name: str,
        parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ELMo embedding subcommand on *parser* and return it."""
    elmo_parser = parser.add_parser(
        name,
        description='''Create word vectors using ELMo.''',
        help='Use a trained model to make predictions.')

    elmo_parser.add_argument('input_file', type=argparse.FileType('r'),
                             help='The path to the input file.')
    elmo_parser.add_argument('output_file', type=str,
                             help='The path to the output file.')

    # Exactly one output-selection mode must be chosen.
    mode_group = elmo_parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument('--all', action='store_true',
                            help='Output all three ELMo vectors.')
    mode_group.add_argument('--top', action='store_true',
                            help='Output the top ELMo vector.')
    mode_group.add_argument('--average', action='store_true',
                            help='Output the average of the ELMo vectors.')

    elmo_parser.add_argument('--vocab-path', type=str,
                             help='A path to a vocabulary file to generate.')
    elmo_parser.add_argument('--options-file', type=str,
                             default=DEFAULT_OPTIONS_FILE,
                             help='The path to the ELMo options file.')
    elmo_parser.add_argument('--weight-file', type=str,
                             default=DEFAULT_WEIGHT_FILE,
                             help='The path to the ELMo weight file.')
    elmo_parser.add_argument('--batch-size', type=int,
                             default=DEFAULT_BATCH_SIZE,
                             help='The batch size to use.')
    elmo_parser.add_argument('--cuda-device', type=int, default=-1,
                             help='The cuda_device to run on.')

    elmo_parser.add_argument(
        '--forget-sentences',
        action='store_true',
        help="If this flag is specified, and --use-sentence-keys is "
             "not, remove the string serialized JSON dictionary "
             "that associates sentences with their line number (its "
             "HDF5 key) that is normally placed in the "
             "\"sentence_to_index\" HDF5 key.")
    elmo_parser.add_argument(
        '--use-sentence-keys',
        action='store_true',
        help="Normally a sentence's line number is used as the "
             "HDF5 key for its embedding. If this flag is specified, "
             "the sentence itself will be used as the key.")

    elmo_parser.set_defaults(func=elmo_command)
    return elmo_parser
def add_subparser(
        self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
    """Register the training subcommand and return it."""
    train_parser = parser.add_parser(
        self.name,
        description="""Train the specified model on the specified dataset.""",
        help="Train a model.")

    train_parser.add_argument(
        "param_path", type=str,
        help="path to parameter file describing the model to be trained")
    train_parser.add_argument(
        "-s", "--serialization-dir",
        required=True,
        type=str,
        help="directory in which to save the model and its logs",
    )
    # --recover and --force control how an existing serialization dir is handled.
    train_parser.add_argument(
        "-r", "--recover",
        action="store_true",
        default=False,
        help="recover training from the state in serialization_dir",
    )
    train_parser.add_argument(
        "-f", "--force",
        action="store_true",
        required=False,
        help="overwrite the output directory if it exists",
    )
    train_parser.add_argument(
        "-o", "--overrides",
        type=str,
        default="",
        help=("a json(net) structure used to override the experiment configuration, e.g., "
              "'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
              " with nested dictionaries or with dot syntax."),
    )
    train_parser.add_argument(
        "--node-rank", type=int, default=0,
        help="rank of this node in the distributed setup")
    train_parser.add_argument(
        "--dry-run",
        action="store_true",
        help=("do not train a model, but create a vocabulary, show dataset statistics and "
              "other training information"),
    )
    train_parser.add_argument(
        "--file-friendly-logging",
        action="store_true",
        default=False,
        help="outputs tqdm status on separate lines and slows tqdm refresh rate",
    )

    train_parser.set_defaults(func=train_model_from_args)
    return train_parser
def configure_parser(subparser: argparse._SubParsersAction) -> None:
    """Wire up the `status` subcommand and its shared option groups."""
    status_parser = subparser.add_parser("status")
    # Shared option helpers keep flags consistent across subcommands.
    add_workspace_arg(status_parser)
    add_repos_selection_args(status_parser)
    status_parser.set_defaults(run=run)
def register_subparser(parsers: argparse._SubParsersAction) -> None:  # pylint: disable=protected-access
    """Add subparser for `build` command."""
    build_parser = parsers.add_parser(
        'build', help='Commands for downloading and preparing datasets.')
    build_parser.add_argument(
        'datasets',  # Positional arguments
        type=str,
        nargs='*',
        help='Name(s) of the dataset(s) to build. Default to current dir. '
        'See https://www.tensorflow.org/datasets/cli for accepted values.',
    )
    build_parser.add_argument(  # Also accept keyword arguments
        '--datasets',
        type=str,
        nargs='+',
        dest='datasets_keyword',
        help='Datasets can also be provided as keyword argument.',
    )

    # **** Debug options ****
    debug_group = build_parser.add_argument_group(
        'Debug & tests',
        description='--pdb Enter post-mortem debugging mode '
        'if an exception is raised.')
    debug_group.add_argument(
        '--overwrite',
        action='store_true',
        help='Delete pre-existing dataset if it exists.',
    )
    debug_group.add_argument(
        '--max_examples_per_split',
        type=int,
        nargs='?',
        const=1,
        help='When set, only generate the first X examples (default to 1), rather '
        'than the full dataset.',
    )

    # **** Path options ****
    path_group = build_parser.add_argument_group('Paths')
    path_group.add_argument(
        '--data_dir',
        type=pathlib.Path,
        # Should match tfds.core.constant.DATA_DIR !!
        default=pathlib.Path(
            os.environ.get('TFDS_DATA_DIR',
                           os.path.join('~', 'tensorflow_datasets'))),
        # fixed: "environement" -> "environment"
        help='Where to place datasets. Default to '
        '`~/tensorflow_datasets/` or `TFDS_DATA_DIR` environment variable.',
    )
    path_group.add_argument(
        '--download_dir',
        type=pathlib.Path,
        help='Where to place downloads. Default to `<data_dir>/downloads/`.',
    )
    path_group.add_argument(
        '--extract_dir',
        type=pathlib.Path,
        help='Where to extract files. Default to `<download_dir>/extracted/`.',
    )
    path_group.add_argument(
        '--manual_dir',
        type=pathlib.Path,
        help='Where to manually download data (required for some datasets). '
        'Default to `<download_dir>/manual/`.',
    )
    path_group.add_argument(
        '--add_name_to_manual_dir',
        action='store_true',
        # fixed: unbalanced parenthesis in the example
        help='If true, append the dataset name to the `manual_dir` (e.g. '
        '`<download_dir>/manual/<dataset_name>/`). Useful to avoid collisions '
        'if many datasets are generated.')

    # **** Generation options ****
    generation_group = build_parser.add_argument_group('Generation')
    generation_group.add_argument(
        '--config',
        '-c',
        type=str,
        help='Config name to build. Build all configs if not set.')
    # We are forced to have 2 flags to avoid ambiguity when config name is
    # a number (e.g. `voc/2017`)
    generation_group.add_argument(
        '--config_idx',
        type=int,
        help='Config id to build (`builder_cls.BUILDER_CONFIGS[config_idx]`). '
        'Mutually exclusive with `--config`.')
    generation_group.add_argument(
        '--imports',
        '-i',
        type=str,
        help='Comma separated list of module to import to register datasets.')
    generation_group.add_argument(
        '--register_checksums',
        action='store_true',
        help='If True, store size and checksum of downloaded files.',
    )
    generation_group.add_argument(
        '--force_checksums_validation',
        action='store_true',
        help='If True, raise an error if the checksums are not found.',
    )
    generation_group.add_argument(
        '--beam_pipeline_options',
        type=str,
        # nargs='+',
        help='A (comma-separated) list of flags to pass to `PipelineOptions` '
        'when preparing with Apache Beam. '
        '(see: https://www.tensorflow.org/datasets/beam_datasets). '
        'Example: `--beam_pipeline_options=job_name=my-job,project=my-project`'
    )

    # **** Automation options ****
    automation_group = build_parser.add_argument_group(
        'Automation', description='Used by automated scripts.')
    automation_group.add_argument(
        '--exclude_datasets',
        type=str,
        help='If set, generate all datasets except the one defined here. '
        'Comma separated list of datasets to exclude. ')
    automation_group.add_argument(
        '--experimental_latest_version',
        action='store_true',
        help='Build the latest Version(experiments=...) available rather than '
        'default version.')

    build_parser.set_defaults(subparser_fn=_build_datasets)
def build(
    subps: _SubParsersAction, parents: Optional[List[ArgumentParser]] = None
) -> None:
    """Add a command-line parser for the anib subcommand to the main parser.

    :param subps: collection of subparsers in main parser
    :param parents: parsers from which arguments are inherited

    The terminology may be confusing, but in practice the main parser
    collects command-line arguments that are then available to this
    parser, which inherits options from the parsers in `parents` in
    addition to those defined below.
    """
    # Note: this function registers the parser via side effect; it does not
    # return it (the previous docstring wrongly claimed a return value).
    parser = subps.add_parser(
        "anib", parents=parents, formatter_class=ArgumentDefaultsHelpFormatter
    )
    # Required positional arguments: input and output directories
    parser.add_argument(
        action="store",
        dest="indir",
        default=None,
        type=Path,
        help="input genome directory",
    )
    parser.add_argument(
        action="store",
        dest="outdir",
        default=None,
        type=Path,
        help="output analysis results directory",
    )
    # Optional arguments
    parser.add_argument(
        "--dbpath",
        action="store",
        dest="dbpath",
        default=Path(".pyani/pyanidb"),
        type=Path,
        help="path to pyani database",
    )
    parser.add_argument(
        "--blastn_exe",
        dest="blastn_exe",
        action="store",
        default=pyani_config.BLASTN_DEFAULT,
        type=Path,
        help="path to blastn executable",
    )
    parser.add_argument(
        "--format_exe",
        dest="format_exe",
        action="store",
        default=pyani_config.MAKEBLASTDB_DEFAULT,
        type=Path,
        help="path to makeblastdb executable",
    )
    parser.add_argument(
        "--fragsize",
        dest="fragsize",
        action="store",
        type=int,
        default=pyani_config.FRAGSIZE,
        help="blastn query fragment size",
    )
    parser.set_defaults(func=subcommands.subcmd_anib)
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
    """Register the color command; takes a single positional path."""
    color_parser = parser.add_parser(cls.NAME)
    color_parser.set_defaults(command=cls)
    color_parser.add_argument("path")