def main(): argparser = ArgParser(description="Load TUPA model and visualize, saving to .png file.") argparser.add_argument("models", nargs="+", help="model file basename(s) to load") args = argparser.parse_args() for filename in args.models: model = load_model(filename) visualize(model, filename)
def add_arguments(cls, arg_parser: ArgParser) -> None:
    """
    Add pipeline-specific arguments. The parsed arguments are passed to the
    constructor as keywords.
    """
    arg_parser.add_argument("-c", is_config_file=True, help="config file path")
    arg_parser.add_argument("--debug", action="store_true", help="turn on debugging")
    arg_parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="force extract and transform, ignoring any cached data",
    )
    arg_parser.add_argument(
        "--force-extract",
        action="store_true",
        help="force extract, ignoring any cached data",
    )
    arg_parser.add_argument(
        "--logging-level",
        help="set logging level (see Python logging module)",
    )
def _mk_maget_parser(parser: ArgParser):
    group = parser.add_argument_group("MAGeT options", "Options for running MAGeT.")
    group.add_argument("--atlas-library", dest="atlas_lib",  # can't make required=True since may not be using MAGeT :|
                       type=str,  # TODO: check existence of this dir?
                       help="Directory of existing atlas/label pairs")
    group.add_argument("--pairwise", dest="pairwise",
                       action="store_true",
                       help="If specified, register inputs to each other pairwise. [Default]")
    group.add_argument("--no-pairwise", dest="pairwise",
                       action="store_false",
                       help="If specified, only register inputs to atlases in library.")
    parser.set_defaults(pairwise=True)
    group.add_argument("--mask", dest="mask",
                       action="store_true", default=False,
                       help="Create a mask for all images prior to handling labels. [Default = %(default)s]")
    group.add_argument("--mask-only", dest="mask_only",
                       action="store_true", default=False,
                       help="Create a mask for all images only, do not run full algorithm. [Default = %(default)s]")
    group.add_argument("--max-templates", dest="max_templates",
                       default=25, type=int,
                       help="Maximum number of templates to generate. [Default = %(default)s]")
    group.add_argument("--masking-method", dest="mask_method",
                       default="minctracc", type=str,
                       help="Specify whether to use minctracc or mincANTS for masking. [Default = %(default)s]")
    group.add_argument("--masking-nlin-protocol", dest="masking_nlin_protocol",
                       # TODO basically copied from nlin parser
                       type=str, default=None,
                       help="Can optionally specify a registration protocol that is different from the nlin protocol. "
                            "Parameters must be specified as in either of the following examples: \n"
                            "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n"
                            "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n"
                            "[Default = %(default)s]")
    return parser
def setup_config_arguments(argParser: configargparse.ArgParser):
    argParser.add_argument('-c', '--config-file', required=False, is_config_file=True,
                           help='Path to the config file which should be used.')
    argParser.add_argument('-t', '--telegram_api_token', required=True,
                           help='Your Telegram bot token.')
    argParser.add_argument('-ma', '--mqtt_server_address', required=False, default='127.0.0.1',
                           help='The IP address of the MQTT server.')
    argParser.add_argument('-mp', '--mqtt_server_port', required=False, type=int, default=1887,
                           help='The port of the MQTT server.')
    argParser.add_argument('-d', '--debug', required=False, action='store_true', default=False,
                           help='Set this switch for additional debug logs.')
def inject_arguments(self, parser: ArgumentParser) -> None:
    """Inject arguments."""
    super().inject_arguments(parser)

    def _inject_mssql_uri(mssql_uri: str) -> str:
        self._mssql_uri = mssql_uri
        return mssql_uri

    parser.add(
        "--mssql-uri",
        required=True,
        help=" ".join((
            "MSSQL URI used to connect to a MSSQL database:",
            ("mssql+pymssql://USER:PASS@HOST:PORT/DATABASE?"
             "timeout=TIMEOUT"),
            "Use a valid uri.",
            "Url encode all parts, but do not encode the entire uri.",
            "No unencoded colons, ampersands, slashes,",
            "question-marks, etc. in parts.",
            "Specifically, check url encoding of USER (domain slash)",
            "and PASSWORD.",
        )),
        env_var="MSSQL_URI",
        type=_inject_mssql_uri,
    )
def _mk_thickness_parser(parser: ArgParser):
    group = parser.add_argument_group("Thickness", "Thickness calculation options.")
    group.add_argument("--run-thickness", action='store_true', dest="run_thickness",
                       help="Run thickness computation.")
    group.add_argument("--no-run-thickness", action='store_false', dest="run_thickness",
                       help="Don't run thickness computation.")
    parser.set_defaults(run_thickness=True)
    group.add_argument("--label-mapping", type=str, dest="label_mapping",
                       help="path to CSV file mapping; see minclaplace/wiki/LaplaceGrid")
    group.add_argument("--atlas-fwhm", dest="atlas_fwhm", type=float,  # default ?!
                       help="Blurring kernel (mm) for atlas")
    group.add_argument("--thickness-fwhm", dest="thickness_fwhm", type=float,  # default??
                       help="Blurring kernel (mm) for cortical surfaces")
    return parser
def main(cls, args: Optional[Dict[str, object]] = None):
    if args is None:
        arg_parser = ArgParser()
        cls.add_arguments(arg_parser)
        args = arg_parser.parse_args()
        args = vars(args).copy()

    if args.get("debug", False):
        logging_level = logging.DEBUG
    elif args.get("logging_level") is not None:
        logging_level = getattr(logging, args["logging_level"].upper())
    else:
        logging_level = logging.INFO
    logging.basicConfig(
        format="%(asctime)s:%(module)s:%(lineno)s:%(name)s:%(levelname)s: %(message)s",
        level=logging_level,
    )

    pipeline_kwds = args.copy()
    for key in ("force", "force_extract", "logging_level"):
        pipeline_kwds.pop(key, None)

    pipeline = cls(**pipeline_kwds)

    force = bool(args.get("force", False))
    force_extract = force or bool(args.get("force_extract", False))
    pipeline.extract_transform_load(force_extract=force_extract)
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "--keep-chart-changes", required=False, action="store_true", help=f"Should the changes made in {_chart_yaml} be kept", )
def add_arguments(self, arg_parser: ArgParser, add_parent_args):
    arg_parser.add_argument("--cskg-release-zip-file-path",
                            help="path to a CSKG release .zip file")
    arg_parser.add_argument(
        "--data-dir-path",
        default=str(paths.DATA_DIR),
        help="path to a directory to store extracted and transformed data",
    )
def _mk_segmentation_parser(parser: ArgParser, default: bool):
    group = parser.add_argument_group("Segmentation", "Segmentation options.")
    group.add_argument("--run-maget", action='store_true', dest="run_maget",
                       help="Run MAGeT segmentation. [default = %(default)s]")
    group.add_argument("--no-run-maget", dest="run_maget", action='store_false',
                       help="Don't run MAGeT segmentation")
    parser.set_defaults(run_maget=True)
    return parser
def main(): argparser = ArgParser(description="Load TUPA model and export as .npz file.") argparser.add_argument("models", nargs="+", help="model file basename(s) to load") args = argparser.parse_args() for filename in args.models: model = load_model(filename) save_model(model, filename) model.config.save(filename)
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "--destination", required=False, default=".", help="Path of a directory to store the packaged tgz.", )
def go_2(p, current_prefix, current_ns):
    if isinstance(p, BaseParser):
        new_p = ArgParser(default_config_files=config_files)
        for a in p.argparser._actions:
            new_a = copy.copy(a)
            ss = copy.deepcopy(new_a.option_strings)
            for ix, s in enumerate(new_a.option_strings):
                if s.startswith("--"):
                    ss[ix] = "-" + current_prefix + "-" + s[2:]
                else:
                    raise NotImplementedError
            new_a.option_strings = ss
            new_p._add_action(new_a)
        _used_args, _rest = new_p.parse_known_args(args, namespace=current_ns)
        # add a "_flags" field to each object so we know what flags caused a certain option to be set:
        # (however, note that post-parsing we may munge around ...)
        flags_dict = defaultdict(set)
        for action in new_p._actions:
            for opt in action.option_strings:
                flags_dict[action.dest].add(opt)
        current_ns.flags_ = Namespace(**flags_dict)
        # TODO: could continue parsing from `_rest` instead of original `args`
    elif isinstance(p, CompoundParser):
        current_ns.flags_ = set()  # could also check for the CompoundParser case and not set flags there,
                                   # since there will never be any
        for q in p.parsers:
            ns = Namespace()
            if q.namespace in current_ns.__dict__:
                raise ValueError("Namespace field '%s' already in use" % q.namespace)
                # TODO could also allow, say, a None
            else:
                # gross but how to write n-ary identity fn that behaves sensibly on single arg??
                current_ns.__dict__[q.namespace] = ns
            # FIXME this casting doesn't work for configurations with positional arguments,
            # which aren't unpacked correctly -- better to use a namedtuple
            # (making all arguments keyword-only also works, but then you have to supply
            # often meaningless defaults in the __init__)
            go_2(q.parser,
                 current_prefix=current_prefix + (('-' + q.prefix) if q.prefix is not None else ''),
                 current_ns=ns)
            # If a cast function is provided, apply it to the namespace, possibly doing dynamic type checking
            # and also allowing the checker to provide hinting for the types of the fields
            flags = ns.flags_
            del ns.flags_
            fixed = (q.cast(current_ns.__dict__[q.namespace])
                     #(q.cast(**vars(current_ns.__dict__[q.namespace]))
                     if q.cast else current_ns.__dict__[q.namespace])
            if isinstance(fixed, tuple):
                fixed = fixed._replace(flags_=flags)  # NamedTuple update; plain tuples have no `replace`
            elif isinstance(fixed, Namespace):
                setattr(fixed, "flags_", flags)
            else:
                raise ValueError("currently only Namespace and NamedTuple objects are supported return types from "
                                 "parsing; got %s (a %s)" % (fixed, type(fixed)))
            current_ns.__dict__[q.namespace] = fixed
            # TODO current_ns or current_namespace or ns or namespace?
    else:
        raise TypeError("parser %s wasn't a %s (%s or %s) but a %s" %
                        (p, Parser, BaseParser, CompoundParser, p.__class__))
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "--kubelinter-config", required=False, help= f"Path to optional 'kube-linter' config file. If empty, tries to load " f"'{self._default_kubelinter_cfg_file}'.", )
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "-c", "--chart-dir", required=False, default=".", help="Path to the Helm Chart to build.", )
def parse_args():
    parser = ArgParser(default_config_files=[".env"])
    parser.add("--start_page", required=True, type=int)
    parser.add("--end_page", type=int, default=701)
    parser.add("--file", required=True, help="books description file")
    args = parser.parse_args()
    return args
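# A minimal usage sketch for the parser above. configargparse reads the
# default config file (".env") as `key = value` lines, so required options
# can come from that file; the file contents and values below are
# illustrative assumptions, not part of the original snippet.
#
#   # .env
#   start_page = 1
#   file = books.txt
#
# Command-line values override the config file:
#   python main.py --start_page 50        (script name is hypothetical)
#
# Assuming the .env shown above exists:
args = parse_args()
print(args.start_page, args.end_page, args.file)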
def main(): argparser = ArgParser( description= "Load TUPA model and save the features enumeration as a text JSON file." ) argparser.add_argument("models", nargs="+", help="model file basename(s) to load") argparser.add_argument("-s", "--suffix", default=".enum.json", help="filename suffix to append") argparser.add_argument("-l", "--lang", help="use spaCy model to decode numeric IDs") args = argparser.parse_args() for filename in args.models: model = load_model(filename) params = model.feature_extractor.params if args.lang: vocab = get_vocab(lang=args.lang) for param in params.values(): if param.data: param.data = [ decode(vocab, v) for v in sorted(param.data, key=param.data.get) ] save_json(model.filename + args.suffix, params)
def main():
    # command line option handling
    # use an environment variable to look for a default config file
    # Alternately, we could use a default location for the file
    # (say `files = ['/etc/pydpiper.cfg', '~/pydpiper.cfg', './pydpiper.cfg']`)
    # TODO this logic is duplicated in application.py
    default_config_file = os.getenv("PYDPIPER_CONFIG_FILE")
    if default_config_file is not None:
        try:
            with open(default_config_file):
                pass
        except OSError:
            warnings.warn(f"PYDPIPER_CONFIG_FILE is set to '{default_config_file}', which can't be opened.")
    if default_config_file is not None:
        files = [default_config_file]
    else:
        files = []

    from pydpiper.core.arguments import _mk_execution_parser
    parser = ArgParser(default_config_files=files)
    _mk_execution_parser(parser)

    # using parse_known_args instead of parse_args is a hack since we
    # currently send ALL arguments from the main program to the executor.
    # Alternately, we could keep a copy of the executor parser around
    # when constructing the executor shell command
    options, _ = parser.parse_known_args()
    ensure_exec_specified(options.num_exec)

    def local_launch(options):
        pe = pipelineExecutor(options=options, uri_file=options.urifile, pipeline_name="anon-executor")
        # didn't parse application options so don't have a --pipeline-name
        # FIXME - I doubt missing the other options even works, otherwise we could change the executor interface!!
        # executors don't use any shared-memory constructs, so OK to copy
        ps = [Process(target=launchExecutor, args=(pe,)) for _ in range(options.num_exec)]
        for p in ps:
            p.start()
        for p in ps:
            p.join()

    if options.local:
        local_launch(options)
    elif options.submit_server:
        roq = q.runOnQueueingSystem(options, sysArgs=sys.argv)
        for i in range(options.num_exec):
            roq.createAndSubmitExecutorJobFile(i, after=None, time=q.timestr_to_secs(options.time))
    elif options.queue_type is not None:
        for i in range(options.num_exec):
            pe = pipelineExecutor(options=options, uri_file=options.urifile, pipeline_name="anon-executor")
            pe.submitToQueue(1)
            # TODO is there a reason why we have logic for submitting `i` executors again here?
    else:
        local_launch(options)
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "--ct-config", required=False, help="Path to optional 'ct' lint config file.", ) config_parser.add_argument( "--ct-schema", required=False, help="Path to optional 'ct' schema file.", )
def get_config() -> Namespace:
    """
    Parse config and return the parsed args

    :return: ConfigArgParse Namespace object holding parsed args
    """
    parser = ArgParser()
    add_args(parser)
    args = parser.parse_args()
    return args
def add_arguments(self, arg_parser: ArgParser):
    arg_parser.add_argument(
        "--benchmark-name",
        required=True,
        help="name of the benchmark the submission was tested against (in snake_case)",
    )
    arg_parser.add_argument(
        "--using-test-data",
        help="true if using truncated data for testing (in the test_data directory)\n"
             "alters the test file input path",
    )
class Cli:
    __COMMAND_CLASSES = {
        "create-spreadsheets": CreateSpreadsheetsCommand,
    }

    def __init__(self):
        self.__arg_parser = ArgParser()
        self.__logger = logging.getLogger(self.__class__.__name__)

    def __add_arguments(self):
        arg_parsers = [self.__arg_parser]
        subparsers = self.__arg_parser.add_subparsers(dest="command", required=True)
        for command_name, command_class in self.__COMMAND_CLASSES.items():
            command_arg_parser = subparsers.add_parser(command_name)
            command_class.add_arguments(command_arg_parser)
            arg_parsers.append(command_arg_parser)
        for arg_parser in arg_parsers:
            arg_parser.add_argument("-c", is_config_file=True, help="config file path")
            arg_parser.add_argument(
                "--debug", action="store_true", help="turn on debugging"
            )
            arg_parser.add_argument(
                "--logging-level",
                help="set logging level (see Python logging module)",
            )

    def __configure_logging(self, args):
        if args.debug:
            logging_level = logging.DEBUG
        elif args.logging_level is not None:
            logging_level = getattr(logging, args.logging_level.upper())
        else:
            logging_level = logging.INFO
        logging.basicConfig(
            format="%(asctime)s:%(processName)s:%(module)s:%(lineno)s:%(name)s:%(levelname)s: %(message)s",
            level=logging_level,
        )

    def main(self):
        self.__add_arguments()
        args = self.__arg_parser.parse_args()
        self.__configure_logging(args)
        command_class = self.__COMMAND_CLASSES[args.command]
        command_kwds = vars(args).copy()
        command_kwds.pop("c")
        command_kwds.pop("logging_level")
        command = command_class(**command_kwds)
        command()
def get_settings_parser() -> ArgParser:
    """Returns the single global argument parser for adding parameters.

    Parameters can be added in all modules by add_argument. After calling
    parse() once in the main program, all settings are available in the
    global settings dictionary.
    """
    global _parser  # pylint: disable=global-statement
    if not _parser:
        _parser = ArgParser(  # default_config_files=["default.cfg"],
            formatter_class=ArgumentDefaultsRawHelpFormatter)
        _parser.set_defaults(seed=0)
    return _parser
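# A short usage sketch for the shared settings parser above; `--alpha` is a
# hypothetical parameter added by some module, not part of the original code.

# Module A registers its parameter on the shared parser:
parser = get_settings_parser()
parser.add_argument("--alpha", type=float, default=0.5,
                    help="hypothetical module parameter")

# The main program parses once; `seed` comes from set_defaults(seed=0) above.
settings = parser.parse_args([])
print(settings.alpha, settings.seed)  # 0.5 0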
def main(): argparser = ArgParser(description="Visualize scores of a model over the dev set, saving to .png file.") argparser.add_argument("models", nargs="+", help="model file basename(s) to load") args = argparser.parse_args() for pattern in args.models: for filename in sorted(glob(pattern)) or [pattern]: basename, _ = os.path.splitext(filename) for div in "dev", "test": try: scores = load_scores(basename, div=div) except OSError: continue visualize(scores, basename, div=div)
def create_args():
    parser = ArgParser()
    parser.add('--db_section')
    parser.add('--reqnums')
    parser.add('--csv')
    args = parser.parse_args()
    return args
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "--generate-metadata", required=False, action="store_true", help="Generate the metadata file for Giant Swarm App Platform.", ) config_parser.add_argument( "--catalog-base-url", required=False, help= "Base URL of the catalog in which the app package will be stored in. Should end with a /", )
def _mk_registration_parser(p: ArgParser) -> ArgParser:
    g = p.add_argument_group("General registration options", "....")
    # p = ArgParser(add_help=False)
    g.add_argument("--input-space", dest="input_space",
                   type=lambda x: InputSpace[x],  # type: ignore # mypy/issues/741
                   default=InputSpace.native,
                   # choices=[x for x, _ in InputSpace.__members__.items()],
                   help="Option to specify space of input-files. Can be native (default), lsq6, lsq12. "
                        "Native means that there is no prior formal alignment between the input files "
                        "yet. lsq6 means that the input files have been aligned using translations "
                        "and rotations; the code will continue with a 12 parameter alignment. lsq12 "
                        "means that the input files are fully linearly aligned. Only non-linear "
                        "registrations are performed. [Default=%(default)s]")
    g.add_argument("--resolution", dest="resolution",
                   type=float, default=None,
                   help="Specify the resolution at which you want the registration to be run. "
                        "If not specified, the resolution of the target of your pipeline will "
                        "be used. [Default=%(default)s]")
    g.add_argument("--subject-matter", dest="subject_matter",
                   type=str, default=None,
                   help="Specify the subject matter for the pipeline. This will set the parameters "
                        "for multiple programs based on the overall size of the subject matter, instead "
                        "of using the resolution of the files. Currently supported option is: \"mousebrain\". "
                        "[Default=%(default)s]")
    return p  # g?
def inject_arguments(  # pylint: disable=no-self-use,protected-access
    self, parser: ArgumentParser
) -> None:
    """Inject arguments."""
    parser._default_config_files = [
        "/local/config.yaml",
        "/secrets/config.yaml",
    ]
    parser._ignore_unknown_config_file_keys = True
    parser.add(
        "-c",
        "--config",
        is_config_file=True,
        help="config file path",
        env_var="CONFIG",  # make ENV match default metavar
    )
def inject_arguments(self, parser: ArgumentParser) -> None: """Inject arguments.""" super().inject_arguments(parser) def _inject_model(path: str) -> Model: model = cast(Model, load_pickle_file(path)) self.model = model return model parser.add( "--model", required=True, help="Path to pickled model", env_var="MODEL_PATH", type=_inject_model, )
def initialize_config(self, config_parser: configargparse.ArgParser) -> None: config_parser.add_argument( "--replace-app-version-with-git", required=False, action="store_true", help= f"Should the {_chart_yaml_app_version_key} in {_chart_yaml} be replaced by a tag and hash from git", ) config_parser.add_argument( "--replace-chart-version-with-git", required=False, action="store_true", help= f"Should the {_chart_yaml_chart_version_key} in {_chart_yaml} be replaced by a tag and hash from git", )
def _mk_staging_parser(parser: ArgParser):
    group = parser.add_argument_group("Embryo staging options",
                                      "Options for staging embryos in a 4D atlas.")
    group.add_argument("--csv-4D", dest="csv_4D", type=str,
                       help="CSV containing information about the 4D atlas. Should contain "
                            "the following fields: `volume`, `timepoint`, `file`, `mask_file`.")
    return parser
def _mk_model_building_parser(parser: ArgParser):
    group = parser.add_argument_group(
        "Model building options", "Options specific to consensus model building")
    #group.add_argument("--registration-strategy", dest="reg_strategy",
    #                   default="build_model", choices=['build_model', 'pairwise', 'tournament'],
    #                   help="Process used for model construction [Default = %(default)s]")
    #group.add_argument("--preliminary-registration-strategy", dest="preliminary_reg_strategy",
    #                   default=None, choices=['pairwise', 'tournament'],
    #                   help="Process used to construct a preliminary target for nonlinear model building "
    #                        "(use with '--registration-strategy=build_model' only!)")
    #group.add_argument("--preliminary-registration-protocol", dest="preliminary_reg_protocol",
    #                   default=None, type=str,
    #                   help="Protocol file for the optional preliminary model building")
    group.add_argument(
        "--pairwise-nlin-max-pairs",
        default=None, type=int, dest="prelim_nlin_max_pairs",
        help="Maximum number of nonlinear registrations per input file "
             "for preliminary pairwise nonlinear model construction [default = %(default)s]")
    group.add_argument(
        "--pairwise-nlin-max-images",
        default=25, type=int, dest="prelim_nlin_max_images",
        help="Maximum number of images to use "
             "for preliminary pairwise nonlinear model construction [default = %(default)s]")
    # TODO prelim_tournament_max_depth
    # continue as a build model afterwards??
    return parser
def _mk_common_space_parser(parser: ArgParser):
    group = parser.add_argument_group("Common space options",
                                      "Options for registration/resampling to common (db) space.")
    group.add_argument("--common-space-model", dest="common_space_model",
                       type=str,
                       help="Model image defining the common (db) space.")
    group.add_argument("--no-common-space-registration", dest="do_common_space_registration",
                       default=False, action="store_false",
                       help="Skip registration to common (db) space.")
    return parser
def add_bool_arg(parser: ArgParser, name: str, default: bool, **kwargs):
    """Add a boolean parameter to the settings parser.

    This helper function adds two arguments "--"+name and "--no-"+name to the
    settings parser for a boolean parameter.

    :param parser: parser obtained by get_settings_parser
    :param name: name of the parameter without "--"
    :param default: default value
    :param kwargs: further parameters such as help
    """
    parser.add_argument('--' + name, dest=name, action='store_true', default=default, **kwargs)
    parser.add_argument('--no-' + name, dest=name, action='store_false')
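# A quick sketch of the paired-flag pattern add_bool_arg produces; the
# `verbose` parameter name is illustrative, not from the original code.
from configargparse import ArgParser

parser = ArgParser()
add_bool_arg(parser, "verbose", default=True, help="print progress messages")

print(parser.parse_args([]).verbose)                # True (the default)
print(parser.parse_args(["--no-verbose"]).verbose)  # False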
def _mk_application_parser(p: ArgParser) -> ArgParser:
    """
    The arguments that all applications share:
    --pipeline-name
    --restart
    --no-restart
    --output-dir
    --create-graph
    --execute
    --no-execute
    --version
    --verbose
    --no-verbose
    files - leftover arguments (0 or more are allowed)
    """
    # p = ArgParser(add_help=False)
    g = p.add_argument_group("General application options",
                             "General options for all pydpiper applications.")
    g.add_argument("--restart", dest="restart",
                   action="store_true", default=True,
                   help="Restart pipeline using backup files. [default = %(default)s]")
    g.add_argument("--pipeline-name", dest="pipeline_name", type=str,
                   default=time.strftime("pipeline-%d-%m-%Y-at-%H-%M-%S"),
                   help="Name of pipeline and prefix for models.")
    g.add_argument("--no-restart", dest="restart",
                   action="store_false",
                   help="Opposite of --restart")
    # TODO instead of prefixing all subdirectories (logs, backups, processed, ...)
    # with the pipeline name/date, we could create one identifying directory
    # and put these other directories inside
    g.add_argument("--output-dir", dest="output_directory",
                   type=str, default='',
                   help="Directory where output data and backups will be saved.")
    g.add_argument("--create-graph", dest="create_graph",
                   action="store_true", default=False,
                   help="Create a .dot file with graphical representation of pipeline relationships [default = %(default)s]")
    g.set_defaults(execute=True)
    g.set_defaults(verbose=True)
    g.add_argument("--execute", dest="execute",
                   action="store_true",
                   help="Actually execute the planned commands [default = %(default)s]")
    g.add_argument("--no-execute", dest="execute",
                   action="store_false",
                   help="Opposite of --execute")
    g.add_argument("--version", action="version",
                   version="%(prog)s (" + get_distribution("pydpiper").version + ")")  # pylint: disable=E1101
    g.add_argument("--verbose", dest="verbose",
                   action="store_true",
                   help="Be verbose in what is printed to the screen [default = %(default)s]")
    g.add_argument("--no-verbose", dest="verbose",
                   action="store_false",
                   help="Opposite of --verbose")
    g.add_argument("--files", type=str, nargs='*', metavar='file',
                   help='Files to process')
    g.add_argument("--csv-file", dest="csv_file", type=str, default=None,
                   help="CSV file containing application-specific columns. [Default=%(default)s]")
    return p
def main(): argparser = ArgParser( description= "Visualize scores of a model over the dev set, saving to .png file.") argparser.add_argument("models", nargs="+", help="model file basename(s) to load") args = argparser.parse_args() for pattern in args.models: for filename in glob(pattern) or [pattern]: basename, _ = os.path.splitext(filename) for div in "dev", "test": try: scores = load_scores(basename, div=div) except OSError: continue visualize(scores, basename, div=div)
def _mk_chain_parser():
    p = ArgParser(add_help=False)
    p.add_argument("--csv-file", dest="csv_file",
                   type=str, required=True,
                   help="The spreadsheet with information about your input data. "
                        "For the registration chain you are required to have the "
                        "following columns in your csv file: \"subject_id\", "
                        "\"timepoint\", and \"filename\". Optionally you can have "
                        "a column called \"is_common\" that indicates that a scan "
                        "is to be used for the common time point registration "
                        "using a 1, and 0 otherwise.")
    p.add_argument("--common-time-point", dest="common_time_point",
                   type=int, default=None,
                   help="The time point at which the inter-subject registration will be "
                        "performed. I.e., the time point that will link the subjects together. "
                        "If you want to use the last time point from each of your input files "
                        "(they might differ per input file), specify -1. If the common time "
                        "point is not specified, the assumption is that the spreadsheet contains "
                        "the mapping using the \"is_common\" column. [Default = %(default)s]")
    p.add_argument("--common-time-point-name", dest="common_time_point_name",
                   type=str, default="common",
                   help="Option to specify a name for the common time point. This is useful for the "
                        "creation of more readable output file names. Default is \"common\". Note "
                        "that the common time point is the one created by an iterative group-wise "
                        "registration (inter-subject).")
    return p
def configure_global_options(config_parser: configargparse.ArgParser) -> None:
    config_parser.add_argument(
        "-d",
        "--debug",
        required=False,
        default=False,
        action="store_true",
        help="Enable debug messages.",
    )
    config_parser.add_argument("--version", action="version",
                               version=f"{app_name} {get_version()}")
    config_parser.add_argument(
        "-b",
        "--build-engine",
        required=False,
        default="helm3",
        type=BuildEngineType,
        help="Select the build engine used for building your chart.",
    )
    steps_group = config_parser.add_mutually_exclusive_group()
    steps_group.add_argument(
        "--steps",
        nargs="+",
        help=f"List of steps to execute. Available steps: {ALL_STEPS}",
        required=False,
        default=["all"],
    )
    steps_group.add_argument(
        "--skip-steps",
        nargs="+",
        help=f"List of steps to skip. Available steps: {ALL_STEPS}",
        required=False,
        default=[],
    )
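# A stripped-down sketch of the mutually exclusive --steps/--skip-steps
# pattern above, with ALL_STEPS, the version action, and the build-engine
# type left out for brevity.
import configargparse

p = configargparse.ArgParser()
steps_group = p.add_mutually_exclusive_group()
steps_group.add_argument("--steps", nargs="+", default=["all"])
steps_group.add_argument("--skip-steps", nargs="+", default=[])

print(p.parse_args(["--steps", "build", "test"]).steps)  # ['build', 'test']
# Passing both flags is rejected by the parser:
#   p.parse_args(["--steps", "build", "--skip-steps", "test"])  # error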
def inject_arguments(self, parser: ArgumentParser) -> None: """Inject arguments.""" super().inject_arguments(parser) def _inject_mongo_uri(mongo_uri: str) -> str: self._mongo_uri = mongo_uri return mongo_uri parser.add( "--mongo-uri", required=True, help=("Mongo URI used to connect to a Mongo database: " "mongodb://*****:*****@HOST1,HOST2,.../DATABASE?" "replicaset=REPLICASET&authsource=admin " "Url encode all parts: PASS in particular"), env_var="MONGO_URI", type=_inject_mongo_uri, )
def inject_arguments(self, parser: ArgumentParser) -> None: """Inject arguments.""" super().inject_arguments(parser) def _inject_mssql_uri(mssql_uri: str) -> str: self._mssql_uri = mssql_uri return mssql_uri parser.add( "--mssql-uri", required=True, help=( "MSSQL URI used to connect to a MSSQL database: " "mssql+pymssql://USER:PASS@HOST:PORT/DATABASE?timeout=TIMEOUT " "Url encode all parts: USER (domain slash), PASS in particular" ), env_var="MSSQL_URI", type=_inject_mssql_uri, )
def go_2(p, current_prefix, current_ns):
    if isinstance(p, BaseParser):
        new_p = ArgParser(default_config_files=config_files)
        for a in p.argparser._actions:
            new_a = copy.copy(a)
            ss = copy.deepcopy(new_a.option_strings)
            for ix, s in enumerate(new_a.option_strings):
                if s.startswith("--"):
                    ss[ix] = "-" + current_prefix + "-" + s[2:]
                else:
                    raise NotImplementedError
            new_a.option_strings = ss
            new_p._add_action(new_a)
        _used_args, _rest = new_p.parse_known_args(args, namespace=current_ns)
        # TODO: could continue parsing from `_rest` instead of original `args`
    elif isinstance(p, CompoundParser):
        for q in p.parsers:
            ns = Namespace()
            if q.namespace in current_ns.__dict__:
                raise ValueError("Namespace field '%s' already in use" % q.namespace)
                # TODO could also allow, say, a None
            else:
                # gross but how to write n-ary identity fn that behaves sensibly on single arg??
                current_ns.__dict__[q.namespace] = ns
            # FIXME this casting doesn't work for configurations with positional arguments,
            # which aren't unpacked correctly -- better to use a namedtuple
            # (making all arguments keyword-only also works, but then you have to supply
            # often meaningless defaults in the __init__)
            go_2(q.parser,
                 current_prefix=current_prefix + (('-' + q.prefix) if q.prefix is not None else ''),
                 current_ns=ns)
            # If a cast function is provided, apply it to the namespace, possibly doing dynamic type checking
            # and also allowing the checker to provide hinting for the types of the fields
            current_ns.__dict__[q.namespace] = (q.cast(current_ns.__dict__[q.namespace])
                                                #(q.cast(**vars(current_ns.__dict__[q.namespace]))
                                                if q.cast else current_ns.__dict__[q.namespace])
            # TODO current_ns or current_namespace or ns or namespace?
    else:
        raise TypeError("parser %s wasn't a %s (%s or %s) but a %s" %
                        (p, Parser, BaseParser, CompoundParser, p.__class__))
def _mk_nlin_parser(p: ArgParser): group = p.add_argument_group("Nonlinear registration options", "Options for performing a non-linear registration") group.add_argument("--registration-method", dest="reg_method", default="ANTS", choices=["ANTS", "minctracc"], help="Specify whether to use minctracc or ANTS for non-linear registrations. " "[Default = %(default)s]") group.add_argument("--nlin-protocol", dest="nlin_protocol", type=str, default=None, help="Can optionally specify a registration protocol that is different from defaults. " "Parameters must be specified as in either or the following examples: \n" "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n" "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n" "[Default = %(default)s]") return p
def main(): argparser = ArgParser(description="Load TUPA model and save again to a different file.") argparser.add_argument("models", nargs="+", help="model file basename(s) to load") argparser.add_argument("-s", "--suffix", default=".1", help="filename suffix to append") args = argparser.parse_args() for filename in args.models: model = load_model(filename) model.filename += args.suffix model.classifier.filename += args.suffix model.save()
def _mk_nlin_parser(p: ArgParser): group = p.add_argument_group("Nonlinear registration options", "Options for performing a non-linear registration") group.add_argument("--registration-method", dest="reg_method", default="ANTS", choices=["ANTS", "antsRegistration", "demons", "DRAMMS", "elastix", "minctracc"], help="Specify algorithm used for non-linear registrations. " "[Default = %(default)s]") # TODO wire up the choices here in reg_method and reg_strategy to the actual ones ... group.add_argument("--registration-strategy", dest="reg_strategy", default="build_model", choices=['build_model', 'pairwise', 'tournament', 'tournament_and_build_model', 'pairwise_and_build_model'], help="Process used for model construction [Default = %(default)s") group.add_argument("--nlin-protocol", dest="nlin_protocol", type=str, default=None, help="Can optionally specify a registration protocol that is different from defaults. " "Parameters must be specified as in either or the following examples: \n" "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n" "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n" "[Default = %(default)s]") return p
def add_param_arguments(ap=None, arg_default=None):  # arguments with possible format-specific parameter values
    def add_argument(a, *args, **kwargs):
        return a.add_argument(*args, **kwargs)

    def add(a, *args, default=None, func=add_argument, **kwargs):
        arg = func(a, *args, default=default if arg_default is None else arg_default, **kwargs)
        try:
            RESTORED_ARGS.add(arg.dest)
        except AttributeError:
            RESTORED_ARGS.update(get_group_arg_names(arg))

    def add_boolean(a, *args, **kwargs):
        add(a, *args, func=add_boolean_option, **kwargs)

    if not ap:
        ap = ArgParser()

    group = ap.add_argument_group(title="Node labels")
    add(group, "--max-node-labels", type=int, default=0, help="max number of node labels to allow")
    add(group, "--max-node-categories", type=int, default=0, help="max node categories to allow")
    add(group, "--min-node-label-count", type=int, default=2, help="min number of occurrences for a label")
    add_boolean(group, "use-gold-node-labels", "gold node labels when parsing")
    add_boolean(group, "wikification", "use Spotlight to wikify any named node")
    add_boolean(group, "node-labels", "prediction of node labels, if supported by format", default=True)

    group = ap.add_argument_group(title="Structural constraints")
    add_boolean(group, "linkage", "linkage nodes and edges")
    add_boolean(group, "implicit", "implicit nodes and edges")
    add_boolean(group, "remote", "remote edges", default=True)
    add_boolean(group, "constraints", "scheme-specific rules", default=True)
    add_boolean(group, "require-connected", "constraint that output graph must be connected")
    add(group, "--orphan-label", default="orphan", help="edge label to use for nodes without parents")
    add(group, "--max-action-ratio", type=float, default=100, help="max action/terminal ratio")
    add(group, "--max-node-ratio", type=float, default=10, help="max node/terminal ratio")
    add(group, "--max-height", type=int, default=20, help="max graph height")

    group = ap.add_mutually_exclusive_group()
    add(group, "--swap", choices=(REGULAR, COMPOUND), default=REGULAR, help="swap transitions")
    add(group, "--no-swap", action="store_false", dest="swap", help="exclude swap transitions")
    add(ap, "--max-swap", type=int, default=15, help="if compound swap enabled, maximum swap size")

    group = ap.add_argument_group(title="General classifier training parameters")
    add(group, "--learning-rate", type=float, help="rate for model weight updates (default: by trainer/1)")
    add(group, "--learning-rate-decay", type=float, default=0, help="learning rate decay per iteration")
    add(group, "--swap-importance", type=float, default=1, help="learning rate factor for Swap")
    add(group, "--max-training-per-format", type=int, help="max number of training passages per format per iteration")
    add_boolean(group, "missing-node-features", "allow node features to be missing if not available", default=True)
    add(group, "--omit-features", help="string of feature properties to omit, out of " + FEATURE_PROPERTIES)
    add_boolean(group, "curriculum", "sort training passages by action prediction accuracy in previous epoch")

    group = ap.add_argument_group(title="Perceptron parameters")
    add(group, "--min-update", type=int, default=5, help="minimum #updates for using a feature")
    SPARSE_ARG_NAMES.update(get_group_arg_names(group))

    group = ap.add_argument_group(title="Neural network parameters")
    add(group, "--word-dim-external", type=int, default=300, help="dimension for external word embeddings")
    add(group, "--word-vectors", help="file to load external word embeddings from (default: GloVe)")
    add(group, "--vocab", help="file mapping integer ID to word form (to avoid loading spaCy), or '-' to use word form")
    add_boolean(group, "update-word-vectors", "external word vectors in training parameters", default=True)
    add(group, "--word-dim", type=int, default=0, help="dimension for learned word embeddings")
    add(group, "--lemma-dim", type=int, default=200, help="dimension for lemma embeddings")
    add(group, "--tag-dim", type=int, default=20, help="dimension for fine POS tag embeddings")
    add(group, "--pos-dim", type=int, default=20, help="dimension for coarse/universal POS tag embeddings")
    add(group, "--dep-dim", type=int, default=10, help="dimension for dependency relation embeddings")
    add(group, "--edge-label-dim", type=int, default=20, help="dimension for edge label embeddings")
    add(group, "--node-label-dim", type=int, default=0, help="dimension for node label embeddings")
    add(group, "--node-category-dim", type=int, default=0, help="dimension for node category embeddings")
    add(group, "--punct-dim", type=int, default=1, help="dimension for separator punctuation embeddings")
    add(group, "--action-dim", type=int, default=3, help="dimension for input action type embeddings")
    add(group, "--ner-dim", type=int, default=3, help="dimension for input entity type embeddings")
    add(group, "--shape-dim", type=int, default=3, help="dimension for word shape embeddings")
    add(group, "--prefix-dim", type=int, default=2, help="dimension for word prefix embeddings")
    add(group, "--suffix-dim", type=int, default=3, help="dimension for word suffix embeddings")
    add(group, "--output-dim", type=int, default=50, help="dimension for output action embeddings")
    add(group, "--layer-dim", type=int, default=50, help="dimension for hidden layers")
    add(group, "--layers", type=int, default=2, help="number of hidden layers")
    add(group, "--lstm-layer-dim", type=int, default=500, help="dimension for LSTM hidden layers")
    add(group, "--lstm-layers", type=int, default=0, help="number of LSTM hidden layers")
    add(group, "--embedding-layer-dim", type=int, default=500, help="dimension for layers before LSTM")
    add(group, "--embedding-layers", type=int, default=1, help="number of layers before LSTM")
    add(group, "--activation", choices=ACTIVATIONS, default=DEFAULT_ACTIVATION, help="activation function")
    add(group, "--init", choices=INITIALIZERS, default=DEFAULT_INITIALIZER, help="weight initialization")
    add(group, "--minibatch-size", type=int, default=200, help="mini-batch size for optimization")
    add(group, "--optimizer", choices=TRAINERS, default=DEFAULT_TRAINER, help="algorithm for optimization")
    add(group, "--loss", choices=LOSSES, default=DEFAULT_LOSS, help="loss function for training")
    add(group, "--max-words-external", type=int, default=250000, help="max external word vectors to use")
    add(group, "--max-words", type=int, default=10000, help="max number of words to keep embeddings for")
    add(group, "--max-lemmas", type=int, default=3000, help="max number of lemmas to keep embeddings for")
    add(group, "--max-tags", type=int, default=100, help="max number of fine POS tags to keep embeddings for")
    add(group, "--max-pos", type=int, default=100, help="max number of coarse POS tags to keep embeddings for")
    add(group, "--max-deps", type=int, default=100, help="max number of dep labels to keep embeddings for")
    add(group, "--max-edge-labels", type=int, default=15, help="max number of edge labels for embeddings")
    add(group, "--max-puncts", type=int, default=5, help="max number of punctuations for embeddings")
    add(group, "--max-action-types", type=int, default=10, help="max number of action types for embeddings")
    add(group, "--max-action-labels", type=int, default=100, help="max number of action labels to allow")
    add(group, "--max-ner-types", type=int, default=18, help="max number of entity types to allow")
    add(group, "--max-shapes", type=int, default=30, help="max number of word shapes to allow")
    add(group, "--max-prefixes", type=int, default=30, help="max number of 1-character word prefixes to allow")
    add(group, "--max-suffixes", type=int, default=500, help="max number of 3-character word suffixes to allow")
    add(group, "--word-dropout", type=float, default=0.2, help="word dropout parameter")
    add(group, "--word-dropout-external", type=float, default=0, help="word dropout for word vectors")
    add(group, "--lemma-dropout", type=float, default=0.2, help="lemma dropout parameter")
    add(group, "--tag-dropout", type=float, default=0.2, help="fine POS tag dropout parameter")
    add(group, "--pos-dropout", type=float, default=0.2, help="coarse POS tag dropout parameter")
    add(group, "--dep-dropout", type=float, default=0.5, help="dependency label dropout parameter")
    add(group, "--node-label-dropout", type=float, default=0.2, help="node label dropout parameter")
    add(group, "--node-dropout", type=float, default=0.1, help="probability to drop features for a whole node")
    add(group, "--dropout", type=float, default=0.4, help="dropout parameter between layers")
    add(group, "--max-length", type=int, default=120, help="maximum length of input sentence")
    add(group, "--rnn", choices=["None"] + list(RNNS), default=DEFAULT_RNN, help="type of recurrent neural network")
    add(group, "--gated", type=int, nargs="?", default=2, help="gated input to BiRNN and MLP")
    NN_ARG_NAMES.update(get_group_arg_names(group))
    return ap
log_file = os.path.join(self.orig_out_dir, 'train.log')
manifest_list_cfg = ', '.join([k + ':' + v for k, v in self.manifests.items()])

with open(cfg_file, 'w') as f:
    f.write('manifest = [{}]\n'.format(manifest_list_cfg))
    f.write('manifest_root = {}\n'.format(self.out_dir))
    f.write('log = {}\n'.format(log_file))
    f.write('epochs = 90\nrng_seed = 0\nverbose = True\neval_freq = 1\n')

for setn, manifest in self.manifests.items():
    if not os.path.exists(manifest):
        pairs = self.train_or_val_pairs(setn)
        records = [(os.path.relpath(fname, self.out_dir),
                    os.path.relpath(self._target_filename(int(tgt)), self.out_dir))
                   for fname, tgt in pairs]
        np.savetxt(manifest, records, fmt='%s,%s')


if __name__ == "__main__":
    parser = ArgParser()
    parser.add_argument('--input_dir', help='Directory to find input tars', default=None)
    parser.add_argument('--out_dir', help='Directory to write ingested files', default=None)
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    bw = IngestI1K(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size)
    bw.run()
def _process_args(self):
    flags = ArgParser(prog='repoman',
                      add_config_file_help=True,
                      ignore_unknown_config_file_keys=True,
                      default_config_files=self.config_files)
    flags.add('--config-file', required=False, is_config_file=True,
              env_var='REPOMAN_CONFIG_FILE', help='override config file path')

    # global flags
    flags.add('--simpledb-domain', action='store', required=True,
              env_var='REPOMAN_SIMPLEDB_DOMAIN')
    flags.add('--s3-bucket', action='store', required=True,
              env_var='REPOMAN_S3_BUCKET')
    flags.add('--aws-profile', action='store', required=False, default='',
              env_var='REPOMAN_AWS_CREDENTIAL_PROFILE',
              help='Use the specified profile in ~/.aws/credentials')
    flags.add('--region', action='store', required=False, default=None,
              help='AWS region to connect to')
    flags.add('--aws-role', action='store', required=False, default='',
              env_var='REPOMAN_AWS_ROLE',
              help='Full ARN of IAM role to assume before calling any '
                   'other AWS APIs')
    flags.add('--log-config', action='store', required=False, default='',
              env_var='REPOMAN_LOG_CONFIG',
              help='path to a JSON file with a python log configuration')
    flags.add('--skip-checkup', action='store_true', required=False,
              default=False,
              help='do not run system health checkup on every action')
    flags.add('--debug', action='store_true', required=False, default=False,
              help='debug logging')
    flags.add('--gpg-home', required=False, env_var='REPOMAN_GPG_HOME',
              default='~/.gnupg', help='set path to gpg keyring')
    flags.add('--gpg-signer', action='append', required=False,
              help='gpg identity to sign as')
    flags.add('--gpg-pinentry-path', action='store',
              default='/usr/bin/pinentry', required=False,
              help='path to gpg pinentry program')
    flags.add('--gpg-passphrase', action='append', required=False,
              help='passphrase for gpg secret key for signing '
                   '(if multiple, must be in same order as --gpg-signer)')
    flags.add('--auto-purge', action='store', default=0, type=int,
              required=False,
              help='automatically purge packages older than the '
                   'last N revisions when adding or copying')

    # subparsers for commands
    commands = flags.add_subparsers(dest='command')

    # singleton commands
    commands.add_parser('checkup', help='check that all systems are go')
    commands.add_parser('backup', help='dump the simpledb state to a JSON file')

    # restore command
    restore_flags = commands.add_parser(
        'restore', help='restore simpledb state from a JSON file')
    restore_flags.add('filename', nargs=1, help='path to backup file')

    # commands that take flags
    setup_flags = commands.add_parser(
        'setup',
        help='do initial system configuration: create simpledb domain '
             'and s3 bucket, specify at least one each of architecture, '
             'distribution and component to publish.')
    repo_flags = commands.add_parser(
        'repo', help='repo management commands')
    add_flags = commands.add_parser(
        'add', help='add package files to repo')
    cp_flags = commands.add_parser(
        'cp', help='move packages between components and distributions')
    rm_flags = commands.add_parser(
        'rm', help='remove specific packages from repo')
    publish_flags = commands.add_parser(
        'publish', help='publish the repository to s3')
    query_flags = commands.add_parser(
        'query', help='query the repository')

    # command flags

    # query
    query_flags.add('-a', '--architecture', action='append', required=False,
                    help='narrow query by architecture(s)')
    query_flags.add('-d', '--distribution', action='append', required=False,
                    help='narrow query by distribution(s)')
    query_flags.add('-c', '--component', action='append', required=False,
                    help='narrow query by component(s)')
    query_flags.add('-p', '--package', action='append', required=False,
                    help='narrow query by package name(s)')
    query_flags.add('-w', '--wildcard', action='store_true', default=False,
                    help='match package names to left of --package flag')
    query_flags.add('-H', '--query-hidden', action='store_true', default=False,
                    help='include packages "hidden" by the removal of '
                         'their distribution/component/architecture')
    query_flags.add('-f', '--format', action='store', dest='outputfmt',
                    default='simple',
                    choices=('json', 'jsonc', 'packages', 'simple', 'plain',
                             'grid', 'fancy_grid', 'pipe', 'orgtbl', 'jira',
                             'psql', 'rst', 'mediawiki', 'moinmoin', 'html',
                             'latex', 'latex_booktabs', 'textile'),
                    help='select output format for queries & rm/cp prompts')
    query_latest = query_flags.add_mutually_exclusive_group()
    query_latest.add('-v', '--version', action='append',
                     help='only return packages matching these versions')
    query_latest.add('-l', '--latest', action='store_const',
                     dest='latest_versions', const=1,
                     help='only return the most recent package version '
                          '(equivalent to `--recent 1`)')
    query_latest.add('-r', '--recent', action='store', default=0, type=int,
                     dest='latest_versions',
                     help='only return the N most recent package versions')

    # setup
    setup_flags.add('-a', '--architecture', action='append', required=True,
                    help='specify at least one architecture')
    setup_flags.add('-d', '--distribution', action='append', required=True,
                    help='specify at least one distribution')
    setup_flags.add('-c', '--component', action='append', required=True,
                    help='specify at least one component')
    setup_flags.add('--s3-acl', action='store', default='private',
                    required=False, choices=S3_BUCKET_ACLS,
                    help='set a canned ACL for the S3 bucket '
                         '(default is private)')
    setup_flags.add('--s3-region', action='store', required=False,
                    help='set region for s3 bucket '
                         '(default is us-east-1 AKA US/Standard)')
    setup_flags.add('--sns-topic', action='store', required=False,
                    help='AWS SNS topic name for logging')
    setup_flags.add('--origin', action='store', required=False,
                    help='origin string for repository')
    setup_flags.add('--label', action='store', required=False,
                    help='label string for repository')
    setup_flags.add('--enable-website', action='store_true', required=False,
                    default=False,
                    help='configure public website hosting for '
                         'the S3 bucket. Implies --s3-acl=public-read')

    # repo management operations
    repo_commands = repo_flags.add_subparsers(dest='repo_command')

    repo_add_architecture_flags = repo_commands.add_parser(
        'add-architecture', help='add an architecture to repo')
    repo_add_architecture_flags.add(
        'architecture_names', nargs='+', help='architecture to add')
    repo_add_architecture_flags.add(
        '--i-fear-no-evil', action='store_true', default=False,
        required=False, help='skip confirmation step for scary actions')
    raa_confirm = repo_add_architecture_flags.add_mutually_exclusive_group()
    raa_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    raa_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_rm_architecture_flags = repo_commands.add_parser(
        'rm-architecture', help='remove an architecture from repo')
    repo_rm_architecture_flags.add(
        'architecture_names', nargs='+', help='architecture to remove')
    repo_rm_architecture_flags.add(
        '--i-fear-no-evil', action='store_true', default=False,
        required=False, help='skip confirmation step for scary actions')
    rra_confirm = repo_rm_architecture_flags.add_mutually_exclusive_group()
    rra_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rra_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_add_distribution_flags = repo_commands.add_parser(
        'add-distribution', help='add a distribution to repo')
    repo_add_distribution_flags.add(
        'distribution_names', nargs='+', help='distribution to add')
    repo_add_distribution_flags.add(
        '--i-fear-no-evil', action='store_true', default=False,
        required=False, help='skip confirmation step for scary actions')
    rad_confirm = repo_add_distribution_flags.add_mutually_exclusive_group()
    rad_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rad_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_rm_distribution_flags = repo_commands.add_parser(
        'rm-distribution', help='remove a distribution from repo')
    repo_rm_distribution_flags.add(
        'distribution_names', nargs='+', help='distribution to remove')
    repo_rm_distribution_flags.add(
        '--i-fear-no-evil', action='store_true', default=False,
        required=False, help='skip confirmation step for scary actions')
    rrd_confirm = repo_rm_distribution_flags.add_mutually_exclusive_group()
    rrd_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rrd_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_add_component_flags = repo_commands.add_parser(
        'add-component', help='add a component to repo')
    repo_add_component_flags.add(
        'component_names', nargs='+', help='component to add')
    repo_add_component_flags.add(
        '--i-fear-no-evil', action='store_true', default=False,
        required=False, help='skip confirmation step for scary actions')
    rac_confirm = repo_add_component_flags.add_mutually_exclusive_group()
    rac_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rac_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_rm_component_flags = repo_commands.add_parser(
        'rm-component', help='remove a component from repo')
    repo_rm_component_flags.add(
        'component_names', nargs='+', help='component to remove')
    repo_rm_component_flags.add(
        '--i-fear-no-evil', action='store_true', default=False,
        required=False, help='skip confirmation step for scary actions')
    rrc_confirm = repo_rm_component_flags.add_mutually_exclusive_group()
    rrc_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rrc_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_add_topic_flags = repo_commands.add_parser(
        'add-topic', help='send notifications to an SNS topic')
    repo_add_topic_flags.add('topic_name', nargs=1, action='store',
                             help='SNS topic to configure for logging')
    rat_confirm = repo_add_topic_flags.add_mutually_exclusive_group()
    rat_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rat_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_rm_topic_flags = repo_commands.add_parser(
        'rm-topic', help='remove SNS topic logging')
    rrt_confirm = repo_rm_topic_flags.add_mutually_exclusive_group()
    rrt_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rrt_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_commands.add_parser('show-config',
                             help='show current repo configuration')

    repo_add_origin_flags = repo_commands.add_parser(
        'add-origin', help='set the repository origin string')
    repo_add_origin_flags.add('origin', nargs=1, action='store',
                              help='origin string')
    rao_confirm = repo_add_origin_flags.add_mutually_exclusive_group()
    rao_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    rao_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    repo_add_label_flags = repo_commands.add_parser(
        'add-label', help='set the repository label string')
    repo_add_label_flags.add('label', nargs=1, action='store',
                             help='label string')
    ral_confirm = repo_add_label_flags.add_mutually_exclusive_group()
    ral_confirm.add('--confirm', action='store_true', dest='confirm',
                    required=False, default=True,
                    help='confirm any mutating actions')
    ral_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                    required=False, default=False,
                    help='do not prompt for confirmation')

    # add packages
    add_flags.add('-d', '--distribution', action='append', required=True,
                  help='add to specified distribution')
    add_flags.add('-c', '--component', action='append', required=True,
                  help='add to specified component')
    add_flags.add('--overwrite', action='store_true', required=False,
                  default=False,
                  help='re-upload packages even if they already exist '
                       'in the repository')
    add_flags.add('--publish', action='store_true', required=False,
                  default=False,
                  help='publish the repo to s3 after adding packages')
    add_flags.add('files', nargs='+', help='debian package files to add')

    # copy
    cp_flags.add('--src-distribution', action='store', required=True,
                 help='specify one or more distributions to copy from')
    cp_flags.add('--dst-distribution', action='store',
                 help='specify one or more distributions to copy to')
    cp_flags.add('--src-component', action='store', required=True,
                 help='specify one or more components to copy from')
    cp_flags.add('--dst-component', action='store',
                 help='specify one or more components to copy to')
    cp_flags.add('-a', '--architecture', action='append', required=False,
                 help='limit to specified architectures')
    cp_flags.add('-p', '--package', action='append',
                 help='specify one or more package names to act on')
    cp_flags.add('--overwrite', action='store_true', required=False,
                 default=False,
                 help='re-upload packages even if they already exist '
                      'in the repository -- this only applies to cross-'
                      'distribution copies')
    cp_flags.add('--promote', action='store_true', required=False,
                 default=False,
                 help='only copy files where the latest source version '
                      'is more recent than the latest destination version')
    cp_flags.add('-w', '--wildcard', action='store_true', default=False,
                 help='match package names to left of --package flag')
    cp_latest = cp_flags.add_mutually_exclusive_group()
    cp_latest.add('-v', '--version', action='append',
                  help='only copy packages matching these versions')
    cp_latest.add('-l', '--latest', action='store_const',
                  dest='latest_versions', const=1,
                  help='only copy the most recent package version '
                       '(equivalent to `--recent 1`)')
    cp_latest.add('-r', '--recent', action='store', default=0, type=int,
                  dest='latest_versions',
                  help='only copy the N most recent package versions')
    cp_flags.add('--i-fear-no-evil', action='store_true', default=False,
                 required=False,
                 help='skip confirmation step for scary actions')
    cp_confirm = cp_flags.add_mutually_exclusive_group()
    cp_confirm.add('--confirm', action='store_true', dest='confirm',
                   required=False, default=True,
                   help='confirm any mutating actions')
    cp_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm',
                   required=False, default=False,
                   help='do not prompt for confirmation')

    # remove
    rm_flags.add('-a', '--architecture', action='append', required=False,
                 help='limit to specified architectures')
    rm_flags.add('-d', '--distribution', action='append', required=False,
                 help='limit to specified distributions')
    rm_flags.add('-c', '--component', action='append', required=False,
                 help='limit to specified components')
    rm_flags.add('-p', '--package', action='append', required=False,
                 help='limit to specified package names')
    rm_flags.add('--remove-from-s3', action='store_true', default=False,
                 required=False, help='remove package files from s3')
    rm_flags.add('--publish', action='store_true', required=False,
                 default=False, help='publish the repo to s3')
    rm_flags.add('-w', '--wildcard', action='store_true', default=False,
                 help='match package names to left of --package flag')
    rm_flags.add('-H', '--rm-hidden', action='store_true', default=False,
                 help='include packages "hidden" by the removal of '
                      'their distribution/component/architecture')
    rm_flags.add('--i-fear-no-evil', action='store_true', default=False,
                 required=False,
                 help='skip confirmation step for scary actions')
    rm_flags.add('-f', '--format', action='store', dest='outputfmt',
                 default='simple',
                 choices=('json', 'jsonc', 'simple', 'plain', 'grid',
                          'fancy_grid', 'pipe', 'orgtbl', 'jira', 'psql',
                          'rst', 'mediawiki', 'moinmoin', 'html', 'latex',
                          'latex_booktabs', 'textile'),
                 help='select output format for rm prompts')
output format for querys & rm/cp prompts') rm_latest = rm_flags.add_mutually_exclusive_group() rm_latest.add('-v', '--version', action='append', help='only delete packages matching these versions') rm_latest.add('-l', '--exclude-latest', action='store_const', dest='latest_versions', const=1, help='only delete the most recent package version ' '(equivalent to `--recent 1`)') rm_latest.add('-r', '--exclude-recent', action='store', default=0, type=int, dest='latest_versions', help='only delete the N most recent package versions') rm_confirm = rm_flags.add_mutually_exclusive_group() rm_confirm.add('--confirm', action='store_true', dest='confirm', required=False, default=True, help='confirm any mutating actions') rm_confirm.add('-y', '--no-confirm', action='store_false', dest='confirm', required=False, default=False, help='do not prompt for confirmation') # publish to s3 publish_flags.add('-d', '--distribution', action='append', required=False, help='limit to specified distributions ' '(default is all)') config = flags.parse_args(self.argv) return config
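
# The --confirm / -y/--no-confirm pair above is repeated verbatim for every
# subcommand. A possible refactor is sketched below; add_confirm_flags is a
# hypothetical name, not part of this tool. Note that argparse keeps the
# default of the *first* action registered for a dest, so the pair's
# effective default is confirm=True.
def add_confirm_flags(parser):
    confirm = parser.add_mutually_exclusive_group()
    confirm.add_argument('--confirm', action='store_true', dest='confirm',
                         default=True, help='confirm any mutating actions')
    confirm.add_argument('-y', '--no-confirm', action='store_false',
                         dest='confirm', help='do not prompt for confirmation')
    return parser

# e.g. add_confirm_flags(repo_add_architecture_flags) would then replace each
# raa_confirm/rra_confirm/... block above.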
if "amr" not in keep: # Remove AMR-specific features: node label and category delete_if_exists((model.feature_params, model.classifier.params), (NODE_LABEL_KEY, "c")) delete_if_exists((model.classifier.labels, model.classifier.axes), {NODE_LABEL_KEY}.union(FORMATS).difference(keep)) def delete_if_exists(dicts, keys): for d in dicts: for key in keys: try: del d[key] except KeyError: pass def main(args): os.makedirs(args.out_dir, exist_ok=True) for filename in args.models: model = load_model(filename) strip_multitask(model, args.keep) model.filename = os.path.join(args.out_dir, os.path.basename(filename)) model.save() if __name__ == "__main__": argparser = ArgParser(description="Load TUPA model and save with just one task's features/weights.") argparser.add_argument("models", nargs="+", help="model file basename(s) to load") argparser.add_argument("-k", "--keep", nargs="+", choices=tuple(filter(None, FORMATS)), default=["ucca"], help="tasks to keep features/weights for") argparser.add_argument("-o", "--out-dir", default=".", help="directory to write modified model files to") main(argparser.parse_args())
def argument_parser():
    # Create command line arguments
    parser = ArgParser()
    # General arguments
    parser.add('--db_section', required=True,
               help="Database section in your .desservices.ini file, e.g., db-desoper or db-destest")
    parser.add("--user", action="store", default=os.environ['USER'],
               help="username that will submit")
    parser.add('--paramfile', is_config_file=True,
               help='Key = Value file that can be used to replace command-line')
    parser.add('--csv',
               help='CSV of exposures and information specified by user. If specified, '
                    'code will use exposures in csv to submit jobs. Must also specify --delimiter')
    parser.add('--exclude_list',
               help='A comma-separated list or line-separated file of exposures to exclude '
                    'from the dataframe')
    parser.add('--delimiter', default=',',
               help='The delimiter if specifying csv and is not comma-separated')
    parser.add('--campaign', required=True,
               help='Directory in pipebox where templates are stored, e.g., '
                    '$PIPEBOX_DIR/templates/pipelines/finalcut/-->Y2A1dev<--')
    parser.add('--savefiles', action='store_true', help='Saves submit files to submit later.')
    parser.add('--queue_size', default=1000,
               help='If set and savefiles is not specified, code will submit specified runs '
                    'up until queue_size is reached. Code will wait until queue drops below '
                    'limit to submit next job')
    parser.add('--total_queue', action='store_true',
               help='If specified, total jobs per pipeline per machine will be counted and '
                    'user will be ignored')
    parser.add('--labels', help='Human-readable labels to "mark" a given processing attempt')
    parser.add('--template_name', help='submitwcl template within pipeline/campaign')
    parser.add('--configfile', help='Name of user cfg file')
    parser.add('--out', help='Output directory for submit files')
    parser.add('--auto', action='store_true', help='Will run autosubmit mode if specified')
    parser.add('--resubmit_failed', action='store_true', help='Will resubmit failed runs')
    parser.add('--resubmit_max', default=99, help='Set max attempt number for resubmit-failed.')
    parser.add('--ignore_processed', action='store_true',
               help='Will skip any expnum that has been attempted to process, pass/fail.')
    parser.add('--wait', default=30, help='Wait time (seconds) between dessubmits. Default=30s')
    # Archive arguments
    parser.add('--target_site', required=True, help='Computing node, i.e., fermigrid-sl6')
    parser.add('--archive_name',
               help='Home archive to store products, e.g., desar2home,prodbeta,...')
    parser.add('--project', default='ACT',
               help='Archive directory where runs are stored, e.g., $ARCHIVE/-->ACT<--/finalcut/')
    parser.add('--rundir', help='Archive directory structure')
    parser.add('--http', help='The machine to copy files through: desar0, desar1, desar2')
    # JIRA arguments
    parser.add('--jira_parent', help='JIRA parent ticket under which new ticket will be created.')
    parser.add('--jira_description', help='Description of ticket found in JIRA')
    parser.add('--jira_project', default='DESOPS',
               help='JIRA project where ticket will be created, e.g., DESOPS')
    parser.add('--jira_summary',
               help='Title of JIRA ticket. To submit multiple exposures under same ticket '
                    'you can specify jira_summary')
    parser.add('--jira_user', help='JIRA username')
    parser.add('--jira_section', default='jira-desdm',
               help='JIRA section in .desservices.ini file')
    parser.add('--ignore_jira', default=False, action='store_true',
               help="If specified will not connect to JIRA, but must specify reqnum and jira_parent.")
    parser.add('--reqnum', help='Part of processing unique identifier. Tied to JIRA ticket number')
    parser.add('--decade', action='store_true', help='Uses the DECADE subsection of WCL')
    # EUPS arguments
    parser.add('--eups_stack', action='append', nargs='+', required=True,
               help='EUPS production stack, e.g., finalcut Y2A1+4')
    # Science arguments
    parser.add('--ccdnum', help='CCDs to be processed.')
    parser.add('--minsigma', help='Specify minsigma for immask (defaults to 6.0)')
    parser.add('--nite', help='For auto mode: if specified will submit all exposures found from nite')
    parser.add('--niterange', nargs='+', action='append', help='Specify a range of nites')
    parser.add('--RA', '-ra', nargs='+', action='append',
               help='RA in deg., in the order of min max')
    parser.add('--Dec', '-dec', nargs='+', action='append',
               help='Dec in deg., in the order of min max')
    parser.add('--epoch',
               help='Observing epoch. If not specified, will be calculated. E.g., '
                    'SVE1,SVE2,Y1E1,Y1E2,Y2E1,Y2E2...')
    parser.add('--inputcals_file',
               help='Key=Var list of calibrations to be used in processing. See '
                    '$PIPEBOX_DIR/templates/inputcals for a sample')
    # glide-in options
    parser.add('--time_to_live', default=None, type=float,
               help='The amount of time-to-live (in hours) for the job to grab a glidein')
    # Transfers
    parser.add('--nginx', action='store_true', help='Use nginx?')
    # Condor options
    parser.add('--request_memory', default=8000,
               help='Amount of memory (MB) to use for processing. Default (8000) is set for '
                    'finalcut on fermigrid-ce nodes. For supercal on fermigrid-ce nodes try 32000.')
    parser.add('--request_disk', default=90000000,
               help='Amount of disk space (MB) to use for processing. Default (90000000) is '
                    'set for finalcut on fermigrid-ce nodes. For supercal on fermigrid-ce '
                    'nodes try 200000000.')
    parser.add('--request_cpus', default=1,
               help='# of cpus to use for processing. Default (1) is set for finalcut on '
                    'fermigrid-ce nodes.')
    return parser
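
# Illustration of the --paramfile/is_config_file behaviour with made-up values:
# configargparse reads "key = value" lines from the file as if the
# corresponding flags had been given on the command line, so required options
# can be satisfied from the paramfile. (Assumes USER is set in the
# environment, since the parser reads os.environ['USER'] at construction.)
parser = argument_parser()
with open("submit.params", "w") as f:
    f.write("campaign = Y2A1dev\n")           # same as --campaign Y2A1dev
    f.write("target_site = fermigrid-sl6\n")  # same as --target_site fermigrid-sl6
args = parser.parse_args(["--paramfile", "submit.params",
                          "--db_section", "db-destest",
                          "--eups_stack", "finalcut", "Y2A1+4"])
assert args.campaign == "Y2A1dev" and args.target_site == "fermigrid-sl6"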
########## --- Start of program --- ##########
if __name__ == "__main__":
    # command line option handling
    # use an environment variable to look for a default config file
    # Alternately, we could use a default location for the file
    # (say `files = ['/etc/pydpiper.cfg', '~/pydpiper.cfg', './pydpiper.cfg']`)
    # TODO this logic is duplicated in application.py
    default_config_file = os.getenv("PYDPIPER_CONFIG_FILE")
    if default_config_file is not None:
        files = [default_config_file]
    else:
        files = []
    parser = ArgParser(default_config_files=files)

    rf.addGenRegArgumentGroup(parser)  # just to get --pipeline-name
    addExecutorArgumentGroup(parser)

    # using parse_known_args instead of parse_args is a hack since we
    # currently send ALL arguments from the main program to the executor
    # on PBS queues (FIXME not yet true on SGE queues, but this is
    # not the best solution anyway).
    # Alternately, we could keep a copy of the executor parser around
    # when constructing the executor shell command
    options = parser.parse_known_args()[0]

    # Check to make sure some executors have been specified.
    noExecSpecified(options.num_exec)
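
# parse_known_args returns a (namespace, unrecognized) pair instead of
# erroring on unknown flags, which is what lets the full argv be forwarded
# to executors; a self-contained sketch of the difference:
import argparse

p = argparse.ArgumentParser()
p.add_argument("--num-executors", dest="num_exec", type=int, default=-1)
# parse_args would exit with "unrecognized arguments: --queue-type sge";
# parse_known_args hands the extras back instead:
opts, rest = p.parse_known_args(["--num-executors", "4", "--queue-type", "sge"])
assert opts.num_exec == 4 and rest == ["--queue-type", "sge"]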
        if (all([os.path.exists(manifest) for manifest in self.manifests.values()])
                and not self.overwrite):
            print("Found manifest files, skipping ingest; use --overwrite to overwrite them.")
            return

        for setn, manifest in self.manifests.items():
            pairs = self.train_or_val_pairs(setn)
            records = [(os.path.relpath(fname, self.out_dir), int(tgt))
                       for fname, tgt in pairs]
            records.insert(0, ('@FILE', 'STRING'))
            np.savetxt(manifest, records, fmt='%s\t%s')


if __name__ == "__main__":
    parser = ArgParser()
    parser.add_argument('--input_dir', required=True, default=None,
                        help='Directory to find input tars')
    parser.add_argument('--out_dir', required=True, default=None,
                        help='Directory to write ingested files')
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    parser.add_argument('--overwrite', action='store_true', default=False,
                        help='Overwrite files')
    args = parser.parse_args()

    logger = logging.getLogger(__name__)

    bw = IngestI1K(input_dir=args.input_dir, out_dir=args.out_dir,
                   target_size=args.target_size, overwrite=args.overwrite)
    bw.run()
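
# For reference, np.savetxt above writes a tab-separated manifest whose first
# row is the @FILE/STRING header; a minimal reproduction with made-up paths:
import numpy as np

records = [('@FILE', 'STRING'),                # header row read by the loader
           ('train/n01440764_10026.JPEG', 0)]  # hypothetical rel. path, label
np.savetxt('train-index.csv', records, fmt='%s\t%s')
# train-index.csv now holds one tab-separated record per line:
#   @FILE<TAB>STRING
#   train/n01440764_10026.JPEG<TAB>0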
def _mk_lsq12_parser():
    p = ArgParser(add_help=False)
    # group = parser.add_argument_group("LSQ12 registration options",
    #                                   "Options for performing a pairwise, affine registration")
    p.set_defaults(run_lsq12=True)
    p.set_defaults(generate_tournament_style_lsq12_avg=False)
    p.add_argument("--run-lsq12", dest="run_lsq12", action="store_true",
                   help="Actually run the 12 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq12", dest="run_lsq12", action="store_false",
                   help="Opposite of --run-lsq12")
    p.add_argument("--lsq12-max-pairs", dest="max_pairs", type=parse_nullable_int, default=25,
                   help="Maximum number of pairs to register together ('None' implies all pairs). "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-likefile", dest="like_file", type=str, default=None,
                   help="Can optionally specify a 'like'-file for resampling at the end of pairwise "
                        "alignment. Default is None, which means that the input file will be used. "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-protocol", dest="protocol", type=str,
                   help="Can optionally specify a registration protocol that is different from defaults. "
                        "Parameters must be specified as in the following example: \n"
                        "applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
                        "[Default = %(default)s].")
    #p.add_argument("--generate-tournament-style-lsq12-avg", dest="generate_tournament_style_lsq12_avg",
    #               action="store_true",
    #               help="Instead of creating the average of the lsq12 resampled files "
    #                    "by simply averaging them directly, create an iterative average "
    #                    "as follows. Perform a non linear registration between pairs "
    #                    "of files. Resample each file halfway along that transformation "
    #                    "in order for them to end up in the middle. Average those two files. "
    #                    "Then continue on to the next level as in a tournament. [default = %(default)s]")
    #p.add_argument("--no-generate-tournament-style-lsq12-avg", dest="generate_tournament_style_lsq12_avg",
    #               action="store_false",
    #               help="Opposite of --generate-tournament-style-lsq12-avg")
    return p
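
# parse_nullable_int is not defined in this excerpt; presumably it maps the
# literal string 'None' to None and everything else through int(), roughly
# as follows (a sketch, not the actual helper):
def parse_nullable_int(string: str):
    # lets users write --lsq12-max-pairs None to mean "all pairs"
    return None if string == "None" else int(string)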
def parse(parser: Parser, args: List[str]) -> Namespace:
    # TODO: accepting a comma-separated list might allow more flexibility
    default_config_file = os.getenv("PYDPIPER_CONFIG_FILE")
    if default_config_file is not None:
        try:
            with open(default_config_file) as _:
                pass
        except OSError:
            warnings.warn(f"PYDPIPER_CONFIG_FILE is set to '{default_config_file}', which can't be opened.")
    config_files = [default_config_file] if default_config_file else []

    # First, build a parser that's aware of all options
    # (will be used for help/version/error messages).
    # This must be tried _before_ the partial parsing attempts
    # in order to get correct help/version messages.
    main_parser = ArgParser(default_config_files=config_files)

    # TODO: abstract out the recursive traversals in go_1 and go_2 into a `walk` function
    def go_1(p, current_prefix):
        if isinstance(p, BaseParser):
            g = main_parser.add_argument_group(p.group_name)
            for a in p.argparser._actions:
                new_a = copy.copy(a)
                ss = copy.deepcopy(new_a.option_strings)
                for ix, s in enumerate(new_a.option_strings):
                    if s.startswith("--"):
                        ss[ix] = "-" + current_prefix + "-" + s[2:]  # "" was "-"
                    else:
                        raise NotImplementedError(
                            "sorry, I only understand flags starting with `--` at the moment, but got %s" % s)
                new_a.option_strings = ss
                g._add_action(new_a)
        elif isinstance(p, CompoundParser):
            for q in p.parsers:
                go_1(q.parser, current_prefix + (('-' + q.prefix) if q.prefix is not None else ''))
        else:
            raise TypeError(
                "parser %s wasn't a %s (%s or %s) but a %s" %
                (p, Parser, BaseParser, CompoundParser, p.__class__))

    go_1(parser, "")

    # Use this parser to exit with a helpful message if parse fails or --help/--version specified:
    main_parser.parse_args(args)

    # Now, use parse_known_args for each parser in the tree of parsers to fill
    # the appropriate namespace object ...
    def go_2(p, current_prefix, current_ns):
        if isinstance(p, BaseParser):
            new_p = ArgParser(default_config_files=config_files)
            for a in p.argparser._actions:
                new_a = copy.copy(a)
                ss = copy.deepcopy(new_a.option_strings)
                for ix, s in enumerate(new_a.option_strings):
                    if s.startswith("--"):
                        ss[ix] = "-" + current_prefix + "-" + s[2:]
                    else:
                        raise NotImplementedError
                new_a.option_strings = ss
                new_p._add_action(new_a)
            _used_args, _rest = new_p.parse_known_args(args, namespace=current_ns)
            # add a "_flags" field to each object so we know what flags caused a certain option to be set:
            # (however, note that post-parsing we may munge around ...)
            flags_dict = defaultdict(set)
            for action in new_p._actions:
                for opt in action.option_strings:
                    flags_dict[action.dest].add(opt)
            current_ns.flags_ = Namespace(**flags_dict)
            # TODO: could continue parsing from `_rest` instead of original `args`
        elif isinstance(p, CompoundParser):
            # could also check for the CompoundParser case and not set flags there,
            # since there will never be any
            current_ns.flags_ = set()
            for q in p.parsers:
                ns = Namespace()
                if q.namespace in current_ns.__dict__:
                    # TODO could also allow, say, a None
                    raise ValueError("Namespace field '%s' already in use" % q.namespace)
                else:
                    # gross but how to write n-ary identity fn that behaves sensibly on single arg??
                    current_ns.__dict__[q.namespace] = ns
                # FIXME this casting doesn't work for configurations with positional arguments,
                # which aren't unpacked correctly -- better to use a namedtuple
                # (making all arguments keyword-only also works, but then you have to supply
                # often meaningless defaults in the __init__)
                go_2(q.parser,
                     current_prefix=current_prefix + (('-' + q.prefix) if q.prefix is not None else ''),
                     current_ns=ns)
                # If a cast function is provided, apply it to the namespace, possibly doing dynamic
                # type checking and also allowing the checker to provide hinting for the types of the fields
                flags = ns.flags_
                del ns.flags_
                fixed = (q.cast(current_ns.__dict__[q.namespace])  # (q.cast(**vars(current_ns.__dict__[q.namespace]))
                         if q.cast else current_ns.__dict__[q.namespace])
                if isinstance(fixed, tuple):
                    fixed = fixed._replace(flags_=flags)  # NamedTuple update API
                elif isinstance(fixed, Namespace):
                    setattr(fixed, "flags_", flags)
                else:
                    raise ValueError("currently only Namespace and NamedTuple objects are supported return types from "
                                     "parsing; got %s (a %s)" % (fixed, type(fixed)))
                current_ns.__dict__[q.namespace] = fixed
                # TODO current_ns or current_namespace or ns or namespace?
        else:
            raise TypeError("parser %s wasn't a %s (%s or %s) but a %s" %
                            (p, Parser, BaseParser, CompoundParser, p.__class__))

    main_ns = Namespace()
    go_2(parser, current_prefix="", current_ns=main_ns)
    return main_ns
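
# The option-string rewriting in go_1/go_2 renames each component flag by
# splicing in the accumulated prefix, which itself carries a leading '-'
# (current_prefix + ('-' + q.prefix)); a standalone sketch with a toy flag:
def prefix_flag(option_string: str, accumulated_prefix: str) -> str:
    if not option_string.startswith("--"):
        raise NotImplementedError("only long options are handled")
    return "-" + accumulated_prefix + "-" + option_string[2:]

# with accumulated prefix "-lsq12", "--max-pairs" becomes "--lsq12-max-pairs"
assert prefix_flag("--max-pairs", "-lsq12") == "--lsq12-max-pairs"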
def _mk_stats_parser():
    p = ArgParser(add_help=False)
    # p.add_argument_group("Statistics options",
    #                      "Options for calculating statistics.")
    default_fwhms = "0.2"
    p.set_defaults(stats_kernels=default_fwhms)
    p.set_defaults(calc_stats=True)
    p.add_argument("--calc-stats", dest="calc_stats", action="store_true",
                   help="Calculate statistics at the end of the registration. [Default = %(default)s]")
    p.add_argument("--no-calc-stats", dest="calc_stats", action="store_false",
                   help="If specified, statistics are not calculated. Opposite of --calc-stats.")
    p.add_argument("--stats-kernels", dest="stats_kernels", type=str,
                   help="comma-separated list of blurring kernels for analysis. [Default = %(default)s].")
    return p
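
# --stats-kernels is stored as a plain string, so downstream code presumably
# splits it itself; e.g., using the parser built above:
opts, _ = _mk_stats_parser().parse_known_args(["--stats-kernels", "0.5,0.2,0.1"])
fwhms = [float(k) for k in opts.stats_kernels.split(",")]  # [0.5, 0.2, 0.1]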
def _mk_lsq6_parser(with_nuc: bool = True, with_inormalize: bool = True):
    p = ArgParser(add_help=False)
    p.set_defaults(lsq6_method="lsq6_large_rotations")
    p.set_defaults(nuc=with_nuc)
    p.set_defaults(inormalize=with_inormalize)
    p.set_defaults(copy_header_info=False)
    # TODO: should this actually be part of the LSQ6 component? What would it return in this case?
    p.set_defaults(run_lsq6=True)
    p.add_argument("--run-lsq6", dest="run_lsq6", action="store_true",
                   help="Actually run the 6 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq6", dest="run_lsq6", action="store_false",
                   help="Opposite of --run-lsq6")
    # TODO should be part of some mutually exclusive group ...
    p.add_argument("--init-model", dest="init_model", type=str, default=None,
                   help="File in standard space in the initial model. The initial model "
                        "can also have a file in native space and potentially a transformation "
                        "file. See our wiki (https://wiki.mouseimaging.ca/) for detailed "
                        "information on initial models. [Default = %(default)s]")
    p.add_argument("--lsq6-target", dest="lsq6_target", type=str, default=None,
                   help="File to be used as the target for the initial (often 6-parameter) alignment. "
                        "[Default = %(default)s]")
    p.add_argument("--bootstrap", dest="bootstrap", action="store_true", default=False,
                   help="Use the first input file to the pipeline as the target for the "
                        "initial (often 6-parameter) alignment. [Default = %(default)s]")
    # TODO: add information about the pride of models to the code in such a way that it
    # is reflected on GitHub
    p.add_argument("--pride-of-models", dest="pride_of_models", type=str, default=None,
                   help="(selected longitudinal pipelines only!) Specify a csv file that contains the mapping of "
                        "all your initial models at different time points. The idea is that you might "
                        "want to use different initial models for the time points in your data. "
                        "The csv file should have one column called \"model_file\", and one column "
                        "called \"time_point\". The time points can be given in either integer values "
                        "or float values. Each model file should point to the file in standard space "
                        "for that particular model. [Default = %(default)s]")
    # TODO: do we need to implement this option? This was for Kieran Short, but the procedure
    # he will be using in the future most likely will not involve this option.
    # group.add_argument("--lsq6-alternate-data-prefix", dest="lsq6_alternate_prefix",
    #                    type=str, default=None,
    #                    help="Specify a prefix for an augmented data set to use for the 6 parameter "
    #                         "alignment. Assumptions: there is a matching alternate file for each regular input "
    #                         "file, e.g. input files are: input_1.mnc input_2.mnc ... input_n.mnc. If the "
    #                         "string provided for this flag is \"aug_\", then the following files should exist: "
    #                         "aug_input_1.mnc aug_input_2.mnc ... aug_input_n.mnc. These files are assumed to be "
    #                         "in the same orientation/location as the regular input files. They will be used for "
    #                         "the 6 parameter alignment. The transformations will then be used to transform "
    #                         "the regular input files, with which the pipeline will continue.")
    p.add_argument("--lsq6-simple", dest="lsq6_method", action="store_const",
                   const="lsq6_simple",
                   help="Run a 6 parameter alignment assuming that the input files are roughly "
                        "aligned: same space, similar orientation. Keep in mind that if you use an "
                        "initial model with both a standard and a native space, the assumption is "
                        "that the input files are already roughly aligned to the native space. "
                        "Three iterations are run: 1st is 17 times stepsize blur, 2nd is 9 times "
                        "stepsize gradient, 3rd is 4 times stepsize blur. [Default = %(default)s]")
    p.add_argument("--lsq6-centre-estimation", dest="lsq6_method", action="store_const",
                   const="lsq6_centre_estimation",
                   help="Run a 6 parameter alignment assuming that the input files have a "
                        "similar orientation, but are scanned in different coils/spaces. [Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations", dest="lsq6_method", action="store_const",
                   const="lsq6_large_rotations",
                   help="Run a 6 parameter alignment assuming that the input files have a random "
                        "orientation and are scanned in different coils/spaces. A brute force search over "
                        "the x,y,z rotation space is performed to find the best 6 parameter alignment. "
                        "[Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations-tmp-dir", dest="rotation_tmp_dir", type=str,
                   default="/dev/shm/",
                   help="Specify the directory that rotational_minctracc.py uses for temporary files. "
                        "By default we use /dev/shm/, because this program involves a lot of I/O, and "
                        "this is probably one of the fastest ways to provide this. [Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations-parameters", dest="rotation_params", type=str,
                   default="5,4,10,8",
                   help="Settings for the large rotation alignment. factor=factor based on smallest file "
                        "resolution: 1) blur factor, 2) resample step size factor, 3) registration step size "
                        "factor, 4) w_translations factor ***** if you are working with mouse brain data "
                        "the defaults do not have to be based on the file resolution; a default set of "
                        "settings works for all mouse brains. In order to use those settings, specify "
                        "\"mousebrain\" as the argument for this option. ***** [default = %(default)s]")
    p.add_argument("--lsq6-rotational-range", dest="rotation_range", type=int, default=50,
                   help="Settings for the rotational range in degrees when running the large rotation "
                        "alignment. [Default = %(default)s]")
    p.add_argument("--lsq6-rotational-interval", dest="rotation_interval", type=int, default=10,
                   help="Settings for the rotational interval in degrees when running the large rotation "
                        "alignment. [Default = %(default)s]")
    p.add_argument("--nuc", dest="nuc", action="store_true",
                   help="Perform non-uniformity correction. [Default = %(default)s]")
    p.add_argument("--no-nuc", dest="nuc", action="store_false",
                   help="If specified, do not perform non-uniformity correction. Opposite of --nuc.")
    p.add_argument("--inormalize", dest="inormalize", action="store_true",
                   help="Normalize the intensities after lsq6 alignment and nuc, if done. "
                        "[Default = %(default)s]")
    p.add_argument("--no-inormalize", dest="inormalize", action="store_false",
                   help="If specified, do not perform intensity normalization. Opposite of --inormalize.")
    p.add_argument("--copy-header-info-to-average", dest="copy_header_info", action="store_true",
                   help="Copy the MINC header information of the first input file into the "
                        "average that is created. [Default = %(default)s]")
    p.add_argument("--no-copy-header-info-to-average", dest="copy_header_info", action="store_false",
                   help="Opposite of --copy-header-info-to-average.")
    p.add_argument("--lsq6-protocol", dest="protocol_file", type=str, default=None,
                   help="Specify an lsq6 protocol that overrides the default setting for stages in "
                        "the 6 parameter minctracc call. Parameters must be specified as in the following \n"
                        "example: applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
                        "[Default = %(default)s].")
    return p
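
# These _mk_*_parser helpers use add_help=False, the argparse convention for
# parsers meant to be composed via parents=; a hypothetical combination:
parser = ArgParser(parents=[_mk_lsq6_parser(), _mk_lsq12_parser(), _mk_stats_parser()])
options = parser.parse_args(["--lsq6-simple", "--no-calc-stats"])
assert options.lsq6_method == "lsq6_simple"  # store_const overrides the default
assert options.calc_stats is False           # store_false flips set_defaults(True)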
def _mk_execution_parser(p: ArgParser) -> ArgParser:
    # parser = ArgParser(add_help=False)
    group = p.add_argument_group("Executor options",
                                 "Options controlling how and where the code is run.")
    group.add_argument("--uri-file", dest="urifile", type=str, default=None,
                       help="Location for uri file if NameServer is not used. "
                            "If not specified, default is current working directory.")
    group.add_argument("--use-ns", dest="use_ns", action="store_true",
                       help="Use the Pyro NameServer to store object locations. Currently a Pyro "
                            "nameserver must be started separately for this to work.")
    group.add_argument("--latency-tolerance", dest="latency_tolerance", type=float, default=600.0,
                       help="Allowed grace period by which an executor may miss a heartbeat tick "
                            "before being considered failed. [Default = %(default)s]")
    group.add_argument("--num-executors", dest="num_exec", type=int, default=-1,
                       help="Number of independent executors to launch. [Default = %(default)s. "
                            "Code will not run without an explicit number specified.]")
    group.add_argument("--max-failed-executors", dest="max_failed_executors", type=int, default=10,
                       help="Maximum number of failed executors before we stop relaunching. "
                            "[Default = %(default)s]")
    # TODO: add corresponding --monitor-heartbeats
    group.add_argument("--no-monitor-heartbeats", dest="monitor_heartbeats", action="store_false",
                       help="Don't assume executors have died if they don't check in with the "
                            "server (NOTE: this can hang your pipeline if an executor crashes).")
    group.add_argument("--time", dest="time", type=str, default="23:59:59",
                       help="Wall time to request for each server/executor in the format hh:mm:ss. "
                            "Required only if --queue-type=pbs. Current default on PBS is %(default)s.")
    group.add_argument("--proc", dest="proc", type=int, default=1,
                       help="Number of processes per executor. Also sets max value for processor "
                            "use per executor. [Default = %(default)s]")
    group.add_argument("--mem", dest="mem", type=float, default=6,
                       help="Total amount of requested memory (in GB) for all processes the "
                            "executor runs. [Default = %(default)s].")
    group.add_argument("--pe", dest="pe", type=str, default=None,
                       help="Name of the SGE pe, if any. [Default = %(default)s]")
    group.add_argument("--mem-request-attribute", dest="mem_request_attribute", type=str, default=None,
                       help="Name of the resource attribute to request for managing memory limits. "
                            "[Default = %(default)s]")
    group.add_argument("--greedy", dest="greedy", action="store_true",
                       help="Request the full amount of RAM specified by --mem rather than the "
                            "(lesser) amount needed by runnable jobs. Always use this if your "
                            "executor is assigned a full node.")
    group.add_argument("--ppn", dest="ppn", type=int, default=8,
                       help="Number of processes per node. Used when --queue-type=pbs. "
                            "[Default = %(default)s].")
    group.add_argument("--queue-name", dest="queue_name", type=str, default=None,
                       help="Name of the queue, e.g., all.q (MICe) or batch (SciNet)")
    group.add_argument("--queue-type", dest="queue_type", type=str, default=None,
                       help="""Queue type to submit jobs, i.e., "sge" or "pbs". [Default = %(default)s]""")
    group.add_argument("--queue-opts", dest="queue_opts", type=str, default="",
                       help="A string of extra arguments/flags to pass to qsub. [Default = %(default)s]")
    group.add_argument("--executor-start-delay", dest="executor_start_delay", type=int, default=180,
                       help="Seconds before starting remote executors when running the server on the grid")
    group.add_argument("--submit-server", dest="submit_server", action="store_true",
                       help="Submit the server to the grid. Currently works only with PBS/Torque systems.")
    group.add_argument("--no-submit-server", dest="submit_server", action="store_false",
                       help="Opposite of --submit-server. [default]")
    group.add_argument("--max-idle-time", dest="max_idle_time", type=int, default=1,
                       help="The number of minutes an executor is allowed to continuously sleep, i.e. "
                            "wait for an available job, while active on a compute node/farm before it "
                            "kills itself due to resource hogging. [Default = %(default)s]")
    group.add_argument("--time-to-accept-jobs", dest="time_to_accept_jobs", type=int,
                       help="The number of minutes after which an executor will not accept new jobs "
                            "anymore. This can be useful when running executors on a batch system "
                            "where other (competing) jobs run for a limited amount of time. The "
                            "executors can behave in a similar way by giving them a rough end time. "
                            "[Default = %(default)s]")
    group.add_argument('--local', dest="local", action='store_true',
                       help="Don't submit anything to any specified queueing system but instead "
                            "run as a server/executor")
    group.add_argument("--config-file", type=str, metavar='config_file', is_config_file=True,
                       required=False, help='Config file location')
    group.add_argument("--prologue-file", type=str, metavar='file',
                       help="Location of a shell script to inline into PBS submit script to set "
                            "paths, load modules, etc.")
    group.add_argument("--min-walltime", dest="min_walltime", type=int, default=0,
                       help="Min walltime (s) allowed by the queuing system [Default = %(default)s]")
    group.add_argument("--max-walltime", dest="max_walltime", type=int, default=None,
                       help="Max walltime (s) allowed for jobs on the queuing system, or infinite "
                            "if None [Default = %(default)s]")
    group.add_argument("--default-job-mem", dest="default_job_mem", type=float, default=1.75,
                       help="Memory (in GB) to allocate to jobs which don't make a request. "
                            "[Default=%(default)s]")
    group.add_argument("--memory-factor", dest="memory_factor", type=float, default=1,
                       help="Overall factor by which to scale all memory estimates/requests "
                            "(including default job memory, but not executor totals (--mem)), "
                            "say due to system differences or overcommitted nodes. [Default=%(default)s]")
    group.add_argument("--cmd-wrapper", dest="cmd_wrapper", type=str, default="",
                       help="Wrapper inside of which to run the command, e.g., '/usr/bin/time -v'. "
                            "[Default='%(default)s']")
    group.add_argument("--check-input-files", dest="check_input_files", action="store_true",
                       help="Check overall pipeline inputs exist and, when applicable, "
                            "are valid MINC files [Default=%(default)s]")
    group.add_argument("--no-check-input-files", dest="check_input_files", action="store_false",
                       help="Opposite of --check-input-files")
    group.set_defaults(check_input_files=True)
    group.add_argument("--check-outputs", dest="check_outputs", action="store_true",
                       help="Check output files exist and error if not [Default=%(default)s]")
    group.add_argument("--no-check-outputs", dest="check_outputs", action="store_false",
                       help="Opposite of --check-outputs.")
    group.set_defaults(check_outputs=False)
    group.add_argument("--fs-delay", dest="fs_delay", type=float, default=5,
                       help="Time (sec) to allow for NFS to become consistent after stage completion "
                            "[Default=%(default)s]")
    group.add_argument("--executor_wrapper", dest="executor_wrapper", type=str, default="",
                       help="Command inside of which to run the executor. [Default='%(default)s']")
    group.add_argument("--defer-directory-creation", default=False, action="store_true",
                       dest="defer_directory_creation",
                       help="Create relevant directories when a stage is run instead of at startup "
                            "[Default=%(default)s]")
    return p
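
# A quick sanity check of the paired store_true/store_false flags and the
# group-level set_defaults above (a sketch; parse_known_args avoids having
# to supply the rest of the pipeline's CLI):
p = _mk_execution_parser(ArgParser(add_help=False))
opts, _ = p.parse_known_args([])
assert opts.check_input_files is True and opts.check_outputs is False
opts, _ = p.parse_known_args(["--no-check-input-files", "--greedy"])
assert opts.check_input_files is False and opts.greedy is True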