Ejemplo n.º 1
1
def main():
    """Render each listed TUPA model to a .png via visualize()."""
    parser = ArgParser(description="Load TUPA model and visualize, saving to .png file.")
    parser.add_argument("models", nargs="+", help="model file basename(s) to load")
    parsed = parser.parse_args()
    for model_file in parsed.models:
        visualize(load_model(model_file), model_file)
Ejemplo n.º 2
0
def setup_config_arguments(argParser: configargparse.ArgParser):
    """Register every command-line / config-file option on *argParser*."""
    argParser.add_argument(
        '-c', '--config-file',
        is_config_file=True,
        required=False,
        help='Path to the Config File which should be used.')
    argParser.add_argument(
        '-t', '--telegram_api_token',
        required=True,
        help='Your Telegram Bot - Token.')
    argParser.add_argument(
        '-ma', '--mqtt_server_address',
        default='127.0.0.1',
        required=False,
        help='The IP - Address of the MQTT - Server')
    argParser.add_argument(
        '-mp', '--mqtt_server_port',
        type=int,
        # NOTE(review): the conventional MQTT port is 1883; 1887 looks unusual — confirm intended.
        default=1887,
        required=False,
        help='The port of the MQTT - Server.')
    argParser.add_argument(
        '-d', '--debug',
        action='store_true',
        default=False,
        required=False,
        help='Set this Switch for additional debug logs.')
Ejemplo n.º 3
0
def _mk_crop_to_brain_parser():
    """Build the (help-less) parser holding crop-to-brain options."""
    parser = ArgParser(add_help=False)
    # The three bounding-box extents differ only in their axis letter.
    for axis in ("x", "y", "z"):
        parser.add_argument(
            "--crop_bbox_%s" % axis, dest="bbox_%s" % axis, type=float, default=0,
            help="length of bounding box in %s direction (default units in pixels)" % axis)
    parser.add_argument(
        "--crop_buffer_z", dest="buffer_z", type=float, default=0,
        help="Add forced buffer in z direction (default units in pixels) (often the images sit too far forward)")
    parser.add_argument(
        "--crop_mm_units", dest="mm_units", action="store_true", default=False,
        help="Units of shift are in mm instead of pixels")
    return parser
Ejemplo n.º 4
0
def _mk_lsq12_parser():
    """Build the (help-less) parser with pairwise 12-parameter (affine) registration options."""
    parser = ArgParser(add_help=False)
    # The alignment runs unless --no-run-lsq12 is given.
    parser.set_defaults(run_lsq12=True)
    parser.add_argument(
        "--run-lsq12", dest="run_lsq12", action="store_true",
        help="Actually run the 12 parameter alignment [default = %(default)s]")
    parser.add_argument(
        "--no-run-lsq12", dest="run_lsq12", action="store_false",
        help="Opposite of --run-lsq12")
    parser.add_argument(
        "--lsq12-max-pairs", dest="max_pairs", type=parse_nullable_int, default=25,
        help="Maximum number of pairs to register together ('None' implies all pairs). "
             "[Default = %(default)s]")
    parser.add_argument(
        "--lsq12-likefile", dest="like_file", type=str, default=None,
        help="Can optionally specify a 'like'-file for resampling at the end of pairwise "
             "alignment. Default is None, which means that the input file will be used. "
             "[Default = %(default)s]")
    parser.add_argument(
        "--lsq12-protocol", dest="protocol", type=str,
        help="Can optionally specify a registration protocol that is different from defaults. "
             "Parameters must be specified as in the following example: \n"
             "applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
             "[Default = %(default)s].")
    return parser
Ejemplo n.º 5
0
def get_job_status_trigger_config(args) -> JobStatusTriggerConfig:
    """Parse *args* (plus matching environment variables) into a JobStatusTriggerConfig.

    Unknown arguments are ignored (parse_known_args), so this parser can share
    argv with other components.
    """
    parser = ArgParser(auto_env_var_prefix="", prog=APP_NAME)

    # Renamed the parameter from `input` to avoid shadowing the builtin.
    def benchmark_status_from_input(raw: str) -> BenchmarkJobStatus:
        # Tolerate stray whitespace/commas around individual nargs="+" values.
        return BenchmarkJobStatus(raw.strip(" \t,"))

    # required
    parser.add_argument("--job-name", type=str, env_var="JOB_NAME", required=True)
    parser.add_argument(
        "--trigger-statuses", type=benchmark_status_from_input, nargs="+", env_var="TRIGGER_STATUSES", required=True
    )
    parser.add_argument("--command", type=str, env_var="COMMAND", required=True)

    # optional
    parser.add_argument("--job-namespace", type=str, default="default", env_var="JOB_NAMESPACE", required=False)
    parser.add_argument(
        "--job-not-found-grace-period-seconds",
        type=int,
        default=30,
        env_var="JOB_NOT_FOUND_GRACE_PERIOD_SECONDS",
        required=False,
    )

    parsed_args, _ = parser.parse_known_args(args)

    return JobStatusTriggerConfig(
        job_namespace=parsed_args.job_namespace,
        job_name=parsed_args.job_name,
        trigger_statuses=parsed_args.trigger_statuses,
        job_not_found_grace_period_seconds=parsed_args.job_not_found_grace_period_seconds,
        command=parsed_args.command,
    )
 def add_arguments(cls, arg_parser: ArgParser) -> None:
     """
     Add pipeline-specific arguments. The parsed arguments are passed to the constructor as keywords.
     """
     arg_parser.add_argument("-c", is_config_file=True, help="config file path")
     arg_parser.add_argument("--debug", action="store_true", help="turn on debugging")
     arg_parser.add_argument("-f", "--force",
                             action="store_true",
                             help="force extract and transform, ignoring any cached data")
     arg_parser.add_argument("--force-extract",
                             action="store_true",
                             help="force extract, ignoring any cached data")
     arg_parser.add_argument("--logging-level",
                             help="set logging-level level (see Python logging module)")
Ejemplo n.º 7
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     config_parser.add_argument("--destination",
                                default=".",
                                required=False,
                                help="Path of a directory to store the packaged tgz.")
Ejemplo n.º 8
0
def main():
    """Export each listed TUPA model as .npz and save its config alongside."""
    parser = ArgParser(description="Load TUPA model and export as .npz file.")
    parser.add_argument("models", nargs="+", help="model file basename(s) to load")
    parsed = parser.parse_args()
    for model_file in parsed.models:
        model = load_model(model_file)
        save_model(model, model_file)
        model.config.save(model_file)
Ejemplo n.º 9
0
 def add_arguments(self, arg_parser: ArgParser, add_parent_args):
     """Register CSKG release-file and data-directory options on *arg_parser*."""
     arg_parser.add_argument("--cskg-release-zip-file-path",
                             help="path to a CSKG release .zip file")
     arg_parser.add_argument("--data-dir-path",
                             default=str(paths.DATA_DIR),
                             help="path to a directory to store extracted and transformed data")
Ejemplo n.º 10
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     config_parser.add_argument("--keep-chart-changes",
                                action="store_true",
                                required=False,
                                help=f"Should the changes made in {_chart_yaml} be kept")
Ejemplo n.º 11
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     config_parser.add_argument("-c", "--chart-dir",
                                default=".",
                                required=False,
                                help="Path to the Helm Chart to build.")
Ejemplo n.º 12
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     config_parser.add_argument(
         "--kubelinter-config",
         required=False,
         help=f"Path to optional 'kube-linter' config file. If empty, tries to load "
              f"'{self._default_kubelinter_cfg_file}'.")
def _mk_TV_stitch_parser():
    """Build the (help-less) parser holding TV_stitch options."""
    parser = ArgParser(add_help=False)
    parser.add_argument(
        "--scale-output", dest="scale_output",
        type=int,
        default=None,  # TODO raise a warning when this isn't specified
        help="Multiply slice images by this value before saving to file")
    parser.add_argument(
        "--keep-stitch-tmp", dest="keep_tmp",
        default=False, action="store_true",
        help="Keep temporary files from TV_stitch.")
    return parser
Ejemplo n.º 14
0
def main():
    """Re-save each listed model under a new name formed by appending a suffix."""
    parser = ArgParser(description="Load TUPA model and save again to a different file.")
    parser.add_argument("models", nargs="+", help="model file basename(s) to load")
    parser.add_argument("-s", "--suffix", default=".1", help="filename suffix to append")
    parsed = parser.parse_args()
    for model_file in parsed.models:
        model = load_model(model_file)
        # Both the model and its classifier carry their own filename attribute.
        model.filename += parsed.suffix
        model.classifier.filename += parsed.suffix
        model.save()
Ejemplo n.º 15
0
def _mk_lsq12_parser():
    """Build the (help-less) parser with pairwise 12-parameter (affine) registration options."""
    parser = ArgParser(add_help=False)
    # The alignment runs unless --no-run-lsq12 is given; the tournament-style
    # average stays off by default (its flags below are currently disabled).
    parser.set_defaults(run_lsq12=True)
    parser.set_defaults(generate_tournament_style_lsq12_avg=False)
    parser.add_argument(
        "--run-lsq12", dest="run_lsq12", action="store_true",
        help="Actually run the 12 parameter alignment [default = %(default)s]")
    parser.add_argument(
        "--no-run-lsq12", dest="run_lsq12", action="store_false",
        help="Opposite of --run-lsq12")
    parser.add_argument(
        "--lsq12-max-pairs", dest="max_pairs", type=parse_nullable_int, default=25,
        help="Maximum number of pairs to register together ('None' implies all pairs). "
             "[Default = %(default)s]")
    parser.add_argument(
        "--lsq12-likefile", dest="like_file", type=str, default=None,
        help="Can optionally specify a 'like'-file for resampling at the end of pairwise "
             "alignment. Default is None, which means that the input file will be used. "
             "[Default = %(default)s]")
    parser.add_argument(
        "--lsq12-protocol", dest="protocol", type=str,
        help="Can optionally specify a registration protocol that is different from defaults. "
             "Parameters must be specified as in the following example: \n"
             "applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
             "[Default = %(default)s].")
    # NOTE: the --[no-]generate-tournament-style-lsq12-avg flags (building the lsq12
    # average iteratively, tournament-style, instead of a direct average) are
    # disabled; only the set_defaults(False) above takes effect.
    return parser
Ejemplo n.º 16
0
 def add_arguments(self, arg_parser: ArgParser):
     """Register benchmark-selection arguments on *arg_parser*."""
     arg_parser.add_argument(
         "--benchmark-name", required=True,
         help="name of the benchmark the submission was tested against (in snake_case)")
     arg_parser.add_argument(
         "--using-test-data",
         help="true if using truncated data for testing (in the test_data directory)\nalters the test file input path")
Ejemplo n.º 17
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     # The two 'ct' options differ only in which kind of file they point at.
     for flag, kind in (("--ct-config", "lint config"), ("--ct-schema", "schema")):
         config_parser.add_argument(flag,
                                    required=False,
                                    help=f"Path to optional 'ct' {kind} file.")
Ejemplo n.º 18
0
def main():
    """Plot dev/test score curves for every model matching the given patterns."""
    parser = ArgParser(description="Visualize scores of a model over the dev set, saving to .png file.")
    parser.add_argument("models", nargs="+", help="model file basename(s) to load")
    parsed = parser.parse_args()
    for pattern in parsed.models:
        # An empty glob falls back to treating the pattern as a literal filename.
        for model_file in sorted(glob(pattern)) or [pattern]:
            basename, _ = os.path.splitext(model_file)
            for div in "dev", "test":
                try:
                    scores = load_scores(basename, div=div)
                except OSError:
                    # No saved scores for this division; skip it.
                    continue
                visualize(scores, basename, div=div)
Ejemplo n.º 19
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     config_parser.add_argument("--generate-metadata",
                                action="store_true",
                                required=False,
                                help="Generate the metadata file for Giant Swarm App Platform.")
     config_parser.add_argument("--catalog-base-url",
                                required=False,
                                help="Base URL of the catalog in which the app package will be stored in. Should end with a /")
Ejemplo n.º 20
0
 def initialize_config(self, config_parser: configargparse.ArgParser) -> None:
     """Register this step's options on *config_parser*."""
     # Both flags behave identically, differing only in which Chart.yaml key they affect.
     for flag, key in (("--replace-app-version-with-git", _chart_yaml_app_version_key),
                       ("--replace-chart-version-with-git", _chart_yaml_chart_version_key)):
         config_parser.add_argument(
             flag,
             required=False,
             action="store_true",
             help=f"Should the {key} in {_chart_yaml} be replaced by a tag and hash from git")
Ejemplo n.º 21
0
def add_bool_arg(parser: ArgParser, name: str, default: bool, **kwargs):
    """Add a boolean parameter to the settings parser.

    Registers a "--<name>" flag (store_true) together with a companion
    "--no-<name>" flag (store_false) writing to the same destination.

    :param parser: parser obtained by get_settings_parser
    :param name: name of the parameter without "--"
    :param default: default value
    :param kwargs: further parameters such as help
    """
    parser.add_argument(f'--{name}', dest=name, action='store_true', default=default, **kwargs)
    parser.add_argument(f'--no-{name}', dest=name, action='store_false')
Ejemplo n.º 22
0
def main():
    """Plot dev/test score curves for every model matching the given patterns."""
    parser = ArgParser(
        description="Visualize scores of a model over the dev set, saving to .png file.")
    parser.add_argument("models", nargs="+", help="model file basename(s) to load")
    parsed = parser.parse_args()
    for pattern in parsed.models:
        # An empty glob falls back to treating the pattern as a literal filename.
        for model_file in glob(pattern) or [pattern]:
            basename, _ = os.path.splitext(model_file)
            for div in "dev", "test":
                try:
                    scores = load_scores(basename, div=div)
                except OSError:
                    # No saved scores for this division; skip it.
                    continue
                visualize(scores, basename, div=div)
Ejemplo n.º 23
0
def configure_global_options(config_parser: configargparse.ArgParser) -> None:
    """Register options common to the whole application on *config_parser*."""
    config_parser.add_argument("-d", "--debug",
                               required=False,
                               default=False,
                               action="store_true",
                               help="Enable debug messages.")
    config_parser.add_argument("--version",
                               action="version",
                               version=f"{app_name} {get_version()}")
    config_parser.add_argument("-b", "--build-engine",
                               required=False,
                               default="helm3",
                               type=BuildEngineType,
                               help="Select the build engine used for building your chart.")
    # Running and skipping steps contradict each other, hence the exclusive group.
    steps_group = config_parser.add_mutually_exclusive_group()
    steps_group.add_argument("--steps",
                             nargs="+",
                             required=False,
                             default=["all"],
                             help=f"List of steps to execute. Available steps: {ALL_STEPS}")
    steps_group.add_argument("--skip-steps",
                             nargs="+",
                             required=False,
                             default=[],
                             help=f"List of steps to skip. Available steps: {ALL_STEPS}")
Ejemplo n.º 24
0
def _mk_chain_parser():
    """Build the (help-less) parser holding registration-chain options."""
    parser = ArgParser(add_help=False)
    parser.add_argument(
        "--csv-file", dest="csv_file", required=True, type=str,
        help="The spreadsheet with information about your input data. "
             "For the registration chain you are required to have the "
             "following columns in your csv file: \" subject_id\", "
             "\"timepoint\", and \"filename\". Optionally you can have "
             "a column called \"is_common\" that indicates that a scan "
             "is to be used for the common time point registration "
             "using a 1, and 0 otherwise.")
    parser.add_argument(
        "--common-time-point", dest="common_time_point", type=int, default=None,
        help="The time point at which the inter-subject registration will be "
             "performed. I.e., the time point that will link the subjects together. "
             "If you want to use the last time point from each of your input files, "
             "(they might differ per input file) specify -1. If the common time "
             "is not specified, the assumption is that the spreadsheet contains "
             "the mapping using the \"is_common\" column. [Default = %(default)s]")
    parser.add_argument(
        "--common-time-point-name", dest="common_time_point_name", type=str, default="common",
        help="Option to specify a name for the common time point. This is useful for the "
             "creation of more readable output file names. Default is \"common\". Note "
             "that the common time point is the one created by an iterative group-wise "
             "registration (inter-subject).")
    return parser
Ejemplo n.º 25
0
def _mk_stats_parser():
    """Build the (help-less) parser holding statistics options."""
    parser = ArgParser(add_help=False)
    default_fwhms = "0.2"
    # Statistics run by default, with a default list of blurring kernels.
    parser.set_defaults(stats_kernels=default_fwhms)
    parser.set_defaults(calc_stats=True)
    parser.add_argument(
        "--calc-stats", dest="calc_stats", action="store_true",
        help="Calculate statistics at the end of the registration. [Default = %(default)s]")
    parser.add_argument(
        "--no-calc-stats", dest="calc_stats", action="store_false",
        help="If specified, statistics are not calculated. Opposite of --calc-stats.")
    parser.add_argument(
        "--stats-kernels", dest="stats_kernels", type=str,
        help="comma separated list of blurring kernels for analysis. [Default = %(default)s].")
    return parser
Ejemplo n.º 26
0
def main():
    """Dump each model's feature enumeration to JSON, optionally decoding IDs via spaCy."""
    parser = ArgParser(
        description="Load TUPA model and save the features enumeration as a text JSON file."
    )
    parser.add_argument("models", nargs="+", help="model file basename(s) to load")
    parser.add_argument("-s", "--suffix", default=".enum.json", help="filename suffix to append")
    parser.add_argument("-l", "--lang", help="use spaCy model to decode numeric IDs")
    parsed = parser.parse_args()
    for model_file in parsed.models:
        model = load_model(model_file)
        params = model.feature_extractor.params
        if parsed.lang:
            vocab = get_vocab(lang=parsed.lang)
            for param in params.values():
                if param.data:
                    # Order the entries by their numeric index, then decode each one.
                    ordered = sorted(param.data, key=param.data.get)
                    param.data = [decode(vocab, value) for value in ordered]
        save_json(model.filename + parsed.suffix, params)
Ejemplo n.º 27
0
def _mk_lsq12_parser():
    """Build the (help-less) parser with pairwise 12-parameter (affine) registration options."""
    parser = ArgParser(add_help=False)
    # Alignment runs unless --no-run-lsq12 is given; tournament-style averaging
    # defaults to off (its command-line flags below are currently disabled).
    parser.set_defaults(run_lsq12=True)
    parser.set_defaults(generate_tournament_style_lsq12_avg=False)
    parser.add_argument(
        "--run-lsq12", dest="run_lsq12", action="store_true",
        help="Actually run the 12 parameter alignment [default = %(default)s]")
    parser.add_argument(
        "--no-run-lsq12", dest="run_lsq12", action="store_false",
        help="Opposite of --run-lsq12")
    parser.add_argument(
        "--lsq12-max-pairs", dest="max_pairs", type=parse_nullable_int, default=25,
        help="Maximum number of pairs to register together ('None' implies all pairs). "
             "[Default = %(default)s]")
    parser.add_argument(
        "--lsq12-likefile", dest="like_file", type=str, default=None,
        help="Can optionally specify a 'like'-file for resampling at the end of pairwise "
             "alignment. Default is None, which means that the input file will be used. "
             "[Default = %(default)s]")
    parser.add_argument(
        "--lsq12-protocol", dest="protocol", type=str,
        help="Can optionally specify a registration protocol that is different from defaults. "
             "Parameters must be specified as in the following example: \n"
             "applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
             "[Default = %(default)s].")
    # NOTE: the --[no-]generate-tournament-style-lsq12-avg flags (iterative,
    # tournament-style construction of the lsq12 average instead of a direct
    # average) are disabled; only the set_defaults(False) above takes effect.
    return parser
Ejemplo n.º 28
0
def add_args(parser: ArgParser) -> None:
    """
    Add known arguments for parsing
    """
    parser.add_argument("-c", "--config",
                        type=Path,
                        is_config_file=True,
                        help="Config file path")
    parser.add_argument("-v", "--verbosity",
                        type=str.upper,
                        choices=["ERROR", "INFO", "DEBUG"],
                        default="INFO",
                        help="Application verbosity")
    parser.add_argument("-t", "--bot-token",
                        type=str,
                        required=True,
                        env_var="DISCORD_BOT_TOKEN",
                        help="Discord bot token, must be present for the bot to work")
    parser.add_argument("-i", "--info-channel-id",
                        type=int,
                        env_var="DISCORD_MAIN_CHANNEL_ID",
                        help="Main channel ID, used for notifications when bot comes online or going offline")
    parser.add_argument("-p", "--prefix",
                        type=str,
                        default=".",
                        help="Prefix for bot commands e.g. '.<command>'")
Ejemplo n.º 29
0
 def __add_global_args(arg_parser: ArgParser):
     """Register the options shared by every command on *arg_parser*."""
     arg_parser.add_argument("-c", is_config_file=True, help="config file path")
     arg_parser.add_argument("--debug", action="store_true", help="turn on debugging")
     arg_parser.add_argument(
         "--logging-level",
         help="set logging-level level (see Python logging module)")
def _mk_autocrop_parser():
    """Build the (help-less) parser holding autocrop padding options.

    One '--<axis>-pad' flag per axis; each takes a 'low,high' string of
    padding amounts in mm.
    """
    p = ArgParser(add_help=False)
    # All three axes share the same spec; the x/y help previously said
    # "each sides" while z said "each side" — unified to the grammatical form.
    for axis in ("x", "y", "z"):
        p.add_argument("--%s-pad" % axis, dest="%s_pad" % axis,
                       type=str,
                       default='0,0',
                       help="Padding in mm will be added to each side. [default = %(default)s]")
    return p
 def add_arguments(self, arg_parser: ArgParser, add_parent_arguments):
     """Register Drive file-upload arguments on *arg_parser*."""
     arg_parser.add_argument("--file-path", required=True,
                             help="Local path to the file to be uploaded")
     arg_parser.add_argument("--file-id", required=True,
                             help="Id of the file in Drive that will be overwritten.  Must already exist.")
     arg_parser.add_argument("--service-account-file", required=True,
                             help="Path to Google Cloud service account file")
def _mk_consensus_to_atlas_parser():
    """Build the (help-less) parser holding consensus-to-atlas options."""
    parser = ArgParser(add_help=False)
    # Three flags differing only in their suffix; all share the same help text.
    for suffix in ("", "-label", "-mask"):
        parser.add_argument("--atlas-target" + suffix,
                            dest="atlas_target" + suffix.replace("-", "_"),
                            type=str,
                            default=None,
                            help="Register the consensus average to the ABI Atlas")
    return parser
Ejemplo n.º 33
0
def _mk_stats_parser():
    """Build the (help-less) parser holding statistics options."""
    parser = ArgParser(add_help=False)
    default_fwhms = "0.2"
    # Statistics run by default, with a default list of blurring kernels.
    parser.set_defaults(stats_kernels=default_fwhms)
    parser.set_defaults(calc_stats=True)
    parser.add_argument(
        "--calc-stats", dest="calc_stats",
        action="store_true",
        help="Calculate statistics at the end of the registration. [Default = %(default)s]")
    parser.add_argument(
        "--no-calc-stats", dest="calc_stats",
        action="store_false",
        help="If specified, statistics are not calculated. Opposite of --calc-stats.")
    parser.add_argument(
        "--stats-kernels", dest="stats_kernels",
        type=str,
        help="comma separated list of blurring kernels for analysis. [Default = %(default)s].")
    return parser
def _mk_stacks_to_volume_parser():
    """Build the (help-less) parser holding stacks-to-volume options."""
    parser = ArgParser(add_help=False)
    parser.add_argument(
        "--input-resolution", dest="input_resolution",
        type=float, default=0.00137,
        help="The raw in-plane resolution of the tiles in mm. [default = %(default)s]")
    parser.add_argument(
        "--plane-resolution", dest="plane_resolution",
        type=float, default=None,
        help="The output in-plane resolution of the tiles in mm")
    parser.add_argument(
        "--manual-scale-output", dest="manual_scale_output",
        default=False, action="store_true",
        help="The purpose of this option is to correct for when brains have been imaged using different "
             "interslice distances."
             "If true [default = %(default)s], your input to --csv-file must have a scale_output column. "
             "The stacked count MINC file will have its values scaled by that number. "
             "If false, each brain's count slices will be scaled by its interslice distance divided by the "
             "the minimum interslice distance of all brains. Each brain's scalar value will be reflected "
             "in the output csv files.")
    return parser
Ejemplo n.º 35
0
def _mk_chain_parser():
    p = ArgParser(add_help=False)
    p.add_argument(
        "--csv-file",
        dest="csv_file",
        type=str,
        required=True,
        help="The spreadsheet with information about your input data. "
        "For the registration chain you are required to have the "
        "following columns in your csv file: \" subject_id\", "
        "\"timepoint\", and \"filename\". Optionally you can have "
        "a column called \"is_common\" that indicates that a scan "
        "is to be used for the common time point registration "
        "using a 1, and 0 otherwise.")
    p.add_argument(
        "--common-time-point",
        dest="common_time_point",
        type=int,
        default=None,
        help="The time point at which the inter-subject registration will be "
        "performed. I.e., the time point that will link the subjects together. "
        "If you want to use the last time point from each of your input files, "
        "(they might differ per input file) specify -1. If the common time "
        "is not specified, the assumption is that the spreadsheet contains "
        "the mapping using the \"is_common\" column. [Default = %(default)s]")
    p.add_argument(
        "--common-time-point-name",
        dest="common_time_point_name",
        type=str,
        default="common",
        help=
        "Option to specify a name for the common time point. This is useful for the "
        "creation of more readable output file names. Default is \"common\". Note "
        "that the common time point is the one created by an iterative group-wise "
        "registration (inter-subject).")
    return p
Ejemplo n.º 36
0
	def __init__(self, chosen_dir=None):
		"""Parse command-line and config-file options and expose them as attributes.

		Locates a par2 executable (preferring phpar2.exe on Windows), reads
		options from par2deep.ini / ~/.par2deep via configargparse, and copies
		every parsed option onto ``self``.

		Args:
			chosen_dir: optional directory whose par2deep.ini takes precedence;
				when None, the current directory's par2deep.ini is used.
		"""
		# On Windows, search a few likely locations for (ph)par2.exe and use
		# the shell; elsewhere rely on 'par2' being on the PATH.
		if sys.platform == 'win32':
			self.shell = True
			locs = [os.path.join(sys.path[0], 'phpar2.exe'),
					'phpar2.exe',
					os.path.join(sys.path[0], 'par2.exe'),
					'par2.exe',
					]
			par_cmd = 'par2'
			for p in locs:
				if os.path.isfile(p):
					par_cmd = p
					break
		else:
			self.shell = False
			par_cmd = 'par2'

		# Identity comparison with None (PEP 8), not equality.
		if chosen_dir is None:
			parser = ArgParser(default_config_files=['par2deep.ini', '~/.par2deep'])
		else:
			parser = ArgParser(default_config_files=[os.path.join(chosen_dir, 'par2deep.ini'), '~/.par2deep'])

		parser.add_argument("-q", "--quiet", action='store_true', help="Don't ask questions, go with all defaults, including repairing and deleting files (default off).")
		parser.add_argument("-over", "--overwrite", action='store_true', help="Overwrite existing par2 files (default off).")
		parser.add_argument("-novfy", "--noverify", action='store_true', help="Do not verify existing files (default off).")
		parser.add_argument("-keep", "--keep_old", action='store_true', help="Keep unused par2 files and old par2 repair files (.1,.2 and so on).")
		parser.add_argument("-ex", "--excludes", action="append", type=str, default=[], help="Optionally excludes directories ('root' is files in the root of -dir).")
		parser.add_argument("-exex", "--extexcludes", action="append", type=str, default=[], help="Optionally excludes file extensions.")
		parser.add_argument("-dir", "--directory", type=str, default=os.getcwd(), help="Path to operate on (default is current directory).")
		parser.add_argument("-pc", "--percentage", type=int, default=5, help="Set the parity percentage (default 5%%).")
		parser.add_argument("-pcmd", "--par_cmd", type=str, default=par_cmd, help="Set path to alternative par2 command (default \"par2\").")

		# Collect all options that were actually set (drop None values).
		args = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}
		self.args = args

		# Number of parity files, stored as a string alongside the parsed options.
		args["nr_parfiles"] = str(1)

		# Expose every option as an attribute (self.quiet, self.directory, ...).
		for k, v in self.args.items():
			setattr(self, k, v)
Ejemplo n.º 37
0
        self.collectdata()

        records = [(fname, tgt) for fname, tgt in self.trainpairlist.items()]
        np.savetxt(self.manifests['train'], records, fmt='%s,%s')

        records = [(fname, tgt) for fname, tgt in self.valpairlist.items()]
        np.savetxt(self.manifests['val'], records, fmt='%s,%s')

        records = [(fname, tgt) for fname, tgt in self.testpairlist.items()]
        np.savetxt(self.manifests['test'], records, fmt='%s,%s')


if __name__ == "__main__":
    # Build the CLI for the Flower102 ingest script.
    parser = ArgParser()
    parser.add_argument('--input_dir', default='/hdd/Dataset/Flower102',
                        help='Directory to find input')
    parser.add_argument('--out_dir',
                        default='/home/william/PyProjects/TFcodes/dataset/flower102',
                        help='Directory to write ingested files')
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    parser.add_argument('--ratio', type=float, default=0.3,
                        help='Percentage of dataset to be used for validation')
Ejemplo n.º 38
0
        if (all([os.path.exists(manifest) for manifest in self.manifests.values()])
                and not self.overwrite):
            print("Found manfiest files, skipping ingest, use --overwrite to overwrite them.")
            return

        for setn, manifest in self.manifests.items():
            pairs = self.train_or_val_pairs(setn)
            records = [(os.path.relpath(fname, self.out_dir), int(tgt))
                       for fname, tgt in pairs]
            records.insert(0, ('@FILE', 'STRING'))
            np.savetxt(manifest, records, fmt='%s\t%s')


if __name__ == "__main__":
    # CLI entry point for the ImageNet-1K ingest script.
    parser = ArgParser()
    # `required=True` makes a default redundant (and `default=None` was
    # misleading), so no defaults are given for the required options.
    parser.add_argument('--input_dir', required=True,
                        help='Directory to find input tars')
    parser.add_argument('--out_dir', required=True,
                        help='Directory to write ingested files')
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite files')
    args = parser.parse_args()

    logger = logging.getLogger(__name__)

    bw = IngestI1K(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size,
                   overwrite=args.overwrite)

    bw.run()
Ejemplo n.º 39
0
    if "amr" not in keep:  # Remove AMR-specific features: node label and category
        delete_if_exists((model.feature_params, model.classifier.params), (NODE_LABEL_KEY, "c"))
    delete_if_exists((model.classifier.labels, model.classifier.axes), {NODE_LABEL_KEY}.union(FORMATS).difference(keep))


def delete_if_exists(dicts, keys):
    """Delete every key in ``keys`` from each mapping in ``dicts``.

    Missing keys are silently ignored; each mapping is pruned in place.
    """
    # Loop order is irrelevant: each (mapping, key) deletion is independent.
    for k in keys:
        for mapping in dicts:
            try:
                del mapping[k]
            except KeyError:
                pass


def main(args):
    """Strip all but the kept tasks from each model and save it to args.out_dir."""
    os.makedirs(args.out_dir, exist_ok=True)
    for model_path in args.models:
        loaded = load_model(model_path)
        strip_multitask(loaded, args.keep)
        loaded.filename = os.path.join(args.out_dir, os.path.basename(model_path))
        loaded.save()


if __name__ == "__main__":
    # CLI entry point: strip a TUPA model down to the features/weights of
    # the selected task(s) and write the result to --out-dir.
    argparser = ArgParser(description="Load TUPA model and save with just one task's features/weights.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    argparser.add_argument("-k", "--keep", nargs="+", default=["ucca"],
                           choices=tuple(filter(None, FORMATS)),
                           help="tasks to keep features/weights for")
    argparser.add_argument("-o", "--out-dir", default=".",
                           help="directory to write modified model files to")
    main(argparser.parse_args())
Ejemplo n.º 40
0
def _mk_lsq6_parser(with_nuc : bool = True,
                    with_inormalize : bool = True):
    p = ArgParser(add_help=False)
    p.set_defaults(lsq6_method="lsq6_large_rotations")
    p.set_defaults(nuc = True if with_nuc else False)
    p.set_defaults(inormalize = True if with_inormalize else False)
    p.set_defaults(copy_header_info=False)
    # TODO: should this actually be part of the LSQ6 component?  What would it return in this case?
    p.set_defaults(run_lsq6=True)
    p.add_argument("--run-lsq6", dest="run_lsq6",
                   action="store_true",
                   help="Actually run the 6 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq6", dest="run_lsq6",
                   action="store_false",
                   help="Opposite of --run-lsq6")
    # TODO should be part of some mutually exclusive group ...
    p.add_argument("--init-model", dest="init_model",
                   type=str, default=None,
                   help="File in standard space in the initial model. The initial model "
                        "can also have a file in native space and potentially a transformation "
                        "file. See our wiki (https://wiki.mouseimaging.ca/) for detailed "
                        "information on initial models. [Default = %(default)s]")
    p.add_argument("--lsq6-target", dest="lsq6_target",
                   type=str, default=None,
                   help="File to be used as the target for the initial (often 6-parameter) alignment. "
                        "[Default = %(default)s]")
    p.add_argument("--bootstrap", dest="bootstrap",
                   action="store_true", default=False,
                   help="Use the first input file to the pipeline as the target for the "
                        "initial (often 6-parameter) alignment. [Default = %(default)s]")
    # TODO: add information about the pride of models to the code in such a way that it
    # is reflected on GitHub
    p.add_argument("--pride-of-models", dest="pride_of_models",
                   type=str, default=None,
                   help="(selected longitudinal pipelines only!) Specify a csv file that contains the mapping of "
                        "all your initial models at different time points. The idea is that you might "
                        "want to use different initial models for the time points in your data. "
                        "The csv file should have one column called \"model_file\", and one column "
                        "called \"time_point\". The time points can be given in either integer values "
                        "or float values. Each model file should point to the file in standard space "
                        "for that particular model.  [Default = %(default)s]")
    # TODO: do we need to implement this option? This was for Kieran Short, but the procedure
    # he will be using in the future most likely will not involve this option.
    # group.add_argument("--lsq6-alternate-data-prefix", dest="lsq6_alternate_prefix",
    #                   type=str, default=None,
    #                   help="Specify a prefix for an augmented data set to use for the 6 parameter "
    #                   "alignment. Assumptions: there is a matching alternate file for each regular input "
    #                   "file, e.g. input files are: input_1.mnc input_2.mnc ... input_n.mnc. If the "
    #                   "string provided for this flag is \"aug_\", then the following files should exist: "
    #                   "aug_input_1.mnc aug_input_2.mnc ... aug_input_n.mnc. These files are assumed to be "
    #                   "in the same orientation/location as the regular input files.  They will be used for "
    #                   "for the 6 parameter alignment. The transformations will then be used to transform "
    #                   "the regular input files, with which the pipeline will continue.")
    p.add_argument("--lsq6-simple", dest="lsq6_method",
                   action="store_const", const="lsq6_simple",
                   help="Run a 6 parameter alignment assuming that the input files are roughly "
                        "aligned: same space, similar orientation. Keep in mind that if you use an "
                        "initial model with both a standard and a native space, the assumption is "
                        "that the input files are already roughly aligned to the native space. "
                        "Three iterations are run: 1st is 17 times stepsize blur, 2nd is 9 times "
                        "stepsize gradient, 3rd is 4 times stepsize blur. [Default = %(default)s]")
    p.add_argument("--lsq6-centre-estimation", dest="lsq6_method",
                   action="store_const", const="lsq6_centre_estimation",
                   help="Run a 6 parameter alignment assuming that the input files have a "
                        "similar orientation, but are scanned in different coils/spaces. [Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations", dest="lsq6_method",
                   action="store_const", const="lsq6_large_rotations",
                   help="Run a 6 parameter alignment assuming that the input files have a random "
                        "orientation and are scanned in different coils/spaces. A brute force search over "
                        "the x,y,z rotation space is performed to find the best 6 parameter alignment. "
                        "[Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations-tmp-dir", dest="rotation_tmp_dir",
                   type=str, default="/dev/shm/",
                   help="Specify the directory that rotational_minctracc.py uses for temporary files. "
                        "By default we use /dev/shm/, because this program involves a lot of I/O, and "
                        "this is probably one of the fastest way to provide this. [Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations-parameters", dest="rotation_params",
                   type=str, default="5,4,10,8",
                   help="Settings for the large rotation alignment. factor=factor based on smallest file "
                        "resolution: 1) blur factor, 2) resample step size factor, 3) registration step size "
                        "factor, 4) w_translations factor  ***** if you are working with mouse brain data "
                        " the defaults do not have to be based on the file resolution; a default set of "
                        " settings works for all mouse brain. In order to use those setting, specify: "
                        "\"mousebrain\" as the argument for this option. ***** [default = %(default)s]")
    p.add_argument("--lsq6-rotational-range", dest="rotation_range",
                   type=int, default=50,
                   help="Settings for the rotational range in degrees when running the large rotation "
                        "alignment. [Default = %(default)s]")
    p.add_argument("--lsq6-rotational-interval", dest="rotation_interval",
                   type=int, default=10,
                   help="Settings for the rotational interval in degrees when running the large rotation "
                        "alignment. [Default = %(default)s]")
    p.add_argument("--nuc", dest="nuc",
                   action="store_true",
                   help="Perform non-uniformity correction. [Default = %(default)s]")
    p.add_argument("--no-nuc", dest="nuc",
                   action="store_false",
                   help="If specified, do not perform non-uniformity correction. Opposite of --nuc.")
    p.add_argument("--inormalize", dest="inormalize",
                   action="store_true",
                   help="Normalize the intensities after lsq6 alignment and nuc, if done. "
                        "[Default = %(default)s] ")
    p.add_argument("--no-inormalize", dest="inormalize",
                   action="store_false",
                   help="If specified, do not perform intensity normalization. Opposite of --inormalize.")
    p.add_argument("--copy-header-info-to-average", dest="copy_header_info",
                   action="store_true",
                   help="Copy the MINC header information of the first input file into the "
                        "average that is created. [Default = %(default)s] ")
    p.add_argument("--no-copy-header-info-to-average", dest="copy_header_info",
                   action="store_false",
                   help="Opposite of --copy-header-info-to-average.")
    p.add_argument("--lsq6-protocol", dest="protocol_file",
                   type=str, default=None,
                   help="Specify an lsq6 protocol that overrides the default setting for stages in "
                        "the 6 parameter minctracc call. Parameters must be specified as in the following \n"
                        "example: applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
                        "[Default = %(default)s].")
    return p
Ejemplo n.º 41
0
def getArgumentParser():
    """Build the orgviz command-line parser.

    Options may also be supplied via the ~/.orgviz.cfg config file or (for
    --input) the ORGVIZ_INPUT environment variable.
    """
    # Stray trailing semicolons removed and keyword-argument spacing
    # normalized to PEP 8 (no spaces around '=' in keyword arguments).
    parser = ArgParser(default_config_files=["~/.orgviz.cfg"])
    parser.add_argument("--input", "-I", default="default.org", env_var="ORGVIZ_INPUT")
    parser.add_argument("--output", "-O", default=os.getcwd())
    parser.add_argument("--skipDrawingLegend", "-L", action="store_true")
    parser.add_argument("--skipDrawingTeams", action="store_true")
    parser.add_argument("--skipDrawingTitle", action="store_true")
    parser.add_argument("--dotout", action="store_true")
    parser.add_argument("--logging", type=int, default=20, help="1 = Everything. 50 = Critical only.")
    parser.add_argument("--teams", nargs="*", default=[])
    parser.add_argument("--influence", nargs="*", default=[], choices=["supporter", "promoter", "enemy", "internal"])
    parser.add_argument("--profilePictureDirectory", default="/opt/profilePictures/", help="A directory containing [name].jpeg files of people in your organization.")
    parser.add_argument("--profilePictures", "-P", action="store_true")
    parser.add_argument("--outputType", "-T", default="svg", choices=["png", "svg"])
    # NOTE(review): store_false means passing --keepDotfile sets the option to
    # False (default True), which reads backwards for a "keep" flag — confirm
    # this is intended before changing it.
    parser.add_argument("--keepDotfile", action="store_false")
    parser.add_argument("--vizType", choices=["DS", "inf", "none"], default="DS")
    parser.add_argument("--dpi", type=int, default=100, help="DPI (resolution), only used for PNG.")
    parser.add_argument("--attributeMatches", "-a", nargs="*", default=[], metavar="KEY=VALUE")

    return parser
Ejemplo n.º 42
0
        log_file = os.path.join(self.orig_out_dir, 'train.log')
        manifest_list_cfg = ', '.join([k+':'+v for k, v in self.manifests.items()])

        with open(cfg_file, 'w') as f:
            f.write('manifest = [{}]\n'.format(manifest_list_cfg))
            f.write('manifest_root = {}\n'.format(self.out_dir))
            f.write('log = {}\n'.format(log_file))
            f.write('epochs = 90\nrng_seed = 0\nverbose = True\neval_freq = 1\n')

        for setn, manifest in self.manifests.items():
            if not os.path.exists(manifest):
                pairs = self.train_or_val_pairs(setn)
                records = [(os.path.relpath(fname, self.out_dir),
                            os.path.relpath(self._target_filename(int(tgt)), self.out_dir))
                           for fname, tgt in pairs]
                np.savetxt(manifest, records, fmt='%s,%s')

if __name__ == "__main__":
    # CLI entry point for the ImageNet-1K ingest script.
    parser = ArgParser()
    parser.add_argument('--input_dir', default=None,
                        help='Directory to find input tars')
    parser.add_argument('--out_dir', default=None,
                        help='Directory to write ingested files')
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    args = parser.parse_args()

    logger = logging.getLogger(__name__)

    bw = IngestI1K(input_dir=args.input_dir,
                   out_dir=args.out_dir,
                   target_size=args.target_size)

    bw.run()