def setup_parser(parser):
    """Register input datasets, the required expression option, and HDF5 output."""
    parser_add_common_opt(parser, 'multidata', nargs='*', action='append')
    parser.add_argument(
        '-e', '--exec',
        type=str, required=True, metavar='EXPR', action='append', dest='eval',
        help="""Python expression, or filename of a Python script, or '-' to read expressions from STDIN.""")
    parser_add_optgroup_from_def(parser, hdf5output)
def setup_parser(parser):
    """Register dataset input, component selection, and output format options."""
    from .helpers import parser_add_optgroup_from_def, parser_add_common_attr_opts

    parser_add_common_opt(parser, "multidata", required=True)
    parser_add_optgroup_from_def(parser, component_grp, exclusive=True)
    parser.add_argument(
        "-o", "--output",
        help="""output filename. If no output file name is given output will be directed to stdout, if permitted by the data format""")
    parser.add_argument(
        "-f", "--format",
        default="txt", choices=("hdf5", "nifti", "npy", "txt"),
        help="""output format""")
    # optional dataset supplying the mapper for NIfTI reverse-mapping
    parser_add_common_opt(
        parser, "multidata", names=("--mapper-dataset",), dest="mapperds",
        help="""path to a PyMVPA dataset whose mapper should be used for reverse mapping features into volumetric space for NIfTI export. By default the mapper in the input dataset is used.""")
    parser_add_optgroup_from_def(parser, hdf5_grp)
def setup_parser(parser):
    """Register input datasets plus all preprocessing option groups."""
    parser_add_common_opt(parser, 'multidata', required=True)
    # group registration order determines the --help layout
    for grp_def in (common_args, detrend_args, bandpassfilter_args,
                    normalize_args):
        parser_add_optgroup_from_def(parser, grp_def)
    parser_add_optgroup_from_def(parser, single_required_hdf5output)
def setup_parser(parser):
    """Register event-definition/modification groups and common attribute options."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output)

    parser_add_common_opt(parser, 'multidata', required=True)
    parser_add_optgroup_from_def(parser, define_events_grp, exclusive=True)
    parser_add_optgroup_from_def(parser, mod_events_grp)
    parser_add_common_attr_opts(parser)
    parser_add_optgroup_from_def(parser, single_required_hdf5output)
def setup_parser(parser):
    """Register sample/feature selection groups (mutually exclusive) and stripping."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output)

    parser_add_common_opt(parser, 'multidata', required=True)
    # sample and feature selectors are each exclusive within their group
    parser_add_optgroup_from_def(parser, samples_grp, exclusive=True)
    parser_add_optgroup_from_def(parser, features_grp, exclusive=True)
    parser_add_optgroup_from_def(parser, strip_grp)
    parser_add_optgroup_from_def(parser, single_required_hdf5output)
def setup_parser(parser):
    """Register dataset sources, attribute options, and MRI/output groups."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output)

    # registration order here determines the --help layout
    parser_add_common_opt(parser, "multidata",
                          metavar="dataset", nargs="*", default=None)
    parser_add_optgroup_from_def(parser, datasrc_args, exclusive=True)
    parser_add_common_attr_opts(parser)
    for grp_def in (mri_args, single_required_hdf5output):
        parser_add_optgroup_from_def(parser, grp_def)
def setup_parser(parser):
    """Register searchlight, conditional-attribute, constraint, and CV option groups."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output, ca_opts_grp)

    parser_add_common_opt(parser, 'multidata', required=True)
    for grp_def in (searchlight_opts_grp, ca_opts_grp,
                    searchlight_constraints_opts_grp):
        parser_add_optgroup_from_def(parser, grp_def)
    # cross-validation options get a distinguishing prefix
    parser_add_optgroup_from_def(parser, crossvalidation_opts_grp,
                                 prefix='--cv-')
    parser_add_optgroup_from_def(parser, single_required_hdf5output)
def setup_parser(parser):
    """Register cross-validation options with learner/partitioner made mandatory."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output)

    parser_add_common_opt(parser, 'multidata', required=True)
    # deep-copy the shared group definition so flipping 'required' on the
    # learner (idx 0) and partitioner (idx 2) does not leak into other commands
    required_cv_grp = copy.deepcopy(crossvalidation_opts_grp)
    for idx in (0, 2):
        required_cv_grp[1][idx][1]['required'] = True
    parser_add_optgroup_from_def(parser, required_cv_grp)
    parser_add_optgroup_from_def(parser, single_required_hdf5output)
def setup_parser(parser):
    """Register cross-validation options; learner and partitioner are required."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output)

    parser_add_common_opt(parser, 'multidata', required=True)
    # work on a private deep copy so the shared crossvalidation_opts_grp
    # definition is untouched; entries 0 and 2 are learner and partitioner
    mandatory_cv = copy.deepcopy(crossvalidation_opts_grp)
    for pos in (0, 2):
        mandatory_cv[1][pos][1]['required'] = True
    parser_add_optgroup_from_def(parser, mandatory_cv)
    parser_add_optgroup_from_def(parser, single_required_hdf5output)
def setup_parser(parser):
    """Register input datasets, report-type selection, and description groups."""
    parser_add_common_opt(parser, 'multidata', required=True)
    parser.add_argument(
        '-r', '--report',
        type=str, choices=info_fx.keys(), default='txtsummary',
        help="""choose a type of report. Default: terse summary in text format.""")
    for grp_def in (xfm_grp, output_grp, ds_descr_grp):
        parser_add_optgroup_from_def(parser, grp_def)
def setup_parser(parser):
    """Register dataset sources, common attributes, and MRI/output option groups."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts,
                          single_required_hdf5output)

    # the sequence of these calls defines the --help layout
    parser_add_common_opt(parser, 'multidata',
                          metavar='dataset', nargs='*', default=None)
    parser_add_optgroup_from_def(parser, datasrc_args, exclusive=True)
    parser_add_common_attr_opts(parser)
    for grp_def in (mri_args, single_required_hdf5output):
        parser_add_optgroup_from_def(parser, grp_def)
def setup_parser(parser):
    """Assemble input, algorithm, and output option groups for hyperalignment."""
    # registration order determines the --help layout
    in_grp = parser.add_argument_group('input data arguments')
    parser_add_common_opt(in_grp, 'multidata', action='append', required=True)
    parser_add_common_opt(
        in_grp, 'multidata', names=('-t', '--transform'), dest='transform',
        help="""Additional datasets for transformation into the common space. The number and order of these datasets have to match those of the training dataset arguments as the correspond mapper will be used to transform each individual dataset.""")
    algo_grp = parser.add_argument_group('algorithm parameters')
    for pname in _supported_parameters:
        param2arg(algo_grp, Hyperalignment, pname)
    out_grp = parser.add_argument_group('output options')
    parser_add_common_opt(out_grp, 'output_prefix', required=True)
    parser_add_common_opt(out_grp, 'hdf5compression')
    for flag in sorted(_output_specs):
        out_grp.add_argument('--%s' % flag, action='store_true',
                             help=_output_specs[flag]['help'])
    for ca_name in sorted(_supported_cas):
        ca2arg(out_grp, Hyperalignment, ca_name,
               help="\nOutput will be stored into '<PREFIX>%s'"
                    % _supported_cas[ca_name]['output_suffix'])
def setup_parser(parser):
    """Build the three option groups (input, algorithm, output) in display order."""
    # NOTE: groups appear in --help in the order they are added here
    data_grp = parser.add_argument_group('input data arguments')
    parser_add_common_opt(data_grp, 'multidata', action='append', required=True)
    parser_add_common_opt(
        data_grp, 'multidata', names=('-t', '--transform'), dest='transform',
        help="""Additional datasets for transformation into the common space. The number and order of these datasets have to match those of the training dataset arguments as the correspond mapper will be used to transform each individual dataset.""")
    param_grp = parser.add_argument_group('algorithm parameters')
    for parameter in _supported_parameters:
        param2arg(param_grp, Hyperalignment, parameter)
    result_grp = parser.add_argument_group('output options')
    parser_add_common_opt(result_grp, 'output_prefix', required=True)
    parser_add_common_opt(result_grp, 'hdf5compression')
    for spec_name in sorted(_output_specs):
        result_grp.add_argument('--%s' % spec_name, action='store_true',
                                help=_output_specs[spec_name]['help'])
    for attr in sorted(_supported_cas):
        ca2arg(result_grp, Hyperalignment, attr,
               help="\nOutput will be stored into '<PREFIX>%s'"
                    % _supported_cas[attr]['output_suffix'])
def setup_parser(parser):
    """Register input datasets, the report selector, and description groups."""
    parser_add_common_opt(parser, "multidata", required=True)
    parser.add_argument(
        "-r", "--report",
        type=str, choices=info_fx.keys(), default="txtsummary",
        help="""choose a type of report. Default: terse summary in text format.""")
    for grp_def in (xfm_grp, output_grp, ds_descr_grp):
        parser_add_optgroup_from_def(parser, grp_def)
def setup_parser(parser):
    """Register dataset input, component selection, format, and mapper options."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts)

    parser_add_common_opt(parser, 'multidata', required=True)
    parser_add_optgroup_from_def(parser, component_grp, exclusive=True)
    parser.add_argument(
        '-o', '--output',
        help="""output filename. If no output file name is given output will be directed to stdout, if permitted by the data format""")
    parser.add_argument(
        '-f', '--format',
        default='txt', choices=('hdf5', 'nifti', 'npy', 'txt'),
        help="""output format""")
    # an auxiliary dataset may provide the mapper used for NIfTI reverse-mapping
    parser_add_common_opt(
        parser, 'multidata', names=('--mapper-dataset',), dest='mapperds',
        help="""path to a PyMVPA dataset whose mapper should be used for reverse mapping features into volumetric space for NIfTI export. By default the mapper in the input dataset is used.""")
    parser_add_optgroup_from_def(parser, hdf5_grp)
def setup_parser(parser):
    """Register dataset/mask/output options plus t-test configuration flags."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts)

    parser_add_common_opt(parser, 'multidata', required=True)
    parser_add_common_opt(parser, 'mask', required=False)
    parser_add_common_opt(parser, 'output_file', required=True)
    parser.add_argument(
        '-c', '--chance-level', type=float, default=0,
        help="""chance level performance""")
    parser.add_argument(
        '-s', '--stat', default='t', choices=['t', 'z', 'p'],
        help="""Store corresponding statistic, e.g. z-value corresponding to the original t-value""")
    parser.add_argument(
        '-a', '--alternative', default='greater',
        choices=['greater', 'less', 'two-sided'],
        help="""Which tail of the distribution 'interesting' values belong to. E.g. if values are accuracies, it would be the 'greater', if errors -- the 'less'""")
def setup_parser(parser):
    """Configure command line arguments: data/mask/output plus t-test settings."""
    from .helpers import (parser_add_optgroup_from_def,
                          parser_add_common_attr_opts)

    parser_add_common_opt(parser, 'multidata', required=True)
    parser_add_common_opt(parser, 'mask', required=False)
    parser_add_common_opt(parser, 'output_file', required=True)
    parser.add_argument(
        '-c', '--chance-level', type=float, default=0,
        help="""chance level performance""")
    parser.add_argument(
        '-s', '--stat', default='t', choices=['t', 'z', 'p'],
        help="""Store corresponding statistic, e.g. z-value corresponding to the original t-value""")
    parser.add_argument(
        '-a', '--alternative', default='greater',
        choices=['greater', 'less', 'two-sided'],
        help="""Which tail of the distribution 'interesting' values belong to. E.g. if values are accuracies, it would be the 'greater', if errors -- the 'less'""")
    # only meaningful for multi-sample datasets
    parser.add_argument(
        '--isample', type=int, default=0,
        help="""In case of multi sample dataset, which sample to extract to run the ttest on""")