Example 1
class _TemplateFlowSelectInputSpec(BaseInterfaceInputSpec):
    template = traits.Enum(*tf.templates(), mandatory=True, desc='Template ID')
    atlas = InputMultiObject(traits.Str, desc='Specify an atlas')
    resolution = InputMultiObject(traits.Int,
                                  desc='Specify a template resolution index')
    template_spec = traits.DictStrAny({'atlas': None},
                                      usedefault=True,
                                      desc='Template specifications')
Example 2
def validate_name(ctx, param, value):
    """Check whether this template already exists in the Archive."""
    from templateflow.api import templates

    value = value[4:] if value.startswith("tpl-") else value
    if value in templates():
        raise click.BadParameter(
            f"A template with name {value} already exists in the Archive.")
    return value
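For reference, a callback with this ``(ctx, param, value)`` signature is attached through ``click.option``. The sketch below wires it to a hypothetical ``add`` command; the command and option names are assumptions, not part of the source:

import click

@click.command()
@click.option("--name", callback=validate_name,
              help="Template identifier, with or without the 'tpl-' prefix")
def add(name):
    """Hypothetical command that registers a new template."""
    click.echo(f"Adding tpl-{name} to the Archive")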
Example 3
class Reference:
    """
    Represent a (non)standard space specification.

    Examples
    --------
    >>> Reference('MNI152NLin2009cAsym')
    Reference(space='MNI152NLin2009cAsym', spec={})

    >>> Reference('MNI152NLin2009cAsym', {})
    Reference(space='MNI152NLin2009cAsym', spec={})

    >>> Reference('MNI152NLin2009cAsym', None)
    Reference(space='MNI152NLin2009cAsym', spec={})

    >>> Reference('MNI152NLin2009cAsym', {'res': 1})
    Reference(space='MNI152NLin2009cAsym', spec={'res': 1})

    >>> Reference('MNIPediatricAsym', {'cohort': '1'})
    Reference(space='MNIPediatricAsym', spec={'cohort': '1'})

    >>> Reference('func')
    Reference(space='func', spec={})

    >>> # Checks spaces with cohorts:
    >>> Reference('MNIPediatricAsym')
    Traceback (most recent call last):
      ...
    ValueError: standard space "MNIPediatricAsym" is not fully defined.
    ...

    >>> Reference(space='MNI152Lin', spec={'cohort': 1})
    Traceback (most recent call last):
      ...
    ValueError: standard space "MNI152Lin" does not accept ...

    >>> Reference('MNIPediatricAsym', {'cohort': '100'})
    Traceback (most recent call last):
      ...
    ValueError: standard space "MNIPediatricAsym" does not contain ...
    ...

    >>> Reference('MNIPediatricAsym', 'blah')
    Traceback (most recent call last):
      ...
    TypeError: ...

    >>> Reference('shouldraise')
    Traceback (most recent call last):
      ...
    ValueError: space identifier "shouldraise" is invalid.
    ...

    >>> # Check standard property
    >>> Reference('func').standard
    False
    >>> Reference('MNI152Lin').standard
    True
    >>> Reference('MNIPediatricAsym', {'cohort': 1}).standard
    True

    >>> # Equality/inequality checks
    >>> Reference('func') == Reference('func')
    True
    >>> Reference('func') != Reference('MNI152Lin')
    True
    >>> Reference('MNI152Lin', {'res': 1}) == Reference('MNI152Lin', {'res': 1})
    True
    >>> Reference('MNI152Lin', {'res': 1}) == Reference('MNI152Lin', {'res': 2})
    False
    >>> sp1 = Reference('MNIPediatricAsym', {'cohort': 1})
    >>> sp2 = Reference('MNIPediatricAsym', {'cohort': 2})
    >>> sp1 == sp2
    False
    >>> sp1 = Reference('MNIPediatricAsym', {'res': 1, 'cohort': 1})
    >>> sp2 = Reference('MNIPediatricAsym', {'cohort': 1, 'res': 1})
    >>> sp1 == sp2
    True

    """

    _standard_spaces = tuple(_tfapi.templates())

    space = attr.ib(default=None, type=str)
    """Name designating this space."""
    spec = attr.ib(
        factory=dict,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
    )
    """The dictionary of specs."""
    standard = attr.ib(default=False, repr=False, type=bool)
    """Whether this space is standard or not."""
    dim = attr.ib(default=3, repr=False, type=int)
    """Dimensionality of the sampling manifold."""

    def __attrs_post_init__(self):
        """Extract cohort out of spec."""
        if self.spec is None:
            object.__setattr__(self, "spec", {})

        if self.space.startswith("fsaverage"):
            space = self.space
            object.__setattr__(self, "space", "fsaverage")

            if "den" not in self.spec or space != "fsaverage":
                spec = self.spec.copy()
                spec["den"] = FSAVERAGE_DENSITY[space]
                object.__setattr__(self, "spec", spec)

        if self.space.startswith("fs"):
            object.__setattr__(self, "dim", 2)

        if self.space in self._standard_spaces:
            object.__setattr__(self, "standard", True)

        _cohorts = ["%s" % t for t in _tfapi.TF_LAYOUT.get_cohorts(template=self.space)]
        if "cohort" in self.spec:
            if not _cohorts:
                raise ValueError(
                    'standard space "%s" does not accept a cohort '
                    "specification." % self.space
                )

            if str(self.spec["cohort"]) not in _cohorts:
                raise ValueError(
                    'standard space "%s" does not contain any cohort '
                    'named "%s".' % (self.space, self.spec["cohort"])
                )
        elif _cohorts:
            _cohorts = ", ".join(['"cohort-%s"' % c for c in _cohorts])
            raise ValueError(
                'standard space "%s" is not fully defined.\n'
                "Set a valid cohort selector from: %s." % (self.space, _cohorts)
            )

    @property
    def fullname(self):
        """
        Generate a full name combining the space name and cohort.

        Examples
        --------
        >>> Reference('MNI152Lin').fullname
        'MNI152Lin'

        >>> Reference('MNIPediatricAsym', {'cohort': 1}).fullname
        'MNIPediatricAsym:cohort-1'

        """
        if "cohort" not in self.spec:
            return self.space
        return "%s:cohort-%s" % (self.space, self.spec["cohort"])

    @property
    def legacyname(self):
        """
        Generate a legacy name for fsaverageX spaces.

        Examples
        --------
        >>> Reference(space='fsaverage')
        Reference(space='fsaverage', spec={'den': '164k'})
        >>> Reference(space='fsaverage').legacyname
        'fsaverage'
        >>> Reference(space='fsaverage6')
        Reference(space='fsaverage', spec={'den': '41k'})
        >>> Reference(space='fsaverage6').legacyname
        'fsaverage6'
        >>> # Overwrites density of legacy "fsaverage" specifications
        >>> Reference(space='fsaverage6', spec={'den': '10k'})
        Reference(space='fsaverage', spec={'den': '41k'})
        >>> Reference(space='fsaverage6', spec={'den': '10k'}).legacyname
        'fsaverage6'
        >>> # Return None if no legacy space
        >>> Reference(space='fsaverage', spec={'den': '30k'}).legacyname is None
        True

        """
        if self.space == "fsaverage" and self.spec["den"] in FSAVERAGE_LEGACY:
            return FSAVERAGE_LEGACY[self.spec["den"]]

    @space.validator
    def _check_name(self, attribute, value):
        if value.startswith("fsaverage"):
            return
        valid = list(self._standard_spaces) + NONSTANDARD_REFERENCES
        if value not in valid:
            raise ValueError(
                'space identifier "%s" is invalid.\nValid '
                "identifiers are: %s" % (value, ", ".join(valid))
            )

    def __str__(self):
        """
        Format this reference.

        Examples
        --------
        >>> str(Reference(space='MNIPediatricAsym', spec={'cohort': 2, 'res': 1}))
        'MNIPediatricAsym:cohort-2:res-1'

        """
        return ":".join(
            [self.space] + ["-".join((k, str(v))) for k, v in sorted(self.spec.items())]
        )

    @classmethod
    def from_string(cls, value):
        """
        Parse a string to generate the corresponding list of References.

        .. testsetup::

            >>> if PY_VERSION < (3, 6):
            ...     pytest.skip("This doctest does not work on python <3.6")

        Parameters
        ----------
        value: :obj:`str`
            A string containing a space specification following *fMRIPrep*'s
            language for ``--output-spaces``
            (e.g., ``MNIPediatricAsym:cohort-1:cohort-2:res-1:res-2``).

        Returns
        -------
        spaces : :obj:`list` of :obj:`Reference`
            A list of corresponding spaces given the input string.

        Examples
        --------
        >>> Reference.from_string("MNI152NLin2009cAsym")
        [Reference(space='MNI152NLin2009cAsym', spec={})]

        >>> # Bad space name
        >>> Reference.from_string("shouldraise")
        Traceback (most recent call last):
          ...
        ValueError: space identifier "shouldraise" is invalid.
        ...

        >>> # Missing cohort
        >>> Reference.from_string("MNIPediatricAsym")
        Traceback (most recent call last):
          ...
        ValueError: standard space "MNIPediatricAsym" is not fully defined.
        ...

        >>> Reference.from_string("MNIPediatricAsym:cohort-1")
        [Reference(space='MNIPediatricAsym', spec={'cohort': '1'})]

        >>> Reference.from_string("MNIPediatricAsym:cohort-1:cohort-2")
        [Reference(space='MNIPediatricAsym', spec={'cohort': '1'}),
         Reference(space='MNIPediatricAsym', spec={'cohort': '2'})]

        >>> Reference.from_string("fsaverage:den-10k:den-164k")
        [Reference(space='fsaverage', spec={'den': '10k'}),
         Reference(space='fsaverage', spec={'den': '164k'})]

        >>> Reference.from_string("MNIPediatricAsym:cohort-5:cohort-6:res-2")
        [Reference(space='MNIPediatricAsym', spec={'cohort': '5', 'res': '2'}),
         Reference(space='MNIPediatricAsym', spec={'cohort': '6', 'res': '2'})]

        >>> Reference.from_string("MNIPediatricAsym:cohort-5:cohort-6:res-2:res-iso1.6mm")
        [Reference(space='MNIPediatricAsym', spec={'cohort': '5', 'res': '2'}),
         Reference(space='MNIPediatricAsym', spec={'cohort': '5', 'res': 'iso1.6mm'}),
         Reference(space='MNIPediatricAsym', spec={'cohort': '6', 'res': '2'}),
         Reference(space='MNIPediatricAsym', spec={'cohort': '6', 'res': 'iso1.6mm'})]

        """
        _args = value.split(":")
        spec = defaultdict(list, {})
        for modifier in _args[1:]:
            mitems = modifier.split("-", 1)
            spec[mitems[0]].append(len(mitems) == 1 or mitems[1])

        allspecs = _expand_entities(spec)

        return [cls(_args[0], s) for s in allspecs]
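``from_string`` delegates the cross-product expansion of modifiers to ``_expand_entities``, which is not shown above. A minimal sketch of the behavior the doctests imply (the actual helper lives alongside this class):

from itertools import product

def _expand_entities(entities):
    """Expand a dict of value lists into the list of all combinations.

    >>> _expand_entities({'cohort': ['5', '6'], 'res': ['2']})
    [{'cohort': '5', 'res': '2'}, {'cohort': '6', 'res': '2'}]
    """
    keys = list(entities.keys())
    return [dict(zip(keys, combo)) for combo in product(*entities.values())]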
Example 4
class SpatialReferences:
    """
    Manage specifications of spatial references.

    Examples
    --------
    >>> sp = SpatialReferences([
    ...     'func',
    ...     'fsnative',
    ...     'MNI152NLin2009cAsym',
    ...     'anat',
    ...     'fsaverage5',
    ...     'fsaverage6',
    ...     ('MNIPediatricAsym', {'cohort': '2'}),
    ...     ('MNI152NLin2009cAsym', {'res': 2}),
    ...     ('MNI152NLin2009cAsym', {'res': 1}),
    ... ])
    >>> sp.get_spaces(standard=False)
    ['func', 'fsnative', 'anat']

    >>> sp.get_spaces(standard=False, dim=(3,))
    ['func', 'anat']

    >>> sp.get_spaces(nonstandard=False)
    ['MNI152NLin2009cAsym', 'fsaverage', 'MNIPediatricAsym:cohort-2']

    >>> sp.get_spaces(nonstandard=False, dim=(3,))
    ['MNI152NLin2009cAsym', 'MNIPediatricAsym:cohort-2']

    >>> sp.get_fs_spaces()
    ['fsnative', 'fsaverage5', 'fsaverage6']

    >>> sp.get_standard(full_spec=True)
    [Reference(space='fsaverage', spec={'den': '10k'}),
     Reference(space='fsaverage', spec={'den': '41k'}),
     Reference(space='MNI152NLin2009cAsym', spec={'res': 2}),
     Reference(space='MNI152NLin2009cAsym', spec={'res': 1})]

    >>> sp.is_cached()
    False
    >>> sp.cached
    Traceback (most recent call last):
     ...
    ValueError: References have not ...

    >>> sp.checkpoint()
    >>> sp.is_cached()
    True
    >>> sp.cached.references
    [Reference(space='func', spec={}),
     Reference(space='fsnative', spec={}),
     Reference(space='MNI152NLin2009cAsym', spec={}),
     Reference(space='anat', spec={}),
     Reference(space='fsaverage', spec={'den': '10k'}),
     Reference(space='fsaverage', spec={'den': '41k'}),
     Reference(space='MNIPediatricAsym', spec={'cohort': '2'}),
     Reference(space='MNI152NLin2009cAsym', spec={'res': 2}),
     Reference(space='MNI152NLin2009cAsym', spec={'res': 1})]

    >>> sp.cached.get_fs_spaces()
    ['fsnative', 'fsaverage5', 'fsaverage6']

    >>> sp.add(('MNIPediatricAsym', {'cohort': '2'}))
    >>> sp.get_spaces(nonstandard=False, dim=(3,))
    ['MNI152NLin2009cAsym', 'MNIPediatricAsym:cohort-2']

    >>> sp += [('MNIPediatricAsym', {'cohort': '2'})]
    Traceback (most recent call last):
      ...
    ValueError: space ...

    >>> sp += [('MNIPediatricAsym', {'cohort': '1'})]
    >>> sp.get_spaces(nonstandard=False, dim=(3,))
    ['MNI152NLin2009cAsym', 'MNIPediatricAsym:cohort-2', 'MNIPediatricAsym:cohort-1']

    >>> sp.insert(0, ('MNIPediatricAsym', {'cohort': '3'}))
    >>> sp.get_spaces(nonstandard=False, dim=(3,))
    ['MNIPediatricAsym:cohort-3',
     'MNI152NLin2009cAsym',
     'MNIPediatricAsym:cohort-2',
     'MNIPediatricAsym:cohort-1']

    >>> sp.insert(0, ('MNIPediatricAsym', {'cohort': '3'}))
    Traceback (most recent call last):
      ...
    ValueError: space ...

    >>> sp.checkpoint()
    Traceback (most recent call last):
     ...
    ValueError: References have already ...

    >>> sp.checkpoint(force=True)
    >>> sp.cached.references
    [Reference(space='MNIPediatricAsym', spec={'cohort': '3'}),
     Reference(space='func', spec={}),
     Reference(space='fsnative', spec={}),
     Reference(space='MNI152NLin2009cAsym', spec={}),
     Reference(space='anat', spec={}),
     Reference(space='fsaverage', spec={'den': '10k'}),
     Reference(space='fsaverage', spec={'den': '41k'}),
     Reference(space='MNIPediatricAsym', spec={'cohort': '2'}),
     Reference(space='MNI152NLin2009cAsym', spec={'res': 2}),
     Reference(space='MNI152NLin2009cAsym', spec={'res': 1}),
     Reference(space='MNIPediatricAsym', spec={'cohort': '1'})]

    """

    __slots__ = ("_refs", "_cached")
    standard_spaces = tuple(_tfapi.templates())
    """List of supported standard reference spaces."""

    @staticmethod
    def check_space(space):
        """Build a :class:`Reference` object."""
        try:
            if isinstance(space, Reference):
                return space
        except IndexError:
            pass

        spec = {}
        if not isinstance(space, str):
            try:
                spec = space[1] or {}
            except IndexError:
                pass
            except TypeError:
                space = (None,)

            space = space[0]
        return Reference(space, spec)

    def __init__(self, spaces=None, checkpoint=False):
        """
        Maintain the bookkeeping of spaces and templates.

        Internal spaces are normalizations required for pipeline execution which
        can vary based on user arguments.
        Output spaces are desired user outputs.
        """
        self._refs = []
        self._cached = None
        if spaces is not None:
            if isinstance(spaces, str):
                spaces = [spaces]
            self.__iadd__(spaces)

            if checkpoint is True:
                self.checkpoint()

    def __iadd__(self, b):
        """Append a list of transforms to the internal list."""
        if not isinstance(b, (list, tuple)):
            raise TypeError("Must be a list.")

        for space in b:
            self.append(space)
        return self

    def __contains__(self, item):
        """Implement the ``in`` builtin."""
        if not self.references:
            return False
        item = self.check_space(item)
        for s in self.references:
            if s == item:
                return True
        return False

    def __str__(self):
        """
        Representation of this object.

        Examples
        --------
        >>> print(SpatialReferences())
        Spatial References: <none>.

        >>> print(SpatialReferences(['MNI152NLin2009cAsym']))
        Spatial References: MNI152NLin2009cAsym

        >>> print(SpatialReferences(['MNI152NLin2009cAsym', 'fsaverage5']))
        Spatial References: MNI152NLin2009cAsym, fsaverage:den-10k

        """
        spaces = ", ".join([str(s) for s in self.references]) or "<none>."
        return "Spatial References: %s" % spaces

    @property
    def references(self):
        """Get all specified references."""
        return self._refs

    @property
    def cached(self):
        """Get cached spaces, raise error if not cached."""
        if not self.is_cached():
            raise ValueError("References have not been cached")
        return self._cached

    def is_cached(self):
        return self._cached is not None

    def checkpoint(self, force=False):
        """Cache and freeze current spaces to separate attribute."""
        if self.is_cached() and not force:
            raise ValueError("References have already been cached")
        self._cached = self.__class__(self.references)

    def add(self, value):
        """Add one more space, without erroring if it exists."""
        if value not in self:
            self._refs += [self.check_space(value)]

    def append(self, value):
        """Concatenate one more space."""
        if value not in self:
            self._refs += [self.check_space(value)]
            return

        raise ValueError('space "%s" already in spaces.' % str(value))

    def insert(self, index, value, error=True):
        """Concatenate one more space."""
        if value not in self:
            self._refs.insert(index, self.check_space(value))
        elif error is True:
            raise ValueError('space "%s" already in spaces.' % str(value))

    def get_spaces(self, standard=True, nonstandard=True, dim=(2, 3)):
        """
        Return space names.

        Parameters
        ----------
        standard : :obj:`bool`, optional
            Return standard spaces.
        nonstandard : :obj:`bool`, optional
            Return nonstandard spaces.
        dim : :obj:`tuple`, optional
            Desired dimensions of the standard spaces (default is ``(2, 3)``)

        Examples
        --------
        >>> spaces = SpatialReferences(['MNI152NLin6Asym', ("fsaverage", {"den": "10k"})])
        >>> spaces.get_spaces()
        ['MNI152NLin6Asym', 'fsaverage']

        >>> spaces.get_spaces(standard=False)
        []

        >>> spaces.get_spaces(dim=(3,))
        ['MNI152NLin6Asym']

        >>> spaces.add(('MNI152NLin6Asym', {'res': '2'}))
        >>> spaces.get_spaces()
        ['MNI152NLin6Asym', 'fsaverage']

        >>> spaces.add(('func', {}))
        >>> spaces.get_spaces()
        ['MNI152NLin6Asym', 'fsaverage', 'func']

        >>> spaces.get_spaces(nonstandard=False)
        ['MNI152NLin6Asym', 'fsaverage']

        >>> spaces.get_spaces(standard=False)
        ['func']

        """
        out = []
        for s in self.references:
            if (
                s.fullname not in out
                and (s.standard is standard or s.standard is not nonstandard)
                and s.dim in dim
            ):
                out.append(s.fullname)
        return out

    def get_standard(self, full_spec=False, dim=(2, 3)):
        """
        Return output spaces.

        Parameters
        ----------
        full_spec : :obj:`bool`
            Return only fully-specified standard references (i.e., those that have
            either density or resolution set).
        dim : :obj:`tuple`, optional
            Desired dimensions of the standard spaces (default is ``(2, 3)``)

        """
        if not full_spec:
            return [s for s in self.references if s.standard and s.dim in dim]

        return [
            s
            for s in self.references
            if s.standard
            and s.dim in dim
            and (hasspec("res", s.spec) or hasspec("den", s.spec))
        ]

    def get_nonstandard(self, full_spec=False, dim=(2, 3)):
        """Return nonstandard spaces."""
        if not full_spec:
            return [s.space for s in self.references if not s.standard and s.dim in dim]
        return [
            s.space
            for s in self.references
            if not s.standard
            and s.dim in dim
            and (hasspec("res", s.spec) or hasspec("den", s.spec))
        ]

    def get_fs_spaces(self):
        """
        Return FreeSurfer spaces.

        Discards nonlegacy fsaverage values (i.e., those with a nonstandard density value).

        Examples
        --------
        >>> SpatialReferences([
        ...     'fsnative',
        ...     'fsaverage6',
        ...     'fsaverage5',
        ...     'MNI152NLin6Asym',
        ... ]).get_fs_spaces()
        ['fsnative', 'fsaverage6', 'fsaverage5']

        >>> SpatialReferences([
        ...     'fsnative',
        ...     'fsaverage6',
        ...     Reference(space='fsaverage', spec={'den': '30k'})
        ... ]).get_fs_spaces()
        ['fsnative', 'fsaverage6']

        """
        return [
            s.legacyname or s.space
            for s in self.references
            if s.legacyname or s.space == "fsnative"
        ]
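As ``check_space`` suggests, spaces can be handed to ``SpatialReferences`` in three equivalent shapes; a short illustrative sketch:

# All three inputs normalize to the same Reference:
sp = SpatialReferences()
sp.add('MNI152NLin6Asym')                  # plain string
assert ('MNI152NLin6Asym', {}) in sp       # (name, spec) tuple
assert Reference('MNI152NLin6Asym') in sp  # Reference instance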
Example 5
def get_parser():
    """Build parser object"""
    from smriprep.cli.utils import ParseTemplates, output_space as _output_space
    from templateflow.api import templates
    from packaging.version import Version
    from ..__about__ import __version__
    from ..workflows.bold.resampling import NONSTANDARD_REFERENCES
    from .version import check_latest, is_flagged

    verstr = 'fmriprep v{}'.format(__version__)
    currentv = Version(__version__)
    is_release = not any((currentv.is_devrelease, currentv.is_prerelease,
                          currentv.is_postrelease))

    parser = ArgumentParser(
        description='FMRIPREP: fMRI PREProcessing workflows',
        formatter_class=ArgumentDefaultsHelpFormatter)

    # Arguments as specified by BIDS-Apps
    # required, positional arguments
    # IMPORTANT: they must go directly with the parser object
    parser.add_argument(
        'bids_dir',
        action='store',
        type=Path,
        help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
        'be found at the top level in this folder).')
    parser.add_argument(
        'output_dir',
        action='store',
        type=Path,
        help='the output path for the outcomes of preprocessing and visual '
        'reports')
    parser.add_argument(
        'analysis_level',
        choices=['participant'],
        help='processing stage to be run, only "participant" in the case of '
        'FMRIPREP (see BIDS-Apps specification).')

    # optional arguments
    parser.add_argument('--version', action='version', version=verstr)

    g_bids = parser.add_argument_group('Options for filtering BIDS queries')
    g_bids.add_argument(
        '--skip_bids_validation',
        '--skip-bids-validation',
        action='store_true',
        default=False,
        help=
        'assume the input dataset is BIDS compliant and skip the validation')
    g_bids.add_argument(
        '--participant_label',
        '--participant-label',
        action='store',
        nargs='+',
        help='a space delimited list of participant identifiers or a single '
        'identifier (the sub- prefix can be removed)')
    # Re-enable when option is actually implemented
    # g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
    #                     help='select a specific session to be processed')
    # Re-enable when option is actually implemented
    # g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
    #                     help='select a specific run to be processed')
    g_bids.add_argument('-t',
                        '--task-id',
                        action='store',
                        help='select a specific task to be processed')
    g_bids.add_argument(
        '--echo-idx',
        action='store',
        type=int,
        help='select a specific echo to be processed in a multiecho series')

    g_perfm = parser.add_argument_group('Options to handle performance')
    g_perfm.add_argument('--nthreads',
                         '--n_cpus',
                         '-n-cpus',
                         action='store',
                         type=int,
                         help='maximum number of threads across all processes')
    g_perfm.add_argument('--omp-nthreads',
                         action='store',
                         type=int,
                         default=0,
                         help='maximum number of threads per-process')
    g_perfm.add_argument(
        '--mem_mb',
        '--mem-mb',
        action='store',
        default=0,
        type=int,
        help='upper bound memory limit for FMRIPREP processes')
    g_perfm.add_argument(
        '--low-mem',
        action='store_true',
        help='attempt to reduce memory usage (will increase disk usage '
        'in working directory)')
    g_perfm.add_argument('--use-plugin',
                         action='store',
                         default=None,
                         help='nipype plugin configuration file')
    g_perfm.add_argument('--anat-only',
                         action='store_true',
                         help='run anatomical workflows only')
    g_perfm.add_argument('--boilerplate',
                         action='store_true',
                         help='generate boilerplate only')
    g_perfm.add_argument(
        '--ignore-aroma-denoising-errors',
        action='store_true',
        default=False,
        help='DEPRECATED (now does nothing, see --error-on-aroma-warnings) '
        '- ignores the errors ICA_AROMA returns when there are no '
        'components classified as either noise or signal')
    g_perfm.add_argument(
        '--error-on-aroma-warnings',
        action='store_true',
        default=False,
        help='Raise an error if ICA_AROMA does not produce sensible output '
        '(e.g., if all the components are classified as signal or noise)')
    g_perfm.add_argument(
        "-v",
        "--verbose",
        dest="verbose_count",
        action="count",
        default=0,
        help="increases log verbosity for each occurence, debug level is -vvv")
    g_perfm.add_argument('--debug',
                         action='store_true',
                         default=False,
                         help='DEPRECATED - Does not do what you want.')

    g_conf = parser.add_argument_group('Workflow configuration')
    g_conf.add_argument(
        '--ignore',
        required=False,
        action='store',
        nargs="+",
        default=[],
        choices=['fieldmaps', 'slicetiming', 'sbref'],
        help=
        'ignore selected aspects of the input dataset to disable corresponding '
        'parts of the workflow (a space delimited list)')
    g_conf.add_argument(
        '--longitudinal',
        action='store_true',
        help='treat dataset as longitudinal - may increase runtime')
    g_conf.add_argument(
        '--t2s-coreg',
        action='store_true',
        help=
        'If provided with multi-echo BOLD dataset, create T2*-map and perform '
        'T2*-driven coregistration. When multi-echo data is provided and this '
        'option is not enabled, standard EPI-T1 coregistration is performed '
        'using the middle echo.')
    g_conf.add_argument(
        '--output-spaces',
        nargs='+',
        action=ParseTemplates,
        help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<TEMPLATE>[:res-<resolution>][:cohort-<label>][...]``, where ``<TEMPLATE>`` is \
a keyword (valid keywords: %s) or path pointing to a user-supplied template, and \
may be followed by optional, colon-separated parameters. \
Non-standard spaces (valid keywords: %s) imply specific orientations and sampling \
grids. \
Note that the ``res-*`` modifier does not define the resolution used for \
the spatial normalization.
For further details, please check out \
https://fmriprep.readthedocs.io/en/%s/spaces.html""" %
        (', '.join('"%s"' % s
                   for s in templates()), ', '.join(NONSTANDARD_REFERENCES),
         currentv.base_version if is_release else 'latest'))

    g_conf.add_argument(
        '--output-space',
        required=False,
        action='store',
        type=str,
        nargs='+',
        choices=[
            'T1w', 'template', 'fsnative', 'fsaverage', 'fsaverage6',
            'fsaverage5'
        ],
        help='DEPRECATED: please use ``--output-spaces`` instead.')
    g_conf.add_argument(
        '--template',
        required=False,
        action='store',
        type=str,
        choices=['MNI152NLin2009cAsym'],
        help='volume template space (default: MNI152NLin2009cAsym). '
        'DEPRECATED: please use ``--output-spaces`` instead.')
    g_conf.add_argument(
        '--template-resampling-grid',
        required=False,
        action='store',
        help='Keyword ("native", "1mm", or "2mm") or path to an existing file. '
        'Allows defining a reference grid for the resampling of BOLD images in template '
        'space. Keyword "native" will use the original BOLD grid as reference. '
        'Keywords "1mm" and "2mm" will use the corresponding isotropic template '
        'resolutions. If a path is given, the grid of that image will be used. '
        'It determines the field of view and resolution of the output images, '
        'but is not used in normalization. '
        'DEPRECATED: please use ``--output-spaces`` instead.')
    g_conf.add_argument(
        '--bold2t1w-dof',
        action='store',
        default=6,
        choices=[6, 9, 12],
        type=int,
        help='Degrees of freedom when registering BOLD to T1w images. '
        '6 degrees (rotation and translation) are used by default.')
    g_conf.add_argument(
        '--force-bbr',
        action='store_true',
        dest='use_bbr',
        default=None,
        help=
        'Always use boundary-based registration (no goodness-of-fit checks)')
    g_conf.add_argument(
        '--force-no-bbr',
        action='store_false',
        dest='use_bbr',
        default=None,
        help=
        'Do not use boundary-based registration (no goodness-of-fit checks)')
    g_conf.add_argument(
        '--medial-surface-nan',
        required=False,
        action='store_true',
        default=False,
        help=
        'Replace medial wall values with NaNs on functional GIFTI files. Only '
        'performed for GIFTI files mapped to a freesurfer subject (fsaverage or fsnative).'
    )
    g_conf.add_argument('--dummy-scans',
                        required=False,
                        action='store',
                        default=None,
                        type=int,
                        help='Number of non-steady-state volumes.')

    # ICA_AROMA options
    g_aroma = parser.add_argument_group(
        'Specific options for running ICA_AROMA')
    g_aroma.add_argument('--use-aroma',
                         action='store_true',
                         default=False,
                         help='add ICA_AROMA to your preprocessing stream')
    g_aroma.add_argument(
        '--aroma-melodic-dimensionality',
        action='store',
        default=-200,
        type=int,
        help='Exact or maximum number of MELODIC components to estimate '
        '(positive = exact, negative = maximum)')

    # Confounds options
    g_confounds = parser.add_argument_group(
        'Specific options for estimating confounds')
    g_confounds.add_argument(
        '--return-all-components',
        required=False,
        action='store_true',
        default=False,
        help=
        'Include all components estimated in CompCor decomposition in the confounds '
        'file instead of only the components sufficient to explain 50 percent of '
        'BOLD variance in each CompCor mask')
    g_confounds.add_argument(
        '--fd-spike-threshold',
        required=False,
        action='store',
        default=0.5,
        type=float,
        help=
        'Threshold for flagging a frame as an outlier on the basis of framewise '
        'displacement')
    g_confounds.add_argument(
        '--dvars-spike-threshold',
        required=False,
        action='store',
        default=1.5,
        type=float,
        help=
        'Threshold for flagging a frame as an outlier on the basis of standardised '
        'DVARS')

    #  ANTs options
    g_ants = parser.add_argument_group(
        'Specific options for ANTs registrations')
    g_ants.add_argument(
        '--skull-strip-template',
        action='store',
        default='OASIS30ANTs',
        type=_output_space,
        help='select a template for skull-stripping with antsBrainExtraction')
    g_ants.add_argument(
        '--skull-strip-fixed-seed',
        action='store_true',
        help='do not use a random seed for skull-stripping - will ensure '
        'run-to-run replicability when used with --omp-nthreads 1')

    # Fieldmap options
    g_fmap = parser.add_argument_group(
        'Specific options for handling fieldmaps')
    g_fmap.add_argument(
        '--fmap-bspline',
        action='store_true',
        default=False,
        help='fit a B-Spline field using least-squares (experimental)')
    g_fmap.add_argument(
        '--fmap-no-demean',
        action='store_false',
        default=True,
        help='do not remove median (within mask) from fieldmap')

    # SyN-unwarp options
    g_syn = parser.add_argument_group(
        'Specific options for SyN distortion correction')
    g_syn.add_argument(
        '--use-syn-sdc',
        action='store_true',
        default=False,
        help='EXPERIMENTAL: Use fieldmap-free distortion correction')
    g_syn.add_argument(
        '--force-syn',
        action='store_true',
        default=False,
        help='EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to '
        'fieldmap correction, if available')

    # FreeSurfer options
    g_fs = parser.add_argument_group(
        'Specific options for FreeSurfer preprocessing')
    g_fs.add_argument(
        '--fs-license-file',
        metavar='PATH',
        type=Path,
        help=
        'Path to FreeSurfer license key file. Get it (for free) by registering'
        ' at https://surfer.nmr.mgh.harvard.edu/registration.html')

    # Surface generation xor
    g_surfs = parser.add_argument_group('Surface preprocessing options')
    g_surfs.add_argument('--no-submm-recon',
                         action='store_false',
                         dest='hires',
                         help='disable sub-millimeter (hires) reconstruction')
    g_surfs_xor = g_surfs.add_mutually_exclusive_group()
    g_surfs_xor.add_argument('--cifti-output',
                             action='store_true',
                             default=False,
                             help='output BOLD files as CIFTI dtseries')
    g_surfs_xor.add_argument(
        '--fs-no-reconall',
        '--no-freesurfer',
        action='store_false',
        dest='run_reconall',
        help='disable FreeSurfer surface preprocessing.'
        ' Note: `--no-freesurfer` is deprecated and will be removed in 1.2.'
        ' Use `--fs-no-reconall` instead.')

    g_other = parser.add_argument_group('Other options')
    g_other.add_argument(
        '-w',
        '--work-dir',
        action='store',
        type=Path,
        default=Path('work'),
        help='path where intermediate results should be stored')
    g_other.add_argument(
        '--resource-monitor',
        action='store_true',
        default=False,
        help=
        'enable Nipype\'s resource monitoring to keep track of memory and CPU usage'
    )
    g_other.add_argument(
        '--reports-only',
        action='store_true',
        default=False,
        help=
        'only generate reports, don\'t run workflows. This will only rerun report '
        'aggregation, not reportlet generation for specific nodes.')
    g_other.add_argument(
        '--run-uuid',
        action='store',
        default=None,
        help='Specify UUID of previous run, to include error logs in report. '
        'No effect without --reports-only.')
    g_other.add_argument('--write-graph',
                         action='store_true',
                         default=False,
                         help='Write workflow graph.')
    g_other.add_argument(
        '--stop-on-first-crash',
        action='store_true',
        default=False,
        help='Force stopping on first crash, even if a work directory'
        ' was specified.')
    g_other.add_argument(
        '--notrack',
        action='store_true',
        default=False,
        help='Opt-out of sending tracking information of this run to '
        'the FMRIPREP developers. This information helps to '
        'improve FMRIPREP and provides an indicator of real '
        'world usage crucial for obtaining funding.')
    g_other.add_argument('--sloppy',
                         action='store_true',
                         default=False,
                         help='Use low-quality tools for speed - TESTING ONLY')

    latest = check_latest()
    if latest is not None and currentv < latest:
        print("""\
You are using fMRIPrep-%s, and a newer version of fMRIPrep is available: %s.
Please check out our documentation about how and when to upgrade:
https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading""" %
              (__version__, latest),
              file=sys.stderr)

    _blist = is_flagged()
    if _blist[0]:
        _reason = _blist[1] or 'unknown'
        print("""\
WARNING: Version %s of fMRIPrep (current) has been FLAGGED
(reason: %s).
That means some severe flaw was found in it and we strongly
discourage its usage.""" % (__version__, _reason),
              file=sys.stderr)

    return parser
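For orientation, a typical invocation of the parser above might look like this (the paths, labels, and spaces are illustrative only):

parser = get_parser()
args = parser.parse_args([
    '/data/bids_root', '/data/derivatives', 'participant',
    '--participant-label', '01',
    '--output-spaces', 'MNI152NLin2009cAsym:res-2', 'fsaverage5',
    '--fs-license-file', '/opt/freesurfer/license.txt',
])
print(args.output_spaces, args.bids_dir)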
Example 6
def get_parser():
    """Build parser object"""
    from smriprep.cli.utils import ParseTemplates, output_space as _output_space
    from templateflow.api import templates
    from packaging.version import Version
    from ..__about__ import __version__
    from ..config import NONSTANDARD_REFERENCES
    from .version import check_latest, is_flagged

    verstr = 'dmriprep v{}'.format(__version__)
    currentv = Version(__version__)
    is_release = not any((currentv.is_devrelease, currentv.is_prerelease,
                          currentv.is_postrelease))

    parser = ArgumentParser(
        description='dMRIPrep: dMRI PREProcessing workflows',
        formatter_class=ArgumentDefaultsHelpFormatter)

    # Arguments as specified by BIDS-Apps
    # required, positional arguments
    # IMPORTANT: they must go directly with the parser object
    parser.add_argument(
        'bids_dir',
        action='store',
        type=Path,
        help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
        'be found at the top level in this folder).')
    parser.add_argument(
        'output_dir',
        action='store',
        type=Path,
        help='the output path for the outcomes of preprocessing and visual '
        'reports')
    parser.add_argument(
        'analysis_level',
        choices=['participant'],
        help='processing stage to be run, only "participant" in the case of '
        'dMRIPrep (see BIDS-Apps specification).')

    # optional arguments
    parser.add_argument('--version', action='version', version=verstr)

    g_bids = parser.add_argument_group('Options for filtering BIDS queries')
    g_bids.add_argument(
        '--skip-bids-validation',
        action='store_true',
        default=False,
        help=
        'assume the input dataset is BIDS compliant and skip the validation')
    g_bids.add_argument(
        '--participant_label',
        '--participant-label',
        action='store',
        nargs='+',
        help='a space delimited list of participant identifiers or a single '
        'identifier (the sub- prefix can be removed)')
    # Re-enable when option is actually implemented
    # g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
    #                     help='select a specific session to be processed')
    # Re-enable when option is actually implemented
    # g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
    #                     help='select a specific run to be processed')

    g_perfm = parser.add_argument_group('Options to handle performance')
    g_perfm.add_argument('--nprocs',
                         '--n_cpus',
                         '-n-cpus',
                         action='store',
                         type=int,
                         help='maximum number of threads across all processes')
    g_perfm.add_argument('--omp-nthreads',
                         action='store',
                         type=int,
                         default=0,
                         help='maximum number of threads per-process')
    g_perfm.add_argument(
        '--mem_mb',
        '--mem-mb',
        action='store',
        default=0,
        type=int,
        help='upper bound memory limit for dMRIPrep processes')
    g_perfm.add_argument(
        '--low-mem',
        action='store_true',
        help='attempt to reduce memory usage (will increase disk usage '
        'in working directory)')
    g_perfm.add_argument('--use-plugin',
                         action='store',
                         default=None,
                         help='nipype plugin configuration file')
    g_perfm.add_argument('--anat-only',
                         action='store_true',
                         help='run anatomical workflows only')
    g_perfm.add_argument('--boilerplate',
                         action='store_true',
                         help='generate boilerplate only')
    g_perfm.add_argument(
        "-v",
        "--verbose",
        dest="verbose_count",
        action="count",
        default=0,
        help="increases log verbosity for each occurence, debug level is -vvv")

    g_conf = parser.add_argument_group('Workflow configuration')
    g_conf.add_argument(
        '--ignore',
        required=False,
        action='store',
        nargs="+",
        default=[],
        choices=['sdc'],
        help=
        'ignore selected aspects of the input dataset to disable corresponding '
        'parts of the workflow (a space delimited list)')
    g_conf.add_argument(
        '--longitudinal',
        action='store_true',
        help='treat dataset as longitudinal - may increase runtime')
    g_conf.add_argument(
        '--output-spaces',
        nargs='+',
        action=ParseTemplates,
        help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<TEMPLATE>[:res-<resolution>][:cohort-<label>][...]``, where ``<TEMPLATE>`` is \
a keyword (valid keywords: %s) or path pointing to a user-supplied template, and \
may be followed by optional, colon-separated parameters. \
Non-standard spaces (valid keywords: %s) imply specific orientations and sampling \
grids. \
Note that the ``res-*`` modifier does not define the resolution used for \
the spatial normalization.
For further details, please check out \
https://dmriprep.readthedocs.io/en/%s/spaces.html""" %
        (', '.join('"%s"' % s
                   for s in templates()), ', '.join(NONSTANDARD_REFERENCES),
         currentv.base_version if is_release else 'latest'))

    #  ANTs options
    g_ants = parser.add_argument_group(
        'Specific options for ANTs registrations')
    g_ants.add_argument(
        '--skull-strip-template',
        action='store',
        default='OASIS30ANTs',
        type=_output_space,
        help='select a template for skull-stripping with antsBrainExtraction')
    g_ants.add_argument(
        '--skull-strip-fixed-seed',
        action='store_true',
        help='do not use a random seed for skull-stripping - will ensure '
        'run-to-run replicability when used with --omp-nthreads 1')

    # Fieldmap options
    g_fmap = parser.add_argument_group(
        'Specific options for handling fieldmaps')
    g_fmap.add_argument(
        '--fmap-bspline',
        action='store_true',
        default=False,
        help='fit a B-Spline field using least-squares (experimental)')
    g_fmap.add_argument(
        '--fmap-no-demean',
        action='store_false',
        default=True,
        help='do not remove median (within mask) from fieldmap')

    # SyN-unwarp options
    g_syn = parser.add_argument_group(
        'Specific options for SyN distortion correction')
    g_syn.add_argument(
        '--use-syn-sdc',
        action='store_true',
        default=False,
        help='EXPERIMENTAL: Use fieldmap-free distortion correction')
    g_syn.add_argument(
        '--force-syn',
        action='store_true',
        default=False,
        help='EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to '
        'fieldmap correction, if available')

    # FreeSurfer options
    g_fs = parser.add_argument_group(
        'Specific options for FreeSurfer preprocessing')
    g_fs.add_argument(
        '--fs-license-file',
        metavar='PATH',
        type=Path,
        help=
        'Path to FreeSurfer license key file. Get it (for free) by registering'
        ' at https://surfer.nmr.mgh.harvard.edu/registration.html')

    # Surface generation xor
    g_surfs = parser.add_argument_group('Surface preprocessing options')
    g_surfs_xor = g_surfs.add_mutually_exclusive_group()
    g_surfs_xor.add_argument(
        '--no-submm-recon',
        action='store_false',
        dest='hires',
        help='disable sub-millimeter (hires) reconstruction')
    g_surfs_xor.add_argument(
        '--fs-no-reconall',
        '--no-freesurfer',
        action='store_false',
        dest='run_reconall',
        help='disable FreeSurfer surface preprocessing.'
        ' Note: `--no-freesurfer` is deprecated and will be removed in 1.2.'
        ' Use `--fs-no-reconall` instead.')

    g_other = parser.add_argument_group('Other options')
    g_other.add_argument(
        '-w',
        '--work-dir',
        action='store',
        type=Path,
        default=Path('work'),
        help='path where intermediate results should be stored')
    g_other.add_argument(
        '--resource-monitor',
        action='store_true',
        default=False,
        help=
        'enable Nipype\'s resource monitoring to keep track of memory and CPU usage'
    )
    g_other.add_argument(
        '--reports-only',
        action='store_true',
        default=False,
        help=
        'only generate reports, don\'t run workflows. This will only rerun report '
        'aggregation, not reportlet generation for specific nodes.')
    g_other.add_argument(
        '--run-uuid',
        action='store',
        default=None,
        help='Specify UUID of previous run, to include error logs in report. '
        'No effect without --reports-only.')
    g_other.add_argument('--write-graph',
                         action='store_true',
                         default=False,
                         help='Write workflow graph.')
    g_other.add_argument(
        '--stop-on-first-crash',
        action='store_true',
        default=False,
        help='Force stopping on first crash, even if a work directory'
        ' was specified.')
    g_other.add_argument(
        '--notrack',
        action='store_true',
        default=False,
        help='Opt-out of sending tracking information of this run to '
        'the dMRIPrep developers. This information helps to '
        'improve dMRIPrep and provides an indicator of real '
        'world usage crucial for obtaining funding.')
    g_other.add_argument('--sloppy',
                         action='store_true',
                         default=False,
                         dest='debug',
                         help='Use low-quality tools for speed - TESTING ONLY')

    latest = check_latest()
    if latest is not None and currentv < latest:
        print("""\
You are using dMRIPrep-%s, and a newer version of dMRIPrep is available: %s.
Please check out our documentation about how and when to upgrade:
https://dmriprep.readthedocs.io/en/latest/faq.html#upgrading""" %
              (__version__, latest),
              file=sys.stderr)

    _blist = is_flagged()
    if _blist[0]:
        _reason = _blist[1] or 'unknown'
        print("""\
WARNING: Version %s of dMRIPrep (current) has been FLAGGED
(reason: %s).
That means some severe flaw was found in it and we strongly
discourage its usage.""" % (__version__, _reason),
              file=sys.stderr)

    return parser
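One subtlety worth noting: unlike the fMRIPrep parser above, ``--sloppy`` here stores into ``debug`` (``dest='debug'``). A quick illustrative check:

parser = get_parser()
args = parser.parse_args(['/data/bids_root', '/out', 'participant', '--sloppy'])
assert args.debug is True  # --sloppy maps onto the `debug` attribute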
Example 7
def get_parser():
    """Build parser object"""
    from pathlib import Path
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
    from templateflow.api import templates
    from .utils import ParseTemplates, output_space as _output_space
    from ..__about__ import __version__

    parser = ArgumentParser(description='sMRIPrep: Structural MRI PREProcessing workflows',
                            formatter_class=RawTextHelpFormatter)

    # Arguments as specified by BIDS-Apps
    # required, positional arguments
    # IMPORTANT: they must go directly with the parser object
    parser.add_argument('bids_dir', action='store', type=Path,
                        help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
                             'be found at the top level in this folder).')
    parser.add_argument('output_dir', action='store', type=Path,
                        help='the output path for the outcomes of preprocessing and visual '
                             'reports')
    parser.add_argument('analysis_level', choices=['participant'],
                        help='processing stage to be run, only "participant" in the case of '
                             'sMRIPrep (see BIDS-Apps specification).')

    # optional arguments
    parser.add_argument('--version', action='version', version='smriprep v{}'.format(__version__))

    g_bids = parser.add_argument_group('Options for filtering BIDS queries')
    g_bids.add_argument('--participant-label', '--participant_label', action='store', nargs='+',
                        help='a space delimited list of participant identifiers or a single '
                             'identifier (the sub- prefix can be removed)')

    g_perfm = parser.add_argument_group('Options to handle performance')
    g_perfm.add_argument('--nprocs', '--ncpus', '--nthreads', '--n_cpus', '-n-cpus',
                         action='store', type=int,
                         help='number of CPUs to be used.')
    g_perfm.add_argument('--omp-nthreads', action='store', type=int, default=0,
                         help='maximum number of threads per-process')
    g_perfm.add_argument('--mem-gb', '--mem_gb', action='store', default=0, type=float,
                         help='upper bound memory limit for sMRIPrep processes (in GB).')
    g_perfm.add_argument('--low-mem', action='store_true',
                         help='attempt to reduce memory usage (will increase disk usage '
                              'in working directory)')
    g_perfm.add_argument('--use-plugin', action='store', default=None,
                         help='nipype plugin configuration file')
    g_perfm.add_argument('--boilerplate', action='store_true',
                         help='generate boilerplate only')
    g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
                         help="increases log verbosity for each occurence, debug level is -vvv")

    g_conf = parser.add_argument_group('Workflow configuration')
    g_conf.add_argument(
        '--output-spaces', nargs='+', action=ParseTemplates,
        help='paths or keywords prescribing the standard spaces to which the input T1w image'
             ' will be normalized (valid keywords are: %s).' % ', '.join('"%s"' % s for s in templates()))
    g_conf.add_argument(
        '--longitudinal', action='store_true',
        help='treat dataset as longitudinal - may increase runtime')
    g_conf.add_argument(
        '--template', '--spatial-normalization-target', action='store', type=str,
        choices=[tpl for tpl in templates() if not tpl.startswith('fs')],
        help='DEPRECATED: spatial normalization target (one TemplateFlow Identifier)')

    #  ANTs options
    g_ants = parser.add_argument_group('Specific options for ANTs registrations')
    g_ants.add_argument(
        '--skull-strip-template', action='store', default='OASIS30ANTs', type=_output_space,
        help='select a template for skull-stripping with antsBrainExtraction')
    g_ants.add_argument('--skull-strip-fixed-seed', action='store_true',
                        help='do not use a random seed for skull-stripping - will ensure '
                             'run-to-run replicability when used with --omp-nthreads 1')

    # FreeSurfer options
    g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing')
    g_fs.add_argument(
        '--fs-license-file', metavar='PATH', type=Path,
        help='Path to FreeSurfer license key file. Get it (for free) by registering'
             ' at https://surfer.nmr.mgh.harvard.edu/registration.html')
    g_fs.add_argument(
        '--fs-subjects-dir', metavar='PATH', type=Path,
        help='Path to existing FreeSurfer subjects directory to reuse. '
             '(default: OUTPUT_DIR/freesurfer)')

    # Surface generation xor
    g_surfs = parser.add_argument_group('Surface preprocessing options')
    g_surfs.add_argument('--no-submm-recon', action='store_false', dest='hires',
                         help='disable sub-millimeter (hires) reconstruction')
    g_surfs_xor = g_surfs.add_mutually_exclusive_group()

    g_surfs_xor.add_argument(
        '--fs-output-spaces', required=False, action='store', nargs='+',
        choices=['fsnative', 'fsaverage', 'fsaverage6', 'fsaverage5'],
        help="""DEPRECATED - configure Freesurfer's output spaces:
  - fsnative: individual subject surface
  - fsaverage*: FreeSurfer average meshes
this argument can be single value or a space delimited list,
for example: --fs-output-spaces fsnative fsaverage fsaverage5."""
    )
    g_surfs_xor.add_argument('--fs-no-reconall', '--no-freesurfer',
                             action='store_false', dest='run_reconall',
                             help='disable FreeSurfer surface preprocessing.'
                             ' Note: `--no-freesurfer` is deprecated and will be removed in 1.2.'
                             ' Use `--fs-no-reconall` instead.')

    g_other = parser.add_argument_group('Other options')
    g_other.add_argument('-w', '--work-dir', action='store', type=Path, default=Path('work'),
                         help='path where intermediate results should be stored')
    g_other.add_argument(
        '--resource-monitor', action='store_true', default=False,
        help='enable Nipype\'s resource monitoring to keep track of memory and CPU usage')
    g_other.add_argument(
        '--reports-only', action='store_true', default=False,
        help='only generate reports, don\'t run workflows. This will only rerun report '
             'aggregation, not reportlet generation for specific nodes.')
    g_other.add_argument(
        '--run-uuid', action='store', default=None,
        help='Specify UUID of previous run, to include error logs in report. '
             'No effect without --reports-only.')
    g_other.add_argument('--write-graph', action='store_true', default=False,
                         help='Write workflow graph.')
    g_other.add_argument('--stop-on-first-crash', action='store_true', default=False,
                         help='Force stopping on first crash, even if a work directory'
                              ' was specified.')
    g_other.add_argument('--notrack', action='store_true', default=False,
                         help='Opt-out of sending tracking information of this run to '
                              'the sMRIPrep developers. This information helps to '
                              'improve sMRIPrep and provides an indicator of real '
                              'world usage crucial for obtaining funding.')
    g_other.add_argument('--sloppy', action='store_true', default=False,
                         help='Use low-quality tools for speed - TESTING ONLY')

    return parser
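Because ``--fs-output-spaces`` and ``--fs-no-reconall`` live in a mutually exclusive group, argparse refuses to combine them (it prints usage and exits via ``SystemExit``); a quick illustration:

parser = get_parser()
try:
    parser.parse_args(['/data/bids_root', '/out', 'participant',
                       '--fs-output-spaces', 'fsaverage5', '--fs-no-reconall'])
except SystemExit:
    print('cannot combine --fs-output-spaces with --fs-no-reconall')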
Example 8
def init_anat_norm_wf(
    debug,
    omp_nthreads,
    reportlets_dir,
    template_list,
    template_specs=None,
):
    """
    An individual spatial normalization workflow using ``antsRegistration``.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from smriprep.workflows.norm import init_anat_norm_wf
        wf = init_anat_norm_wf(
            debug=False,
            omp_nthreads=1,
            reportlets_dir='.',
            template_list=['MNI152NLin2009cAsym', 'MNI152NLin6Asym'],
        )

    **Parameters**

        debug : bool
            Apply sloppy arguments to speed up processing. Use with caution:
            registration will be very inaccurate.
        omp_nthreads : int
            Maximum number of threads an individual process may use.
        reportlets_dir : str
            Directory in which to save reportlets.
        template_list : list of str
            List of TemplateFlow identifiers (e.g., ``MNI152NLin6Asym``)
            specifying the target templates for spatial normalization. In the
            future, this parameter should also accept paths to custom/private
            templates organized following TemplateFlow's conventions.
        template_specs : list of dict, optional
            One specification dictionary per entry in ``template_list``
            (e.g., ``{'res': 1}``); defaults to an empty spec for each
            template.

    **Inputs**

        moving_image
            The input image that will be normalized to standard space.
        moving_mask
            A precise brain mask separating skull/skin/fat from brain
            structures.
        moving_segmentation
            A brain tissue segmentation of the ``moving_image``.
        moving_tpms
            Tissue probability maps (TPMs) corresponding to the
            ``moving_segmentation``.
        lesion_mask
            (optional) A mask to exclude regions from the cost-function
            input domain to enable standardization of lesioned brains.
        orig_t1w
            The original T1w image from the BIDS structure.

    **Outputs**

        warped
            The T1w after spatial normalization, in template space.
        forward_transform
            The T1w-to-template transform.
        reverse_transform
            The template-to-T1w transform.
        tpl_mask
            The ``moving_mask`` in template space (matches ``warped`` output).
        tpl_seg
            The ``moving_segmentation`` in template space (matches ``warped``
            output).
        tpl_tpms
            The ``moving_tpms`` in template space (matches ``warped`` output).
        template
            The input parameter ``template``, for further use in nodes
            depending on this workflow.

    """

    if not isinstance(template_list, (list, tuple)):
        template_list = [template_list]

    templateflow = templates()
    if any(template not in templateflow for template in template_list):
        raise NotImplementedError(
            'This is embarrassing - custom templates are not (yet) supported. '
            'Please make sure none of the options already available via TemplateFlow '
            'fit your needs.')

    workflow = Workflow('anat_norm_wf')

    workflow.__desc__ = """\
Volume-based spatial normalization to {targets} ({targets_id}) was performed through
nonlinear registration with `antsRegistration` (ANTs {ants_ver}),
using brain-extracted versions of both T1w reference and the T1w template.
The following template{tpls} selected for spatial normalization:
""".format(ants_ver=ANTsInfo.version() or '(version unknown)',
           targets='%s standard space%s' %
           (defaultdict('several'.format, {
               1: 'one',
               2: 'two',
               3: 'three',
               4: 'four'
           })[len(template_list)], 's' * (len(template_list) != 1)),
           targets_id=', '.join(template_list),
           tpls=(' was', 's were')[len(template_list) != 1])
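    # The formatting above keeps the boilerplate grammatical for any count:
    # defaultdict('several'.format, {1: 'one', ...}) spells out counts 1-4
    # and falls back to 'several', while (' was', 's were')[...] and
    # 's' * (...) switch between singular and plural phrasing.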

    if not template_specs:
        template_specs = [{}] * len(template_list)

    if len(template_list) != len(template_specs):
        raise RuntimeError(
            'Number of templates (%d) doesn\'t match the number of specs '
            '(%d) provided.' % (len(template_list), len(template_specs)))

    # Append template citations to description
    for template in template_list:
        template_meta = get_metadata(template)
        template_refs = ['@%s' % template.lower()]

        if template_meta.get('RRID', None):
            template_refs += ['RRID:%s' % template_meta['RRID']]

        workflow.__desc__ += """\
*{template_name}* [{template_refs}; TemplateFlow ID: {template}]""".format(
            template=template,
            template_name=template_meta['Name'],
            template_refs=', '.join(template_refs))
        workflow.__desc__ += (', ', '.')[template == template_list[-1]]

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'moving_image', 'moving_mask', 'moving_segmentation', 'moving_tpms',
        'lesion_mask', 'orig_t1w', 'template'
    ]),
                        name='inputnode')
    inputnode.iterables = [('template', template_list)]
    out_fields = [
        'warped', 'forward_transform', 'reverse_transform', 'tpl_mask',
        'tpl_seg', 'tpl_tpms', 'template'
    ]
    poutputnode = pe.Node(niu.IdentityInterface(fields=out_fields),
                          name='poutputnode')
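
    # 'template' is declared as an iterable on the input node, so every node
    # downstream runs once per requested template; poutputnode gathers the
    # per-template results that the JoinNode at the bottom merges into
    # synchronized lists.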

    tpl_specs = pe.Node(niu.Function(function=_select_specs),
                        name='tpl_specs',
                        run_without_submitting=True)
    tpl_specs.inputs.template_list = template_list
    tpl_specs.inputs.template_specs = template_specs

    tpl_select = pe.Node(niu.Function(function=_get_template),
                         name='tpl_select',
                         run_without_submitting=True)

    # With the improvements from poldracklab/niworkflows#342 this truncation is now necessary
    trunc_mov = pe.Node(ImageMath(operation='TruncateImageIntensity',
                                  op2='0.01 0.999 256'),
                        name='trunc_mov')

    registration = pe.Node(RobustMNINormalization(
        float=True,
        flavor=['precise', 'testing'][debug],
    ),
                           name='registration',
                           n_procs=omp_nthreads,
                           mem_gb=2)
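    # Note: ['precise', 'testing'][debug] indexes the list with a bool, so
    # debug=False selects the 'precise' flavor and debug=True the fast,
    # low-quality 'testing' flavor.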

    # Resample T1w-space inputs
    tpl_moving = pe.Node(ApplyTransforms(dimension=3,
                                         default_value=0,
                                         float=True,
                                         interpolation='LanczosWindowedSinc'),
                         name='tpl_moving')
    tpl_mask = pe.Node(ApplyTransforms(dimension=3,
                                       default_value=0,
                                       float=True,
                                       interpolation='MultiLabel'),
                       name='tpl_mask')

    tpl_seg = pe.Node(ApplyTransforms(dimension=3,
                                      default_value=0,
                                      float=True,
                                      interpolation='MultiLabel'),
                      name='tpl_seg')

    tpl_tpms = pe.MapNode(ApplyTransforms(dimension=3,
                                          default_value=0,
                                          float=True,
                                          interpolation='Gaussian'),
                          iterfield=['input_image'],
                          name='tpl_tpms')
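
    # Interpolation choices track the data type: LanczosWindowedSinc for the
    # continuous T1w image, MultiLabel for discrete masks/segmentations, and
    # Gaussian for the smooth tissue probability maps.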

    workflow.connect([
        (inputnode, tpl_specs, [('template', 'template')]),
        (inputnode, tpl_select, [('template', 'template')]),
        (inputnode, registration, [('template', 'template')]),
        (inputnode, trunc_mov, [('moving_image', 'op1')]),
        (inputnode, registration, [('moving_mask', 'moving_mask'),
                                   ('lesion_mask', 'lesion_mask')]),
        (inputnode, tpl_moving, [('moving_image', 'input_image')]),
        (inputnode, tpl_mask, [('moving_mask', 'input_image')]),
        (tpl_specs, tpl_select, [('out', 'template_spec')]),
        (tpl_specs, registration, [(('out', _drop_res), 'template_spec')]),
        (tpl_select, tpl_moving, [('out', 'reference_image')]),
        (tpl_select, tpl_mask, [('out', 'reference_image')]),
        (tpl_select, tpl_seg, [('out', 'reference_image')]),
        (tpl_select, tpl_tpms, [('out', 'reference_image')]),
        (trunc_mov, registration, [('output_image', 'moving_image')]),
        (registration, tpl_moving, [('composite_transform', 'transforms')]),
        (registration, tpl_mask, [('composite_transform', 'transforms')]),
        (inputnode, tpl_seg, [('moving_segmentation', 'input_image')]),
        (registration, tpl_seg, [('composite_transform', 'transforms')]),
        (inputnode, tpl_tpms, [('moving_tpms', 'input_image')]),
        (registration, tpl_tpms, [('composite_transform', 'transforms')]),
        (registration, poutputnode,
         [('composite_transform', 'forward_transform'),
          ('inverse_composite_transform', 'reverse_transform')]),
        (tpl_moving, poutputnode, [('output_image', 'warped')]),
        (tpl_mask, poutputnode, [('output_image', 'tpl_mask')]),
        (tpl_seg, poutputnode, [('output_image', 'tpl_seg')]),
        (tpl_tpms, poutputnode, [('output_image', 'tpl_tpms')]),
        (inputnode, poutputnode, [('template', 'template')]),
    ])

    # Generate and store report
    msk_select = pe.Node(niu.Function(
        function=_get_template,
        input_names=['template', 'template_spec', 'suffix', 'desc']),
                         name='msk_select',
                         run_without_submitting=True)
    msk_select.inputs.desc = 'brain'
    msk_select.inputs.suffix = 'mask'

    norm_msk = pe.Node(niu.Function(
        function=_rpt_masks,
        output_names=['before', 'after'],
        input_names=['mask_file', 'before', 'after', 'after_mask']),
                       name='norm_msk')
    norm_rpt = pe.Node(SimpleBeforeAfter(), name='norm_rpt', mem_gb=0.1)
    norm_rpt.inputs.after_label = 'Participant'  # after

    ds_t1_2_tpl_report = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, keep_dtype=True),
                                 name='ds_t1_2_tpl_report',
                                 run_without_submitting=True)

    workflow.connect([
        (inputnode, msk_select, [('template', 'template')]),
        (inputnode, norm_rpt, [('template', 'before_label')]),
        (tpl_mask, norm_msk, [('output_image', 'after_mask')]),
        (tpl_specs, msk_select, [('out', 'template_spec')]),
        (msk_select, norm_msk, [('out', 'mask_file')]),
        (tpl_select, norm_msk, [('out', 'before')]),
        (tpl_moving, norm_msk, [('output_image', 'after')]),
        (norm_msk, norm_rpt, [('before', 'before'), ('after', 'after')]),
        (inputnode, ds_t1_2_tpl_report, [('template', 'space'),
                                         ('orig_t1w', 'source_file')]),
        (norm_rpt, ds_t1_2_tpl_report, [('out_report', 'in_file')]),
    ])

    # Provide synchronized output
    outputnode = pe.JoinNode(niu.IdentityInterface(fields=out_fields),
                             name='outputnode',
                             joinsource='inputnode')
    workflow.connect([
        (poutputnode, outputnode, [(f, f) for f in out_fields]),
    ])

    return workflow
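
The workflow above calls four private helpers (`_select_specs`, `_get_template`, `_drop_res`, `_rpt_masks`) that live elsewhere in `smriprep.workflows.norm` and are not part of this excerpt. The sketches below are reconstructions inferred from how the nodes are wired, not the project's actual implementations:

def _select_specs(template, template_list, template_specs):
    """Pick the spec dict paired with the currently iterated template."""
    return template_specs[template_list.index(template)]


def _get_template(template, template_spec=None, suffix='T1w', desc=None):
    """Fetch one image of ``template`` from the TemplateFlow archive."""
    from templateflow.api import get
    return str(get(template, desc=desc, suffix=suffix,
                   **(template_spec or {})))


def _drop_res(in_dict):
    """Drop the resolution entry so registration may pick its own grid."""
    return {k: v for k, v in in_dict.items() if k not in ('res', 'resolution')}


def _rpt_masks(mask_file, before, after, after_mask=None):
    """Mask the before/after images feeding the normalization reportlet."""
    from os.path import abspath
    import nibabel as nb
    msk = nb.load(mask_file).get_fdata() > 0
    bnii = nb.load(before)
    nb.Nifti1Image(bnii.get_fdata() * msk, bnii.affine,
                   bnii.header).to_filename('before.nii.gz')
    if after_mask is not None:
        msk = nb.load(after_mask).get_fdata() > 0
    anii = nb.load(after)
    nb.Nifti1Image(anii.get_fdata() * msk, anii.affine,
                   anii.header).to_filename('after.nii.gz')
    return abspath('before.nii.gz'), abspath('after.nii.gz')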