Example #1
    def _run_interface(self, runtime):

        # Set up useful properties
        self.conf_raw = pd.read_csv(self.inputs.conf_raw, sep='\t')
        with open(self.inputs.conf_json, 'r') as json_file:
            self.conf_json = json.load(json_file)
        self.n_volumes = len(self.conf_raw)
        self.conf_prep = pd.DataFrame()

        # entities
        entities = parse_file_entities_with_pipelines(self.inputs.conf_raw)

        # Create preprocessed confounds step-by-step
        self._filter_motion_parameters()
        self._filter_tissue_signals()
        self._filter_acompcors()
        self._create_spike_regressors()
        self._create_summary_dict(subject=entities.get('subject'),
                                  task=entities.get('task'),
                                  session=entities.get('session'),
                                  run=entities.get('run'))

        # Store output
        entities['pipeline'] = self.inputs.pipeline['name']
        conf_prep = join(self.inputs.output_dir,
                         build_path(entities, self.conf_prep_pattern, False))
        conf_summary = join(
            self.inputs.output_dir,
            build_path(entities, self.conf_summary_pattern, False))
        self.conf_prep.to_csv(conf_prep, sep='\t', index=False, na_rep=0)
        with open(conf_summary, 'w') as f:
            json.dump(self.conf_summary, f)
        self._results['conf_prep'] = conf_prep
        self._results['conf_summary'] = conf_summary
        return runtime
Example #2
    def _run_interface(self, runtime):
        fname = self.inputs.fmri_denoised
        entities = parse_file_entities(fname)
        bold_img = nb.load(fname)
        parcellation_file = get_parcellation_file_path(entities['space'])
        masker = NiftiLabelsMasker(labels_img=parcellation_file,
                                   standardize=True)
        time_series = masker.fit_transform(bold_img, confounds=None)

        corr_measure = ConnectivityMeasure(kind='correlation')
        corr_mat = corr_measure.fit_transform([time_series])[0]
        entities['pipeline'] = extract_pipeline_from_path(fname)
        conn_file = join(self.inputs.output_dir,
                         build_path(entities, self.conn_file_pattern, False))
        carpet_plot_file = join(
            self.inputs.output_dir,
            build_path(entities, self.carpet_plot_pattern, False))
        matrix_plot_file = join(
            self.inputs.output_dir,
            build_path(entities, self.matrix_plot_pattern, False))

        make_carpetplot(time_series, carpet_plot_file)
        mplot = plot_matrix(corr_mat, vmin=-1, vmax=1)
        mplot.figure.savefig(matrix_plot_file)

        np.save(conn_file, corr_mat)

        self._results['corr_mat'] = conn_file
        self._results['carpet_plot'] = carpet_plot_file
        self._results['matrix_plot'] = matrix_plot_file

        return runtime
Example #3
    def test_strict_build_path(self):

        # With strict matching, every passed entity must be matched inside a single pattern
        pats = ['[{session}/]{task}/r-{run}.nii.gz',
                't-{task}/{subject}-{run}.nii.gz']
        entities = {'subject': 1, 'task': "A", 'run': 2}
        assert build_path(entities, pats, True)
        entities = {'subject': 1, 'task': "A", 'age': 22}
        assert not build_path(entities, pats, True)
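A short sketch of the contrast with non-strict matching may help here (assuming the same pybids build_path and the pats defined in the test above): with strict=False, extra entities are ignored as long as each mandatory entity in a pattern is present.

# Sketch: contrast strict and non-strict matching (hypothetical entity values).
entities = {'subject': 1, 'task': 'A', 'run': 2, 'session': 'S1'}
# strict=True fails: no single pattern consumes all four entities.
assert not build_path(entities, pats, True)
# strict=False succeeds: 'subject' is simply ignored by the first pattern.
assert build_path(entities, pats, False) == 'S1/A/r-2.nii.gz'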
Example #4
    def _list_outputs(self):
        from bids.layout.writing import build_path

        base_dir = Path(self.inputs.base_directory)
        base_dir.mkdir(exist_ok=True, parents=True)  # pylint: disable=E1123

        path_patterns = self.inputs.path_patterns
        if not isdefined(path_patterns):
            path_patterns = None

        out_files = []
        for entities, in_file in zip(self.inputs.entities,
                                     self.inputs.in_file):
            ents = {**self.inputs.fixed_entities}
            ents.update(entities)

            ents = {k: snake_to_camel(str(v)) for k, v in ents.items()}

            out_fname = base_dir / build_path(ents, path_patterns)
            out_fname.parent.mkdir(exist_ok=True, parents=True)

            _copy_or_convert(in_file, out_fname)
            out_files.append(out_fname)

        return {"out_file": out_files}
Example #5
def build_path(entities, path_patterns, strict=False):
    """
    Extension of bids.build_path that throws exception instead of returning None
    Args:
        entities:
        A dictionary mapping entity names to entity values.
        Entities with ``None`` or empty-string value will be removed.
        Otherwise, entities will be cast to string values, therefore
        if any format is expected (e.g., zero-padded integers), the
        value should be formatted.
        path_patterns:
        A dictionary mapping entity names to entity values.
        Entities with ``None`` or empty-string value will be removed.
        Otherwise, entities will be cast to string values, therefore
        if any format is expected (e.g., zero-padded integers), the
        value should be formatted.
        strict:
        If True, all passed entities must be matched inside a
        pattern in order to be a valid match. If False, extra entities will
        be ignored so long as all mandatory entities are found.

    Returns: built path
    """
    path = writing.build_path(entities, path_patterns, strict)
    if path is not None:
        return path
    else:
        raise ValueError(
            f"Unable to build path with given entities: {entities}\n and path pattern {path_patterns}"
        )
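A minimal usage sketch for this wrapper (hypothetical pattern and entity values; assumes pybids is installed):

# Minimal sketch of the wrapper above (hypothetical pattern and entities).
pattern = 'sub-{subject}/func/sub-{subject}_task-{task}_bold.nii.gz'
print(build_path({'subject': '01', 'task': 'rest'}, pattern))
# -> sub-01/func/sub-01_task-rest_bold.nii.gz

# When no pattern can be filled (here, 'task' is missing), plain
# bids.build_path would return None; this wrapper raises ValueError instead.
try:
    build_path({'subject': '01'}, pattern)
except ValueError as err:
    print(err)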
Example #6
    def _run_interface(self, runtime):
        # noinspection PyUnreachableCode
        if __debug__:  # sanity check
            entities = [
                parse_file_entities_with_pipelines(path)
                for path in self.inputs.corr_mat
            ]
            assert_all_entities_equal(entities, "session", "task", "run",
                                      "pipeline")
        n_corr_mat = len(self.inputs.corr_mat)
        n_rois = 200
        group_corr_mat = np.zeros((n_corr_mat, n_rois, n_rois))
        for i, file in enumerate(self.inputs.corr_mat):
            group_corr_mat[i, :, :] = np.load(file)
        entities = parse_file_entities_with_pipelines(self.inputs.corr_mat[0])
        group_corr_file = join(
            self.inputs.output_dir,
            build_path(entities, self.group_corr_pattern, False))
        assert not exists(
            group_corr_file
        ), f"Group connectivity file already exists {group_corr_file}"
        np.save(group_corr_file, group_corr_mat)

        self._results['group_corr_mat'] = group_corr_file
        return runtime
Example #7
    def put(self, file_path: str) -> str:
        bids_path = self.bids_paths.get(file_path)

        if bids_path is not None:
            return bids_path  # already added

        tags = self.database.tags(file_path)
        assert isinstance(tags, dict)

        bids_tags = dict()
        for k, v in tags.items():
            bids_entity = k

            if bids_entity in entity_longnames:  # map to long names
                bids_entity = entity_longnames[bids_entity]

            if bids_entity == "task" and tags.get("datatype") == "fmap":
                assert "acq" not in tags
                bids_entity = "acquisition"

            if bids_entity == "run":
                if not v.isdecimal():  # enforce run to be numerical
                    run_identifier = str(int_digest(v))[:4]
                    logger.warning(
                        f'Converting run identifier "{v}" to number "{run_identifier}" for BIDS-compliance'
                    )
                    v = run_identifier

            if k in entities:
                bids_tags[bids_entity] = format_like_bids(v)
            else:
                if tags.get("datatype") == "fmap":
                    if k == "suffix":
                        k = "fmap"
                bids_tags[k] = v

        bids_path_result = build_path(bids_tags, bids_config.default_path_patterns)

        if bids_path_result is None:
            raise ValueError(f'Unable to build BIDS-compliant path for "{file_path}"')

        bids_path = str(bids_path_result)

        if bids_path in self.file_paths:
            if self.file_paths[bids_path] != str(file_path):
                raise ValueError("Cannot assign different files to the same BIDS path")

        self.bids_paths[file_path] = str(bids_path)
        self.file_paths[bids_path] = str(file_path)

        self.bids_tags[bids_path] = bids_tags

        self._metadata[bids_path] = get_bids_metadata(self.database, file_path)

        return bids_path
Example #8
def bids_transform(target_project, sub_id, ses_id):

    data_path = utils.get_test_data_path(target_project)

    if target_project == 'EXTEND':
        new_file_name = build_path(
            utils.get_bids_entities(sub_id, ses_id),
            ('sub-{subject}/[ses-accel{ses}/]beh/sub-{subject}'
             '[_ses-accel{ses}]_{suffix}.{extension}'))

    elif target_project == 'BETTER':
        sub_id = 'GE' + sub_id
        new_file_name = build_path(utils.get_bids_entities(
            sub_id, ses_id), ('sub-{subject}/[ses-{ses}/]beh/sub-{subject}'
                              '[_ses-{ses}]_{suffix}.{extension}'))

    elif (target_project == 'BIKE_Pre') or (target_project == 'BIKE_Post'):
        sub_id = utils.bike_atrain_dict[sub_id]
        new_file_name = build_path(utils.get_bids_entities(
            sub_id, ses_id), ('sub-{subject}/[ses-{ses}/]beh/sub-{subject}'
                              '[_ses-{ses}]_{suffix}.{extension}'))

    elif target_project == 'PACR':
        sub_id = utils.pacr_dict[sub_id]
        new_file_name = build_path(utils.get_bids_entities(
            sub_id, ses_id), ('sub-{subject}/[ses-{ses}/]beh/sub-{subject}'
                              '[_ses-{ses}]_{suffix}.{extension}'))

    else:
        # This should give: sub-01/ses-5/beh/sub-01_ses-5_accel.csv

        # If the project is new/unknown, the accelBIDSTransform tool returns
        # only the file name, with no project-specific data_path.
        # To add a new project, add a conditional branch above this one and
        # update utils.py to account for the new data_path.
        new_file_name = build_path(utils.get_bids_entities(
            sub_id, ses_id), ('sub-{subject}/[ses-{ses}/]beh/sub-{subject}'
                              '[_ses-{ses}]_{suffix}.{extension}'))

    new_file_location = os.path.join(data_path, new_file_name)

    return new_file_location
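As a sanity check on the fallback branch, a minimal sketch (with hypothetical entity values standing in for utils.get_bids_entities) of the pattern used above:

# Sketch of the fallback pattern (hypothetical entity values; the real ones
# come from utils.get_bids_entities).
from bids.layout.writing import build_path

pattern = ('sub-{subject}/[ses-{ses}/]beh/sub-{subject}'
           '[_ses-{ses}]_{suffix}.{extension}')
entities = {'subject': '01', 'ses': '5', 'suffix': 'accel', 'extension': 'csv'}
print(build_path(entities, pattern))
# -> sub-01/ses-5/beh/sub-01_ses-5_accel.csv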
Example #9
 def put(self, filepath):
     if self.bidspaths_by_filepaths.get(filepath) is not None:
         return  # already added
     tags = self.database.tags(filepath)
     bidstags = dict()
     for k, v in tags.items():
         bidsentity = k
         if bidsentity in entity_longnames:
             bidsentity = entity_longnames[bidsentity]
         if k in entities:
             bidstags[bidsentity] = self._format_tagval(k, v)
         else:
             if k == "suffix" and tags.get("datatype") == "fmap":
                 k = "fmap"
             bidstags[k] = v
     bidspath = build_path(bidstags, bidsconfig.default_path_patterns)
     self.bidspaths_by_filepaths[filepath] = str(bidspath)
     self.filepaths_by_bidspaths[bidspath] = str(filepath)
     self.bidstags_by_bidspaths[bidspath] = bidstags
Example #10
    def _run_interface(self, runtime):
        import json
        import os.path as op
        import pkg_resources
        from bids.layout import parse_file_entities
        from bids.layout.writing import build_path

        deriv_cfg = pkg_resources.resource_string(
            "nibetaseries", op.join("data", "derivatives.json"))
        deriv_patterns = json.loads(
            deriv_cfg.decode('utf-8'))['fmriprep_path_patterns']

        subject_entities = parse_file_entities(self.inputs.source_file)
        betaseries_entities = parse_file_entities(self.inputs.in_file)
        # hotfix
        betaseries_entities['description'] = betaseries_entities['desc']

        subject_entities.update(betaseries_entities)

        out_file = build_path(subject_entities, deriv_patterns)

        if not out_file:
            raise ValueError("the provided entities do not make a valid file")

        base_directory = runtime.cwd
        if isdefined(self.inputs.base_directory):
            base_directory = os.path.abspath(self.inputs.base_directory)

        out_path = op.join(base_directory, self.out_path_base, out_file)

        os.makedirs(op.dirname(out_path), exist_ok=True)

        # copy the file to the output directory
        copy(self.inputs.in_file, out_path)

        self._results['out_file'] = out_path

        return runtime
Example #11
 def _run_interface(self, runtime):
     group_conf_summary = pd.DataFrame()
     # noinspection PyUnreachableCode
     if __debug__:  # sanity check
         entities = [
             parse_file_entities_with_pipelines(path)
             for path in self.inputs.conf_summary_json_files
         ]
         assert_all_entities_equal(entities, "session", "task", "run",
                                   "pipeline")
     for summary_json_file in self.inputs.conf_summary_json_files:
         with open(summary_json_file, 'r') as f:
             group_conf_summary = pd.concat(
                 [group_conf_summary,
                  pd.DataFrame(json.load(f), index=[0])],
                 ignore_index=True)
     entities = parse_file_entities_with_pipelines(
         self.inputs.conf_summary_json_files[0])
     file_path = build_path(entities, self.file_pattern)
     fname = os.path.join(self.inputs.output_dir, file_path)
     assert not os.path.exists(
         fname), f"Group confounds file already exists at {fname}"
     group_conf_summary.to_csv(fname, sep='\t', index=False)
     assert os.path.exists(fname), "File not created"
     self._results['group_conf_summary'] = fname
     return runtime
Example #12
    def test_build_path(self, writable_file):

        # Single simple pattern
        with pytest.raises(TypeError):
            build_path(writable_file.entities)
        pat = join(writable_file.dirname,
                   '{task}/sub-{subject}/run-{run}.nii.gz')
        target = join(writable_file.dirname, 'rest/sub-3/run-2.nii.gz')
        assert build_path(writable_file.entities, pat) == target

        # Multiple simple patterns
        pats = ['{session}/{task}/r-{run}.nii.gz',
                't-{task}/{subject}-{run}.nii.gz',
                '{subject}/{task}.nii.gz']
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 't-rest/3-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with optional entity
        pats = ['[{session}/]{task}/r-{run}.nii.gz',
                't-{task}/{subject}-{run}.nii.gz']
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 'rest/r-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with conditional values
        pats = ['{task<func|acq>}/r-{run}.nii.gz',
                't-{task}/{subject}-{run}.nii.gz']
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 't-rest/3-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with valid conditional values
        pats = ['{task<func|rest>}/r-{run}.nii.gz',
                't-{task}/{subject}-{run}.nii.gz']
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 'rest/r-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with optional entity with conditional values
        pats = ['[{task<func|acq>}/]r-{run}.nii.gz',
                't-{task}/{subject}-{run}.nii.gz']
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 'r-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with default value
        pats = ['ses-{session|A}/r-{run}.nii.gz']
        assert build_path({'run': 3}, pats) == 'ses-A/r-3.nii.gz'

        # Pattern with both valid and default values
        pats = ['ses-{session<A|B|C|D>|D}/r-{run}.nii.gz']
        assert build_path({'run': 3}, pats) == 'ses-D/r-3.nii.gz'
        pats = ['ses-{session<A|B|C|D>|D}/r-{run}.nii.gz']
        assert build_path({'session': 'B', 'run': 3}, pats) == 'ses-B/r-3.nii.gz'

        # Test extensions with dot and warning is issued
        pats = ['ses-{session<A|B|C>|D}/r-{run}.{extension}']
        with pytest.warns(UserWarning) as record:
            assert build_path({'session': 'B', 'run': 3, 'extension': '.nii'},
                              pats) == 'ses-B/r-3.nii'
        assert "defines an invalid default value" in record[0].message.args[0]

        # Test expansion of optional characters
        pats = ['ses-{session<[ABCD]>|D}/r-{run}.{extension}']
        assert build_path({'session': 'B', 'run': 3, 'extension': '.nii'},
                          pats) == 'ses-B/r-3.nii'

        # Test default-only patterns are correctly overridden by setting entity
        entities = {
            'subject': '01',
            'extension': 'bvec',
            'suffix': 'T1rho',
        }
        pats = (
            "sub-{subject}[/ses-{session}]/{datatype|dwi}/sub-{subject}[_ses-{session}]"
            "[_acq-{acquisition}]_{suffix|dwi}.{extension<bval|bvec|json|nii.gz|nii>|nii.gz}"
        )
        assert build_path(entities, pats) == 'sub-01/dwi/sub-01_T1rho.bvec'
        assert build_path(entities, pats, strict=True) == 'sub-01/dwi/sub-01_T1rho.bvec'

        # Test multiple paths
        pats = ['ses-{session<A|B|C>|D}/r-{run}.{extension<json|nii|nii.gz>|nii.gz}']
        assert sorted(
            build_path({
                'session': ['A', 'B'],
                'run': [1, 2],
                'extension': ['.nii.gz', 'json']
            }, pats)) == [
            'ses-A/r-1.json',
            'ses-A/r-1.nii.gz',
            'ses-A/r-2.json',
            'ses-A/r-2.nii.gz',
            'ses-B/r-1.json',
            'ses-B/r-1.nii.gz',
            'ses-B/r-2.json',
            'ses-B/r-2.nii.gz',
        ]
Example #13
# building blocks for generating valid file names
subject_entities = {
    "subject": "01",
    "session": "pre",
}

bids_bold_entities = {
    **subject_entities,
    "datatype": "func",
    "task": "waffles",
    "suffix": "bold",
    "run": 1,
    "extension": "nii.gz",
}
bids_bold_fname = build_path(bids_bold_entities, bids_patterns)

bids_json_entities = {
    **bids_bold_entities,
    "extension": "json",
}
bids_json_fname = build_path(bids_json_entities, bids_patterns)

bids_rest_entities = {
    **bids_bold_entities,
    "task": "rest",
}
bids_rest_fname = build_path(bids_rest_entities, bids_patterns)

bids_rest_json_entities = {
    **bids_rest_entities,
    "extension": "json",
}
bids_rest_json_fname = build_path(bids_rest_json_entities, bids_patterns)
Example #14
 def create_fname(self, fname_params, meta_params):
     """create the bids derivatives regressor file path and path names"""
     self.fname = self.base_dir / build_path(fname_params, self.PATTERN)
     self.meta_fname = self.base_dir / build_path(meta_params, self.PATTERN)
Example #15
 def create_fname(self, fname_params, meta_params):
     self.fname = self.base_dir / build_path(fname_params, self.PATTERN)
     self.meta_fname = self.base_dir / build_path(meta_params, self.PATTERN)
Example #16
    def test_build_path(self, writable_file):

        # Single simple pattern
        with pytest.raises(TypeError):
            build_path(writable_file.entities)
        pat = join(writable_file.dirname,
                   '{task}/sub-{subject}/run-{run}.nii.gz')
        target = join(writable_file.dirname, 'rest/sub-3/run-2.nii.gz')
        assert build_path(writable_file.entities, pat) == target

        # Multiple simple patterns
        pats = [
            '{session}/{task}/r-{run}.nii.gz',
            't-{task}/{subject}-{run}.nii.gz', '{subject}/{task}.nii.gz'
        ]
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 't-rest/3-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with optional entity
        pats = [
            '[{session}/]{task}/r-{run}.nii.gz',
            't-{task}/{subject}-{run}.nii.gz'
        ]
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 'rest/r-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with conditional values
        pats = [
            '{task<func|acq>}/r-{run}.nii.gz',
            't-{task}/{subject}-{run}.nii.gz'
        ]
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 't-rest/3-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with valid conditional values
        pats = [
            '{task<func|rest>}/r-{run}.nii.gz',
            't-{task}/{subject}-{run}.nii.gz'
        ]
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 'rest/r-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with optional entity with conditional values
        pats = [
            '[{task<func|acq>}/]r-{run}.nii.gz',
            't-{task}/{subject}-{run}.nii.gz'
        ]
        pats = [join(writable_file.dirname, p) for p in pats]
        target = join(writable_file.dirname, 'r-2.nii.gz')
        assert build_path(writable_file.entities, pats) == target

        # Pattern with default value
        pats = ['sess-{session|A}/r-{run}.nii.gz']
        assert build_path({'run': 3}, pats) == 'sess-A/r-3.nii.gz'

        # Pattern with both valid and default values
        pats = ['sess-{session<A|B|C>|D}/r-{run}.nii.gz']
        assert build_path({
            'session': 1,
            'run': 3
        }, pats) == 'sess-D/r-3.nii.gz'
        pats = ['sess-{session<A|B|C>|D}/r-{run}.nii.gz']
        assert build_path({
            'session': 'B',
            'run': 3
        }, pats) == 'sess-B/r-3.nii.gz'
Example #17
def collect_derivatives(derivatives_dir,
                        subject_id,
                        std_spaces,
                        freesurfer,
                        spec=None,
                        patterns=None):
    """Gather existing derivatives and compose a cache."""
    if spec is None or patterns is None:
        _spec, _patterns = tuple(
            loads(
                Path(
                    pkgrf('aslprep',
                          'smriprep/data/io_spec.json')).read_text()).values())

        if spec is None:
            spec = _spec
        if patterns is None:
            patterns = _patterns

    derivs_cache = defaultdict(list, {})
    derivatives_dir = Path(derivatives_dir)

    def _check_item(item):
        if not item:
            return None

        if isinstance(item, str):
            item = [item]

        result = []
        for i in item:
            if not (derivatives_dir / i).exists():
                i = i.rstrip('.gz')
                if not (derivatives_dir / i).exists():
                    return None
            result.append(str(derivatives_dir / i))

        return result

    for space in [None] + std_spaces:
        for k, q in spec['baseline'].items():
            q['subject'] = subject_id
            if space is not None:
                q['space'] = space
            item = _check_item(build_path(q, patterns, strict=True))
            if not item:
                return None

            if space:
                derivs_cache["std_%s" %
                             k] += item if len(item) == 1 else [item]
            else:
                derivs_cache["t1w_%s" %
                             k] = item[0] if len(item) == 1 else item

    for space in std_spaces:
        for k, q in spec['std_xfms'].items():
            q['subject'] = subject_id
            q['from'] = q['from'] or space
            q['to'] = q['to'] or space
            item = _check_item(build_path(q, patterns))
            if not item:
                return None
            derivs_cache[k] += item

    derivs_cache = dict(derivs_cache)  # Back to a standard dictionary

    if freesurfer:
        for k, q in spec['surfaces'].items():
            q['subject'] = subject_id
            item = _check_item(build_path(q, patterns))
            if not item:
                return None

            if len(item) == 1:
                item = item[0]
            derivs_cache[k] = item

    derivs_cache['template'] = std_spaces
    return derivs_cache
Example #18
def bids_sessions(tmpdir_factory):
    f, _ = plt.subplots()
    svg_dir = tmpdir_factory.mktemp("work") / "fmriprep"
    svg_dir.ensure_dir()

    pattern = (
        "sub-{subject}[/ses-{session}]/{datatype<figures>}/"
        "sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}]"
        "[_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}]"
        "[_mod-{modality}][_run-{run}][_echo-{echo}][_space-{space}]"
        "[_desc-{desc}]_{suffix<dseg|T1w|bold>}{extension<.svg>}")
    subjects = ["01"]
    tasks = ["t1", "t2", "t3"]
    runs = ["01", "02", None]
    ces = ["none", "Gd"]
    descs = ["aroma", "bbregister", "carpetplot", "rois"]
    # create functional data for both sessions
    ses1_combos = product(subjects, ["1"], tasks, [None], runs, descs)
    ses2_combos = product(subjects, ["2"], tasks, ces, [None], descs)
    # have no runs in the second session (ex: dmriprep test data)
    # https://github.com/nipreps/dmriprep/pull/59
    all_combos = list(ses1_combos) + list(ses2_combos)

    for subject, session, task, ce, run, desc in all_combos:
        entities = {
            "subject": subject,
            "session": session,
            "task": task,
            "ceagent": ce,
            "run": run,
            "desc": desc,
            "extension": ".svg",
            "suffix": "bold",
            "datatype": "figures",
        }
        bids_path = build_path(entities, pattern)
        file_path = svg_dir / bids_path
        file_path.ensure()
        f.savefig(str(file_path))

    # create anatomical data
    anat_opts = [
        {
            "desc": "brain"
        },
        {
            "desc": "conform"
        },
        {
            "desc": "reconall"
        },
        {
            "desc": "rois"
        },
        {
            "suffix": "dseg"
        },
        {
            "space": "MNI152NLin6Asym"
        },
        {
            "space": "MNI152NLin2009cAsym"
        },
    ]
    anat_combos = product(subjects, anat_opts)
    for subject, anat_opt in anat_combos:
        anat_entities = {
            "subject": subject,
            "datatype": "figures",
            "suffix": "T1w",
            "extension": ".svg",
        }
        anat_entities.update(**anat_opt)
        bids_path = build_path(anat_entities, pattern)
        file_path = svg_dir / bids_path
        file_path.ensure()
        f.savefig(str(file_path))

    return svg_dir.dirname
Example #19
def predict_derivatives(subject_id, output_spaces, freesurfer):
    """
    Generate a list of the files that should be found in the output folder.

    The prediction of outputs serves two purposes:

      * Anticipates the set of outputs that will be generated.
      * Informs the decision of whether the workflow should be staged or some
        sections can be skipped.

    Parameters
    ----------
    subject_id : :obj:`str`
        A subject id
    output_spaces : :obj:`list`
        TemplateFlow identifiers of the requested output spaces
    freesurfer : :obj:`bool`
        Whether the ``--fs-no-reconall`` flag was used.

    Examples
    --------
    >>> predict_derivatives('01', ['MNI152NLin2009cAsym'], False)
    ['sub-01/anat/sub-01_desc-brain_mask.nii.gz',
     'sub-01/anat/sub-01_desc-preproc_T1w.nii.gz',
     'sub-01/anat/sub-01_dseg.nii.gz',
     'sub-01/anat/sub-01_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5',
     'sub-01/anat/sub-01_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5',
     'sub-01/anat/sub-01_label-CSF_probseg.nii.gz',
     'sub-01/anat/sub-01_label-GM_probseg.nii.gz',
     'sub-01/anat/sub-01_label-WM_probseg.nii.gz',
     'sub-01/anat/sub-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz',
     'sub-01/anat/sub-01_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz',
     'sub-01/anat/sub-01_space-MNI152NLin2009cAsym_dseg.nii.gz',
     'sub-01/anat/sub-01_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz',
     'sub-01/anat/sub-01_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz',
     'sub-01/anat/sub-01_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz']

    """
    spec = loads(
        Path(pkgrf('aslprep', 'smriprep/data/io_spec.json')).read_text())

    def _normalize_q(query, space=None):
        query = query.copy()
        query['subject'] = subject_id
        if space is not None:
            query['space'] = space
        if 'from' in query and not query['from']:
            query['from'] = output_spaces
        if 'to' in query and not query['to']:
            query['to'] = output_spaces
        return query

    queries = [
        _normalize_q(q, space=None)
        for q in spec['queries']['baseline'].values()
    ]

    queries += [
        _normalize_q(q, space=s) for s in output_spaces
        for q in spec['queries']['baseline'].values()
    ]
    queries += [_normalize_q(q) for q in spec['queries']['std_xfms'].values()]
    if freesurfer:
        queries += [
            _normalize_q(q) for q in spec['queries']['surfaces'].values()
        ]

    output = []
    for q in queries:
        paths = build_path(q, spec['patterns'])
        if isinstance(paths, str):
            output.append(paths)
        elif paths:
            output += paths

    return sorted(output)
Example #20
def bids_sessions(tmpdir_factory):
    f, _ = plt.subplots()
    svg_dir = tmpdir_factory.mktemp('work') / 'fmriprep'
    svg_dir.ensure_dir()

    pattern = (
        "sub-{subject}[/ses-{session}]/{datatype<anat|func>}/"
        "sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}]"
        "[_ce-{contrast}][_dir-{direction}][_rec-{reconstruction}]"
        "[_mod-{modality}][_run-{run}][_echo-{echo}][_space-{space}]"
        "[_desc-{desc}]_{suffix<dseg|T1w|bold>}.{extension<svg>}")
    subjects = ['01']
    tasks = ['t1', 't2', 't3']
    runs = ['01', '02', None]
    descs = ['aroma', 'bbregister', 'carpetplot', 'rois']
    # create functional data for both sessions
    ses1_combos = product(subjects, ['1'], tasks, runs, descs)
    ses2_combos = product(subjects, ['2'], tasks, [None], descs)
    # have no runs in the second session (ex: dmriprep test data)
    # https://github.com/nipreps/dmriprep/pull/59
    all_combos = list(ses1_combos) + list(ses2_combos)

    for subject, session, task, run, desc in all_combos:
        entities = {
            'subject': subject,
            'session': session,
            'task': task,
            'run': run,
            'desc': desc,
            'extension': 'svg',
            'suffix': 'bold',
            'datatype': 'func'
        }
        bids_path = build_path(entities, pattern)
        file_path = svg_dir / bids_path
        file_path.ensure()
        f.savefig(str(file_path))

    # create anatomical data
    anat_opts = [
        {
            'desc': 'brain'
        },
        {
            'desc': 'conform'
        },
        {
            'desc': 'reconall'
        },
        {
            'desc': 'rois'
        },
        {
            'suffix': 'dseg'
        },
        {
            'space': 'MNI152NLin6Asym'
        },
        {
            'space': 'MNI152NLin2009cAsym'
        },
    ]
    anat_combos = product(subjects, anat_opts)
    for subject, anat_opt in anat_combos:
        anat_entities = {
            'subject': subject,
            'datatype': 'anat',
            'suffix': 'T1w',
            'extension': 'svg'
        }
        anat_entities.update(**anat_opt)
        bids_path = build_path(anat_entities, pattern)
        file_path = svg_dir / bids_path
        file_path.ensure()
        f.savefig(str(file_path))

    return svg_dir.dirname
Example #21
    def _run_interface(self, runtime):
        # Ready the output folder
        base_directory = runtime.cwd
        if isdefined(self.inputs.base_directory):
            base_directory = self.inputs.base_directory
        base_directory = Path(base_directory).absolute()
        out_path = base_directory / self.out_path_base
        out_path.mkdir(exist_ok=True, parents=True)

        # Ensure we have a list
        in_file = listify(self.inputs.in_file)

        # Read in the dictionary of metadata
        if isdefined(self.inputs.meta_dict):
            meta = self.inputs.meta_dict
            # inputs passed in construction take priority
            meta.update(self._metadata)
            self._metadata = meta

        # Initialize entities with those from the source file.
        in_entities = [
            parse_file_entities(str(relative_to_root(source_file)))
            for source_file in self.inputs.source_file
        ]
        out_entities = {
            k: v
            for k, v in in_entities[0].items() if all(
                ent.get(k) == v for ent in in_entities[1:])
        }
        for drop_entity in listify(self.inputs.dismiss_entities or []):
            out_entities.pop(drop_entity, None)

        # Override extension with that of the input file(s)
        out_entities["extension"] = [
            # _splitext does not accept .surf.gii (for instance)
            "".join(Path(orig_file).suffixes).lstrip(".")
            for orig_file in in_file
        ]

        compress = listify(self.inputs.compress) or [None]
        if len(compress) == 1:
            compress = compress * len(in_file)
        for i, ext in enumerate(out_entities["extension"]):
            if compress[i] is not None:
                ext = regz.sub("", ext)
                out_entities["extension"][
                    i] = f"{ext}.gz" if compress[i] else ext

        # Override entities with those set as inputs
        for key in self._allowed_entities:
            value = getattr(self.inputs, key)
            if value is not None and isdefined(value):
                out_entities[key] = value

        # Clean up native resolution with space
        if out_entities.get("resolution") == "native" and out_entities.get(
                "space"):
            out_entities.pop("resolution", None)

        if len(set(out_entities["extension"])) == 1:
            out_entities["extension"] = out_entities["extension"][0]

        # Insert custom (non-BIDS) entities from allowed_entities.
        custom_entities = set(out_entities.keys()) - set(BIDS_DERIV_ENTITIES)
        patterns = BIDS_DERIV_PATTERNS
        if custom_entities:
            # Example: f"{key}-{{{key}}}" -> "task-{task}"
            custom_pat = "_".join(f"{key}-{{{key}}}"
                                  for key in sorted(custom_entities))
            patterns = [
                pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
                for pat in patterns
            ]

        # Prepare SimpleInterface outputs object
        self._results["out_file"] = []
        self._results["compression"] = []
        self._results["fixed_hdr"] = [False] * len(in_file)

        dest_files = build_path(out_entities, path_patterns=patterns)
        if not dest_files:
            raise ValueError(
                f"Could not build path with entities {out_entities}.")

        # Make sure the interpolated values are embedded in a list, and check
        dest_files = listify(dest_files)
        if len(in_file) != len(dest_files):
            raise ValueError(f"Input files ({len(in_file)}) not matched "
                             f"by interpolated patterns ({len(dest_files)}).")

        for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)):
            out_file = out_path / dest_file
            out_file.parent.mkdir(exist_ok=True, parents=True)
            self._results["out_file"].append(str(out_file))
            self._results["compression"].append(
                _copy_any(orig_file, str(out_file)))

            is_nifti = out_file.name.endswith(
                (".nii", ".nii.gz")) and not out_file.name.endswith(
                    (".dtseries.nii", ".dtseries.nii.gz"))
            data_dtype = self.inputs.data_dtype or DEFAULT_DTYPES[
                self.inputs.suffix]
            if is_nifti and any((self.inputs.check_hdr, data_dtype)):
                # Do not use mmap; if we need to access the data at all, it will be to
                # rewrite, risking a BusError
                nii = nb.load(out_file, mmap=False)

                if self.inputs.check_hdr:
                    hdr = nii.header
                    curr_units = tuple([
                        None if u == "unknown" else u
                        for u in hdr.get_xyzt_units()
                    ])
                    curr_codes = (int(hdr["qform_code"]),
                                  int(hdr["sform_code"]))

                    # Default to mm, use sec if data type is bold
                    units = (
                        curr_units[0] or "mm",
                        "sec" if out_entities["suffix"] == "bold" else None,
                    )
                    xcodes = (1, 1)  # Derivative in its original scanner space
                    if self.inputs.space:
                        xcodes = ((4, 4) if self.inputs.space
                                  in STANDARD_SPACES else (2, 2))

                    if curr_codes != xcodes or curr_units != units:
                        self._results["fixed_hdr"][i] = True
                        hdr.set_qform(nii.affine, xcodes[0])
                        hdr.set_sform(nii.affine, xcodes[1])
                        hdr.set_xyzt_units(*units)

                        # Rewrite file with new header
                        overwrite_header(nii, out_file)

                if data_dtype == "source":  # match source dtype
                    try:
                        data_dtype = nb.load(
                            self.inputs.source_file[0]).get_data_dtype()
                    except Exception:
                        LOGGER.warning(
                            f"Could not get data type of file {self.inputs.source_file[0]}"
                        )
                        data_dtype = None

                if data_dtype:
                    if self.inputs.check_hdr:
                        # load updated NIfTI
                        nii = nb.load(out_file, mmap=False)
                    data_dtype = np.dtype(data_dtype)
                    orig_dtype = nii.get_data_dtype()
                    if orig_dtype != data_dtype:
                        LOGGER.warning(
                            f"Changing {out_file} dtype from {orig_dtype} to {data_dtype}"
                        )
                        # coerce dataobj to new data dtype
                        if np.issubdtype(data_dtype, np.integer):
                            new_data = np.rint(nii.dataobj).astype(data_dtype)
                        else:
                            new_data = np.asanyarray(nii.dataobj,
                                                     dtype=data_dtype)
                        # and set header to match
                        nii.set_data_dtype(data_dtype)
                        nii = nii.__class__(new_data, nii.affine, nii.header)
                        nii.to_filename(out_file)

        if len(self._results["out_file"]) == 1:
            meta_fields = self.inputs.copyable_trait_names()
            self._metadata.update({
                k: getattr(self.inputs, k)
                for k in meta_fields if k not in self._static_traits
            })
            if self._metadata:
                out_file = Path(self._results["out_file"][0])
                # 1.3.x hack
                # For dtseries, we have been generating weird non-BIDS JSON files.
                # We can safely keep producing them to avoid breaking derivatives, but
                # only the existing keys should keep going into them.
                if out_file.name.endswith(".dtseries.nii"):
                    legacy_metadata = {}
                    for key in ("grayordinates", "space", "surface",
                                "surface_density", "volume"):
                        if key in self._metadata:
                            legacy_metadata[key] = self._metadata.pop(key)
                    if legacy_metadata:
                        sidecar = out_file.parent / f"{_splitext(str(out_file))[0]}.json"
                        sidecar.write_text(
                            dumps(legacy_metadata, sort_keys=True, indent=2))
                # Going forward, the extension is the first '.' and everything after it
                sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json"
                sidecar.write_text(
                    dumps(self._metadata, sort_keys=True, indent=2))
                self._results["out_meta"] = str(sidecar)
        return runtime
Example #22
def rename_fmriprep_files(fmriprep_dir, renamed_dir, dset_desc, gen_ref=False):
    # copy the dataset_description file over first
    os.makedirs(renamed_dir, exist_ok=True)
    copyfile(dset_desc, os.path.join(renamed_dir, os.path.basename(dset_desc)))

    # collect the mapping from old file names to new file names
    rename_files = {}
    for root, _, files in os.walk(fmriprep_dir):
        for file in files:
            match = re.search(DERIV_REGEX, file)
            if match is not None:
                file_dict = match.groupdict()
                # change brainmask to desc-brain_mask
                if file_dict.get('desc') == 'brainmask':
                    file_dict['desc'] = 'brain'
                    file_dict['suffix'] = 'mask'
                # variant is now desc
                if file_dict.get('variant'):
                    file_dict['desc'] = file_dict.pop('variant')
                # different formats of transformation files
                if file_dict.get('space') and file_dict.get('target'):
                    file_dict['fspace'] = file_dict.pop('space')
                    file_dict['tspace'] = file_dict.pop('target')
                    file_dict['mode'] = 'image'
                    file_dict['suffix'] = 'xfm'
                    del file_dict['desc']
                if file_dict.get('target'):
                    file_dict['fspace'] = file_dict.pop('suffix')
                    file_dict['tspace'] = file_dict.pop('target')
                    file_dict['mode'] = 'image'
                    file_dict['suffix'] = 'xfm'
                    del file_dict['desc']
                # segmentations
                if file_dict.get('class'):
                    file_dict['label'] = file_dict.pop('class')
                    file_dict['suffix'] = 'probseg'
                    del file_dict['desc']
                if file_dict.get('desc') == 'dtissue':
                    file_dict['suffix'] = 'dseg'
                    del file_dict['desc']
                # freesurfer hemisphere files
                if file_dict['ext'].startswith('L') or file_dict['ext'].startswith('R'):
                    file_dict['hemi'] = file_dict['ext'][0]
                    file_dict['ext'] = file_dict['ext'][2:]
                    file_dict['suffix'] = file_dict.pop('desc')
                # aroma files
                if file_dict.get('desc') == 'MELODICmix':
                    file_dict['desc'] = 'MELODIC'
                    file_dict['suffix'] = 'mixing'
                if file_dict.get('desc') == 'AROMAnoiseICs':
                    file_dict['suffix'] = file_dict['desc']
                    del file_dict['desc']
                # confounds file change
                if file_dict.get('desc') == 'confounds':
                    file_dict['suffix'] = 'regressors'

                # write the file to the new directory
                new_file = build_path(file_dict, PATH_PATTERN)
                new_root = root.replace(fmriprep_dir, renamed_dir)
                new_path = os.path.join(new_root, new_file)
                old_path = os.path.join(root, file)
                rename_files[old_path] = new_path
                os.makedirs(new_root, exist_ok=True)
                copyfile(old_path, new_path)

                if file_dict['suffix'] == 'bold' and gen_ref:
                    ref_nii = nb.load(new_path)
                    # take the median of the first 10 volumes
                    median_image_data = np.median(
                        ref_nii.dataobj[:, :, :, :10], axis=3)
                    # do not need description
                    del file_dict['desc']
                    file_dict['suffix'] = "boldref"
                    boldref_file = build_path(file_dict, PATH_PATTERN)
                    boldref_path = os.path.join(new_root, boldref_file)
                    nb.Nifti1Image(median_image_data, ref_nii.affine,
                                   ref_nii.header).to_filename(boldref_path)
                    rename_files[boldref_path] = boldref_path

    # write out log for how files were renamed
    data_transfer_log = os.path.join(renamed_dir, "logs", "data_transfer.json")
    os.makedirs(os.path.dirname(data_transfer_log), exist_ok=True)
    with open(data_transfer_log, 'w') as fp:
        json.dump(rename_files, fp)

    return data_transfer_log
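To illustrate one of the renames described in the comments above (brainmask becoming desc-brain_mask), a small sketch with a simplified, hypothetical path pattern; the real DERIV_REGEX and PATH_PATTERN are defined elsewhere in the module:

# Sketch only: a simplified stand-in for PATH_PATTERN, not the real one.
from bids.layout.writing import build_path

pattern = 'sub-{subject}[_task-{task}][_desc-{desc}]_{suffix}.{ext}'
file_dict = {'subject': '01', 'task': 'rest', 'desc': 'brainmask',
             'suffix': 'bold', 'ext': 'nii.gz'}
# brainmask -> desc-brain_mask, as in the function above
file_dict['desc'] = 'brain'
file_dict['suffix'] = 'mask'
print(build_path(file_dict, pattern))
# -> sub-01_task-rest_desc-brain_mask.nii.gz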