def test_copyfallback():
    if os.name != 'posix':
        return
    orig_img, orig_hdr = _temp_analyze_files()
    pth, imgname = os.path.split(orig_img)
    pth, hdrname = os.path.split(orig_hdr)
    try:
        fatfs = TempFATFS()
    except IOError:
        warnings.warn('Fuse mount failed. copyfile fallback tests skipped.')
    else:
        with fatfs as fatdir:
            tgt_img = os.path.join(fatdir, imgname)
            tgt_hdr = os.path.join(fatdir, hdrname)
            for copy in (True, False):
                for use_hardlink in (True, False):
                    copyfile(orig_img, tgt_img, copy=copy,
                             use_hardlink=use_hardlink)
                    yield assert_true, os.path.exists(tgt_img)
                    yield assert_true, os.path.exists(tgt_hdr)
                    yield assert_false, os.path.islink(tgt_img)
                    yield assert_false, os.path.islink(tgt_hdr)
                    yield assert_false, os.path.samefile(orig_img, tgt_img)
                    yield assert_false, os.path.samefile(orig_hdr, tgt_hdr)
                    os.unlink(tgt_img)
                    os.unlink(tgt_hdr)
    finally:
        os.unlink(orig_img)
        os.unlink(orig_hdr)
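A quick restatement of what the loop above pins down, kept as comments for reference:

# On the FAT mount neither symlinks nor hardlinks are possible, so for every
# (copy, use_hardlink) combination copyfile must fall back to a real copy:
# the targets exist, are not symlinks, and do not share an inode with the
# originals (samefile is False).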
Example #2
def sink_mask_file(in_file, orig_file, out_dir):
    import os
    from nipype.utils.filemanip import fname_presuffix, copyfile
    os.makedirs(out_dir, exist_ok=True)
    out_file = fname_presuffix(orig_file, suffix='_mask', newpath=out_dir)
    copyfile(in_file, out_file, copy=True, use_hardlink=True)
    return out_file
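A minimal usage sketch for the helper above, with hypothetical paths; the output name is derived from orig_file by fname_presuffix and the file is hard-linked (or copied) into out_dir:

# Hypothetical call (paths invented for illustration):
masked = sink_mask_file(
    in_file='/tmp/work/brain_mask.nii.gz',            # file produced upstream
    orig_file='/data/sub-01/anat/sub-01_T1w.nii.gz',  # naming template
    out_dir='/tmp/derivatives/sub-01',
)
# masked == '/tmp/derivatives/sub-01/sub-01_T1w_mask.nii.gz'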
Example #3
 def _run_interface(self, runtime):
     _, _, ext = split_filename(self.inputs.max)
     copyfile(self.inputs.max, os.path.abspath(self.inputs.input_data_prefix + "_max" + ext), copy=False)
     
     _, _, ext = split_filename(self.inputs.ODF)
     copyfile(self.inputs.ODF, os.path.abspath(self.inputs.input_data_prefix + "_odf" + ext), copy=False)
     
     return super(ODFTracker, self)._run_interface(runtime)
Example #4
def test_copyfile():
    orig_img, orig_hdr = _temp_analyze_files()
    pth, fname = os.path.split(orig_img)
    new_img = os.path.join(pth, 'newfile.img')
    new_hdr = os.path.join(pth, 'newfile.hdr')
    copyfile(orig_img, new_img)
    yield assert_true, os.path.exists(new_img)
    yield assert_true, os.path.exists(new_hdr)
    os.unlink(new_img)
    os.unlink(new_hdr)
    # final cleanup
    os.unlink(orig_img)
    os.unlink(orig_hdr)
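The assertion on new_hdr above relies on copyfile also handling the related header of an Analyze .img/.hdr pair; a hedged restatement with hypothetical paths:

# Copying the .img member of an Analyze pair is expected to materialize the
# matching .hdr as well (hypothetical paths for illustration).
copyfile('/tmp/scan.img', '/tmp/scan_copy.img')
assert os.path.exists('/tmp/scan_copy.hdr')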
Example #5
 def _list_outputs(self):
     """Execute this module.
     """
     outdir = self.inputs.base_directory
     if not isdefined(outdir):
         outdir = '.'
     outdir = os.path.abspath(outdir)
     if isdefined(self.inputs.container):
         outdir = os.path.join(outdir, self.inputs.container)
     if not os.path.exists(outdir):
         os.makedirs(outdir)
     for key,files in self.inputs._outputs.items():
         iflogger.debug("key: %s files: %s"%(key, str(files)))
         files = filename_to_list(files)
         outfiles = []
         tempoutdir = outdir
         for d in key.split('.'):
             if d[0] == '@':
                 continue
             tempoutdir = os.path.join(tempoutdir,d)
         
         # flattening list
         if isinstance(files, list):
             if isinstance(files[0], list):
                 files = [item for sublist in files for item in sublist]
                 
         for src in filename_to_list(files):
             src = os.path.abspath(src)
             if os.path.isfile(src):
                 dst = self._get_dst(src)
                 dst = os.path.join(tempoutdir, dst)
                 dst = self._substitute(dst)
                 path,_ = os.path.split(dst)
                 if not os.path.exists(path):
                     os.makedirs(path)
                 iflogger.debug("copyfile: %s %s"%(src, dst))
                 copyfile(src, dst, copy=True)
             elif os.path.isdir(src):
                 dst = self._get_dst(os.path.join(src,''))
                 dst = os.path.join(tempoutdir, dst)
                 dst = self._substitute(dst)
                 path,_ = os.path.split(dst)
                 if not os.path.exists(path):
                     os.makedirs(path)
                 if os.path.exists(dst):
                     iflogger.debug("removing: %s"%dst)
                     shutil.rmtree(dst)
                 iflogger.debug("copydir: %s %s"%(src, dst))
                 shutil.copytree(src, dst)
     return None
Example #6
 def _run_interface(self, runtime):
     if self.inputs.initialization == "PriorProbabilityImages":
         priors_directory = os.path.join(os.getcwd(), "priors")
         if not os.path.exists(priors_directory):
             os.makedirs(priors_directory)
         _, _, ext = split_filename(self.inputs.prior_probability_images[0])
         for i, f in enumerate(self.inputs.prior_probability_images):
             target = os.path.join(priors_directory,
                                      'priorProbImages%02d' % (i + 1) + ext)
             if not (os.path.exists(target) and os.path.realpath(target) == os.path.abspath(f)):
                 copyfile(os.path.abspath(f), os.path.join(priors_directory,
                                      'priorProbImages%02d' % (i + 1) + ext))
     runtime = super(Atropos, self)._run_interface(runtime)
     return runtime
Example #7
def convert_rawdata(base_directory, input_dir, out_prefix):
    os.environ['UNPACK_MGH_DTI'] = '0'
    file_list = os.listdir(input_dir)

    # If RAWDATA folder contains one (and only one) gunzipped nifti file -> copy it
    first_file = os.path.join(input_dir, file_list[0])
    if len(file_list) == 1 and first_file.endswith('nii.gz'):
        copyfile(first_file, os.path.join(base_directory, 'NIFTI', out_prefix+'.nii.gz'), False, False, 'content') # intelligent copy looking at input's content
    else:
        mem = Memory(base_dir=os.path.join(base_directory,'NIPYPE'))
        mri_convert = mem.cache(fs.MRIConvert)
        res = mri_convert(in_file=first_file, out_file=os.path.join(base_directory, 'NIFTI', out_prefix + '.nii.gz'))
        if len(res.outputs.get()) == 0:
            return False

    return True
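A hedged usage sketch for convert_rawdata with hypothetical directories; both branches write into <base_directory>/NIFTI, which is assumed to exist already:

# Hypothetical call: convert the contents of RAWDATA/T1 into
# /data/sub-01/NIFTI/T1.nii.gz (a single .nii.gz triggers a content-aware
# copy, anything else goes through mri_convert via the NIPYPE cache folder).
ok = convert_rawdata('/data/sub-01', '/data/sub-01/RAWDATA/T1', 'T1')
if not ok:
    print('mri_convert produced no outputs')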
Example #8
def _copy_any(src, dst):
    src_isgz = src.endswith('.gz')
    dst_isgz = dst.endswith('.gz')
    if src_isgz == dst_isgz:
        copyfile(src, dst, copy=True, use_hardlink=True)
        return False  # Make sure we do not reuse the hardlink later

    # Unlink target (should not exist)
    if os.path.exists(dst):
        os.unlink(dst)

    src_open = gzip.open if src_isgz else open
    dst_open = gzip.open if dst_isgz else open
    with src_open(src, 'rb') as f_in:
        with dst_open(dst, 'wb') as f_out:
            copyfileobj(f_in, f_out)
    return True
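A short usage sketch (hypothetical paths): the return value tells the caller whether a gzip step was performed, i.e. whether the destination can still be treated as a plain copy/hardlink of the source:

# Extensions differ -> re-(de)compressed via copyfileobj, returns True.
changed = _copy_any('/tmp/work/bold_ref.nii', '/out/bold_ref.nii.gz')
# Extensions match -> plain copy/hardlink through copyfile, returns False.
unchanged = _copy_any('/tmp/work/bold_ref.nii', '/out/bold_ref.nii')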
Example #9
    def _run_interface(self, runtime):
        out_file = self._gen_outfilename()
        src_file = self.inputs.src_file
        ref_file = self.inputs.ref_file

        # Collect orientation infos

        # "orientation" => 3 letter acronym defining orientation
        src_orient = fs.utils.ImageInfo(in_file=src_file).run().outputs.orientation
        ref_orient = fs.utils.ImageInfo(in_file=ref_file).run().outputs.orientation
        # "convention" => RADIOLOGICAL/NEUROLOGICAL
        src_conv = fsl.Orient(in_file=src_file, get_orient=True).run().outputs.orient
        ref_conv = fsl.Orient(in_file=ref_file, get_orient=True).run().outputs.orient

        if src_orient == ref_orient:
            # no reorientation needed
            copyfile(src_file, out_file, False, False, "content")
            return runtime
        else:
            if src_conv != ref_conv:
                # if needed, match convention (radiological/neurological) to reference
                tmpsrc = os.path.join(os.path.dirname(src_file), "tmp_" + os.path.basename(src_file))

                fsl.SwapDimensions(in_file=src_file, new_dims=("-x", "y", "z"), out_file=tmpsrc).run()

                fsl.Orient(in_file=tmpsrc, swap_orient=True).run()
            else:
                # If conventions match, just use the original source
                tmpsrc = src_file

        tmp2 = os.path.join(os.path.dirname(src_file), "tmp.nii.gz")
        map_orient = {"L": "RL", "R": "LR", "A": "PA", "P": "AP", "S": "IS", "I": "SI"}
        fsl.SwapDimensions(
            in_file=tmpsrc,
            new_dims=(map_orient[ref_orient[0]], map_orient[ref_orient[1]], map_orient[ref_orient[2]]),
            out_file=tmp2,
        ).run()

        shutil.move(tmp2, out_file)

        # Only remove the temporary file if the conventions did not match.  Otherwise,
        # we end up removing the output.
        if tmpsrc != src_file:
            os.remove(tmpsrc)
        return runtime
Example #10
def inject_skullstripped(subjects_dir, subject_id, skullstripped):
    mridir = op.join(subjects_dir, subject_id, 'mri')
    t1 = op.join(mridir, 'T1.mgz')
    bm_auto = op.join(mridir, 'brainmask.auto.mgz')
    bm = op.join(mridir, 'brainmask.mgz')

    if not op.exists(bm_auto):
        img = nb.load(t1)
        mask = nb.load(skullstripped)
        bmask = new_img_like(mask, mask.get_data() > 0)
        resampled_mask = resample_to_img(bmask, img, 'nearest')
        masked_image = new_img_like(img, img.get_data() * resampled_mask.get_data())
        masked_image.to_filename(bm_auto)

    if not op.exists(bm):
        copyfile(bm_auto, bm, copy=True, use_hardlink=True)

    return subjects_dir, subject_id
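A hedged usage sketch with hypothetical paths; the function derives brainmask.auto.mgz from the skull-stripped image if it is missing, then copies/hard-links it to brainmask.mgz:

# Hypothetical call: inject an externally computed brain mask into a
# FreeSurfer subject directory.
inject_skullstripped(
    subjects_dir='/out/freesurfer',
    subject_id='sub-01',
    skullstripped='/tmp/work/sub-01_desc-brain_T1w.nii.gz',
)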
Example #11
    def index(self, config):
        fig_dir = 'figures'
        subject_dir = self.root.split('/')[-1]
        subject = re.search('^(?P<subject_id>sub-[a-zA-Z0-9]+)$', subject_dir).group()
        svg_dir = os.path.join(self.out_dir, 'fmriprep', subject, fig_dir)
        os.makedirs(svg_dir, exist_ok=True)

        reportlet_list = list(sorted([str(f) for f in Path(self.root).glob('**/*.*')]))

        for subrep_cfg in config:
            reportlets = []
            for reportlet_cfg in subrep_cfg['reportlets']:
                rlet = Reportlet(**reportlet_cfg)
                for src in reportlet_list:
                    ext = src.split('.')[-1]
                    if rlet.file_pattern.search(src):
                        contents = None
                        if ext == 'html':
                            with open(src) as fp:
                                contents = fp.read().strip()
                        elif ext == 'svg':
                            fbase = os.path.basename(src)
                            copyfile(src, os.path.join(svg_dir, fbase),
                                     copy=True, use_hardlink=True)
                            contents = os.path.join(subject, fig_dir, fbase)

                        if contents:
                            rlet.source_files.append(src)
                            rlet.contents.append(contents)

                if rlet.source_files:
                    reportlets.append(rlet)

            if reportlets:
                sub_report = SubReport(
                    subrep_cfg['name'], reportlets=reportlets,
                    title=subrep_cfg.get('title'))
                self.sections.append(order_by_run(sub_report))

        error_dir = os.path.join(self.out_dir, "fmriprep", subject, 'log', self.run_uuid)
        if os.path.isdir(error_dir):
            self.index_error_dir(error_dir)
Example #12
 def _run_interface(self, runtime):
     out_file = self._gen_outfilename()
     src_file = self.inputs.src_file
     ref_file = self.inputs.ref_file
 
     # Collect orientation infos
     
     # "orientation" => 3 letter acronym defining orientation
     src_orient = fs.utils.ImageInfo(in_file=src_file).run().outputs.orientation
     ref_orient = fs.utils.ImageInfo(in_file=ref_file).run().outputs.orientation
     # "convention" => RADIOLOGICAL/NEUROLOGICAL
     src_conv = fsl.Orient(in_file=src_file, get_orient=True).run().outputs.orient
     ref_conv = fsl.Orient(in_file=ref_file, get_orient=True).run().outputs.orient
     
     if src_orient == ref_orient:
         # no reorientation needed
         copyfile(src_file,out_file,False, False, 'content')
         return runtime
     else:
         if src_conv != ref_conv:
             # if needed, match convention (radiological/neurological) to reference
             tmpsrc = os.path.join(os.path.dirname(src_file), 'tmp_' + os.path.basename(src_file))
     
             fsl.SwapDimensions(in_file=src_file, new_dims=('-x','y','z'), out_file=tmpsrc).run()
     
             fsl.Orient(in_file=tmpsrc, swap_orient=True).run()
         else:
             # If conventions match, just use the original source
             tmpsrc = src_file
             
     tmp2 = os.path.join(os.path.dirname(src_file), 'tmp.nii.gz')
     map_orient = {'L':'RL','R':'LR','A':'PA','P':'AP','S':'IS','I':'SI'}
     fsl.SwapDimensions(in_file=tmpsrc, new_dims=(map_orient[ref_orient[0]],map_orient[ref_orient[1]],map_orient[ref_orient[2]]), out_file=tmp2).run()
         
     shutil.move(tmp2, out_file)
 
     # Only remove the temporary file if the conventions did not match.  Otherwise,
     # we end up removing the output.
     if tmpsrc != src_file:
         os.remove(tmpsrc)
     return runtime
Example #13
def test_linkchain():
    if os.name != 'posix':
        return
    orig_img, orig_hdr = _temp_analyze_files()
    pth, fname = os.path.split(orig_img)
    new_img1 = os.path.join(pth, 'newfile1.img')
    new_hdr1 = os.path.join(pth, 'newfile1.hdr')
    new_img2 = os.path.join(pth, 'newfile2.img')
    new_hdr2 = os.path.join(pth, 'newfile2.hdr')
    new_img3 = os.path.join(pth, 'newfile3.img')
    new_hdr3 = os.path.join(pth, 'newfile3.hdr')
    copyfile(orig_img, new_img1)
    yield assert_true, os.path.islink(new_img1)
    yield assert_true, os.path.islink(new_hdr1)
    copyfile(new_img1, new_img2, copy=True)
    yield assert_false, os.path.islink(new_img2)
    yield assert_false, os.path.islink(new_hdr2)
    yield assert_false, os.path.samefile(orig_img, new_img2)
    yield assert_false, os.path.samefile(orig_hdr, new_hdr2)
    copyfile(new_img1, new_img3, copy=True, use_hardlink=True)
    yield assert_false, os.path.islink(new_img3)
    yield assert_false, os.path.islink(new_hdr3)
    yield assert_true, os.path.samefile(orig_img, new_img3)
    yield assert_true, os.path.samefile(orig_hdr, new_hdr3)
    os.unlink(new_img1)
    os.unlink(new_hdr1)
    os.unlink(new_img2)
    os.unlink(new_hdr2)
    os.unlink(new_img3)
    os.unlink(new_hdr3)
    # final cleanup
    os.unlink(orig_img)
    os.unlink(orig_hdr)
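The three copyfile modes exercised above, restated as comments for quick reference (POSIX behavior asserted by the test):

# copyfile(src, dst)                                 -> symlink to src
# copyfile(src, dst, copy=True)                      -> independent copy (samefile False)
# copyfile(src, dst, copy=True, use_hardlink=True)   -> hardlink (samefile True)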
Example #14
    def index(self, config):
        fig_dir = 'figures'
        subject = 'sub-{}'.format(self.subject_id)
        svg_dir = self.out_dir / subject / fig_dir
        svg_dir.mkdir(parents=True, exist_ok=True)
        reportlet_list = list(sorted([str(f) for f in Path(self.root).glob('**/*.*')]))

        for subrep_cfg in config:
            reportlets = []
            for reportlet_cfg in subrep_cfg['reportlets']:
                rlet = Reportlet(**reportlet_cfg)
                for src in reportlet_list:
                    ext = src.split('.')[-1]
                    if rlet.file_pattern.search(src):
                        contents = None
                        if ext == 'html':
                            with open(src) as fp:
                                contents = fp.read().strip()
                        elif ext == 'svg':
                            fbase = Path(src).name
                            copyfile(src, str(svg_dir / fbase),
                                     copy=True, use_hardlink=True)
                            contents = str(Path(subject) / fig_dir / fbase)

                        if contents:
                            rlet.source_files.append(src)
                            rlet.contents.append(contents)

                if rlet.source_files:
                    reportlets.append(rlet)

            if reportlets:
                sub_report = SubReport(
                    subrep_cfg['name'], reportlets=reportlets,
                    title=subrep_cfg.get('title'))
                self.sections.append(order_by_run(sub_report))

        error_dir = self.out_dir / self.packagename / subject / 'log' / self.run_uuid
        if error_dir.is_dir():
            self.index_error_dir(error_dir)
Example #15
    def _run_interface(self, runtime):
        for i in range(1, len(self.inputs.thsamples) + 1):
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.thsamples[i - 1],
                     self.inputs.samples_base_name + "_th%dsamples" % i + ext,
                     copy=True)
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.phsamples[i - 1],
                     self.inputs.samples_base_name + "_ph%dsamples" % i + ext,
                     copy=True)
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.fsamples[i - 1],
                     self.inputs.samples_base_name + "_f%dsamples" % i + ext,
                     copy=True)

        if isdefined(self.inputs.target_masks):
            f = open("targets.txt", "w")
            for target in self.inputs.target_masks:
                f.write("%s\n" % target)
            f.close()

        runtime = super(mapped_ProbTrackX, self)._run_interface(runtime)
        if runtime.stderr:
            self.raise_exception(runtime)
        return runtime
Example #16
    def _run_interface(self, runtime):
        for i in range(1, len(self.inputs.thsamples) + 1):
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.thsamples[i - 1],
                     self.inputs.samples_base_name + "_th%dsamples" % i + ext,
                     copy=False)
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.phsamples[i - 1],
                     self.inputs.samples_base_name + "_ph%dsamples" % i + ext,
                     copy=False)
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.fsamples[i - 1],
                     self.inputs.samples_base_name + "_f%dsamples" % i + ext,
                     copy=False)

        if isdefined(self.inputs.target_masks):
            f = open("targets.txt", "w")
            for target in self.inputs.target_masks:
                f.write("%s\n" % target)
            f.close()
        if isinstance(self.inputs.seed, list):
            f = open("seeds.txt", "w")
            for seed in self.inputs.seed:
                if isinstance(seed, list):
                    f.write("%s\n" % (" ".join([str(s) for s in seed])))
                else:
                    f.write("%s\n" % seed)
            f.close()

        runtime = super(ProbTrackX, self)._run_interface(runtime)
        if runtime.stderr:
            self.raise_exception(runtime)
        return runtime
Example #17
def test_masking(input_fname, expected_fname):
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=1)
    bold_reference_wf.inputs.inputnode.bold_file = input_fname

    # Reconstruct base_fname from above
    dirname, basename = os.path.split(input_fname)
    dsname = os.path.basename(dirname)
    reports_dir = Path(os.getenv('FMRIPREP_REGRESSION_REPORTS', ''))
    newpath = reports_dir / dsname
    out_fname = fname_presuffix(basename, suffix='_masks.svg', use_ext=False,
                                newpath=str(newpath))
    newpath.mkdir(parents=True, exist_ok=True)

    mask_diff_plot = pe.Node(ROIsPlot(colors=['limegreen'], levels=[0.5]),
                             name='mask_diff_plot')
    mask_diff_plot.inputs.in_mask = expected_fname
    mask_diff_plot.inputs.out_report = out_fname

    outputnode = bold_reference_wf.get_node('outputnode')
    bold_reference_wf.connect([
        (outputnode, mask_diff_plot, [('ref_image', 'in_file'),
                                      ('bold_mask', 'in_rois')])
    ])
    res = bold_reference_wf.run(plugin='MultiProc')

    combine_masks = [node for node in res.nodes if node.name.endswith('combine_masks')][0]
    overlap = symmetric_overlap(expected_fname,
                                combine_masks.result.outputs.out_file)

    mask_dir = reports_dir / 'fmriprep_bold_mask' / dsname
    mask_dir.mkdir(parents=True, exist_ok=True)
    copyfile(combine_masks.result.outputs.out_file,
             fname_presuffix(basename, suffix='_mask',
                             use_ext=True, newpath=str(mask_dir)),
             copy=True)

    assert overlap > 0.95, input_fname
Example #18
def parcellate(func_boot_img, local_corr, clust_type, _local_conn_mat_path,
               num_conn_comps, _clust_mask_corr_img, _standardize, _detrending,
               k, _local_conn, conf, _dir_path, _conn_comps):
    """
    API for performing any of a variety of clustering routines available
    through NiLearn.
    """
    import time
    import os
    import numpy as np
    from nilearn.regions import Parcellations
    from pynets.fmri.estimation import fill_confound_nans
    # from joblib import Memory
    import tempfile

    cache_dir = tempfile.mkdtemp()
    # memory = Memory(cache_dir, verbose=0)

    start = time.time()

    if (clust_type == "ward") and (local_corr != "allcorr"):
        if _local_conn_mat_path is not None:
            if not os.path.isfile(_local_conn_mat_path):
                try:
                    raise FileNotFoundError(
                        "File containing sparse matrix of local connectivity"
                        " structure not found.")
                except FileNotFoundError:
                    import sys
                    sys.exit(0)
        else:
            try:
                raise FileNotFoundError(
                    "File containing sparse matrix of local connectivity"
                    " structure not found.")
            except FileNotFoundError:
                import sys
                sys.exit(0)

    if (clust_type == "complete" or clust_type == "average"
            or clust_type == "single" or clust_type == "ward"
            or (clust_type == "rena" and num_conn_comps == 1)
            or (clust_type == "kmeans" and num_conn_comps == 1)):
        _clust_est = Parcellations(method=clust_type,
                                   standardize=_standardize,
                                   detrend=_detrending,
                                   n_parcels=k,
                                   mask=_clust_mask_corr_img,
                                   connectivity=_local_conn,
                                   mask_strategy="background",
                                   random_state=42)

        if conf is not None:
            import pandas as pd
            import random
            from nipype.utils.filemanip import fname_presuffix, copyfile

            out_name_conf = fname_presuffix(
                conf,
                suffix=f"_tmp{random.randint(1, 1000)}",
                newpath=cache_dir)
            copyfile(conf, out_name_conf, copy=True, use_hardlink=False)

            confounds = pd.read_csv(out_name_conf, sep="\t")
            if confounds.isnull().values.any():
                conf_corr = fill_confound_nans(confounds, _dir_path)
                try:
                    _clust_est.fit(func_boot_img, confounds=conf_corr)
                except UserWarning:
                    return None
                os.remove(conf_corr)
            else:
                try:
                    _clust_est.fit(func_boot_img, confounds=out_name_conf)
                except UserWarning:
                    return None
            os.remove(out_name_conf)
        else:
            try:
                _clust_est.fit(func_boot_img)
            except UserWarning:
                return None
        _clust_est.labels_img_.set_data_dtype(np.uint16)
        print(f"{clust_type}{k}"
              f"{(' clusters: %.2fs' % (time.time() - start))}")

        return _clust_est.labels_img_

    elif clust_type == "ncut":
        out_img = parcellate_ncut(_local_conn, k, _clust_mask_corr_img)
        out_img.set_data_dtype(np.uint16)
        print(f"{clust_type}{k}"
              f"{(' clusters: %.2fs' % (time.time() - start))}")
        return out_img

    elif (clust_type == "rena"
          or clust_type == "kmeans" and num_conn_comps > 1):
        from pynets.core import nodemaker
        from nilearn.regions import connected_regions, Parcellations
        from nilearn.image import iter_img, new_img_like
        from pynets.core.utils import flatten, proportional

        mask_img_list = []
        mask_voxels_dict = dict()
        for i, mask_img in enumerate(iter_img(_conn_comps)):
            mask_voxels_dict[i] = int(np.sum(np.asarray(mask_img.dataobj)))
            mask_img_list.append(mask_img)

        # Allocate k across connected components using Hagenbach-Bischoff
        # Quota based on number of voxels
        k_list = proportional(k, list(mask_voxels_dict.values()))

        conn_comp_atlases = []
        print(f"Building {len(mask_img_list)} separate atlases with "
              f"voxel-proportional k clusters for each "
              f"connected component...")
        for i, mask_img in enumerate(iter_img(mask_img_list)):
            if k_list[i] < 5:
                print(f"Only {k_list[i]} voxels in component. Discarding...")
                continue
            _clust_est = Parcellations(method=clust_type,
                                       standardize=_standardize,
                                       detrend=_detrending,
                                       n_parcels=k_list[i],
                                       mask=mask_img,
                                       mask_strategy="background",
                                       random_state=i)
            if conf is not None:
                import pandas as pd
                import random
                from nipype.utils.filemanip import fname_presuffix, copyfile

                out_name_conf = fname_presuffix(
                    conf,
                    suffix=f"_tmp{random.randint(1, 1000)}",
                    newpath=cache_dir)
                copyfile(conf, out_name_conf, copy=True, use_hardlink=False)

                confounds = pd.read_csv(out_name_conf, sep="\t")
                if confounds.isnull().values.any():
                    conf_corr = fill_confound_nans(confounds, _dir_path)
                    try:
                        _clust_est.fit(func_boot_img, confounds=conf_corr)
                    except UserWarning:
                        continue
                else:
                    try:
                        _clust_est.fit(func_boot_img, confounds=conf)
                    except UserWarning:
                        continue
            else:
                try:
                    _clust_est.fit(func_boot_img)
                except UserWarning:
                    continue
            conn_comp_atlases.append(_clust_est.labels_img_)

        # Then combine the multiple atlases, corresponding to each
        # connected component, into a single atlas
        atlas_of_atlases = []
        for atlas in iter_img(conn_comp_atlases):
            bna_data = np.around(np.asarray(atlas.dataobj)).astype("uint16")

            # Get an array of unique parcels
            bna_data_for_coords_uniq = np.unique(bna_data)

            # Number of parcels:
            par_max = len(bna_data_for_coords_uniq) - 1
            img_stack = []
            for idx in range(1, par_max + 1):
                roi_img = bna_data == bna_data_for_coords_uniq[idx].astype(
                    "uint16")
                img_stack.append(roi_img.astype("uint16"))
            img_stack = np.array(img_stack)

            img_list = []
            for idy in range(par_max):
                img_list.append(new_img_like(atlas, img_stack[idy]))
            atlas_of_atlases.append(img_list)
            del img_list, img_stack, bna_data

        atlas_of_atlases = list(flatten(atlas_of_atlases))

        [super_atlas_ward, _] = nodemaker.create_parcel_atlas(atlas_of_atlases)
        super_atlas_ward.set_data_dtype(np.uint16)
        del atlas_of_atlases, conn_comp_atlases, mask_img_list, \
            mask_voxels_dict

        print(f"{clust_type}{k}"
              f"{(' clusters: %.2fs' % (time.time() - start))}")

        # memory.clear(warn=False)

        return super_atlas_ward
Example #19
    def _run_interface(self, runtime):
        _, _, ext = split_filename(self.inputs.tensor_file)
        copyfile(self.inputs.tensor_file, os.path.abspath(self.inputs.input_data_prefix + "_tensor" + ext), copy=False)

        return super(DTITracker, self)._run_interface(runtime)
Example #20
    def _run_interface(self, runtime):
        from pynets.core import utils, nodemaker
        from nipype.utils.filemanip import fname_presuffix, copyfile
        from nilearn.image import concat_imgs
        import pandas as pd
        import time
        import textwrap
        from pathlib import Path
        import os.path as op
        import glob

        base_path = utils.get_file()
        # Test if atlas is a nilearn atlas. If so, fetch coords, labels, and/or
        # networks.
        nilearn_parc_atlases = [
            "atlas_harvard_oxford",
            "atlas_aal",
            "atlas_destrieux_2009",
            "atlas_talairach_gyrus",
            "atlas_talairach_ba",
            "atlas_talairach_lobe",
        ]
        nilearn_coords_atlases = ["coords_power_2011", "coords_dosenbach_2010"]
        nilearn_prob_atlases = ["atlas_msdl", "atlas_pauli_2017"]
        local_atlases = [
            op.basename(i).split(".nii")[0]
            for i in glob.glob(f"{str(Path(base_path).parent.parent)}"
                               f"/templates/atlases/*.nii.gz")
            if "_4d" not in i
        ]

        if self.inputs.parcellation is None and self.inputs.atlas in \
                nilearn_parc_atlases:
            [labels, networks_list, parcellation
             ] = nodemaker.nilearn_atlas_helper(self.inputs.atlas,
                                                self.inputs.parc)
            if parcellation:
                if not isinstance(parcellation, str):
                    nib.save(
                        parcellation, f"{runtime.cwd}"
                        f"{self.inputs.atlas}{'.nii.gz'}")
                    parcellation = f"{runtime.cwd}" \
                                   f"{self.inputs.atlas}{'.nii.gz'}"
                if self.inputs.clustering is False:
                    [parcellation,
                     labels] = \
                        nodemaker.enforce_hem_distinct_consecutive_labels(
                        parcellation, label_names=labels)
                [coords, atlas, par_max, label_intensities] = \
                    nodemaker.get_names_and_coords_of_parcels(parcellation)
                if self.inputs.parc is True:
                    parcels_4d_img = nodemaker.three_to_four_parcellation(
                        parcellation)
                else:
                    parcels_4d_img = None
            else:
                raise FileNotFoundError(
                    f"\nAtlas file for {self.inputs.atlas} not found!")

            atlas = self.inputs.atlas
        elif (self.inputs.parcellation is None and self.inputs.parc is False
              and self.inputs.atlas in nilearn_coords_atlases):
            print("Fetching coords and labels from nilearn coordinate-based"
                  " atlas library...")
            # Fetch nilearn atlas coords
            [coords, _, networks_list,
             labels] = nodemaker.fetch_nilearn_atlas_coords(self.inputs.atlas)
            parcels_4d = None
            par_max = None
            atlas = self.inputs.atlas
            parcellation = None
            label_intensities = None
        elif (self.inputs.parcellation is None and self.inputs.parc is False
              and self.inputs.atlas in nilearn_prob_atlases):
            import matplotlib
            matplotlib.use("agg")
            from nilearn.plotting import find_probabilistic_atlas_cut_coords

            print("Fetching coords and labels from nilearn probabilistic atlas"
                  " library...")
            # Fetch nilearn atlas coords
            [labels, networks_list, parcellation
             ] = nodemaker.nilearn_atlas_helper(self.inputs.atlas,
                                                self.inputs.parc)
            coords = find_probabilistic_atlas_cut_coords(maps_img=parcellation)
            if parcellation:
                if not isinstance(parcellation, str):
                    nib.save(
                        parcellation, f"{runtime.cwd}"
                        f"{self.inputs.atlas}{'.nii.gz'}")
                    parcellation = f"{runtime.cwd}" \
                                   f"{self.inputs.atlas}{'.nii.gz'}"
                if self.inputs.clustering is False:
                    [parcellation,
                     labels] = \
                        nodemaker.enforce_hem_distinct_consecutive_labels(
                        parcellation, label_names=labels)
                if self.inputs.parc is True:
                    parcels_4d_img = nodemaker.three_to_four_parcellation(
                        parcellation)
                else:
                    parcels_4d_img = None
            else:
                raise FileNotFoundError(
                    f"\nAtlas file for {self.inputs.atlas} not found!")

            par_max = None
            atlas = self.inputs.atlas
            label_intensities = None
        elif self.inputs.parcellation is None and self.inputs.atlas in \
            local_atlases:
            parcellation_pre = (
                f"{str(Path(base_path).parent.parent)}/templates/atlases/"
                f"{self.inputs.atlas}.nii.gz")
            parcellation = fname_presuffix(parcellation_pre,
                                           newpath=runtime.cwd)
            copyfile(parcellation_pre,
                     parcellation,
                     copy=True,
                     use_hardlink=False)
            try:
                par_img = nib.load(parcellation)
            except indexed_gzip.ZranError as e:
                print(
                    e, "\nCannot load subnetwork reference image. "
                    "Do you have git-lfs installed?")
            try:
                if self.inputs.clustering is False:
                    [parcellation, _] = \
                        nodemaker.enforce_hem_distinct_consecutive_labels(
                            parcellation)

                # Fetch user-specified atlas coords
                [coords, _, par_max, label_intensities] = \
                    nodemaker.get_names_and_coords_of_parcels(parcellation)
                if self.inputs.parc is True:
                    parcels_4d_img = nodemaker.three_to_four_parcellation(
                        parcellation)
                else:
                    parcels_4d_img = None
                # Describe user atlas coords
                print(f"\n{self.inputs.atlas} comes with {par_max} parcels\n")
            except ValueError as e:
                print(
                    e, "Either you have specified the name of an atlas that "
                    "does not exist in the nilearn or local repository or "
                    "you have not supplied a 3d atlas parcellation image!")
            labels = None
            networks_list = None
            atlas = self.inputs.atlas
        elif self.inputs.parcellation:
            if self.inputs.clustering is True:
                while True:
                    if op.isfile(self.inputs.parcellation):
                        break
                    else:
                        print("Waiting for atlas file...")
                        time.sleep(5)

            try:
                parcellation_tmp_path = fname_presuffix(
                    self.inputs.parcellation, newpath=runtime.cwd)
                copyfile(self.inputs.parcellation,
                         parcellation_tmp_path,
                         copy=True,
                         use_hardlink=False)
                # Fetch user-specified atlas coords
                if self.inputs.clustering is False:
                    [parcellation,
                     _] = nodemaker.enforce_hem_distinct_consecutive_labels(
                         parcellation_tmp_path)
                else:
                    parcellation = parcellation_tmp_path
                [coords, atlas, par_max, label_intensities] = \
                    nodemaker.get_names_and_coords_of_parcels(parcellation)
                if self.inputs.parc is True:
                    parcels_4d_img = nodemaker.three_to_four_parcellation(
                        parcellation)
                else:
                    parcels_4d_img = None

                atlas = utils.prune_suffices(atlas)

                # Describe user atlas coords
                print(f"\n{atlas} comes with {par_max} parcels\n")
            except ValueError as e:
                print(
                    e, "Either you have specified the name of an atlas that "
                    "does not exist in the nilearn or local repository or "
                    "you have not supplied a 3d atlas parcellation image!")
            labels = None
            networks_list = None
        else:
            raise ValueError(
                "Either you have specified the name of an atlas that does"
                " not exist in the nilearn or local repository or you have"
                " not supplied a 3d atlas parcellation image!")

        # Labels prep
        if atlas and not labels:
            if (self.inputs.ref_txt is not None) and (op.exists(
                    self.inputs.ref_txt)):
                labels = pd.read_csv(self.inputs.ref_txt,
                                     sep=" ",
                                     header=None,
                                     names=["Index",
                                            "Region"])["Region"].tolist()
            else:
                if atlas in local_atlases:
                    ref_txt = (
                        f"{str(Path(base_path).parent.parent)}/templates/"
                        f"labels/"
                        f"{atlas}.txt")
                else:
                    ref_txt = self.inputs.ref_txt
                if ref_txt is not None:
                    try:
                        labels = pd.read_csv(ref_txt,
                                             sep=" ",
                                             header=None,
                                             names=["Index", "Region"
                                                    ])["Region"].tolist()
                    except BaseException:
                        if self.inputs.use_parcel_naming is True:
                            try:
                                labels = nodemaker.parcel_naming(
                                    coords, self.inputs.vox_size)
                            except BaseException:
                                print("AAL reference labeling failed!")
                                labels = np.arange(len(coords) + 1)[
                                    np.arange(len(coords) + 1) != 0].tolist()
                        else:
                            print("Using generic index labels...")
                            labels = np.arange(len(coords) +
                                               1)[np.arange(len(coords) +
                                                            1) != 0].tolist()
                else:
                    if self.inputs.use_parcel_naming is True:
                        try:
                            labels = nodemaker.parcel_naming(
                                coords, self.inputs.vox_size)
                        except BaseException:
                            print("AAL reference labeling failed!")
                            labels = np.arange(len(coords) +
                                               1)[np.arange(len(coords) +
                                                            1) != 0].tolist()
                    else:
                        print("Using generic index labels...")
                        labels = np.arange(len(coords) +
                                           1)[np.arange(len(coords) +
                                                        1) != 0].tolist()

        dir_path = utils.do_dir_path(atlas, self.inputs.outdir)

        if len(coords) != len(labels):
            labels = [
                i for i in labels if (i != 'Unknown' and i != 'Background')
            ]
            if len(coords) != len(labels):
                print("Length of coordinates is not equal to length of "
                      "label names...")
                if self.inputs.use_parcel_naming is True:
                    try:
                        print("Attempting consensus parcel naming instead...")
                        labels = nodemaker.parcel_naming(
                            coords, self.inputs.vox_size)
                    except BaseException:
                        print("Reverting to integer labels instead...")
                        labels = np.arange(len(coords) +
                                           1)[np.arange(len(coords) +
                                                        1) != 0].tolist()
                else:
                    print("Reverting to integer labels instead...")
                    labels = np.arange(len(coords) +
                                       1)[np.arange(len(coords) +
                                                    1) != 0].tolist()

        print(f"Coordinates:\n{coords}")
        print(f"Labels:\n"
              f"{textwrap.shorten(str(labels), width=1000, placeholder='...')}"
              f"")

        assert len(coords) == len(labels)

        if label_intensities is not None:
            self._results["labels"] = list(zip(labels, label_intensities))
        else:
            self._results["labels"] = labels
        self._results["coords"] = coords
        self._results["atlas"] = atlas
        self._results["networks_list"] = networks_list
        # TODO: Optimize this with 4d array concatenation and .npyz

        out_path = f"{runtime.cwd}/parcels_4d.nii.gz"
        nib.save(parcels_4d_img, out_path)
        self._results["parcels_4d"] = out_path
        self._results["par_max"] = par_max
        self._results["parcellation"] = parcellation
        self._results["dir_path"] = dir_path

        return runtime
Example #21
    def _list_outputs(self):
        """Execute this module.
        """

        # Init variables
        outputs = self.output_spec().get()
        out_files = []
        # Use hardlink
        use_hardlink = str2bool(
            config.get('execution', 'try_hard_link_datasink'))

        # Set local output directory if specified
        if isdefined(self.inputs.local_copy):
            outdir = self.inputs.local_copy
        else:
            outdir = self.inputs.base_directory
            # If base directory isn't given, assume current directory
            if not isdefined(outdir):
                outdir = '.'

        # Check if base directory reflects S3 bucket upload
        s3_flag, bucket_name = self._check_s3_base_dir()
        if s3_flag:
            s3dir = self.inputs.base_directory
            # If user overrides bucket object, use that
            if self.inputs.bucket:
                bucket = self.inputs.bucket
            # Otherwise fetch bucket object using name
            else:
                try:
                    bucket = self._fetch_bucket(bucket_name)
                # If encountering an exception during bucket access, set output
                # base directory to a local folder
                except Exception as exc:
                    s3dir = '<N/A>'
                    if not isdefined(self.inputs.local_copy):
                        local_out_exception = os.path.join(
                            os.path.expanduser('~'),
                            's3_datasink_' + bucket_name)
                        outdir = local_out_exception
                    # Log local copying directory
                    iflogger.info(
                        'Access to S3 failed! Storing outputs locally at: '
                        '%s\nError: %s', outdir, exc)
        else:
            s3dir = '<N/A>'

        # If container input is given, append that to outdir
        if isdefined(self.inputs.container):
            outdir = os.path.join(outdir, self.inputs.container)
            s3dir = os.path.join(s3dir, self.inputs.container)

        # If sinking to local folder
        if outdir != s3dir:
            outdir = os.path.abspath(outdir)
            # Create the directory if it doesn't exist
            if not os.path.exists(outdir):
                try:
                    os.makedirs(outdir)
                except OSError as inst:
                    if 'File exists' in inst.strerror:
                        pass
                    else:
                        raise (inst)

        # Iterate through outputs attributes {key : path(s)}
        for key, files in list(self.inputs._outputs.items()):
            if not isdefined(files):
                continue
            iflogger.debug("key: %s files: %s", key, str(files))
            files = ensure_list(files)
            tempoutdir = outdir
            if s3_flag:
                s3tempoutdir = s3dir
            for d in key.split('.'):
                if d[0] == '@':
                    continue
                tempoutdir = os.path.join(tempoutdir, d)
                if s3_flag:
                    s3tempoutdir = os.path.join(s3tempoutdir, d)

            # flattening list
            if isinstance(files, list):
                if isinstance(files[0], list):
                    files = [item for sublist in files for item in sublist]

            # Iterate through passed-in source files
            for src in ensure_list(files):
                # Format src and dst files
                src = os.path.abspath(src)
                if not os.path.isfile(src):
                    src = os.path.join(src, '')
                dst = self._get_dst(src)
                if s3_flag:
                    s3dst = os.path.join(s3tempoutdir, dst)
                    s3dst = self._substitute(s3dst)
                dst = os.path.join(tempoutdir, dst)
                dst = self._substitute(dst)
                path, _ = os.path.split(dst)

                # If we're uploading to S3
                if s3_flag:
                    self._upload_to_s3(bucket, src, s3dst)
                    out_files.append(s3dst)
                # Otherwise, copy locally src -> dst
                if not s3_flag or isdefined(self.inputs.local_copy):
                    # Create output directory if it doesnt exist
                    if not os.path.exists(path):
                        try:
                            os.makedirs(path)
                        except OSError as inst:
                            if 'File exists' in inst.strerror:
                                pass
                            else:
                                raise (inst)
                    # If src is a file, copy it to dst
                    if os.path.isfile(src):
                        iflogger.debug('copyfile: %s %s', src, dst)
                        copyfile(src,
                                 dst,
                                 copy=True,
                                 hashmethod='content',
                                 use_hardlink=use_hardlink)
                        out_files.append(dst)
                    # If src is a directory, copy entire contents to dst dir
                    elif os.path.isdir(src):
                        if os.path.exists(dst) and self.inputs.remove_dest_dir:
                            iflogger.debug('removing: %s', dst)
                            shutil.rmtree(dst)
                        iflogger.debug('copydir: %s %s', src, dst)
                        copytree(src, dst)
                        out_files.append(dst)

        # Return outputs dictionary
        outputs['out_file'] = out_files

        return outputs
Example #22
    try:
        os.makedirs(dst)
    except OSError as why:
        if 'File exists' in str(why):
            pass
        else:
            raise
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.isdir(srcname):
                copytree(srcname, dstname)
            else:
                copyfile(srcname, dstname, True, hashmethod='content')
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Exception as err:
            errors.extend(err.args[0])
    if errors:
        raise Exception(errors)


def add_traits(base, names, trait_type=None):
    """ Add traits to a traited class.

    All traits are set to Undefined by default
    """
Example #23
    def _run_interface(self, runtime):
        import nibabel as nb
        import nilearn.image as nli
        from nipype.utils.filemanip import fname_presuffix, copyfile

        in_names = self.inputs.t1w_list
        orig_imgs = [nb.load(fname) for fname in in_names]
        reoriented = [nb.as_closest_canonical(img) for img in orig_imgs]
        target_shape = np.max([img.shape for img in reoriented], axis=0)
        target_zooms = np.min(
            [img.header.get_zooms()[:3] for img in reoriented], axis=0)

        resampled_imgs = []
        for img in reoriented:
            zooms = np.array(img.header.get_zooms()[:3])
            shape = np.array(img.shape)

            xyz_unit = img.header.get_xyzt_units()[0]
            if xyz_unit == 'unknown':
                # Common assumption; if we're wrong, unlikely to be the only thing that breaks
                xyz_unit = 'mm'
            # Set a 0.05mm threshold for performing rescaling
            atol = {'meter': 5e-5, 'mm': 0.05, 'micron': 50}[xyz_unit]

            # Rescale => change zooms
            # Resize => update image dimensions
            rescale = not np.allclose(zooms, target_zooms, atol=atol)
            resize = not np.all(shape == target_shape)
            if rescale or resize:
                target_affine = np.eye(4, dtype=img.affine.dtype)
                if rescale:
                    scale_factor = target_zooms / zooms
                    target_affine[:3, :3] = np.diag(scale_factor).dot(
                        img.affine[:3, :3])
                else:
                    target_affine[:3, :3] = img.affine[:3, :3]

                if resize:
                    # The shift is applied after scaling.
                    # Use a proportional shift to maintain relative position in dataset
                    size_factor = (target_shape.astype(float) +
                                   shape) / (2 * shape)
                    # Use integer shifts to avoid unnecessary interpolation
                    offset = (img.affine[:3, 3] * size_factor -
                              img.affine[:3, 3]).astype(int)
                    target_affine[:3, 3] = img.affine[:3, 3] + offset
                else:
                    target_affine[:3, 3] = img.affine[:3, 3]

                data = nli.resample_img(img, target_affine,
                                        target_shape).get_data()
                img = img.__class__(data, target_affine, img.header)

            resampled_imgs.append(img)

        out_names = [
            fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
            for fname in in_names
        ]

        for orig, final, in_name, out_name in zip(orig_imgs, resampled_imgs,
                                                  in_names, out_names):
            if final is orig:
                copyfile(in_name, out_name, copy=True, use_hardlink=True)
            else:
                final.to_filename(out_name)

        self._results['t1w_list'] = out_names

        return runtime
Example #24
    def _run_interface(self, runtime):
        import gc
        from nipype.utils.filemanip import fname_presuffix, copyfile
        from pynets.fmri import estimation

        if self.inputs.net_parcels_nii_path:
            out_name_net_parcels_nii_path = fname_presuffix(self.inputs.net_parcels_nii_path, suffix='_tmp',
                                                            newpath=runtime.cwd)
            copyfile(self.inputs.net_parcels_nii_path, out_name_net_parcels_nii_path, copy=True, use_hardlink=False)
        else:
            out_name_net_parcels_nii_path = None
        if self.inputs.mask:
            out_name_mask = fname_presuffix(self.inputs.mask, suffix='_tmp', newpath=runtime.cwd)
            copyfile(self.inputs.mask, out_name_mask, copy=True, use_hardlink=False)
        else:
            out_name_mask = None
        out_name_func_file = fname_presuffix(self.inputs.func_file, suffix='_tmp', newpath=runtime.cwd)
        copyfile(self.inputs.func_file, out_name_func_file, copy=True, use_hardlink=False)

        if self.inputs.conf:
            out_name_conf = fname_presuffix(self.inputs.conf, suffix='_tmp', newpath=runtime.cwd)
            copyfile(self.inputs.conf, out_name_conf, copy=True, use_hardlink=False)
        else:
            out_name_conf = None

        te = estimation.TimeseriesExtraction(net_parcels_nii_path=out_name_net_parcels_nii_path,
                                             node_size=self.inputs.node_size,
                                             conf=out_name_conf,
                                             func_file=out_name_func_file,
                                             coords=self.inputs.coords,
                                             roi=self.inputs.roi,
                                             dir_path=self.inputs.dir_path,
                                             ID=self.inputs.ID,
                                             network=self.inputs.network,
                                             smooth=self.inputs.smooth,
                                             atlas=self.inputs.atlas,
                                             uatlas=self.inputs.uatlas,
                                             labels=self.inputs.labels,
                                             c_boot=self.inputs.c_boot,
                                             block_size=self.inputs.block_size,
                                             hpass=self.inputs.hpass,
                                             mask=out_name_mask)

        te.prepare_inputs()
        if self.inputs.parc is False:
            if len(self.inputs.coords) > 0:
                te.extract_ts_coords()
            else:
                raise RuntimeError(
                    '\nERROR: Cannot extract time-series from an empty list of coordinates. \nThis usually means '
                    'that no nodes were generated based on the specified conditions at runtime (e.g. atlas was '
                    'overly restricted by an RSN or some user-defined mask).')
        else:
            te.extract_ts_parc()

        if float(self.inputs.c_boot) > 0:
            te.bootstrap_timeseries()

        te.save_and_cleanup()

        self._results['ts_within_nodes'] = te.ts_within_nodes
        self._results['node_size'] = te.node_size
        self._results['smooth'] = te.smooth
        self._results['dir_path'] = te.dir_path
        self._results['atlas'] = te.atlas
        self._results['uatlas'] = te.uatlas
        self._results['labels'] = te.labels
        self._results['coords'] = te.coords
        self._results['c_boot'] = te.c_boot
        self._results['hpass'] = te.hpass
        self._results['roi'] = self.inputs.roi

        del te
        gc.collect()

        return runtime
Example #25
    try:
        os.makedirs(dst)
    except OSError as why:
        if 'File exists' in str(why):
            pass
        else:
            raise
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.isdir(srcname):
                copytree(srcname, dstname)
            else:
                copyfile(srcname, dstname, True, hashmethod='content')
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Exception as err:
            errors.extend(err.args[0])
    if errors:
        raise Exception(errors)
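For reference, nipype's copyfile chooses between symlinking, hard-linking, and a true byte copy depending on its flags. A small, hedged demonstration with made-up file names (behavior on non-POSIX systems may differ):

import os
from nipype.utils.filemanip import copyfile

src = 'example_input.txt'  # hypothetical source file
with open(src, 'w') as f:
    f.write('hello\n')

# copy=False (the default) prefers a symlink on POSIX systems;
# use_hardlink=True attempts a hard link before falling back to copying;
# copy=True forces a byte-for-byte copy.
copyfile(src, 'linked.txt', copy=False)
copyfile(src, 'hardlinked.txt', copy=False, use_hardlink=True)
copyfile(src, 'copied.txt', copy=True)

print(os.path.islink('linked.txt'), os.path.islink('copied.txt'))  # True False on POSIX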

def add_traits(base, names, trait_type=None):
    """ Add traits to a traited class.

    All traits are set to Undefined by default
    """
    if trait_type is None:
        trait_type = traits.Any
    undefined_traits = {}
    for key in names:
        base.add_trait(key, trait_type)
        undefined_traits[key] = Undefined
    base.trait_set(trait_change_notify=False, **undefined_traits)
    return base

    def _run_interface(self, runtime):
        import nibabel as nb
        import nilearn.image as nli
        from nipype.utils.filemanip import fname_presuffix, copyfile

        in_names = self.inputs.t1w_list
        orig_imgs = [nb.load(fname) for fname in in_names]
        reoriented = [nb.as_closest_canonical(img) for img in orig_imgs]
        target_shape = np.max([img.shape for img in reoriented], axis=0)
        target_zooms = np.min([img.header.get_zooms()[:3]
                               for img in reoriented], axis=0)

        resampled_imgs = []
        for img in reoriented:
            zooms = np.array(img.header.get_zooms()[:3])
            shape = np.array(img.shape)

            xyz_unit = img.header.get_xyzt_units()[0]
            if xyz_unit == 'unknown':
                # Common assumption; if we're wrong, unlikely to be the only thing that breaks
                xyz_unit = 'mm'
            # Set a 0.05 mm threshold for deciding whether to rescale
            atol = {'meter': 5e-5, 'mm': 0.05, 'micron': 50}[xyz_unit]

            # Rescale => change zooms
            # Resize => update image dimensions
            rescale = not np.allclose(zooms, target_zooms, atol=atol)
            resize = not np.all(shape == target_shape)
            if rescale or resize:
                target_affine = np.eye(4, dtype=img.affine.dtype)
                if rescale:
                    scale_factor = target_zooms / zooms
                    target_affine[:3, :3] = np.diag(scale_factor).dot(img.affine[:3, :3])
                else:
                    target_affine[:3, :3] = img.affine[:3, :3]

                if resize:
                    # The shift is applied after scaling.
                    # Use a proportional shift to maintain relative position in dataset
                    size_factor = (target_shape.astype(float) + shape) / (2 * shape)
                    # Use integer shifts to avoid unnecessary interpolation
                    offset = (img.affine[:3, 3] * size_factor - img.affine[:3, 3]).astype(int)
                    target_affine[:3, 3] = img.affine[:3, 3] + offset
                else:
                    target_affine[:3, 3] = img.affine[:3, 3]

                data = nli.resample_img(img, target_affine, target_shape).get_data()
                img = img.__class__(data, target_affine, img.header)

            resampled_imgs.append(img)

        out_names = [fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
                     for fname in in_names]

        for orig, final, in_name, out_name in zip(orig_imgs, resampled_imgs,
                                                  in_names, out_names):
            if final is orig:
                copyfile(in_name, out_name, copy=True, use_hardlink=True)
            else:
                final.to_filename(out_name)

        self._results['t1w_list'] = out_names

        return runtime
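The rescale/resize logic above only touches the affine's 3x3 scale block and its translation column. A tiny worked example with invented zooms, shapes, and origin may make the arithmetic concrete:

import numpy as np

# Hypothetical case: a 1.2 mm isotropic image conformed to 1.0 mm,
# with the grid padded from 180 to 192 voxels along each axis.
zooms = np.array([1.2, 1.2, 1.2])
target_zooms = np.array([1.0, 1.0, 1.0])
shape = np.array([180, 180, 180])
target_shape = np.array([192, 192, 192])
affine = np.diag([1.2, 1.2, 1.2, 1.0])
affine[:3, 3] = [-108.0, -108.0, -108.0]  # arbitrary origin

target_affine = np.eye(4)
# Rescale: scale the voxel-to-world mapping by the ratio of zooms.
scale_factor = target_zooms / zooms
target_affine[:3, :3] = np.diag(scale_factor).dot(affine[:3, :3])
# Resize: shift the origin proportionally to maintain relative position,
# rounding to whole voxels to avoid unnecessary interpolation.
size_factor = (target_shape.astype(float) + shape) / (2 * shape)
offset = (affine[:3, 3] * size_factor - affine[:3, 3]).astype(int)
target_affine[:3, 3] = affine[:3, 3] + offset

print(target_affine.diagonal()[:3])  # -> [1. 1. 1.]
print(target_affine[:3, 3])          # shifted origin, e.g. [-111. -111. -111.]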
Beispiel #27
    def __init__(self, layout, out_dir, config=None):
        if not config:
            raise RuntimeError('Reportlet must have a config object')

        # PY35: Sorted config dict for consistent behavior
        self.name = config.get(
            'name',
            '_'.join('%s-%s' % i for i in sorted(config['bids'].items())))
        self.title = config.get('title')
        self.subtitle = config.get('subtitle')
        self.description = config.get('description')

        # Query the BIDS layout of reportlets
        files = layout.get(**config['bids'])

        self.components = []
        for bidsfile in files:
            src = Path(bidsfile.path)
            ext = ''.join(src.suffixes)
            desc_text = config.get('caption')

            contents = None
            if ext == '.html':
                contents = src.read_text().strip()
            elif ext == '.svg':
                entities = dict(bidsfile.entities)
                if desc_text:
                    desc_text = desc_text.format(**entities)

                entities['extension'] = 'svg'
                entities['datatype'] = 'figures'
                linked_svg = layout.build_path(entities, validate=False)
                if linked_svg is None:
                    raise ValueError(
                        "Could not generate SVG path to copy {src}"
                        " to. Entities: {entities}".format(src=src,
                                                           entities=entities))
                out_file = out_dir / linked_svg
                out_file.parent.mkdir(parents=True, exist_ok=True)
                # PY35: Coerce to str to pacify os.* functions that don't take Paths until 3.6
                copyfile(str(src), str(out_file), copy=True, use_hardlink=True)
                is_static = config.get('static', True)
                contents = SVG_SNIPPET[is_static].format(linked_svg)

                # Our current implementations of dynamic reportlets do this themselves;
                # however, the code is left here since this is potentially something we
                # will want to transfer from every figure generator to this location.
                # The following code misses setting preserveAspectRatio="xMidYMid meet"
                # if not is_static:
                #     # Remove height and width attributes from initial <svg> tag
                #     svglines = out_file.read_text().splitlines()
                #     expr = re.compile(r' (height|width)=["\'][0-9]+(\.[0-9]*)?[a-z]*["\']')
                #     for l, line in enumerate(svglines[:6]):
                #         if line.strip().startswith('<svg'):
                #             newline = expr.sub('', line)
                #             svglines[l] = newline
                #             out_file.write_text('\n'.join(svglines))
                #             break

            if contents:
                self.components.append((contents, desc_text))
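The commented-out block above can be lifted into a small standalone helper. A hedged sketch (not part of the original class; the function name is invented) that applies the same regex to the first few lines of an SVG file:

import re
from pathlib import Path

def strip_svg_dimensions(svg_path):
    # Remove height/width attributes from the opening <svg> tag so that a
    # dynamic reportlet scales with its container rather than a fixed size.
    # As noted above, preserveAspectRatio="xMidYMid meet" is not set here.
    expr = re.compile(r' (height|width)=["\'][0-9]+(\.[0-9]*)?[a-z]*["\']')
    svg_file = Path(svg_path)
    svglines = svg_file.read_text().splitlines()
    for i, line in enumerate(svglines[:6]):
        if line.strip().startswith('<svg'):
            svglines[i] = expr.sub('', line)
            svg_file.write_text('\n'.join(svglines))
            break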
Beispiel #28
class DataSink(IOBase):
    """ Generic datasink module to store structured outputs

        Primarily for use within a workflow. This interface allows arbitrary
        creation of input attributes. The names of these attributes define the
        directory structure to create for storage of the files or directories.

        The attributes take the following form:

        string[[.[@]]string[[.[@]]string]] ...

        where parts between [] are optional.

        An attribute such as contrasts.@con will create a 'contrasts' directory
        to store the results linked to the attribute. If the @ is left out, such
        as in 'contrasts.con', a subdirectory 'con' will be created under
        'contrasts'.

        The general form of the output is::

           'base_directory/container/parameterization/destloc/filename'

           destloc = string[[.[@]]string[[.[@]]string]] and
           filename comes from the input to the connect statement.

        .. warning::

            This is not a thread-safe node because it can write to a common
            shared location. It will not complain when it overwrites a file.

        .. note::

            If both substitutions and regexp_substitutions are used, then
            substitutions are applied first followed by regexp_substitutions.

            This interface **cannot** be used in a MapNode as the inputs are
            defined only when the connect statement is executed.

        Examples
        --------

        >>> ds = DataSink()
        >>> ds.inputs.base_directory = 'results_dir'
        >>> ds.inputs.container = 'subject'
        >>> ds.inputs.structural = 'structural.nii'
        >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii'])
        >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii'])
        >>> ds.run() # doctest: +SKIP

        To use DataSink in a MapNode, its inputs have to be defined at the
        time the interface is created.

        >>> ds = DataSink(infields=['contrasts.@con'])
        >>> ds.inputs.base_directory = 'results_dir'
        >>> ds.inputs.container = 'subject'
        >>> ds.inputs.structural = 'structural.nii'
        >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii'])
        >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii'])
        >>> ds.run() # doctest: +SKIP

    """
    input_spec = DataSinkInputSpec
    output_spec = DataSinkOutputSpec

    def __init__(self, infields=None, **kwargs):
        """
        Parameters
        ----------
        infields : list of str
            Indicates the input fields to be dynamically created
        """

        super(DataSink, self).__init__(**kwargs)
        undefined_traits = {}
        # used for mandatory inputs check
        self._infields = infields
        if infields:
            for key in infields:
                self.inputs.add_trait(key, traits.Any)
                self.inputs._outputs[key] = Undefined
                undefined_traits[key] = Undefined
        self.inputs.trait_set(trait_change_notify=False, **undefined_traits)

    def _get_dst(self, src):
        ## If path is directory with trailing os.path.sep,
        ## then remove that for a more robust behavior
        src = src.rstrip(os.path.sep)
        path, fname = os.path.split(src)
        if self.inputs.parameterization:
            dst = path
            if isdefined(self.inputs.strip_dir):
                dst = dst.replace(self.inputs.strip_dir, '')
            folders = [
                folder for folder in dst.split(os.path.sep)
                if folder.startswith('_')
            ]
            dst = os.path.sep.join(folders)
            if fname:
                dst = os.path.join(dst, fname)
        else:
            if fname:
                dst = fname
            else:
                dst = path.split(os.path.sep)[-1]
        if dst[0] == os.path.sep:
            dst = dst[1:]
        return dst

    def _substitute(self, pathstr):
        pathstr_ = pathstr
        if isdefined(self.inputs.substitutions):
            for key, val in self.inputs.substitutions:
                oldpathstr = pathstr
                pathstr = pathstr.replace(key, val)
                if pathstr != oldpathstr:
                    iflogger.debug('sub.str: %s -> %s using %r -> %r' %
                                   (oldpathstr, pathstr, key, val))
        if isdefined(self.inputs.regexp_substitutions):
            for key, val in self.inputs.regexp_substitutions:
                oldpathstr = pathstr
                pathstr, _ = re.subn(key, val, pathstr)
                if pathstr != oldpathstr:
                    iflogger.debug('sub.regexp: %s -> %s using %r -> %r' %
                                   (oldpathstr, pathstr, key, val))
        if pathstr_ != pathstr:
            iflogger.info('sub: %s -> %s' % (pathstr_, pathstr))
        return pathstr

    def _list_outputs(self):
        """Execute this module.
        """
        outputs = self.output_spec().get()
        out_files = []
        outdir = self.inputs.base_directory
        if not isdefined(outdir):
            outdir = '.'
        outdir = os.path.abspath(outdir)
        if isdefined(self.inputs.container):
            outdir = os.path.join(outdir, self.inputs.container)
        if not os.path.exists(outdir):
            try:
                os.makedirs(outdir)
            except OSError as inst:
                if 'File exists' in str(inst):
                    pass
                else:
                    raise
        for key, files in self.inputs._outputs.items():
            if not isdefined(files):
                continue
            iflogger.debug("key: %s files: %s" % (key, str(files)))
            files = filename_to_list(files)
            tempoutdir = outdir
            for d in key.split('.'):
                if d[0] == '@':
                    continue
                tempoutdir = os.path.join(tempoutdir, d)

            # flattening list
            if isinstance(files, list):
                if isinstance(files[0], list):
                    files = [item for sublist in files for item in sublist]

            for src in filename_to_list(files):
                src = os.path.abspath(src)
                if os.path.isfile(src):
                    dst = self._get_dst(src)
                    dst = os.path.join(tempoutdir, dst)
                    dst = self._substitute(dst)
                    path, _ = os.path.split(dst)
                    if not os.path.exists(path):
                        try:
                            os.makedirs(path)
                        except OSError as inst:
                            if 'File exists' in str(inst):
                                pass
                            else:
                                raise
                    iflogger.debug("copyfile: %s %s" % (src, dst))
                    copyfile(src, dst, copy=True, hashmethod='content')
                    out_files.append(dst)
                elif os.path.isdir(src):
                    dst = self._get_dst(os.path.join(src, ''))
                    dst = os.path.join(tempoutdir, dst)
                    dst = self._substitute(dst)
                    path, _ = os.path.split(dst)
                    if not os.path.exists(path):
                        try:
                            os.makedirs(path)
                        except OSError as inst:
                            if 'File exists' in str(inst):
                                pass
                            else:
                                raise
                    if os.path.exists(dst) and self.inputs.remove_dest_dir:
                        iflogger.debug("removing: %s" % dst)
                        shutil.rmtree(dst)
                    iflogger.debug("copydir: %s %s" % (src, dst))
                    copytree(src, dst)
                    out_files.append(dst)
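Building on the DataSink docstring above, a hedged sketch of how substitutions and regexp_substitutions rewrite destination paths; the file names and patterns below are invented:

from nipype.interfaces.io import DataSink

ds = DataSink()
ds.inputs.base_directory = 'results_dir'
ds.inputs.container = 'subject'
# Plain substitutions are applied first, then regexp substitutions.
ds.inputs.substitutions = [('_task-rest', '')]
ds.inputs.regexp_substitutions = [(r'_run-0(\d)', r'_run-\1')]
# 'contrasts.@con' sends files directly into results_dir/subject/contrasts/
setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii'])
# ds.run()  # not executed here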
Beispiel #29
    try:
        os.makedirs(dst)
    except OSError as why:
        if 'File exists' in str(why):
            pass
        else:
            raise
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.isdir(srcname):
                copytree(srcname, dstname)
            else:
                copyfile(srcname, dstname, True)
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Exception as err:
            errors.extend(err.args[0])
    if errors:
        raise Exception(errors)

def add_traits(base, names, trait_type=None):
    """ Add traits to a traited class.

    All traits are set to Undefined by default
    """
    if trait_type is None:
        trait_type = traits.Any
    undefined_traits = {}
    for key in names:
        base.add_trait(key, trait_type)
        undefined_traits[key] = Undefined
    base.trait_set(trait_change_notify=False, **undefined_traits)
    return base
Beispiel #30
def test_drop_coords_labels_from_restricted_parcellation():
    from nipype.utils.filemanip import copyfile

    parlistfile = pkg_resources.resource_filename(
        "pynets", "templates/atlases/whole_brain_cluster_labels_PCA200.nii.gz")

    dir_path = str(tempfile.TemporaryDirectory().name)
    os.makedirs(dir_path, exist_ok=True)
    shutil.copy2(parlistfile, f"{dir_path}/{os.path.basename(parlistfile)}")
    parlistfile = f"{dir_path}/{os.path.basename(parlistfile)}"

    [coords, _, _, label_intensities] = \
        nodemaker.get_names_and_coords_of_parcels(parlistfile)

    labels = np.arange(len(coords) +
                       1)[np.arange(len(coords) + 1) != 0].tolist()
    labs = list(zip(labels, label_intensities))
    [
        parcellation_okay,
        cleaned_coords,
        cleaned_labels,
    ] = nodemaker.drop_coords_labels_from_restricted_parcellation(
        parlistfile, coords, labs)

    parcellation_okay_img = nib.load(parcellation_okay)
    intensities_ok = list(
        np.unique(np.asarray(parcellation_okay_img.dataobj).astype("int"))[1:])

    assert len(cleaned_coords) == len(cleaned_labels) == len(intensities_ok)

    parlist_img = nib.load(parlistfile)
    parlist_img_data = parlist_img.get_fdata()
    parlist_img_data[np.where(parlist_img_data == 10)] = 0
    par_tmp = tempfile.NamedTemporaryFile(mode="w+", suffix=".nii.gz")
    nib.save(nib.Nifti1Image(parlist_img_data, affine=parlist_img.affine),
             par_tmp.name)
    [
        parcellation_okay,
        cleaned_coords,
        cleaned_labels,
    ] = nodemaker.drop_coords_labels_from_restricted_parcellation(
        par_tmp.name, coords, labs)
    parcellation_okay_img = nib.load(parcellation_okay)
    intensities_ok = list(
        np.unique(np.asarray(parcellation_okay_img.dataobj).astype("int"))[1:])

    assert len(cleaned_coords) == len(cleaned_labels) == len(intensities_ok)

    bad_coords = np.delete(coords, 30, axis=0)
    del labs[-30]
    par_tmp2 = tempfile.NamedTemporaryFile(mode="w+", suffix=".nii.gz").name
    copyfile(parlistfile, par_tmp2, copy=True, use_hardlink=False)
    [
        parcellation_mod,
        cleaned_coords,
        cleaned_labels,
    ] = nodemaker.drop_coords_labels_from_restricted_parcellation(
        par_tmp2, bad_coords, labs)
    parcellation_mod_img = nib.load(parcellation_mod)
    intensities_ok = list(
        np.unique(np.asarray(parcellation_mod_img.dataobj).astype("int"))[1:])

    assert len(cleaned_coords) == len(cleaned_labels) == len(intensities_ok)
Beispiel #31
 def _run_interface(self, runtime):
     runtime.returncode = 0
     _ = copyfile(self.inputs.in_file, os.path.join(os.getcwd(),
                                                    self._rename()))
     return runtime
Beispiel #32
def run_tracking(step_curv_combinations, recon_path,
                 n_seeds_per_iter, directget, maxcrossing, max_length,
                 pft_back_tracking_dist, pft_front_tracking_dist,
                 particle_count, roi_neighborhood_tol, waymask, min_length,
                 track_type, min_separation_angle, sphere, tiss_class,
                 tissues4d, cache_dir, min_seeds=100):

    import gc
    import os
    import h5py
    from dipy.tracking import utils
    from dipy.tracking.streamline import select_by_rois
    from dipy.tracking.local_tracking import LocalTracking, \
        ParticleFilteringTracking
    from dipy.direction import (
        ProbabilisticDirectionGetter,
        ClosestPeakDirectionGetter,
        DeterministicMaximumDirectionGetter
    )
    from nilearn.image import index_img
    from pynets.dmri.track import prep_tissues
    from nibabel.streamlines.array_sequence import ArraySequence
    from nipype.utils.filemanip import copyfile, fname_presuffix
    import uuid
    from time import strftime

    run_uuid = f"{strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4()}"

    recon_path_tmp_path = fname_presuffix(
        recon_path,
        suffix=f"_{'_'.join([str(i) for i in step_curv_combinations])}_"
               f"{run_uuid}",
        newpath=cache_dir
    )
    copyfile(
        recon_path,
        recon_path_tmp_path,
        copy=True,
        use_hardlink=False)

    tissues4d_tmp_path = fname_presuffix(
        tissues4d,
        suffix=f"_{'_'.join([str(i) for i in step_curv_combinations])}_"
               f"{run_uuid}",
        newpath=cache_dir
    )
    copyfile(
        tissues4d,
        tissues4d_tmp_path,
        copy=True,
        use_hardlink=False)

    if waymask is not None:
        waymask_tmp_path = fname_presuffix(
            waymask,
            suffix=f"_{'_'.join([str(i) for i in step_curv_combinations])}_"
                   f"{run_uuid}",
            newpath=cache_dir
        )
        copyfile(
            waymask,
            waymask_tmp_path,
            copy=True,
            use_hardlink=False)
    else:
        waymask_tmp_path = None

    tissue_img = nib.load(tissues4d_tmp_path)

    # Order:
    B0_mask = index_img(tissue_img, 0)
    atlas_img = index_img(tissue_img, 1)
    seeding_mask = index_img(tissue_img, 2)
    t1w2dwi = index_img(tissue_img, 3)
    gm_in_dwi = index_img(tissue_img, 4)
    vent_csf_in_dwi = index_img(tissue_img, 5)
    wm_in_dwi = index_img(tissue_img, 6)

    tiss_classifier = prep_tissues(
        t1w2dwi,
        gm_in_dwi,
        vent_csf_in_dwi,
        wm_in_dwi,
        tiss_class,
        B0_mask
    )

    B0_mask_data = np.asarray(B0_mask.dataobj).astype("bool")

    seeding_mask = np.asarray(
        seeding_mask.dataobj
    ).astype("bool").astype("int16")

    with h5py.File(recon_path_tmp_path, 'r+') as hf:
        mod_fit = hf['reconstruction'][:].astype('float32')

    print("%s%s" % ("Curvature: ", step_curv_combinations[1]))

    # Instantiate DirectionGetter
    if directget.lower() in ["probabilistic", "prob"]:
        dg = ProbabilisticDirectionGetter.from_shcoeff(
            mod_fit,
            max_angle=float(step_curv_combinations[1]),
            sphere=sphere,
            min_separation_angle=min_separation_angle,
        )
    elif directget.lower() in ["closestpeaks", "cp"]:
        dg = ClosestPeakDirectionGetter.from_shcoeff(
            mod_fit,
            max_angle=float(step_curv_combinations[1]),
            sphere=sphere,
            min_separation_angle=min_separation_angle,
        )
    elif directget.lower() in ["deterministic", "det"]:
        maxcrossing = 1
        dg = DeterministicMaximumDirectionGetter.from_shcoeff(
            mod_fit,
            max_angle=float(step_curv_combinations[1]),
            sphere=sphere,
            min_separation_angle=min_separation_angle,
        )
    else:
        raise ValueError(
            "ERROR: No valid direction getter(s) specified."
        )

    print("%s%s" % ("Step: ", step_curv_combinations[0]))

    # Perform wm-gm interface seeding, using n_seeds at a time
    seeds = utils.random_seeds_from_mask(
        seeding_mask > 0,
        seeds_count=n_seeds_per_iter,
        seed_count_per_voxel=False,
        affine=np.eye(4),
    )
    if len(seeds) < min_seeds:
        print(UserWarning(
            f"<{min_seeds} valid seed points found in wm-gm interface..."
        ))
        return None

    # print(seeds)

    # Perform tracking
    if track_type == "local":
        streamline_generator = LocalTracking(
            dg,
            tiss_classifier,
            seeds,
            np.eye(4),
            max_cross=int(maxcrossing),
            maxlen=int(max_length),
            step_size=float(step_curv_combinations[0]),
            fixedstep=False,
            return_all=True,
            random_seed=42
        )
    elif track_type == "particle":
        streamline_generator = ParticleFilteringTracking(
            dg,
            tiss_classifier,
            seeds,
            np.eye(4),
            max_cross=int(maxcrossing),
            step_size=float(step_curv_combinations[0]),
            maxlen=int(max_length),
            pft_back_tracking_dist=pft_back_tracking_dist,
            pft_front_tracking_dist=pft_front_tracking_dist,
            pft_max_trial=20,
            particle_count=particle_count,
            return_all=True,
            random_seed=42
        )
    else:
        raise ValueError(
            "ERROR: No valid tracking method(s) specified.")

    # Filter resulting streamlines by those that stay entirely
    # inside the brain
    try:
        roi_proximal_streamlines = utils.target(
            streamline_generator, np.eye(4),
            B0_mask_data.astype('bool'), include=True
        )
    except BaseException:
        print('No streamlines found inside the brain! '
              'Check registrations.')
        return None

    del mod_fit, seeds, tiss_classifier, streamline_generator, \
        B0_mask_data, seeding_mask, dg

    B0_mask.uncache()
    atlas_img.uncache()
    t1w2dwi.uncache()
    gm_in_dwi.uncache()
    vent_csf_in_dwi.uncache()
    wm_in_dwi.uncache()
    atlas_img.uncache()
    tissue_img.uncache()
    gc.collect()

    # Filter resulting streamlines by roi-intersection
    # characteristics
    atlas_data = np.array(atlas_img.dataobj).astype("uint16")

    # Build mask vector from atlas for later roi filtering
    parcels = []
    intensities = [i for i in np.unique(atlas_data) if i != 0]
    for roi_val in intensities:
        parcels.append(atlas_data == roi_val)

    parcel_vec = list(np.ones(len(parcels)).astype("bool"))

    try:
        roi_proximal_streamlines = \
            nib.streamlines.array_sequence.ArraySequence(
                select_by_rois(
                    roi_proximal_streamlines,
                    affine=np.eye(4),
                    rois=parcels,
                    include=parcel_vec,
                    mode="any",
                    tol=roi_neighborhood_tol,
                )
            )
        print("%s%s" % ("Filtering by: \nNode intersection: ",
                        len(roi_proximal_streamlines)))
    except BaseException:
        print('No streamlines found to connect any parcels! '
              'Check registrations.')
        return None

    try:
        roi_proximal_streamlines = nib.streamlines.array_sequence.ArraySequence(
            [s for s in roi_proximal_streamlines
             if len(s) >= float(min_length)]
        )
        print(f"Minimum fiber length >{min_length}mm: "
              f"{len(roi_proximal_streamlines)}")
    except BaseException:
        print('No streamlines remaining after minimal length criterion.')
        return None

    if waymask is not None and os.path.isfile(waymask_tmp_path):
        waymask_data = np.asarray(nib.load(waymask_tmp_path
                                           ).dataobj).astype("bool")
        try:
            roi_proximal_streamlines = roi_proximal_streamlines[
                utils.near_roi(
                    roi_proximal_streamlines,
                    np.eye(4),
                    waymask_data,
                    tol=int(round(roi_neighborhood_tol*0.50, 1)),
                    mode="all"
                )
            ]
            print("%s%s" % ("Waymask proximity: ",
                            len(roi_proximal_streamlines)))
            del waymask_data
        except BaseException:
            print('No streamlines remaining in waymask\'s vicinity.')
            return None

    hf.close()
    del parcels, atlas_data

    tmp_files = [tissues4d_tmp_path, waymask_tmp_path, recon_path_tmp_path]
    for j in tmp_files:
        if j is not None:
            if os.path.isfile(j):
                os.system(f"rm -f {j} &")

    if len(roi_proximal_streamlines) > 0:
        return ArraySequence([s.astype("float32") for s in
                              roi_proximal_streamlines])
    else:
        return None
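A hedged sketch of how run_tracking above might be driven over a grid of (step size, curvature) pairs; all argument values are illustrative, and recon_path, sphere, tissues4d, and cache_dir are assumed to be defined already:

from itertools import product
from nibabel.streamlines.array_sequence import ArraySequence

step_sizes = [0.2, 0.4, 0.6]
curvatures = [30, 45, 60]

all_streams = []
for combo in product(step_sizes, curvatures):
    # combo[0] is the step size, combo[1] the maximum curvature angle.
    streams = run_tracking(
        combo, recon_path, n_seeds_per_iter=500, directget='prob',
        maxcrossing=3, max_length=200, pft_back_tracking_dist=2,
        pft_front_tracking_dist=1, particle_count=15,
        roi_neighborhood_tol=8, waymask=None, min_length=10,
        track_type='local', min_separation_angle=20, sphere=sphere,
        tiss_class='act', tissues4d=tissues4d, cache_dir=cache_dir)
    if streams is not None:
        all_streams.extend(streams)

streamlines = ArraySequence(all_streams)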
Beispiel #33
def run_tracking(step_curv_combinations, recon_path, n_seeds_per_iter,
                 directget, maxcrossing, max_length, pft_back_tracking_dist,
                 pft_front_tracking_dist, particle_count, roi_neighborhood_tol,
                 waymask, min_length, track_type, min_separation_angle, sphere,
                 tiss_class, tissues4d, cache_dir):

    import gc
    import os
    import h5py
    from dipy.tracking import utils
    from dipy.tracking.streamline import select_by_rois
    from dipy.tracking.local_tracking import LocalTracking, \
        ParticleFilteringTracking
    from dipy.direction import (ProbabilisticDirectionGetter,
                                ClosestPeakDirectionGetter,
                                DeterministicMaximumDirectionGetter)
    from nilearn.image import index_img
    from pynets.dmri.track import prep_tissues
    from nibabel.streamlines.array_sequence import ArraySequence
    from nipype.utils.filemanip import copyfile, fname_presuffix

    recon_path_tmp_path = fname_presuffix(recon_path,
                                          suffix=f"_{step_curv_combinations}",
                                          newpath=cache_dir)
    copyfile(recon_path, recon_path_tmp_path, copy=True, use_hardlink=False)

    if waymask is not None:
        waymask_tmp_path = fname_presuffix(waymask,
                                           suffix=f"_{step_curv_combinations}",
                                           newpath=cache_dir)
        copyfile(waymask, waymask_tmp_path, copy=True, use_hardlink=False)
    else:
        waymask_tmp_path = None

    tissue_img = nib.load(tissues4d)

    # Order:
    B0_mask = index_img(tissue_img, 0)
    atlas_img = index_img(tissue_img, 1)
    atlas_data_wm_gm_int = index_img(tissue_img, 2)
    t1w2dwi = index_img(tissue_img, 3)
    gm_in_dwi = index_img(tissue_img, 4)
    vent_csf_in_dwi = index_img(tissue_img, 5)
    wm_in_dwi = index_img(tissue_img, 6)

    tiss_classifier = prep_tissues(t1w2dwi, gm_in_dwi, vent_csf_in_dwi,
                                   wm_in_dwi, tiss_class, B0_mask)

    B0_mask_data = np.asarray(B0_mask.dataobj).astype("bool")
    atlas_data = np.array(atlas_img.dataobj).astype("uint16")
    atlas_data_wm_gm_int_data = np.asarray(
        atlas_data_wm_gm_int.dataobj).astype("bool").astype("int16")

    # Build mask vector from atlas for later roi filtering
    parcels = []
    intensities = [i for i in np.unique(atlas_data) if i != 0]
    for roi_val in intensities:
        parcels.append(atlas_data == roi_val)

    del atlas_data

    parcel_vec = list(np.ones(len(parcels)).astype("bool"))

    with h5py.File(recon_path_tmp_path, 'r+') as hf:
        mod_fit = hf['reconstruction'][:].astype('float32')
    hf.close()

    print("%s%s" % ("Curvature: ", step_curv_combinations[1]))

    # Instantiate DirectionGetter
    if directget == "prob" or directget == "probabilistic":
        dg = ProbabilisticDirectionGetter.from_shcoeff(
            mod_fit,
            max_angle=float(step_curv_combinations[1]),
            sphere=sphere,
            min_separation_angle=min_separation_angle,
        )
    elif directget == "clos" or directget == "closest":
        dg = ClosestPeakDirectionGetter.from_shcoeff(
            mod_fit,
            max_angle=float(step_curv_combinations[1]),
            sphere=sphere,
            min_separation_angle=min_separation_angle,
        )
    elif directget == "det" or directget == "deterministic":
        maxcrossing = 1
        dg = DeterministicMaximumDirectionGetter.from_shcoeff(
            mod_fit,
            max_angle=float(step_curv_combinations[1]),
            sphere=sphere,
            min_separation_angle=min_separation_angle,
        )
    else:
        raise ValueError("ERROR: No valid direction getter(s) specified.")

    print("%s%s" % ("Step: ", step_curv_combinations[0]))

    # Perform wm-gm interface seeding, using n_seeds at a time
    seeds = utils.random_seeds_from_mask(
        atlas_data_wm_gm_int_data > 0,
        seeds_count=n_seeds_per_iter,
        seed_count_per_voxel=False,
        affine=np.eye(4),
    )
    if len(seeds) == 0:
        print(
            UserWarning("No valid seed points found in wm-gm "
                        "interface..."))
        return None

    # print(seeds)

    # Perform tracking
    if track_type == "local":
        streamline_generator = LocalTracking(
            dg,
            tiss_classifier,
            seeds,
            np.eye(4),
            max_cross=int(maxcrossing),
            maxlen=int(max_length),
            step_size=float(step_curv_combinations[0]),
            fixedstep=False,
            return_all=True,
        )
    elif track_type == "particle":
        streamline_generator = ParticleFilteringTracking(
            dg,
            tiss_classifier,
            seeds,
            np.eye(4),
            max_cross=int(maxcrossing),
            step_size=float(step_curv_combinations[0]),
            maxlen=int(max_length),
            pft_back_tracking_dist=pft_back_tracking_dist,
            pft_front_tracking_dist=pft_front_tracking_dist,
            particle_count=particle_count,
            return_all=True,
        )
    else:
        try:
            raise ValueError("ERROR: No valid tracking method(s) specified.")
        except ValueError:
            import sys
            sys.exit(0)

    # Filter resulting streamlines by those that stay entirely
    # inside the brain
    try:
        roi_proximal_streamlines = utils.target(streamline_generator,
                                                np.eye(4),
                                                B0_mask_data,
                                                include=True)
    except BaseException:
        print('No streamlines found inside the brain! Check registrations.')
        return None

    # Filter resulting streamlines by roi-intersection
    # characteristics

    try:
        roi_proximal_streamlines = \
            nib.streamlines.array_sequence.ArraySequence(
                select_by_rois(
                    roi_proximal_streamlines,
                    affine=np.eye(4),
                    rois=parcels,
                    include=parcel_vec,
                    mode="%s" % ("any" if waymask is not None else
                                 "both_end"),
                    tol=roi_neighborhood_tol,
                )
            )
        print("%s%s" % ("Filtering by: \nNode intersection: ",
                        len(roi_proximal_streamlines)))
    except BaseException:
        print('No streamlines found to connect any parcels! '
              'Check registrations.')
        return None

    try:
        roi_proximal_streamlines = nib.streamlines.array_sequence.ArraySequence(
            [s for s in roi_proximal_streamlines
             if len(s) >= float(min_length)]
        )
        print(f"Minimum fiber length >{min_length}mm: "
              f"{len(roi_proximal_streamlines)}")
    except BaseException:
        print('No streamlines remaining after minimal length criterion.')
        return None

    if waymask is not None and os.path.isfile(waymask_tmp_path):
        from nilearn.image import math_img
        mask = math_img("img > 0.0075", img=nib.load(waymask_tmp_path))
        waymask_data = np.asarray(mask.dataobj).astype("bool")
        try:
            roi_proximal_streamlines = roi_proximal_streamlines[utils.near_roi(
                roi_proximal_streamlines,
                np.eye(4),
                waymask_data,
                tol=roi_neighborhood_tol,
                mode="all")]
            print("%s%s" %
                  ("Waymask proximity: ", len(roi_proximal_streamlines)))
        except BaseException:
            print('No streamlines remaining in waymask\'s vicinity.')
            return None

    out_streams = [s.astype("float32") for s in roi_proximal_streamlines]

    del dg, seeds, roi_proximal_streamlines, streamline_generator, \
        atlas_data_wm_gm_int_data, mod_fit, B0_mask_data

    os.remove(recon_path_tmp_path)
    gc.collect()

    try:
        return ArraySequence(out_streams)
    except BaseException:
        return None
Beispiel #34
 def _run_interface(self, runtime):
     _, _, ext = split_filename(self.inputs.tensor_file)
     copyfile(self.inputs.tensor_file, os.path.abspath(self.inputs.input_data_prefix + "_tensor" + ext), copy=False)
     
     return super(DTITracker, self)._run_interface(runtime)
Beispiel #35
    def _run_interface(self, runtime):
        import gc
        import os.path as op
        from pynets.registration import register
        from nipype.utils.filemanip import fname_presuffix, copyfile

        anat_file_tmp_path = fname_presuffix(self.inputs.anat_file, suffix='_tmp', newpath=runtime.cwd)
        copyfile(self.inputs.anat_file, anat_file_tmp_path, copy=True, use_hardlink=False)

        fa_tmp_path = fname_presuffix(self.inputs.fa_path, suffix='_tmp', newpath=runtime.cwd)
        copyfile(self.inputs.fa_path, fa_tmp_path, copy=True, use_hardlink=False)

        ap_tmp_path = fname_presuffix(self.inputs.ap_path, suffix='_tmp', newpath=runtime.cwd)
        copyfile(self.inputs.ap_path, ap_tmp_path, copy=True, use_hardlink=False)

        B0_mask_tmp_path = fname_presuffix(self.inputs.B0_mask, suffix='_tmp', newpath=runtime.cwd)
        copyfile(self.inputs.B0_mask, B0_mask_tmp_path, copy=True, use_hardlink=False)

        reg = register.DmriReg(basedir_path=runtime.cwd,
                               fa_path=fa_tmp_path,
                               ap_path=ap_tmp_path,
                               B0_mask=B0_mask_tmp_path,
                               anat_file=anat_file_tmp_path,
                               mask=self.inputs.mask,
                               vox_size=self.inputs.vox_size,
                               simple=self.inputs.simple)

        if (self.inputs.overwrite is True) or (op.isfile(reg.map_path) is False):
            # Perform anatomical segmentation
            reg.gen_tissue()

        if (self.inputs.overwrite is True) or (op.isfile(reg.t1w2dwi) is False):
            # Align t1w to dwi
            reg.t1w2dwi_align()

        if (self.inputs.overwrite is True) or (op.isfile(reg.wm_gm_int_in_dwi) is False):
            # Align tissue
            reg.tissue2dwi_align()

        if self.inputs.waymask is not None:
            if (self.inputs.overwrite is True) or (op.isfile(reg.waymask_in_dwi) is False):
                # Align waymask
                reg.waymask2dwi_align(self.inputs.waymask)
        else:
            reg.waymask_in_dwi = None

        self._results['wm_in_dwi'] = reg.wm_in_dwi
        self._results['gm_in_dwi'] = reg.gm_in_dwi
        self._results['vent_csf_in_dwi'] = reg.vent_csf_in_dwi
        self._results['csf_mask_dwi'] = reg.csf_mask_dwi
        self._results['anat_file'] = self.inputs.anat_file
        self._results['t1w2dwi'] = reg.t1w2dwi
        self._results['B0_mask'] = self.inputs.B0_mask
        self._results['ap_path'] = self.inputs.ap_path
        self._results['gtab_file'] = self.inputs.gtab_file
        self._results['dwi_file'] = self.inputs.dwi_file
        self._results['waymask_in_dwi'] = reg.waymask_in_dwi
        self._results['basedir_path'] = runtime.cwd

        gc.collect()

        return runtime
Beispiel #36
 def _run_interface(self, runtime):
     runtime.returncode = 0
     _ = copyfile(self.inputs.in_file,
                  os.path.join(os.getcwd(), self._rename()))
     return runtime
    with open(os.path.join(raw_data_dir, sub_id,
                           'experimental_parameters.json')) as f:
        json_s = f.read()
        experimental_parameters = json.loads(json_s)
    analysis_info.update(experimental_parameters)

    if not op.isdir(os.path.join(preprocessed_data_dir, sub_id)):
        try:
            os.makedirs(os.path.join(preprocessed_data_dir, sub_id))
        except OSError:
            pass

    # copy json files to preprocessed data folder
    # this allows these parameters to be updated and synced across subjects by changing only the raw data files.
    copyfile(os.path.join(raw_data_dir, 'acquisition_parameters.json'),
             os.path.join(preprocessed_data_dir,
                          'acquisition_parameters.json'),
             copy=True)
    copyfile(os.path.join(raw_data_dir, 'analysis_parameters.json'),
             os.path.join(preprocessed_data_dir, 'analysis_parameters.json'),
             copy=True)
    copyfile(os.path.join(raw_data_dir, sub_id,
                          'experimental_parameters.json'),
             os.path.join(preprocessed_data_dir, sub_id,
                          'experimental_parameters.json'),
             copy=True)

    if preprocess:

        # the actual workflow
        all_calcarine_reward_workflow = create_all_calcarine_reward_preprocessing_workflow(
            analysis_info, name='all_calcarine_reward')