Example #1
    def _run_interface(self, runtime):
        for i in range(1, len(self.inputs.thsamples) + 1):
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.thsamples[i - 1],
                     self.inputs.samples_base_name + "_th%dsamples" % i + ext,
                     copy=False)
            _, _, ext = split_filename(self.inputs.phsamples[i - 1])
            copyfile(self.inputs.phsamples[i - 1],
                     self.inputs.samples_base_name + "_ph%dsamples" % i + ext,
                     copy=False)
            _, _, ext = split_filename(self.inputs.fsamples[i - 1])
            copyfile(self.inputs.fsamples[i - 1],
                     self.inputs.samples_base_name + "_f%dsamples" % i + ext,
                     copy=False)

        if isdefined(self.inputs.target_masks):
            f = open("targets.txt", "w")
            for target in self.inputs.target_masks:
                f.write("%s\n" % target)
            f.close()
        if isinstance(self.inputs.seed, list):
            f = open("seeds.txt", "w")
            for seed in self.inputs.seed:
                if isinstance(seed, list):
                    f.write("%s\n" % (" ".join([str(s) for s in seed])))
                else:
                    f.write("%s\n" % seed)
            f.close()

        runtime = super(ProbTrackX, self)._run_interface(runtime)
        if runtime.stderr:
            self.raise_exception(runtime)
        return runtime
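The three copy calls above share one idiom: take the extension reported by split_filename and splice it onto a new base name with a running index. A minimal standalone sketch of that idiom, using shutil.copyfile instead of nipype's copyfile so it runs outside the interface machinery (the helper name and sample arguments are made up):

import shutil
from nipype.utils.filemanip import split_filename

def copy_numbered_samples(samples, base_name, tag):
    # writes '<base_name>_<tag><i>samples<ext>' for i = 1..len(samples)
    for i, src in enumerate(samples, start=1):
        _, _, ext = split_filename(src)
        shutil.copyfile(src, "%s_%s%dsamples%s" % (base_name, tag, i, ext))

# e.g. copy_numbered_samples(thsamples, 'merged', 'th')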
Example #2
    def _run_interface(self, runtime):
        for i in range(1, len(self.inputs.thsamples) + 1):
            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
            copyfile(self.inputs.thsamples[i - 1],
                     self.inputs.samples_base_name + "_th%dsamples" % i + ext,
                     copy=True)
            _, _, ext = split_filename(self.inputs.phsamples[i - 1])
            copyfile(self.inputs.phsamples[i - 1],
                     self.inputs.samples_base_name + "_ph%dsamples" % i + ext,
                     copy=True)
            _, _, ext = split_filename(self.inputs.fsamples[i - 1])
            copyfile(self.inputs.fsamples[i - 1],
                     self.inputs.samples_base_name + "_f%dsamples" % i + ext,
                     copy=True)

        if isdefined(self.inputs.target_masks):
            f = open("targets.txt", "w")
            for target in self.inputs.target_masks:
                f.write("%s\n" % target)
            f.close()

        runtime = super(mapped_ProbTrackX, self)._run_interface(runtime)
        if runtime.stderr:
            self.raise_exception(runtime)
        return runtime
Example #3
    def _run_interface(self, runtime):
        preprocessedfile = self.inputs.preprocessedfile
        regfile = self.inputs.regfile

        #invert transform matrix
        invt = fsl.ConvertXFM()
        invt.inputs.in_file = regfile
        invt.inputs.invert_xfm = True
        invt.inputs.out_file = regfile + '_inv.mat'
        invt_result = invt.run()

        #define source mask (surface, volume)
        input_labels = self.inputs.vol_source+self.inputs.vol_target
        sourcemask = get_mask(input_labels, self.inputs.parcfile)
        sourcemaskfile = os.path.abspath('sourcemask.nii')
        sourceImg = nb.Nifti1Image(sourcemask, None)
        nb.save(sourceImg, sourcemaskfile)

        #transform anatomical mask to functional space
        sourcexfm = fsl.ApplyXfm()
        sourcexfm.inputs.in_file = sourcemaskfile
        sourcexfm.inputs.in_matrix_file = invt_result.outputs.out_file
        _, base, _ = split_filename(sourcemaskfile)
        sourcexfm.inputs.out_file = base + '_xfm.nii.gz'
        sourcexfm.inputs.reference = preprocessedfile
        sourcexfm.inputs.interp = 'nearestneighbour'
        sourcexfm.inputs.apply_xfm = True
        sourcexfm_result = sourcexfm.run()

        #manual source data creation (-mask_source option not yet available in afni)
        sourcemask_xfm = nb.load(sourcexfm_result.outputs.out_file).get_data()
        inputdata = nb.load(preprocessedfile).get_data()
        maskedinput = np.zeros_like(inputdata)
        for timepoint in range(inputdata.shape[3]):
            maskedinput[:,:,:,timepoint] = np.where(sourcemask_xfm,inputdata[:,:,:,timepoint],0)
        maskedinputfile = os.path.abspath('inputfile.nii')
        inputImg = nb.Nifti1Image(maskedinput, None)
        nb.save(inputImg, maskedinputfile)

        ##PREPARE TARGET MASK##

        #define target mask (surface, volume)
        targetmask = get_mask(self.inputs.vol_target, self.inputs.parcfile)
        targetmaskfile = os.path.abspath('targetmask.nii')
        targetImg = nb.Nifti1Image(targetmask, None)
        nb.save(targetImg, targetmaskfile)

        #same transform for target
        targetxfm = fsl.ApplyXfm()
        targetxfm.inputs.in_file = targetmaskfile
        targetxfm.inputs.in_matrix_file = invt_result.outputs.out_file
        _, base, _ = split_filename(targetmaskfile)
        targetxfm.inputs.out_file = base + '_xfm.nii.gz'
        targetxfm.inputs.reference = preprocessedfile
        targetxfm.inputs.interp = 'nearestneighbour'
        targetxfm.inputs.apply_xfm = True
        targetxfm_result = targetxfm.run()

        return runtime
Example #4
File: odf.py Project: agramfort/nipype
 def _run_interface(self, runtime):
     _, _, ext = split_filename(self.inputs.max)
     copyfile(self.inputs.max, os.path.abspath(self.inputs.input_data_prefix + "_max" + ext), copy=False)
     
     _, _, ext = split_filename(self.inputs.ODF)
     copyfile(self.inputs.ODF, os.path.abspath(self.inputs.input_data_prefix + "_odf" + ext), copy=False)
     
     return super(ODFTracker, self)._run_interface(runtime)
Example #5
 def _gen_outfilename(self, ext):
     _, name , _ = split_filename(self.inputs.aparc_aseg_file)
     if self.inputs.use_freesurfer_LUT:
         prefix = 'fsLUT'
     elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file):
         lutpath, lutname, lutext = split_filename(self.inputs.LUT_file)
         prefix = lutname
     return prefix + '_' + name + '.' + ext
Example #6
File: cmtk.py Project: Alunisiira/nipype
 def _gen_outfilename(self, ext):
     if ext.endswith("mat") and isdefined(self.inputs.out_matrix_mat_file):
         _, name , _ = split_filename(self.inputs.out_matrix_mat_file)
     elif isdefined(self.inputs.out_matrix_file):
         _, name , _ = split_filename(self.inputs.out_matrix_file)
     else:
         _, name , _ = split_filename(self.inputs.tract_file)
     return name + ext
Example #7
    def _gen_filename(self, name):
        """Generate output file name
"""
        if name == 'out_file':
            _, fname, ext = split_filename(self.inputs.in_file)
            return os.path.join(os.getcwd(), ''.join((fname, '_wmcsfresidual',ext)))
    
        if name == 'outcomp':
            _, fname, ext = split_filename(self.inputs.in_file)
            return os.path.join(os.getcwd(), ''.join((fname, '_pcs','.txt')))
Example #8
    def _gen_filename(self, name):
        """Generate output file name
"""
        if name == 'out_file':
            _, fname, ext = split_filename(self.inputs.in_file)
            return os.path.join(os.getcwd(), ''.join((fname, '_3dv',ext)))
    
        if name == 'oned_file':
            _, fname, ext = split_filename(self.inputs.in_file)
            return os.path.join(os.getcwd(), ''.join((fname, '_3dv1D','.1D')))
Example #9
def test_split_filename():
    res = split_filename('foo.nii')
    yield assert_equal, res, ('', 'foo', '.nii')
    res = split_filename('foo.nii.gz')
    yield assert_equal, res, ('', 'foo', '.nii.gz')
    res = split_filename('/usr/local/foo.nii.gz')
    yield assert_equal, res, ('/usr/local', 'foo', '.nii.gz')
    res = split_filename('../usr/local/foo.nii')
    yield assert_equal, res, ('../usr/local', 'foo', '.nii')
    res = split_filename('/usr/local/foo.a.b.c.d')
    yield assert_equal, res, ('/usr/local', 'foo.a.b.c', '.d')
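The test above pins down the contract: split_filename returns a (path, basename, extension) tuple and treats compound neuroimaging extensions such as .nii.gz as a single extension. A quick check of the same behavior (expected values taken from the assertions above):

from nipype.utils.filemanip import split_filename

pth, name, ext = split_filename('/usr/local/foo.nii.gz')
assert (pth, name, ext) == ('/usr/local', 'foo', '.nii.gz')
# the pieces round-trip: os.path.join(pth, name + ext) rebuilds the path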
Example #10
def fs_extract_label_rois(subdir, pet, dat, labels):
    """
    Uses freesurfer tools to extract

    Parameters
    -----------
    subdir : subjects freesurfer directory

    pet : filename of subjects PET volume coreg'd to mri space

    dat : filename of dat generated by tkregister mapping pet to mri

    labels : filename of subjects aparc+aseg.mgz

    Returns
    -------
    stats_file: file  that contains roi stats

    label_file : file of volume with label rois in pet space
               you can check dat with ...
               'tkmedit %s T1.mgz -overlay %s -overlay-reg %s
               -fthresh 0.5 -fmid1'%(subject, pet, dat)
                 
    """
    pth, nme, ext = split_filename(pet)
    pth_lbl, nme_lbl, ext_lbl = split_filename(labels)
    
    stats_file = os.path.join(pth, '%s_%s_stats'%(nme, nme_lbl))
    label_file = os.path.join(pth, '%s_%s_.nii.gz'%(nme, nme_lbl))

    # Gen label file
    cmd = ['mri_label2vol',
           '--seg %s/mri/%s'%(subdir, labels),
           '--temp %s'%(pet),
           '--reg %s'%(dat),
           '--o %s'%(label_file)]
    cmd = ' '.join(cmd)
    cout = CommandLine(cmd).run()
    if not cout.runtime.returncode == 0:
        print 'mri_label2vol failed for %s'%(pet)
        return None, None
    ## Get stats
    cmd = ['mri_segstats',
           '--seg %s'%(label_file),
           '--sum %s'%(stats_file),
           '--in %s'%(pet),
           '--nonempty --ctab',
           '/usr/local/freesurfer_x86_64-4.5.0/FreeSurferColorLUT.txt']
    cmd = ' '.join(cmd)
    cout = CommandLine(cmd).run()
    if not cout.runtime.returncode == 0:
        print 'mri_segstats failed for %s'%(pet)
        return None, None
    return stats_file, label_file
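A hypothetical call (all paths made up), assuming a standard FreeSurfer subject directory and a PET volume already coregistered to MRI space:

stats, label_vol = fs_extract_label_rois(
    '/data/freesurfer/subj001',
    '/data/pet/subj001_pet_coreg.nii',
    '/data/pet/subj001_pet2mri.dat',
    'aparc+aseg.mgz')
# both return values are None if either FreeSurfer command fails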
Example #11
 def _list_outputs(self):
     outputs = self.output_spec().get()
     path, name, ext = split_filename(self.inputs.out_stats_file)
     if not ext == '.mat':
         ext = '.mat'
     out_stats_file = op.abspath(name + ext)
     outputs["stats_file"] = out_stats_file
     path, name, ext = split_filename(self.inputs.out_network_file)
     if not ext == '.pck':
         ext = '.pck'
     out_network_file = op.abspath(name + ext)
     outputs["network_file"] = out_network_file
     return outputs
Example #12
File: utils.py Project: dohmatob/nipype
 def _list_outputs(self):
     outputs = self._outputs().get()
     out_file = self.inputs.out_file
     if not isdefined(out_file):
         if isdefined(self.inputs.stat_image2) and (
             not isdefined(self.inputs.show_negative_stats)
             or not self.inputs.show_negative_stats):
                 stem = "%s_and_%s" % (split_filename(self.inputs.stat_image)[1],
                                       split_filename(self.inputs.stat_image2)[1])
         else:
             stem = split_filename(self.inputs.stat_image)[1]
         out_file = self._gen_fname(stem, suffix='_overlay')
     outputs['out_file'] = os.path.abspath(out_file)
     return outputs
Example #13
def z_image(image,outliers):
    """Calculates z-score of timeseries removing timpoints with outliers.

    Parameters
    ----------
    image : filename (or one-element list) of a 4D NIfTI timeseries
    outliers : filename (or one-element list) of a text file listing outlier
        timepoint indices

    Returns
    -------
    File : z-image
    """
    import numpy as np
    import math
    import nibabel as nib
    from scipy.stats.mstats import zscore
    from nipype.utils.filemanip import split_filename
    import os
    if isinstance(image,list):
        image = image[0]
    if isinstance(outliers,list):
        outliers = outliers[0]
        
    def try_import(fname):
        try:
            a = np.genfromtxt(fname)
            return np.atleast_1d(a).astype(int)
        except Exception:
            return np.array([]).astype(int)

    z_img = os.path.abspath('z_no_outliers_' + split_filename(image)[1] + '.nii.gz')
    arts = try_import(outliers)
    img = nib.load(image)
    data, aff = np.asarray(img.get_data()), img.get_affine()
    weights = np.zeros(data.shape, dtype=bool)
    for a in arts:
        weights[:, :, :, a] = True
    data_mask = np.ma.array(data, mask=weights)
    z = (data_mask - np.mean(data_mask, axis=3)[:,:,:,None])/np.std(data_mask,axis=3)[:,:,:,None]
    final_image = nib.Nifti1Image(z, aff)
    final_image.to_filename(z_img)

    z_img2 = os.path.abspath('z_' + split_filename(image)[1] + '.nii.gz')
    z2 = (data - np.mean(data, axis=3)[:,:,:,None])/np.std(data,axis=3)[:,:,:,None]
    final_image = nib.Nifti1Image(z2, aff)
    final_image.to_filename(z_img2)

    z_img = [z_img, z_img2]
    return z_img
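A hypothetical call, assuming a 4D functional image and an outlier file listing one timepoint index per line (e.g. as written by nipype's ArtifactDetect):

z_no_outliers_img, z_img = z_image('rest.nii.gz', 'art.rest_outliers.txt')
# the first file z-scores with the listed timepoints masked out,
# the second z-scores the full series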
Example #14
    def _list_outputs(self):
        outputs = self._outputs().get()
        pth, base, ext = split_filename(self.inputs.template_file)
        outputs["normalization_parameter_file"] = os.path.realpath(base + "_2mni.mat")
        outputs["normalized_files"] = []
        prefix = "w"
        if isdefined(self.inputs.modulate) and self.inputs.modulate:
            prefix = "m" + prefix
        if isdefined(self.inputs.fwhm) and self.inputs.fwhm > 0:
            prefix = "s" + prefix
        for filename in self.inputs.apply_to_files:
            pth, base, ext = split_filename(filename)
            outputs["normalized_files"].append(os.path.realpath("%s%s%s" % (prefix, base, ext)))

        return outputs
Example #15
File: model.py Project: dbanda/nipype
    def _list_outputs(self):
        outputs = self.output_spec().get()
        # Get the top-level output directory
        if not isdefined(self.inputs.glm_dir):
            glmdir = os.getcwd()
        else:
            glmdir = os.path.abspath(self.inputs.glm_dir)
        outputs["glm_dir"] = glmdir

        # Assign the output files that always get created
        outputs["beta_file"] = os.path.join(glmdir, "beta.mgh")
        outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh")
        outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh")
        outputs["mask_file"] = os.path.join(glmdir, "mask.mgh")
        outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat")
        outputs["dof_file"] = os.path.join(glmdir, "dof.dat")
        # Assign the conditional outputs
        if isdefined(self.inputs.save_residual) and self.inputs.save_residual:
            outputs["error_file"] = os.path.join(glmdir, "eres.mgh")
        if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate:
            outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh")

        # Get the contrast directory name(s)
        if isdefined(self.inputs.contrast):
            contrasts = []
            for c in self.inputs.contrast:
                if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]:
                    contrasts.append(split_filename(c)[1])
                else:
                    contrasts.append(os.path.split(c)[1])
        elif isdefined(self.inputs.one_sample) and self.inputs.one_sample:
            contrasts = ["osgm"]

        # Add in the contrast images
        outputs["sig_file"] = [os.path.join(glmdir, c, "sig.mgh") for c in contrasts]
        outputs["ftest_file"] = [os.path.join(glmdir, c, "F.mgh") for c in contrasts]
        outputs["gamma_file"] = [os.path.join(glmdir, c, "gamma.mgh") for c in contrasts]
        outputs["gamma_var_file"] = [os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts]

        # Add in the PCA results, if relevant
        if isdefined(self.inputs.pca) and self.inputs.pca:
            pcadir = os.path.join(glmdir, "pca-eres")
            outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh")
            outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx")
            outputs["singluar_values"] = os.path.join(pcadir, "sdiag.mat")
            outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat")

        return outputs
Example #16
def extract_subrois(timeseries_file, label_file, indices):
    """Extract voxel time courses for each subcortical roi index

    Parameters
    ----------

    timeseries_file: a 4D Nifti file
    label_file: a 3D file containing rois in the same space/size of the 4D file
    indices: a list of indices for ROIs to extract.

    Returns
    -------
    out_file: a text file containing time courses for each voxel of each roi
        The first four columns are: freesurfer index, i, j, k positions in the
        label file
    """
    img = nb.load(timeseries_file)
    data = img.get_data()
    roiimg = nb.load(label_file)
    rois = roiimg.get_data()
    prefix = split_filename(timeseries_file)[1]
    out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
    with open(out_ts_file, 'wt') as fp:
        for fsindex in indices:
            ijk = np.nonzero(rois == fsindex)
            ts = data[ijk]
            for i0, row in enumerate(ts):
                fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
                                           ijk[1][i0], ijk[2][i0]) +
                         ','.join(['%.10f' % val for val in row]) + '\n')
    return out_ts_file
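A hypothetical call (filenames made up; 17 and 53 are the FreeSurfer label values for left and right hippocampus):

ts_file = extract_subrois('rest_preproc.nii.gz',
                          'aparc+aseg_resampled.nii.gz',
                          indices=[17, 53])
# each output row: fsindex, i, j, k, then that voxel's timecourse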
Example #17
    def slicetime(file, sliceorder):
        print "running slicetiming"
        slicetime = afni.TShift(outputtype='NIFTI_GZ')
        slicetime.inputs.in_file = file
        if type(sliceorder)==list:
            custom_order = open(os.path.abspath('afni_custom_order_file.txt'),'w')
            tpattern = []
            for i in xrange(len(sliceorder)):
                tpattern.append((i*tr/float(Nz), sliceorder[i]))
            # sort by slice index, then write one acquisition time per line
            tpattern.sort(key=lambda x: x[1])
            for i, t in enumerate(tpattern):
                print '%f\n' % (t[0])
                custom_order.write('%f\n' % (t[0]))
            custom_order.close()
            order_file = 'afni_custom_order_file.txt'
        elif type(sliceorder)==str:
            order_file = sliceorder
        else:
            raise TypeError('sliceorder must be filepath or list')

        slicetime.inputs.args ='-tpattern @%s' % os.path.abspath(order_file)
        slicetime.inputs.tr = str(tr)+'s'
        slicetime.inputs.outputtype = 'NIFTI_GZ'
        slicetime.inputs.out_file = os.path.abspath(split_filename(file)[1] +\
                                                    "_tshift.nii.gz")

        res = slicetime.run()
        file_to_realign = res.outputs.out_file
        return file_to_realign
Example #18
File: test_base.py Project: vsaase/nipype
def test_cycle_namesource1():
    tmp_infile = setup_file()
    tmpd, nme, ext = split_filename(tmp_infile)
    pwd = os.getcwd()
    os.chdir(tmpd)

    class spec3(nib.CommandLineInputSpec):
        moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s",
                       position=1, name_template='%s_mootpl')
        poo = nib.File(name_source=['moo'], hash_files=False,
                       argstr="%s", position=2)
        doo = nib.File(name_source=['poo'], hash_files=False,
                       argstr="%s", position=3)

    class TestCycle(nib.CommandLine):
        _cmd = "mycommand"
        input_spec = spec3

    # Check that an exception is raised
    to0 = TestCycle()
    not_raised = True
    try:
        to0.cmdline
    except nib.NipypeInterfaceError:
        not_raised = False
    yield assert_false, not_raised

    os.chdir(pwd)
    teardown_file(tmpd)
Example #19
File: test_base.py Project: vsaase/nipype
def test_chained_namesource():
    tmp_infile = setup_file()
    tmpd, nme, ext = split_filename(tmp_infile)
    pwd = os.getcwd()
    os.chdir(tmpd)

    class spec2(nib.CommandLineInputSpec):
        doo = nib.File(exists=True, argstr="%s", position=1)
        moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s",
                       position=2, name_template='%s_mootpl')
        poo = nib.File(name_source=['moo'], hash_files=False,
                       argstr="%s", position=3)

    class TestName(nib.CommandLine):
        _cmd = "mycommand"
        input_spec = spec2

    testobj = TestName()
    testobj.inputs.doo = tmp_infile
    res = testobj.cmdline
    yield assert_true, '%s' % tmp_infile in res
    yield assert_true, '%s_mootpl ' % nme in res
    yield assert_true, '%s_mootpl_generated' % nme in res

    os.chdir(pwd)
    teardown_file(tmpd)
Example #20
def get_mean_timeseries(infile,roi,mask):
    import os
    import nibabel as nib
    from nipype.utils.filemanip import fname_presuffix, split_filename
    import numpy as np

    img = nib.load(infile)
    data, aff = img.get_data(), img.get_affine()

    roi_img = nib.load(roi) 
    roi_data, roi_affine = roi_img.get_data(), roi_img.get_affine()

    if len(roi_data.shape) > 3:
        roi_data = roi_data[:,:,:,0]

    mask = nib.load(mask).get_data()
    roi_data = (roi_data > 0).astype(int) + (mask>0).astype(int)

    _,roiname,_ = split_filename(roi)
    outfile = fname_presuffix(infile,"%s_"%roiname,'.txt',newpath=os.path.abspath('.'),use_ext=False)
    
    out_data = np.mean(data[roi_data>1,:],axis=0)
    print out_data.shape
    
    np.savetxt(outfile,out_data)

    return outfile, roiname
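A hypothetical call (filenames made up): the function averages the timecourses of voxels that lie in both the ROI and the brain mask, and writes them to '<roiname>_<input basename>.txt' in the current directory:

out_txt, roiname = get_mean_timeseries('rest.nii.gz',
                                       'lh_precuneus_roi.nii.gz',
                                       'brain_mask.nii.gz')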
Example #21
def test_fast_list_outputs(setup_infile, tmpdir):
    ''' By default (no -o), FSL's fast command outputs files into the same
    directory as the input files. If the flag -o is set, it outputs files into
    the cwd '''
    def _run_and_test(opts, output_base):
        outputs = fsl.FAST(**opts)._list_outputs()
        for output in outputs.values():
            if output:
                for filename in filename_to_list(output):
                    assert os.path.realpath(filename).startswith(os.path.realpath(output_base))

    # set up
    tmp_infile, indir = setup_infile
    cwd = tmpdir.mkdir("new")
    cwd.chdir()
    assert indir != cwd.strpath
    out_basename = 'a_basename'

    # run and test
    opts = {'in_files': tmp_infile}
    input_path, input_filename, input_ext = split_filename(tmp_infile)
    _run_and_test(opts, os.path.join(input_path, input_filename))

    opts['out_basename'] = out_basename
    _run_and_test(opts, os.path.join(cwd.strpath, out_basename))
Example #22
def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
    """Bandpass filter the input files

    Parameters
    ----------
    files: list of 4d nifti files
    lowpass_freq: cutoff frequency for the low pass filter (in Hz)
    highpass_freq: cutoff frequency for the high pass filter (in Hz)
    fs: sampling rate (in Hz)
    """
    out_files = []
    for filename in filename_to_list(files):
        path, name, ext = split_filename(filename)
        out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
        img = nb.load(filename)
        timepoints = img.shape[-1]
        F = np.zeros((timepoints))
        lowidx = int(timepoints / 2) + 1
        if lowpass_freq > 0:
            # int(): np.round returns a float, which cannot be used as a slice index
            lowidx = int(np.round(float(lowpass_freq) / fs * timepoints))
        highidx = 0
        if highpass_freq > 0:
            highidx = int(np.round(float(highpass_freq) / fs * timepoints))
        F[highidx:lowidx] = 1
        F = ((F + F[::-1]) > 0).astype(int)
        data = img.get_data()
        if np.all(F == 1):
            filtered_data = data
        else:
            filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))
        img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)
        img_out.to_filename(out_file)
        out_files.append(out_file)
    return list_to_filename(out_files)
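The filter above is just a binary mask over FFT bins; a small numeric check of the mask construction with assumed values (100 volumes at TR = 2 s, i.e. fs = 0.5 Hz, passband 0.01-0.1 Hz):

import numpy as np

timepoints, fs = 100, 0.5
lowpass_freq, highpass_freq = 0.1, 0.01
F = np.zeros(timepoints)
lowidx = int(np.round(float(lowpass_freq) / fs * timepoints))    # 20
highidx = int(np.round(float(highpass_freq) / fs * timepoints))  # 2
F[highidx:lowidx] = 1
F = ((F + F[::-1]) > 0).astype(int)  # mirror to cover negative frequencies
print(F.sum())  # 36 bins kept out of 100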
Example #23
File: test_utils.py Project: stymy/nipype
def test_coreg():
    moving = example_data(infile="functional.nii")
    target = example_data(infile="T1.nii")
    mat = example_data(infile="trans.mat")
    coreg = spmu.CalcCoregAffine(matlab_cmd="mymatlab")
    coreg.inputs.target = target
    assert_equal(coreg.inputs.matlab_cmd, "mymatlab")
    coreg.inputs.moving = moving
    assert_equal(isdefined(coreg.inputs.mat), False)
    pth, mov, _ = split_filename(moving)
    _, tgt, _ = split_filename(target)
    mat = os.path.join(pth, "%s_to_%s.mat" % (mov, tgt))
    invmat = fname_presuffix(mat, prefix="inverse_")
    scrpt = coreg._make_matlab_command(None)
    assert_equal(coreg.inputs.mat, mat)
    assert_equal(coreg.inputs.invmat, invmat)
Example #24
 def _list_outputs(self):
     outputs = self._outputs().get()
     _, base, _ = split_filename(self.inputs.in_Files[0])
     #outputs["out_File"] = os.path.abspath(base+'_Stability.nii')
     outputs["variation_mat"] = os.path.abspath(base+'_VariationMat.nii')
     outputs["consensus_mat"] = os.path.abspath(base+'_ConsensusMat.nii')
     return outputs
Example #25
File: misc.py Project: cdla/nipype
def replaceext(in_list, ext):
    out_list = list()
    for filename in in_list:
        path, name, _ = split_filename(op.abspath(filename))
        out_name = op.join(path, name) + ext
        out_list.append(out_name)
    return out_list
Example #26
 def _list_outputs(self):
     outputs = self._outputs().get()
     _, name, ext = split_filename(self.inputs.out_file)
     if not ext == '.cff':
         ext = '.cff'
     outputs['connectome_file'] = op.abspath(name + ext)
     return outputs
Example #27
File: preprocess.py Project: B-Rich/nipype
 def _gen_outfilename(self):
     _, name , _ = split_filename(self.inputs.in_file)
     if isdefined(self.inputs.out_filename):
         outname = self.inputs.out_filename
     else:
         outname = name + '_mrconvert.' + self.inputs.extension
     return outname
Example #28
	def _run_interface(self, runtime):
		from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
		from nipype.utils.filemanip import split_filename
		import nibabel as nib
		import os

		functional_filename = self.inputs.in_file
		atlas_filename = self.inputs.atlas_filename
		mask_filename = self.inputs.mask_filename

		# Extracting the ROI signals
		masker = NiftiLabelsMasker(labels_img=atlas_filename,
                           background_label = 0,
                           standardize=True,
                           detrend = True,
                           verbose = 1
                           )
		time_series = masker.fit_transform(functional_filename)

		# Removing the ROI signal from the time series
		nifti_masker = NiftiMasker(mask_img=mask_filename)
		masked_data = nifti_masker.fit_transform(functional_filename, confounds=time_series[...,0])
		masked_img = nifti_masker.inverse_transform(masked_data)

		# Saving the result to disk
		outputs = self._outputs().get()
		fname = self.inputs.in_file
		_, base, _ = split_filename(fname)
		nib.save(masked_img, os.path.abspath(base + '_regressed.nii.gz'))
		return runtime
Example #29
    def _run_interface(self, runtime):
        extracted_networks = []

        for i, con in enumerate(self.inputs.in_files):
            mycon = cf.load(con)
            nets = mycon.get_connectome_network()
            for ne in nets:
                # here, you might want to skip networks with a given
                # metadata information
                ne.load()
                contitle = mycon.get_connectome_meta().get_title()
                ne.set_name( str(i) + ': ' + contitle + ' - ' + ne.get_name() )
                ne.set_src(ne.get_name())
                extracted_networks.append(ne)

        # Add networks to new connectome
        newcon = cf.connectome(title = 'All CNetworks', connectome_network = extracted_networks)
        # Setting additional metadata
        metadata = newcon.get_connectome_meta()
        metadata.set_creator('My Name')
        metadata.set_email('My Email')

        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.cff':
            ext = '.cff'
        cf.save_to_cff(newcon, op.abspath(name + ext))

        return runtime
Example #30
def test_cycle_namesource2(setup_file):
    tmp_infile = setup_file
    tmpd, nme, ext = split_filename(tmp_infile)

    class spec3(nib.CommandLineInputSpec):
        moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s",
                       position=1, name_template='%s_mootpl')
        poo = nib.File(name_source=['moo'], hash_files=False,
                       argstr="%s", position=2)
        doo = nib.File(name_source=['poo'], hash_files=False,
                       argstr="%s", position=3)

    class TestCycle(nib.CommandLine):
        _cmd = "mycommand"
        input_spec = spec3

    # Check that loop can be broken by setting one of the inputs
    to1 = TestCycle()
    to1.inputs.poo = tmp_infile

    not_raised = True
    try:
        res = to1.cmdline
    except nib.NipypeInterfaceError:
        not_raised = False
    print(res)

    assert not_raised
    assert '%s' % tmp_infile in res
    assert '%s_generated' % nme in res
    assert '%s_generated_mootpl' % nme in res
Example #31
 def _gen_outfilename(self):
     _, name, _ = split_filename(self.inputs.in_file)
     return name + '_mode.nii'
Example #32
 def _gen_outfilename(self):
     _, name, _ = split_filename(self.inputs.in_file)
     return name + '_tracked.tck'
Example #33
 def _gen_outfilename(self):
     output_root = self.inputs.output_root
     first_file = self.inputs.in_files[0]
     _, _, ext = split_filename(first_file)
     return output_root + ext
Example #34
def rename_into_caps(in_bids_dwi, fname_dwi, fname_bval, fname_bvec, fname_brainmask):
    """Rename the outputs of the pipelines into CAPS.


    Args:
        in_bids_dwi (str): Input BIDS DWI to extract the <source_file>
        fname_dwi (str): Preprocessed DWI file.
        fname_bval (str): Preprocessed bval.
        fname_bvec (str): Preprocessed bvec.
        fname_brainmask (str): B0 mask.

    Returns:
        Tuple[str, str, str, str]: The different outputs in CAPS format.
    """
    import os

    from nipype.interfaces.utility import Rename
    from nipype.utils.filemanip import split_filename

    # Extract <source_file> in format sub-CLNC01_ses-M00[_acq-label]_dwi
    _, source_file_dwi, _ = split_filename(in_bids_dwi)

    # Extract base path from fname:
    base_dir_dwi, _, _ = split_filename(fname_dwi)
    base_dir_bval, _, _ = split_filename(fname_bval)
    base_dir_bvec, _, _ = split_filename(fname_bvec)
    base_dir_brainmask, _, _ = split_filename(fname_brainmask)

    # Rename into CAPS DWI:
    rename_dwi = Rename()
    rename_dwi.inputs.in_file = fname_dwi
    rename_dwi.inputs.format_string = os.path.join(
        base_dir_dwi, f"{source_file_dwi}_space-T1w_preproc.nii.gz"
    )
    out_caps_dwi = rename_dwi.run()

    # Rename into CAPS bval:
    rename_bval = Rename()
    rename_bval.inputs.in_file = fname_bval
    rename_bval.inputs.format_string = os.path.join(
        base_dir_bval, f"{source_file_dwi}_space-T1w_preproc.bval"
    )
    out_caps_bval = rename_bval.run()

    # Rename into CAPS bvec:
    rename_bvec = Rename()
    rename_bvec.inputs.in_file = fname_bvec
    rename_bvec.inputs.format_string = os.path.join(
        base_dir_bvec, f"{source_file_dwi}_space-T1w_preproc.bvec"
    )
    out_caps_bvec = rename_bvec.run()

    # Rename into CAPS DWI:
    rename_brainmask = Rename()
    rename_brainmask.inputs.in_file = fname_brainmask
    rename_brainmask.inputs.format_string = os.path.join(
        base_dir_brainmask, f"{source_file_dwi}_space-T1w_brainmask.nii.gz"
    )
    out_caps_brainmask = rename_brainmask.run()

    return (
        out_caps_dwi.outputs.out_file,
        out_caps_bval.outputs.out_file,
        out_caps_bvec.outputs.out_file,
        out_caps_brainmask.outputs.out_file,
    )
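A hypothetical call (paths made up) just to show the naming contract; note that Rename actually touches the filesystem, so the fname_* files must exist:

dwi, bval, bvec, mask = rename_into_caps(
    'sub-CLNC01_ses-M00_dwi.nii.gz',
    'preproc/dwi.nii.gz', 'preproc/dwi.bval',
    'preproc/dwi.bvec', 'preproc/b0_mask.nii.gz')
# dwi -> preproc/sub-CLNC01_ses-M00_dwi_space-T1w_preproc.nii.gz, etc.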
Example #35
def nii2streamlines(imgfile, maskfile, bvals, bvecs):
    import numpy as np
    import nibabel as nib
    import os

    from dipy.reconst.dti import TensorModel

    img = nib.load(imgfile)
    bvals = np.genfromtxt(bvals)
    bvecs = np.genfromtxt(bvecs)
    if bvecs.shape[1] != 3:
        bvecs = bvecs.T

    from nipype.utils.filemanip import split_filename
    _, prefix, _ = split_filename(imgfile)

    from dipy.data import gradient_table

    gtab = gradient_table(bvals, bvecs)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]
    new_zooms = (2., 2., 2.)
    data2, affine2 = data, affine
    mask = nib.load(maskfile).get_data().astype(np.bool)
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data2, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, '%s_tensor_fa.nii.gz' % prefix)

    evecs = tenfit.evecs

    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    nib.save(evec_img, '%s_tensor_evec.nii.gz' % prefix)

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs

    peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)

    from dipy.tracking.eudx import EuDX

    eu = EuDX(FA,
              peak_indices,
              odf_vertices=sphere.vertices,
              a_low=0.2,
              seeds=10**6,
              ang_thr=35)
    tensor_streamlines = [streamline for streamline in eu]

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = new_zooms
    hdr['voxel_order'] = 'LPS'
    hdr['dim'] = data2.shape[:3]

    import dipy.tracking.metrics as dmetrics
    tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines
                          if dmetrics.length(sl) > 15)

    ten_sl_fname = '%s_streamline.trk' % prefix

    nib.trackvis.write(ten_sl_fname,
                       tensor_streamlines,
                       hdr,
                       points_space='voxel')
    return ten_sl_fname
Example #36
def write_trackvis_scene(track_file,
                         n_clusters=1,
                         skip=80,
                         names=None,
                         out_file="NewScene.scene"):
    from random import randint, uniform
    bg_r, bg_g, bg_b = 0, 0, 0
    f = open(out_file, 'w')

    # Write some comments
    #f.write('<?xml version="1.0"?>\n')
    #f.write('<TrackVis>\n')
    f.write('<Comment>\n')
    f.write(
        '    Scene file for TrackVis. Copyright (c) Ruopeng wang and Van J. Wedeen\n'
    )
    f.write(
        '    DO NOT mess with this file unless you know what you are doing.\n')
    f.write(
        '      * Starting from version 2.0, all the parameters are represented in LPS coordinate.\n'
    )
    f.write(
        '        So x/y/z represent L/P/S. TrackVis will make neccessary transformation when loading\n'
    )
    f.write('        older scene files.\n')
    f.write('    Written using coma.workflows.dti.write_trackvis_scene()\n')
    f.write('</Comment>\n')

    # Define the scene dimensions
    import nibabel.trackvis as trk
    _, hdr = trk.read(track_file)
    x, y, z = hdr['dim']
    vx, vy, vz = hdr['voxel_size']

    f.write('<Scene version="2.2">\n')
    f.write('    <Dimension x="%d" y="%d" z="%d" />\n' % (x, y, z))
    f.write('    <VoxelSize x="%f" y="%f" z="%f" />\n' % (vx, vy, vz))
    f.write('    <VoxelOrder current="LPS" original="LAS" />\n')
    f.write('    <LengthUnit value="0" />\n')

    from nipype.utils.filemanip import split_filename
    _, name, ext = split_filename(track_file)
    rpath = name + ext

    f.write('    <TrackFile path="%s" rpath="%s" />\n' % (track_file, rpath))
    f.write('    <Tracks>\n')

    colors = []

    for n in range(0, n_clusters):
        color = generate_new_color(colors, pastel_factor=0.7)
        colors.append(color)
        h = randint(0, 300)  # Select random green'ish hue from hue wheel
        s = uniform(0.2, 1)
        v = uniform(0.2, 1)

        r, g, b = hsv_to_rgb(h, s, v)
        r = randint(0, 255)
        g = randint(0, 255)
        b = randint(0, 255)

        if names is not None:
            f.write('        <Track name="%s" id="%d">\n' %
                    (str(names[n]), n + 1000))
        else:
            f.write('        <Track name="Track %d" id="%d">\n' %
                    (n, n + 1000))

        f.write('            <Length low="0" high="1e+08" />\n')
        f.write('            <Property id="0" low="%d" high="%d" />\n' %
                (n, n))  # Determines which bundle is shown
        f.write(
            '            <Slice plane="0" number="91" thickness="1" testmode="0" enable="0" visible="1" operator="and" id="1925142528"/>\n'
        )
        f.write(
            '            <Slice plane="1" number="109" thickness="1" testmode="0" enable="0" visible="1" operator="and" id="1881842394"/>\n'
        )
        f.write(
            '            <Slice plane="2" number="91" thickness="1" testmode="0" enable="0" visible="1" operator="and" id="2133446589"/>\n'
        )
        f.write('            <Skip value="%f" enable="1" />\n' % skip)
        f.write('            <ShadingMode value="0" />\n')
        f.write('            <Radius value="0.05" />\n')
        f.write('            <NumberOfSides value="5" />\n')
        f.write('            <ColorCode value="1" />\n')
        f.write('            <SolidColor r="%d" g="%d" b="%d" />\n' %
                (r, g, b))
        f.write('            <ScalarIndex value="0" />\n')
        f.write('            <ScalarGradient>\n')
        f.write('                <ColorStop stop="0" r="1" g="1" b="0" />\n')
        f.write('                <ColorStop stop="1" r="1" g="0" b="0" />\n')
        f.write('            </ScalarGradient>\n')
        f.write('            <Saturation value="1" />\n')
        f.write('            <HelixPoint x="91" y="109" z="91" />\n')
        f.write('            <HelixVector x="1" y="0" z="0" />\n')
        f.write('            <HelixAxis visibility="1" />\n')
        f.write('            <Visibility value="1" />\n')
        f.write(
            '            <AnnotationPosition x="67.8772" y="113.876" z="95.828" />\n'
        )
        f.write('    </Track>\n')

    f.write('    <CurrentIndex value="0" />\n')
    f.write('</Tracks>\n')

    f.write('<LookUpTable>\n')
    f.write('    <DirectionScheme value="2" />\n')
    f.write('    <DirectionVector value="0" />\n')
    f.write('</LookUpTable>\n')
    f.write('<Coordinate>\n')
    f.write('    <Point000 x="0" y="0" z="0" id="-1" />\n')
    f.write('    <Point100 x="1" y="0" z="0" id="-1" />\n')
    f.write('    <Point##0 x="0" y="1" z="0" id="-1" />\n')
    f.write('    <Annotation type="1" />\n')
    f.write('</Coordinate>\n')
    f.write('<Camera>\n')
    f.write('    <Position x="91" y="-388.794" z="91" />\n')
    f.write('    <FocalPoint x="91" y="109" z="91" />\n')
    f.write('    <ViewUp x="0" y="2.28382e-16" z="1" />\n')
    f.write('    <ViewAngle value="30" />\n')
    f.write('    <ClippingRange near="333.159" far="702.527" />\n')
    f.write('</Camera>\n')
    f.write('<ObjectAnnotation value="0" />\n')
    f.write('<BackgroundColor r="%d" g="%d" b="%d" />\n' % (bg_r, bg_g, bg_b))
    f.write('</Scene>\n')
    return out_file
Example #37
 def _format_arg(self, name, spec, value):
     if name == 'field':
         pth, fname, _ = split_filename(value)
         return spec.argstr % op.join(pth, fname)
     return super(ExtendedEddy, self)._format_arg(name, spec, value)
Example #38
 def _gen_outfilename(self):
     _, name, _ = split_filename(self.inputs.in_file)
     return name + '_peaks.Bdouble'
Example #39
File: rois.py Project: zuxfoucault/pypes
 def _list_outputs(self):
     outputs = self._outputs().get()
     fname = self.inputs.img
     _, base, _ = split_filename(fname)
     outputs["out"] = os.path.abspath(base + '_drained.nii.gz')
     return outputs
Example #40
def test_flirt():
    # setup
    tmpdir, infile, reffile = setup_flirt()

    flirter = fsl.FLIRT()
    yield assert_equal, flirter.cmd, 'flirt'

    flirter.inputs.bins = 256
    flirter.inputs.cost = 'mutualinfo'

    flirted = fsl.FLIRT(in_file=infile,
                        reference=reffile,
                        out_file='outfile',
                        out_matrix_file='outmat.mat',
                        bins=256,
                        cost='mutualinfo')
    flirt_est = fsl.FLIRT(in_file=infile,
                          reference=reffile,
                          out_matrix_file='outmat.mat',
                          bins=256,
                          cost='mutualinfo')
    yield assert_not_equal, flirter.inputs, flirted.inputs
    yield assert_not_equal, flirted.inputs, flirt_est.inputs

    yield assert_equal, flirter.inputs.bins, flirted.inputs.bins
    yield assert_equal, flirter.inputs.cost, flirt_est.inputs.cost
    realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \
        '-bins 256 -cost mutualinfo' % (infile, reffile)
    yield assert_equal, flirted.cmdline, realcmd

    flirter = fsl.FLIRT()
    # infile not specified
    yield assert_raises, ValueError, flirter.run
    flirter.inputs.in_file = infile
    # reference not specified
    yield assert_raises, ValueError, flirter.run
    flirter.inputs.reference = reffile
    # Generate outfile and outmatrix
    pth, fname, ext = split_filename(infile)
    outfile = fsl_name(flirter, '%s_flirt' % fname)
    outmat = '%s_flirt.mat' % fname
    realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile,
                                                         outfile, outmat)
    yield assert_equal, flirter.cmdline, realcmd

    _, tmpfile = tempfile.mkstemp(suffix='.nii', dir=tmpdir)
    # Loop over all inputs, set a reasonable value and make sure the
    # cmdline is updated correctly.
    for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()):
        # Skip mandatory inputs and the trait methods
        if key in ('trait_added', 'trait_modified', 'in_file', 'reference',
                   'environ', 'output_type', 'out_file', 'out_matrix_file',
                   'in_matrix_file', 'apply_xfm', 'ignore_exception',
                   'terminal_output', 'out_log', 'save_log'):
            continue
        param = None
        value = None
        if key == 'args':
            param = '-v'
            value = '-v'
        elif isinstance(trait_spec.trait_type, File):
            value = tmpfile
            param = trait_spec.argstr % value
        elif trait_spec.default is False:
            param = trait_spec.argstr
            value = True
        elif key in ('searchr_x', 'searchr_y', 'searchr_z'):
            value = [-45, 45]
            param = trait_spec.argstr % ' '.join(str(elt) for elt in value)
        else:
            value = trait_spec.default
            param = trait_spec.argstr % value
        cmdline = 'flirt -in %s -ref %s' % (infile, reffile)
        # Handle autogeneration of outfile
        pth, fname, ext = split_filename(infile)
        outfile = fsl_name(fsl.FLIRT(), '%s_flirt' % fname)
        outfile = ' '.join(['-out', outfile])
        # Handle autogeneration of outmatrix
        outmatrix = '%s_flirt.mat' % fname
        outmatrix = ' '.join(['-omat', outmatrix])
        # Build command line
        cmdline = ' '.join([cmdline, outfile, outmatrix, param])
        flirter = fsl.FLIRT(in_file=infile, reference=reffile)
        setattr(flirter.inputs, key, value)
        yield assert_equal, flirter.cmdline, cmdline

    # Test OutputSpec
    flirter = fsl.FLIRT(in_file=infile, reference=reffile)
    pth, fname, ext = split_filename(infile)
    flirter.inputs.out_file = ''.join(['foo', ext])
    flirter.inputs.out_matrix_file = ''.join(['bar', ext])
    outs = flirter._list_outputs()
    yield assert_equal, outs['out_file'], \
          os.path.join(os.getcwd(), flirter.inputs.out_file)
    yield assert_equal, outs['out_matrix_file'], \
          os.path.join(os.getcwd(), flirter.inputs.out_matrix_file)

    teardown_flirt(tmpdir)
Example #41
    def _run_interface(self, runtime):
        with threadpool_limits(limits=1, user_api='blas'):
            #load in images and get shape
            fmag = nb.load(self.inputs.mag)
            fphase = nb.load(self.inputs.phase)

            saveshape = fmag.header.get_data_shape()
            nt = saveshape[-1]
            savetype = fmag.header.get_data_dtype()
            memory_limit = 2 * (10**9)  #2GB of data max used in bytes

            # calculate maximum number of voxel in memory at one time
            num_voxels_in_chunk = np.round(
                0.95 * memory_limit / (2 * nt * savetype.itemsize), -3)

            #this interface is gonna go slice by slice as the dataobj cannot be reshaped
            assert (num_voxels_in_chunk > np.prod(saveshape[0:2]))

            # create output memmaps
            saveuuid = uuid.uuid4()
            filesim = path.join(os.getcwd(),
                                'filesim_' + str(saveuuid) + '.dat')
            sim = np.memmap(filesim,
                            dtype=savetype,
                            shape=tuple(saveshape),
                            mode='w+')
            filefilt = path.join(os.getcwd(),
                                 'filefilt_' + str(saveuuid) + '.dat')
            filt = np.memmap(filefilt,
                             dtype=savetype,
                             shape=tuple(saveshape),
                             mode='w+')
            filestdm = path.join(os.getcwd(),
                                 'filestdm_' + str(saveuuid) + '.dat')
            stdm = np.memmap(filestdm,
                             dtype=savetype,
                             shape=tuple(saveshape[0:-1]),
                             mode='w+')
            filestdp = path.join(os.getcwd(),
                                 'filestdp_' + str(saveuuid) + '.dat')
            stdp = np.memmap(filestdp,
                             dtype=savetype,
                             shape=tuple(saveshape[0:-1]),
                             mode='w+')
            filer2 = path.join(os.getcwd(), 'filer2_' + str(saveuuid) + '.dat')
            r2 = np.memmap(filer2,
                           dtype=savetype,
                           shape=tuple(saveshape[0:-1]),
                           mode='w+')
            filebeta = path.join(os.getcwd(),
                                 'filebeta_' + str(saveuuid) + '.dat')
            # defer beta initialization until we know its size

            #load additional regressors (motion or physio)
            if self.inputs.global_regressors:
                regressors = np.loadtxt(self.inputs.global_regressors)
                beta = np.memmap(filebeta,
                                 dtype=savetype,
                                 shape=tuple([
                                     saveshape[0], saveshape[1], saveshape[2],
                                     2 + regressors.shape[1]
                                 ]),
                                 mode='w+')
            else:
                beta = np.memmap(
                    filebeta,
                    dtype=savetype,
                    shape=tuple([saveshape[0], saveshape[1], saveshape[2], 2]),
                    mode='w+')

            #initialize model
            linearfit = odr.Model(self.multiplelinear)

            # freqs for FT indices used for noise estimation
            freqs = np.linspace(-1.0, 1.0, nt) / (2 * self.inputs.TR)
            noise_idx = np.where((abs(freqs) > self.inputs.noise_lb))[0]
            noise_mask = np.fft.fftshift(1.0 *
                                         (abs(freqs) > self.inputs.noise_lb))

            # create mask that is 3% of initial image intensity
            mag_first = np.reshape(np.array(fmag.dataobj[:, :, :, 0]), [
                -1,
            ])
            mask = np.reshape(mag_first > 0.03 * np.max(mag_first),
                              [-1, saveshape[2]])

            # load image in one slice at a time
            for slice in range(saveshape[2]):
                print('On slice: ', slice, ' / ', saveshape[2])
                mag = np.reshape(fmag.dataobj[:, :, slice, :], [-1, nt])
                ph = np.reshape(fphase.dataobj[:, :, slice, :], [-1, nt])

                # Estimate sigmas in one preproc step as fft can be done across multiple voxels at once
                temp = mag
                mu = np.mean(mag, axis=-1)
                stdm[:, :, slice] = np.reshape(
                    np.std(
                        np.fft.ifft(
                            np.fft.fft(mag - mu[..., np.newaxis]) *
                            noise_mask), -1), [saveshape[0], saveshape[1]])
                temp = ph
                mu = np.mean(ph, axis=-1)
                stdp[:, :, slice] = np.reshape(
                    np.std(
                        np.fft.ifft(
                            np.fft.fft(ph - mu[..., np.newaxis]) * noise_mask),
                        -1), [saveshape[0], saveshape[1]])

                #perform fit voxel by voxel
                for x in range(mag.shape[0]):
                    if mask[x, slice]:
                        #need row and column index for saving as memmaps are 4D
                        r, c = np.unravel_index(x, saveshape[0:2])

                        mm = np.mean(mag[x, :])
                        mp = np.mean(ph[x, :])

                        # intialize X as columns of data, regressors (optional), intercept
                        if 'regressors' in locals():
                            design = np.row_stack((ph[x, :], regressors.T,
                                                   np.ones(ph[x, :].shape)))
                            ests = np.hstack(
                                [[stdm[r, c, slice] / stdp[r, c, slice]],
                                 np.ones((regressors.shape[1], )), [mm / mp]])
                            mydata = odr.RealData(design,
                                                  mag[x, :].T,
                                                  sx=np.hstack(
                                                      (stdp[r, c, slice],
                                                       np.std(regressors,
                                                              axis=0), 1)),
                                                  sy=stdm[r, c, slice])
                        else:
                            design = np.row_stack(
                                (ph[x, :], np.ones(ph[x, :].shape)))
                            ests = [
                                stdm[r, c, slice] / stdp[r, c, slice], mm / mp
                            ]
                            mydata = odr.RealData(design,
                                                  mag[x, :].T,
                                                  sx=np.hstack(
                                                      [stdp[r, c, slice], 1]),
                                                  sy=stdm[r, c, slice])
                        # and fit model
                        # mag = A*phase + B*regressors + C
                        # (y=mx+b)
                        # call : (x,y,sx,sy)
                        odr_obj = odr.ODR(mydata,
                                          linearfit,
                                          beta0=ests,
                                          maxit=200)
                        res = odr_obj.run()
                        est = res.y
                        r2[r, c, slice] = 1.0 - sum(
                            (mag[x, :] - est)**2) / sum((mag[x, :] - mm)**2)
                        beta[r, c, slice, :] = res.beta

                        # take out scaled phase signal
                        sim[r, c, slice] = ph[x, :] * res.beta[0]
                        filt[r, c, slice] = mag[x, :] - est + mm

            _, outname, _ = split_filename(self.inputs.mag)
            print(outname)
            outnii = nb.Nifti1Image(sim,
                                    affine=fmag.affine,
                                    header=fmag.get_header())
            outnii.to_filename(outname + '_sim.nii.gz')
            outnii = nb.Nifti1Image(filt,
                                    affine=fmag.affine,
                                    header=fmag.get_header())
            outnii.to_filename(outname + '_filt.nii.gz')

            # plot fit statistic info
            outnii = nb.Nifti1Image(stdp,
                                    affine=fmag.affine,
                                    header=fmag.get_header())
            outnii.to_filename(outname + '_stdp.nii.gz')
            outnii = nb.Nifti1Image(stdm,
                                    affine=fmag.affine,
                                    header=fmag.get_header())
            outnii.to_filename(outname + '_stdm.nii.gz')
            outnii = nb.Nifti1Image(r2,
                                    affine=fmag.affine,
                                    header=fmag.get_header())
            outnii.to_filename(outname + '_r2.nii.gz')
            outnii = nb.Nifti1Image(beta,
                                    affine=fmag.affine,
                                    header=fmag.get_header())
            outnii.to_filename(outname + '_betas.nii.gz')
            return runtime
Example #42
#   and its Node :class:`ephypype.interfaces.mne.power.Power` compute the PSD
#   by the welch function of the scipy package.

##############################################################################
from ephypype.gather.gather_results import get_results  # noqa
from visbrain.objects import SourceObj, SceneObj, ColorbarObj  # noqa
from visbrain.utils import normalize  # noqa
from nipype.utils.filemanip import split_filename  # noqa

psd_files, channel_coo_files = get_results(main_workflow.base_dir,
                                           main_workflow.name,
                                           pipeline='power')

sc = SceneObj(size=(1800, 500), bgcolor=(.1, .1, .1))
for psd_file, channel_coo_file in zip(psd_files, channel_coo_files):
    path_xyz, basename, ext = split_filename(psd_file)

    arch = np.load(psd_file)
    psds, freqs = arch['psds'], arch['freqs']
    xyz = np.genfromtxt(channel_coo_file, dtype=float)
    freq_bands = np.asarray(freq_bands)
    clim = (psds.min(), psds.max())

    # Find indices of frequencies :
    idx_fplt = np.abs(
        (freqs.reshape(1, 1, -1) - freq_bands[..., np.newaxis])).argmin(2)
    psdf = np.array([psds[:, k[0]:k[1]].mean(1) for k in idx_fplt])
    radius = normalize(np.c_[psdf.min(1), psdf.max(1)], 5, 25).astype(float)

    for num, (fb, fbn, psd,
              rx) in enumerate(zip(freq_bands, freq_band_names, psdf, radius)):
Example #43
 def _gen_outfilename(self):
     _, name , _ = split_filename(self.inputs.in_file)
     return name + '_SH.mif'
Example #44
    def _gen_filename(self, name):
        """Generate output file name
"""
        if name == 'out_file':
            _, fname, ext = split_filename(self.inputs.in_file)
            return os.path.join(os.getcwd(), ''.join((fname, '_3dT', ext)))
Example #45
File: bids.py Project: rtybanana/qsiprep
    def _run_interface(self, runtime):
        params = get_bids_params(self.inputs.dwi_file)
        self._results = {
            key: val
            for key, val in list(params.items()) if val is not None
        }
        space = self._results.get("space_id")
        if space is None:
            raise Exception("Unable to detect space of %s" %
                            self.inputs.dwi_file)

        # Find the additional files
        out_root, fname, _ = split_filename(self.inputs.dwi_file)
        self._results['bval_file'] = op.join(out_root, fname + ".bval")
        self._results['bvec_file'] = op.join(out_root, fname + ".bvec")
        self._get_if_exists('confounds_file',
                            op.join(out_root, "*confounds.tsv"))
        self._get_if_exists('local_bvec_file',
                            op.join(out_root, fname[:-3] + 'bvec.nii*'))
        self._get_if_exists('b_file', op.join(out_root, fname + ".b"))
        self._get_if_exists(
            'mask_file', op.join(out_root, fname[:-11] + 'brain_mask.nii.gz'))
        self._get_if_exists('dwi_ref',
                            op.join(out_root, fname[:-16] + 'dwiref.nii.gz'))
        self._results['dwi_file'] = self.inputs.dwi_file

        # Image QC doesn't include space
        self._get_if_exists(
            'qc_file', self._get_qc_filename(out_root, params, "ImageQC",
                                             "csv"))
        self._get_if_exists(
            'slice_qc_file',
            self._get_qc_filename(out_root, params, "SliceQC", "json"))

        # Get the anatomical data
        path_parts = out_root.split(op.sep)[:-1]  # remove "dwi"
        # Anat is above ses
        if path_parts[-1].startswith('ses'):
            path_parts.pop()
        """
        qp_root = op.sep.join(path_parts)
        anat_root = op.join(qp_root, 'anat')
        sub = self._results['subject_id']
        if space == "space-T1w":
            self._get_if_exists('tpms', anat_root + "/%s_label-*_probseg.nii*" % sub)
            self._get_if_exists('t1_brain',
                '%s/%s_desc-preproc_T1w.nii*' % (anat_root, sub))
            self._get_if_exists('anat_mask',
                '%s/%s_desc-brain_mask.nii*' % (anat_root, sub))
        else:
            self._get_if_exists('tpms',
                anat_root + "/%s_space-MNI152NLin2009cAsym_label-CSF_probseg.nii*" % sub,
                multi_ok=True)
            self._get_if_exists('seg',
                '%s/%s_space-MNI152NLin2009cAsym_dseg.nii*' % (anat_root, sub))[0]
            self._get_if_exists('t1_brain',
                '%s/%s_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii*' % (anat_root, sub))
            self._get_if_exists('anat_mask',
                '%s/%s_space-MNI152NLin2009cAsym_desc-brain_mask.nii*' % (anat_root, sub))
        self._get_if_exists('t1_2_mni_reverse_transform',
            '%s/%s_from-MNI152NLin2009cAsym_to-T1w*_xfm.h5' % (anat_root, sub))
        self._get_if_exists('t1_2_mni_forward_transform',
            '%s/%s_from-T1w_to-MNI152NLin2009cAsym*_xfm.h5' % (anat_root, sub))
        """
        return runtime
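The interface above also calls a _get_if_exists helper that the snippet does not include. A plausible glob-based sketch, inferred from the call sites (the signature is taken from the calls above; the body is an assumption):

from glob import glob

def _get_if_exists(self, name, pattern, multi_ok=False):
    # Record the glob match(es) for `name` in self._results, if any exist.
    files = sorted(glob(pattern))
    if files:
        self._results[name] = files if multi_ok else files[0]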
Example #46
 def _gen_outfilename(self):
     _, bvec, _ = split_filename(self.inputs.bvec_file)
     _, bval, _ = split_filename(self.inputs.bval_file)
     return bvec + '_' + bval + '.txt'
Example #47
    def _create_cifti_image(bold_file,
                            label_file,
                            annotation_files,
                            gii_files,
                            volume_target,
                            surface_target,
                            tr,
                            download_link=None):
        """
        Generate CIFTI image in target space

        Parameters
            bold_file : 4D BOLD timeseries
            label_file : label atlas
            annotation_files : FreeSurfer annotations
            gii_files : 4D BOLD surface timeseries in GIFTI format
            volume_target : label atlas space
            surface_target : gii_files space
            tr : repetition time
            download_link : URL to download label_file

        Returns
            out_file : BOLD data as CIFTI dtseries
        """

        label_img = nb.load(label_file)
        bold_img = resample_to_img(bold_file, label_img)

        bold_data = bold_img.get_data()
        timepoints = bold_img.shape[3]
        label_data = label_img.get_data()

        # set up CIFTI information
        series_map = ci.Cifti2MatrixIndicesMap(
            (0, ),
            'CIFTI_INDEX_TYPE_SERIES',
            number_of_series_points=timepoints,
            series_exponent=0,
            series_start=0.0,
            series_step=tr,
            series_unit='SECOND')
        # Create CIFTI brain models
        idx_offset = 0
        brainmodels = []
        bm_ts = np.empty((timepoints, 0))

        for structure, labels in CIFTI_STRUCT_WITH_LABELS.items():
            if labels is None:  # surface model
                model_type = "CIFTI_MODEL_TYPE_SURFACE"
                # use the corresponding annotation
                hemi = structure.split('_')[-1]
                annot = nb.freesurfer.read_annot(
                    annotation_files[hemi == "RIGHT"])
                # currently only supports L/R cortex
                gii = nb.load(gii_files[hemi == "RIGHT"])
                # calculate total number of vertices
                surf_verts = len(annot[0])
                # remove medial wall for CIFTI format
                vert_idx = np.nonzero(
                    annot[0] != annot[2].index(b'unknown'))[0]
                # extract values across volumes
                ts = np.array([tsarr.data[vert_idx] for tsarr in gii.darrays])

                vert_idx = ci.Cifti2VertexIndices(vert_idx)
                bm = ci.Cifti2BrainModel(index_offset=idx_offset,
                                         index_count=len(vert_idx),
                                         model_type=model_type,
                                         brain_structure=structure,
                                         vertex_indices=vert_idx,
                                         n_surface_vertices=surf_verts)
                bm_ts = np.column_stack((bm_ts, ts))
                idx_offset += len(vert_idx)
                brainmodels.append(bm)
            else:
                model_type = "CIFTI_MODEL_TYPE_VOXELS"
                vox = []
                ts = None
                for label in labels:
                    ijk = np.nonzero(label_data == label)
                    ts = (bold_data[ijk] if ts is None else np.concatenate(
                        (ts, bold_data[ijk])))
                    vox += [[ijk[0][ix], ijk[1][ix], ijk[2][ix]]
                            for ix, row in enumerate(ts)]

                bm_ts = np.column_stack((bm_ts, ts.T))

                vox = ci.Cifti2VoxelIndicesIJK(vox)
                bm = ci.Cifti2BrainModel(index_offset=idx_offset,
                                         index_count=len(vox),
                                         model_type=model_type,
                                         brain_structure=structure,
                                         voxel_indices_ijk=vox)
                idx_offset += len(vox)
                brainmodels.append(bm)

        volume = ci.Cifti2Volume(
            bold_img.shape[:3],
            ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(
                -3, bold_img.affine))
        brainmodels.append(volume)

        # create CIFTI geometry based on brainmodels
        geometry_map = ci.Cifti2MatrixIndicesMap(
            (1, ), 'CIFTI_INDEX_TYPE_BRAIN_MODELS', maps=brainmodels)
        # provide some metadata to CIFTI matrix
        meta = {
            "target_surface": surface_target,
            "target_volume": volume_target,
            "download_link": download_link,
        }
        # generate and save CIFTI image
        matrix = ci.Cifti2Matrix()
        matrix.append(series_map)
        matrix.append(geometry_map)
        matrix.metadata = ci.Cifti2MetaData(meta)
        hdr = ci.Cifti2Header(matrix)
        img = ci.Cifti2Image(bm_ts, hdr)
        img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES')

        _, out_base, _ = split_filename(bold_file)
        out_file = "{}.dtseries.nii".format(out_base)
        ci.save(img, out_file)
        return os.path.join(os.getcwd(), out_file)
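A hypothetical invocation of the function above (all paths and the TR are invented for illustration):

out_file = _create_cifti_image(
    bold_file='sub-01_task-rest_bold.nii.gz',
    label_file='subcortical_atlas.nii.gz',
    annotation_files=['lh.aparc.annot', 'rh.aparc.annot'],
    gii_files=['sub-01_hemi-L_bold.func.gii', 'sub-01_hemi-R_bold.func.gii'],
    volume_target='MNI152NLin2009cAsym',
    surface_target='fsaverage5',
    tr=2.0)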
Example #48
    def _parse_stdout(self, stdout):
        files = []
        reoriented_files = []
        reoriented_and_cropped_files = []
        philips_dwi = []
        bvecs = []
        bvals = []
        skip = False
        last_added_file = None
        t13d = []
        t12d = []
        pdt2 = []
        flair = []
        psir = []
        mtr = []
        b0map = []
        dirsense = []

        if isdefined(self.inputs.output_dir):
            output_dir = self.inputs.output_dir
        else:
            output_dir = self._gen_filename('output_dir')

        for line in stdout.split("\n"):
            if not skip:
                m_file = None
                if line.startswith("Saving "):
                    m_file = line[len("Saving "):]
                elif line.startswith("GZip..."):
                    # for gzipped outputs, paths are not absolute
                    m_file = os.path.abspath(
                        os.path.join(output_dir, line[len("GZip..."):]))
                elif line.startswith("Number of diffusion directions "):
                    if last_added_file:
                        base, filename, ext = split_filename(last_added_file)
                        bvecs.append(os.path.join(base, filename + ".bvec"))
                        bvals.append(os.path.join(base, filename + ".bval"))
                elif re.search('.*-->(.*)', line):
                    val = re.search('.*-->(.*)', line)
                    val = val.groups()[0]
                    if isdefined(self.inputs.output_dir):
                        output_dir = self.inputs.output_dir
                    else:
                        output_dir = self._gen_filename('output_dir')
                    val = os.path.join(output_dir, val)
                    m_file = val

                if m_file:
                    files.append(m_file)
                    last_added_file = m_file
                    continue

                if line.startswith("Reorienting as "):
                    reoriented_files.append(line[len("Reorienting as "):])
                    skip = True
                    continue
                elif line.startswith("Cropping NIfTI/Analyze image "):
                    # base, filename = os.path.split(line[len("Cropping NIfTI/Analyze image "):])
                    # filename = "c" + filename
                    # Cropped files are not needed here
                    # reoriented_and_cropped_files.append(os.path.join(base, filename))
                    skip = True
                    continue
                elif line.startswith("Removed DWI from DTI scan "):
                    # remove xFILENAME from converted list and add to philips_dwi
                    file_ind = files.index(last_added_file)
                    del files[file_ind]
                    philips_dwi.append(last_added_file)

                    # When converting Philips data, the NIfTI file is cropped
                    # to remove the eADC image (last volume); the bval and
                    # bvec files are therefore associated with the xFILENAME,
                    # not the FILENAME

                    # get indices for original bvec and bval files from list (must be xFILENAME version)
                    base, filename, ext = split_filename(last_added_file)
                    orgfilename = filename[1:]  # remove 'x' from filename
                    bvec_ind = bvecs.index(
                        os.path.join(base, orgfilename + ".bvec"))
                    bval_ind = bvals.index(
                        os.path.join(base, orgfilename + ".bval"))
                    del bvecs[bvec_ind]
                    del bvals[bval_ind]

                    # replace bvec/bval with xFILE version
                    bvecs.append(os.path.join(base, filename + '.bvec'))
                    bvals.append(os.path.join(base, filename + '.bval'))

                    skip = True
                    continue
                elif re.search('.*-->(.*)T13D(.*)', line):
                    t13d.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)T1FASTCLEAR(.*)', line):
                    t12d.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)PDT2(.*)', line):
                    pdt2.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)PSIR(.*)', line):
                    psir.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)FLAIR(.*)', line):
                    flair.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)MTR(.*)', line):
                    mtr.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)BOMap(.*)', line):
                    b0map.append(last_added_file)
                    skip = True
                    continue
                elif re.search('.*-->(.*)DIRSENSE(.*)', line):
                    dirsense.append(last_added_file)
                    skip = True
                    continue

            skip = False
        return files, reoriented_files, reoriented_and_cropped_files, philips_dwi, bvecs, bvals, \
               output_dir, t13d, t12d, pdt2, flair, psir, mtr, b0map, dirsense
Example #49
File: utils.py  Project: wanderine/nipype
 def _make_mat_file(self):
     """ makes name for matfile if doesn exist"""
     pth, mv, _ = split_filename(self.inputs.moving)
     _, tgt, _ = split_filename(self.inputs.target)
     mat = os.path.join(pth, '%s_to_%s.mat' % (mv, tgt))
     return mat
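For example (hypothetical inputs):

# with self.inputs.moving = '/data/epi.nii.gz'
#  and self.inputs.target = '/data/T1.nii.gz'
self._make_mat_file()  # -> '/data/epi_to_T1.mat'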
Example #50
def _create_cifti_image(bold_file, label_file, bold_surfs, annotation_files, tr, targets):
    """
    Generate CIFTI image in target space.

    Parameters
    ----------
    bold_file : str
        BOLD volumetric timeseries
    label_file : str
        Subcortical label file
    bold_surfs : list
        BOLD surface timeseries [L,R]
    annotation_files : list
        Surface label files used to remove medial wall
    tr : float
        BOLD repetition time
    targets : tuple or list
        Surface and volumetric output spaces

    Returns
    -------
    out :
        BOLD data saved as CIFTI dtseries
    """
    bold_img = nb.load(bold_file)
    label_img = nb.load(label_file)
    if label_img.shape != bold_img.shape[:3]:
        warnings.warn("Resampling bold volume to match label dimensions")
        bold_img = resample_to_img(bold_img, label_img)

    bold_data = bold_img.get_fdata(dtype='float32')
    timepoints = bold_img.shape[3]
    label_data = np.asanyarray(label_img.dataobj).astype('int16')

    # Create brain models
    idx_offset = 0
    brainmodels = []
    bm_ts = np.empty((timepoints, 0))

    for structure, labels in CIFTI_STRUCT_WITH_LABELS.items():
        if labels is None:  # surface model
            model_type = "CIFTI_MODEL_TYPE_SURFACE"
            # use the corresponding annotation
            hemi = structure.split('_')[-1]
            # currently only supports L/R cortex
            surf = nb.load(bold_surfs[hemi == "RIGHT"])
            surf_verts = len(surf.darrays[0].data)
            if annotation_files[0].endswith('.annot'):
                annot = nb.freesurfer.read_annot(annotation_files[hemi == "RIGHT"])
                # remove medial wall
                medial = np.nonzero(annot[0] != annot[2].index(b'unknown'))[0]
            else:
                annot = nb.load(annotation_files[hemi == "RIGHT"])
                medial = np.nonzero(annot.darrays[0].data)[0]
            # extract values across volumes
            ts = np.array([tsarr.data[medial] for tsarr in surf.darrays])

            vert_idx = ci.Cifti2VertexIndices(medial)
            bm = ci.Cifti2BrainModel(
                index_offset=idx_offset,
                index_count=len(vert_idx),
                model_type=model_type,
                brain_structure=structure,
                vertex_indices=vert_idx,
                n_surface_vertices=surf_verts
            )
            idx_offset += len(vert_idx)
            bm_ts = np.column_stack((bm_ts, ts))
        else:
            model_type = "CIFTI_MODEL_TYPE_VOXELS"
            vox = []
            ts = None
            for label in labels:
                ijk = np.nonzero(label_data == label)
                if ijk[0].size == 0:  # skip label if nothing matches
                    continue
                ts = (bold_data[ijk] if ts is None
                      else np.concatenate((ts, bold_data[ijk])))
                vox += [[ijk[0][ix], ijk[1][ix], ijk[2][ix]]
                        for ix, row in enumerate(ts)]

            vox = ci.Cifti2VoxelIndicesIJK(vox)
            bm = ci.Cifti2BrainModel(
                index_offset=idx_offset,
                index_count=len(vox),
                model_type=model_type,
                brain_structure=structure,
                voxel_indices_ijk=vox
            )
            idx_offset += len(vox)
            bm_ts = np.column_stack((bm_ts, ts.T))
        # add each brain structure to list
        brainmodels.append(bm)

    # add volume information
    brainmodels.append(
        ci.Cifti2Volume(
            bold_img.shape[:3],
            ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, bold_img.affine)
        )
    )

    # generate Matrix information
    series_map = ci.Cifti2MatrixIndicesMap(
        (0,),
        'CIFTI_INDEX_TYPE_SERIES',
        number_of_series_points=timepoints,
        series_exponent=0,
        series_start=0.0,
        series_step=tr,
        series_unit='SECOND'
    )
    geometry_map = ci.Cifti2MatrixIndicesMap(
        (1, ),
        'CIFTI_INDEX_TYPE_BRAIN_MODELS',
        maps=brainmodels
    )
    # provide some metadata to CIFTI matrix
    meta = {
        "surface": targets[0],
        "volume": targets[1],
    }
    # generate and save CIFTI image
    matrix = ci.Cifti2Matrix()
    matrix.append(series_map)
    matrix.append(geometry_map)
    matrix.metadata = ci.Cifti2MetaData(meta)
    hdr = ci.Cifti2Header(matrix)
    img = ci.Cifti2Image(bm_ts, hdr)
    img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES')

    out_file = "{}.dtseries.nii".format(split_filename(bold_file)[1])
    ci.save(img, out_file)
    return os.path.join(os.getcwd(), out_file)
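This variant accepts either FreeSurfer .annot files or GIFTI label files for medial-wall removal; a hypothetical call (paths invented):

out_file = _create_cifti_image(
    bold_file='sub-01_bold.nii.gz',
    label_file='subcortical_atlas.nii.gz',
    bold_surfs=['sub-01_hemi-L.func.gii', 'sub-01_hemi-R.func.gii'],
    annotation_files=['lh.aparc.annot', 'rh.aparc.annot'],
    tr=2.0,
    targets=('fsaverage5', 'MNI152NLin2009cAsym'))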
Example #51
    def _run_interface(self, runtime):

        # Get b=0 images from all the inputs
        b0_series, b0_indices, original_files = load_epi_dwi_fieldmaps(
            self.inputs.b0_file, self.inputs.b0_threshold)

        # Only get the requested number of images
        _, fmap_imain, fmap_report, _ = topup_inputs_from_4d_file(
            b0_series,
            b0_indices,
            original_files,
            image_source="EPI fieldmap",
            max_per_spec=self.inputs.max_num_b0s)
        LOGGER.info(fmap_report)

        # Get b=0 images and metadata from all the input images
        b0_fieldmap_metadata = []
        for image_path in set(original_files):
            pth, fname, _ = split_filename(image_path)
            original_json = op.join(pth, fname) + ".json"
            b0_fieldmap_metadata.append(original_json)

        # Warn the user if the metadata does not match
        merged_metadata = _merge_metadata(b0_fieldmap_metadata)
        merged_b0s = to_lps(fmap_imain, tuple(self.inputs.orientation))
        # Output just one 3/4d image and a sidecar
        if not self.inputs.output_3d_images:
            # Save the conformed fmap
            output_fmap = fname_presuffix(self.inputs.b0_file[0],
                                          suffix="conform",
                                          newpath=runtime.cwd)
            output_json = fname_presuffix(output_fmap,
                                          use_ext=False,
                                          suffix=".json")
            fmap_imain.to_filename(output_fmap)
            with open(output_json, "w") as sidecar:
                json.dump(merged_metadata, sidecar)
            self._results['fmap_file'] = output_fmap
            self._results['fmap_info'] = output_json
            return runtime

        image_list = []
        json_list = []
        for imgnum, img in enumerate(iter_img(merged_b0s)):

            # Save the conformed fmap and metadata
            output_fmap = fname_presuffix(self.inputs.b0_file[0],
                                          suffix="%s_%03d" %
                                          (self.inputs.orientation, imgnum),
                                          newpath=runtime.cwd)
            output_json = fname_presuffix(output_fmap,
                                          use_ext=False,
                                          suffix=".json")
            with open(output_json, "w") as sidecar:
                json.dump(merged_metadata, sidecar)
            img.to_filename(output_fmap)

            # Append to lists
            image_list.append(output_fmap)
            json_list.append(output_json)

        self._results['fmap_file'] = image_list
        self._results['fmap_info'] = json_list
        return runtime
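fname_presuffix, used above to build the output names, inserts a suffix before the (compound) extension; with use_ext=False the extension is dropped, which is how the .json sidecar name is derived. Illustrative values (paths invented):

from nipype.utils.filemanip import fname_presuffix

fname_presuffix('/data/b0.nii.gz', suffix='conform', newpath='/tmp')
# '/tmp/b0conform.nii.gz'
fname_presuffix('/tmp/b0conform.nii.gz', use_ext=False, suffix='.json')
# '/tmp/b0conform.json'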
Example #52
    def _run_interface(self, runtime):
        with threadpool_limits(limits=self.inputs.n_threads, user_api='blas'):
            f = nb.load(self.inputs.mag)
            mag = f.get_data()

            f = nb.load(self.inputs.phase)
            ph = f.get_data()

            if self.inputs.global_regressors:
                regressors = np.loadtxt(self.inputs.global_regressors)

            saveshape = np.array(mag.shape)
            nt = mag.shape[-1]

            scales = np.zeros(np.prod(saveshape[0:-1]))
            filt = np.zeros((np.prod(saveshape[0:-1]), nt))
            sim = np.zeros_like(filt)
            residuals = np.zeros_like(filt)

            delta = np.zeros_like(filt)
            eps = np.zeros_like(filt)
            xshift = np.zeros_like(filt)
            stdm = np.zeros(saveshape[0:-1])
            stdp = np.zeros(saveshape[0:-1])
            r2 = np.zeros_like(scales)

            mag = np.array(mag)
            mm = np.mean(mag, axis=-1)
            mask = mm > 0.03 * np.max(mm)

            linearfit = odr.Model(self.multiplelinear)

            # freqs for FT indices
            freqs = np.linspace(-1.0, 1.0, nt) / (2 * self.inputs.TR)

            noise_idx = np.where((abs(freqs) > self.inputs.noise_lb))[0]
            noise_mask = np.fft.fftshift(1.0 * (abs(freqs) > self.inputs.noise_lb))

            # Estimate sigmas in one preproc step
            for x in range(mag.shape[0]):
                temp = mag[x, :, :, :]
                mu = np.mean(temp, -1)
                stdm[x, :, :] = np.std(np.fft.ifft(np.fft.fft(temp - mu[...,  np.newaxis]) * noise_mask), -1)
                temp = ph[x, :, :, :]
                mu = np.mean(temp, -1)
                stdp[x, :, :] = np.std(np.fft.ifft(np.fft.fft(temp - mu[..., np.newaxis]) * noise_mask), -1)

            stdm = np.reshape(stdm, (-1,))
            stdp = np.reshape(stdp, (-1,))
            mask = np.reshape(mask, (-1,))
            mag = np.reshape(mag, (-1, nt))
            ph = np.reshape(ph, (-1, nt))
            for x in range(mag.shape[0]):
                if mask[x]:
                    mm = np.mean(mag[x, :])
                    mp = np.mean(ph[x, :])

                    if 'regressors' in locals():
                        design = np.row_stack((ph[x, :], regressors.T, np.ones(ph[x, :].shape)))
                        ests = np.hstack([[stdm[x] / stdp[x]], np.ones((regressors.shape[1],)), [mm / mp]])
                        mydata = odr.RealData(design, mag[x, :].T,
                                              sx=np.hstack((stdp[x],
                                                           np.std(regressors,
                                                           axis=0), 1)), sy=stdm[x])
                    else:
                        design = np.row_stack((ph[x, :], np.ones(ph[x, :].shape)))
                        ests = [stdm[x] / stdp[x], mm / mp]
                        mydata = odr.RealData(design, mag[x, :].T,
                                              sx=np.hstack([stdp[x], 1]),
                                              sy=stdm[x])

                    # and fit model
                    # mag = A*phase + B*regressors + C
                    # (y=mx+b)
                    # call : (x,y,sx,sy)
                    odr_obj = odr.ODR(mydata, linearfit, beta0=ests, maxit=200)
                    res = odr_obj.run()
                    est = res.y
                    r2[x] = 1.0 - sum((mag[x, :] - est) ** 2) / sum((mag[x, :] - mm) ** 2)

                    # take out the scaled phase signal and restore the mean
                    # (may need correction)
                    sim[x, :] = ph[x, :]*res.beta[0]

                    filt[x, :] = mag[x, :] - est + mm
                    # estimate residuals
                    residuals[x, :] = np.sign(mag[x, :]-est)*(np.sum(res.delta**2,
                                                              axis=0) + res.eps**2)
                    delta[x, :] = np.sum(res.delta, axis=0)
                    eps[x, :] = res.eps
                    xshift[x, :] = np.sum(res.xplus, axis=0)

            _, outname, _ = split_filename(self.inputs.mag)
            print(outname)
            outnii = nb.Nifti1Image(np.reshape(sim, saveshape),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_sim.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(filt, saveshape), affine=f.affine,
                                    header=f.get_header())
            outnii.to_filename(outname + '_filt.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(residuals, saveshape),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_residuals.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(delta, saveshape),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_xres.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(eps, saveshape),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_yres.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(xshift, saveshape),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_xplus.nii.gz')

            # plot fit statistic info
            outnii = nb.Nifti1Image(np.reshape(stdp, saveshape[0:-1]),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_stdp.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(stdm, saveshape[0:-1]),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_stdm.nii.gz')
            outnii = nb.Nifti1Image(np.reshape(r2, saveshape[0:-1]),
                                    affine=f.affine, header=f.get_header())
            outnii.to_filename(outname + '_r2.nii.gz')
            return runtime
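The fit above relies on a model callback self.multiplelinear that the snippet does not show. Given the design matrices constructed here (phase, optional regressors, and a row of ones), it is presumably a plain linear combination; a minimal sketch (the body is an assumption):

import numpy as np

def multiplelinear(beta, x):
    # scipy.odr model callback: y = beta[0]*x[0] + beta[1]*x[1] + ...
    # The design matrix already carries a row of ones, so the last
    # coefficient acts as the intercept.
    return np.dot(beta, x)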
Example #53
    def _run_interface(self, runtime):
        path, name, ext = split_filename(self.inputs.time_course_image)
        data_dir = op.abspath('./matching')
        copy_to = op.join(data_dir, 'components')
        if not op.exists(copy_to):
            os.makedirs(copy_to)
        copy_to = op.join(copy_to, name)
        shutil.copyfile(self.inputs.time_course_image, copy_to + ext)
        if ext == '.img':
            shutil.copyfile(op.join(path, name) + '.hdr', copy_to + '.hdr')
        elif ext == '.hdr':
            shutil.copyfile(op.join(path, name) + '.img', copy_to + '.img')
        time_course_file = copy_to + '.img'

        path, name, ext = split_filename(self.inputs.ica_mask_image)
        shutil.copyfile(self.inputs.ica_mask_image,
                        op.join(data_dir, name) + ext)
        if ext == '.img':
            shutil.copyfile(
                op.join(path, name) + '.hdr',
                op.join(data_dir, name) + '.hdr')
        elif ext == '.hdr':
            shutil.copyfile(
                op.join(path, name) + '.img',
                op.join(data_dir, name) + '.img')
        mask_file = op.abspath(self.inputs.ica_mask_image)
        repetition_time = self.inputs.repetition_time
        component_file = op.abspath(self.inputs.in_file)
        coma_rest_lib_path = op.abspath(self.inputs.coma_rest_lib_path)
        component_index = self.inputs.component_index
        if isdefined(self.inputs.out_stats_file):
            path, name, ext = split_filename(self.inputs.out_stats_file)
            if not ext == '.mat':
                ext = '.mat'
            out_stats_file = op.abspath(name + ext)
        else:
            if isdefined(self.inputs.subject_id):
                out_stats_file = op.abspath(self.inputs.subject_id + '_IC_' +
                                            str(self.inputs.component_index) +
                                            '.mat')
            else:
                out_stats_file = op.abspath('IC_' +
                                            str(self.inputs.component_index) +
                                            '.mat')

        d = dict(component_file=component_file,
                 IC=component_index,
                 time_course_file=time_course_file,
                 mask_name=mask_file,
                 Tr=repetition_time,
                 coma_rest_lib_path=coma_rest_lib_path,
                 out_stats_file=out_stats_file)
        script = Template("""
        restlib_path = '$coma_rest_lib_path';
        setup_restlib_paths(restlib_path);
        Tr = $Tr;
        out_stats_file = '$out_stats_file';
        component_file = '$component_file';
        maskName = '$mask_name';
        maskData = load_nii(maskName);        
        dataCompSpatial = load_nii(component_file)
        time_course_file = '$time_course_file'
        timeData = load_nii(time_course_file)
        IC = $IC
        [feature dataZ temporalData] = computeFingerprintSpaceTime(dataCompSpatial.img,timeData.img(:,IC),maskData.img,Tr);
        save '$out_stats_file'
        """).substitute(d)
        result = MatlabCommand(script=script,
                               mfile=True,
                               prescript=[''],
                               postscript=[''])
        r = result.run()
        print('Saving stats file as {s}'.format(s=out_stats_file))
        return runtime
Example #54
    def _run_interface(self, runtime):
        data_dir = op.abspath('./denoise/components')
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)

        in_files = self.inputs.in_files
        if len(self.inputs.in_files) > 1:
            print('Multiple ({n}) input images detected! Copying to {d}...'.format(
                n=len(self.inputs.in_files), d=data_dir))
            for in_file in self.inputs.in_files:
                path, name, ext = split_filename(in_file)
                shutil.copyfile(in_file, op.join(data_dir, name) + ext)
                if ext == '.img':
                    shutil.copyfile(
                        op.join(path, name) + '.hdr',
                        op.join(data_dir, name) + '.hdr')
                elif ext == '.hdr':
                    shutil.copyfile(
                        op.join(path, name) + '.img',
                        op.join(data_dir, name) + '.img')
            print('Copied!')
            in_files = self.inputs.in_files

        elif isdefined(self.inputs.in_file4d):
            print('Single four-dimensional image selected. Splitting and copying to {d}'.format(
                d=data_dir))
            in_files = nb.four_to_three(self.inputs.in_file4d)
            for in_file in in_files:
                path, name, ext = split_filename(in_file)
                shutil.copyfile(in_file, op.join(data_dir, name) + ext)
            print('Copied!')

        else:
            print('Single functional image provided. Ending...')
            in_files = self.inputs.in_files

        nComponents = len(in_files)
        path, name, ext = split_filename(self.inputs.time_course_image)
        shutil.copyfile(self.inputs.time_course_image,
                        op.join(data_dir, name) + ext)

        if ext == '.img':
            shutil.copyfile(
                op.join(path, name) + '.hdr',
                op.join(data_dir, name) + '.hdr')
        elif ext == '.hdr':
            shutil.copyfile(
                op.join(path, name) + '.img',
                op.join(data_dir, name) + '.img')

        data_dir = op.abspath('./denoise')
        path, name, ext = split_filename(self.inputs.ica_mask_image)
        shutil.copyfile(self.inputs.ica_mask_image,
                        op.join(data_dir, name) + ext)
        if ext == '.img':
            shutil.copyfile(
                op.join(path, name) + '.hdr',
                op.join(data_dir, name) + '.hdr')
        elif ext == '.hdr':
            shutil.copyfile(
                op.join(path, name) + '.img',
                op.join(data_dir, name) + '.img')
        mask_file = op.join(data_dir, name)
        repetition_time = self.inputs.repetition_time
        neuronal_image = op.abspath(self.inputs.out_neuronal_image)
        non_neuronal_image = op.abspath(self.inputs.out_non_neuronal_image)
        coma_rest_lib_path = op.abspath(self.inputs.coma_rest_lib_path)
        d = dict(data_dir=data_dir,
                 mask_name=mask_file,
                 nComponents=nComponents,
                 Tr=repetition_time,
                 nameNeuronal=neuronal_image,
                 nameNonNeuronal=non_neuronal_image,
                 coma_rest_lib_path=coma_rest_lib_path)
        script = Template("""
        restlib_path = '$coma_rest_lib_path';
        setup_restlib_paths(restlib_path)
        dataDir = '$data_dir';
        maskName = '$mask_name';
        nCompo = $nComponents;
        Tr = $Tr;
        nameNeuronalData = '$nameNeuronal';
        nameNonNeuronalData = '$nameNonNeuronal';
        denoiseImage(dataDir,maskName,nCompo,Tr,nameNeuronalData,nameNonNeuronalData, restlib_path);
        """).substitute(d)
        result = MatlabCommand(script=script,
                               mfile=True,
                               prescript=[''],
                               postscript=[''])
        r = result.run()
        print('Neuronal component image saved as {n}'.format(n=neuronal_image))
        print('Non-neuronal component image saved as {n}'.format(
            n=non_neuronal_image))
        return runtime
Example #55
def test_flirt(setup_flirt):
    # setup
    tmpdir, infile, reffile = setup_flirt

    flirter = fsl.FLIRT()
    assert flirter.cmd == 'flirt'

    flirter.inputs.bins = 256
    flirter.inputs.cost = 'mutualinfo'

    flirted = fsl.FLIRT(in_file=infile, reference=reffile,
                        out_file='outfile', out_matrix_file='outmat.mat',
                        bins=256,
                        cost='mutualinfo')
    flirt_est = fsl.FLIRT(in_file=infile, reference=reffile,
                          out_matrix_file='outmat.mat',
                          bins=256,
                          cost='mutualinfo')
    assert flirter.inputs != flirted.inputs
    assert flirted.inputs != flirt_est.inputs

    assert flirter.inputs.bins == flirted.inputs.bins
    assert flirter.inputs.cost == flirt_est.inputs.cost
    realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \
        '-bins 256 -cost mutualinfo' % (infile, reffile)
    assert flirted.cmdline == realcmd

    flirter = fsl.FLIRT()
    # infile not specified
    with pytest.raises(ValueError):
        flirter.cmdline
    flirter.inputs.in_file = infile
    # reference not specified
    with pytest.raises(ValueError):
        flirter.cmdline
    flirter.inputs.reference = reffile

    # Generate outfile and outmatrix
    pth, fname, ext = split_filename(infile)
    outfile = fsl_name(flirter, '%s_flirt' % fname)
    outmat = '%s_flirt.mat' % fname
    realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile,
                                                         outfile, outmat)
    assert flirter.cmdline == realcmd

    # test apply_xfm option
    axfm = deepcopy(flirter)
    axfm.inputs.apply_xfm = True
    # in_matrix_file or uses_qform must be defined
    with pytest.raises(RuntimeError):
        axfm.cmdline
    axfm2 = deepcopy(axfm)
    # test uses_qform
    axfm.inputs.uses_qform = True
    assert axfm.cmdline == (realcmd + ' -applyxfm -usesqform')
    # test in_matrix_file
    axfm2.inputs.in_matrix_file = reffile
    assert axfm2.cmdline == (realcmd + ' -applyxfm -init %s' % reffile)


    _, tmpfile = tempfile.mkstemp(suffix='.nii', dir=tmpdir)
    # Loop over all inputs, set a reasonable value and make sure the
    # cmdline is updated correctly.
    for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()):
        # Skip mandatory inputs and the trait methods
        if key in ('trait_added', 'trait_modified', 'in_file', 'reference',
                   'environ', 'output_type', 'out_file', 'out_matrix_file',
                   'in_matrix_file', 'apply_xfm', 'ignore_exception',
                   'terminal_output', 'out_log', 'save_log'):
            continue
        param = None
        value = None
        if key == 'args':
            param = '-v'
            value = '-v'
        elif isinstance(trait_spec.trait_type, File):
            value = tmpfile
            param = trait_spec.argstr % value
        elif trait_spec.default is False:
            param = trait_spec.argstr
            value = True
        elif key in ('searchr_x', 'searchr_y', 'searchr_z'):
            value = [-45, 45]
            param = trait_spec.argstr % ' '.join(str(elt) for elt in value)
        else:
            value = trait_spec.default
            param = trait_spec.argstr % value
        cmdline = 'flirt -in %s -ref %s' % (infile, reffile)
        # Handle autogeneration of outfile
        pth, fname, ext = split_filename(infile)
        outfile = fsl_name(fsl.FLIRT(), '%s_flirt' % fname)
        outfile = ' '.join(['-out', outfile])
        # Handle autogeneration of outmatrix
        outmatrix = '%s_flirt.mat' % fname
        outmatrix = ' '.join(['-omat', outmatrix])
        # Build command line
        cmdline = ' '.join([cmdline, outfile, outmatrix, param])
        flirter = fsl.FLIRT(in_file=infile, reference=reffile)
        setattr(flirter.inputs, key, value)
        assert flirter.cmdline == cmdline

    # Test OutputSpec
    flirter = fsl.FLIRT(in_file=infile, reference=reffile)
    pth, fname, ext = split_filename(infile)
    flirter.inputs.out_file = ''.join(['foo', ext])
    flirter.inputs.out_matrix_file = ''.join(['bar', ext])
    outs = flirter._list_outputs()
    assert outs['out_file'] == \
        os.path.join(os.getcwd(), flirter.inputs.out_file)
    assert outs['out_matrix_file'] == \
        os.path.join(os.getcwd(), flirter.inputs.out_matrix_file)
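The setup_flirt fixture unpacked at the top of this test is not shown; a hypothetical reconstruction (array contents and affine are arbitrary):

import numpy as np
import nibabel as nb
import pytest

@pytest.fixture()
def setup_flirt(tmpdir):
    # Two small NIfTI files act as the moving and reference images.
    infile = str(tmpdir.join('infile.nii'))
    reffile = str(tmpdir.join('reffile.nii'))
    data = np.random.rand(4, 4, 4).astype('float32')
    for fname in (infile, reffile):
        nb.Nifti1Image(data, np.eye(4)).to_filename(fname)
    return str(tmpdir), infile, reffile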
Example #56
    def _run_interface(self, runtime):
        path, name, ext = split_filename(self.inputs.time_course_image)
        data_dir = op.abspath('./matching')
        copy_to = op.join(data_dir, 'components')
        if not op.exists(copy_to):
            os.makedirs(copy_to)
        copy_to = op.join(copy_to, name)
        shutil.copyfile(self.inputs.time_course_image, copy_to + ext)
        if ext == '.img':
            shutil.copyfile(op.join(path, name) + '.hdr', copy_to + '.hdr')
        elif ext == '.hdr':
            shutil.copyfile(op.join(path, name) + '.img', copy_to + '.img')

        data_dir = op.abspath('./matching/components')
        in_files = self.inputs.in_files
        if len(self.inputs.in_files) > 1:
            print('Multiple ({n}) input images detected! Copying to {d}...'.format(
                n=len(self.inputs.in_files), d=data_dir))
            for in_file in self.inputs.in_files:
                path, name, ext = split_filename(in_file)
                shutil.copyfile(in_file, op.join(data_dir, name) + ext)
                if ext == '.img':
                    shutil.copyfile(
                        op.join(path, name) + '.hdr',
                        op.join(data_dir, name) + '.hdr')
                elif ext == '.hdr':
                    shutil.copyfile(
                        op.join(path, name) + '.img',
                        op.join(data_dir, name) + '.img')
            print('Copied!')

        elif isdefined(self.inputs.in_file4d):
            print('Single four-dimensional image selected. Splitting and copying to {d}'.format(
                d=data_dir))
            in_files = nb.four_to_three(self.inputs.in_file4d)
            for in_file in in_files:
                path, name, ext = split_filename(in_file)
                shutil.copyfile(in_file, op.join(data_dir, name) + ext)
            print('Copied!')

        else:
            raise Exception('Single functional image provided. Ending...')

        nComponents = len(in_files)
        repetition_time = self.inputs.repetition_time
        coma_rest_lib_path = op.abspath(self.inputs.coma_rest_lib_path)

        data_dir = op.abspath('./matching')
        if not op.exists(data_dir):
            os.makedirs(data_dir)

        path, name, ext = split_filename(self.inputs.ica_mask_image)
        copy_to = op.join(data_dir, 'components')
        if not op.exists(copy_to):
            os.makedirs(copy_to)
        copy_to = op.join(copy_to, name)
        shutil.copyfile(self.inputs.ica_mask_image, copy_to + ext)

        if ext == '.img':
            shutil.copyfile(op.join(path, name) + '.hdr', copy_to + '.hdr')
        elif ext == '.hdr':
            shutil.copyfile(op.join(path, name) + '.img', copy_to + '.img')

        mask_file = op.abspath(self.inputs.ica_mask_image)
        out_stats_file = op.abspath(self.inputs.out_stats_file)
        d = dict(out_stats_file=out_stats_file,
                 data_dir=data_dir,
                 mask_name=mask_file,
                 timecourse=op.abspath(self.inputs.time_course_image),
                 subj_id=self.inputs.subject_id,
                 nComponents=nComponents,
                 Tr=repetition_time,
                 coma_rest_lib_path=coma_rest_lib_path)

        script = Template("""
        restlib_path = '$coma_rest_lib_path';
        setup_restlib_paths(restlib_path)
        namesTemplate = {'rAuditory_corr','rCerebellum_corr','rDMN_corr','rECN_L_corr','rECN_R_corr','rSalience_corr','rSensorimotor_corr','rVisual_lateral_corr','rVisual_medial_corr','rVisual_occipital_corr'};
        indexNeuronal = 1:$nComponents;
        nCompo = $nComponents;
        out_stats_file = '$out_stats_file'; 
        Tr = $Tr;
        data_dir = '$data_dir'
        mask_name = '$mask_name'
        subj_id = '$subj_id'
        time_course_name = '$timecourse'

        [dataAssig maxGoF] = selectionMatchClassification(data_dir, subj_id, mask_name, time_course_name, namesTemplate,indexNeuronal,nCompo,Tr,restlib_path)
                            
        for i=1:size(dataAssig,1)
            str{i} = sprintf('Template %d: %s to component %d with GoF %f is neuronal %d prob=%f',dataAssig(i,1),namesTemplate{i},dataAssig(i,2),dataAssig(i,3),dataAssig(i,4),dataAssig(i,5));
            disp(str{i});
        end
        maxGoF
        templates = dataAssig(:,1)
        components = dataAssig(:,2)
        gofs = dataAssig(:,3)
        neuronal_bool = dataAssig(:,4)
        neuronal_prob = dataAssig(:,5)
        save '$out_stats_file'
        """).substitute(d)
        print('Saving stats file as {s}'.format(s=out_stats_file))
        result = MatlabCommand(script=script,
                               mfile=True,
                               prescript=[''],
                               postscript=[''])
        r = result.run()
        return runtime
Example #57
    def _run_interface(self, runtime):
        preprocessedfile = self.inputs.preprocessedfile
        regfile = self.inputs.regfile

        #invert transform matrix
        invt = fsl.ConvertXFM()
        invt.inputs.in_file = regfile
        invt.inputs.invert_xfm = True
        invt.inputs.out_file = regfile + '_inv.mat'
        invt_result = invt.run()

        #define source mask (surface, volume)
        input_labels = self.inputs.vol_source + self.inputs.vol_target
        sourcemask = get_mask(input_labels, self.inputs.parcfile)
        sourcemaskfile = os.path.abspath('sourcemask.nii')
        sourceImg = nb.Nifti1Image(sourcemask, None)
        nb.save(sourceImg, sourcemaskfile)

        #transform anatomical mask to functional space
        sourcexfm = fsl.ApplyXfm()
        sourcexfm.inputs.in_file = sourcemaskfile
        sourcexfm.inputs.in_matrix_file = invt_result.outputs.out_file
        _, base, _ = split_filename(sourcemaskfile)
        sourcexfm.inputs.out_file = base + '_xfm.nii.gz'
        sourcexfm.inputs.reference = preprocessedfile
        sourcexfm.inputs.interp = 'nearestneighbour'
        sourcexfm.inputs.apply_xfm = True
        sourcexfm_result = sourcexfm.run()

        #manual source data creation (-mask_source option not yet available in afni)
        sourcemask_xfm = nb.load(sourcexfm_result.outputs.out_file).get_data()
        inputdata = nb.load(preprocessedfile).get_data()
        maskedinput = np.zeros_like(inputdata)
        for timepoint in range(inputdata.shape[3]):
            maskedinput[:, :, :,
                        timepoint] = np.where(sourcemask_xfm,
                                              inputdata[:, :, :, timepoint], 0)
        maskedinputfile = os.path.abspath('inputfile.nii')
        inputImg = nb.Nifti1Image(maskedinput, None)
        nb.save(inputImg, maskedinputfile)

        ##PREPARE TARGET MASK##

        #define target mask (surface, volume)
        targetmask = get_mask(self.inputs.vol_target, self.inputs.parcfile)
        targetmaskfile = os.path.abspath('targetmask.nii')
        targetImg = nb.Nifti1Image(targetmask, None)
        nb.save(targetImg, targetmaskfile)

        #same transform for target
        targetxfm = fsl.ApplyXfm()
        targetxfm.inputs.in_file = targetmaskfile
        targetxfm.inputs.in_matrix_file = invt_result.outputs.out_file
        _, base, _ = split_filename(targetmaskfile)
        targetxfm.inputs.out_file = base + '_xfm.nii.gz'
        targetxfm.inputs.reference = preprocessedfile
        targetxfm.inputs.interp = 'nearestneighbour'
        targetxfm.inputs.apply_xfm = True
        targetxfm_result = targetxfm.run()

        return runtime
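As an aside, the per-timepoint np.where loop above can be collapsed into one broadcasted operation; an equivalent sketch:

# broadcast the 3D mask across the time axis instead of looping
maskedinput = inputdata * (sourcemask_xfm[..., np.newaxis] > 0)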
Example #58
 def _gen_outfilename(self):
     _, name, _ = split_filename(self.inputs.in_files[0])
     return name + '_gmsh.' + self.inputs.output_type
Example #59
 def _gen_outfilename(self):
     _, name, _ = split_filename(self.inputs.scheme_file)
     return name + '_MESD.Bdouble'
Example #60
    def _parse_stdout(self, stdout):
        import re
        import os
        files = []
        reoriented_files = []
        reoriented_and_cropped_files = []
        bvecs = []
        bvals = []
        skip = False
        last_added_file = None
        for line in stdout.split("\n"):
            if not skip:
                file = None
                if line.startswith("Saving "):
                    file = line[len("Saving "):]
                elif line.startswith("GZip..."):
                    # for gzipped outputs, paths are not absolute
                    if isdefined(self.inputs.output_dir):
                        output_dir = self.inputs.output_dir

                    else:
                        output_dir = self._gen_filename('output_dir')
                    file = os.path.abspath(
                        os.path.join(output_dir, line[len("GZip..."):]))
                elif line.startswith("Number of diffusion directions "):
                    if last_added_file:
                        base, filename, ext = split_filename(last_added_file)
                        bvecs.append(os.path.join(base, filename + ".bvec"))
                        bvals.append(os.path.join(base, filename + ".bval"))

                elif re.search('.*->(.*)', line):
                    val = re.search('.*->(.*)', line)
                    val = val.groups()[0]
                    if isdefined(self.inputs.output_dir):
                        output_dir = self.inputs.output_dir
                    else:
                        output_dir = self._gen_filename('output_dir')
                    val = os.path.join(output_dir, val)
                    file = val

                if file:
                    if last_added_file and os.path.exists(
                            file) and not last_added_file in file:
                        files.append(file)
                    last_added_file = file
                    continue

                if line.startswith("Reorienting as "):
                    reoriented_files.append(line[len("Reorienting as "):])
                    skip = True
                    continue
                elif line.startswith("Cropping NIfTI/Analyze image "):
                    base, filename = os.path.split(
                        line[len("Cropping NIfTI/Analyze image "):])
                    filename = "c" + filename
                    reoriented_and_cropped_files.append(
                        os.path.join(base, filename))
                    skip = True
                    continue
            skip = False
        return files, reoriented_files, reoriented_and_cropped_files, bvecs, bvals
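The parser can be exercised directly on captured converter output; a hypothetical stdout (iface stands for an instance of this interface, and the messages are modeled on the lines matched above):

stdout = "\n".join([
    "Saving /out/s01.nii",
    "Reorienting as /out/so01.nii",
])
files, reoriented, cropped, bvecs, bvals = iface._parse_stdout(stdout)
# reoriented == ['/out/so01.nii']; files == [] here, because a path is only
# appended once a previous path was seen, the new path exists on disk, and
# it differs from the previous one.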