Example no. 1
def create_workflow():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='data',
                        help="Path to bids dataset")
    args = parser.parse_args()
    if not os.path.exists(args.data):
        raise IOError('Input data not found')
    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)

    # grab data from bids structure
    layout = BIDSLayout(args.data)
    subj = layout.get_subjects()[0]
    func = [f.filename for f in layout.get(subject=subj, type='bold',
                                           extensions=['nii.gz'])][0]

    outfile = os.path.join(OUTDIR, 'test_{}_{}_motcor'.format(subj, ENV['os']))

    # run interface
    mcflirt = MCFLIRT()
    mcflirt.inputs.in_file = func
    # FIX: this has to be unique for each environment
    mcflirt.inputs.out_file = outfile + '.nii.gz'
    res = mcflirt.run()

    # write out json to keep track of information
    ENV.update({'inputs': res.inputs})
    ENV.update({'nipype_version': nipype.__version__})
    #ENV.update({'outputs': res.outputs})
    # write out to json
    env_to_json(ENV, outname=outfile + '.json')
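The env_to_json helper is not shown in this excerpt; a minimal sketch of what it could look like, assuming it simply serializes the bookkeeping dict (the name and signature are taken from the call above, the body is an assumption):

def env_to_json(env, outname):
    # hypothetical helper: dump the environment/bookkeeping dict to a JSON file
    import json
    with open(outname, 'w') as fp:
        # default=str handles non-serializable entries such as res.inputs
        json.dump(env, fp, indent=2, default=str)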
def test_mcflirt_run():
    file_inp = os.path.join(Data_dir, "data_input/sub-02_task-fingerfootlips_bold.nii.gz")
    file_out_ref = os.path.join(Data_dir, "data_ref/sub-02_task-fingerfootlips_bold_MCF.nii.gz")


    mcflt = MCFLIRT()

    mcflt.inputs.in_file = file_inp
    mcflt.inputs.out_file = "output_mcf.nii.gz"

    mcflt.run()

    data_out_ref = nb.load(file_out_ref).get_data()
    data_out = nb.load(mcflt.inputs.out_file).get_data()

    assert np.allclose(data_out_ref, data_out) # think about atol and rtol
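If the default tolerances turn out to be too strict or too loose, np.allclose accepts explicit rtol and atol arguments; a self-contained sketch with illustrative values only:

import numpy as np

a = np.random.rand(4, 4, 4, 10)
b = a + 1e-6 * np.random.rand(*a.shape)
# illustrative tolerances; tune rtol/atol against your reference data
assert np.allclose(a, b, rtol=1e-5, atol=1e-4)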
def test_mcflirt_run_copy_image(image_fmri_nii, image_copy_fmri_nii, tmpdir):
    file_inp, image_inp, data_inp = image_fmri_nii
    filename_copy, data_copy = image_copy_fmri_nii

    mcflt = MCFLIRT()
    #pdb.set_trace()
    mcflt.inputs.in_file = filename_copy
    mcflt.inputs.out_file = str(tmpdir.join("output_mcf_copy_im.nii.gz"))
    mcflt.basedir = "test"

    mcflt.run()

    img_out = nb.load(mcflt.inputs.out_file)
    data_out = img_out.get_data()
    #pdb.set_trace()
    # since all images are the same mcflirt shouldn't do anything
    assert (data_copy == data_out).all()
Example no. 4
def init_complex_mcf(name='', ref=False, fix_ge=True, negate=True):
    inputnode = Node(
        IdentityInterface(fields=['real_file', 'imag_file', 'ref_file']),
        name='inputnode')
    outputnode = Node(
        IdentityInterface(fields=['x_file', 'ref_file', 'mask_file']),
        name='outputnode')

    ri = Node(Complex(fix_ge=fix_ge,
                      negate=negate,
                      magnitude_out_file=name + '_mag.nii.gz',
                      real_out_file=name + '_r.nii.gz',
                      imag_out_file=name + '_i.nii.gz'),
              name='ri_' + name)
    moco = Node(MCFLIRT(mean_vol=not ref, save_mats=True), name='moco_' + name)
    apply_r = Node(ApplyXfm4D(four_digit=True), name='apply_r' + name)
    apply_i = Node(ApplyXfm4D(four_digit=True), name='apply_i' + name)
    x = Node(Complex(complex_out_file=name + '_x.nii.gz'), name='x_' + name)
    f = Node(Filter(complex_in=True, complex_out=True, filter_spec='Tukey'),
             name='filter_' + name)

    wf = Workflow(name='prep_' + name)
    wf.connect([(inputnode, ri, [('real_file', 'real')]),
                (inputnode, ri, [('imag_file', 'imag')]),
                (ri, moco, [('magnitude_out_file', 'in_file')]),
                (ri, apply_r, [('real_out_file', 'in_file')]),
                (ri, apply_i, [('imag_out_file', 'in_file')]),
                (moco, apply_r, [('mat_dir', 'trans_dir')]),
                (moco, apply_i, [('mat_dir', 'trans_dir')]),
                (apply_r, x, [('out_file', 'real')]),
                (apply_i, x, [('out_file', 'imag')]),
                (x, f, [('complex_out_file', 'in_file')]),
                (f, outputnode, [('out_file', 'x_file')])])
    if not ref:
        mask = Node(BET(mask=True, no_output=True), name='mask')
        wf.connect([(moco, mask, [('mean_img', 'in_file')]),
                    (moco, apply_r, [('mean_img', 'ref_vol')]),
                    (moco, apply_i, [('mean_img', 'ref_vol')]),
                    (moco, outputnode, [('mean_img', 'ref_file')]),
                    (mask, outputnode, [('mask_file', 'mask_file')])])
    else:
        wf.connect([(inputnode, moco, [('ref_file', 'ref_file')]),
                    (inputnode, apply_r, [('ref_file', 'ref_vol')]),
                    (inputnode, apply_i, [('ref_file', 'ref_vol')])])

    return wf
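A minimal usage sketch for this factory, assuming the surrounding module defines the imported interfaces (Complex, Filter, ApplyXfm4D) and that the input files exist; paths and names below are placeholders:

wf = init_complex_mcf(name='echo1', ref=False)
wf.base_dir = '/tmp/complex_mcf'                     # placeholder scratch directory
wf.inputs.inputnode.real_file = 'echo1_real.nii.gz'  # placeholder inputs
wf.inputs.inputnode.imag_file = 'echo1_imag.nii.gz'
wf.run()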
Example no. 5
def make_w_topup():
    n_in = Node(IdentityInterface(fields=[
        'func',  # after motion correction
        'fmap',
        ]), name='input')

    n_out = Node(IdentityInterface(fields=[
        'func',
        ]), name='output')

    n_mean_func = Node(MeanImage(), name='mean_func')

    n_mc_fmap = Node(MCFLIRT(), name='motion_correction_fmap')
    n_mean_fmap = Node(MeanImage(), name='mean_fmap')

    n_list = Node(Merge_list(2), name='list')
    n_merge = Node(Merge(), name='merge')
    n_merge.inputs.dimension = 't'

    n_topup = Node(TOPUP(), name='topup')
    n_topup.inputs.encoding_file = _generate_acqparams()
    n_topup.inputs.subsamp = 1  # slower, but it accounts for an odd number of slices

    n_acqparam = Node(function_acq_params, name='acquisition_parameters')

    n_apply = Node(ApplyTOPUP(), name='topup_apply')
    n_apply.inputs.method = 'jac'

    w = Workflow('topup')
    w.connect(n_in, 'fmap', n_mc_fmap, 'in_file')
    w.connect(n_mc_fmap, 'out_file', n_mean_fmap, 'in_file')
    w.connect(n_in, 'func', n_mean_func, 'in_file')
    w.connect(n_mean_func, 'out_file', n_list, 'in1')
    w.connect(n_mean_fmap, 'out_file', n_list, 'in2')
    w.connect(n_list, 'out', n_merge, 'in_files')
    w.connect(n_merge, 'merged_file', n_topup, 'in_file')

    w.connect(n_in, 'func', n_apply, 'in_files')
    w.connect(n_topup, 'out_fieldcoef', n_apply, 'in_topup_fieldcoef')
    w.connect(n_topup, 'out_movpar', n_apply, 'in_topup_movpar')
    w.connect(n_in, 'func', n_acqparam, 'in_file')
    w.connect(n_acqparam, 'encoding_file', n_apply, 'encoding_file')

    w.connect(n_apply, 'out_corrected', n_out, 'func')

    return w
Example no. 6
def mcflirt(infile: Path) -> Path:
    # performing motion correction FIRST is supported by e.g.
    # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6736626/
    # given that we are dealing with FSL
    outfile = Path(str(infile).replace("bold.nii.gz", MCFLIRT_SUFFIX))
    if outfile.exists():
        return outfile
    cmd = MCFLIRT(
        in_file=str(infile),
        out_file=str(outfile),
        output_type="NIFTI_GZ",
        save_mats=False,
        save_rms=False,
        stages=3,
        stats_imgs=False,
        terminal_output="stream",
        mean_vol=False,  # speed up things
    )

    results = cmd.run()
    return Path(results.outputs.out_file)
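A usage sketch for this helper, assuming MCFLIRT_SUFFIX is a module-level constant such as "bold_mcf.nii.gz" and that FSL is available on the PATH; the input path is hypothetical:

from pathlib import Path

corrected = mcflirt(Path('/data/sub-01/func/sub-01_task-rest_bold.nii.gz'))
print(corrected)  # e.g. .../sub-01_task-rest_bold_mcf.nii.gz, depending on MCFLIRT_SUFFIX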
def test_mcflirt_run(image_fmri_nii, cost_function, tmpdir):
    file_inp, _, data_inp = image_fmri_nii

    mcflt = MCFLIRT()

    mcflt.inputs.in_file = file_inp
    mcflt.inputs.out_file = str(tmpdir.join("output_mcf.nii.gz"))
    mcflt.basedir = "test"
    setattr(mcflt.inputs, "cost", cost_function)

    mcflt.run()

    data_out = nb.load(mcflt.inputs.out_file).get_data()

    # the middle image shouldn't change
    assert (data_inp[:, :, :, data_inp.shape[3] //
                     2] == data_out[:, :, :, data_inp.shape[3] // 2]).all()

    # i'm assuming that the sum shouldn't change "too much"
    for i in range(data_inp.shape[3]):
        assert np.allclose(data_inp[:, :, :, i].sum(),
                           data_out[:, :, :, i].sum(),
                           rtol=5e-3)
def test_mcflirt_translate_image(image_fmri_nii, tmpdir):
    file_inp, image_inp, data_inp = image_fmri_nii

    mcflt = MCFLIRT()

    filename_trans, data_trans = image_translate_nii(data_inp, image_inp)

    mcflt.inputs.in_file = filename_trans
    mcflt.inputs.out_file = str(tmpdir.join("output_mcf_translate.nii.gz"))
    mcflt.basedir = "test"
    mcflt.inputs.smooth = 0.

    mcflt.run()

    img_out = nb.load(mcflt.inputs.out_file)
    data_out = img_out.get_data()

    # should think about some other error metric
    # this one gives a big error
    # mcflt.inputs.smooth = 0. doesn't really change
    for i in [0, 2]:
        assert np.allclose(data_out[:, :, :, i],
                           data_out[:, :, :, 1],
                           rtol=1e-1)
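The image_translate_nii helper used above is not shown on this page; a minimal sketch of what it presumably does (shift every volume except one reference volume so MCFLIRT has a rigid motion to undo) - the name is taken from the call above, the body is an assumption:

def image_translate_nii(data_inp, image_inp, shift=3):
    # hypothetical helper: translate all volumes but one by a few voxels along x
    import numpy as np
    import nibabel as nb
    data_trans = data_inp.copy()
    for t in range(data_trans.shape[3]):
        if t != 1:  # keep volume 1 untouched as the reference
            data_trans[..., t] = np.roll(data_trans[..., t], shift, axis=0)
    filename_trans = 'translated.nii.gz'
    nb.Nifti1Image(data_trans, image_inp.affine, image_inp.header).to_filename(filename_trans)
    return filename_trans, data_trans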
Example no. 9
    def _run_interface(self, runtime):
        in_files = self.inputs.in_files
        if not isinstance(in_files, list):
            in_files = [self.inputs.in_files]

        if self.inputs.to_ras:
            in_files = [reorient(inf, newpath=runtime.cwd) for inf in in_files]

        run_hmc = self.inputs.hmc and len(in_files) > 1

        nii_list = []
        # Remove extra dimensions of size one
        for i, f in enumerate(in_files):
            filenii = nb.load(f)
            filenii = nb.squeeze_image(filenii)
            if len(filenii.shape) == 5:
                raise RuntimeError("Input image (%s) is 5D." % f)
            if filenii.dataobj.ndim == 4:
                nii_list += nb.four_to_three(filenii)
            else:
                nii_list.append(filenii)

        if len(nii_list) > 1:
            filenii = nb.concat_images(nii_list)
        else:
            filenii = nii_list[0]

        merged_fname = fname_presuffix(self.inputs.in_files[0],
                                       suffix="_merged",
                                       newpath=runtime.cwd)
        filenii.to_filename(merged_fname)
        self._results["out_file"] = merged_fname
        self._results["out_avg"] = merged_fname

        if filenii.dataobj.ndim < 4:
            # TODO: generate identity out_mats and zero-filled out_movpar
            return runtime

        if run_hmc:
            from nipype.interfaces.fsl import MCFLIRT

            mcflirt = MCFLIRT(
                cost="normcorr",
                save_mats=True,
                save_plots=True,
                ref_vol=0,
                in_file=merged_fname,
            )
            mcres = mcflirt.run()
            filenii = nb.load(mcres.outputs.out_file)
            self._results["out_file"] = mcres.outputs.out_file
            self._results["out_mats"] = mcres.outputs.mat_file
            self._results["out_movpar"] = mcres.outputs.par_file

        hmcdata = filenii.get_fdata(dtype="float32")
        if self.inputs.grand_mean_scaling:
            if not isdefined(self.inputs.in_mask):
                mean = np.median(hmcdata, axis=-1)
                thres = np.percentile(mean, 25)
                mask = mean > thres
            else:
                mask = nb.load(
                    self.inputs.in_mask).get_fdata(dtype="float32") > 0.5

            nimgs = hmcdata.shape[-1]
            means = np.median(hmcdata[mask[..., np.newaxis]].reshape(
                (-1, nimgs)).T,
                              axis=-1)
            max_mean = means.max()
            for i in range(nimgs):
                hmcdata[..., i] *= max_mean / means[i]

        hmcdata = hmcdata.mean(axis=3)
        if self.inputs.zero_based_avg:
            hmcdata -= hmcdata.min()

        self._results["out_avg"] = fname_presuffix(self.inputs.in_files[0],
                                                   suffix="_avg",
                                                   newpath=runtime.cwd)
        nb.Nifti1Image(hmcdata, filenii.affine,
                       filenii.header).to_filename(self._results["out_avg"])

        return runtime
Example no. 10
    m_image = nib.load(mask_image).get_data()
    out_image = np.zeros(in_image.shape)
    idx = np.where(m_image)
    if (invert_sign):
        out_image[idx] = stats.zscore(in_image[idx]) * -1.0
    else:
        out_image[idx] = stats.zscore(in_image[idx])
        print("Note: NOT inverting z-scores.")
    img = nib.Nifti1Image(out_image, nib.load(input_image).affine)
    img.to_filename("rcfe.nii")

    return path.abspath("rcfe.nii")


# Motion correction on fmri time series
mcflirt_node = Node(MCFLIRT(mean_vol=True, output_type='NIFTI'),
                    name="mcflirt")
# mcflirt_node = Node(MCFLIRT(mean_vol=True, output_type='NIFTI'), iterables=['in_file'], name="mcflirt")

# Compute mean(fslmaths) of the fmri time series
mean_fmri_node = Node(MeanImage(output_type='NIFTI'), name="meanimage")

# Skull Strip the fmri time series
bet_fmri_node = Node(BET(output_type='NIFTI', mask=True), name="bet_fmri")

# Bias Correct the fmri time series
bias_correction_node = Node(N4BiasFieldCorrection(), name='bias_correction')

# Returns the relative concentration of brain iron
rcfe_node = Node(Function(input_names=['input_image', 'mask_image'],
                          output_names=['output_image'],
Example no. 11
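The workflow below relies on a pickindex helper that is not part of this excerpt; a minimal sketch of what it presumably does (select one entry from a list output such as FAST's tissue_class_files) - the body is an assumption:

def pickindex(files, index):
    # hypothetical helper: return the item at position `index` from a list output
    return files[index]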
def preproc_workflow(input_dir,
                     output_dir,
                     subject_list,
                     ses_list,
                     anat_file,
                     func_file,
                     scan_size=477,
                     bet_frac=0.37):
    """
    The preprocessing workflow used in the preparation of the psilocybin vs escitalopram rsFMRI scans.
    Workflows and notes are defined throughout. Inputs are designed to be general, and masks and a default MNI space are provided.

    :param input_dir: The input file directory containing all scans in BIDS format
    :param output_dir: The output file directory
    :param subject_list: a list of subject numbers
    :param ses_list: a list of scan numbers (session numbers)
    :param anat_file: The format of the anatomical scan within the input directory
    :param func_file: The format of the functional scan within the input directory
    :param scan_size: The length of the scan in number of volumes; most 10-minute scans are around 400-500 depending
    upon scanner defaults and parameters - confirm by looking at your data
    :param bet_frac: brain extraction fractional intensity threshold
    :return: the preprocessing workflow
    """
    preproc = Workflow(name='preproc')
    preproc.base_dir = output_dir

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'ses']),
                      name="infosource")

    infosource.iterables = [('subject_id', subject_list), ('ses', ses_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    templates = {
        'anat': anat_file,
        'func': func_file
    }  # define the template of each file input

    selectfiles = Node(SelectFiles(templates, base_directory=input_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=output_dir, container=output_dir),
                    name="datasink")

    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('ses', 'ses')])])
    ''' 
    This is your functional processing workflow, used to trim scans, despike the signal, slice-time correct, 
    and motion correct your data 
    '''

    fproc = Workflow(name='fproc')  # the functional processing workflow

    # ExtractROI - skip dummy scans at the beginning of the recording by removing the first three
    trim = Node(ExtractROI(t_min=3, t_size=scan_size, output_type='NIFTI_GZ'),
                name="trim")

    # 3dDespike - despike
    despike = Node(Despike(outputtype='NIFTI_GZ', args='-NEW'), name="despike")
    fproc.connect([(trim, despike, [('roi_file', 'in_file')])])
    preproc.connect([(selectfiles, fproc, [('func', 'trim.in_file')])])

    # 3dTshift - slice time correction
    slicetime = Node(TShift(outputtype='NIFTI_GZ', tpattern='alt+z2'),
                     name="slicetime")
    fproc.connect([(despike, slicetime, [('out_file', 'in_file')])])

    # 3dVolreg - correct motion and output 1d matrix
    moco = Node(Volreg(outputtype='NIFTI_GZ',
                       interp='Fourier',
                       zpad=4,
                       args='-twopass'),
                name="moco")
    fproc.connect([(slicetime, moco, [('out_file', 'in_file')])])

    moco_bpfdt = Node(
        MOCObpfdt(), name='moco_bpfdt'
    )  # use the matlab function to correct the motion regressor
    fproc.connect([(moco, moco_bpfdt, [('oned_file', 'in_file')])])
    '''
    This is the co-registration workflow using FSL and ANTs
    '''

    coreg = Workflow(name='coreg')

    # BET - structural data brain extraction
    bet_anat = Node(BET(output_type='NIFTI_GZ', frac=bet_frac, robust=True),
                    name="bet_anat")

    # FSL segmentation process to get WM map
    seg = Node(FAST(bias_iters=6,
                    img_type=1,
                    output_biascorrected=True,
                    output_type='NIFTI_GZ'),
               name="seg")
    coreg.connect([(bet_anat, seg, [('out_file', 'in_files')])])

    # functional to structural registration
    mean = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'), name="mean")

    # BBR using linear methods for initial transform fit
    func2struc = Node(FLIRT(cost='bbr', dof=6, output_type='NIFTI_GZ'),
                      name='func2struc')
    coreg.connect([(seg, func2struc, [('restored_image', 'reference')])])
    coreg.connect([(mean, func2struc, [('mean_img', 'in_file')])])
    coreg.connect([(seg, func2struc, [(('tissue_class_files', pickindex, 2),
                                       'wm_seg')])])

    # convert the FSL linear transform into a C3d format for AFNI
    f2s_c3d = Node(C3dAffineTool(itk_transform=True, fsl2ras=True),
                   name='f2s_c3d')
    coreg.connect([(func2struc, f2s_c3d, [('out_matrix_file', 'transform_file')
                                          ])])
    coreg.connect([(mean, f2s_c3d, [('mean_img', 'source_file')])])
    coreg.connect([(seg, f2s_c3d, [('restored_image', 'reference_file')])])

    # Functional to structural registration via ANTs non-linear registration
    reg = Node(Registration(
        fixed_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        transforms=['Affine', 'SyN'],
        transform_parameters=[(0.1, ), (0.1, 3.0, 0.0)],
        number_of_iterations=[[1500, 1000, 1000], [100, 70, 50, 20]],
        dimension=3,
        write_composite_transform=True,
        collapse_output_transforms=True,
        metric=['MI'] + ['CC'],
        metric_weight=[1] * 2,
        radius_or_number_of_bins=[32] + [4],
        convergence_threshold=[1.e-8, 1.e-9],
        convergence_window_size=[20] + [10],
        smoothing_sigmas=[[2, 1, 0], [4, 2, 1, 0]],
        sigma_units=['vox'] * 2,
        shrink_factors=[[4, 2, 1], [6, 4, 2, 1]],
        use_histogram_matching=[False] + [True],
        use_estimate_learning_rate_once=[True, True],
        output_warped_image=True),
               name='reg')

    coreg.connect([(seg, reg, [('restored_image', 'moving_image')])
                   ])  # connect segmentation node to registration node

    merge1 = Node(niu.Merge(2), iterfield=['in2'],
                  name='merge1')  # merge the linear and nonlinear transforms
    coreg.connect([(f2s_c3d, merge1, [('itk_transform', 'in2')])])
    coreg.connect([(reg, merge1, [('composite_transform', 'in1')])])

    # warp the functional images into MNI space using the transforms from FLIRT and SYN
    warp = Node(ApplyTransforms(
        reference_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        input_image_type=3),
                name='warp')
    coreg.connect([(moco, warp, [('out_file', 'input_image')])])
    coreg.connect([(merge1, warp, [('out', 'transforms')])])

    preproc.connect([(selectfiles, coreg, [('anat', 'bet_anat.in_file')])])
    preproc.connect([(fproc, coreg, [('moco.out_file', 'mean.in_file')])])
    '''
    Scrubbing workflow - find the motion outliers, bandpass filter, re-mean the data after bpf
    '''

    scrub = Workflow(name='scrub')

    # Generate the Scrubbing Regressor
    scrub_metrics = Node(MotionOutliers(dummy=4,
                                        out_file='FD_outliers.1D',
                                        metric='fd',
                                        threshold=0.4),
                         name="scrub_metrics")

    # regress out timepoints
    scrub_frames = Node(Bandpass(highpass=0,
                                 lowpass=99999,
                                 outputtype='NIFTI_GZ'),
                        name='scrub_frames')
    scrub.connect([(scrub_metrics, scrub_frames, [('out_file',
                                                   'orthogonalize_file')])])
    preproc.connect([(coreg, scrub, [('warp.output_image',
                                      'scrub_frames.in_file')])])
    preproc.connect([(selectfiles, scrub, [('func', 'scrub_metrics.in_file')])
                     ])

    # mean image for remeaning after bandpass
    premean = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='premean')
    # remean the image
    remean2 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean2')
    scrub.connect([(scrub_frames, remean2, [('out_file', 'in_file_a')])])
    scrub.connect([(premean, remean2, [('out_file', 'in_file_b')])])
    preproc.connect([(coreg, scrub, [('warp.output_image', 'premean.in_file')])
                     ])
    '''
    Regressors for final cleaning steps
    '''

    regressors = Workflow(name='regressors')

    # Using registered structural image to create the masks for both WM and CSF
    regbet = Node(BET(robust=True, frac=0.37, output_type='NIFTI_GZ'),
                  name='regbet')

    regseg = Node(FAST(img_type=1,
                       output_type='NIFTI_GZ',
                       no_pve=True,
                       no_bias=True,
                       segments=True),
                  name='regseg')
    regressors.connect([(regbet, regseg, [('out_file', 'in_files')])])
    preproc.connect([(coreg, regressors, [('reg.warped_image',
                                           'regbet.in_file')])])
    '''
    Create a cerebrospinal fluid (CSF) regressor 
    '''

    # subtract subcortical GM from the CSF mask
    subcortgm = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                     name='subcortgm')
    regressors.connect([(regseg, subcortgm, [(('tissue_class_files', pickindex,
                                               0), 'in_file')])])

    # Fill the mask holes

    fillcsf = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                   name='fillcsf')
    regressors.connect([(subcortgm, fillcsf, [('out_file', 'in_file')])])

    # Erode the mask

    erocsf = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                  name='erocsf')
    regressors.connect([(fillcsf, erocsf, [('out_file', 'in_file')])])

    # Take mean csf signal from functional image
    meancsf = Node(ImageMeants(output_type='NIFTI_GZ'), name='meancsf')
    regressors.connect([(erocsf, meancsf, [('out_file', 'mask')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'meancsf.in_file')])])

    bpf_dt_csf = Node(CSFbpfdt(), name='bpf_dt_csf')
    regressors.connect([(meancsf, bpf_dt_csf, [('out_file', 'in_file')])])
    '''
    Creates a local white matter regressor
    '''

    # subtract subcortical gm
    subcortgm2 = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                      name='subcortgm2')
    regressors.connect([(regseg, subcortgm2, [(('tissue_class_files',
                                                pickindex, 2), 'in_file')])])

    # fill mask
    fillwm = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                  name='fillwm')
    regressors.connect([(subcortgm2, fillwm, [('out_file', 'in_file')])])

    # erode mask
    erowm = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                 name='erowm')
    regressors.connect([(fillwm, erowm, [('out_file', 'in_file')])])

    # generate local wm
    localwm = Node(Localstat(neighborhood=('SPHERE', 25),
                             stat='mean',
                             nonmask=True,
                             outputtype='NIFTI_GZ'),
                   name='localwm')
    regressors.connect([(erowm, localwm, [('out_file', 'mask_file')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'localwm.in_file')])])

    # bandpass filter the local wm regressor
    localwm_bpf = Node(Fourier(highpass=0.01,
                               lowpass=0.08,
                               args='-retrend',
                               outputtype='NIFTI_GZ'),
                       name='localwm_bpf')
    regressors.connect([(localwm, localwm_bpf, [('out_file', 'in_file')])])

    # detrend the local wm regressor

    localwm_bpf_dt = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                          name='localwm_bpf_dt')
    regressors.connect([(localwm_bpf, localwm_bpf_dt, [('out_file', 'in_file')
                                                       ])])
    '''
    Clean up your functional image with the regressors you have created above
    '''

    # create a mask for blurring filtering, and detrending

    clean = Workflow(name='clean')

    mask = Node(BET(mask=True, functional=True), name='mask')

    mean_mask = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'),
                     name="mean_mask")

    dilf = Node(DilateImage(operation='max', output_type='NIFTI_GZ'),
                name='dilf')
    clean.connect([(mask, dilf, [('mask_file', 'in_file')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mask.in_file')])])

    fill = Node(MaskTool(in_file='default_images/MNI152_T1_2mm_brain.nii.gz',
                         fill_holes=True,
                         outputtype='NIFTI_GZ'),
                name='fill')

    axb = Node(Calc(expr='a*b', outputtype='NIFTI_GZ'), name='axb')
    clean.connect([(dilf, axb, [('out_file', 'in_file_a')])])
    clean.connect([(fill, axb, [('out_file', 'in_file_b')])])

    bxc = Node(Calc(expr='ispositive(a)*b', outputtype='NIFTI_GZ'), name='bxc')
    clean.connect([(mean_mask, bxc, [('mean_img', 'in_file_a')])])
    clean.connect([(axb, bxc, [('out_file', 'in_file_b')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mean_mask.in_file')
                                     ])])

    #### BLUR, FOURIER BPF, and DETREND

    blurinmask = Node(BlurInMask(fwhm=6, outputtype='NIFTI_GZ'),
                      name='blurinmask')
    clean.connect([(bxc, blurinmask, [('out_file', 'mask')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'blurinmask.in_file')
                                     ])])

    fourier = Node(Fourier(highpass=0.01,
                           lowpass=0.08,
                           retrend=True,
                           outputtype='NIFTI_GZ'),
                   name='fourier')
    clean.connect([(blurinmask, fourier, [('out_file', 'in_file')])])

    tstat = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='tstat')
    clean.connect([(fourier, tstat, [('out_file', 'in_file')])])

    detrend = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                   name='detrend')
    clean.connect([(fourier, detrend, [('out_file', 'in_file')])])

    remean = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean')
    clean.connect([(detrend, remean, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean, [('out_file', 'in_file_b')])])

    concat = Node(ConcatModel(), name='concat')

    # Removes nuisance regressors via regression function
    clean_rs = Node(Bandpass(highpass=0, lowpass=99999, outputtype='NIFTI_GZ'),
                    name='clean_rs')

    clean.connect([(concat, clean_rs, [('out_file', 'orthogonalize_file')])])

    remean1 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean1')
    clean.connect([(clean_rs, remean1, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean1, [('out_file', 'in_file_b')])])

    preproc.connect([(regressors, clean, [('bpf_dt_csf.out_file',
                                           'concat.in_file_a')])])
    preproc.connect([(fproc, clean, [('moco_bpfdt.out_file',
                                      'concat.in_file_b')])])

    preproc.connect([(regressors, clean, [('localwm_bpf_dt.out_file',
                                           'clean_rs.orthogonalize_dset')])])
    clean.connect([(remean, clean_rs, [('out_file', 'in_file')])])
    '''
    Write graphical output detailing the workflows and nodes 
    '''

    fproc.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc.dot')
    fproc.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc_color.dot')

    coreg.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg.dot')
    coreg.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg_color.dot')

    scrub.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub.dot')
    scrub.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub_color.dot')

    regressors.write_graph(graph2use='flat',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg.dot')
    regressors.write_graph(graph2use='colored',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg_color.dot')

    preproc.write_graph(graph2use='flat',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc.dot')
    preproc.write_graph(graph2use='colored',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc_color.dot')

    return preproc
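A usage sketch for this workflow; all paths and templates below are placeholders and assume a BIDS-style layout with subject and session entities:

wf = preproc_workflow(
    input_dir='/data/bids',
    output_dir='/data/derivatives/preproc',
    subject_list=['01', '02'],
    ses_list=['1'],
    anat_file='sub-{subject_id}/ses-{ses}/anat/sub-{subject_id}_ses-{ses}_T1w.nii.gz',
    func_file='sub-{subject_id}/ses-{ses}/func/sub-{subject_id}_ses-{ses}_task-rest_bold.nii.gz',
)
wf.run('MultiProc', plugin_args={'n_procs': 4})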
Example no. 12
def preprocessing(*argu):

    argu = argu[0]
    json_file = argu[1]

    with open(json_file, 'r') as jsonfile:
        info = json.load(jsonfile, object_pairs_hook=OrderedDict)

    subject_list = info["subject_list"]
    experiment_dir = info["experiment_dir"]
    output_dir = 'datasink'
    working_dir = 'workingdir'

    task_list = info["task_list"]

    fwhm = [*map(int, info["fwhm"])]
    TR = float(info["TR"])
    iso_size = 4
    slice_list = [*map(int, info["slice order"])]

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=int(info["dummy scans"]),
                              t_size=-1,
                              output_type='NIFTI'),
                   name="extract")

    slicetime = Node(SliceTiming(num_slices=len(slice_list),
                                 ref_slice=int(median(slice_list)),
                                 slice_order=slice_list,
                                 time_repetition=TR,
                                 time_acquisition=TR - (TR / len(slice_list))),
                     name="slicetime")

    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="mcflirt")

    # Smooth - image smoothing
    smooth = Node(Smooth(), name="smooth")
    smooth.iterables = ("fwhm", fwhm)

    # Artifact Detection - determines outliers in functional images
    art = Node(ArtifactDetect(norm_threshold=2,
                              zintensity_threshold=3,
                              mask_type='spm_global',
                              parameter_source='FSL',
                              use_differences=[True, False],
                              plot_type='svg'),
               name="art")

    # BET - Skullstrip anatomical Image
    bet_anat = Node(BET(frac=0.5, robust=True, output_type='NIFTI_GZ'),
                    name="bet_anat")

    # FAST - Image Segmentation
    segmentation = Node(FAST(output_type='NIFTI_GZ'),
                        name="segmentation",
                        mem_gb=4)

    # Select WM segmentation file from segmentation output
    def get_wm(files):
        return files[-1]

    # Threshold - Threshold WM probability image
    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="threshold")

    # FLIRT - pre-alignment of functional images to anatomical images
    coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'), name="coreg_pre")

    # FLIRT - coregistration of functional images to anatomical images with BBR
    coreg_bbr = Node(FLIRT(dof=6,
                           cost='bbr',
                           schedule=opj(os.getenv('FSLDIR'),
                                        'etc/flirtsch/bbr.sch'),
                           output_type='NIFTI_GZ'),
                     name="coreg_bbr")

    # Apply coregistration warp to functional images
    applywarp = Node(FLIRT(interp='spline',
                           apply_isoxfm=iso_size,
                           output_type='NIFTI'),
                     name="applywarp")

    # Apply coregistration warp to mean file
    applywarp_mean = Node(FLIRT(interp='spline',
                                apply_isoxfm=iso_size,
                                output_type='NIFTI_GZ'),
                          name="applywarp_mean")

    # Create a coregistration workflow
    coregwf = Workflow(name='coregwf')
    coregwf.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the coregistration workflow
    coregwf.connect([
        (bet_anat, segmentation, [('out_file', 'in_files')]),
        (segmentation, threshold, [(('partial_volume_files', get_wm),
                                    'in_file')]),
        (bet_anat, coreg_pre, [('out_file', 'reference')]),
        (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
        (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
        (bet_anat, applywarp, [('out_file', 'reference')]),
        (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
        (bet_anat, applywarp_mean, [('out_file', 'reference')]),
    ])

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'task_name']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('task_name', task_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    anat_file = opj('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz')
    func_file = opj('sub-{subject_id}', 'func',
                    'sub-{subject_id}_task-{task_name}_bold.nii.gz')

    templates = {'anat': anat_file, 'func': func_file}
    selectfiles = Node(SelectFiles(templates,
                                   base_directory=info["base directory"]),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    ## Use the following DataSink output substitutions
    substitutions = [
        ('_subject_id_', 'sub-'),
        ('_task_name_', '/task-'),
        ('_fwhm_', 'fwhm-'),
        ('_roi', ''),
        ('_mcf', ''),
        ('_st', ''),
        ('_flirt', ''),
        ('.nii_mean_reg', '_mean'),
        ('.nii.par', '.par'),
    ]
    subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]
    substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the preprocessing workflow
    preproc.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id'),
                                   ('task_name', 'task_name')]),
        (selectfiles, extract, [('func', 'in_file')]),
        (extract, slicetime, [('roi_file', 'in_files')]),
        (slicetime, mcflirt, [('timecorrected_files', 'in_file')]),
        (selectfiles, coregwf, [('anat', 'bet_anat.in_file'),
                                ('anat', 'coreg_bbr.reference')]),
        (mcflirt, coregwf, [('mean_img', 'coreg_pre.in_file'),
                            ('mean_img', 'coreg_bbr.in_file'),
                            ('mean_img', 'applywarp_mean.in_file')]),
        (mcflirt, coregwf, [('out_file', 'applywarp.in_file')]),
        (coregwf, smooth, [('applywarp.out_file', 'in_files')]),
        (mcflirt, datasink, [('par_file', 'preproc.@par')]),
        (smooth, datasink, [('smoothed_files', 'preproc.@smooth')]),
        (coregwf, datasink, [('applywarp_mean.out_file', 'preproc.@mean')]),
        (coregwf, art, [('applywarp.out_file', 'realigned_files')]),
        (mcflirt, art, [('par_file', 'realignment_parameters')]),
        (coregwf, datasink, [('coreg_bbr.out_matrix_file',
                              'preproc.@mat_file'),
                             ('bet_anat.out_file', 'preproc.@brain')]),
        (art, datasink, [('outlier_files', 'preproc.@outlier_files'),
                         ('plot_files', 'preproc.@plot_files')]),
    ])
    # Create preproc output graph
    preproc.write_graph(graph2use='colored', format='png', simple_form=True)

    # Visualize the graph
    img1 = imread(opj(preproc.base_dir, 'preproc', 'graph.png'))
    plt.imshow(img1)
    plt.xticks([]), plt.yticks([])
    plt.show()

    # Visualize the detailed graph
    preproc.write_graph(graph2use='flat', format='png', simple_form=True)
    img2 = imread(opj(preproc.base_dir, 'preproc', 'graph_detailed.png'))
    plt.imshow(img2)
    plt.xticks([]), plt.yticks([])
    plt.show()

    print("Workflow all set. Check the workflow image :)")

    response = input('Should run the workflow? Enter yes or no :')

    if response == 'yes':
        preproc.run('MultiProc', plugin_args={'n_procs': 10})
    elif response == 'no':
        print('Exits the program since you entered no')
    else:
        raise RuntimeError('Should enter either yes or no')
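A sketch of the JSON configuration this function expects; the key names are taken from the reads above, the values are placeholders:

import json

config = {
    'subject_list': ['01', '02'],
    'experiment_dir': '/data/experiment',
    'task_list': ['rest'],
    'fwhm': ['4', '8'],
    'TR': '2.0',
    'slice order': ['1', '3', '5', '2', '4', '6'],
    'dummy scans': '4',
    'base directory': '/data/bids',
}
with open('preproc_config.json', 'w') as fp:
    json.dump(config, fp, indent=2)

# the function reads sys.argv-style arguments, so it could be invoked as:
# preprocessing(['preprocessing.py', 'preproc_config.json'])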
Example no. 13
def fsl_motion_correction(name='realign'):
    """Realign a time series to the middle volume using spline interpolation

    Uses MCFLIRT to realign the time series and ApplyWarp to apply the rigid
    body transformations using spline interpolation (unknown order).

    Nipype Inputs
    -------------
    realign_input.func: existing file
        The path to the fMRI file.

    Nipype Outputs
    --------------
    realign_output.realigned_file: existing file
        The path to the realigned fMRI file.

    realign_output.realign_params: mat file
        .mat file with the affine transformation parameters.

    Example
    -------
    >>> wf = fsl_motion_correction()
    >>> wf.inputs.inputspec.func = 'f3.nii'
    >>> wf.run() # doctest: +SKIP
    """
    wf = Workflow(name=name)

    # input node
    input = setup_node(IdentityInterface(fields=['func']),
                       name='realign_input')
    realigner = setup_node(MCFLIRT(save_mats=True, stats_imgs=True),
                           name='mcflirt')
    splitter = setup_node(Split(dimension='t'), name='splitter')
    warper = MapNode(ApplyWarp(interp='spline'),
                     iterfield=['in_file', 'premat'],
                     name='warper')

    joiner = setup_node(Merge(dimension='t'), name='joiner')

    # output node
    output = setup_node(IdentityInterface(fields=[
        'realigned_file',
        'realign_params',
    ]),
                        name='realign_output')

    wf.connect([
        # input
        (input, realigner, [
            ("func", "in_file"),
            (("func", select_volume, 'middle'), "ref_vol"),
        ]),
        # split
        (realigner, splitter, [("out_file", "in_file")]),
        (realigner, warper, [
            ("mat_file", "premat"),
            ("variance_img", "ref_file"),
        ]),
        # warp
        (splitter, warper, [("out_files", "in_file")]),
        (warper, joiner, [("mat_file", "premat")]),
        # output
        (joiner, output, [("merged_file", "realigned_file")]),
        (realigner, output, [("mat_file", "realign_params")]),
    ])
    return wf
Example no. 14
"""Test FSL's MCFLIRT for motion correction"""
from nipype.interfaces.fsl import MCFLIRT

mcflt = MCFLIRT()
mcflt.inputs.in_file = 'test-data/haxby2001/subj2/bold.nii.gz'
mcflt.inputs.cost = 'mutualinfo'
mcflt.inputs.out_file = 'output/fsl-mcflirt/functional_mcorr.nii.gz'
mcflt.inputs.save_mats = True
mcflt.inputs.save_plots = True
mcflt.cmdline

# How long to run?
%timeit mcflt.run()
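Since run() returns a result object, the generated files can be inspected afterwards; a brief sketch:

res = mcflt.run()
print(res.outputs.out_file)  # motion-corrected 4D image
print(res.outputs.par_file)  # motion parameters (written because save_plots=True)
print(res.outputs.mat_file)  # transformation matrices (written because save_mats=True)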
##### Specify important variables #####

experiment_dir = '../resources/example_data/' # location of experiment folder
subject_list = ['sub-01'] # create the subject_list variable

output_dir = 'output_prepro_ALPACA' # name of output folder
working_dir = 'workingdir_prepro_ALPACA' # name of working directory

fwhm_size=6

##### Create & specify nodes to be used and connected during the preprocessing pipeline #####

### base preprocessing pipeline

# Realign - correct for motion
realign = Node(MCFLIRT(mean_vol=True, save_mats=True, save_plots=True, save_rms=True, output_type='NIFTI'),
               name="realign")

# Plot the rotations, translations, and displacement parameters from MCFLIRT
plotrot = MapNode(PlotMotionParams(in_source="fsl",
                                   plot_type="rotations"),
                  name="plotrotation",
                  iterfield=["in_file"])

plottrans = MapNode(PlotMotionParams(in_source="fsl",
                                     plot_type="translations"),
                    name="plottranslation",
                    iterfield=["in_file"])

plotdisp = MapNode(PlotMotionParams(in_source="fsl",
                                    plot_type="displacement"),
Example no. 16
def main(paths, options_binary_string, ANAT, num_proc=7):

    json_path = paths[0]
    base_directory = paths[1]
    motion_correction_bet_directory = paths[2]
    parent_wf_directory = paths[3]
    # functional_connectivity_directory=paths[4]
    coreg_reg_directory = paths[5]
    atlas_resize_reg_directory = paths[6]
    subject_list = paths[7]
    datasink_name = paths[8]
    # fc_datasink_name=paths[9]
    atlasPath = paths[10]
    # brain_path=paths[11]
    # mask_path=paths[12]
    # atlas_path=paths[13]
    # tr_path=paths[14]
    # motion_params_path=paths[15]
    # func2std_mat_path=paths[16]
    # MNI3mm_path=paths[17]
    # demographics_file_path = paths[18]
    # phenotype_file_path = paths[19]
    data_directory = paths[20]

    number_of_subjects = len(subject_list)
    print("Working with ", number_of_subjects, " subjects.")

    # Create our own custom function - BIDSDataGrabber using a Function Interface.

    # In[858]:

    def get_nifti_filenames(subject_id, data_dir):
        #     Remember that all the necessary imports need to be INSIDE the function for the Function Interface to work!
        from bids.grabbids import BIDSLayout

        layout = BIDSLayout(data_dir)
        run = 1

        anat_file_path = [
            f.filename for f in layout.get(
                subject=subject_id, type='T1w', extensions=['nii', 'nii.gz'])
        ]
        func_file_path = [
            f.filename for f in layout.get(subject=subject_id,
                                           type='bold',
                                           run=run,
                                           extensions=['nii', 'nii.gz'])
        ]

        if len(anat_file_path) == 0:
            return None, func_file_path[0]  # No Anatomical files present
        return anat_file_path[0], func_file_path[0]

    BIDSDataGrabber = Node(Function(
        function=get_nifti_filenames,
        input_names=['subject_id', 'data_dir'],
        output_names=['anat_file_path', 'func_file_path']),
                           name='BIDSDataGrabber')
    # BIDSDataGrabber.iterables = [('subject_id',subject_list)]
    BIDSDataGrabber.inputs.data_dir = data_directory

    # ## Return TR

    def get_TR(in_file):
        from bids.grabbids import BIDSLayout

        data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        TR = metadata['RepetitionTime']
        return TR

    # ---------------- Added new Node to return TR and other slice timing correction params-------------------------------
    def _getMetadata(in_file):
        from bids.grabbids import BIDSLayout
        import logging

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)

        # create a file handler
        handler = logging.FileHandler('progress.log')

        # add the handlers to the logger
        logger.addHandler(handler)

        interleaved = True
        index_dir = False
        data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        print(metadata)

        logger.info('Extracting Meta Data of file: %s', in_file)
        try:
            tr = metadata['RepetitionTime']
        except KeyError:
            print(
                'Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0 '
            )
            tr = 2
            logger.error(
                'Key RepetitionTime not found in task-rest_bold.json for file %s so using a default of 2.0 ',
                in_file)

        try:
            slice_order = metadata['SliceAcquisitionOrder']
        except KeyError:
            print(
                'Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending '
            )
            logger.error(
                'Key SliceAcquisitionOrder not found in task-rest_bold.json for file %s so using a default of interleaved ascending',
                in_file)
            return tr, index_dir, interleaved

        if slice_order.split(' ')[0] == 'Sequential':
            interleaved = False
        if slice_order.split(' ')[1] == 'Descending':
            index_dir = True

        return tr, index_dir, interleaved

    getMetadata = Node(Function(
        function=_getMetadata,
        input_names=['in_file'],
        output_names=['tr', 'index_dir', 'interleaved']),
                       name='getMetadata')

    # ### Skipping 4 starting scans
    # Extract ROI for skipping first 4 scans of the functional data
    # > **Arguments:**
    # t_min: (corresponds to time dimension) Denotes the starting time of the inclusion
    # t_size: Denotes the number of scans to include
    #
    # The logic behind skipping 4 initial scans is to take scans after the subject has stabilized in the scanner.

    # In[863]:

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
                   name="extract")

    # ### Slice time correction
    # Created a Node that does slice time correction
    # > **Arguments**:
    # index_dir=False -> Slices were taken bottom to top i.e. in ascending order
    # interleaved=True means odd slices were acquired first and then even slices [or vice versa(Not sure)]

    slicetimer = Node(SliceTimer(output_type='NIFTI'), name="slicetimer")

    # ### Motion Correction
    # Motion correction is done using fsl's mcflirt. It aligns all the volumes of a functional scan to a reference volume

    # MCFLIRT - motion correction
    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="mcflirt")

    #  Just a pass-through node to transfer the output of MCFLIRT to the next workflow. Needed in case we don't want to use MCFLIRT
    from_mcflirt = Node(IdentityInterface(fields=['in_file']),
                        name="from_mcflirt")

    # ### Skull striping
    # I used fsl's BET

    # In[868]:

    skullStrip = Node(BET(mask=False, frac=0.3, robust=True),
                      name='skullStrip')  #

    # *Note*: Do not include special characters in the `name` field above, because wf.write_graph will then cause issues

    # ## Resample
    # I needed to resample the anatomical file from 1mm to 3mm. Because registering a 1mm file was taking a huge amount of time.
    #

    # In[872]:

    # Resample - resample anatomy to 3x3x3 voxel resolution
    resample_mni = Node(
        Resample(
            voxel_size=(3, 3, 3),
            resample_mode='Cu',  # cubic interpolation
            outputtype='NIFTI'),
        name="resample_mni")

    resample_anat = Node(
        Resample(
            voxel_size=(3, 3, 3),
            resample_mode='Cu',  # cubic interpolation
            outputtype='NIFTI'),
        name="resample_anat")

    # In[873]:

    resample_atlas = Node(
        Resample(
            voxel_size=(3, 3, 3),
            resample_mode='NN',  # nearest-neighbour interpolation
            outputtype='NIFTI'),
        name="resample_atlas")

    resample_atlas.inputs.in_file = atlasPath

    # # Matrix operations
    # ### For concatenating the transformation matrices

    concat_xform = Node(ConvertXFM(concat_xfm=True), name='concat_xform')

    # Node to calculate the inverse of func2std matrix
    inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')

    # ## Extracting the mean brain

    meanfunc = Node(interface=ImageMaths(op_string='-Tmean', suffix='_mean'),
                    name='meanfunc')

    meanfuncmask = Node(interface=BET(mask=True, no_output=True, frac=0.3),
                        name='meanfuncmask')

    # ## Apply Mask

    # Does BET (masking) on the whole func scan [Not using this, creates bug for join node]
    maskfunc = Node(interface=ImageMaths(suffix='_bet', op_string='-mas'),
                    name='maskfunc')

    # Does BET (masking) on the mean func scan
    maskfunc4mean = Node(interface=ImageMaths(suffix='_bet', op_string='-mas'),
                         name='maskfunc4mean')

    # ## Datasink
    # I needed to define the structure of what files are saved and where.

    # Create DataSink object
    dataSink = Node(DataSink(), name='datasink')

    # Name of the output folder
    dataSink.inputs.base_directory = opj(base_directory, datasink_name)

    # Define substitution strings so that the data is similar to BIDS
    substitutions = [
        ('_subject_id_', 'sub-'), ('_resample_brain_flirt.nii_brain', ''),
        ('_roi_st_mcf_flirt.nii_brain_flirt', ''),
        ('task-rest_run-1_bold_roi_st_mcf.nii', 'motion_params'),
        ('T1w_resample_brain_flirt_sub-0050002_task-rest_run-1_bold_roi_st_mcf_mean_bet_flirt',
         'fun2std')
    ]

    # Feed the substitution strings to the DataSink node
    dataSink.inputs.substitutions = substitutions

    # ### Apply Mask to functional data
    # Mean file of the motion corrected functional scan is sent to
    # skullStrip to get just the brain and the mask_image.
    # Mask_image is just a binary file (containing 1 where brain is present and 0 where it isn't).
    # After getting the mask_image form skullStrip, apply that mask to aligned
    # functional image to extract its brain and remove the skull

    # In[889]:

    # Function
    # in_file: The file on which you want to apply mask
    # in_file2 = mask_file:  The mask you want to use. Make sure that mask_file has same size as in_file
    # out_file : Result of applying mask in in_file -> Gives the path of the output file

    def applyMask_func(in_file, in_file2):
        import numpy as np
        import nibabel as nib
        import os
        from os.path import join as opj

        # convert from unicode to string : u'/tmp/tmp8daO2Q/..' -> '/tmp/tmp8daO2Q/..' i.e. removes the prefix 'u'
        mask_file = in_file2

        brain_data = nib.load(in_file)
        mask_data = nib.load(mask_file)

        brain = brain_data.get_data().astype('float32')
        mask = mask_data.get_data()

        # applying mask by multiplying elementwise to the binary mask

        if len(brain.shape) == 3:  # Anat file
            brain = np.multiply(brain, mask)
        elif len(brain.shape) > 3:  # Functional File
            for t in range(brain.shape[-1]):
                brain[:, :, :, t] = np.multiply(brain[:, :, :, t], mask)
        else:
            pass

        # Saving the brain file

        path = os.getcwd()

        in_file_split_list = in_file.split('/')
        in_file_name = in_file_split_list[-1]

        out_file = in_file_name + '_brain.nii.gz'  # changing name
        brain_with_header = nib.Nifti1Image(brain,
                                            affine=brain_data.affine,
                                            header=brain_data.header)
        nib.save(brain_with_header, out_file)

        out_file = opj(path, out_file)
        out_file2 = in_file2

        return out_file, out_file2

    # #### Things learnt:
    # 1. I found out that whenever a node is executed, its working directory becomes the current directory, and whatever file you create there will be stored in it.
    # 2. #from IPython.core.debugger import Tracer; Tracer()()    # Debugger doesn't work in nipype

    # Wrap the above function inside a Node

    # In[890]:

    applyMask = Node(Function(function=applyMask_func,
                              input_names=['in_file', 'in_file2'],
                              output_names=['out_file', 'out_file2']),
                     name='applyMask')

    # ### Some nodes needed for Co-registration and Normalization

    # Node for getting the xformation matrix
    func2anat_reg = Node(FLIRT(output_type='NIFTI'), name="func2anat_reg")

    # Node for applying xformation matrix to functional data
    func2std_xform = Node(FLIRT(output_type='NIFTI', apply_xfm=True),
                          name="func2std_xform")

    # Node for applying xformation matrix to functional data
    std2func_xform = Node(FLIRT(output_type='NIFTI',
                                apply_xfm=True,
                                interp='nearestneighbour'),
                          name="std2func_xform")

    # Node for Normalizing/Standardizing the anatomical and getting the xformation matrix
    anat2std_reg = Node(FLIRT(output_type='NIFTI'), name="anat2std_reg")

    # I wanted to use the MNI file as input to the workflow so I created an Identity
    # Node that reads the MNI file path and outputs the same MNI file path.
    # Then I connected this node to whereever it was needed.

    MNI152_2mm = Node(IdentityInterface(fields=['standard_file', 'mask_file']),
                      name="MNI152_2mm")
    # Set the mask_file and standard_file input in the Node. This setting sets the input mask_file permanently.
    MNI152_2mm.inputs.mask_file = os.path.expandvars(
        '$FSLDIR/data/standard/MNI152_T1_2mm_brain_mask.nii.gz')

    MNI152_2mm.inputs.standard_file = os.path.expandvars(
        '$FSLDIR/data/standard/MNI152_T1_2mm_brain.nii.gz')
    # MNI152_2mm.inputs.mask_file = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain_mask.nii.gz'
    # MNI152_2mm.inputs.standard_file = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz'

    # ## Band Pass Filtering
    # Let's do band-pass filtering on the data using the code from https://neurostars.org/t/bandpass-filtering-different-outputs-from-fsl-and-nipype-custom-function/824/2

    ### AFNI

    bandpass = Node(afni.Bandpass(highpass=0.008,
                                  lowpass=0.08,
                                  despike=False,
                                  no_detrend=True,
                                  notrans=True,
                                  outputtype='NIFTI_GZ'),
                    name='bandpass')

    # ### Following are Join Nodes that collect the preprocessed file paths and save them to files

    # In[902]:

    def save_file_list_function_in_brain(in_brain):
        import numpy as np
        import os
        from os.path import join as opj

        file_list = np.asarray(in_brain)
        print('######################## File List ######################: \n',
              file_list)

        np.save('brain_file_list', file_list)
        file_name = 'brain_file_list.npy'
        out_brain = opj(os.getcwd(), file_name)  # path
        return out_brain

    def save_file_list_function_in_mask(in_mask):
        import numpy as np
        import os
        from os.path import join as opj

        file_list2 = np.asarray(in_mask)
        print('######################## File List ######################: \n',
              file_list2)

        np.save('mask_file_list', file_list2)
        file_name2 = 'mask_file_list.npy'
        out_mask = opj(os.getcwd(), file_name2)  # path
        return out_mask

    def save_file_list_function_in_motion_params(in_motion_params):
        import numpy as np
        import os
        from os.path import join as opj

        file_list3 = np.asarray(in_motion_params)
        print('######################## File List ######################: \n',
              file_list3)

        np.save('motion_params_file_list', file_list3)
        file_name3 = 'motion_params_file_list.npy'
        out_motion_params = opj(os.getcwd(), file_name3)  # path
        return out_motion_params

    def save_file_list_function_in_motion_outliers(in_motion_outliers):
        import numpy as np
        import os
        from os.path import join as opj

        file_list4 = np.asarray(in_motion_outliers)
        print('######################## File List ######################: \n',
              file_list4)

        np.save('motion_outliers_file_list', file_list4)
        file_name4 = 'motion_outliers_file_list.npy'
        out_motion_outliers = opj(os.getcwd(), file_name4)  # path
        return out_motion_outliers

    def save_file_list_function_in_joint_xformation_matrix(
            in_joint_xformation_matrix):
        import numpy as np
        import os
        from os.path import join as opj

        file_list5 = np.asarray(in_joint_xformation_matrix)
        print('######################## File List ######################: \n',
              file_list5)

        np.save('joint_xformation_matrix_file_list', file_list5)
        file_name5 = 'joint_xformation_matrix_file_list.npy'
        out_joint_xformation_matrix = opj(os.getcwd(), file_name5)  # path
        return out_joint_xformation_matrix

    def save_file_list_function_in_tr(in_tr):
        import numpy as np
        import os
        from os.path import join as opj

        tr_list = np.asarray(in_tr)
        print('######################## TR List ######################: \n',
              tr_list)

        np.save('tr_list', tr_list)
        file_name6 = 'tr_list.npy'
        out_tr = opj(os.getcwd(), file_name6)  # path
        return out_tr

    def save_file_list_function_in_atlas(in_atlas):
        import numpy as np
        import os
        from os.path import join as opj

        file_list7 = np.asarray(in_atlas)
        print('######################## File List ######################: \n',
              file_list7)

        np.save('atlas_file_list', file_list7)
        file_name7 = 'atlas_file_list.npy'
        out_atlas = opj(os.getcwd(), file_name7)  # path
        return out_atlas

    save_file_list_in_brain = JoinNode(Function(
        function=save_file_list_function_in_brain,
        input_names=['in_brain'],
        output_names=['out_brain']),
                                       joinsource="infosource",
                                       joinfield=['in_brain'],
                                       name="save_file_list_in_brain")

    save_file_list_in_mask = JoinNode(Function(
        function=save_file_list_function_in_mask,
        input_names=['in_mask'],
        output_names=['out_mask']),
                                      joinsource="infosource",
                                      joinfield=['in_mask'],
                                      name="save_file_list_in_mask")

    save_file_list_in_motion_outliers = JoinNode(
        Function(function=save_file_list_function_in_motion_outliers,
                 input_names=['in_motion_outliers'],
                 output_names=['out_motion_outliers']),
        joinsource="infosource",
        joinfield=['in_motion_outliers'],
        name="save_file_list_in_motion_outliers")

    save_file_list_in_motion_params = JoinNode(
        Function(function=save_file_list_function_in_motion_params,
                 input_names=['in_motion_params'],
                 output_names=['out_motion_params']),
        joinsource="infosource",
        joinfield=['in_motion_params'],
        name="save_file_list_in_motion_params")

    save_file_list_in_joint_xformation_matrix = JoinNode(
        Function(function=save_file_list_function_in_joint_xformation_matrix,
                 input_names=['in_joint_xformation_matrix'],
                 output_names=['out_joint_xformation_matrix']),
        joinsource="infosource",
        joinfield=['in_joint_xformation_matrix'],
        name="save_file_list_in_joint_xformation_matrix")

    save_file_list_in_tr = JoinNode(Function(
        function=save_file_list_function_in_tr,
        input_names=['in_tr'],
        output_names=['out_tr']),
                                    joinsource="infosource",
                                    joinfield=['in_tr'],
                                    name="save_file_list_in_tr")

    save_file_list_in_atlas = JoinNode(Function(
        function=save_file_list_function_in_atlas,
        input_names=['in_atlas'],
        output_names=['out_atlas']),
                                       joinsource="infosource",
                                       joinfield=['in_atlas'],
                                       name="save_file_list_in_atlas")

    # save_file_list = JoinNode(Function(function=save_file_list_function, input_names=['in_brain', 'in_mask', 'in_motion_params','in_motion_outliers','in_joint_xformation_matrix', 'in_tr', 'in_atlas'],
    #                output_names=['out_brain','out_mask','out_motion_params','out_motion_outliers','out_joint_xformation_matrix','out_tr', 'out_atlas']),
    #                joinsource="infosource",
    #                joinfield=['in_brain', 'in_mask', 'in_motion_params','in_motion_outliers','in_joint_xformation_matrix','in_tr', 'in_atlas'],
    #                name="save_file_list")

    # def save_file_list_function(in_brain, in_mask, in_motion_params, in_motion_outliers, in_joint_xformation_matrix, in_tr, in_atlas):
    #     # Imports
    #     import numpy as np
    #     import os
    #     from os.path import join as opj
    #
    #
    #     file_list = np.asarray(in_brain)
    #     print('######################## File List ######################: \n',file_list)
    #
    #     np.save('brain_file_list',file_list)
    #     file_name = 'brain_file_list.npy'
    #     out_brain = opj(os.getcwd(),file_name) # path
    #
    #
    #     file_list2 = np.asarray(in_mask)
    #     print('######################## File List ######################: \n',file_list2)
    #
    #     np.save('mask_file_list',file_list2)
    #     file_name2 = 'mask_file_list.npy'
    #     out_mask = opj(os.getcwd(),file_name2) # path
    #
    #
    #     file_list3 = np.asarray(in_motion_params)
    #     print('######################## File List ######################: \n',file_list3)
    #
    #     np.save('motion_params_file_list',file_list3)
    #     file_name3 = 'motion_params_file_list.npy'
    #     out_motion_params = opj(os.getcwd(),file_name3) # path
    #
    #
    #     file_list4 = np.asarray(in_motion_outliers)
    #     print('######################## File List ######################: \n',file_list4)
    #
    #     np.save('motion_outliers_file_list',file_list4)
    #     file_name4 = 'motion_outliers_file_list.npy'
    #     out_motion_outliers = opj(os.getcwd(),file_name4) # path
    #
    #
    #     file_list5 = np.asarray(in_joint_xformation_matrix)
    #     print('######################## File List ######################: \n',file_list5)
    #
    #     np.save('joint_xformation_matrix_file_list',file_list5)
    #     file_name5 = 'joint_xformation_matrix_file_list.npy'
    #     out_joint_xformation_matrix = opj(os.getcwd(),file_name5) # path
    #
    #     tr_list = np.asarray(in_tr)
    #     print('######################## TR List ######################: \n',tr_list)
    #
    #     np.save('tr_list',tr_list)
    #     file_name6 = 'tr_list.npy'
    #     out_tr = opj(os.getcwd(),file_name6) # path
    #
    #
    #     file_list7 = np.asarray(in_atlas)
    #     print('######################## File List ######################: \n',file_list7)
    #
    #     np.save('atlas_file_list',file_list7)
    #     file_name7 = 'atlas_file_list.npy'
    #     out_atlas = opj(os.getcwd(),file_name7) # path
    #
    #
    #
    #
    #     return out_brain, out_mask, out_motion_params, out_motion_outliers, out_joint_xformation_matrix, out_tr , out_atlas

    # ### Motion outliers

    motionOutliers = Node(MotionOutliers(no_motion_correction=False,
                                         metric='fd',
                                         out_metric_plot='fd_plot.png',
                                         out_metric_values='fd_raw.txt'),
                          name='motionOutliers')

    # ## Workflow for atlas registration from std to functional

    wf_atlas_resize_reg = Workflow(name=atlas_resize_reg_directory)

    wf_atlas_resize_reg.connect([

        # Apply the inverse matrix to the 3mm Atlas to transform it to func space
        (maskfunc4mean, std2func_xform, [('out_file', 'reference')]),
        (resample_atlas, std2func_xform, [('out_file', 'in_file')]),

        # Now, applying the inverse matrix
        (inv_mat, std2func_xform, [('out_file', 'in_matrix_file')]
         ),  # output: Atlas in func space
        (std2func_xform, save_file_list_in_atlas, [('out_file', 'in_atlas')]),

        # ---------------------------Save the required files --------------------------------------------
        (save_file_list_in_motion_params, dataSink,
         [('out_motion_params', 'motion_params_paths.@out_motion_params')]),
        (save_file_list_in_motion_outliers, dataSink,
         [('out_motion_outliers', 'motion_outliers_paths.@out_motion_outliers')
          ]),
        (save_file_list_in_brain, dataSink,
         [('out_brain', 'preprocessed_brain_paths.@out_brain')]),
        (save_file_list_in_mask, dataSink,
         [('out_mask', 'preprocessed_mask_paths.@out_mask')]),
        (save_file_list_in_joint_xformation_matrix, dataSink,
         [('out_joint_xformation_matrix',
           'joint_xformation_matrix_paths.@out_joint_xformation_matrix')]),
        (save_file_list_in_tr, dataSink, [('out_tr', 'tr_paths.@out_tr')]),
        (save_file_list_in_atlas, dataSink, [('out_atlas',
                                              'atlas_paths.@out_atlas')])
    ])

    # In[909]:

    wf_coreg_reg = Workflow(name=coreg_reg_directory)
    # wf_coreg_reg.base_dir = base_directory
    # Dir where all the outputs will be stored (inside the coregistrationPipeline folder).

    if ANAT == 1:
        wf_coreg_reg.connect(BIDSDataGrabber, 'anat_file_path', skullStrip,
                             'in_file')  # Resampled the anat file to 3mm

        wf_coreg_reg.connect(skullStrip, 'out_file', resample_anat, 'in_file')

        wf_coreg_reg.connect(
            resample_anat, 'out_file', func2anat_reg, 'reference'
        )  # Make the resampled file as reference in func2anat_reg

        # Sec 1. The above 3 steps register the mean image to the resampled anat image and
        # calculate the func-to-anat transformation matrix, which FLIRT exposes as out_matrix_file

        wf_coreg_reg.connect(MNI152_2mm, 'standard_file', resample_mni,
                             'in_file')

        wf_coreg_reg.connect(resample_mni, 'out_file', anat2std_reg,
                             'reference')

        wf_coreg_reg.connect(resample_anat, 'out_file', anat2std_reg,
                             'in_file')

        # Calculates the transformation matrix from anat 3mm to MNI 3mm

        # We can get those matrices by referring to func2anat_reg.outputs.out_matrix_file and similarly for anat2std_reg

        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', concat_xform,
                             'in_file')

        wf_coreg_reg.connect(anat2std_reg, 'out_matrix_file', concat_xform,
                             'in_file2')

        wf_coreg_reg.connect(concat_xform, 'out_file', dataSink,
                             'tranformation_matrix_fun2std.@out_file')

        wf_coreg_reg.connect(concat_xform, 'out_file',
                             save_file_list_in_joint_xformation_matrix,
                             'in_joint_xformation_matrix')

        # Now invert the func2std matrix to get std2func
        wf_coreg_reg.connect(concat_xform, 'out_file', wf_atlas_resize_reg,
                             'inv_mat.in_file')
# ------------------------------------------------------------------------------------------------------------------------------

# Registration of Functional to MNI 3mm space w/o using anatomical
    if ANAT == 0:
        print('Not using anatomical high-resolution files')
        wf_coreg_reg.connect(MNI152_2mm, 'standard_file', resample_mni,
                             'in_file')
        wf_coreg_reg.connect(
            resample_mni, 'out_file', func2anat_reg, 'reference'
        )  # Make the resampled file as reference in func2anat_reg

        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', dataSink,
                             'tranformation_matrix_fun2std.@out_file')

        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file',
                             save_file_list_in_joint_xformation_matrix,
                             'in_joint_xformation_matrix')

        # Now invert the func2std matrix to get std2func
        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file',
                             wf_atlas_resize_reg, 'inv_mat.in_file')

    # ## Co-Registration, Normalization and Bandpass Workflow
    # 1. Co-registration means aligning the func to the anat
    # 2. Normalization means aligning the func/anat to standard space
    # 3. Band-pass filtering is applied in the range highpass=0.008 Hz, lowpass=0.08 Hz
    # (See the note just below on how the FLIRT matrices are chained.)
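    # Roughly, and assuming concat_xform / inv_mat wrap FSL's convert_xfm (their
    # in_file / in_file2 / out_file fields used below match nipype's ConvertXFM):
    # func2anat_reg and anat2std_reg each produce an affine .mat file; chaining
    # them yields a single func->standard matrix, and inverting that yields the
    # standard->func matrix used to pull the atlas back into native space, e.g.:
    #
    #   concat_xform = Node(ConvertXFM(concat_xfm=True), name='concat_xform')
    #   inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')
    #
    # (Illustrative only; the actual nodes are defined earlier in this script.)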

    # In[910]:

    wf_motion_correction_bet = Workflow(name=motion_correction_bet_directory)
    # wf_motion_correction_bet.base_dir = base_directory

    wf_motion_correction_bet.connect([
        (from_mcflirt, meanfunc, [('in_file', 'in_file')]),
        (meanfunc, meanfuncmask, [('out_file', 'in_file')]),
        (from_mcflirt, applyMask, [('in_file', 'in_file')]),  # 1
        (meanfuncmask, applyMask, [
            ('mask_file', 'in_file2')
        ]),  # 2 output: 1&2,  BET on coregistered fmri scan
        (meanfunc, maskfunc4mean, [('out_file', 'in_file')]),  # 3
        (meanfuncmask, maskfunc4mean,
         [('mask_file', 'in_file2')]),  # 4 output: 3&4, BET on mean func scan
        (applyMask, save_file_list_in_brain, [('out_file', 'in_brain')]),
        (applyMask, save_file_list_in_mask, [('out_file2', 'in_mask')]),
        (maskfunc4mean, wf_coreg_reg, [('out_file', 'func2anat_reg.in_file')])
    ])

    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")

    infosource.iterables = [('subject_id', subject_list)]

    # Create the workflow

    wf = Workflow(name=parent_wf_directory)
    # base_dir = opj(s,'result')
    wf.base_dir = base_directory  # Dir where all the outputs will be stored (inside the BETFlow folder).

    # wf.connect([      (infosource, BIDSDataGrabber, [('subject_id','subject_id')]),
    #                   (BIDSDataGrabber, extract, [('func_file_path','in_file')]),
    #
    #                   (BIDSDataGrabber,getMetadata, [('func_file_path','in_file')]),
    #
    #                   (getMetadata,slicetimer, [('tr','time_repetition')]),
    #
    #
    #                   (getMetadata,slicetimer, [('index_dir','index_dir')]),
    #
    #                   (getMetadata,slicetimer, [('interleaved','interleaved')]),
    #
    #                   (getMetadata,save_file_list_in_tr, [('tr','in_tr')]),
    #
    #                   (extract,slicetimer,[('roi_file','in_file')]),
    #
    #                   (slicetimer, mcflirt,[('slice_time_corrected_file','in_file')])
    #                   (mcflirt,dataSink,[('par_file','motion_params.@par_file')]), # saves the motion parameters calculated before
    #
    #                   (mcflirt,save_file_list_in_motion_params,[('par_file','in_motion_params')]),
    #
    #                   (mcflirt,wf_motion_correction_bet,[('out_file','from_mcflirt.in_file')])
    #            ])
    # # Run it in parallel
    # wf.run('MultiProc', plugin_args={'n_procs': num_proc})
    #
    #
    #
    # # Visualize the detailed graph
    # # from IPython.display import Image
    # wf.write_graph(graph2use='flat', format='png', simple_form=True)

    # Options:
    # discard 4 dummy volumes (extract), slicetimer, motion outliers, mcflirt
    print('Preprocessing Options:')
    print('Skipping 4 dummy volumes - ', options_binary_string[0])
    print('Slicetiming correction - ', options_binary_string[1])
    print('Finding Motion Outliers - ', options_binary_string[2])
    print('Doing Motion Correction - ', options_binary_string[3])
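    # Each character of options_binary_string toggles the matching entry of
    # `nodes` below (extract, slicetimer, motionOutliers, mcflirt). For example
    # (hypothetical), '1011' would discard the dummy volumes, skip slice-timing
    # correction, and still run motion-outlier detection and motion correction.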

    # ANAT = 0
    nodes = [extract, slicetimer, motionOutliers, mcflirt]
    wf.connect(infosource, 'subject_id', BIDSDataGrabber, 'subject_id')
    wf.connect(BIDSDataGrabber, 'func_file_path', getMetadata, 'in_file')
    wf.connect(getMetadata, 'tr', save_file_list_in_tr, 'in_tr')

    old_node = BIDSDataGrabber
    old_node_output = 'func_file_path'

    for idx, include in enumerate(options_binary_string):

        if old_node == extract:
            old_node_output = 'roi_file'
        elif old_node == slicetimer:
            old_node_output = 'slice_time_corrected_file'
        # elif old_node == mcflirt:

        # old_node_output = 'out_file'

        if int(include):
            new_node = nodes[idx]

            if new_node == slicetimer:
                wf.connect(getMetadata, 'tr', slicetimer, 'time_repetition')
                wf.connect(getMetadata, 'index_dir', slicetimer, 'index_dir')
                wf.connect(getMetadata, 'interleaved', slicetimer,
                           'interleaved')
                new_node_input = 'in_file'
            elif new_node == extract:
                new_node_input = 'in_file'
            elif new_node == mcflirt:
                new_node_input = 'in_file'
                wf.connect(mcflirt, 'par_file', dataSink,
                           'motion_params.@par_file'
                           )  # saves the motion parameters calculated before

                wf.connect(mcflirt, 'par_file',
                           save_file_list_in_motion_params, 'in_motion_params')

                wf.connect(mcflirt, 'out_file', wf_motion_correction_bet,
                           'from_mcflirt.in_file')

            elif new_node == motionOutliers:

                wf.connect(meanfuncmask, 'mask_file', motionOutliers, 'mask')

                wf.connect(motionOutliers, 'out_file', dataSink,
                           'motionOutliers.@out_file')

                wf.connect(motionOutliers, 'out_metric_plot', dataSink,
                           'motionOutliers.@out_metric_plot')

                wf.connect(motionOutliers, 'out_metric_values', dataSink,
                           'motionOutliers.@out_metric_values')

                wf.connect(motionOutliers, 'out_file',
                           save_file_list_in_motion_outliers,
                           'in_motion_outliers')

                new_node_input = 'in_file'

                wf.connect(old_node, old_node_output, new_node, new_node_input)

                continue

            wf.connect(old_node, old_node_output, new_node, new_node_input)

            old_node = new_node

        else:
            if idx == 3:
                # new_node = from_mcflirt
                # new_node_input = 'from_mcflirt.in_file'

                wf.connect(old_node, old_node_output, wf_motion_correction_bet,
                           'from_mcflirt.in_file')

                # old_node = new_node

    TEMP_DIR_FOR_STORAGE = opj(base_directory, 'crash_files')
    wf.config = {"execution": {"crashdump_dir": TEMP_DIR_FOR_STORAGE}}

    # Visualize the detailed graph
    # from IPython.display import Image

    wf.write_graph(graph2use='flat', format='png', simple_form=True)

    # Run it in parallel
    wf.run('MultiProc', plugin_args={'n_procs': num_proc})
Example No. 17
    warp whole head (not skull stripped) T1 to MNI 152 T1 2mm template (ANTS)
        -> mat file/txt, image and coeff file
3) apply combined coregistration from fMRI to T1 to MNI Template to rcFe (ANTS);
    apply spatial smoothing (4mm iso Gaussian; fslmaths).
"""

if args['t1_temp'] is not None:
    template = os.path.abspath(args['t1_temp'])
else:
    template = os.path.abspath(args['epi_temp'])
    #template = ""
    print("set up other temp")

# 1_______________________
# Motion correction on fmri time series
mcflirt = Node(MCFLIRT(mean_vol=True, output_type='NIFTI'), name="mcflirt")

# Compute mean(fslmaths) of the fmri time series
mean_fmri = Node(MeanImage(output_type='NIFTI'), name="meanimage")

# Skull Strip the fmri time series
bet_fmri = Node(BET(output_type='NIFTI', mask=True), name="bet_fmri")


def compute_scFe(input_image, mask_image, invert_sign=True):
    import nibabel as nib
    import numpy as np
    from scipy import stats
    from os import path
    '''
    Just in case you want to return just the image (e.g. rendering or
Example No. 18
File: cest.py Project: em-blue/QUIT
def prep(zfrqs, dummies=0, pca_retain=0, name='CEST_prep'):
    inputnode = Node(IdentityInterface(fields=['zspec_file', 'ref_file']),
                     name='inputnode')
    outputnode = Node(IdentityInterface(
        fields=['zspec_file', 'f0_map', 'mask_file', 'ref_file', 'DS', 'MT']),
                      name='outputnode')

    moco = Node(MCFLIRT(cost='mutualinfo', mean_vol=True), name='moco')
    mask = Node(BET(mask=True, no_output=True), name='mask')

    if (dummies > 0):
        ref_index = dummies - 1
        zspec_select = Node(Select(volumes=list(range(dummies, len(zfrqs))),
                                   out_file='zspec.nii.gz'),
                            name='zspec_select')
        zfrqs = np.array(zfrqs[dummies:])
    else:
        ref_index = 0
        zfrqs = np.array(zfrqs)

    zspec_ref = Node(Select(volumes=[
        ref_index,
    ], out_file='reference.nii.gz'),
                     name='zspec_ref')
    zspec_norm = Node(ImageMaths(op_string='-div', out_file='zspec.nii.gz'),
                      name='zspec_norm')

    f0_indices = (np.abs(zfrqs) > 7) | (np.abs(zfrqs) < 1.1)
    sat_frqs = zfrqs[f0_indices]
    sat_angles = np.repeat(180.0, len(f0_indices))
    f0_select = Node(Select(volumes=np.where(f0_indices)[0].tolist(),
                            out_file='background_zspec.nii.gz'),
                     name='f0_select')
    sequence = {
        'MTSat': {
            'pulse': {
                'p1': 0.4,
                'p2': 0.3,
                'bandwidth': 0.39
            },
            'Trf': 0.02,
            'TR': 4,
            'FA': 5,
            'sat_f0': sat_frqs.tolist(),
            'sat_angle': sat_angles.tolist()
        }
    }
    two_pools = [{
        'name': 'DS',
        'df0': [0, -2.5, 2.5],
        'fwhm': [1.0, 1.e-6, 3.0],
        'A': [0.2, 1.e-3, 1.0],
        'use_bandwidth': True
    }, {
        'name': 'MT',
        'df0': [-2.5, -5.0, -0.5],
        'fwhm': [50.0, 35.0, 200.0],
        'A': [0.3, 1.e-3, 1.0]
    }]
    f0_fit = Node(Lorentzian(sequence=sequence, pools=two_pools, verbose=True),
                  name='f0_fit')

    out_frqs = np.sort(zfrqs)
    f0_correct = Node(ZSpec(in_freqs=zfrqs.tolist(),
                            out_freqs=out_frqs.tolist(),
                            verbose=True),
                      name='f0_correct')

    prep = Workflow(name=name)
    prep.connect([(inputnode, moco, [('zspec_file', 'in_file'),
                                     ('ref_file', 'ref_file')]),
                  (moco, zspec_ref, [('out_file', 'in_file')]),
                  (moco, mask, [('mean_img', 'in_file')]),
                  (zspec_ref, zspec_norm, [('out_file', 'in_file2')]),
                  (zspec_norm, f0_select, [('out_file', 'in_file')]),
                  (f0_select, f0_fit, [('out_file', 'in_file')]),
                  (mask, f0_fit, [('mask_file', 'mask_file')]),
                  (zspec_norm, f0_correct, [('out_file', 'in_file')]),
                  (f0_fit, f0_correct, [('DS_f0', 'f0_map')]),
                  (mask, f0_correct, [('mask_file', 'mask_file')]),
                  (moco, outputnode, [('mean_img', 'ref_file')]),
                  (mask, outputnode, [('out_file', 'mask_file')]),
                  (f0_fit, outputnode, [('DS_f0', 'f0_map'), ('DS_A', 'DS'),
                                        ('MT_A', 'MT')])])
    if (dummies > 0):
        prep.connect([(moco, zspec_select, [('out_file', 'in_file')]),
                      (zspec_select, zspec_norm, [('out_file', 'in_file')])])
    else:
        prep.connect([(moco, zspec_norm, [('out_file', 'in_file')])])

    if pca_retain > 0:
        f0_pca = Node(PCA(retain=pca_retain, projections_file='proj.nii.gz'),
                      name='f0_pca')
        prep.connect([(f0_correct, f0_pca, [('out_file', 'in_file')]),
                      (f0_pca, outputnode, [('out_file', 'zspec_file')])])
    else:
        prep.connect([(f0_correct, outputnode, [('out_file', 'zspec_file')])])

    return (prep, out_frqs)
Example No. 19
def wfmaker(project_dir,
            raw_dir,
            subject_id,
            task_name='',
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False):
    """
    This function returns a "standard" workflow based on requested settings. Assumes data is organized in BIDS format under project_dir/raw_dir.

    *Work flow steps*:

    1) EPI Distortion Correction (FSL; optional)
    2) Trimming (nipy)
    3) Realignment/Motion Correction (FSL)
    4) Artifact Detection (rapidART/python)
    5) Brain Extraction + N4 Bias Correction (ANTs)
    6) Coregistration (rigid) (ANTs)
    7) Normalization to MNI (non-linear) (ANTs)
    8) Low-pass filtering (nilearn; optional)
    9) Smoothing (FSL; optional)
    10) Downsampling to INT16 precision to save space (nibabel)

    Args:
        project_dir (str): full path to the root of project folder, e.g. /my/data/myproject. All preprocessed data will be placed under this folder and the raw_dir folder will be searched for under this folder
        raw_dir (str): folder name for raw data, e.g. 'raw' which would be automatically converted to /my/data/myproject/raw
        subject_id (str/int): subject ID to process. Can be either a subject ID string e.g. 'sub-0001' or an integer to index the entire list of subjects in raw_dir, e.g. 0, which would process the first subject
        apply_trim (int/bool; optional): number of volumes to trim from the beginning of each functional run; default is None
        task_name (str; optional): which functional task runs to process; default is all runs
        apply_dist_corr (bool; optional): look for fmap files and perform distortion correction; default False
        apply_smooth (int/list; optional): smoothing to perform in FWHM mm; if a list is provided will create outputs for each smoothing kernel separately; default False
        apply_filter (float/list; optional): low-pass/high-freq filtering cut-offs in Hz; if a list is provided will create outputs for each filter cut-off separately. With high temporal resolution scans .25Hz is a decent value to capture respiratory artifacts; default None/False
        mni_template (str; optional): which mm resolution template to use, e.g. '3mm'; default '2mm'
        apply_n4 (bool; optional): perform N4 Bias Field correction on the anatomical image; default true
        ants_threads (int; optional): number of threads ANTs should use for its processes; default 8
        readable_crash_files (bool; optional): should nipype crash files be saved as txt? This makes them easily readable, but sometimes interferes with nipype's ability to use cached results of successfully run nodes (i.e. picking up where it left off after bugs are fixed); default False

    Examples:

        >>> from cosanlab_preproc.wfmaker import wfmaker
        >>> # Create workflow that performs no distortion correction, trims first 5 TRs, no filtering, 6mm smoothing, and normalizes to 2mm MNI space. Run it with 16 cores.
        >>>
        >>> workflow = wfmaker(
                        project_dir = '/data/project',
                        raw_dir = 'raw',
                        apply_trim = 5)
        >>>
        >>> workflow.run('MultiProc',plugin_args = {'n_procs': 16})
        >>>
        >>> # Create workflow that performs distortion correction, trims first 25 TRs, no filtering and filtering .25hz, 6mm and 8mm smoothing, and normalizes to 3mm MNI space. Run it serially (will be super slow!).
        >>>
        >>> workflow = wfmaker(
                        project_dir = '/data/project',
                        raw_dir = 'raw',
                        apply_trim = 25,
                        apply_dist_corr = True,
                        apply_filter = [0, .25],
                        apply_smooth = [6.0, 8.0],
                        mni_template = '3mm')
        >>>
        >>> workflow.run()

    """

    ##################
    ### PATH SETUP ###
    ##################
    if mni_template not in ['1mm', '2mm', '3mm']:
        raise ValueError("MNI template must be: 1mm, 2mm, or 3mm")

    data_dir = os.path.join(project_dir, raw_dir)
    output_dir = os.path.join(project_dir, 'preprocessed')
    output_final_dir = os.path.join(output_dir, 'final')
    output_interm_dir = os.path.join(output_dir, 'intermediate')
    log_dir = os.path.join(project_dir, 'logs', 'nipype')

    if not os.path.exists(output_final_dir):
        os.makedirs(output_final_dir)
    if not os.path.exists(output_interm_dir):
        os.makedirs(output_interm_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because setting workflow.config[...] directly doesn't seem to work
    # Can't store a nipype config/rc file in the container anyway, so set them globally before importing and setting up the workflow, as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config(
        {'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        }})
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    layout = BIDSLayout(data_dir)
    # Dartmouth subjects are named with the sub- prefix; handle whether we receive an integer identifier for indexing or the full subject id with prefix
    if isinstance(subject_id, six.string_types):
        subId = subject_id[4:]
    elif isinstance(subject_id, int):
        subId = layout.get_subjects()[subject_id]
        subject_id = 'sub-' + subId
    else:
        raise TypeError("subject_id should be a string or integer")

    #Get anat file location
    anat = layout.get(subject=subId, type='T1w',
                      extensions='.nii.gz')[0].filename

    #Get functional file locations
    if task_name:
        funcs = [
            f.filename for f in layout.get(subject=subId,
                                           type='bold',
                                           task=task_name,
                                           extensions='.nii.gz')
        ]
    else:
        funcs = [
            f.filename for f in layout.get(
                subject=subId, type='bold', extensions='.nii.gz')
        ]

    #Turn functional file list into iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    #Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        #Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        #Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from the side-car json file and json.load doesn't either; so instead just read the header using nibabel to determine number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        #Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        #Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        #Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    #For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    #For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result in some problems causing big chunks of cortex with little fine grain spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass
    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[4, 3, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    #Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    #Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    #Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    #Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = output_final_dir
    datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    workflow = Workflow(name=subId)
    workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads == 8:
        print(
            f"ANTs will utilize the default of {ants_threads} threads for parallel processing."
        )
    else:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
Exemplo n.º 20
0
from nipype.interfaces.fsl import SliceTimer, MCFLIRT, PlotMotionParams

func = "../../data/ds000171/sub-control01/func/sub-control01_task-music_run-1_bold.nii.gz"
func_bet = '../testdata/func_bet2.nii.gz'

# Perform slice timing correction
st = SliceTimer()
st.inputs.in_file = func
st.inputs.interleaved = True
st.inputs.time_repetition = 3
st.inputs.out_file = '../testdata/func_st.nii.gz'

st.run()

# Perform motion correction
mc = MCFLIRT()
mc.inputs.in_file = '../testdata/func_st.nii.gz'
mc.inputs.cost = 'mutualinfo'
mc.inputs.interpolation = 'sinc'
mc.inputs.save_mats = True
mc.inputs.save_plots = True
mc.inputs.mean_vol = True
mc.inputs.out_file = '../testdata/func_mc_st.gz'
mc.run()

# Plot motion parameters - saved as .png in the same directory

# Rotation
plotter_rot = PlotMotionParams()
plotter_rot.inputs.in_file = '../testdata/func_mc_st.gz.par'
plotter_rot.inputs.in_source = 'fsl'
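# Hedged completion (the original snippet is truncated here): PlotMotionParams needs a
# plot_type before it can run; 'rotations' matches the variable name above, and a second
# plotter for translations is sketched for completeness.
plotter_rot.inputs.plot_type = 'rotations'
plotter_rot.run()

plotter_trans = PlotMotionParams()
plotter_trans.inputs.in_file = '../testdata/func_mc_st.gz.par'
plotter_trans.inputs.in_source = 'fsl'
plotter_trans.inputs.plot_type = 'translations'
plotter_trans.run()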
Exemplo n.º 21
0
    def _run_interface(self, runtime):
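        # This method relies on module-level imports from the surrounding interface,
        # assumed here but not shown in this excerpt:
        #   import numpy as np
        #   import nibabel as nb
        #   from functools import partial
        #   from nipype.utils.filemanip import fname_presuffix
        #   from nipype.interfaces.base import isdefined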
        img = nb.load(self.inputs.in_file)

        # If reference is 3D, return it directly
        if img.dataobj.ndim == 3:
            self._results["out_file"] = self.inputs.in_file
            self._results["out_volumes"] = self.inputs.in_file
            self._results["out_drift"] = [1.0]
            return runtime

        fname = partial(fname_presuffix,
                        self.inputs.in_file,
                        newpath=runtime.cwd)

        # Slicing may induce inconsistencies with shape-dependent values in extensions.
        # For now, remove all. If this turns out to be a mistake, we can select extensions
        # that don't break pipeline stages.
        img.header.extensions.clear()
        img = nb.squeeze_image(img)

        # If reference was 4D, but single-volume - write out squeezed and return.
        if img.dataobj.ndim == 3:
            self._results["out_file"] = fname(suffix="_squeezed")
            img.to_filename(self._results["out_file"])
            self._results["out_volumes"] = self.inputs.in_file
            self._results["out_drift"] = [1.0]
            return runtime

        img_len = img.shape[3]
        t_mask = (self.inputs.t_mask
                  if isdefined(self.inputs.t_mask) else [True] * img_len)

        if len(t_mask) != img_len:
            raise ValueError(
                f"Image length ({img_len} timepoints) unmatched by mask ({len(t_mask)})"
            )

        n_volumes = sum(t_mask)
        if n_volumes < 1:
            raise ValueError(
                "At least one volume should be selected for slicing")

        self._results["out_file"] = fname(suffix="_average")
        self._results["out_volumes"] = fname(suffix="_sliced")

        sliced = nb.concat_images(
            i for i, t in zip(nb.four_to_three(img), t_mask) if t)

        data = sliced.get_fdata(dtype="float32")
        # Data can come with outliers showing very high numbers - preemptively prune
        data = np.clip(
            data,
            a_min=0.0 if self.inputs.nonnegative else np.percentile(data, 0.2),
            a_max=np.percentile(data, 99.8),
        )

        gs_drift = np.mean(data, axis=(0, 1, 2))
        gs_drift /= gs_drift.max()
        self._results["out_drift"] = [float(i) for i in gs_drift]

        data /= gs_drift
        data = np.clip(
            data,
            a_min=0.0 if self.inputs.nonnegative else data.min(),
            a_max=data.max(),
        )
        sliced.__class__(data, sliced.affine, sliced.header).to_filename(
            self._results["out_volumes"])

        if n_volumes == 1:
            nb.squeeze_image(sliced).to_filename(self._results["out_file"])
            self._results["out_drift"] = [1.0]
            return runtime

        if self.inputs.mc_method == "AFNI":
            from nipype.interfaces.afni import Volreg

            res = Volreg(
                in_file=self._results["out_volumes"],
                args="-Fourier -twopass",
                zpad=4,
                outputtype="NIFTI_GZ",
            ).run()
            # self._results["out_hmc"] = res.outputs.oned_matrix_save

        elif self.inputs.mc_method == "FSL":
            from nipype.interfaces.fsl import MCFLIRT

            res = MCFLIRT(
                in_file=self._results["out_volumes"],
                ref_vol=0,
                interpolation="sinc",
                save_mats=True,  # mat_file (read below) is only generated when save_mats is enabled
            ).run()
            self._results["out_hmc"] = res.outputs.mat_file

        if self.inputs.mc_method:
            data = nb.load(res.outputs.out_file).get_fdata(dtype="float32")

        data = np.clip(
            data,
            a_min=0.0 if self.inputs.nonnegative else data.min(),
            a_max=data.max(),
        )

        sliced.__class__(np.median(data, axis=3), sliced.affine,
                         sliced.header).to_filename(self._results["out_file"])
        return runtime
Exemplo n.º 22
0
subject_list = ['1']

# list of subject identifiers

fwhm = 8  # Smoothing widths to apply (Gaussian kernel size)
TR = 2  # Repetition time
init_volume = 0  # First volume index to use in the pipeline
iso_size = 2  # Isotropic voxel size (in mm) to resample functional images to

# ExtractROI - skip dummy scans
extract = Node(ExtractROI(t_min=init_volume, t_size=-1, output_type='NIFTI'),
               name="extract")

# MCFLIRT - motion correction
mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True, output_type='NIFTI'),
               name="motion_correction")

# SliceTimer - correct for slice wise acquisition
slicetimer = Node(SliceTimer(index_dir=False,
                             interleaved=True,
                             output_type='NIFTI',
                             time_repetition=TR),
                  name="slice_timing_correction")

# Smooth - image smoothing
smooth = Node(spm.Smooth(fwhm=fwhm), name="smooth")

# Artifact Detection - determines outliers in functional images
art = Node(ArtifactDetect(norm_threshold=2,
                          zintensity_threshold=3,
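                          # The remaining arguments are a hedged completion (the original
                          # snippet is truncated here); they mirror the fuller
                          # ArtifactDetect node configured later in this collection.
                          mask_type='spm_global',
                          parameter_source='FSL',
                          use_differences=[True, False],
                          plot_type='svg'),
           name="artifact_detection")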
Exemplo n.º 23
0
def run(base_dir):
    template = '/home/brainlab/Desktop/Rudas/Data/Parcellation/TPM.nii'
    matlab_cmd = '/home/brainlab/Desktop/Rudas/Tools/spm12_r7487/spm12/run_spm12.sh /home/brainlab/Desktop/Rudas/Tools/MCR/v713/ script'
    spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)

    print('SPM version: ' + str(spm.SPMCommand().version))

    structural_dir = '/home/brainlab/Desktop/Rudas/Data/Propofol/Structurals/'
    experiment_dir = opj(base_dir, 'output/')
    output_dir = 'datasink'
    working_dir = 'workingdir'
    '''

    subject_list = ['2014_05_02_02CB',
                    '2014_05_16_16RA',
                    '2014_05_30_30AQ',
                    '2014_07_04_04HD']
    '''
    subject_list = [
        '2014_05_02_02CB', '2014_05_16_16RA', '2014_05_30_30AQ',
        '2014_07_04_04HD', '2014_07_04_04SG', '2014_08_13_13CA',
        '2014_10_08_08BC', '2014_10_08_08VR', '2014_10_22_22CY',
        '2014_10_22_22TK', '2014_11_17_17EK', '2014_11_17_17NA',
        '2014_11_19_19SA', '2014_11_19_AK', '2014_11_25.25JK',
        '2014_11_27_27HF', '2014_12_10_10JR'
    ]

    # list of subject identifiers

    fwhm = 8  # Smoothing widths to apply (Gaussian kernel size)
    TR = 2  # Repetition time
    init_volume = 0  # First volume index to use in the pipeline
    iso_size = 2  # Isotropic voxel size (in mm) to resample functional images to

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=init_volume,
                              t_size=-1,
                              output_type='NIFTI'),
                   name="extract")

    # MCFLIRT - motion correction
    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="motion_correction")

    # SliceTimer - correct for slice wise acquisition
    slicetimer = Node(SliceTimer(index_dir=False,
                                 interleaved=True,
                                 output_type='NIFTI',
                                 time_repetition=TR),
                      name="slice_timing_correction")

    # Smooth - image smoothing
    smooth = Node(spm.Smooth(fwhm=fwhm), name="smooth")

    n4bias = Node(N4Bias(out_file='t1_n4bias.nii.gz'), name='n4bias')

    descomposition = Node(Descomposition(n_components=20,
                                         low_pass=0.1,
                                         high_pass=0.01,
                                         tr=TR),
                          name='descomposition')

    # Artifact Detection - determines outliers in functional images
    art = Node(ArtifactDetect(norm_threshold=2,
                              zintensity_threshold=3,
                              mask_type='spm_global',
                              parameter_source='FSL',
                              use_differences=[True, False],
                              plot_type='svg'),
               name="artifact_detection")

    extract_confounds_ws_csf = Node(
        ExtractConfounds(out_file='ev_without_gs.csv'),
        name='extract_confounds_ws_csf')

    extract_confounds_gs = Node(ExtractConfounds(out_file='ev_with_gs.csv',
                                                 delimiter=','),
                                name='extract_confounds_global_signal')

    signal_extraction = Node(SignalExtraction(
        time_series_out_file='time_series.csv',
        correlation_matrix_out_file='correlation_matrix.png',
        atlas_identifier='cort-maxprob-thr25-2mm',
        tr=TR,
        plot=True),
                             name='signal_extraction')

    art_remotion = Node(ArtifacRemotion(out_file='fmri_art_removed.nii'),
                        name='artifact_remotion')

    # BET - Skull-strip anatomical and functional images
    bet_t1 = Node(BET(frac=0.5, robust=True, mask=True,
                      output_type='NIFTI_GZ'),
                  name="bet_t1")

    # FAST - Image Segmentation
    segmentation = Node(FAST(output_type='NIFTI'), name="segmentation")

    # Normalize - normalizes functional and structural images to the MNI template
    normalize_fmri = Node(Normalize12(jobtype='estwrite',
                                      tpm=template,
                                      write_voxel_sizes=[2, 2, 2],
                                      write_bounding_box=[[-90, -126, -72],
                                                          [90, 90, 108]]),
                          name="normalize_fmri")

    gunzip = Node(Gunzip(), name="gunzip")

    normalize_t1 = Node(Normalize12(
        jobtype='estwrite',
        tpm=template,
        write_voxel_sizes=[iso_size, iso_size, iso_size],
        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                        name="normalize_t1")

    normalize_masks = Node(Normalize12(
        jobtype='estwrite',
        tpm=template,
        write_voxel_sizes=[iso_size, iso_size, iso_size],
        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                           name="normalize_masks")

    # Threshold - Threshold WM probability image
    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="wm_mask_threshold")

    # FLIRT - pre-alignment of functional images to anatomical images
    coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),
                     name="linear_warp_estimation")

    # FLIRT - coregistration of functional images to anatomical images with BBR
    coreg_bbr = Node(FLIRT(dof=6,
                           cost='bbr',
                           schedule=opj(os.getenv('FSLDIR'),
                                        'etc/flirtsch/bbr.sch'),
                           output_type='NIFTI_GZ'),
                     name="nonlinear_warp_estimation")

    # Apply coregistration warp to functional images
    applywarp = Node(FLIRT(interp='spline',
                           apply_isoxfm=iso_size,
                           output_type='NIFTI'),
                     name="registration_fmri")

    # Apply coregistration warp to mean file
    applywarp_mean = Node(FLIRT(interp='spline',
                                apply_isoxfm=iso_size,
                                output_type='NIFTI_GZ'),
                          name="registration_mean_fmri")

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    anat_file = opj(structural_dir, '{subject_id}', 't1.nii')
    func_file = opj('{subject_id}', 'fmri.nii')

    templates = {'anat': anat_file, 'func': func_file}

    selectfiles = Node(SelectFiles(templates, base_directory=base_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Create a coregistration workflow
    coregwf = Workflow(name='coreg_fmri_to_t1')
    coregwf.base_dir = opj(experiment_dir, working_dir)

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the coregistration workflow

    coregwf.connect([
        (bet_t1, n4bias, [('out_file', 'in_file')]),
        (n4bias, segmentation, [('out_file', 'in_files')]),
        (segmentation, threshold, [(('partial_volume_files', get_latest),
                                    'in_file')]),
        (n4bias, coreg_pre, [('out_file', 'reference')]),
        (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
        (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
        (n4bias, applywarp, [('out_file', 'reference')]),
        (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
        (n4bias, applywarp_mean, [('out_file', 'reference')]),
    ])

    ## Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-')]
    #                 ('_fwhm_', 'fwhm-'),
    #                 ('_roi', ''),
    #                 ('_mcf', ''),
    #                 ('_st', ''),
    #                 ('_flirt', ''),
    #                 ('.nii_mean_reg', '_mean'),
    #                 ('.nii.par', '.par'),
    #                 ]
    #subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]

    #substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions

    # Connect all components of the preprocessing workflow
    preproc.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id')]),
        (selectfiles, extract, [('func', 'in_file')]),
        (extract, mcflirt, [('roi_file', 'in_file')]),
        (mcflirt, slicetimer, [('out_file', 'in_file')]),
        (selectfiles, coregwf, [('anat', 'bet_t1.in_file'),
                                ('anat', 'nonlinear_warp_estimation.reference')
                                ]),
        (mcflirt, coregwf, [('mean_img', 'linear_warp_estimation.in_file'),
                            ('mean_img', 'nonlinear_warp_estimation.in_file'),
                            ('mean_img', 'registration_mean_fmri.in_file')]),
        (slicetimer, coregwf, [('slice_time_corrected_file',
                                'registration_fmri.in_file')]),
        (coregwf, art, [('registration_fmri.out_file', 'realigned_files')]),
        (mcflirt, art, [('par_file', 'realignment_parameters')]),
        (art, art_remotion, [('outlier_files', 'outlier_files')]),
        (coregwf, art_remotion, [('registration_fmri.out_file', 'in_file')]),
        (coregwf, gunzip, [('n4bias.out_file', 'in_file')]),
        (selectfiles, normalize_fmri, [('anat', 'image_to_align')]),
        (art_remotion, normalize_fmri, [('out_file', 'apply_to_files')]),
        (selectfiles, normalize_t1, [('anat', 'image_to_align')]),
        (gunzip, normalize_t1, [('out_file', 'apply_to_files')]),
        (selectfiles, normalize_masks, [('anat', 'image_to_align')]),
        (coregwf, normalize_masks, [(('segmentation.partial_volume_files',
                                      get_wm_csf), 'apply_to_files')]),
        (normalize_fmri, smooth, [('normalized_files', 'in_files')]),
        (smooth, extract_confounds_ws_csf, [('smoothed_files', 'in_file')]),
        (normalize_masks, extract_confounds_ws_csf, [('normalized_files',
                                                      'list_mask')]),
        (mcflirt, extract_confounds_ws_csf, [('par_file', 'file_concat')]),

        #(smooth, extract_confounds_gs, [('smoothed_files', 'in_file')]),
        #(normalize_t1, extract_confounds_gs, [(('normalized_files',change_to_list), 'list_mask')]),
        #(extract_confounds_ws_csf, extract_confounds_gs, [('out_file', 'file_concat')]),
        (smooth, signal_extraction, [('smoothed_files', 'in_file')]),
        #(extract_confounds_gs, signal_extraction, [('out_file', 'confounds_file')]),
        (extract_confounds_ws_csf, signal_extraction, [('out_file',
                                                        'confounds_file')]),

        #(smooth, descomposition, [('smoothed_files', 'in_file')]),
        #(extract_confounds_ws_csf, descomposition, [('out_file', 'confounds_file')]),

        #(extract_confounds_gs, datasink, [('out_file', 'preprocessing.@confounds_with_gs')]),
        (extract_confounds_ws_csf, datasink,
         [('out_file', 'preprocessing.@confounds_without_gs')]),
        (smooth, datasink, [('smoothed_files', 'preprocessing.@smoothed')]),
        (normalize_fmri, datasink, [('normalized_files',
                                     'preprocessing.@fmri_normalized')]),
        (normalize_t1, datasink, [('normalized_files',
                                   'preprocessing.@t1_normalized')]),
        (normalize_masks, datasink, [('normalized_files',
                                      'preprocessing.@masks_normalized')]),
        (signal_extraction, datasink, [('time_series_out_file',
                                        'preprocessing.@time_serie')]),
        (signal_extraction, datasink, [('correlation_matrix_out_file',
                                        'preprocessing.@correlation_matrix')]),
        (signal_extraction, datasink,
         [('fmri_cleaned_out_file', 'preprocessing.@fmri_cleaned_out_file')]),
        #(descomposition, datasink, [('out_file', 'preprocessing.@descomposition')]),
        #(descomposition, datasink, [('plot_files', 'preprocessing.@descomposition_plot_files')])
    ])

    preproc.write_graph(graph2use='colored', format='png', simple_form=True)
    preproc.run()
Exemplo n.º 24
0
def preproc(data_dir, sink_dir, subject, task, session, run, masks,
            motion_thresh, moco):
    from nipype.interfaces.fsl import MCFLIRT, FLIRT, FNIRT, ExtractROI, ApplyWarp, MotionOutliers, InvWarp, FAST
    #from nipype.interfaces.afni import AlignEpiAnatPy
    from nipype.interfaces.utility import Function
    from nilearn.plotting import plot_anat
    from nilearn import input_data

    # WRITE A DATA GRABBER
    def get_niftis(subject_id, data_dir, task, run, session):
        from os.path import join, exists
        t1 = join(data_dir, subject_id, 'session-{0}'.format(session),
                  'anatomical', 'anatomical-0', 'anatomical.nii.gz')
        #t1_brain_mask = join(data_dir, subject_id, 'session-1', 'anatomical', 'anatomical-0', 'fsl', 'anatomical-bet.nii.gz')
        epi = join(data_dir, subject_id, 'session-{0}'.format(session), task,
                   '{0}-{1}'.format(task, run), '{0}.nii.gz'.format(task))
        assert exists(t1), "t1 does not exist at {0}".format(t1)
        assert exists(epi), "epi does not exist at {0}".format(epi)
        standard = '/home/applications/fsl/5.0.8/data/standard/MNI152_T1_2mm.nii.gz'
        return t1, epi, standard

    data = Function(
        function=get_niftis,
        input_names=["subject_id", "data_dir", "task", "run", "session"],
        output_names=["t1", "epi", "standard"])
    data.inputs.data_dir = data_dir
    data.inputs.subject_id = subject
    data.inputs.run = run
    data.inputs.session = session
    data.inputs.task = task
    grabber = data.run()

    if session == 0:
        sesh = 'pre'
    if session == 1:
        sesh = 'post'

    #reg_dir = '/home/data/nbc/physics-learning/data/first-level/{0}/session-1/retr/retr-{1}/retr-5mm.feat/reg'.format(subject, run)
    #set output paths for quality assurance pngs
    qa1 = join(
        sink_dir, 'qa',
        '{0}-session-{1}_{2}-{3}_t1_flirt.png'.format(subject, session, task,
                                                      run))
    qa2 = join(
        sink_dir, 'qa',
        '{0}-session-{1}_{2}-{3}_mni_flirt.png'.format(subject, session, task,
                                                       run))
    qa3 = join(
        sink_dir, 'qa',
        '{0}-session-{1}_{2}-{3}_mni_fnirt.png'.format(subject, session, task,
                                                       run))
    confound_file = join(
        sink_dir, sesh, subject,
        '{0}-session-{1}_{2}-{3}_confounds.txt'.format(subject, session, task,
                                                       run))

    #run motion correction if indicated
    if moco == True:
        mcflirt = MCFLIRT(ref_vol=144, save_plots=True, output_type='NIFTI_GZ')
        mcflirt.inputs.in_file = grabber.outputs.epi
        #mcflirt.inputs.in_file = join(data_dir, subject, 'session-1', 'retr', 'retr-{0}'.format(run), 'retr.nii.gz')
        mcflirt.inputs.out_file = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_mcf.nii.gz'.format(
                subject, session, task, run))
        flirty = mcflirt.run()
        motion = np.genfromtxt(flirty.outputs.par_file)
    else:
        print "no moco needed"
        motion = 0

    #calculate motion outliers
    try:
        mout = MotionOutliers(metric='fd', threshold=motion_thresh)
        mout.inputs.in_file = grabber.outputs.epi
        mout.inputs.out_file = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_fd-gt-{4}mm'.format(
                subject, session, task, run, motion_thresh))
        mout.inputs.out_metric_plot = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_metrics.png'.format(
                subject, session, task, run))
        mout.inputs.out_metric_values = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_fd.txt'.format(subject, session, task,
                                                    run))
        moutliers = mout.run()
        outliers = np.genfromtxt(moutliers.outputs.out_file)
        e = 'no errors in motion outliers, yay'
    except Exception as e:
        print(e)
        outliers = np.genfromtxt(mout.inputs.out_metric_values)
        #set everything above the threshold to 1 and everything below to 0
        outliers[outliers > motion_thresh] = 1
        outliers[outliers < motion_thresh] = 0

    # build the confounds file (currently just the motion outliers; the motion parameters are not concatenated here)

    #outliers = outliers.reshape((outliers.shape[0],1))
    conf = outliers
    np.savetxt(confound_file, conf, delimiter=',')

    #extract an example volume for normalization
    ex_fun = ExtractROI(t_min=144, t_size=1)
    ex_fun.inputs.in_file = flirty.outputs.out_file
    ex_fun.inputs.roi_file = join(
        sink_dir, sesh, subject,
        '{0}-session-{1}_{2}-{3}-example_func.nii.gz'.format(
            subject, session, task, run))
    fun = ex_fun.run()

    warp = ApplyWarp(interp="nn", abswarp=True)

    if not exists(
            '/home/data/nbc/physics-learning/data/first-level/{0}/session-{1}/{2}/{2}-{3}/{2}-5mm.feat/reg/example_func2standard_warp.nii.gz'
            .format(subject, session, task, run)):
        #two-step normalization using flirt and fnirt, outputting qa pix
        flit = FLIRT(cost_func="corratio", dof=12)
        reg_func = flit.run(
            reference=fun.outputs.roi_file,
            in_file=grabber.outputs.t1,
            searchr_x=[-180, 180],
            searchr_y=[-180, 180],
            out_file=join(
                sink_dir, sesh, subject,
                '{0}-session-{1}_{2}-{3}_t1-flirt.nii.gz'.format(
                    subject, session, task, run)),
            out_matrix_file=join(
                sink_dir, sesh, subject,
                '{0}-session-{1}_{2}-{3}_t1-flirt.mat'.format(
                    subject, session, task, run)))
        reg_mni = flit.run(
            reference=grabber.outputs.t1,
            in_file=grabber.outputs.standard,
            searchr_y=[-180, 180],
            searchr_z=[-180, 180],
            out_file=join(
                sink_dir, sesh, subject,
                '{0}-session-{1}_{2}-{3}_mni-flirt-t1.nii.gz'.format(
                    subject, session, task, run)),
            out_matrix_file=join(
                sink_dir, sesh, subject,
                '{0}-session-{1}_{2}-{3}_mni-flirt-t1.mat'.format(
                    subject, session, task, run)))

        #plot_stat_map(aligner.outputs.out_file, bg_img=fun.outputs.roi_file, colorbar=True, draw_cross=False, threshold=1000, output_file=qa1a, dim=-2)
        display = plot_anat(fun.outputs.roi_file, dim=-1)
        display.add_edges(reg_func.outputs.out_file)
        display.savefig(qa1, dpi=300)
        display.close()

        display = plot_anat(grabber.outputs.t1, dim=-1)
        display.add_edges(reg_mni.outputs.out_file)
        display.savefig(qa2, dpi=300)
        display.close()

        perf = FNIRT(output_type='NIFTI_GZ')
        perf.inputs.warped_file = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_mni-fnirt-t1.nii.gz'.format(
                subject, session, task, run))
        perf.inputs.affine_file = reg_mni.outputs.out_matrix_file
        perf.inputs.in_file = grabber.outputs.standard
        perf.inputs.subsampling_scheme = [8, 4, 2, 2]
        perf.inputs.fieldcoeff_file = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_mni-fnirt-t1-warpcoeff.nii.gz'.format(
                subject, session, task, run))
        perf.inputs.field_file = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_mni-fnirt-t1-warp.nii.gz'.format(
                subject, session, task, run))
        perf.inputs.ref_file = grabber.outputs.t1
        reg2 = perf.run()
        warp.inputs.field_file = reg2.outputs.field_file
        #plot fnirted MNI overlaid on example func
        display = plot_anat(grabber.outputs.t1, dim=-1)
        display.add_edges(reg2.outputs.warped_file)
        display.savefig(qa3, dpi=300)
        display.close()
    else:
        warpspeed = InvWarp(output_type='NIFTI_GZ')
        warpspeed.inputs.warp = '/home/data/nbc/physics-learning/data/first-level/{0}/session-{1}/{2}/{2}-{3}/{2}-5mm.feat/reg/example_func2standard_warp.nii.gz'.format(
            subject, session, task, run)
        warpspeed.inputs.reference = fun.outputs.roi_file
        warpspeed.inputs.inverse_warp = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_mni-fnirt-t1-warp.nii.gz'.format(
                subject, session, task, run))
        mni2epiwarp = warpspeed.run()
        warp.inputs.field_file = mni2epiwarp.outputs.inverse_warp

    for key in masks.keys():
        #warp takes us from mni to epi
        warp.inputs.in_file = masks[key]
        warp.inputs.ref_file = fun.outputs.roi_file
        warp.inputs.out_file = join(
            sink_dir, sesh, subject,
            '{0}-session-{1}_{2}-{3}_{4}.nii.gz'.format(
                subject, session, task, run, key))
        net_warp = warp.run()

        qa_file = join(
            sink_dir, 'qa', '{0}-session-{1}_{2}-{3}_qa_{4}.png'.format(
                subject, session, task, run, key))

        display = plotting.plot_roi(net_warp.outputs.out_file,
                                    bg_img=fun.outputs.roi_file,
                                    colorbar=True,
                                    vmin=0,
                                    vmax=18,
                                    draw_cross=False)
        display.savefig(qa_file, dpi=300)
        display.close()

    return flirty.outputs.out_file, confound_file, e
experiment_dir = '/home/luiscp/Documents/Data/ADRC_90Plus/output'
output_dir = 'dwi_analysis'

subject_list = [
    '233', '234', '235', '236', '237', '238', '239', '240', '242', '243',
    '244', '245', '246', '248', '249', '250', '251', '253', '254', '256',
    '257', '259'
]
#subject_list = ['259']
###############################
#specify nodes
###############################

#Motion Correction (FSL)
motioncor = Node(MCFLIRT(output_type=u'NIFTI_GZ',
                         interpolation=u'spline',
                         mean_vol=True,
                         cost=u'mutualinfo'),
                 name="motioncor")

#Smoothing (FSL)
smooth = Node(SUSAN(fwhm=1.0, output_type=u'NIFTI_GZ',
                    brightness_threshold=10),
              name="smooth")

#Skull remove (FSL)
skullstrip = Node(BET(frac=0.4,
                      output_type=u'NIFTI_GZ',
                      robust=True,
                      mask=True),
                  name="skullstrip")
Exemplo n.º 26
0
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of the subject folder, used as the final output sub-folder name
        subId: abbreviated subject name, used as the intermediate output sub-folder name
        project_dir: full path to the root of the project
        data_dir: full path to the raw data files
        output_dir: upper-level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocessing sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = int(session)
        if session < 10:
            session = '0' + str(session)
        else:
            session = str(session)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update the nipype global config because workflow.config[] = ... doesn't seem to work
    # We can't store a nipype config/rc file in the container anyway, so set these globally before importing and setting up the workflow, as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create subject's intermediate directory before configuring nipype and the workflow because that's where we'll save log files in addition to intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn functional file list into interable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from the side-car json file, and json.load doesn't either; so instead just read the header with nibabel to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, )]  # one entry per transform (only 'Rigid' here)
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result in some problems, causing big chunks of cortex with little fine-grained spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # how ANTs samples voxels when computing the similarity metric; 'Regular' draws an approximately uniform subset of voxels rather than using every voxel

    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shrink_factors and smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')
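    # Explanatory note (added): in1 receives the anat->MNI composite transform and in2 the
    # EPI->anat composite (see the connections below). antsApplyTransforms applies the
    # transform list in reverse order of specification, so the EPI->anat transform is
    # applied first, then anat->MNI, taking the realigned EPI straight to MNI space.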

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions provide the full path to the subject's intermediate directory
    # and only rely on workflow init to create the session container *within* that directory
    # Otherwise just point to the intermediate directory and let the workflow init create the subject container within the intermediate directory
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
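
# A minimal, hypothetical driver for the workflow built above. The builder name
# `build_preproc_workflow` and its keyword arguments are assumptions used for
# illustration only; they are not defined in the code shown here.
if __name__ == '__main__':
    wf = build_preproc_workflow(subject_id='sub-01',
                                output_dir='/data/derivatives',
                                ants_threads=8,
                                apply_trim=True,
                                apply_n4=True,
                                apply_filter=True,
                                apply_smooth=True)
    wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})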
Example No. 27
    def run(self):
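        """Build and run the full preprocessing pipeline for self.subject_list.

        Hypothetical usage sketch (the class name and the exact attribute
        layout are assumptions inferred from the self.paths / self.parameters
        accesses below; adjust to the real class)::

            pipe = Preprocessing()
            pipe.subject_list = ['sub-01']
            pipe.paths = {
                'spm_path': '/opt/spm12/run_spm12.sh',
                'mcr_path': '/opt/mcr/v97',
                'input_path': '/data/study',
                't1_relative_path': 'anat/T1w.nii',
                'fmri_relative_path': 'func/bold.nii',
                'template_spm_path': '/opt/spm12/tpm/TPM.nii',
                'labels_parcellation_path': '/data/atlas/labels.csv',
                'mask_mni_path': '/data/atlas/mask_mni.nii.gz',
                'image_parcellation_path': ['/data/atlas/parcellation.nii.gz'],
            }
            pipe.parameters = {'fwhm': 6, 'tr': 2.0, 'init_volume': 4,
                               'iso_size': 2, 'low_pass': 0.1,
                               'high_pass': 0.01}
            pipe.run()
        """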
        matlab_cmd = self.paths['spm_path'] + ' ' + self.paths[
            'mcr_path'] + '/ script'
        spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)
        print(matlab_cmd)
        print('SPM version: ' + str(spm.SPMCommand().version))

        experiment_dir = opj(self.paths['input_path'], 'output/')
        output_dir = 'datasink'
        working_dir = 'workingdir'

        subject_list = self.subject_list  # list of subject identifiers

        fwhm = self.parameters[
            'fwhm']  # Smoothing width(s) to apply (Gaussian kernel FWHM)
        tr = self.parameters['tr']  # Repetition time
        init_volume = self.parameters[
            'init_volume']  # First volume to keep; earlier (dummy) volumes are dropped
        iso_size = self.parameters[
            'iso_size']  # Isotropic voxel size (in mm) for resampling the functional images
        low_pass = self.parameters['low_pass']
        high_pass = self.parameters['high_pass']
        t1_relative_path = self.paths['t1_relative_path']
        fmri_relative_path = self.paths['fmri_relative_path']

        # ExtractROI - skip dummy scans
        extract = Node(ExtractROI(t_min=init_volume,
                                  t_size=-1,
                                  output_type='NIFTI'),
                       name="extract")  #FSL

        # MCFLIRT - motion correction
        mcflirt = Node(MCFLIRT(mean_vol=True,
                               save_plots=True,
                               output_type='NIFTI'),
                       name="motion_correction")  #FSL

        # SliceTimer - correct for slice wise acquisition
        slicetimer = Node(SliceTimer(index_dir=False,
                                     interleaved=True,
                                     output_type='NIFTI',
                                     time_repetition=tr),
                          name="slice_timing_correction")  #FSL

        # Denoise - denoise the anatomical image
        denoise = Node(Denoise(), name="denoising")  #Interface with dipy

        # Smooth - image smoothing
        smooth = Node(spm.Smooth(fwhm=fwhm), name="smooth")  #SPM

        n4bias = Node(N4Bias(out_file='t1_n4bias.nii.gz'),
                      name='n4bias')  #Interface with SimpleITK

        descomposition = Node(Descomposition(n_components=20,
                                             low_pass=0.1,
                                             high_pass=0.01,
                                             tr=tr),
                              name='descomposition')  #Interface with nilearn

        # Artifact Detection - determines outliers in functional images
        art = Node(ArtifactDetect(norm_threshold=2,
                                  zintensity_threshold=3,
                                  mask_type='spm_global',
                                  parameter_source='FSL',
                                  use_differences=[True, False],
                                  plot_type='svg'),
                   name="artifact_detection")  #Rapidart

        extract_confounds_ws_csf = Node(
            ExtractConfounds(out_file='ev_without_gs.csv'),
            name='extract_confounds_ws_csf')  #Interface

        extract_confounds_gs = Node(ExtractConfounds(out_file='ev_with_gs.csv',
                                                     delimiter=','),
                                    name='extract_confounds_global_signal')

        signal_extraction = Node(SignalExtraction(
            time_series_out_file='time_series.csv',
            correlation_matrix_out_file='correlation_matrix.png',
            labels_parcellation_path=self.paths['labels_parcellation_path'],
            mask_mni_path=self.paths['mask_mni_path'],
            tr=tr,
            low_pass=low_pass,
            high_pass=high_pass,
            plot=False),
                                 name='signal_extraction')
        signal_extraction.iterables = [('image_parcellation_path',
                                        self.paths['image_parcellation_path'])]

        art_remotion = Node(
            ArtifacRemotion(out_file='fmri_art_removed.nii'),
            name='artifact_remotion')  #This interface requires implementation

        # BET - Skull-strip the anatomical image
        bet_t1 = Node(BET(frac=0.5,
                          robust=True,
                          mask=True,
                          output_type='NIFTI_GZ'),
                      name="bet_t1")  #FSL

        # FAST - Image Segmentation
        segmentation = Node(FAST(output_type='NIFTI'),
                            name="segmentation")  #FSL

        # Normalize - normalizes functional and structural images to the MNI template
        normalize_fmri = Node(Normalize12(
            jobtype='estwrite',
            tpm=self.paths['template_spm_path'],
            write_voxel_sizes=[iso_size, iso_size, iso_size],
            write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                              name="normalize_fmri")  #SPM

        gunzip = Node(Gunzip(), name="gunzip")

        normalize_t1 = Node(Normalize12(
            jobtype='estwrite',
            tpm=self.paths['template_spm_path'],
            write_voxel_sizes=[iso_size, iso_size, iso_size],
            write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                            name="normalize_t1")

        normalize_masks = Node(Normalize12(
            jobtype='estwrite',
            tpm=self.paths['template_spm_path'],
            write_voxel_sizes=[iso_size, iso_size, iso_size],
            write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                               name="normalize_masks")

        # Threshold - Threshold WM probability image
        threshold = Node(Threshold(thresh=0.5,
                                   args='-bin',
                                   output_type='NIFTI_GZ'),
                         name="wm_mask_threshold")

        # FLIRT - pre-alignment of functional images to anatomical images
        coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),
                         name="linear_warp_estimation")

        # FLIRT - coregistration of functional images to anatomical images with BBR
        coreg_bbr = Node(FLIRT(dof=6,
                               cost='bbr',
                               schedule=opj(os.getenv('FSLDIR'),
                                            'etc/flirtsch/bbr.sch'),
                               output_type='NIFTI_GZ'),
                         name="nonlinear_warp_estimation")

        # Apply coregistration warp to functional images
        applywarp = Node(FLIRT(interp='spline',
                               apply_isoxfm=iso_size,
                               output_type='NIFTI'),
                         name="registration_fmri")

        # Apply coregistration warp to mean file
        applywarp_mean = Node(FLIRT(interp='spline',
                                    apply_isoxfm=iso_size,
                                    output_type='NIFTI_GZ'),
                              name="registration_mean_fmri")

        # Infosource - a function-free node to iterate over the list of subject names
        infosource = Node(IdentityInterface(fields=['subject_id']),
                          name="infosource")
        infosource.iterables = [('subject_id', subject_list)]

        # SelectFiles - to grab the data (alternative to DataGrabber)
        anat_file = opj('{subject_id}', t1_relative_path)
        func_file = opj('{subject_id}', fmri_relative_path)

        #anat_file = opj('{subject_id}/anat/', 'data.nii')
        #func_file = opj('{subject_id}/func/', 'data.nii')

        templates = {'anat': anat_file, 'func': func_file}

        selectfiles = Node(SelectFiles(
            templates, base_directory=self.paths['input_path']),
                           name="selectfiles")

        # Datasink - creates output folder for important outputs
        datasink = Node(DataSink(base_directory=experiment_dir,
                                 container=output_dir),
                        name="datasink")

        # Create a coregistration workflow
        coregwf = Workflow(name='coreg_fmri_to_t1')
        coregwf.base_dir = opj(experiment_dir, working_dir)

        # Create a preprocessing workflow
        preproc = Workflow(name='preproc')
        preproc.base_dir = opj(experiment_dir, working_dir)

        # Connect all components of the coregistration workflow

        coregwf.connect([
            (bet_t1, n4bias, [('out_file', 'in_file')]),
            (n4bias, segmentation, [('out_file', 'in_files')]),
            (segmentation, threshold, [(('partial_volume_files', get_latest),
                                        'in_file')]),
            (n4bias, coreg_pre, [('out_file', 'reference')]),
            (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
            (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
            (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
            (n4bias, applywarp, [('out_file', 'reference')]),
            (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')
                                         ]),
            (n4bias, applywarp_mean, [('out_file', 'reference')]),
        ])
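
        # Coregistration chain: the 6-dof FLIRT pre-alignment seeds the BBR
        # registration, which is driven by a white-matter mask made by
        # thresholding the last FAST partial-volume map (selected by
        # get_latest, typically WM) at 0.5. The resulting matrix resamples
        # both the slice-time-corrected series and the MCFLIRT mean image
        # into the bias-corrected T1 space at iso_size mm.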

        ## Use the following DataSink output substitutions
        substitutions = [('_subject_id_', 'sub-')]
        #                 ('_fwhm_', 'fwhm-'),
        #                 ('_roi', ''),
        #                 ('_mcf', ''),
        #                 ('_st', ''),
        #                 ('_flirt', ''),
        #                 ('.nii_mean_reg', '_mean'),
        #                 ('.nii.par', '.par'),
        #                 ]
        # subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]

        # substitutions.extend(subjFolders)
        datasink.inputs.substitutions = substitutions

        # Connect all components of the preprocessing workflow
        preproc.connect([
            (infosource, selectfiles, [('subject_id', 'subject_id')]),
            (selectfiles, extract, [('func', 'in_file')]),
            (extract, mcflirt, [('roi_file', 'in_file')]),
            (mcflirt, slicetimer, [('out_file', 'in_file')]),
            (selectfiles, denoise, [('anat', 'in_file')]),
            (denoise, coregwf, [('out_file', 'bet_t1.in_file'),
                                ('out_file',
                                 'nonlinear_warp_estimation.reference')]),
            (mcflirt, coregwf,
             [('mean_img', 'linear_warp_estimation.in_file'),
              ('mean_img', 'nonlinear_warp_estimation.in_file'),
              ('mean_img', 'registration_mean_fmri.in_file')]),
            (slicetimer, coregwf, [('slice_time_corrected_file',
                                    'registration_fmri.in_file')]),
            (coregwf, art, [('registration_fmri.out_file', 'realigned_files')
                            ]),
            (mcflirt, art, [('par_file', 'realignment_parameters')]),
            (art, art_remotion, [('outlier_files', 'outlier_files')]),
            (coregwf, art_remotion, [('registration_fmri.out_file', 'in_file')
                                     ]),
            (coregwf, gunzip, [('n4bias.out_file', 'in_file')]),
            (selectfiles, normalize_fmri, [('anat', 'image_to_align')]),
            (art_remotion, normalize_fmri, [('out_file', 'apply_to_files')]),
            (selectfiles, normalize_t1, [('anat', 'image_to_align')]),
            (gunzip, normalize_t1, [('out_file', 'apply_to_files')]),
            (selectfiles, normalize_masks, [('anat', 'image_to_align')]),
            (coregwf, normalize_masks, [(('segmentation.partial_volume_files',
                                          get_wm_csf), 'apply_to_files')]),
            (normalize_fmri, smooth, [('normalized_files', 'in_files')]),
            (smooth, extract_confounds_ws_csf, [('smoothed_files', 'in_file')
                                                ]),
            (normalize_masks, extract_confounds_ws_csf, [('normalized_files',
                                                          'list_mask')]),
            (mcflirt, extract_confounds_ws_csf, [('par_file', 'file_concat')]),
            (art, extract_confounds_ws_csf, [('outlier_files', 'outlier_files')
                                             ]),

            # (smooth, extract_confounds_gs, [('smoothed_files', 'in_file')]),
            # (normalize_t1, extract_confounds_gs, [(('normalized_files',change_to_list), 'list_mask')]),
            # (extract_confounds_ws_csf, extract_confounds_gs, [('out_file', 'file_concat')]),
            (smooth, signal_extraction, [('smoothed_files', 'in_file')]),
            # (extract_confounds_gs, signal_extraction, [('out_file', 'confounds_file')]),
            (extract_confounds_ws_csf, signal_extraction,
             [('out_file', 'confounds_file')]),

            #(smooth, descomposition, [('smoothed_files', 'in_file')]),
            #(extract_confounds_ws_csf, descomposition, [('out_file', 'confounds_file')]),

            # (extract_confounds_gs, datasink, [('out_file', 'preprocessing.@confounds_with_gs')]),
            (denoise, datasink, [('out_file', 'preprocessing.@t1_denoised')]),
            (extract_confounds_ws_csf, datasink,
             [('out_file', 'preprocessing.@confounds_without_gs')]),
            (smooth, datasink, [('smoothed_files', 'preprocessing.@smoothed')
                                ]),
            (normalize_fmri, datasink, [('normalized_files',
                                         'preprocessing.@fmri_normalized')]),
            (normalize_t1, datasink, [('normalized_files',
                                       'preprocessing.@t1_normalized')]),
            (normalize_masks, datasink, [('normalized_files',
                                          'preprocessing.@masks_normalized')]),
            (signal_extraction, datasink, [('time_series_out_file',
                                            'preprocessing.@time_serie')]),
            (signal_extraction, datasink,
             [('correlation_matrix_out_file',
               'preprocessing.@correlation_matrix')])
        ])
        #(signal_extraction, datasink,
        # [('fmri_cleaned_out_file', 'preprocessing.@fmri_cleaned_out_file')])])
        #,
        #(descomposition, datasink, [('out_file', 'preprocessing.@descomposition')]),
        #(descomposition, datasink, [('plot_files', 'preprocessing.@descomposition_plot_files')])
        #])

        preproc.write_graph(graph2use='colored',
                            format='png',
                            simple_form=True)
        preproc.run()