Example #1
def regmotoasl(asl, m0file, m02asl):
    from nipype.interfaces import fsl
    from nipype.utils.filemanip import fname_presuffix

    # Temporal mean of the ASL series
    meanasl = fsl.MeanImage()
    meanasl.inputs.in_file = asl
    meanasl.inputs.out_file = fname_presuffix(asl, suffix='_meanasl')
    meanasl.run()

    # Temporal mean of the M0 scan
    meanm0 = fsl.MeanImage()
    meanm0.inputs.in_file = m0file
    meanm0.inputs.out_file = fname_presuffix(m0file, suffix='_meanm0')
    meanm0.run()

    # FLIRT registration of the M0 mean onto the ASL mean (mutual information)
    flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt.inputs.in_file = meanm0.inputs.out_file
    flt.inputs.reference = meanasl.inputs.out_file
    flt.inputs.out_file = m02asl
    flt.run()
    return m02asl
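
A minimal sketch of how this function might be called, with placeholder file paths:

# Hypothetical usage; all paths are placeholders.
registered = regmotoasl(asl='sub-01_asl.nii.gz',
                        m0file='sub-01_m0scan.nii.gz',
                        m02asl='sub-01_m02asl_flirt.nii.gz')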
Example #2
def epi_sbref_registration(name='EPI_SBrefRegistration'):
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['epi_brain', 'sbref_brain']),
        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['epi_registered', 'out_mat']),
        name='outputnode')

    mean = pe.Node(fsl.MeanImage(dimension='T'), name='EPImean')
    inu = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='EPImeanBias')
    epi_sbref = pe.Node(fsl.FLIRT(dof=6, out_matrix_file='init.mat'),
                        name='EPI2SBRefRegistration')

    epi_split = pe.Node(fsl.Split(dimension='t'), name='EPIsplit')
    epi_xfm = pe.MapNode(fsl.ApplyXfm(),
                         name='EPIapplyxfm',
                         iterfield=['in_file'])
    epi_merge = pe.Node(fsl.Merge(dimension='t'), name='EPImergeback')
    workflow.connect([
        (inputnode, epi_split, [('epi_brain', 'in_file')]),
        (inputnode, epi_sbref, [('sbref_brain', 'reference')]),
        (inputnode, epi_xfm, [('sbref_brain', 'reference')]),
        (inputnode, mean, [('epi_brain', 'in_file')]),
        (mean, inu, [('out_file', 'input_image')]),
        (inu, epi_sbref, [('output_image', 'in_file')]),
        (epi_split, epi_xfm, [('out_files', 'in_file')]),
        (epi_sbref, epi_xfm, [('out_matrix_file', 'in_matrix_file')]),
        (epi_xfm, epi_merge, [('out_file', 'in_files')]),
        (epi_sbref, outputnode, [('out_matrix_file', 'out_mat')]),
        (epi_merge, outputnode, [('merged_file', 'epi_registered')])
    ])
    return workflow
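
A minimal usage sketch for this workflow factory; file names and the work directory are placeholders:

# Hypothetical usage.
wf = epi_sbref_registration()
wf.inputs.inputnode.epi_brain = 'epi_brain.nii.gz'
wf.inputs.inputnode.sbref_brain = 'sbref_brain.nii.gz'
wf.base_dir = '/tmp/work'
wf.run()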
Example #3
def getMean(input_file, appendix):
    output_file = os.path.join(os.path.dirname(input_file),
                               appendix + '.nii.gz')
    myMean = fsl.MeanImage(in_file=input_file, out_file=output_file)
    print(myMean.cmdline)
    myMean.run()
    return output_file
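
Given an input image and an appendix string, the mean image is written next to the input; a hypothetical call:

# Hypothetical usage; returns '/data/sub-01/mean_func.nii.gz'.
mean_path = getMean('/data/sub-01/func.nii.gz', 'mean_func')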
Example #4
def measure_sim(image, ses, sub, trial, modality, reference):
    file_data = {}
    file_data["path"] = image
    file_data["ses"] = ses
    file_data["sub"] = sub
    file_data["trial"] = trial

    if modality in ("func", "dwi"):
        temporal_mean = fsl.MeanImage()
        temporal_mean.inputs.in_file = image
        # hashlib.md5 needs bytes under Python 3, hence .encode()
        temporal_mean.inputs.out_file = "/tmp/" + hashlib.md5(
            image.encode()).hexdigest()[:8] + ".nii.gz"
        temporal_mean_res = temporal_mean.run()
        image = temporal_mean_res.outputs.out_file

    sim = ants.MeasureImageSimilarity()
    sim.inputs.dimension = 3
    sim.inputs.metric = 'MI'
    sim.inputs.fixed_image = reference
    sim.inputs.moving_image = image
    sim.inputs.metric_weight = 1.0
    sim.inputs.radius_or_number_of_bins = 8
    sim.inputs.sampling_strategy = 'None'
    sim.inputs.sampling_percentage = 0.3
    #sim.inputs.fixed_image_mask = 'mask.nii'
    #sim.inputs.moving_image_mask = 'mask.nii.gz'
    sim_res = sim.run()
    file_data["similarity"] = sim_res.outputs.similarity

    return file_data
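
A usage sketch with placeholder arguments; for 4D 'func' or 'dwi' images the function first collapses the series to its temporal mean before scoring:

# Hypothetical usage.
result = measure_sim('/data/sub-01_ses-1_bold.nii.gz', ses='1', sub='01',
                     trial='rest', modality='func',
                     reference='/templates/template.nii.gz')
print(result['similarity'])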
Example #5
def main(sourcedata,
         derivatives,
         subject,
         session,
         run,
         wf_dir):

    layout = BIDSLayout(sourcedata)

    bolds = layout.get(subject=subject,
                       session=session,
                       run=run,
                       suffix='bold',
                       return_type='file')

    for bold in bolds:
        print('Making reference image of {}'.format(bold))

    inputnode = pe.Node(niu.IdentityInterface(fields=['bold']),
                        name='inputnode')
    inputnode.inputs.bold = bolds

    wf = pe.Workflow(name='make_ref_{}_{}_{}'.format(subject,
                                                     session,
                                                     run))

    wf.base_dir = wf_dir

    mc_wf_bold = create_motion_correction_workflow(name='mc_wf_bold',
                                                   method='FSL',
                                                   lightweight=True)

    wf.connect(inputnode, 'bold', mc_wf_bold, 'inputspec.in_files')
    wf.connect(inputnode, ('bold', pickfirst), mc_wf_bold, 'inputspec.which_file_is_EPI_space')

    mean_bold = pe.MapNode(fsl.MeanImage(dimension='T'),
                           iterfield=['in_file'],
                           name='mean_bold1')

    n4_correct = pe.MapNode(ants.N4BiasFieldCorrection(),
                            iterfield=['input_image'],
                            name='n4_correct')
    wf.connect(mean_bold, 'out_file', n4_correct, 'input_image')

    ds = pe.MapNode(DerivativesDataSink(out_path_base='simple_bold_ref',
                                        suffix='reference',
                                        base_directory=derivatives),
                    iterfield=['in_file', 'source_file'],
                    name='ds_reg_report')

    wf.connect(mc_wf_bold, 'outputspec.motion_corrected_files', mean_bold, 'in_file')
    wf.connect(n4_correct, 'output_image', ds, 'in_file')
    wf.connect(inputnode, 'bold', ds, 'source_file')

    wf.run()
Example #6
def getEPIMean(file_name, proc_Path):
    output_file = os.path.join(
        proc_Path,
        os.path.basename(file_name).split('.')[0]) + 'EPI.nii.gz'
    myMean = fsl.MeanImage(in_file=file_name, out_file=output_file)
    print(myMean.cmdline)
    myMean.run()
    return output_file
Example #7
def create_bbregister_workflow(name="bbregister",
                               contrast_type="t2",
                               partial_brain=False,
                               init_with="fsl"):
    """Find a linear transformation to align the EPI file with the anatomy."""
    in_fields = ["subject_id", "timeseries"]
    if partial_brain:
        in_fields.append("whole_brain_template")
    inputnode = Node(IdentityInterface(in_fields), "inputs")

    # Take the mean over time to get a target volume
    meanvol = MapNode(fsl.MeanImage(), "in_file", "meanvol")

    # Do a rough skullstrip using BET
    skullstrip = MapNode(fsl.BET(), "in_file", "bet")

    # Estimate the registration to Freesurfer conformed space
    func2anat = MapNode(
        fs.BBRegister(contrast_type=contrast_type,
                      init=init_with,
                      epi_mask=True,
                      registered_file=True,
                      out_reg_file="func2anat_tkreg.dat",
                      out_fsl_file="func2anat_flirt.mat"), "source_file",
        "func2anat")

    # Make an image for quality control on the registration
    report = MapNode(CoregReport(), "in_file", "coreg_report")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["tkreg_mat", "flirt_mat", "report"]),
                      "outputs")

    bbregister = Workflow(name=name)

    # Connect the registration
    bbregister.connect([
        (inputnode, func2anat, [("subject_id", "subject_id")]),
        (inputnode, report, [("subject_id", "subject_id")]),
        (inputnode, meanvol, [("timeseries", "in_file")]),
        (meanvol, skullstrip, [("out_file", "in_file")]),
        (skullstrip, func2anat, [("out_file", "source_file")]),
        (func2anat, report, [("registered_file", "in_file")]),
        (func2anat, outputnode, [("out_reg_file", "tkreg_mat")]),
        (func2anat, outputnode, [("out_fsl_file", "flirt_mat")]),
        (report, outputnode, [("out_file", "report")]),
    ])

    # Possibly connect the full_fov image
    if partial_brain:
        bbregister.connect([
            (inputnode, func2anat, [("whole_brain_template",
                                     "intermediate_file")]),
        ])

    return bbregister
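
A minimal usage sketch; note the inputnode is named "inputs", and the subject ID must exist under SUBJECTS_DIR (all values below are placeholders):

# Hypothetical usage.
wf = create_bbregister_workflow(contrast_type='t2')
wf.inputs.inputs.subject_id = 'sub-01'
wf.inputs.inputs.timeseries = ['run1_bold.nii.gz', 'run2_bold.nii.gz']
wf.base_dir = '/tmp/work'
wf.run()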
Example #8
def measure_sim(
    path_template,
    substitutions,
    reference,
    metric="MI",
    radius_or_number_of_bins=8,
    sampling_strategy="None",
    sampling_percentage=0.3,
    mask="",
):
    """Return a similarity metric score for two 3d images"""

    image_path = path_template.format(**substitutions)
    image_path = path.abspath(path.expanduser(image_path))

    #some BIDS identifier combinations may not exist:
    if not path.isfile(image_path):
        return {}

    file_data = {}
    file_data["path"] = image_path
    file_data["session"] = substitutions["session"]
    file_data["subject"] = substitutions["subject"]
    file_data["acquisition"] = substitutions["acquisition"]

    img = nib.load(image_path)
    if img.header['dim'][0] > 3:
        image_name = path.basename(file_data["path"])
        merged_image_name = "merged_" + image_name
        merged_image_path = path.join("/tmp", merged_image_name)
        if not path.isfile(merged_image_path):
            temporal_mean = fsl.MeanImage()
            temporal_mean.inputs.in_file = image_path
            temporal_mean.inputs.out_file = merged_image_path
            temporal_mean_res = temporal_mean.run()
            image_path = temporal_mean_res.outputs.out_file
        else:
            image_path = merged_image_path

    sim = ants.MeasureImageSimilarity()
    sim.inputs.dimension = 3
    sim.inputs.metric = metric
    sim.inputs.fixed_image = reference
    sim.inputs.moving_image = image_path
    sim.inputs.metric_weight = 1.0
    sim.inputs.radius_or_number_of_bins = radius_or_number_of_bins
    sim.inputs.sampling_strategy = sampling_strategy
    sim.inputs.sampling_percentage = sampling_percentage
    if mask:
        sim.inputs.fixed_image_mask = mask
    #sim.inputs.moving_image_mask = 'mask.nii.gz'
    sim_res = sim.run()
    file_data["similarity"] = sim_res.outputs.similarity

    return file_data
Example #9
    def create_html_viewer(self):
        mi = fsl.MeanImage()
        mi_run = mi.run(
            in_file=os.path.join(self.taskdir, self.task +
                                 "_input_functional_masked.nii.gz"))
        mean_img_path = mi_run.outputs.out_file
        html_view = nilearn.plotting.view_img(self.img,
                                              threshold=0,
                                              bg_img='MNI152',
                                              vmax=10,
                                              title=self.task)
        html_view.save_as_html(
            os.path.join(self.outputdir, self.task, 'figs',
                         self.task + "_viewer.html"))
        viewer_file = '"' + '/'.join(
            ('.', self.task, 'figs', self.task + "_viewer.html")) + '"'
        return viewer_file
Example #10
def create_filtering_workflow(name="filter",
                              hpf_cutoff=128,
                              TR=2,
                              output_name="timeseries"):
    """Scale and high-pass filter the timeseries."""
    inputnode = Node(IdentityInterface(["timeseries", "mask_file"]), "inputs")

    # Grand-median scale within the brain mask
    scale = MapNode(ScaleTimeseries(statistic="median", target=10000),
                    ["in_file", "mask_file"], "scale")

    # Gaussian running-line filter
    if hpf_cutoff is None:
        hpf_sigma = -1
    else:
        hpf_sigma = (hpf_cutoff / 2.0) / TR
    filter = MapNode(fsl.TemporalFilter(highpass_sigma=hpf_sigma), "in_file",
                     "filter")

    # Possibly replace the mean
    # (In later versions of FSL, the highpass filter removes the
    # mean component. Put it back, but be flexible so this isn't
    # broken on older versions of FSL).
    replacemean = MapNode(ReplaceMean(output_name=output_name),
                          ["orig_file", "filtered_file"], "replacemean")

    # Compute a final mean functional volume
    meanfunc = MapNode(fsl.MeanImage(out_file="mean_func.nii.gz"), "in_file",
                       "meanfunc")

    outputnode = Node(IdentityInterface(["timeseries", "mean_file"]),
                      "outputs")

    filtering = Workflow(name)
    filtering.connect([
        (inputnode, scale, [("timeseries", "in_file"),
                            ("mask_file", "mask_file")]),
        (scale, filter, [("out_file", "in_file")]),
        (scale, replacemean, [("out_file", "orig_file")]),
        (filter, replacemean, [("out_file", "filtered_file")]),
        (replacemean, meanfunc, [("out_file", "in_file")]),
        (replacemean, outputnode, [("out_file", "timeseries")]),
        (meanfunc, outputnode, [("out_file", "mean_file")]),
    ])

    return filtering
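
The cutoff-to-sigma conversion above follows the usual FSL convention: sigma in volumes is half the cutoff in seconds, divided by the TR. As a worked example under the defaults:

# hpf_cutoff=128 s at TR=2 s gives (128 / 2.0) / 2 = 32 volumes,
# i.e. an fslmaths filter of the form `-bptf 32 -1`.
filtering = create_filtering_workflow(hpf_cutoff=128, TR=2)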
Example #11
    def _run_interface(self, runtime):
        if len(self.inputs.in_files) == 1:
            self._results['out_file'] = self.inputs.in_files[0]
            self._results['out_avg'] = self.inputs.in_files[0]
            # TODO: generate identity out_mats and zero-filled out_movpar

            return runtime

        magmrg = fsl.Merge(dimension='t', in_files=self.inputs.in_files)
        mcflirt = fsl.MCFLIRT(cost='normcorr', save_mats=True, save_plots=True,
                              ref_vol=0, in_file=magmrg.run().outputs.merged_file)
        mcres = mcflirt.run()
        self._results['out_mats'] = mcres.outputs.mat_file
        self._results['out_movpar'] = mcres.outputs.par_file
        self._results['out_file'] = mcres.outputs.out_file

        mean = fsl.MeanImage(dimension='T', in_file=mcres.outputs.out_file)
        self._results['out_avg'] = mean.run().outputs.out_file
        return runtime
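
This _run_interface would normally sit inside a nipype SimpleInterface subclass; a minimal sketch of the surrounding boilerplate, with assumed spec and class names, might look like:

from nipype.interfaces.base import (BaseInterfaceInputSpec, File,
                                    InputMultiPath, OutputMultiPath,
                                    SimpleInterface, TraitedSpec)


class _MergeAverageInputSpec(BaseInterfaceInputSpec):  # hypothetical name
    in_files = InputMultiPath(File(exists=True), mandatory=True)


class _MergeAverageOutputSpec(TraitedSpec):  # hypothetical name
    out_file = File()
    out_avg = File()
    out_mats = OutputMultiPath(File())
    out_movpar = File()


class MergeAverage(SimpleInterface):  # hypothetical name
    input_spec = _MergeAverageInputSpec
    output_spec = _MergeAverageOutputSpec

    # the _run_interface method shown above goes here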
Example #12
def _multiple_pe_hmc(in_files, in_movpar, in_ref=None):
    """
    This function interprets that we are dealing with a
    multiple PE (phase encoding) input if it finds several
    files in in_files.

    If we have several images with various PE directions,
    it will compute the HMC parameters between them using
    an embedded workflow.

    It just forwards the two inputs otherwise.
    """
    import os
    from nipype.interfaces import fsl
    from nipype.interfaces import ants

    if len(in_files) == 1:
        out_file = in_files[0]
        out_movpar = in_movpar
    else:
        if in_ref is None:
            in_ref = 0

        # Head motion correction
        fslmerge = fsl.Merge(dimension='t', in_files=in_files)
        hmc = fsl.MCFLIRT(ref_vol=in_ref, save_mats=True, save_plots=True)
        hmc.inputs.in_file = fslmerge.run().outputs.merged_file
        hmc_res = hmc.run()
        out_file = hmc_res.outputs.out_file
        out_movpar = hmc_res.outputs.par_file

    mean = fsl.MeanImage(
        dimension='T', in_file=out_file)
    inu = ants.N4BiasFieldCorrection(
        dimension=3, input_image=mean.run().outputs.out_file)
    inu_res = inu.run()
    out_ref = inu_res.outputs.output_image
    bet = fsl.BET(
        frac=0.6, mask=True, in_file=out_ref)
    out_mask = bet.run().outputs.mask_file

    return (out_file, out_ref, out_mask, out_movpar)
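
A usage sketch for the multiple-PE branch, with placeholder file names; in_movpar can be None here because it is recomputed whenever several files are passed:

# Hypothetical usage.
out_file, out_ref, out_mask, out_movpar = _multiple_pe_hmc(
    in_files=['epi_dir-AP.nii.gz', 'epi_dir-PA.nii.gz'],
    in_movpar=None)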
Example #13
def merge_and_mean(muscle_heatmaps, heatmap_concatenated, heatmap_group_average):

    heatmap_list = list()
    # Each entry of muscle_heatmaps has two values: first the tag, second
    # the location of a warped heatmap for the muscle for that subject.
    for list_entry in muscle_heatmaps:
        heatmap_list.append(list_entry[1])

    merge_heatmaps = fsl.Merge()
    merge_heatmaps.inputs.in_files = heatmap_list
    merge_heatmaps.inputs.dimension = 't'
    merge_heatmaps.inputs.merged_file = heatmap_concatenated
    merge_heatmaps.inputs.output_type = 'NIFTI_GZ'
    # parent_logger.critical(merge_heatmaps.cmdline)
    merge_results = merge_heatmaps.run()
    
    mean_heatmap = fsl.MeanImage()
    mean_heatmap.inputs.in_file = heatmap_concatenated
    mean_heatmap.inputs.dimension = 'T'
    mean_heatmap.inputs.out_file = heatmap_group_average
    mean_heatmap.inputs.output_type = 'NIFTI_GZ'
    # parent_logger.critical(mean_heatmap.cmdline)
    mean_result = mean_heatmap.run()
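
A usage sketch with placeholder tags and paths:

# Hypothetical usage; each entry pairs a subject tag with a warped heatmap.
heatmaps = [('sub-01', 'sub-01_heatmap.nii.gz'),
            ('sub-02', 'sub-02_heatmap.nii.gz')]
merge_and_mean(heatmaps,
               heatmap_concatenated='heatmaps_4d.nii.gz',
               heatmap_group_average='heatmap_mean.nii.gz')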
Example #14
def init_transform_to_first_image_wf(name='transform_images', n_images=2):

    wf = pe.Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=['in_files', 'transforms']),
        name='inputnode')

    split = pe.Node(niu.Split(splits=[1, n_images - 1]), name='split')
    wf.connect(inputnode, 'in_files', split, 'inlist')

    apply_sinc = pe.MapNode(
        ants.ApplyTransforms(interpolation='LanczosWindowedSinc'),
        iterfield=['input_image'],
        name='apply_sinc')
    wf.connect(inputnode, 'transforms', apply_sinc, 'transforms')
    wf.connect(split, ('out1', _pickone), apply_sinc, 'reference_image')
    wf.connect(split, 'out2', apply_sinc, 'input_image')

    merge_lists = pe.Node(niu.Merge(2), name='merge_lists')
    wf.connect(split, 'out1', merge_lists, 'in1')
    wf.connect(apply_sinc, 'output_image', merge_lists, 'in2')

    merge_niftis = pe.Node(fsl.Merge(dimension='t'), name='merge_niftis')
    wf.connect(merge_lists, 'out', merge_niftis, 'in_files')

    mean_image = pe.Node(fsl.MeanImage(dimension='T'), name='mean_image')
    wf.connect(merge_niftis, 'merged_file', mean_image, 'in_file')

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['mean_image', 'transformed_images']),
        name='outputnode')
    wf.connect(mean_image, 'out_file', outputnode, 'mean_image')
    wf.connect(merge_lists, 'out', outputnode, 'transformed_images')

    return wf
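
A usage sketch with placeholder inputs; the first file serves as the fixed reference, and the single transforms list is applied to every remaining image:

# Hypothetical usage.
wf = init_transform_to_first_image_wf(n_images=3)
wf.inputs.inputnode.in_files = ['ref.nii.gz', 'img2.nii.gz', 'img3.nii.gz']
wf.inputs.inputnode.transforms = ['to_ref.h5']  # applied to img2 and img3
wf.run()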
Example #15
def create_skullstrip_workflow(name="skullstrip"):
    """Remove non-brain voxels from the timeseries."""

    # Define the workflow inputs
    inputnode = Node(
        IdentityInterface(["subject_id", "timeseries", "reg_file"]), "inputs")

    # Mean the timeseries across the fourth dimension
    origmean = MapNode(fsl.MeanImage(), "in_file", "origmean")

    # Grab the Freesurfer aparc+aseg file as an anatomical brain mask
    getaseg = Node(
        io.SelectFiles({"aseg": "{subject_id}/mri/aparc+aseg.mgz"},
                       base_directory=os.environ["SUBJECTS_DIR"]), "getaseg")

    # Threshold the aseg volume to get a boolean mask
    makemask = Node(fs.Binarize(dilate=4, min=0.5), "makemask")

    # Transform the brain mask into functional space
    transform = MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                        ["reg_file", "source_file"], "transform")

    # Convert the mask to nifti and rename
    convertmask = MapNode(fs.MRIConvert(out_file="functional_mask.nii.gz"),
                          "in_file", "convertmask")

    # Use the mask to skullstrip the timeseries
    stripts = MapNode(fs.ApplyMask(), ["in_file", "mask_file"], "stripts")

    # Use the mask to skullstrip the mean image
    stripmean = MapNode(fs.ApplyMask(), ["in_file", "mask_file"], "stripmean")

    # Generate images summarizing the skullstrip and resulting data
    reportmask = MapNode(MaskReport(), ["mask_file", "orig_file", "mean_file"],
                         "reportmask")

    # Define the workflow outputs
    outputnode = Node(
        IdentityInterface(["timeseries", "mean_file", "mask_file", "report"]),
        "outputs")

    # Define and connect the workflow
    skullstrip = Workflow(name)

    skullstrip.connect([
        (inputnode, origmean, [("timeseries", "in_file")]),
        (inputnode, getaseg, [("subject_id", "subject_id")]),
        (origmean, transform, [("out_file", "source_file")]),
        (getaseg, makemask, [("aseg", "in_file")]),
        (makemask, transform, [("binary_file", "target_file")]),
        (inputnode, transform, [("reg_file", "reg_file")]),
        (transform, stripts, [("transformed_file", "mask_file")]),
        (transform, stripmean, [("transformed_file", "mask_file")]),
        (inputnode, stripts, [("timeseries", "in_file")]),
        (origmean, stripmean, [("out_file", "in_file")]),
        (stripmean, reportmask, [("out_file", "mean_file")]),
        (origmean, reportmask, [("out_file", "orig_file")]),
        (transform, reportmask, [("transformed_file", "mask_file")]),
        (transform, convertmask, [("transformed_file", "in_file")]),
        (stripts, outputnode, [("out_file", "timeseries")]),
        (stripmean, outputnode, [("out_file", "mean_file")]),
        (convertmask, outputnode, [("out_file", "mask_file")]),
        (reportmask, outputnode, [("out_files", "report")]),
    ])

    return skullstrip
Example #16
def init_ica_aroma_wf(template,
                      metadata,
                      mem_gb,
                      omp_nthreads,
                      name='ica_aroma_wf',
                      susan_fwhm=6.0,
                      ignore_aroma_err=False,
                      aroma_melodic_dim=-200,
                      use_fieldwarp=True):
    """
    This workflow wraps `ICA-AROMA`_ to identify and remove motion-related
    independent components from a BOLD time series.

    The following steps are performed:

    #. Remove non-steady state volumes from the bold series.
    #. Smooth data using FSL `susan`, with a kernel width FWHM=6.0mm.
    #. Run FSL `melodic` outside of ICA-AROMA to generate the report
    #. Run ICA-AROMA
    #. Aggregate identified motion components (aggressive) to TSV
    #. Return ``classified_motion_ICs`` and ``melodic_mix`` for user to complete
       non-aggressive denoising in T1w space
    #. Calculate ICA-AROMA-identified noise components
       (columns named ``AROMAAggrCompXX``)

    Additionally, non-aggressive denoising is performed on the BOLD series
    resampled into MNI space.

    There is a current discussion on whether other confounds should be extracted
    before or after denoising `here <http://nbviewer.jupyter.org/github/poldracklab/\
    fmriprep-notebooks/blob/922e436429b879271fa13e76767a6e73443e74d9/issue-817_\
    aroma_confounds.ipynb>`__.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.confounds import init_ica_aroma_wf
        wf = init_ica_aroma_wf(template='MNI152NLin2009cAsym',
                               metadata={'RepetitionTime': 1.0},
                               mem_gb=3,
                               omp_nthreads=1)

    **Parameters**

        template : str
            Spatial normalization template used as target when that
            registration step was previously calculated with
            :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`.
            The template must be one of the MNI templates (fMRIPrep uses
            ``MNI152NLin2009cAsym`` by default).
        metadata : dict
            BIDS metadata for BOLD file
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``ica_aroma_wf``)
        susan_fwhm : float
            Kernel width (FWHM in mm) for the smoothing step with
            FSL ``susan`` (default: 6.0mm)
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI
        ignore_aroma_err : bool
            Do not fail on ICA-AROMA errors
        aroma_melodic_dim: int
            Set the dimensionality of the MELODIC ICA decomposition.
            Negative numbers set a maximum on automatic dimensionality estimation.
            Positive numbers set an exact number of components to extract.
            (default: -200, i.e., estimate <=200 components)

    **Inputs**

        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        t1_2_mni_forward_transform
            ANTs-compatible affine-and-warp transform file
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        skip_vols
            number of non steady state volumes
        bold_split
            Individual 3D BOLD volumes, not motion corrected
        bold_mask
            BOLD series mask in template space
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format
        movpar_file
            SPM-formatted motion parameters file

    **Outputs**

        aroma_confounds
            TSV of confounds identified as noise by ICA-AROMA
        aroma_noise_ics
            CSV of noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        nonaggr_denoised_file
            BOLD series with non-aggressive ICA-AROMA denoising applied

    .. _ICA-AROMA: https://github.com/maartenmennes/ICA-AROMA

    """
    workflow = Workflow(name=name)
    workflow.__postdesc__ = """\
Automatic removal of motion artifacts using independent component analysis
[ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space*
time-series after removal of non-steady state volumes and spatial smoothing
with an isotropic, Gaussian kernel of 6mm FWHM (full-width half-maximum).
Corresponding "non-aggresively" denoised runs were produced after such
smoothing.
Additionally, the "aggressive" noise-regressors were collected and placed
in the corresponding confounds file.
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'itk_bold_to_t1', 't1_2_mni_forward_transform', 'name_source',
        'skip_vols', 'bold_split', 'bold_mask', 'hmc_xforms', 'fieldwarp',
        'movpar_file'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'aroma_confounds', 'aroma_noise_ics', 'melodic_mix',
        'nonaggr_denoised_file'
    ]),
                         name='outputnode')

    bold_mni_trans_wf = init_bold_mni_trans_wf(
        template=template,
        mem_gb=mem_gb,
        omp_nthreads=omp_nthreads,
        template_out_grid=str(
            get_template('MNI152Lin') /
            'tpl-MNI152Lin_space-MNI_res-02_T1w.nii.gz'),
        use_compression=False,
        use_fieldwarp=use_fieldwarp,
        name='bold_mni_trans_wf')
    bold_mni_trans_wf.__desc__ = None

    rm_non_steady_state = pe.Node(niu.Function(function=_remove_volumes,
                                               output_names=['bold_cut']),
                                  name='rm_nonsteady')

    calc_median_val = pe.Node(fsl.ImageStats(op_string='-k %s -p 50'),
                              name='calc_median_val')
    calc_bold_mean = pe.Node(fsl.MeanImage(), name='calc_bold_mean')

    def _getusans_func(image, thresh):
        return [tuple([image, thresh])]

    getusans = pe.Node(niu.Function(function=_getusans_func,
                                    output_names=['usans']),
                       name='getusans',
                       mem_gb=0.01)

    smooth = pe.Node(fsl.SUSAN(fwhm=susan_fwhm), name='smooth')

    # melodic node
    melodic = pe.Node(fsl.MELODIC(no_bet=True,
                                  tr_sec=float(metadata['RepetitionTime']),
                                  mm_thresh=0.5,
                                  out_stats=True,
                                  dim=aroma_melodic_dim),
                      name="melodic")

    # ica_aroma node
    ica_aroma = pe.Node(ICA_AROMARPT(denoise_type='nonaggr',
                                     generate_report=True,
                                     TR=metadata['RepetitionTime']),
                        name='ica_aroma')

    add_non_steady_state = pe.Node(niu.Function(function=_add_volumes,
                                                output_names=['bold_add']),
                                   name='add_nonsteady')

    # extract the confound ICs from the results
    ica_aroma_confound_extraction = pe.Node(
        ICAConfounds(ignore_aroma_err=ignore_aroma_err),
        name='ica_aroma_confound_extraction')

    ds_report_ica_aroma = pe.Node(DerivativesDataSink(suffix='ica_aroma'),
                                  name='ds_report_ica_aroma',
                                  run_without_submitting=True,
                                  mem_gb=DEFAULT_MEMORY_MIN_GB)

    def _getbtthresh(medianval):
        return 0.75 * medianval

    # connect the nodes
    workflow.connect([
        (inputnode, bold_mni_trans_wf,
         [('name_source', 'inputnode.name_source'),
          ('bold_split', 'inputnode.bold_split'),
          ('bold_mask', 'inputnode.bold_mask'),
          ('hmc_xforms', 'inputnode.hmc_xforms'),
          ('itk_bold_to_t1', 'inputnode.itk_bold_to_t1'),
          ('t1_2_mni_forward_transform',
           'inputnode.t1_2_mni_forward_transform'),
          ('fieldwarp', 'inputnode.fieldwarp')]),
        (inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]),
        (inputnode, rm_non_steady_state, [('skip_vols', 'skip_vols')]),
        (bold_mni_trans_wf, rm_non_steady_state, [('outputnode.bold_mni',
                                                   'bold_file')]),
        (bold_mni_trans_wf, calc_median_val, [('outputnode.bold_mask_mni',
                                               'mask_file')]),
        (rm_non_steady_state, calc_median_val, [('bold_cut', 'in_file')]),
        (rm_non_steady_state, calc_bold_mean, [('bold_cut', 'in_file')]),
        (calc_bold_mean, getusans, [('out_file', 'image')]),
        (calc_median_val, getusans, [('out_stat', 'thresh')]),
        # Connect input nodes to complete smoothing
        (rm_non_steady_state, smooth, [('bold_cut', 'in_file')]),
        (getusans, smooth, [('usans', 'usans')]),
        (calc_median_val, smooth, [(('out_stat', _getbtthresh),
                                    'brightness_threshold')]),
        # connect smooth to melodic
        (smooth, melodic, [('smoothed_file', 'in_files')]),
        (bold_mni_trans_wf, melodic, [('outputnode.bold_mask_mni', 'mask')]),
        # connect nodes to ICA-AROMA
        (smooth, ica_aroma, [('smoothed_file', 'in_file')]),
        (bold_mni_trans_wf, ica_aroma, [('outputnode.bold_mask_mni',
                                         'report_mask'),
                                        ('outputnode.bold_mask_mni', 'mask')]),
        (melodic, ica_aroma, [('out_dir', 'melodic_dir')]),
        # generate tsvs from ICA-AROMA
        (ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')
                                                    ]),
        (inputnode, ica_aroma_confound_extraction, [('skip_vols', 'skip_vols')
                                                    ]),
        # output for processing and reporting
        (ica_aroma_confound_extraction,
         outputnode, [('aroma_confounds', 'aroma_confounds'),
                      ('aroma_noise_ics', 'aroma_noise_ics'),
                      ('melodic_mix', 'melodic_mix')]),
        # TODO change melodic report to reflect noise and non-noise components
        (ica_aroma, add_non_steady_state, [('nonaggr_denoised_file',
                                            'bold_cut_file')]),
        (bold_mni_trans_wf, add_non_steady_state, [('outputnode.bold_mni',
                                                    'bold_file')]),
        (inputnode, add_non_steady_state, [('skip_vols', 'skip_vols')]),
        (add_non_steady_state, outputnode, [('bold_add',
                                             'nonaggr_denoised_file')]),
        (ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]),
    ])

    return workflow
Example #17
def init_ica_aroma_wf(
    mem_gb,
    metadata,
    omp_nthreads,
    aroma_melodic_dim=-200,
    err_on_aroma_warn=False,
    name='ica_aroma_wf',
    susan_fwhm=6.0,
):
    """
    Build a workflow that runs `ICA-AROMA`_.

    This workflow wraps `ICA-AROMA`_ to identify and remove motion-related
    independent components from a BOLD time series.

    The following steps are performed:

    #. Remove non-steady state volumes from the bold series.
    #. Smooth data using FSL `susan`, with a kernel width FWHM=6.0mm.
    #. Run FSL `melodic` outside of ICA-AROMA to generate the report
    #. Run ICA-AROMA
    #. Aggregate identified motion components (aggressive) to TSV
    #. Return ``classified_motion_ICs`` and ``melodic_mix`` for user to complete
       non-aggressive denoising in T1w space
    #. Calculate ICA-AROMA-identified noise components
       (columns named ``AROMAAggrCompXX``)

    Additionally, non-aggressive denoising is performed on the BOLD series
    resampled into MNI space.

    There is a current discussion on whether other confounds should be extracted
    before or after denoising `here
    <http://nbviewer.jupyter.org/github/poldracklab/fmriprep-notebooks/blob/922e436429b879271fa13e76767a6e73443e74d9/issue-817_aroma_confounds.ipynb>`__.

    .. _ICA-AROMA: https://github.com/maartenmennes/ICA-AROMA

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.confounds import init_ica_aroma_wf
            wf = init_ica_aroma_wf(
                mem_gb=3,
                metadata={'RepetitionTime': 1.0},
                omp_nthreads=1)

    Parameters
    ----------
    metadata : :obj:`dict`
        BIDS metadata for BOLD file
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``ica_aroma_wf``)
    susan_fwhm : :obj:`float`
        Kernel width (FWHM in mm) for the smoothing step with
        FSL ``susan`` (default: 6.0mm)
    err_on_aroma_warn : :obj:`bool`
        Fail on ICA-AROMA errors and warnings instead of silently ignoring them
    aroma_melodic_dim : :obj:`int`
        Set the dimensionality of the MELODIC ICA decomposition.
        Negative numbers set a maximum on automatic dimensionality estimation.
        Positive numbers set an exact number of components to extract.
        (default: -200, i.e., estimate <=200 components)

    Inputs
    ------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    anat2std_xfm
        ANTs-compatible affine-and-warp transform file
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    skip_vols
        number of non steady state volumes
    bold_split
        Individual 3D BOLD volumes, not motion corrected
    bold_mask
        BOLD series mask in template space
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    movpar_file
        SPM-formatted motion parameters file

    Outputs
    -------
    aroma_confounds
        TSV of confounds identified as noise by ICA-AROMA
    aroma_noise_ics
        CSV of noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    nonaggr_denoised_file
        BOLD series with non-aggressive ICA-AROMA denoising applied

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.segmentation import ICA_AROMARPT
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.interfaces.utils import TSV2JSON

    workflow = Workflow(name=name)
    workflow.__postdesc__ = """\
Automatic removal of motion artifacts using independent component analysis
[ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space*
time-series after removal of non-steady state volumes and spatial smoothing
with an isotropic, Gaussian kernel of 6mm FWHM (full-width half-maximum).
Corresponding "non-aggresively" denoised runs were produced after such
smoothing.
Additionally, the "aggressive" noise-regressors were collected and placed
in the corresponding confounds file.
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_std',
        'bold_mask_std',
        'movpar_file',
        'name_source',
        'skip_vols',
        'spatial_reference',
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'aroma_confounds', 'aroma_noise_ics', 'melodic_mix',
        'nonaggr_denoised_file', 'aroma_metadata'
    ]),
                         name='outputnode')

    # extract out to BOLD base
    select_std = pe.Node(KeySelect(fields=['bold_mask_std', 'bold_std']),
                         name='select_std',
                         run_without_submitting=True)
    select_std.inputs.key = 'MNI152NLin6Asym_res-2'

    rm_non_steady_state = pe.Node(niu.Function(function=_remove_volumes,
                                               output_names=['bold_cut']),
                                  name='rm_nonsteady')

    calc_median_val = pe.Node(fsl.ImageStats(op_string='-k %s -p 50'),
                              name='calc_median_val')
    calc_bold_mean = pe.Node(fsl.MeanImage(), name='calc_bold_mean')

    def _getusans_func(image, thresh):
        return [tuple([image, thresh])]

    getusans = pe.Node(niu.Function(function=_getusans_func,
                                    output_names=['usans']),
                       name='getusans',
                       mem_gb=0.01)

    smooth = pe.Node(fsl.SUSAN(fwhm=susan_fwhm), name='smooth')

    # melodic node
    melodic = pe.Node(fsl.MELODIC(no_bet=True,
                                  tr_sec=float(metadata['RepetitionTime']),
                                  mm_thresh=0.5,
                                  out_stats=True,
                                  dim=aroma_melodic_dim),
                      name="melodic")

    # ica_aroma node
    ica_aroma = pe.Node(ICA_AROMARPT(denoise_type='nonaggr',
                                     generate_report=True,
                                     TR=metadata['RepetitionTime'],
                                     args='-np'),
                        name='ica_aroma')

    add_non_steady_state = pe.Node(niu.Function(function=_add_volumes,
                                                output_names=['bold_add']),
                                   name='add_nonsteady')

    # extract the confound ICs from the results
    ica_aroma_confound_extraction = pe.Node(
        ICAConfounds(err_on_aroma_warn=err_on_aroma_warn),
        name='ica_aroma_confound_extraction')

    ica_aroma_metadata_fmt = pe.Node(TSV2JSON(index_column='IC',
                                              output=None,
                                              enforce_case=True,
                                              additional_metadata={
                                                  'Method': {
                                                      'Name':
                                                      'ICA-AROMA',
                                                      'Version':
                                                      getenv(
                                                          'AROMA_VERSION',
                                                          'n/a')
                                                  }
                                              }),
                                     name='ica_aroma_metadata_fmt')

    ds_report_ica_aroma = pe.Node(DerivativesDataSink(
        desc='aroma', datatype="figures", dismiss_entities=("echo", )),
                                  name='ds_report_ica_aroma',
                                  run_without_submitting=True,
                                  mem_gb=DEFAULT_MEMORY_MIN_GB)

    def _getbtthresh(medianval):
        return 0.75 * medianval

    # connect the nodes
    workflow.connect([
        (inputnode, select_std, [('spatial_reference', 'keys'),
                                 ('bold_std', 'bold_std'),
                                 ('bold_mask_std', 'bold_mask_std')]),
        (inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]),
        (inputnode, rm_non_steady_state, [('skip_vols', 'skip_vols')]),
        (select_std, rm_non_steady_state, [('bold_std', 'bold_file')]),
        (select_std, calc_median_val, [('bold_mask_std', 'mask_file')]),
        (rm_non_steady_state, calc_median_val, [('bold_cut', 'in_file')]),
        (rm_non_steady_state, calc_bold_mean, [('bold_cut', 'in_file')]),
        (calc_bold_mean, getusans, [('out_file', 'image')]),
        (calc_median_val, getusans, [('out_stat', 'thresh')]),
        # Connect input nodes to complete smoothing
        (rm_non_steady_state, smooth, [('bold_cut', 'in_file')]),
        (getusans, smooth, [('usans', 'usans')]),
        (calc_median_val, smooth, [(('out_stat', _getbtthresh),
                                    'brightness_threshold')]),
        # connect smooth to melodic
        (smooth, melodic, [('smoothed_file', 'in_files')]),
        (select_std, melodic, [('bold_mask_std', 'mask')]),
        # connect nodes to ICA-AROMA
        (smooth, ica_aroma, [('smoothed_file', 'in_file')]),
        (select_std, ica_aroma, [('bold_mask_std', 'report_mask'),
                                 ('bold_mask_std', 'mask')]),
        (melodic, ica_aroma, [('out_dir', 'melodic_dir')]),
        # generate tsvs from ICA-AROMA
        (ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')
                                                    ]),
        (inputnode, ica_aroma_confound_extraction, [('skip_vols', 'skip_vols')
                                                    ]),
        (ica_aroma_confound_extraction, ica_aroma_metadata_fmt,
         [('aroma_metadata', 'in_file')]),
        # output for processing and reporting
        (ica_aroma_confound_extraction,
         outputnode, [('aroma_confounds', 'aroma_confounds'),
                      ('aroma_noise_ics', 'aroma_noise_ics'),
                      ('melodic_mix', 'melodic_mix')]),
        (ica_aroma_metadata_fmt, outputnode, [('output', 'aroma_metadata')]),
        (ica_aroma, add_non_steady_state, [('nonaggr_denoised_file',
                                            'bold_cut_file')]),
        (select_std, add_non_steady_state, [('bold_std', 'bold_file')]),
        (inputnode, add_non_steady_state, [('skip_vols', 'skip_vols')]),
        (add_non_steady_state, outputnode, [('bold_add',
                                             'nonaggr_denoised_file')]),
        (ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]),
    ])

    return workflow
Example #18
Intensity_Normalization = Node(fsl.BinaryMaths(),
                               name='Intensity_Normalization')
Intensity_Normalization.inputs.operation = 'mul'

# ========================================================================================================
# In[18]:

#   fslmaths ${folder}_mcf_2highres_intnorm -bptf 25 -1 -add tempMean ${folder}_mcf_2highres_tempfilt;
# sigma[vol] = filter_width[secs]/(2*TR[secs])
high_pass_filter = Node(fsl.TemporalFilter(), name='high_pass_filter')
high_pass_filter.inputs.highpass_sigma = 22.5  # 90 s / (2 * 2 s TR)
# ========================================================================================================
# In[19]

# Get the mean image
Get_Mean_Image = Node(fsl.MeanImage(), name='Get_Mean_Image')
Get_Mean_Image.inputs.dimension = 'T'

# Add the mean image to the filtered image
Add_Mean_Image = Node(fsl.BinaryMaths(), name='Add_Mean_Image')
Add_Mean_Image.inputs.operation = 'add'

# ========================================================================================================
# In[20]:

melodic = Node(fsl.MELODIC(), name='Melodic')
melodic.inputs.approach = 'concat'
melodic.inputs.no_bet = True
melodic.inputs.bg_threshold = 10.0
melodic.inputs.tr_sec = 2.00
melodic.inputs.mm_thresh = 0.5
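
A sketch of how these nodes could be wired to reproduce the quoted fslmaths line; the Workflow and its connections are assumptions, not part of the original script:

# Hypothetical wiring: filter, then add the pre-filter temporal mean back.
preproc = Workflow(name='temporal_filtering')
preproc.connect([
    (Intensity_Normalization, high_pass_filter, [('out_file', 'in_file')]),
    (Intensity_Normalization, Get_Mean_Image, [('out_file', 'in_file')]),
    (high_pass_filter, Add_Mean_Image, [('out_file', 'in_file')]),
    (Get_Mean_Image, Add_Mean_Image, [('out_file', 'operand_file')]),
])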
Example #19
def generic(
    bids_base,
    template,
    autorotate=False,
    debug=False,
    functional_blur_xy=False,
    functional_match={},
    functional_registration_method="composite",
    keep_work=False,
    n_jobs=False,
    n_jobs_percentage=0.8,
    out_base=None,
    realign="time",
    registration_mask="",
    sessions=[],
    structural_match={},
    subjects=[],
    tr=1,
    workflow_name='generic',
    params={},
    phase_dictionary=GENERIC_PHASES,
    enforce_dummy_scans=DUMMY_SCANS,
    masking_config_path='',
    exclude={},
):
    '''
	Generic preprocessing and registration workflow for small animal data in BIDS format.

	Parameters
	----------
	bids_base : str
		Path to the BIDS data set root.
	template : str
		Path to the template to register the data to.
	autorotate : bool, optional
		Whether to use a multi-rotation-state transformation start.
		This allows the registration to commence with the best rotational fit, and may help if the orientation of the data is malformed with respect to the header.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified matching entries will be excluded in the analysis.
	functional_blur_xy : float, optional
		Factor by which to smooth data in the xy-plane; if parameter evaluates to false, no smoothing will be applied.
		Ideally this value should correspond to the resolution or smoothness in the z-direction (assuming z represents the lower-resolution slice-encoding direction).
	functional_match : dict, optional
		Dictionary specifying a whitelist to use for functional data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition', 'task', or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	functional_registration_method : {'composite','functional','structural'}, optional
		How to register the functional scan to the template.
		Values mean the following: 'composite', register to the structural scan, which is in turn registered to the template; 'functional', register directly to the template; 'structural', register exactly as the structural scan is.
	keep_work : bool, str
		Whether to keep the work directory after workflow conclusion (this directory contains all the intermediary processing commands, inputs, and outputs --- it is invaluable for debugging but many times larger in size than the actual output).
	n_jobs : int, optional
		Number of processors to maximally use for the workflow; if unspecified, a best guess will be estimated based on `n_jobs_percentage` and hardware (but not on current load).
	n_jobs_percentage : float, optional
		Percentage of available processors (as in available hardware, not available free load) to maximally use for the workflow (this is overridden by `n_jobs`).
	out_base : str, optional
		Output base directory --- inside which a directory named `workflow_name`(as well as associated directories) will be created.
	realign : {"space","time","spacetime",""}, optional
		Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!
	registration_mask : str, optional
		Mask to use for the registration process.
		This mask will constrain the area for similarity metric evaluation, but the data will not be cropped.
	sessions : list, optional
		A whitelist of sessions to include in the workflow, if the list is empty there is no whitelist and all sessions will be considered.
	structural_match : dict, optional
		Dictionary specifying a whitelist to use for structural data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition', or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	subjects : list, optional
		A whitelist of subjects to include in the workflow, if the list is empty there is no whitelist and all sessions will be considered.
	tr : float, optional
		Repetition time, explicitly.
		WARNING! This is a parameter waiting for deprecation.
	workflow_name : str, optional
		Top level name for the output directory.
	masking_config_path: str
		Path to the JSON configuration file that will be read by the MLEBE "predict_mask" function. If it is set, the segmentation models from the MLEBE package are used to extract the brain region in an additional masking node.
	'''

    bids_base, out_base, out_dir, template, registration_mask, data_selection, functional_scan_types, structural_scan_types, subjects_sessions, func_ind, struct_ind = common_select(
        bids_base,
        out_base,
        workflow_name,
        template,
        registration_mask,
        functional_match,
        structural_match,
        subjects,
        sessions,
        exclude,
    )

    if not n_jobs:
        n_jobs = max(int(round(mp.cpu_count() * n_jobs_percentage)), 2)

    find_physio = pe.Node(
        name='find_physio',
        interface=util.Function(
            function=corresponding_physiofile,
            input_names=inspect.getargspec(corresponding_physiofile)[0],
            output_names=['physiofile', 'meta_physiofile']))

    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_bids_scan,
                             input_names=inspect.getargspec(get_bids_scan)[0],
                             output_names=[
                                 'scan_path', 'scan_type', 'task', 'nii_path',
                                 'nii_name', 'events_name', 'subject_session',
                                 'metadata_filename', 'dict_slice', 'ind_type'
                             ]))
    get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.bids_base = bids_base
    get_f_scan.iterables = ("ind_type", func_ind)

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getargspec(force_dummy_scans)[0],
            output_names=['out_file', 'deleted_scans']))
    dummy_scans.inputs.desired_dummy_scans = enforce_dummy_scans

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_bids_events_file,
            input_names=inspect.getargspec(write_bids_events_file)[0],
            output_names=['out_file']))

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = out_dir
    datasink.inputs.parameterization = False

    workflow_connections = [
        (get_f_scan, dummy_scans, [('nii_path', 'in_file')]),
        (dummy_scans, events_file, [('deleted_scans', 'forced_dummy_scans')]),
        (get_f_scan, events_file, [('nii_path', 'timecourse_file'),
                                   ('task', 'task'),
                                   ('scan_path', 'scan_dir')]),
        (get_f_scan, find_physio, [('nii_path', 'nii_path')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (find_physio, datasink, [('physiofile', 'func.@physio')]),
        (find_physio, datasink, [('meta_physiofile', 'func.@meta_physio')]),
        (get_f_scan, events_file, [('events_name', 'out_file')]),
        (get_f_scan, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
    ]

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        realigner.inputs.tr = tr
        realigner.inputs.slice_info = 3  #3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        realigner.inputs.time_repetition = tr
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    #ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
    s_biascorrect, f_biascorrect = real_size_nodes()

    if structural_scan_types.any():
        s_data_selection = deepcopy(data_selection)
        for match in structural_match.keys():
            s_data_selection = s_data_selection.loc[
                s_data_selection[match].isin(structural_match[match])]

        get_s_scan = pe.Node(
            name='get_s_scan',
            interface=util.Function(
                function=get_bids_scan,
                input_names=inspect.getargspec(get_bids_scan)[0],
                output_names=[
                    'scan_path', 'scan_type', 'task', 'nii_path', 'nii_name',
                    'events_name', 'subject_session', 'metadata_filename',
                    'dict_slice', 'ind_type'
                ]))
        get_s_scan.inputs.ignore_exception = True
        get_s_scan.inputs.data_selection = s_data_selection
        get_s_scan.inputs.bids_base = bids_base

        s_register, s_warp, f_register, f_warp = generic_registration(
            template,
            template_mask=registration_mask,
            phase_dictionary=phase_dictionary,
        )
        #TODO: incl. in func registration
        if autorotate:
            # NOTE: the boolean `autorotate` parameter shadows the rotation
            # helper this call expects, so this branch fails as written.
            s_rotated = autorotate(template)
            workflow_connections.extend([
                (s_biascorrect, s_rotated, [('output_image', 'out_file')]),
                (s_rotated, s_register, [('out_file', 'moving_image')]),
            ])
        else:
            workflow_connections.extend([
                (s_biascorrect, s_register, [('output_image', 'moving_image')
                                             ]),
                (s_register, s_warp, [('composite_transform', 'transforms')]),
                (get_s_scan, s_warp, [('nii_path', 'input_image')]),
                (s_warp, datasink, [('output_image', 'anat')]),
            ])
        if masking_config_path:
            from mlebe.masking.predict_mask import predict_mask
            s_mask = pe.Node(
                name='s_mask',
                interface=util.Function(
                    function=predict_mask,
                    input_names=inspect.getfullargspec(predict_mask)[0],
                    output_names=['out_file', 'mask_list', 'mask']))
            f_mask = pe.Node(
                name='f_mask',
                interface=util.Function(
                    function=predict_mask,
                    input_names=inspect.getfullargspec(predict_mask)[0],
                    output_names=['out_file', 'mask_list', 'mask']))
            s_mask.inputs.masking_config_path = masking_config_path
            f_mask.inputs.masking_config_path = masking_config_path
            f_mask.inputs.input_type = 'func'
            workflow_connections.extend([
                (get_f_scan, get_s_scan, [('subject_session', 'selector')]),
                (get_f_scan, f_mask, [('nii_path', 'in_file')]),
                (f_mask, f_biascorrect, [('mask', 'mask_image')]),
                (get_s_scan, s_warp, [('nii_name', 'output_image')]),
                (get_s_scan, s_mask, [('nii_path', 'in_file')]),
                (s_mask, s_biascorrect, [('out_file', 'input_image')]),
                (s_mask, s_biascorrect, [('mask', 'mask_image')]),
                (s_mask, s_register, [('mask_list', 'moving_image_masks')]),
                (f_mask, f_register, [('mask_list', 'moving_image_masks')]),
            ])

        else:
            workflow_connections.extend([
                (get_f_scan, get_s_scan, [('subject_session', 'selector')]),
                (get_s_scan, s_warp, [('nii_name', 'output_image')]),
                (get_s_scan, s_biascorrect, [('nii_path', 'input_image')]),
            ])

    if functional_registration_method == "structural":
        if not structural_scan_types.any():
            raise ValueError(
                'The option `registration="structural"` requires there to be a structural scan type.'
            )
        workflow_connections.extend([
            (s_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])
    elif functional_registration_method == "composite":
        if not structural_scan_types.any():
            raise ValueError(
                'The option `registration="composite"` requires there to be a structural scan type.'
            )
        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        merge = pe.Node(util.Merge(2), name='merge')

        if masking_config_path:
            additional_biascorrect = additional_s_biascorrect()
            workflow_connections.extend([
                (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
                (f_biascorrect, f_register, [('output_image', 'moving_image')
                                             ]),
                (get_s_scan, additional_biascorrect, [('nii_path',
                                                       'input_image')]),
                (additional_biascorrect, f_register, [('output_image',
                                                       'fixed_image')]),
                (s_register, merge, [('composite_transform', 'in1')]),
                (f_register, merge, [('composite_transform', 'in2')]),
                (merge, f_warp, [('out', 'transforms')]),
            ])
        else:
            workflow_connections.extend([
                (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
                (f_biascorrect, f_register, [('output_image', 'moving_image')
                                             ]),
                (s_biascorrect, f_register, [('output_image', 'fixed_image')]),
                (s_register, merge, [('composite_transform', 'in1')]),
                (f_register, merge, [('composite_transform', 'in2')]),
                (merge, f_warp, [('out', 'transforms')]),
            ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file',
                                             'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])
    elif functional_registration_method == "functional":
        f_register, f_warp = functional_registration(template)

        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        #f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
        #f_cutoff.inputs.op_string = "-thrP 30"

        #f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
        #f_BET.inputs.mask = True
        #f_BET.inputs.frac = 0.5

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            #(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
            #(f_cutoff, f_BET, [('out_file', 'in_file')]),
            #(f_BET, f_register, [('out_file', 'moving_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (f_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file',
                                             'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    if functional_blur_xy:
        blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
        blur.inputs.fwhmxy = functional_blur_xy
        workflow_connections.extend([
            (get_f_scan, blur, [('nii_name', 'out_file')]),
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, datasink, [('out_file', 'func')]),
        ])
    else:
        workflow_connections.extend([
            (get_f_scan, f_warp, [('nii_name', 'output_image')]),
            (f_warp, datasink, [('output_image', 'func')]),
        ])

    workflow_config = {
        'execution': {
            'crashdump_dir': path.join(out_base, 'crashdump'),
        }
    }
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = workflow_name + "_work"
    #this gives the name of the workdir; the output name is passed to the datasink
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_base
    workflow.config = workflow_config
    try:
        workflow.write_graph(dotfilename=path.join(workflow.base_dir,
                                                   workdir_name, "graph.dot"),
                             graph2use="hierarchical",
                             format="png")
    except OSError:
        print(
            'Could not write the DOT file for visualization (requires the `dot` command from the graphviz package). This is non-critical to the processing, but it should be fixed.'
        )

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_jobs})
    copy_bids_files(bids_base, os.path.join(out_base, workflow_name))
    if not keep_work:
        workdir = path.join(workflow.base_dir, workdir_name)
        try:
            shutil.rmtree(workdir)
        except OSError as e:
            if str(e) == 'Cannot call rmtree on a symbolic link':
                print(
                    'Not deleting top level workdir (`{}`), as it is a symlink. Deleting only contents instead'
                    .format(workdir))
                for file_object in os.listdir(workdir):
                    file_object_path = os.path.join(workdir, file_object)
                    if os.path.isfile(file_object_path):
                        os.unlink(file_object_path)
                    else:
                        shutil.rmtree(file_object_path)
            else:
                raise OSError(str(e))
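The "composite" branch above chains the structural-to-template and functional-to-structural transforms into a single list for `f_warp`. A minimal standalone sketch of that pattern, with hypothetical file names standing in for the workflow connections:

import nipype.pipeline.engine as pe
from nipype.interfaces import ants
from nipype.interfaces import utility as util

merge = pe.Node(util.Merge(2), name='merge')
merge.inputs.in1 = 's2t_Composite.h5'  # hypothetical structural-to-template transform
merge.inputs.in2 = 'f2s_Composite.h5'  # hypothetical functional-to-structural transform

# input_image_type=3 tells ANTs to treat the input as a time series
warp = pe.Node(ants.ApplyTransforms(input_image_type=3), name='warp')
warp.inputs.reference_image = 'template.nii.gz'  # hypothetical
warp.inputs.input_image = 'func.nii.gz'          # hypothetical 4D series

wf = pe.Workflow(name='composite_sketch')
wf.connect(merge, 'out', warp, 'transforms')
# wf.run()  # would execute given the hypothetical files above

ANTs applies a transform list in reverse order of specification, so the functional-to-structural transform (`in2`) acts on the data before the structural-to-template one (`in1`).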
Example #20
    def run(self, n_ants_jobs=1, n_pipeline_jobs=1, strTemplatePath=None):
        """Perform registration.

        Args:
            n_ants_jobs (int, optional): number of parallel ANTs threads. Defaults to 1.
            n_pipeline_jobs (int, optional): number of parallel processing jobs, should be >= n_ants_jobs. Defaults to 1.
            strTemplatePath (str, optional): path to MNI template. Defaults to the MNI152NLin2009cAsym BOLD reference (EPI) template found in data/

        Raises:
            ValueError: if n_ants_jobs exceeds n_pipeline_jobs
        """

        # Use default EPI template if not specified
        if not strTemplatePath:
            strTemplatePath = os.path.join(
                os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
                'data/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref_brain.nii.gz'
            )
        # If using single-threaded mode, restrict ANTs to single-threaded mode as well
        if n_pipeline_jobs == 1:
            n_ants_jobs = 1
        # Can't have more ANTs threads than the total number of pipeline jobs!
        if n_ants_jobs > n_pipeline_jobs:
            raise ValueError(
                'Number of ANTs jobs cannot be higher than the number of total pipeline jobs.'
            )

        if not os.path.exists(self.strOutputDir):
            os.makedirs(self.strOutputDir)

        workflow = Workflow('registration',
                            base_dir=os.path.join(self.strOutputDir))

        # Compute mean image
        meanimage = MapNode(fsl.MeanImage(dimension='T',
                                          output_type='NIFTI_GZ'),
                            name='1_mean',
                            iterfield=['in_file'])
        meanimage.inputs.in_file = self.lsInputs

        # Skull strip
        funcstrip = make_func_mask_workflow(base_dir=self.strOutputDir)
        workflow.connect(meanimage, 'out_file', funcstrip,
                         'inputnode.mean_file')

        # Register the skull-stripped mean functional with the EPI template.
        register = MapNode(ants.RegistrationSynQuick(
            fixed_image=strTemplatePath, num_threads=n_ants_jobs),
                           name='2_register',
                           iterfield=['moving_image', 'output_prefix'],
                           mem_gb=16,
                           n_procs=n_ants_jobs)
        lsPrefixes = [self.get_output_prefix(s) + '_' for s in self.lsInputs]
        register.inputs.output_prefix = lsPrefixes
        workflow.connect(funcstrip, 'outputnode.masked_file', register,
                         'moving_image')

        # Softlink output files to output_directory/subject/transformation_type/...
        datasink = MapNode(io.DataSink(base_directory=self.strOutputDir,
                                       parameterization=False,
                                       remove_dest_dir=True,
                                       infields=[
                                           'forward_warp_field',
                                           'inverse_warp_field', 'out_matrix'
                                       ]),
                           name='datasink',
                           iterfield=[
                               'container', 'forward_warp_field',
                               'inverse_warp_field', 'out_matrix'
                           ])
        datasink.inputs.container = [
            self.get_output_prefix(s) for s in self.lsInputs
        ]
        # Datasink creates an extra empty folder for no apparent reason; this removes it
        datasink.inputs.regexp_substitutions = [(r'trait_added', r'')]
        workflow.connect([(register, datasink,
                           [('forward_warp_field', 'forward_warp_field'),
                            ('inverse_warp_field', 'inverse_warp_field'),
                            ('out_matrix', 'out_matrix')])])

        if n_pipeline_jobs == 1:
            workflow.run()
        else:
            workflow.run(plugin='MultiProc',
                         plugin_args={'n_procs': n_pipeline_jobs})
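The core of this example is fsl.MeanImage(dimension='T'), which collapses each 4D run to its temporal mean before skull stripping. In isolation, the MapNode step reduces to the following sketch (paths are hypothetical placeholders):

import nipype.pipeline.engine as pe
from nipype.interfaces import fsl

meanimage = pe.MapNode(fsl.MeanImage(dimension='T', output_type='NIFTI_GZ'),
                       name='mean',
                       iterfield=['in_file'])
meanimage.inputs.in_file = ['run-01_bold.nii.gz', 'run-02_bold.nii.gz']  # hypothetical
# res = meanimage.run()  # res.outputs.out_file -> one mean image per input run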
Example #21
    struct_datasource = pe.Node(nio.DataGrabber(infields=['subject_ids'],
                                                outfields=['normalized_T1s']),
                                name="struct_datasource")
    struct_datasource.inputs.base_directory = os.path.join(
        resultsdir, 'volumes')
    struct_datasource.inputs.template = 'normalized_T1/_subject_id_%s/*.nii.gz'
    struct_datasource.inputs.template_args['normalized_T1s'] = [[
        'subject_ids'
    ]]
    struct_datasource.inputs.sort_filelist = True
    struct_datasource.inputs.subject_ids = subjects

    merge_structs = pe.Node(fsl.Merge(dimension='t'), name="merge_structs")

    wf.connect(struct_datasource, 'normalized_T1s', merge_structs, 'in_files')

    mean_struct = pe.Node(fsl.MeanImage(dimension="T"), name="mean_struct")

    wf.connect(merge_structs, 'merged_file', mean_struct, 'in_file')

    std_struct = pe.Node(fsl.ImageMaths(op_string="-Tstd"), name="std_struct")

    wf.connect(merge_structs, 'merged_file', std_struct, 'in_file')
    ds = pe.Node(nio.DataSink(), name="datasink")
    ds.run_without_submitting = True
    ds.inputs.regexp_substitutions = [("_diff_i[^/]*/", ""),
                                      ("_avg_i[^/]*/", "")]
    ds.inputs.base_directory = os.path.join(resultsdir, "volumes")

    wf.connect(mean_struct, 'out_file', ds, "mean_struct")
    wf.connect(std_struct, 'out_file', ds, "std_struct")
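Outside a workflow, the merge/mean/std trio above reduces to three direct interface calls; a sketch assuming hypothetical file names:

from nipype.interfaces import fsl

fsl.Merge(dimension='t',
          in_files=['sub-01_T1.nii.gz', 'sub-02_T1.nii.gz'],  # hypothetical
          merged_file='all_T1.nii.gz').run()
fsl.MeanImage(in_file='all_T1.nii.gz', dimension='T',
              out_file='mean_T1.nii.gz').run()  # voxelwise mean across subjects
fsl.ImageMaths(in_file='all_T1.nii.gz', op_string='-Tstd',
               out_file='std_T1.nii.gz').run()  # voxelwise standard deviation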
Example #22
def create_confound_removal_workflow(workflow_name="confound_removal"):

    inputnode = pe.Node(util.IdentityInterface(
        fields=["subject_id", "timeseries", "reg_file", "motion_parameters"]),
                        name="inputs")

    # Get the Freesurfer aseg volume from the Subjects Directory
    getaseg = pe.Node(io.FreeSurferSource(subjects_dir=fs.Info.subjectsdir()),
                      name="getaseg")

    # Binarize the Aseg to use as a whole brain mask
    asegmask = pe.Node(fs.Binarize(min=0.5, dilate=2), name="asegmask")

    # Extract and erode a mask of the deep cerebral white matter
    extractwm = pe.Node(fs.Binarize(match=[2, 41], erode=3), name="extractwm")

    # Extract and erode a mask of the ventricles and CSF
    extractcsf = pe.Node(fs.Binarize(match=[4, 5, 14, 15, 24, 31, 43, 44, 63],
                                     erode=1),
                         name="extractcsf")

    # Mean the timeseries across the fourth dimension
    meanfunc = pe.MapNode(fsl.MeanImage(),
                          iterfield=["in_file"],
                          name="meanfunc")

    # Invert the anatomical coregistration and resample the masks
    regwm = pe.MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                       iterfield=["source_file", "reg_file"],
                       name="regwm")

    regcsf = pe.MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                        iterfield=["source_file", "reg_file"],
                        name="regcsf")

    regbrain = pe.MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                          iterfield=["source_file", "reg_file"],
                          name="regbrain")

    # Convert to Nifti for FSL tools
    convertwm = pe.MapNode(fs.MRIConvert(out_type="niigz"),
                           iterfield=["in_file"],
                           name="convertwm")

    convertcsf = pe.MapNode(fs.MRIConvert(out_type="niigz"),
                            iterfield=["in_file"],
                            name="convertcsf")

    convertbrain = pe.MapNode(fs.MRIConvert(out_type="niigz"),
                              iterfield=["in_file"],
                              name="convertbrain")

    # Add the mask images together for a report image
    addconfmasks = pe.MapNode(fsl.ImageMaths(suffix="conf",
                                             op_string="-mul 2 -add",
                                             out_data_type="char"),
                              iterfield=["in_file", "in_file2"],
                              name="addconfmasks")

    # Overlay the confound mask on the mean functional and slice it for reporting
    confoverlay = pe.MapNode(fsl.Overlay(auto_thresh_bg=True,
                                         stat_thresh=(.7, 2)),
                             iterfield=["background_image", "stat_image"],
                             name="confoverlay")

    confslice = pe.MapNode(fsl.Slicer(image_width=800, label_slices=False),
                           iterfield=["in_file"],
                           name="confslice")
    confslice.inputs.sample_axial = 2

    # Extract the mean signal from white matter and CSF masks
    wmtcourse = pe.MapNode(fs.SegStats(exclude_id=0, avgwf_txt_file=True),
                           iterfield=["segmentation_file", "in_file"],
                           name="wmtcourse")

    csftcourse = pe.MapNode(fs.SegStats(exclude_id=0, avgwf_txt_file=True),
                            iterfield=["segmentation_file", "in_file"],
                            name="csftcourse")

    # Extract the mean signal from over the whole brain
    globaltcourse = pe.MapNode(fs.SegStats(exclude_id=0, avgwf_txt_file=True),
                               iterfield=["segmentation_file", "in_file"],
                               name="globaltcourse")

    # Build the confound design matrix
    conf_inputs = [
        "motion_params", "global_waveform", "wm_waveform", "csf_waveform"
    ]
    confmatrix = pe.MapNode(util.Function(input_names=conf_inputs,
                                          output_names=["confound_matrix"],
                                          function=make_confound_matrix),
                            iterfield=conf_inputs,
                            name="confmatrix")

    # Regress the confounds out of the timeseries
    confregress = pe.MapNode(fsl.FilterRegressor(filter_all=True),
                             iterfield=["in_file", "design_file", "mask"],
                             name="confregress")

    # Rename the confound mask png
    renamepng = pe.MapNode(util.Rename(format_string="confound_sources.png"),
                           iterfield=["in_file"],
                           name="renamepng")

    # Define the outputs
    outputnode = pe.Node(
        util.IdentityInterface(fields=["timeseries", "confound_sources"]),
        name="outputs")

    # Define and connect the confound workflow
    confound = pe.Workflow(name=workflow_name)

    confound.connect([
        (inputnode, meanfunc, [("timeseries", "in_file")]),
        (inputnode, getaseg, [("subject_id", "subject_id")]),
        (getaseg, extractwm, [("aseg", "in_file")]),
        (getaseg, extractcsf, [("aseg", "in_file")]),
        (getaseg, asegmask, [("aseg", "in_file")]),
        (extractwm, regwm, [("binary_file", "target_file")]),
        (extractcsf, regcsf, [("binary_file", "target_file")]),
        (asegmask, regbrain, [("binary_file", "target_file")]),
        (meanfunc, regwm, [("out_file", "source_file")]),
        (meanfunc, regcsf, [("out_file", "source_file")]),
        (meanfunc, regbrain, [("out_file", "source_file")]),
        (inputnode, regwm, [("reg_file", "reg_file")]),
        (inputnode, regcsf, [("reg_file", "reg_file")]),
        (inputnode, regbrain, [("reg_file", "reg_file")]),
        (regwm, convertwm, [("transformed_file", "in_file")]),
        (regcsf, convertcsf, [("transformed_file", "in_file")]),
        (regbrain, convertbrain, [("transformed_file", "in_file")]),
        (convertwm, addconfmasks, [("out_file", "in_file")]),
        (convertcsf, addconfmasks, [("out_file", "in_file2")]),
        (addconfmasks, confoverlay, [("out_file", "stat_image")]),
        (meanfunc, confoverlay, [("out_file", "background_image")]),
        (confoverlay, confslice, [("out_file", "in_file")]),
        (confslice, renamepng, [("out_file", "in_file")]),
        (regwm, wmtcourse, [("transformed_file", "segmentation_file")]),
        (inputnode, wmtcourse, [("timeseries", "in_file")]),
        (regcsf, csftcourse, [("transformed_file", "segmentation_file")]),
        (inputnode, csftcourse, [("timeseries", "in_file")]),
        (regbrain, globaltcourse, [("transformed_file", "segmentation_file")]),
        (inputnode, globaltcourse, [("timeseries", "in_file")]),
        (inputnode, confmatrix, [("motion_parameters", "motion_params")]),
        (wmtcourse, confmatrix, [("avgwf_txt_file", "wm_waveform")]),
        (csftcourse, confmatrix, [("avgwf_txt_file", "csf_waveform")]),
        (globaltcourse, confmatrix, [("avgwf_txt_file", "global_waveform")]),
        (confmatrix, confregress, [("confound_matrix", "design_file")]),
        (inputnode, confregress, [("timeseries", "in_file")]),
        (convertbrain, confregress, [("out_file", "mask")]),
        (confregress, outputnode, [("out_file", "timeseries")]),
        (renamepng, outputnode, [("out_file", "confound_sources")]),
    ])

    return confound
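The make_confound_matrix helper is referenced by the Function node above but not shown here. A hedged sketch of what it plausibly does, assuming the SegStats avgwf_txt_file outputs are single-column text files and the motion parameters a TR-by-6 text matrix; the demeaning step and output name are this sketch's own choices:

def make_confound_matrix(motion_params, global_waveform, wm_waveform,
                         csf_waveform):
    # Imports inside the function body, as nipype Function nodes require
    import os
    import numpy as np
    motion = np.loadtxt(motion_params)
    waves = [np.loadtxt(f).reshape(-1, 1)
             for f in (global_waveform, wm_waveform, csf_waveform)]
    design = np.hstack([motion] + waves)
    design -= design.mean(axis=0)  # demean each regressor (assumption)
    confound_matrix = os.path.abspath('confounds.txt')
    np.savetxt(confound_matrix, design)
    return confound_matrix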
Example #23
modelspec = pe.Node(interface=modelgen.SpecifySPMModel(),
                    name='modelspec')
modelspec.inputs.input_units = 'secs'
modelspec.inputs.output_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = HIGHPASS_CUTOFF
workflow.connect(get_session_informations, 'informations', modelspec, 'subject_info')
workflow.connect(smooth, 'smoothed_files', modelspec, 'functional_runs')

# merge the runs' masks
merge_masks = pe.Node(interface=fsl.Merge(dimension='t'),
                     name='merge_masks')
workflow.connect(datasource, 'mask', merge_masks, 'in_files')

# create the mean mask across runs
mean_mask = pe.Node(interface=fsl.MeanImage(args='-bin', output_type='NIFTI'),
                    name='mean_mask')
workflow.connect(merge_masks, 'merged_file', mean_mask, 'in_file')

# generate a first level SPM.mat
level1design = pe.Node(interface=spm.Level1Design(),
                       name='level1design')
level1design.inputs.timing_units = modelspec.inputs.output_units
level1design.inputs.interscan_interval = modelspec.inputs.time_repetition
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
level1design.inputs.model_serial_correlations = 'AR(1)'
workflow.connect(modelspec, 'session_info', level1design, 'session_info')
workflow.connect(mean_mask, 'out_file', level1design, 'mask_image')

# plot the design matrix
plot_design_matrix = pe.Node(niu.Function(input_names=['mat_file'],
Example #24
def bruker(measurements_base,
	functional_scan_types=[],
	structural_scan_types=[],
	sessions=[],
	subjects=[],
	measurements=[],
	exclude_subjects=[],
	exclude_measurements=[],
	actual_size=False,
	functional_blur_xy=False,
	functional_registration_method="structural",
	highpass_sigma=225,
	lowpass_sigma=None,
	negative_contrast_agent=False,
	n_procs=N_PROCS,
	realign=True,
	registration_mask=False,
	template="/home/chymera/ni_data/templates/ds_QBI_chr.nii.gz",
	tr=1,
	very_nasty_bruker_delay_hack=False,
	workflow_name="generic",
	keep_work=False,
	autorotate=False,
	strict=False,
	):

	measurements_base = os.path.abspath(os.path.expanduser(measurements_base))

	#select all functional/structural scan types unless specified
	if not functional_scan_types or not structural_scan_types:
		scan_classification = pd.read_csv(scan_classification_file_path)
		if not functional_scan_types:
			functional_scan_types = list(scan_classification[(scan_classification["categories"] == "functional")]["scan_type"])
		if not structural_scan_types:
			structural_scan_types = list(scan_classification[(scan_classification["categories"] == "structural")]["scan_type"])

	#hack to allow structural scan type disabling:
	if structural_scan_types == -1:
		structural_scan_types = []

	# define measurement directories to be processed, and populate the list either with the given `measurements`, or with an intelligent selection
	scan_types = deepcopy(functional_scan_types)
	scan_types.extend(structural_scan_types)
	data_selection=get_data_selection(measurements_base, sessions, scan_types=scan_types, subjects=subjects, exclude_subjects=exclude_subjects, measurements=measurements, exclude_measurements=exclude_measurements)
	if not subjects:
		subjects = set(list(data_selection["subject"]))
	if not sessions:
		sessions = set(list(data_selection["session"]))

	if functional_registration_method == "structural" and structural_scan_types:
		structural_scan_types = [structural_scan_types[0]]

	# here we start to define the nipype workflow elements (nodes, connectons, meta)
	subjects_sessions = data_selection[["subject","session"]].drop_duplicates().values.tolist()
	infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_session']), name="infosource")
	infosource.iterables = [('subject_session', subjects_sessions)]

	get_f_scan = pe.Node(name='get_f_scan', interface=util.Function(function=get_scan,input_names=inspect.getargspec(get_scan)[0], output_names=['scan_path','scan_type']))
	if not strict:
		get_f_scan.inputs.ignore_exception = True
	get_f_scan.inputs.data_selection = data_selection
	get_f_scan.inputs.measurements_base = measurements_base
	get_f_scan.iterables = ("scan_type", functional_scan_types)

	f_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="f_bru2nii")
	f_bru2nii.inputs.actual_size=actual_size

	dummy_scans = pe.Node(name='dummy_scans', interface=util.Function(function=force_dummy_scans,input_names=inspect.getargspec(force_dummy_scans)[0], output_names=['out_file']))
	dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS

	events_file = pe.Node(name='events_file', interface=util.Function(function=write_events_file,input_names=inspect.getargspec(write_events_file)[0], output_names=['out_file']))
	events_file.inputs.dummy_scans_ms = DUMMY_SCANS * tr * 1000
	events_file.inputs.stim_protocol_dictionary = STIM_PROTOCOL_DICTIONARY
	events_file.inputs.very_nasty_bruker_delay_hack = very_nasty_bruker_delay_hack

	if realign:
		realigner = pe.Node(interface=nipy.SpaceTimeRealigner(), name="realigner")
		realigner.inputs.slice_times = "asc_alt_2"
		realigner.inputs.tr = tr
		realigner.inputs.slice_info = 3 #3 for coronal slices (2 for horizontal, 1 for sagittal)

	bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
	bandpass.inputs.highpass_sigma = highpass_sigma
	if lowpass_sigma:
		bandpass.inputs.lowpass_sigma = lowpass_sigma
	else:
		bandpass.inputs.lowpass_sigma = tr

	bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))

	bids_stim_filename = pe.Node(name='bids_stim_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
	bids_stim_filename.inputs.suffix = "events"
	bids_stim_filename.inputs.extension = ".tsv"

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = path.join(measurements_base,"preprocessing",workflow_name)
	datasink.inputs.parameterization = False

	workflow_connections = [
		(infosource, get_f_scan, [('subject_session', 'selector')]),
		(infosource, bids_stim_filename, [('subject_session', 'subject_session')]),
		(get_f_scan, bids_stim_filename, [('scan_type', 'scan')]),
		(get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
		(f_bru2nii, dummy_scans, [('nii_file', 'in_file')]),
		(get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
		(get_f_scan, events_file, [
			('scan_type', 'scan_type'),
			('scan_path', 'scan_dir')
			]),
		(events_file, datasink, [('out_file', 'func.@events')]),
		(bids_stim_filename, events_file, [('filename', 'out_file')]),
		(infosource, datasink, [(('subject_session',ss_to_path), 'container')]),
		(infosource, bids_filename, [('subject_session', 'subject_session')]),
		(get_f_scan, bids_filename, [('scan_type', 'scan')]),
		(bids_filename, bandpass, [('filename', 'out_file')]),
		(bandpass, datasink, [('out_file', 'func')]),
		]

	if realign:
		workflow_connections.extend([
			(dummy_scans, realigner, [('out_file', 'in_file')]),
			])

	#ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
	if structural_scan_types:
		get_s_scan = pe.Node(name='get_s_scan', interface=util.Function(function=get_scan,input_names=inspect.getargspec(get_scan)[0], output_names=['scan_path','scan_type']))
		if not strict:
			get_s_scan.inputs.ignore_exception = True
		get_s_scan.inputs.data_selection = data_selection
		get_s_scan.inputs.measurements_base = measurements_base
		get_s_scan.iterables = ("scan_type", structural_scan_types)

		s_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="s_bru2nii")
		s_bru2nii.inputs.force_conversion=True
		s_bru2nii.inputs.actual_size=actual_size

		if "DSURQEc" in template:
			s_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_biascorrect")
			s_biascorrect.inputs.dimension = 3
			s_biascorrect.inputs.bspline_fitting_distance = 10
			s_biascorrect.inputs.bspline_order = 4
			s_biascorrect.inputs.shrink_factor = 2
			s_biascorrect.inputs.n_iterations = [150,100,50,30]
			s_biascorrect.inputs.convergence_threshold = 1e-16
			s_register, s_warp, f_register, f_warp = DSURQEc_structural_registration(template, registration_mask)
			#TODO: incl. in func registration
			if autorotate:
				s_rotated = autorotate(template)
				workflow_connections.extend([
					(s_biascorrect, s_rotated, [('output_image', 'out_file')]),
					(s_rotated, s_register, [('out_file', 'moving_image')]),
					])
			else:
				workflow_connections.extend([
					(s_biascorrect, s_register, [('output_image', 'moving_image')]),
					])
			# These connections are needed regardless of the autorotate branch:
			workflow_connections.extend([
				(s_register, s_warp, [('composite_transform', 'transforms')]),
				(s_bru2nii, s_warp, [('nii_file', 'input_image')]),
				(s_warp, datasink, [('output_image', 'anat')]),
				])
		else:
			s_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_biascorrect")
			s_biascorrect.inputs.dimension = 3
			s_biascorrect.inputs.bspline_fitting_distance = 100
			s_biascorrect.inputs.shrink_factor = 2
			s_biascorrect.inputs.n_iterations = [200,200,200,200]
			s_biascorrect.inputs.convergence_threshold = 1e-11

			s_reg_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="s_reg_biascorrect")
			s_reg_biascorrect.inputs.dimension = 3
			s_reg_biascorrect.inputs.bspline_fitting_distance = 95
			s_reg_biascorrect.inputs.shrink_factor = 2
			s_reg_biascorrect.inputs.n_iterations = [500,500,500,500]
			s_reg_biascorrect.inputs.convergence_threshold = 1e-14

			s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
			s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"

			s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
			s_BET.inputs.mask = True
			s_BET.inputs.frac = 0.3
			s_BET.inputs.robust = True

			s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
			s_register, s_warp, f_register, f_warp = structural_registration(template)

			workflow_connections.extend([
				(s_bru2nii, s_reg_biascorrect, [('nii_file', 'input_image')]),
				(s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
				(s_cutoff, s_BET, [('out_file', 'in_file')]),
				(s_biascorrect, s_mask, [('output_image', 'in_file')]),
				(s_BET, s_mask, [('mask_file', 'mask_file')]),
				])

			#TODO: incl. in func registration
			if autorotate:
				s_rotated = autorotate(template)
				workflow_connections.extend([
					(s_mask, s_rotated, [('out_file', 'out_file')]),
					(s_rotated, s_register, [('out_file', 'moving_image')]),
					])
			else:
				workflow_connections.extend([
					(s_mask, s_register, [('out_file', 'moving_image')]),
					])
			# These connections are needed regardless of the autorotate branch:
			workflow_connections.extend([
				(s_register, s_warp, [('composite_transform', 'transforms')]),
				(s_bru2nii, s_warp, [('nii_file', 'input_image')]),
				(s_warp, datasink, [('output_image', 'anat')]),
				])



		s_bids_filename = pe.Node(name='s_bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
		s_bids_filename.inputs.scan_prefix = False

		workflow_connections.extend([
			(infosource, get_s_scan, [('subject_session', 'selector')]),
			(infosource, s_bids_filename, [('subject_session', 'subject_session')]),
			(get_s_scan, s_bru2nii, [('scan_path','input_dir')]),
			(get_s_scan, s_bids_filename, [('scan_type', 'scan')]),
			(s_bids_filename, s_warp, [('filename','output_image')]),
			(s_bru2nii, s_biascorrect, [('nii_file', 'input_image')]),
			])



	if functional_registration_method == "structural":
		if not structural_scan_types:
			raise ValueError('The option `registration="structural"` requires there to be a structural scan type.')
		workflow_connections.extend([
			(s_register, f_warp, [('composite_transform', 'transforms')]),
			])
		if realign:
			workflow_connections.extend([
				(realigner, f_warp, [('out_file', 'input_image')]),
				])
		else:
			workflow_connections.extend([
				(dummy_scans, f_warp, [('out_file', 'input_image')]),
				])


	if functional_registration_method == "composite":
		if not structural_scan_types:
			raise ValueError('The option `registration="composite"` requires there to be a structural scan type.')
		_, _, f_register, f_warp = DSURQEc_structural_registration(template, registration_mask)

		temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

		f_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="f_biascorrect")
		f_biascorrect.inputs.dimension = 3
		f_biascorrect.inputs.bspline_fitting_distance = 100
		f_biascorrect.inputs.shrink_factor = 2
		f_biascorrect.inputs.n_iterations = [200,200,200,200]
		f_biascorrect.inputs.convergence_threshold = 1e-11

		merge = pe.Node(util.Merge(2), name='merge')

		workflow_connections.extend([
			(temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
			(f_biascorrect, f_register, [('output_image', 'moving_image')]),
			(s_biascorrect, f_register, [('output_image', 'fixed_image')]),
			(f_register, merge, [('composite_transform', 'in1')]),
			(s_register, merge, [('composite_transform', 'in2')]),
			(merge, f_warp, [('out', 'transforms')]),
			])
		if realign:
			workflow_connections.extend([
				(realigner, temporal_mean, [('out_file', 'in_file')]),
				(realigner, f_warp, [('out_file', 'input_image')]),
				])
		else:
			workflow_connections.extend([
				(dummy_scans, temporal_mean, [('out_file', 'in_file')]),
				(dummy_scans, f_warp, [('out_file', 'input_image')]),
				])

	elif functional_registration_method == "functional":
		f_register, f_warp = functional_registration(template)

		temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

		f_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(), name="f_biascorrect")
		f_biascorrect.inputs.dimension = 3
		f_biascorrect.inputs.bspline_fitting_distance = 100
		f_biascorrect.inputs.shrink_factor = 2
		f_biascorrect.inputs.n_iterations = [200,200,200,200]
		f_biascorrect.inputs.convergence_threshold = 1e-11

		f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
		f_cutoff.inputs.op_string = "-thrP 30"

		f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
		f_BET.inputs.mask = True
		f_BET.inputs.frac = 0.5

		workflow_connections.extend([
			(temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
			(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
			(f_cutoff, f_BET, [('out_file', 'in_file')]),
			(f_BET, f_register, [('out_file', 'moving_image')]),
			(f_register, f_warp, [('composite_transform', 'transforms')]),
			])
		if realign:
			workflow_connections.extend([
				(realigner, temporal_mean, [('out_file', 'in_file')]),
				(realigner, f_warp, [('out_file', 'input_image')]),
				])
		else:
			workflow_connections.extend([
				(dummy_scans, temporal_mean, [('out_file', 'in_file')]),
				(dummy_scans, f_warp, [('out_file', 'input_image')]),
				])


	invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
	if functional_blur_xy and negative_contrast_agent:
		blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
		blur.inputs.fwhmxy = functional_blur_xy
		workflow_connections.extend([
			(f_warp, blur, [('output_image', 'in_file')]),
			(blur, invert, [(('out_file', fslmaths_invert_values), 'op_string')]),
			(blur, invert, [('out_file', 'in_file')]),
			(invert, bandpass, [('out_file', 'in_file')]),
			])
	elif functional_blur_xy:
		blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
		blur.inputs.fwhmxy = functional_blur_xy
		workflow_connections.extend([
			(f_warp, blur, [('output_image', 'in_file')]),
			(blur, bandpass, [('out_file', 'in_file')]),
			])
	elif negative_contrast_agent:
		workflow_connections.extend([
			(f_warp, invert, [(('output_image', fslmaths_invert_values), 'op_string')]),
			(f_warp, invert, [('output_image', 'in_file')]),
			(invert, bandpass, [('out_file', 'in_file')]),
			])
	else:
		workflow_connections.extend([
			(f_warp, bandpass, [('output_image', 'in_file')]),
			])

	workdir_name = workflow_name+"_work"
	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = path.join(measurements_base,"preprocessing")
	workflow.config = {"execution": {"crashdump_dir": path.join(measurements_base,"preprocessing/crashdump")}}
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

	workflow.run(plugin="MultiProc",  plugin_args={'n_procs' : n_procs})
	if not keep_work:
		shutil.rmtree(path.join(workflow.base_dir,workdir_name))
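fslmaths_invert_values is used above to build the op_string for the invert node but is not shown. A minimal sketch, assuming inversion about zero as described for negative_contrast_agent:

def fslmaths_invert_values(img_path):
    # The image path is delivered by the connection but is not needed for a
    # plain sign flip; it is kept for interface compatibility.
    return '-mul -1'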
Example #25
def main(derivatives_dir,
         ds,
         wf_folders):

    if ds == 'ds-02':
        template_mask = op.join(derivatives_dir,
                                ds,
                                'conjunct_masks',
                                'sub-{subject}',
                                'anat',
                                'sub-{subject}_desc-{mask}_mask.nii.gz')
    elif ds == 'ds-01':
        template_mask = op.join(derivatives_dir,
                                ds,
                                'conjunct_masks',
                                'sub-{subject}',
                                'anat',
                                'sub-{subject}_space-FLASH_desc-{mask}_space-T1w.nii.gz')

    template_transform = op.join(derivatives_dir,
                                 ds,
                                 'fmriprep',
                                 'sub-{subject}',
                                 'anat',
                                 'sub-{subject}_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5')

    t1w_template = op.join(derivatives_dir,
                         ds,
                         'fmriprep',
                         'sub-{subject}',
                         'anat',
                         'sub-{subject}_desc-preproc_T1w.nii.gz')

    templates = {'mask':template_mask,
                'transform':template_transform,
                 't1w':t1w_template}

    selector = pe.MapNode(nio.SelectFiles(templates),
                          iterfield=['subject'],
                          name='selector')

    if ds == 'ds-02':
        subjects = ['{:02d}'.format(i) for i in list(range(1, 16))]
        subjects.pop(3)
        subjects.pop(0)
    elif ds =='ds-01':
        subjects = ['{:02d}'.format(i) for i in list(range(1, 19))]



    selector.inputs.subject = subjects
    selector.iterables = [('mask', ['stnl', 'stnr'])]

    wf = pe.Workflow(name='transform_stn_masks',
                     base_dir=wf_folders)

    reorient_mask = pe.MapNode(niu.Function(function=resample_to_img,
                                            input_names=['source_img',
                                                         'target_img'],
                                            output_names=['reoriented_image']),
                               iterfield=['source_img',
                                          'target_img'],
                               name='reorient_mask')
    wf.connect(selector, 'mask', reorient_mask, 'source_img')
    wf.connect(selector, 't1w', reorient_mask, 'target_img')

    transformer = pe.MapNode(ants.ApplyTransforms(interpolation='NearestNeighbor'),
                             iterfield=['input_image', 'transforms'],
                             name='transformer')

    wf.config = { "execution": { "crashdump_dir": op.join(os.environ['HOME'], 'crashdumps') }}
    
    wf.connect(reorient_mask, 'reoriented_image', transformer, 'input_image')
    wf.connect(selector, 'transform', transformer, 'transforms')
    transformer.inputs.reference_image = fsl.Info.standard_image('MNI152_T1_0.5mm.nii.gz')

    merge_masks = pe.Node(fsl.Merge(dimension='t'),
                          name='merge_masks')
    wf.connect(transformer, 'output_image', merge_masks, 'in_files')

    mean_mask = pe.Node(fsl.MeanImage(),
                        name='mean_mask')
    wf.connect(merge_masks, 'merged_file', mean_mask, 'in_file')

    base_dir = op.join(derivatives_dir, ds)
    datasink = pe.Node(nio.DataSink(base_directory=base_dir),
                 name='datasink')

    wf.connect(transformer, 'output_image', datasink, 'individual_masks_mni_space')
    wf.connect(mean_mask, 'out_file', datasink, 'mean_mask_mni_space')
                 

    wf.run(plugin='MultiProc',
           plugin_args={'n_procs':4})
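The resample_to_img helper wrapped by the Function node above is not shown. A hedged sketch, assuming a nilearn dependency and a hypothetical output file name; nearest-neighbour interpolation preserves the binary mask values:

def resample_to_img(source_img, target_img):
    # Imports inside the function body, as nipype Function nodes require
    import os
    from nilearn import image
    resampled = image.resample_to_img(source_img, target_img,
                                      interpolation='nearest')
    reoriented_image = os.path.abspath('reoriented_mask.nii.gz')  # hypothetical name
    resampled.to_filename(reoriented_image)
    return reoriented_image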
Example #26
def legacy(
    bids_base,
    template,
    autorotate=False,
    debug=False,
    functional_blur_xy=False,
    functional_match={},
    keep_work=False,
    negative_contrast_agent=False,
    n_procs=N_PROCS,
    out_base=None,
    realign="time",
    registration_mask=False,
    sessions=[],
    structural_match={},
    subjects=[],
    tr=1,
    workflow_name='legacy',
):
    '''
	Legacy realignment and registration workflow representative of the tweaks and workarounds commonly used in the pre-SAMRI period.

	Parameters
	----------
	bids_base : str
		Path to the BIDS data set root.
	template : str
		Path to the template to register the data to.
	autorotate : bool, optional
		Whether to use a multi-rotation-state transformation start.
		This allows the registration to commence with the best rotational fit, and may help if the orientation of the data is malformed with respect to the header.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	functional_blur_xy : float, optional
		Factor by which to smooth data in the xy-plane; if parameter evaluates to false, no smoothing will be applied.
		Ideally this value should correspond to the resolution or smoothness in the z-direction (assuming z represents the lower-resolution slice-encoding direction).
	functional_match : dict, optional
		Dictionary specifying a whitelist to use for functional data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition', 'task', or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	keep_work : bool, optional
		Whether to keep the work directory after workflow conclusion (this directory contains all the intermediary processing commands, inputs, and outputs --- it is invaluable for debugging but many times larger in size than the actual output).
	negative_contrast_agent : bool, optional
		Whether the scan was acquired with a negative contrast agent, given the imaging modality; if true, the values will be inverted with respect to zero.
		This is commonly used for iron nano-particle Cerebral Blood Volume (CBV) measurements.
	n_procs : int, optional
		Number of processors to maximally use for the workflow; if unspecified, a best guess will be estimated based on hardware (but not on current load).
	out_base : str, optional
		Output base directory --- inside which a directory named `workflow_name` (as well as associated directories) will be created.
	realign : {"space","time","spacetime",""}, optional
		Parameter that dictates slice-timing correction and realignment of slices. "time" (fsl.SliceTimer) is the default, since it works safely. Use the others only with caution!
	registration_mask : str, optional
		Mask to use for the registration process.
		This mask will constrain the area for similarity metric evaluation, but the data will not be cropped.
	sessions : list, optional
		A whitelist of sessions to include in the workflow, if the list is empty there is no whitelist and all sessions will be considered.
	structural_match : dict, optional
		Dictionary specifying a whitelist to use for structural data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition', or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	subjects : list, optional
		A whitelist of subjects to include in the workflow, if the list is empty there is no whitelist and all sessions will be considered.
	tr : float, optional
		Repetition time, explicitly.
		WARNING! This is a parameter waiting for deprecation.
	workflow_name : str, optional
		Top level name for the output directory.
	'''

    bids_base, out_base, out_dir, template, registration_mask, data_selection, functional_scan_types, structural_scan_types, subjects_sessions, func_ind, struct_ind = common_select(
        bids_base,
        out_base,
        workflow_name,
        template,
        registration_mask,
        functional_match,
        structural_match,
        subjects,
        sessions,
    )

    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_bids_scan,
                             input_names=inspect.getargspec(get_bids_scan)[0],
                             output_names=[
                                 'scan_path', 'scan_type', 'task', 'nii_path',
                                 'nii_name', 'file_name', 'events_name',
                                 'subject_session'
                             ]))
    get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.bids_base = bids_base
    get_f_scan.iterables = ("ind_type", func_ind)

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getargspec(force_dummy_scans)[0],
            output_names=['out_file', 'deleted_scans']))
    dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_bids_events_file,
            input_names=inspect.getargspec(write_bids_events_file)[0],
            output_names=['out_file']))

    temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

    f_resize = pe.Node(interface=VoxelResize(), name="f_resize")
    f_resize.inputs.resize_factors = [10, 10, 10]

    f_percentile = pe.Node(interface=fsl.ImageStats(), name="f_percentile")
    f_percentile.inputs.op_string = '-p 98'

    f_threshold = pe.Node(interface=fsl.Threshold(), name="f_threshold")

    f_fast = pe.Node(interface=fsl.FAST(), name="f_fast")
    f_fast.inputs.no_pve = True
    f_fast.inputs.output_biascorrected = True

    f_bet = pe.Node(interface=fsl.BET(), name="f_BET")

    f_swapdim = pe.Node(interface=fsl.SwapDimensions(), name="f_swapdim")
    f_swapdim.inputs.new_dims = ('x', '-z', '-y')

    f_deleteorient = pe.Node(interface=FSLOrient(), name="f_deleteorient")
    f_deleteorient.inputs.main_option = 'deleteorient'

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = out_dir
    datasink.inputs.parameterization = False

    workflow_connections = [
        (get_f_scan, dummy_scans, [('nii_path', 'in_file')]),
        (get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
        (dummy_scans, events_file, [('deleted_scans', 'forced_dummy_scans')]),
        (dummy_scans, f_resize, [('out_file', 'in_file')]),
        (get_f_scan, events_file, [('nii_path', 'timecourse_file'),
                                   ('task', 'task'),
                                   ('scan_path', 'scan_dir')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (get_f_scan, events_file, [('events_name', 'out_file')]),
        (get_f_scan, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
        (temporal_mean, f_percentile, [('out_file', 'in_file')]),
        # here we divide by 10 assuming 10 percent noise
        (f_percentile, f_threshold, [(('out_stat', divideby_10), 'thresh')]),
        (temporal_mean, f_threshold, [('out_file', 'in_file')]),
        (f_threshold, f_fast, [('out_file', 'in_files')]),
        (f_fast, f_bet, [('restored_image', 'in_file')]),
        (f_resize, f_deleteorient, [('out_file', 'in_file')]),
        (f_deleteorient, f_swapdim, [('out_file', 'in_file')]),
    ]

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (f_swapdim, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        realigner.inputs.tr = tr
        realigner.inputs.slice_info = 3  #3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (f_swapdim, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        realigner.inputs.time_repetition = tr
        workflow_connections.extend([
            (f_swapdim, realigner, [('out_file', 'in_file')]),
        ])

    #if structural_scan_types.any():
    #	get_s_scan = pe.Node(name='get_s_scan', interface=util.Function(function=get_bids_scan, input_names=inspect.getargspec(get_bids_scan)[0], output_names=['scan_path','scan_type','task', 'nii_path', 'nii_name', 'file_name', 'events_name', 'subject_session']))
    #	get_s_scan.inputs.ignore_exception = True
    #	get_s_scan.inputs.data_selection = data_selection
    #	get_s_scan.inputs.bids_base = bids_base

    #	s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
    #	s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"

    #	s_resize = pe.Node(interface=VoxelResize(), name="s_resize")

    #	s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
    #	s_BET.inputs.mask = True
    #	s_BET.inputs.frac = 0.3
    #	s_BET.inputs.robust = True

    #	ants_introduction = pe.Node(interface=legacy.antsIntroduction(), name='ants_introduction')
    #	ants_introduction.inputs.dimension = 3
    #	ants_introduction.inputs.reference_image = template
    #	#will need updating to `1`
    #	ants_introduction.inputs.bias_field_correction = True
    #	ants_introduction.inputs.transformation_model = 'GR'
    #	ants_introduction.inputs.max_iterations = [8,15,8]

    #	s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
    #	s_register, s_warp, f_warp = structural_registration(template)

    #	workflow_connections.extend([
    #		(get_s_scan, s_reg_biascorrect, [('nii_path', 'input_image')]),
    #		(s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
    #		(s_cutoff, s_BET, [('out_file', 'in_file')]),
    #		(s_biascorrect, s_mask, [('output_image', 'in_file')]),
    #		(s_BET, s_mask, [('mask_file', 'mask_file')]),
    #		])

    #	#TODO: incl. in func registration
    #	if autorotate:
    #		workflow_connections.extend([
    #			(s_mask, s_rotated, [('out_file', 'out_file')]),
    #			(s_rotated, s_register, [('out_file', 'moving_image')]),
    #			])
    #	else:
    #		workflow_connections.extend([
    #			(s_mask, s_register, [('out_file', 'moving_image')]),
    #			(s_register, s_warp, [('composite_transform', 'transforms')]),
    #			(get_s_scan, s_warp, [('nii_path', 'input_image')]),
    #			(s_warp, datasink, [('output_image', 'anat')]),
    #			])

    #	if autorotate:
    #		s_rotated = autorotate(template)

    #	workflow_connections.extend([
    #		(get_f_scan, get_s_scan, [('subject_session', 'selector')]),
    #		(get_s_scan, s_warp, [('nii_name','output_image')]),
    #		(get_s_scan, s_biascorrect, [('nii_path', 'input_image')]),
    #		])

    f_antsintroduction = pe.Node(interface=antslegacy.antsIntroduction(),
                                 name='ants_introduction')
    f_antsintroduction.inputs.dimension = 3
    f_antsintroduction.inputs.reference_image = template
    #will need updating to `1`
    f_antsintroduction.inputs.bias_field_correction = True
    f_antsintroduction.inputs.transformation_model = 'GR'
    f_antsintroduction.inputs.max_iterations = [8, 15, 8]

    f_warp = pe.Node(interface=ants.WarpTimeSeriesImageMultiTransform(),
                     name='f_warp')
    f_warp.inputs.reference_image = template
    f_warp.inputs.dimension = 4

    f_copysform2qform = pe.Node(interface=FSLOrient(),
                                name='f_copysform2qform')
    f_copysform2qform.inputs.main_option = 'copysform2qform'

    warp_merge = pe.Node(util.Merge(2), name='warp_merge')

    workflow_connections.extend([
        (f_bet, f_antsintroduction, [('out_file', 'input_image')]),
        (f_antsintroduction, warp_merge, [('warp_field', 'in1')]),
        (f_antsintroduction, warp_merge, [('affine_transformation', 'in2')]),
        (warp_merge, f_warp, [('out', 'transformation_series')]),
        (f_warp, f_copysform2qform, [('output_image', 'in_file')]),
    ])
    if realign == "space":
        workflow_connections.extend([
            (realigner, temporal_mean, [('realigned_files', 'in_file')]),
            (realigner, f_warp, [('realigned_files', 'input_image')]),
        ])
    elif realign == "spacetime":
        workflow_connections.extend([
            (realigner, temporal_mean, [('out_file', 'in_file')]),
            (realigner, f_warp, [('out_file', 'input_image')]),
        ])
    elif realign == "time":
        workflow_connections.extend([
            (realigner, temporal_mean, [('slice_time_corrected_file',
                                         'in_file')]),
            (realigner, f_warp, [('slice_time_corrected_file', 'input_image')
                                 ]),
        ])
    else:
        workflow_connections.extend([
            (f_resize, temporal_mean, [('out_file', 'in_file')]),
            (f_swapdim, f_warp, [('out_file', 'input_image')]),
        ])

    invert = pe.Node(interface=fsl.ImageMaths(), name="invert")

    blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
    blur.inputs.fwhmxy = functional_blur_xy

    if functional_blur_xy and negative_contrast_agent:
        workflow_connections.extend([
            (f_copysform2qform, blur, [('out_file', 'in_file')]),
            (blur, invert, [(('out_file', fslmaths_invert_values), 'op_string')
                            ]),
            (blur, invert, [('out_file', 'in_file')]),
            (get_f_scan, invert, [('nii_name', 'out_file')]),
            (invert, datasink, [('out_file', 'func')]),
        ])

    elif functional_blur_xy:
        workflow_connections.extend([
            (get_f_scan, blur, [('nii_name', 'out_file')]),
            (f_copysform2qform, blur, [('out_file', 'in_file')]),
            (blur, datasink, [('out_file', 'func')]),
        ])

    elif negative_contrast_agent:
        workflow_connections.extend([
            (get_f_scan, invert, [('nii_name', 'out_file')]),
            (f_copysform2qform, invert, [(('out_file', fslmaths_invert_values),
                                          'op_string')]),
            (f_copysform2qform, invert, [('out_file', 'in_file')]),
            (invert, datasink, [('out_file', 'func')]),
        ])
    else:

        f_rename = pe.Node(util.Rename(), name='f_rename')

        workflow_connections.extend([
            (get_f_scan, f_rename, [('nii_name', 'format_string')]),
            (f_copysform2qform, f_rename, [('out_file', 'in_file')]),
            (f_rename, datasink, [('out_file', 'func')]),
        ])

    workflow_config = {
        'execution': {
            'crashdump_dir': path.join(bids_base, 'preprocessing/crashdump'),
        }
    }
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = workflow_name + "_work"
    #this gives the name of the workdir; the output name is passed to the datasink
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_base
    workflow.config = workflow_config
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
    if not keep_work:
        workdir = path.join(workflow.base_dir, workdir_name)
        try:
            shutil.rmtree(workdir)
        except OSError as e:
            if str(e) == 'Cannot call rmtree on a symbolic link':
                print(
                    'Not deleting top level workdir (`{}`), as it is a symlink. Deleting only contents instead'
                    .format(workdir))
                for file_object in os.listdir(workdir):
                    file_object_path = os.path.join(workdir, file_object)
                    if os.path.isfile(file_object_path):
                        os.unlink(file_object_path)
                    else:
                        shutil.rmtree(file_object_path)
            else:
                raise OSError(str(e))
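A minimal standalone sketch of the symlink-aware cleanup used above (the helper name and structure are illustrative, not SAMRI API):

import os
import shutil

def remove_workdir(workdir):
    # Remove the work directory; if it is a symlink, `rmtree` refuses, so
    # empty it in place instead of deleting the link target wholesale.
    try:
        shutil.rmtree(workdir)
    except OSError as e:
        if 'symbolic link' not in str(e):
            raise
        for entry in os.listdir(workdir):
            entry_path = os.path.join(workdir, entry)
            if os.path.isfile(entry_path):
                os.unlink(entry_path)
            else:
                shutil.rmtree(entry_path)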
Exemple #27
0
def legacy(
    bids_base,
    template,
    debug=False,
    functional_blur_xy=False,
    functional_match={},
    keep_work=False,
    n_jobs=False,
    n_jobs_percentage=0.8,
    out_base=None,
    realign="time",
    registration_mask=False,
    sessions=[],
    structural_match={},
    subjects=[],
    tr=1,
    workflow_name='legacy',
    enforce_dummy_scans=DUMMY_SCANS,
    exclude={},
):
    '''
	Legacy realignment and registration workflow representative of the tweaks and workarounds commonly used in the pre-SAMRI period.

	Parameters
	----------
	bids_base : str
		Path to the BIDS data set root.
	template : str
		Path to the template to register the data to.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	exclude : dict, optional
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified, matching entries will be excluded from the analysis.
	functional_blur_xy : float, optional
		Factor by which to smooth data in the xy-plane; if the parameter evaluates to false, no smoothing will be applied.
		Ideally this value should correspond to the resolution or smoothness in the z-direction (assuming z represents the lower-resolution slice-encoding direction).
	functional_match : dict, optional
		Dictionary specifying a whitelist to use for functional data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition', 'task', or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	keep_work : bool, optional
		Whether to keep the work directory after workflow conclusion (this directory contains all the intermediary processing commands, inputs, and outputs --- it is invaluable for debugging but many times larger in size than the actual output).
	n_jobs : int, optional
		Number of processors to maximally use for the workflow; if unspecified, a best guess will be estimated based on `n_jobs_percentage` and the available hardware (but not on current load).
	n_jobs_percentage : float, optional
		Percentage of available processors (as in available hardware, not available free load) to maximally use for the workflow (this is overridden by `n_jobs`).
	out_base : str, optional
		Output base directory - inside which a directory named `workflow_name` (as well as associated directories) will be created.
	realign : {"space","time","spacetime",""}, optional
		Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!
	registration_mask : str, optional
		Mask to use for the registration process.
		This mask will constrain the area for similarity metric evaluation, but the data will not be cropped.
	sessions : list, optional
		A whitelist of sessions to include in the workflow; if the list is empty there is no whitelist and all sessions will be considered.
	structural_match : dict, optional
		Dictionary specifying a whitelist to use for structural data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition' or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	subjects : list, optional
		A whitelist of subjects to include in the workflow; if the list is empty there is no whitelist and all subjects will be considered.
	tr : float, optional
		Repetition time, explicitly.
		WARNING! This is a parameter waiting for deprecation.
	workflow_name : str, optional
		Top level name for the output directory.
	'''
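    # Hypothetical invocation (paths and identifiers below are illustrative
    # only, not part of the documented API):
    #
    #   legacy('~/my_bids_data', '/path/to/template.nii.gz',
    #          subjects=['0001'], out_base='~/outputs')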

    try:
        import nipype.interfaces.ants.legacy as antslegacy
    except ModuleNotFoundError:
        print('''
			The `nipype.interfaces.ants.legacy` module was not found on this system.
			You may want to downgrade nipype to e.g. 1.1.1, as this module has been removed in more recent versions:
			https://github.com/nipy/nipype/issues/3197
		''')

    bids_base, out_base, out_dir, template, registration_mask, data_selection, functional_scan_types, structural_scan_types, subjects_sessions, func_ind, struct_ind = common_select(
        bids_base,
        out_base,
        workflow_name,
        template,
        registration_mask,
        functional_match,
        structural_match,
        subjects,
        sessions,
        exclude,
    )

    if not n_jobs:
        n_jobs = max(int(round(mp.cpu_count() * n_jobs_percentage)), 2)

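    # Note: `inspect.getargspec` (used below to derive the Function-node input
    # names) was removed in Python 3.11; `inspect.getfullargspec` is the
    # drop-in replacement on interpreters where the former is unavailable.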
    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_bids_scan,
                             input_names=inspect.getargspec(get_bids_scan)[0],
                             output_names=[
                                 'scan_path', 'scan_type', 'task', 'nii_path',
                                 'nii_name', 'events_name', 'subject_session',
                                 'metadata_filename', 'dict_slice', 'ind_type'
                             ]))
    get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.bids_base = bids_base
    get_f_scan.iterables = ("ind_type", func_ind)

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getargspec(force_dummy_scans)[0],
            output_names=['out_file', 'deleted_scans']))
    dummy_scans.inputs.desired_dummy_scans = enforce_dummy_scans

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_bids_events_file,
            input_names=inspect.getargspec(write_bids_events_file)[0],
            output_names=['out_file']))

    temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

    f_resize = pe.Node(interface=VoxelResize(), name="f_resize")
    f_resize.inputs.resize_factors = [10, 10, 10]
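    # The 10-fold upscaling per dimension brings rodent voxel sizes into the
    # range expected by human-calibrated tools (cf. the `actual_size` parameter
    # of the related workflows, which controls the same 10x stretch).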

    f_percentile = pe.Node(interface=fsl.ImageStats(), name="f_percentile")
    f_percentile.inputs.op_string = '-p 98'

    f_threshold = pe.Node(interface=fsl.Threshold(), name="f_threshold")

    f_fast = pe.Node(interface=fsl.FAST(), name="f_fast")
    f_fast.inputs.no_pve = True
    f_fast.inputs.output_biascorrected = True

    f_bet = pe.Node(interface=fsl.BET(), name="f_BET")

    f_swapdim = pe.Node(interface=fsl.SwapDimensions(), name="f_swapdim")
    f_swapdim.inputs.new_dims = ('x', '-z', '-y')

    f_deleteorient = pe.Node(interface=FSLOrient(), name="f_deleteorient")
    f_deleteorient.inputs.main_option = 'deleteorient'

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = out_dir
    datasink.inputs.parameterization = False

    workflow_connections = [
        (get_f_scan, dummy_scans, [('nii_path', 'in_file')]),
        (dummy_scans, events_file, [('deleted_scans', 'forced_dummy_scans')]),
        (dummy_scans, f_resize, [('out_file', 'in_file')]),
        (get_f_scan, events_file, [('nii_path', 'timecourse_file'),
                                   ('task', 'task'),
                                   ('scan_path', 'scan_dir')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (get_f_scan, events_file, [('events_name', 'out_file')]),
        (get_f_scan, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
        (temporal_mean, f_percentile, [('out_file', 'in_file')]),
        # here we divide by 10 assuming 10 percent noise
        (f_percentile, f_threshold, [(('out_stat', divideby_10), 'thresh')]),
        (temporal_mean, f_threshold, [('out_file', 'in_file')]),
        (f_threshold, f_fast, [('out_file', 'in_files')]),
        (f_fast, f_bet, [('restored_image', 'in_file')]),
        (f_resize, f_deleteorient, [('out_file', 'in_file')]),
        (f_deleteorient, f_swapdim, [('out_file', 'in_file')]),
    ]
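    # A minimal sketch of the `divideby_10` connection helper assumed above
    # (defined elsewhere; it turns the 98th-percentile intensity into the
    # assumed 10-percent-noise threshold):
    #
    #   def divideby_10(value):
    #       return value / 10.0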

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (f_swapdim, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        realigner.inputs.tr = tr
        realigner.inputs.slice_info = 3  #3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (f_swapdim, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        realigner.inputs.time_repetition = tr
        workflow_connections.extend([
            (f_swapdim, realigner, [('out_file', 'in_file')]),
        ])

    f_antsintroduction = pe.Node(interface=antslegacy.antsIntroduction(),
                                 name='ants_introduction')
    f_antsintroduction.inputs.dimension = 3
    f_antsintroduction.inputs.reference_image = template
    #will need updating to `1`
    f_antsintroduction.inputs.bias_field_correction = True
    f_antsintroduction.inputs.transformation_model = 'GR'
    f_antsintroduction.inputs.max_iterations = [8, 15, 8]

    f_warp = pe.Node(interface=ants.WarpTimeSeriesImageMultiTransform(),
                     name='f_warp')
    f_warp.inputs.reference_image = template
    f_warp.inputs.dimension = 4

    f_copysform2qform = pe.Node(interface=FSLOrient(),
                                name='f_copysform2qform')
    f_copysform2qform.inputs.main_option = 'copysform2qform'

    warp_merge = pe.Node(util.Merge(2), name='warp_merge')

    workflow_connections.extend([
        (f_bet, f_antsintroduction, [('out_file', 'input_image')]),
        (f_antsintroduction, warp_merge, [('warp_field', 'in1')]),
        (f_antsintroduction, warp_merge, [('affine_transformation', 'in2')]),
        (warp_merge, f_warp, [('out', 'transformation_series')]),
        (f_warp, f_copysform2qform, [('output_image', 'in_file')]),
    ])
    if realign == "space":
        workflow_connections.extend([
            (realigner, temporal_mean, [('realigned_files', 'in_file')]),
            (realigner, f_warp, [('realigned_files', 'input_image')]),
        ])
    elif realign == "spacetime":
        workflow_connections.extend([
            (realigner, temporal_mean, [('out_file', 'in_file')]),
            (realigner, f_warp, [('out_file', 'input_image')]),
        ])
    elif realign == "time":
        workflow_connections.extend([
            (realigner, temporal_mean, [('slice_time_corrected_file',
                                         'in_file')]),
            (realigner, f_warp, [('slice_time_corrected_file', 'input_image')
                                 ]),
        ])
    else:
        workflow_connections.extend([
            (f_resize, temporal_mean, [('out_file', 'in_file')]),
            (f_swapdim, f_warp, [('out_file', 'input_image')]),
        ])
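        # Note: without realignment the brain-extraction chain starts from the
        # resized series, while the warp consumes the reoriented (`f_swapdim`)
        # output further down the same resize -> deleteorient -> swapdim chain.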

    if functional_blur_xy:
        blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
        blur.inputs.fwhmxy = functional_blur_xy
        workflow_connections.extend([
            (get_f_scan, blur, [('nii_name', 'out_file')]),
            (f_copysform2qform, blur, [('out_file', 'in_file')]),
            (blur, datasink, [('out_file', 'func')]),
        ])
    else:

        f_rename = pe.Node(util.Rename(), name='f_rename')

        workflow_connections.extend([
            (get_f_scan, f_rename, [('nii_name', 'format_string')]),
            (f_copysform2qform, f_rename, [('out_file', 'in_file')]),
            (f_rename, datasink, [('out_file', 'func')]),
        ])

    workflow_config = {
        'execution': {
            'crashdump_dir': path.join(out_base, 'crashdump'),
        }
    }
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = workflow_name + "_work"
    #this gives the name of the workdir, the output name is passed to the datasink
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_base
    workflow.config = workflow_config
    try:
        workflow.write_graph(dotfilename=path.join(workflow.base_dir,
                                                   workdir_name, "graph.dot"),
                             graph2use="hierarchical",
                             format="png")
    except OSError:
        print(
            'We could not write the DOT file for visualization (`dot` function from the graphviz package). This is non-critical to the processing, but you should get this fixed.'
        )

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_jobs})
    copy_bids_files(bids_base, os.path.join(out_base, workflow_name))
    if not keep_work:
        workdir = path.join(workflow.base_dir, workdir_name)
        try:
            shutil.rmtree(workdir)
        except OSError as e:
            if str(e) == 'Cannot call rmtree on a symbolic link':
                print(
                    'Not deleting top level workdir (`{}`), as it is a symlink. Deleting only contents instead'
                    .format(workdir))
                for file_object in os.listdir(workdir):
                    file_object_path = os.path.join(workdir, file_object)
                    if os.path.isfile(file_object_path):
                        os.unlink(file_object_path)
                    else:
                        shutil.rmtree(file_object_path)
            else:
                raise OSError(str(e))
Exemple #28
0
    def create_workflow(self, flow, inputnode, outputnode):
        """Create the stage worflow.

        Parameters
        ----------
        flow : nipype.pipeline.engine.Workflow
            The nipype.pipeline.engine.Workflow instance of the fMRI pipeline

        inputnode : nipype.interfaces.utility.IdentityInterface
            Identity interface describing the inputs of the stage

        outputnode : nipype.interfaces.utility.IdentityInterface
            Identity interface describing the outputs of the stage
        """
        discard_output = pe.Node(
            interface=util.IdentityInterface(fields=["discard_output"]),
            name="discard_output",
        )
        if self.config.discard_n_volumes > 0:
            discard = pe.Node(
                interface=DiscardTP(n_discard=self.config.discard_n_volumes),
                name="discard_volumes",
            )
            # fmt:off
            flow.connect(
                [
                    (inputnode, discard, [("functional", "in_file")]),
                    (discard, discard_output, [("out_file", "discard_output")]),
                ]
            )
            # fmt:on
        else:
            # fmt:off
            flow.connect(
                [(inputnode, discard_output, [("functional", "discard_output")])]
            )
            # fmt:on

        despiking_output = pe.Node(
            interface=util.IdentityInterface(fields=["despiking_output"]),
            name="despkiking_output",
        )
        if self.config.despiking:
            despike = pe.Node(interface=Despike(), name="afni_despike")
            converter = pe.Node(
                interface=afni.AFNItoNIFTI(out_file="fMRI_despike.nii.gz"),
                name="converter",
            )
            # fmt:off
            flow.connect(
                [
                    (discard_output, despike, [("discard_output", "in_file")]),
                    (despike, converter, [("out_file", "in_file")]),
                    (converter, despiking_output, [("out_file", "despiking_output")]),
                ]
            )
            # fmt:on
        else:
            # fmt:off
            flow.connect(
                [
                    (discard_output, despiking_output, [("discard_output", "despiking_output")],)
                ]
            )
            # fmt:on

        if self.config.slice_timing != "none":
            slc_timing = pe.Node(interface=fsl.SliceTimer(), name="slice_timing")
            slc_timing.inputs.time_repetition = self.config.repetition_time
            if self.config.slice_timing == "bottom-top interleaved":
                slc_timing.inputs.interleaved = True
                slc_timing.inputs.index_dir = False
            elif self.config.slice_timing == "top-bottom interleaved":
                slc_timing.inputs.interleaved = True
                slc_timing.inputs.index_dir = True
            elif self.config.slice_timing == "bottom-top":
                slc_timing.inputs.interleaved = False
                slc_timing.inputs.index_dir = False
            elif self.config.slice_timing == "top-bottom":
                slc_timing.inputs.interleaved = False
                slc_timing.inputs.index_dir = True
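            # The four settings above map onto FSL slicetimer flags:
            # `interleaved=True` adds `--odd` (interleaved acquisition) and
            # `index_dir=True` adds `--down` (slices indexed top to bottom).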

        # def add_header_and_convert_to_tsv(in_file):

        #     try:

        if self.config.motion_correction:
            mo_corr = pe.Node(
                interface=fsl.MCFLIRT(
                    stats_imgs=True, save_mats=False, save_plots=True, mean_vol=True
                ),
                name="motion_correction",
            )

        if self.config.slice_timing != "none":
            # fmt:off
            flow.connect(
                [(despiking_output, slc_timing, [("despiking_output", "in_file")])]
            )
            # fmt:on
            if self.config.motion_correction:
                # fmt:off
                flow.connect(
                    [
                        (slc_timing, mo_corr, [("slice_time_corrected_file", "in_file")],),
                        (mo_corr, outputnode, [("out_file", "functional_preproc")]),
                        (mo_corr, outputnode, [("par_file", "par_file")]),
                        (mo_corr, outputnode, [("mean_img", "mean_vol")]),
                    ]
                )
                # fmt:on
            else:
                mean = pe.Node(interface=fsl.MeanImage(), name="mean")
                # fmt:off
                flow.connect(
                    [
                        (slc_timing, outputnode, [("slice_time_corrected_file", "functional_preproc")],),
                        (slc_timing, mean, [("slice_time_corrected_file", "in_file")]),
                        (mean, outputnode, [("out_file", "mean_vol")]),
                    ]
                )
                # fmt:on
        else:
            if self.config.motion_correction:
                # fmt:off
                flow.connect(
                    [
                        (despiking_output, mo_corr, [("despiking_output", "in_file")]),
                        (mo_corr, outputnode, [("out_file", "functional_preproc"),
                                               ("par_file", "par_file"),
                                               ("mean_img", "mean_vol")]),
                    ]
                )
                # fmt:on
            else:
                mean = pe.Node(interface=fsl.MeanImage(), name="mean")
                # fmt:off
                flow.connect(
                    [
                        (despiking_output, outputnode, [("despiking_output", "functional_preproc")]),
                        (despiking_output, mean, [("despiking_output", "in_file")]),
                        (mean, outputnode, [("out_file", "mean_vol")]),
                    ]
                )
                # fmt:on
Exemple #29
0
def bruker(
    measurements_base,
    template,
    debug=False,
    exclude={},
    functional_match={},
    structural_match={},
    sessions=[],
    subjects=[],
    actual_size=True,
    functional_blur_xy=False,
    functional_registration_method="structural",
    highpass_sigma=225,
    lowpass_sigma=None,
    negative_contrast_agent=False,
    n_procs=N_PROCS,
    realign="time",
    registration_mask=False,
    tr=1,
    very_nasty_bruker_delay_hack=False,
    workflow_name="generic",
    keep_work=False,
    autorotate=False,
    strict=False,
    verbose=False,
):
    '''

	realign: {"space","time","spacetime",""}
		Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!

	'''
    if template:
        if template == "mouse":
            template_files = fetch_mouse_DSURQE()
            template = template_files['template']
            registration_mask = template_files['mask']
        elif template == "rat":
            template_files = fetch_rat_waxholm()
            template = template_files['template']
            registration_mask = template_files['mask']
    else:
        raise ValueError("No species or template specified")

    measurements_base = path.abspath(path.expanduser(measurements_base))

    # add subject and session filters if present
    # (the match dictionaries feed `get_data_selection` below)
    if subjects:
        structural_match['subject'] = subjects
    if sessions:
        structural_match['session'] = sessions

    # define measurement directories to be processed, and populate the list either with the given include_measurements, or with an intelligent selection
    data_selection = pd.DataFrame([])
    # default to empty so the `.any()` checks below are safe when no match
    # dictionaries are given:
    structural_scan_types = functional_scan_types = pd.Series([], dtype=object)
    if structural_match:
        s_data_selection = get_data_selection(
            measurements_base,
            match=structural_match,
            exclude=exclude,
        )
        structural_scan_types = s_data_selection['scan_type'].unique()
        data_selection = pd.concat([data_selection, s_data_selection])
    if functional_match:
        f_data_selection = get_data_selection(
            measurements_base,
            match=functional_match,
            exclude=exclude,
        )
        functional_scan_types = f_data_selection['scan_type'].unique()
        data_selection = pd.concat([data_selection, f_data_selection])

    # we currently only support one structural scan type per session
    #if functional_registration_method in ("structural", "composite") and structural_scan_types:
    #	structural_scan_types = [structural_scan_types[0]]

    # we start to define nipype workflow elements (nodes, connections, meta)
    subjects_sessions = data_selection[["subject", "session"
                                        ]].drop_duplicates().values.tolist()
    if debug:
        print('Data selection:')
        print(data_selection)
        print('Iterating over:')
        print(subjects_sessions)
    infosource = pe.Node(interface=util.IdentityInterface(
        fields=['subject_session'], mandatory_inputs=False),
                         name="infosource")
    infosource.iterables = [('subject_session', subjects_sessions)]

    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_scan,
                             input_names=inspect.getargspec(get_scan)[0],
                             output_names=['scan_path', 'scan_type', 'trial']))
    if not strict:
        get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.measurements_base = measurements_base
    get_f_scan.iterables = ("scan_type", functional_scan_types)

    f_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="f_bru2nii")
    f_bru2nii.inputs.actual_size = actual_size

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getargspec(force_dummy_scans)[0],
            output_names=['out_file']))
    dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS

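    # fsl.maths.TemporalFilter wraps `fslmaths -bptf`, whose high-/low-pass
    # sigmas are specified in volumes (TRs) rather than in seconds, so the
    # `tr` fallback below is only a rough default.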
    bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
    bandpass.inputs.highpass_sigma = highpass_sigma
    if lowpass_sigma:
        bandpass.inputs.lowpass_sigma = lowpass_sigma
    else:
        bandpass.inputs.lowpass_sigma = tr

    #bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
    bids_filename = pe.Node(name='bids_filename',
                            interface=util.Function(
                                function=bids_naming,
                                input_names=inspect.getargspec(bids_naming)[0],
                                output_names=['filename']))
    bids_filename.inputs.metadata = data_selection

    #bids_stim_filename = pe.Node(name='bids_stim_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
    bids_stim_filename = pe.Node(
        name='bids_stim_filename',
        interface=util.Function(function=bids_naming,
                                input_names=inspect.getargspec(bids_naming)[0],
                                output_names=['filename']))
    bids_stim_filename.inputs.suffix = "events"
    bids_stim_filename.inputs.extension = ".tsv"
    bids_stim_filename.inputs.metadata = data_selection

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_events_file,
            input_names=inspect.getargspec(write_events_file)[0],
            output_names=['out_file']))
    events_file.inputs.dummy_scans_ms = DUMMY_SCANS * tr * 1000
    events_file.inputs.stim_protocol_dictionary = STIM_PROTOCOL_DICTIONARY
    events_file.inputs.very_nasty_bruker_delay_hack = very_nasty_bruker_delay_hack
    if not (strict or verbose):
        events_file.inputs.ignore_exception = True

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(measurements_base,
                                               "preprocessing", workflow_name)
    datasink.inputs.parameterization = False
    if not (strict or verbose):
        datasink.inputs.ignore_exception = True

    workflow_connections = [
        (infosource, get_f_scan, [('subject_session', 'selector')]),
        (infosource, bids_stim_filename, [('subject_session',
                                           'subject_session')]),
        (get_f_scan, bids_stim_filename, [('scan_type', 'scan_type')]),
        (get_f_scan, f_bru2nii, [('scan_path', 'input_dir')]),
        (f_bru2nii, dummy_scans, [('nii_file', 'in_file')]),
        (get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
        (get_f_scan, events_file, [('trial', 'trial'),
                                   ('scan_path', 'scan_dir')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (bids_stim_filename, events_file, [('filename', 'out_file')]),
        (infosource, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
        (infosource, bids_filename, [('subject_session', 'subject_session')]),
        (get_f_scan, bids_filename, [('scan_type', 'scan_type')]),
        (bids_filename, bandpass, [('filename', 'out_file')]),
        (bandpass, datasink, [('out_file', 'func')]),
    ]
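    # The BIDS-compliant filename is injected as the *output* name of the
    # bandpass node above, so the final functional image reaching the datasink
    # already carries its BIDS name.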

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        realigner.inputs.tr = tr
        realigner.inputs.slice_info = 3  #3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        realigner.inputs.time_repetition = tr
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    #ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
    if actual_size:
        s_biascorrect, f_biascorrect = real_size_nodes()
    else:
        s_biascorrect, f_biascorrect = inflated_size_nodes()

    if structural_scan_types.any():
        get_s_scan = pe.Node(
            name='get_s_scan',
            interface=util.Function(
                function=get_scan,
                input_names=inspect.getargspec(get_scan)[0],
                output_names=['scan_path', 'scan_type', 'trial']))
        if not strict:
            get_s_scan.inputs.ignore_exception = True
        get_s_scan.inputs.data_selection = data_selection
        get_s_scan.inputs.measurements_base = measurements_base
        get_s_scan.iterables = ("scan_type", structural_scan_types)

        s_bru2nii = pe.Node(interface=bru2nii.Bru2(), name="s_bru2nii")
        s_bru2nii.inputs.force_conversion = True
        s_bru2nii.inputs.actual_size = actual_size

        #s_bids_filename = pe.Node(name='s_bids_filename', interface=util.Function(function=sss_filename,input_names=inspect.getargspec(sss_filename)[0], output_names=['filename']))
        s_bids_filename = pe.Node(
            name='s_bids_filename',
            interface=util.Function(
                function=bids_naming,
                input_names=inspect.getargspec(bids_naming)[0],
                output_names=['filename']))
        s_bids_filename.inputs.metadata = data_selection

        if actual_size:
            s_register, s_warp, _, _ = DSURQEc_structural_registration(
                template, registration_mask)
            #TODO: incl. in func registration
            if autorotate:
                workflow_connections.extend([
                    (s_biascorrect, s_rotated, [('output_image', 'out_file')]),
                    (s_rotated, s_register, [('out_file', 'moving_image')]),
                ])
            else:
                workflow_connections.extend([
                    (s_biascorrect, s_register, [('output_image',
                                                  'moving_image')]),
                    (s_register, s_warp, [('composite_transform', 'transforms')
                                          ]),
                    (s_bru2nii, s_warp, [('nii_file', 'input_image')]),
                    (s_warp, datasink, [('output_image', 'anat')]),
                ])
        else:
            s_reg_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(),
                                        name="s_reg_biascorrect")
            s_reg_biascorrect.inputs.dimension = 3
            s_reg_biascorrect.inputs.bspline_fitting_distance = 95
            s_reg_biascorrect.inputs.shrink_factor = 2
            s_reg_biascorrect.inputs.n_iterations = [500, 500, 500, 500]
            s_reg_biascorrect.inputs.convergence_threshold = 1e-14

            s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
            s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"

            s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
            s_BET.inputs.mask = True
            s_BET.inputs.frac = 0.3
            s_BET.inputs.robust = True

            s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
            s_register, s_warp, f_warp = structural_registration(template)

            workflow_connections.extend([
                (s_bru2nii, s_reg_biascorrect, [('nii_file', 'input_image')]),
                (s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
                (s_cutoff, s_BET, [('out_file', 'in_file')]),
                (s_biascorrect, s_mask, [('output_image', 'in_file')]),
                (s_BET, s_mask, [('mask_file', 'mask_file')]),
            ])

            #TODO: incl. in func registration
            if autorotate:
                workflow_connections.extend([
                    (s_mask, s_rotated, [('out_file', 'out_file')]),
                    (s_rotated, s_register, [('out_file', 'moving_image')]),
                ])
            else:
                workflow_connections.extend([
                    (s_mask, s_register, [('out_file', 'moving_image')]),
                    (s_register, s_warp, [('composite_transform', 'transforms')
                                          ]),
                    (s_bru2nii, s_warp, [('nii_file', 'input_image')]),
                    (s_warp, datasink, [('output_image', 'anat')]),
                ])

        if autorotate:
            # NOTE: `s_rotated` is consumed by connections defined above, and
            # `autorotate` here refers to the boolean parameter, which shadows
            # the intended rotation helper.
            s_rotated = autorotate(template)

        workflow_connections.extend([
            (infosource, get_s_scan, [('subject_session', 'selector')]),
            (infosource, s_bids_filename, [('subject_session',
                                            'subject_session')]),
            (get_s_scan, s_bru2nii, [('scan_path', 'input_dir')]),
            (get_s_scan, s_bids_filename, [('scan_type', 'scan_type')]),
            (s_bids_filename, s_warp, [('filename', 'output_image')]),
            (s_bru2nii, s_biascorrect, [('nii_file', 'input_image')]),
        ])

    if functional_registration_method == "structural":
        if not structural_scan_types.any():
            raise ValueError(
                'The option `registration="structural"` requires there to be a structural scan type.'
            )
        workflow_connections.extend([
            (s_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    if functional_registration_method == "composite":
        if not structural_scan_types.any():
            raise ValueError(
                'The option `registration="composite"` requires there to be a structural scan type.'
            )
        _, _, f_register, f_warp = DSURQEc_structural_registration(
            template, registration_mask)

        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        merge = pe.Node(util.Merge(2), name='merge')

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (s_biascorrect, f_register, [('output_image', 'fixed_image')]),
            (f_register, merge, [('composite_transform', 'in1')]),
            (s_register, merge, [('composite_transform', 'in2')]),
            (merge, f_warp, [('out', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file',
                                             'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    elif functional_registration_method == "functional":
        f_register, f_warp = functional_registration(template)

        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        #f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
        #f_cutoff.inputs.op_string = "-thrP 30"

        #f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
        #f_BET.inputs.mask = True
        #f_BET.inputs.frac = 0.5

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            #(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
            #(f_cutoff, f_BET, [('out_file', 'in_file')]),
            #(f_BET, f_register, [('out_file', 'moving_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (f_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file',
                                             'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])

    invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
    if functional_blur_xy and negative_contrast_agent:
        blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
        blur.inputs.fwhmxy = functional_blur_xy
        workflow_connections.extend([
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, invert, [(('out_file', fslmaths_invert_values), 'op_string')
                            ]),
            (blur, invert, [('out_file', 'in_file')]),
            (invert, bandpass, [('out_file', 'in_file')]),
        ])
    elif functional_blur_xy:
        blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
        blur.inputs.fwhmxy = functional_blur_xy
        workflow_connections.extend([
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, bandpass, [('out_file', 'in_file')]),
        ])
    elif negative_contrast_agent:
        # no blurring requested: only invert the contrast before bandpassing
        workflow_connections.extend([
            (f_warp, invert, [(('output_image', fslmaths_invert_values),
                               'op_string')]),
            (f_warp, invert, [('output_image', 'in_file')]),
            (invert, bandpass, [('out_file', 'in_file')]),
        ])
    else:
        workflow_connections.extend([
            (f_warp, bandpass, [('output_image', 'in_file')]),
        ])

    workflow_config = {
        'execution': {
            'crashdump_dir':
            path.join(measurements_base, 'preprocessing/crashdump'),
        }
    }
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = path.join(measurements_base, "preprocessing")
    workflow.config = workflow_config
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
    if not keep_work:
        shutil.rmtree(path.join(workflow.base_dir, workdir_name))
Exemple #30
0
def generic(
    bids_base,
    template,
    actual_size=True,
    autorotate=False,
    debug=False,
    functional_blur_xy=False,
    functional_match={},
    functional_registration_method="composite",
    keep_work=False,
    negative_contrast_agent=False,
    n_procs=N_PROCS,
    out_base=None,
    realign="time",
    registration_mask="",
    sessions=[],
    structural_match={},
    subjects=[],
    tr=1,
    workflow_name='generic',
    params={},
    phase_dictionary=GENERIC_PHASES,
):
    '''
	Generic preprocessing and registration workflow for small animal data in BIDS format.

	Parameters
	----------
	bids_base : str
		Path to the BIDS data set root.
	template : str
		Path to the template to register the data to.
	actual_size : bool, optional
		Whether to keep the data at its original scale; if `False`, the spatial representation will be stretched 10-fold in each dimension.
	autorotate : bool, optional
		Whether to use a multi-rotation-state transformation start.
		This allows the registration to commence with the best rotational fit, and may help if the orientation of the data is malformed with respect to the header.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	functional_blur_xy : float, optional
		Factor by which to smooth data in the xy-plane; if the parameter evaluates to false, no smoothing will be applied.
		Ideally this value should correspond to the resolution or smoothness in the z-direction (assuming z represents the lower-resolution slice-encoding direction).
	functional_match : dict, optional
		Dictionary specifying a whitelist to use for functional data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition', 'task', or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	functional_registration_method : {'composite','functional','structural'}, optional
		How to register the functional scan to the template.
		The values mean the following: 'composite': the functional scan is registered to the structural scan, which is in turn registered to the template; 'functional': the functional scan is registered directly to the template; 'structural': the functional scan is registered with exactly the same transformation as the structural scan.
	keep_work : bool, optional
		Whether to keep the work directory after workflow conclusion (this directory contains all the intermediary processing commands, inputs, and outputs --- it is invaluable for debugging but many times larger in size than the actual output).
	negative_contrast_agent : bool, optional
		Whether the scan was acquired with a negative contrast agent given the imaging modality; if true the values will be inverted with respect to zero.
		This is commonly used for iron nano-particle Cerebral Blood Volume (CBV) measurements.
	n_procs : int, optional
		Number of processors to maximally use for the workflow; if unspecified, a best guess will be estimated based on hardware (but not on current load).
	out_base : str, optional
		Output base directory --- inside which a directory named `workflow_name` (as well as associated directories) will be created.
	realign : {"space","time","spacetime",""}, optional
		Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!
	registration_mask : str, optional
		Mask to use for the registration process.
		This mask will constrain the area for similarity metric evaluation, but the data will not be cropped.
	sessions : list, optional
		A whitelist of sessions to include in the workflow; if the list is empty there is no whitelist and all sessions will be considered.
	structural_match : dict, optional
		Dictionary specifying a whitelist to use for structural data inclusion into the workflow; if dictionary is empty no whitelist is present and all data will be considered.
		The dictionary should have keys which are 'acquisition' or 'modality', and values which are lists of acceptable strings for the respective BIDS field.
	subjects : list, optional
		A whitelist of subjects to include in the workflow; if the list is empty there is no whitelist and all subjects will be considered.
	tr : float, optional
		Repetition time, explicitly.
		WARNING! This is a parameter waiting for deprecation.
	workflow_name : str, optional
		Top level name for the output directory.
	'''
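    # Hypothetical invocation (paths below are illustrative only):
    #
    #   generic('~/my_bids_data', '/path/to/template.nii.gz',
    #           functional_registration_method='composite',
    #           out_base='~/outputs')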

    bids_base, out_base, out_dir, template, registration_mask, data_selection, functional_scan_types, structural_scan_types, subjects_sessions, func_ind, struct_ind = common_select(
        bids_base,
        out_base,
        workflow_name,
        template,
        registration_mask,
        functional_match,
        structural_match,
        subjects,
        sessions,
    )

    get_f_scan = pe.Node(name='get_f_scan',
                         interface=util.Function(
                             function=get_bids_scan,
                             input_names=inspect.getargspec(get_bids_scan)[0],
                             output_names=[
                                 'scan_path', 'scan_type', 'task', 'nii_path',
                                 'nii_name', 'file_name', 'events_name',
                                 'subject_session'
                             ]))
    get_f_scan.inputs.ignore_exception = True
    get_f_scan.inputs.data_selection = data_selection
    get_f_scan.inputs.bids_base = bids_base
    get_f_scan.iterables = ("ind_type", func_ind)

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getargspec(force_dummy_scans)[0],
            output_names=['out_file', 'deleted_scans']))
    dummy_scans.inputs.desired_dummy_scans = DUMMY_SCANS

    events_file = pe.Node(
        name='events_file',
        interface=util.Function(
            function=write_bids_events_file,
            input_names=inspect.getargspec(write_bids_events_file)[0],
            output_names=['out_file']))

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = out_dir
    datasink.inputs.parameterization = False

    workflow_connections = [
        (get_f_scan, dummy_scans, [('nii_path', 'in_file')]),
        (get_f_scan, dummy_scans, [('scan_path', 'scan_dir')]),
        (dummy_scans, events_file, [('deleted_scans', 'forced_dummy_scans')]),
        (get_f_scan, events_file, [('nii_path', 'timecourse_file'),
                                   ('task', 'task'),
                                   ('scan_path', 'scan_dir')]),
        (events_file, datasink, [('out_file', 'func.@events')]),
        (get_f_scan, events_file, [('events_name', 'out_file')]),
        (get_f_scan, datasink, [(('subject_session', ss_to_path), 'container')
                                ]),
    ]

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        realigner.inputs.tr = tr
        realigner.inputs.slice_info = 3  #3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        realigner.inputs.time_repetition = tr
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
        ])

    #ADDING SELECTABLE NODES AND EXTENDING WORKFLOW AS APPROPRIATE:
    if actual_size:
        s_biascorrect, f_biascorrect = real_size_nodes()
    else:
        s_biascorrect, f_biascorrect = inflated_size_nodes()

    if structural_scan_types.any():
        get_s_scan = pe.Node(
            name='get_s_scan',
            interface=util.Function(
                function=get_bids_scan,
                input_names=inspect.getargspec(get_bids_scan)[0],
                output_names=[
                    'scan_path', 'scan_type', 'task', 'nii_path', 'nii_name',
                    'file_name', 'events_name', 'subject_session'
                ]))
        get_s_scan.inputs.ignore_exception = True
        get_s_scan.inputs.data_selection = data_selection
        get_s_scan.inputs.bids_base = bids_base

        if actual_size:
            s_register, s_warp, _, _ = DSURQEc_structural_registration(
                template,
                registration_mask,
                parameters=params,
                phase_dictionary=phase_dictionary,
            )
            #TODO: incl. in func registration
            if autorotate:
                workflow_connections.extend([
                    (s_biascorrect, s_rotated, [('output_image', 'out_file')]),
                    (s_rotated, s_register, [('out_file', 'moving_image')]),
                ])
            else:
                workflow_connections.extend([
                    (s_biascorrect, s_register, [('output_image',
                                                  'moving_image')]),
                    (s_register, s_warp, [('composite_transform', 'transforms')
                                          ]),
                    (get_s_scan, s_warp, [('nii_path', 'input_image')]),
                    (s_warp, datasink, [('output_image', 'anat')]),
                ])
        else:
            s_reg_biascorrect = pe.Node(interface=ants.N4BiasFieldCorrection(),
                                        name="s_reg_biascorrect")
            s_reg_biascorrect.inputs.dimension = 3
            s_reg_biascorrect.inputs.bspline_fitting_distance = 95
            s_reg_biascorrect.inputs.shrink_factor = 2
            s_reg_biascorrect.inputs.n_iterations = [500, 500, 500, 500]
            s_reg_biascorrect.inputs.convergence_threshold = 1e-14

            s_cutoff = pe.Node(interface=fsl.ImageMaths(), name="s_cutoff")
            s_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"

            s_BET = pe.Node(interface=fsl.BET(), name="s_BET")
            s_BET.inputs.mask = True
            s_BET.inputs.frac = 0.3
            s_BET.inputs.robust = True

            s_mask = pe.Node(interface=fsl.ApplyMask(), name="s_mask")
            s_register, s_warp, f_warp = structural_registration(template)

            workflow_connections.extend([
                (get_s_scan, s_reg_biascorrect, [('nii_path', 'input_image')]),
                (s_reg_biascorrect, s_cutoff, [('output_image', 'in_file')]),
                (s_cutoff, s_BET, [('out_file', 'in_file')]),
                (s_biascorrect, s_mask, [('output_image', 'in_file')]),
                (s_BET, s_mask, [('mask_file', 'mask_file')]),
            ])

            #TODO: incl. in func registration
            if autorotate:
                workflow_connections.extend([
                    (s_mask, s_rotated, [('out_file', 'out_file')]),
                    (s_rotated, s_register, [('out_file', 'moving_image')]),
                ])
            else:
                workflow_connections.extend([
                    (s_mask, s_register, [('out_file', 'moving_image')]),
                    (s_register, s_warp, [('composite_transform', 'transforms')
                                          ]),
                    (get_s_scan, s_warp, [('nii_path', 'input_image')]),
                    (s_warp, datasink, [('output_image', 'anat')]),
                ])

        if autorotate:
            # NOTE: `s_rotated` is consumed by connections defined above, and
            # `autorotate` here refers to the boolean parameter, which shadows
            # the intended rotation helper.
            s_rotated = autorotate(template)

        workflow_connections.extend([
            (get_f_scan, get_s_scan, [('subject_session', 'selector')]),
            (get_s_scan, s_warp, [('nii_name', 'output_image')]),
            (get_s_scan, s_biascorrect, [('nii_path', 'input_image')]),
        ])

    if functional_registration_method == "structural":
        if not structural_scan_types.any():
            raise ValueError(
                'The option `registration="structural"` requires there to be a structural scan type.'
            )
        workflow_connections.extend([
            (s_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, f_warp, [('slice_time_corrected_file',
                                      'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])
    elif functional_registration_method == "composite":
        if not structural_scan_types.any():
            raise ValueError(
                'The option `registration="composite"` requires there to be a structural scan type.'
            )
        _, _, f_register, f_warp = DSURQEc_structural_registration(
            template,
            registration_mask,
            parameters=params,
            phase_dictionary=phase_dictionary,
        )
        temporal_mean = pe.Node(interface=fsl.MeanImage(),
                                name="temporal_mean")

        merge = pe.Node(util.Merge(2), name='merge')

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (s_biascorrect, f_register, [('output_image', 'fixed_image')]),
            (f_register, merge, [('composite_transform', 'in1')]),
            (s_register, merge, [('composite_transform', 'in2')]),
            (merge, f_warp, [('out', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file', 'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file', 'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])
    elif functional_registration_method == "functional":
        f_register, f_warp = functional_registration(template)

        # Temporal mean of the functional series; in this branch it is
        # registered directly to the template.
        temporal_mean = pe.Node(interface=fsl.MeanImage(), name="temporal_mean")

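        # Optional (currently disabled) masking of the functional mean before
        # registration: intensity cutoff followed by BET skull-stripping.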
        #f_cutoff = pe.Node(interface=fsl.ImageMaths(), name="f_cutoff")
        #f_cutoff.inputs.op_string = "-thrP 30"

        #f_BET = pe.Node(interface=fsl.BET(), name="f_BET")
        #f_BET.inputs.mask = True
        #f_BET.inputs.frac = 0.5

        workflow_connections.extend([
            (temporal_mean, f_biascorrect, [('out_file', 'input_image')]),
            #(f_biascorrect, f_cutoff, [('output_image', 'in_file')]),
            #(f_cutoff, f_BET, [('out_file', 'in_file')]),
            #(f_BET, f_register, [('out_file', 'moving_image')]),
            (f_biascorrect, f_register, [('output_image', 'moving_image')]),
            (f_register, f_warp, [('composite_transform', 'transforms')]),
        ])
        if realign == "space":
            workflow_connections.extend([
                (realigner, temporal_mean, [('realigned_files', 'in_file')]),
                (realigner, f_warp, [('realigned_files', 'input_image')]),
            ])
        elif realign == "spacetime":
            workflow_connections.extend([
                (realigner, temporal_mean, [('out_file', 'in_file')]),
                (realigner, f_warp, [('out_file', 'input_image')]),
            ])
        elif realign == "time":
            workflow_connections.extend([
                (realigner, temporal_mean, [('slice_time_corrected_file', 'in_file')]),
                (realigner, f_warp, [('slice_time_corrected_file', 'input_image')]),
            ])
        else:
            workflow_connections.extend([
                (dummy_scans, temporal_mean, [('out_file', 'in_file')]),
                (dummy_scans, f_warp, [('out_file', 'input_image')]),
            ])
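    # Post-warp processing: optionally blur in-plane to the requested FWHM
    # and/or invert voxel intensities (for negative-contrast-agent scans)
    # before handing the final functional image to the datasink.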

    invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
    blur = pe.Node(interface=afni.preprocess.BlurToFWHM(), name="blur")
    blur.inputs.fwhmxy = functional_blur_xy
    if functional_blur_xy and negative_contrast_agent:
        workflow_connections.extend([
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, invert, [(('out_file', fslmaths_invert_values), 'op_string'),
                            ('out_file', 'in_file')]),
            (get_f_scan, invert, [('nii_name', 'out_file')]),
            (invert, datasink, [('out_file', 'func')]),
        ])

    elif functional_blur_xy:
        workflow_connections.extend([
            (get_f_scan, blur, [('nii_name', 'out_file')]),
            (f_warp, blur, [('output_image', 'in_file')]),
            (blur, datasink, [('out_file', 'func')]),
        ])

    elif negative_contrast_agent:
        workflow_connections.extend([
            (get_f_scan, invert, [('nii_name', 'out_file')]),
            (f_warp, invert, [(('output_image', fslmaths_invert_values), 'op_string'),
                              ('output_image', 'in_file')]),
            (invert, datasink, [('out_file', 'func')]),
        ])
    else:
        workflow_connections.extend([
            (get_f_scan, f_warp, [('nii_name', 'output_image')]),
            (f_warp, datasink, [('output_image', 'func')]),
        ])

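    # Crash dumps go next to the outputs; in debug mode all nipype loggers are
    # raised to DEBUG and mirrored to a log file.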
    workflow_config = {
        'execution': {
            'crashdump_dir': path.join(out_base, 'crashdump'),
        }
    }
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    # The workflow name determines the work directory; the output name is
    # handled by the datasink.
    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_base
    workflow.config = workflow_config
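    # Save a hierarchical graph of the workflow for inspection before running.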
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
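    # Unless intermediate results are to be kept, remove the work directory;
    # if its top level is a symlink, only delete its contents.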
    if not keep_work:
        workdir = path.join(workflow.base_dir, workdir_name)
        try:
            shutil.rmtree(workdir)
        except OSError as e:
            if 'Cannot call rmtree on a symbolic link' in str(e):
                print(
                    'Not deleting top level workdir (`{}`), as it is a symlink. Deleting only contents instead'
                    .format(workdir))
                for file_object in os.listdir(workdir):
                    file_object_path = os.path.join(workdir, file_object)
                    if os.path.isfile(file_object_path):
                        os.unlink(file_object_path)
                    else:
                        shutil.rmtree(file_object_path)
            else:
                raise